From f23c63e64a646f8f64ea08b4781c19409b9d7645 Mon Sep 17 00:00:00 2001 From: "wave.leaf27" Date: Mon, 29 Jul 2024 20:04:15 +0800 Subject: [PATCH] release code --- .gitignore | 11 +- .gitmodules | 3 + README.md | 43 + docs/dataset.md | 173 ++++ docs/figs/network.png | Bin 0 -> 951961 bytes docs/install.md | 45 + docs/trainval.md | 28 + environment.yaml | 306 +++++++ install.sh | 7 + main.py | 16 - projects/configs/__init__.py | 1 + projects/configs/bevformer/bevformer_base.py | 257 ------ .../configs/bevformer/bevformer_base_occ.py | 279 ------ .../bevformer/bevformer_base_occ_conv.py | 280 ------ .../bevformer/bevformer_base_occ_conv3d.py | 279 ------ .../bevformer/bevformer_base_occ_conv3d_pj.py | 288 ------ .../bevformer_base_occ_conv3d_use_mask.py | 280 ------ .../bevformer/bevformer_base_occ_focal.py | 279 ------ .../bevformer/bevformer_base_occ_test.py | 280 ------ projects/configs/bevformer/bevformer_small.py | 268 ------ projects/configs/bevformer/bevformer_tiny.py | 270 ------ .../bevformer_fp16/bevformer_tiny_fp16.py | 272 ------ projects/configs/cvtocc/bevformer_nuscenes.py | 423 +++++++++ projects/configs/cvtocc/bevformer_waymo.py | 537 +++++++++++ .../cvtocc/bevformer_wotsa_nuscenes.py | 423 +++++++++ .../configs/cvtocc/bevformer_wotsa_waymo.py | 537 +++++++++++ .../cvtocc/bevformer_wrapconcat_waymo.py | 538 +++++++++++ projects/configs/cvtocc/cvtocc_nuscenes.py | 423 +++++++++ projects/configs/cvtocc/cvtocc_waymo.py | 538 +++++++++++ projects/configs/cvtocc/solofusion_waymo.py | 477 ++++++++++ projects/mmdet3d_plugin/__init__.py | 2 + projects/mmdet3d_plugin/bevformer/__init__.py | 1 + .../bevformer/apis/mmdet_train.py | 24 +- .../mmdet3d_plugin/bevformer/apis/test.py | 34 +- .../mmdet3d_plugin/bevformer/apis/test_occ.py | 41 +- .../bevformer/dense_heads/__init__.py | 6 +- .../bevformer/dense_heads/bevformer_head.py | 523 ----------- .../bevformer/dense_heads/occformer_head.py | 447 ++++----- .../dense_heads/occformer_head_3d.py | 269 ------ .../dense_heads/occformer_head_waymo.py | 452 +++++++++ .../dense_heads/solo_head_occ_waymo.py | 260 ++++++ .../bevformer/detectors/__init__.py | 8 +- .../bevformer/detectors/bevdet_solo.py | 158 ++++ .../bevformer/detectors/bevformer.py | 292 ------ .../bevformer/detectors/bevformer_fp16.py | 89 -- .../bevformer/detectors/centerpoint_solo.py | 196 ++++ .../bevformer/detectors/occformer.py | 531 ++++++----- .../bevformer/detectors/occformer_waymo.py | 428 +++++++++ .../bevformer/detectors/solofusion.py | 863 ++++++++++++++++++ .../mmdet3d_plugin/bevformer/loss/__init__.py | 1 + .../bevformer/loss/lovasz_losses.py | 319 +++++++ .../mmdet3d_plugin/bevformer/loss/ohem.py | 119 +++ .../bevformer/modules/__init__.py | 23 +- .../bevformer/modules/concat_conv_module.py | 219 +++++ .../bevformer/modules/cost_volume_module.py | 287 ++++++ .../modules/custom_base_transformer_layer.py | 19 +- .../bevformer/modules/decode_head.py | 307 +++++++ .../bevformer/modules/dts_transformer.py | 541 +++++++++++ .../bevformer/modules/encoder.py | 398 ++++---- .../bevformer/modules/encoder_3d.py | 390 ++++---- .../{encoder_test.py => encoder_3d_conv.py} | 124 ++- .../bevformer/modules/encoder_waymo.py | 556 +++++++++++ .../bevformer/modules/hybrid_transformer.py | 519 +++++++++++ .../bevformer/modules/occ_conv_decoder.py | 80 ++ .../bevformer/modules/occ_conv_decoder_3d.py | 63 ++ .../bevformer/modules/occ_transformer.py | 403 ++++---- .../modules/occ_transformer_waymo.py | 560 ++++++++++++ .../bevformer/modules/pyramid_transformer.py | 542 +++++++++++ 
.../bevformer/modules/residual_block_3d.py | 52 ++ .../bevformer/modules/resnet.py | 85 ++ .../modules/spatial_cross_attention.py | 109 +-- .../modules/spatial_cross_attention_3d.py | 399 ++++++++ .../modules/temporal_self_attention.py | 68 +- .../modules/temporal_self_attention_3d.py | 344 +++++++ .../mmdet3d_plugin/bevformer/modules/unet.py | 788 ++++++++++++++++ .../bevformer/modules/view_transformer.py | 389 ++++++++ .../modules/view_transformer_solo.py | 376 ++++++++ .../bevformer/modules/vol_encoder.py | 476 ++++++++++ .../bevformer/modules/voxel_encoder.py | 470 ++++++++++ .../core/evaluation/eval_hooks.py | 13 +- projects/mmdet3d_plugin/datasets/__init__.py | 5 +- projects/mmdet3d_plugin/datasets/builder.py | 77 +- projects/mmdet3d_plugin/datasets/cdist.py | 154 ++++ .../datasets/nuscenes_dataset.py | 2 +- .../{nuscnes_eval.py => nuscenes_eval.py} | 0 .../datasets/nuscenes_mono_dataset.py | 2 +- .../mmdet3d_plugin/datasets/nuscenes_occ.py | 293 +++--- .../mmdet3d_plugin/datasets/occ_metrics.py | 358 ++++++++ .../datasets/pipelines/__init__.py | 5 +- .../datasets/pipelines/loading.py | 214 +++++ .../datasets/pipelines/transform_3d.py | 257 +++--- .../datasets/samplers/__init__.py | 3 +- .../samplers/my_group_batch_sampler.py | 137 +++ .../samplers/nuscene_dataset_detail.py | 20 + .../datasets/samplers/waymo_dataset_detail.py | 14 + .../datasets/waymo_temporal_zlt.py | 380 ++++++++ projects/mmdet3d_plugin/datasets/zltwaymo.py | 590 ++++++++++++ .../models/backbones/__init__.py | 4 +- .../models/backbones/bevdetresnet.py | 75 ++ .../mmdet3d_plugin/models/necks/__init__.py | 3 + projects/mmdet3d_plugin/models/necks/fpn.py | 202 ++++ .../mmdet3d_plugin/models/necks/second_fpn.py | 105 +++ .../models/necks/view_transformer.py | 302 ++++++ .../mmdet3d_plugin/models/utils/__init__.py | 3 +- .../models/utils/positional_encoding.py | 155 ++++ requirements.txt | 306 +++++++ tools/README.md | 88 ++ tools/__init__.py | 0 tools/analysis_tools/analyze_logs.py | 135 ++- tools/analysis_tools/benchmark.py | 57 +- tools/analysis_tools/get_params.py | 9 +- tools/analysis_tools/visual.py | 561 +++++++----- tools/condition_benchmark.py | 26 + tools/create_data.py | 322 ++++--- tools/data_converter/create_gt_database.py | 226 ++--- tools/data_converter/indoor_converter.py | 94 +- tools/data_converter/kitti_converter.py | 379 ++++---- tools/data_converter/kitti_data_utils.py | 613 +++++++------ tools/data_converter/lyft_converter.py | 256 +++--- tools/data_converter/lyft_data_fixer.py | 38 +- tools/data_converter/nuimage_converter.py | 182 ++-- tools/data_converter/nuscenes_converter.py | 569 ++++++------ .../data_converter/nuscenes_occ_converter.py | 569 ++++++------ .../nuscenes_occ_converter_own.py | 605 ++++++------ tools/data_converter/s3dis_data_utils.py | 151 +-- tools/data_converter/scannet_data_utils.py | 243 ++--- tools/data_converter/sunrgbd_data_utils.py | 205 +++-- tools/data_converter/test_nus.py | 11 +- tools/data_converter/waymo_converter.py | 316 ++++--- tools/dist_test.sh | 6 +- tools/dist_train.sh | 2 +- tools/fp16/train.py | 216 ++--- tools/misc/browse_dataset.py | 190 ++-- tools/misc/fuse_conv_bn.py | 21 +- tools/misc/print_config.py | 12 +- tools/misc/visualize_results.py | 27 +- .../convert_votenet_checkpoints.py | 103 ++- tools/model_converters/publish_model.py | 22 +- tools/model_converters/regnet2mmdet.py | 79 +- tools/my_dist_train.sh | 20 - tools/slurm_test.sh | 25 + tools/slurm_train.sh | 26 + tools/test.py | 234 ++--- tools/test_code.py | 15 - tools/test_data_pipeline.py | 42 
+ tools/test_occ.py | 262 ------ tools/train.py | 196 ++-- tools/vis_tools/README.md | 79 ++ tools/vis_tools/__init__.py | 0 tools/vis_tools/utils.py | 101 ++ tools/vis_tools/vis_occ.py | 521 +++++++++++ tools/vis_tools/vis_pose.py | 120 +++ tools/vis_tools/vis_ref.py | 143 +++ tools/vis_tools/vis_ref_dataloader.py | 162 ++++ 154 files changed, 24083 insertions(+), 9086 deletions(-) create mode 100644 .gitmodules create mode 100644 README.md create mode 100644 docs/dataset.md create mode 100644 docs/figs/network.png create mode 100644 docs/install.md create mode 100644 docs/trainval.md create mode 100644 environment.yaml create mode 100644 install.sh delete mode 100644 main.py create mode 100644 projects/configs/__init__.py delete mode 100644 projects/configs/bevformer/bevformer_base.py delete mode 100644 projects/configs/bevformer/bevformer_base_occ.py delete mode 100644 projects/configs/bevformer/bevformer_base_occ_conv.py delete mode 100644 projects/configs/bevformer/bevformer_base_occ_conv3d.py delete mode 100644 projects/configs/bevformer/bevformer_base_occ_conv3d_pj.py delete mode 100644 projects/configs/bevformer/bevformer_base_occ_conv3d_use_mask.py delete mode 100644 projects/configs/bevformer/bevformer_base_occ_focal.py delete mode 100644 projects/configs/bevformer/bevformer_base_occ_test.py delete mode 100644 projects/configs/bevformer/bevformer_small.py delete mode 100644 projects/configs/bevformer/bevformer_tiny.py delete mode 100644 projects/configs/bevformer_fp16/bevformer_tiny_fp16.py create mode 100644 projects/configs/cvtocc/bevformer_nuscenes.py create mode 100644 projects/configs/cvtocc/bevformer_waymo.py create mode 100644 projects/configs/cvtocc/bevformer_wotsa_nuscenes.py create mode 100644 projects/configs/cvtocc/bevformer_wotsa_waymo.py create mode 100644 projects/configs/cvtocc/bevformer_wrapconcat_waymo.py create mode 100644 projects/configs/cvtocc/cvtocc_nuscenes.py create mode 100644 projects/configs/cvtocc/cvtocc_waymo.py create mode 100644 projects/configs/cvtocc/solofusion_waymo.py delete mode 100644 projects/mmdet3d_plugin/bevformer/dense_heads/bevformer_head.py delete mode 100644 projects/mmdet3d_plugin/bevformer/dense_heads/occformer_head_3d.py create mode 100644 projects/mmdet3d_plugin/bevformer/dense_heads/occformer_head_waymo.py create mode 100644 projects/mmdet3d_plugin/bevformer/dense_heads/solo_head_occ_waymo.py create mode 100644 projects/mmdet3d_plugin/bevformer/detectors/bevdet_solo.py delete mode 100644 projects/mmdet3d_plugin/bevformer/detectors/bevformer.py delete mode 100644 projects/mmdet3d_plugin/bevformer/detectors/bevformer_fp16.py create mode 100644 projects/mmdet3d_plugin/bevformer/detectors/centerpoint_solo.py create mode 100644 projects/mmdet3d_plugin/bevformer/detectors/occformer_waymo.py create mode 100644 projects/mmdet3d_plugin/bevformer/detectors/solofusion.py create mode 100644 projects/mmdet3d_plugin/bevformer/loss/__init__.py create mode 100644 projects/mmdet3d_plugin/bevformer/loss/lovasz_losses.py create mode 100644 projects/mmdet3d_plugin/bevformer/loss/ohem.py create mode 100644 projects/mmdet3d_plugin/bevformer/modules/concat_conv_module.py create mode 100644 projects/mmdet3d_plugin/bevformer/modules/cost_volume_module.py create mode 100644 projects/mmdet3d_plugin/bevformer/modules/decode_head.py create mode 100644 projects/mmdet3d_plugin/bevformer/modules/dts_transformer.py rename projects/mmdet3d_plugin/bevformer/modules/{encoder_test.py => encoder_3d_conv.py} (65%) create mode 100644 
projects/mmdet3d_plugin/bevformer/modules/encoder_waymo.py create mode 100644 projects/mmdet3d_plugin/bevformer/modules/hybrid_transformer.py create mode 100644 projects/mmdet3d_plugin/bevformer/modules/occ_conv_decoder.py create mode 100644 projects/mmdet3d_plugin/bevformer/modules/occ_conv_decoder_3d.py create mode 100644 projects/mmdet3d_plugin/bevformer/modules/occ_transformer_waymo.py create mode 100644 projects/mmdet3d_plugin/bevformer/modules/pyramid_transformer.py create mode 100644 projects/mmdet3d_plugin/bevformer/modules/residual_block_3d.py create mode 100644 projects/mmdet3d_plugin/bevformer/modules/resnet.py create mode 100644 projects/mmdet3d_plugin/bevformer/modules/spatial_cross_attention_3d.py create mode 100644 projects/mmdet3d_plugin/bevformer/modules/temporal_self_attention_3d.py create mode 100644 projects/mmdet3d_plugin/bevformer/modules/unet.py create mode 100644 projects/mmdet3d_plugin/bevformer/modules/view_transformer.py create mode 100644 projects/mmdet3d_plugin/bevformer/modules/view_transformer_solo.py create mode 100644 projects/mmdet3d_plugin/bevformer/modules/vol_encoder.py create mode 100644 projects/mmdet3d_plugin/bevformer/modules/voxel_encoder.py create mode 100644 projects/mmdet3d_plugin/datasets/cdist.py rename projects/mmdet3d_plugin/datasets/{nuscnes_eval.py => nuscenes_eval.py} (100%) create mode 100644 projects/mmdet3d_plugin/datasets/occ_metrics.py create mode 100644 projects/mmdet3d_plugin/datasets/samplers/my_group_batch_sampler.py create mode 100644 projects/mmdet3d_plugin/datasets/samplers/nuscene_dataset_detail.py create mode 100644 projects/mmdet3d_plugin/datasets/samplers/waymo_dataset_detail.py create mode 100644 projects/mmdet3d_plugin/datasets/waymo_temporal_zlt.py create mode 100644 projects/mmdet3d_plugin/datasets/zltwaymo.py create mode 100644 projects/mmdet3d_plugin/models/backbones/bevdetresnet.py create mode 100644 projects/mmdet3d_plugin/models/necks/__init__.py create mode 100644 projects/mmdet3d_plugin/models/necks/fpn.py create mode 100644 projects/mmdet3d_plugin/models/necks/second_fpn.py create mode 100644 projects/mmdet3d_plugin/models/necks/view_transformer.py create mode 100644 projects/mmdet3d_plugin/models/utils/positional_encoding.py create mode 100644 requirements.txt create mode 100644 tools/README.md create mode 100644 tools/__init__.py create mode 100644 tools/condition_benchmark.py delete mode 100755 tools/my_dist_train.sh create mode 100755 tools/slurm_test.sh create mode 100755 tools/slurm_train.sh delete mode 100644 tools/test_code.py create mode 100644 tools/test_data_pipeline.py delete mode 100644 tools/test_occ.py create mode 100644 tools/vis_tools/README.md create mode 100644 tools/vis_tools/__init__.py create mode 100644 tools/vis_tools/utils.py create mode 100644 tools/vis_tools/vis_occ.py create mode 100644 tools/vis_tools/vis_pose.py create mode 100644 tools/vis_tools/vis_ref.py create mode 100644 tools/vis_tools/vis_ref_dataloader.py diff --git a/.gitignore b/.gitignore index a623a47..2103001 100644 --- a/.gitignore +++ b/.gitignore @@ -121,6 +121,9 @@ data *.pkl.json *.log.json work_dirs/ +ckpts/ +ckpts +work_dirs work_dirs*/ exps/ *~ @@ -130,7 +133,7 @@ exps/ # demo *.jpg -*.png +.png *.obj *.ply *.pt @@ -138,7 +141,6 @@ exps/ -tmp.py *tmp* build/ data/ @@ -146,3 +148,8 @@ output/ work_dirs +result.txt +*.csv +projects/configs/vis_weight* +example.txt +important.txt \ No newline at end of file diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..3733cb1 --- /dev/null +++ 
b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "projects/SAN"]
+ path = projects/SAN
+ url = https://github.com/waveleaf27/SAN.git
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..e66c2ca
--- /dev/null
+++ b/README.md
@@ -0,0 +1,43 @@
+# [ECCV'24] CVT-Occ: Cost Volume Temporal Fusion for 3D Occupancy Prediction
+
+Zhangchen Ye<sup>1*</sup>, Tao Jiang<sup>1,2*</sup>, Chenfeng Xu<sup>3</sup>, Yiming Li<sup>4</sup>, and Hang Zhao<sup>1,2,5✉</sup>
+
+<sup>1</sup>IIIS, Tsinghua University <sup>2</sup>Shanghai AI Lab <sup>3</sup>UC Berkeley <sup>4</sup>New York University <sup>5</sup>Shanghai Qi Zhi Institute
+
+![network](./docs/figs/network.png)
+
+## News
+- [2024/07/29]: Code Released.
+
+- [2024/07/04]: Our paper has been accepted by [ECCV2024](https://eccv2024.ecva.net/).
+
+## Abstract
+Vision-based 3D occupancy prediction is significantly challenged by the inherent limitations of monocular vision in depth estimation. This paper introduces CVT-Occ, a novel approach that leverages temporal fusion through the geometric correspondence of voxels over time to improve the accuracy of 3D occupancy predictions. By sampling points along the line of sight of each voxel and integrating the features of these points from historical frames, we construct a cost volume feature map that refines current volume features for improved prediction outcomes. Our method takes advantage of parallax cues from historical observations and employs a data-driven approach to learn the cost volume. We validate the effectiveness of CVT-Occ through rigorous experiments on the Occ3D-Waymo dataset, where it outperforms state-of-the-art methods in 3D occupancy prediction with minimal additional computational cost.
+
+## Get Started
+- [install.md](docs/install.md)
+- [dataset.md](docs/dataset.md)
+- [trainval.md](docs/trainval.md)
+
+## Model Zoo
+
+All models can be downloaded from [HERE](https://drive.google.com/drive/folders/1m3r4VrjY1G8N-h7NVyUDc0oOI5g8f7nT?usp=sharing)
+
+### Occ3D-Waymo
+
+| Method | mIoU | Go | Vehicle | Pedestrian | Sign | Bicyclist | Traffic Light | Pole | Cons. Cone | Bicycle | Building | Vegetation | Tree Trunk | Road | Walkable |
+|---------------------|-------|-------|---------|------------|-------|-----------|---------------|-------|------------|---------|----------|------------|------------|-------|----------|
+| [BEVFormer-w/o TSA](projects/configs/cvtocc/bevformer_wotsa_waymo.py) | 23.87 | **7.50** | 34.54 | 21.07 | 9.69 | **20.96** | 11.48 | 11.48 | 14.06 | 14.51 | 23.14 | 21.82 | 8.57 | 78.45 | 56.89 |
+| [BEVFormer](projects/configs/cvtocc/bevformer_waymo.py) | 24.58 | 7.18 | 36.06 | 21.00 | 9.76 | 20.23 | 12.61 | 14.52 | 14.70 | 16.06 | 23.98 | 22.50 | 9.39 | 79.11 | 57.04 |
+| [SOLOFusion](projects/configs/cvtocc/solofusion_waymo.py) | 24.73 | 4.97 | 32.45 | 18.28 | 10.33 | 17.14 | 8.07 | 17.83 | 16.23 | 19.30 | **31.49** | **28.98** | **16.93** | 70.95 | 53.28 |
+| [BEVFormer-WrapConcat](projects/configs/cvtocc/bevformer_wrapconcat_waymo.py) | 25.07 | 6.20 | 36.17 | 20.95 | 9.56 | 20.58 | **12.82** | 16.24 | 14.31 | 16.78 | 25.14 | 23.56 | 12.81 | 79.04 | 56.83 |
+| [CVT-Occ (ours)](projects/configs/cvtocc/cvtocc_waymo.py) | **27.37** | 7.44 | **41.00** | **23.93** | **11.92** | 20.81 | 12.07 | **18.03** | **16.88** | **21.37** | 29.40 | 27.42 | 14.67 | **79.12** | **59.09** |
+
+### Occ3D-NuScenes
+
+| Method | mIoU | others | barrier | bicycle | bus | car | Cons. vehicle | motorcycle | pedestrian | traffic cone | trailer | truck | Dri. Sur | other flat | sidewalk | terrain | manmade | vegetation |
+|---------------------|-------|--------|---------|---------|------|------|---------------|------------|------------|--------------|---------|-------|----------|------------|----------|---------|---------|------------|
+| [BEVFormer-w/o TSA](projects/configs/cvtocc/bevformer_wotsa_nuscenes.py) | 38.05 | 9.11 | 45.68 | 22.61 | 46.19 | 52.97 | 20.27 | 26.5 | 26.8 | 26.21 | 32.29 | 37.58 | 80.5 | 40.6 | 49.93 | 52.48 | 41.59 | 35.51 |
+| [BEVFormer](projects/configs/cvtocc/bevformer_nuscenes.py) | 39.04 | **9.57** | 47.13 | 22.52 | 47.61 | 54.14 | 20.39 | 26.44 | 28.12 | 27.46 | 34.53 | 39.69 | 81.44 | **41.14** | 50.79 | 54.00 | 43.08 | 35.60 |
+| [CVT-Occ (ours)](projects/configs/cvtocc/cvtocc_nuscenes.py) | **40.34** | 9.45 | **49.46** | **23.57** | **49.18** | **55.63** | **23.1** | **27.85** | **28.88** | **29.07** | **34.97** | **40.98** | **81.44** | 40.92 | **51.37** | **54.25** | **45.94** | **39.71** |
+
diff --git a/docs/dataset.md b/docs/dataset.md
new file mode 100644
index 0000000..8fe4f3d
--- /dev/null
+++ b/docs/dataset.md
@@ -0,0 +1,173 @@
+# Preparing Dataset
+
+## Occ3D-Waymo
+
+| Type | Info |
+| :----: | :----: |
+| train | 798 scenes |
+| val | 202 scenes |
+| Time Span | 20s |
+| Frame | 200 per scene |
+| Time Interval | 0.1s |
+| cameras | 5 |
+| voxel size | [0.4m, 0.4m, 0.4m] |
+| range | [-40m, -40m, -1m, 40m, 40m, 5.4m] |
+| volume size | [200, 200, 16] |
+| class labels | 0 - 14, 23 |
+
+- sensor:
+
+  - 5 cameras: front (image_0), front left (image_1), front right (image_2), side left (image_3), side right (image_4). The corresponding data folder is given in brackets. Note that the pose info index does not match the image data files; the `get_data_info` function in `waymo_temporal_zlt.py` contains code that fixes this mismatch.
+
+  - The size of image_0 to image_2 is 1280x1920; image_3 and image_4 are 886x1920. All of them are resized and padded to 640x960.
+
+- coordinate:
+
+  - The whole dataset follows the `right-hand rule` for its coordinate systems.
+
+  - The global coordinate system: the up (z) axis is aligned with the direction of gravity, upward being positive; east (x) points due east along the line of latitude, and north (y) points toward the North Pole.
+
+  - The vehicle coordinate system moves with the car, with the X-axis pointing forward, the Y-axis pointing to the left, and the Z-axis pointing up.
+
+  - The sensor coordinates can be obtained from the vehicle coordinates via a rotation matrix, which can be viewed as an extrinsic matrix.
+
+- Voxel semantics for each sample frame are given as `[semantics]` in the `labels.npz`. Please note that there is a slight difference between the Occupancy classes and the classes used in the [Waymo LiDAR segmentation](https://github.com/waymo-research/waymo-open-dataset/blob/bae19fa0a36664da18b691349955b95b29402713/waymo_open_dataset/protos/segmentation.proto#L20).
+
+- The dataset contains 15 classes. The definition of classes from 0 to 14 is `TYPE_GENERALOBJECT, TYPE_VEHICLE, TYPE_PEDESTRIAN, TYPE_SIGN, TYPE_CYCLIST, TYPE_TRAFFIC_LIGHT, TYPE_POLE, TYPE_CONSTRUCTION_CONE, TYPE_BICYCLE, TYPE_MOTORCYCLE, TYPE_BUILDING, TYPE_VEGETATION, TYPE_TREE_TRUNK, TYPE_ROAD, TYPE_WALKABLE`.
+
+- The label 15 category represents voxels that are not occupied by anything, which is named `free`. Note that the `free` label is stored as `23` in the ground-truth files; it is converted to `15` in the dataloader.
+
+**1. Prepare Waymo dataset**
+
+Download the Waymo v1.3.1 full dataset from the [Waymo website](https://waymo.com/open/download/).
+
+**2. Prepare 3D Occupancy ground truth**
+
+Download the gts with voxel size 0.4m, the annotation files (`waymo_infos_{train, val}.pkl`), and the pose files (`cam_infos.pkl` and `cam_infos_vali.pkl`) we provide [HERE](https://drive.google.com/drive/folders/13WxRl9Zb_AshEwvD96Uwz8cHjRNrtfQk), and organize your folder structure as below:
+
+```
+└── Occ3D-Waymo
+    ├── waymo_infos_train.pkl
+    ├── waymo_infos_val.pkl
+    ├── cam_infos.pkl
+    ├── cam_infos_vali.pkl
+    ├── training
+    |   ├── 000
+    |   |   ├── 000_04.npz
+    |   |   ├── 001_04.npz
+    |   |   ├── 002_04.npz
+    |   |   └── ...
+    |   |
+    |   ├── 001
+    |   |   ├── 000_04.npz
+    |   |   └── ...
+    |   ├── ...
+    |   |
+    |   └── 797
+    |       ├── 000_04.npz
+    |       └── ...
+    |
+    ├── validation
+    |   ├── 000
+    |   |   ├── 000_04.npz
+    |   |   └── ...
+    |   ├── ...
+    |   |
+    |   └── 201
+    |       ├── 000_04.npz
+    |       └── ...
+```
+
+- `training` and `validation` contain the data for each scene. Each scene includes the corresponding ground truth for each frame.
+
+- `*.npz` contains `[voxel_label]`, `[origin_voxel_state]`, `[final_voxel_state]`, and `[infov]` for each frame.
+
+  - `[voxel_label]`: semantic ground truth.
+
+  - `[origin_voxel_state]`: lidar mask.
+
+  - `[final_voxel_state]`: camera mask. Since we focus on a vision-centric task, we provide a binary voxel mask `[mask_camera]`, indicating whether each voxel is observed in the current camera views.
+
+  - `[infov]`: field-of-view mask. Since Waymo only has 5 cameras and does not provide a 360-degree surround view, we additionally provide `[mask_fov]`.
+
+- `*_04.npz` represents the data with a voxel size of 0.4m.
+
+## Occ3D-NuScenes
+
+| Type | Info |
+| :----: | :----: |
+| train | 600 scenes |
+| val | 150 scenes |
+| Time Span | 20s |
+| Frame | 40 per scene |
+| Time Interval | 0.5s |
+| cameras | 6 |
+| voxel size | [0.4m, 0.4m, 0.4m] |
+| range | [-40m, -40m, -1m, 40m, 40m, 5.4m] |
+| volume size | [200, 200, 16] |
+| classes | 0 - 17 |
+
+- sensor:
+
+  - 6 cameras: Front, Front Right, Front Left, Back, Back Right, Back Left.
+
+  - size of image: 1600x900
+
+- The dataset contains 18 classes. The definition of classes from 0 to 16 is the same as in the [nuScenes-lidarseg](https://github.com/nutonomy/nuscenes-devkit/blob/fcc41628d41060b3c1a86928751e5a571d2fc2fa/python-sdk/nuscenes/eval/lidarseg/README.md) dataset. The label 17 category represents `free`. Voxel semantics for each sample frame are given as `[semantics]` in `labels.npz`.
+
+**1. Prepare NuScenes dataset**
+
+Download the nuScenes V1.0 full dataset and the CAN bus data from the [NuScenes website](https://www.nuscenes.org/download). Organize the folder structure:
+
+```
+cvtocc
+├── project code/
+├── data/
+│   ├── can_bus/
+│   ├── occ3d-nus/
+│   │   ├── maps/
+│   │   ├── samples/
+|   |   |   ├── CAM_BACK
+|   |   |   |   ├── n015-2018-07-18-11-07-57+0800__CAM_BACK__1531883530437525.jpg
+|   |   |   |   └── ...
+|   |   |   ├── CAM_BACK_LEFT
+|   |   |   |   ├── n015-2018-07-18-11-07-57+0800__CAM_BACK_LEFT__1531883530447423.jpg
+|   |   |   |   └── ...
+|   |   |   └── ...
+│   │   ├── v1.0-trainval
+```
+
+- samples/ contains images captured by the various cameras.
+
+**2. Prepare 3D Occupancy ground truth**
+
+Download the gts and annotations.json we provide [HERE](https://drive.google.com/drive/folders/1Xarc91cNCNN3h8Vum-REbI-f0UlSf5Fc) and organize your folder structure as below:
+
+```
+cvtocc
+├── data/
+│   ├── can_bus/
+│   ├── occ3d-nus/
+│   │   ├── maps/
+│   │   ├── samples/
+│   │   ├── v1.0-trainval/
+│   │   ├── gts/
+|   |   |   ├── [scene_name]
+|   |   |   |   ├── [frame_token]
+|   |   |   |   |   └── labels.npz
+|   |   |   |   └── ...
+|   |   |   └── ...
+│   │   └── annotations.json
+```
+
+- gts/ contains the ground truth of each sample. [scene_name] specifies a sequence of frames, and [frame_token] specifies a single frame in a sequence. `labels.npz` contains [semantics], [mask_lidar], and [mask_camera] for each frame.
+
+- annotations.json contains meta information about the dataset.
+
+**3. Generate the info files for training and validation:**
+
+```shell
+python tools/create_data.py occ --root-path ./data/occ3d-nus --out-dir ./data/occ3d-nus --extra-tag occ --version v1.0 --canbus ./data --occ-path ./data/occ3d-nus
+```
+
+Running the above command generates the info files `data/occ3d-nus/occ_infos_temporal_{train, val}.pkl`.
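As a quick sanity check of the downloaded Occ3D-NuScenes ground truth, the short sketch below loads a single `labels.npz` and inspects the `[semantics]`, `[mask_lidar]`, and `[mask_camera]` arrays described above. It is only a sketch: the bracketed path components are placeholders, and the assumption that these keys hold plain NumPy arrays of shape `[200, 200, 16]` follows from the tables in this document rather than from the released tooling.

```python
import numpy as np

# Placeholder path: replace the bracketed parts with a real scene and frame token.
gt = np.load("data/occ3d-nus/gts/[scene_name]/[frame_token]/labels.npz")

semantics = gt["semantics"]      # per-voxel class ids: 0-16 semantic classes plus 17 = free
mask_lidar = gt["mask_lidar"]    # binary mask of voxels observed by the lidar
mask_camera = gt["mask_camera"]  # binary mask of voxels observed by the cameras

assert semantics.shape == (200, 200, 16)  # volume size listed in the Occ3D-NuScenes table

# The benchmark is vision-centric, so only voxels visible to the cameras matter.
occupied_visible = (semantics != 17) & mask_camera.astype(bool)
print("occupied voxels visible to the cameras:", int(occupied_visible.sum()))
```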
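The same kind of check applies to the Occ3D-Waymo `*_04.npz` files, whose keys `[voxel_label]`, `[origin_voxel_state]`, `[final_voxel_state]`, and `[infov]` are documented in the Occ3D-Waymo section above. Again only a sketch: the path is a placeholder, and the `23` to `15` remapping of the `free` label reproduces the dataloader behaviour described in that section.

```python
import numpy as np

FREE_LABEL_GT = 23    # `free` label as stored in the ground-truth files
FREE_LABEL_USED = 15  # `free` label after conversion in the dataloader

# Placeholder path of the form Occ3D-Waymo/training/[scene_idx]/[frame_idx]_04.npz
gt = np.load("Occ3D-Waymo/training/000/000_04.npz")

voxel_label = gt["voxel_label"].copy()   # semantic ground truth, classes 0-14 plus 23
mask_lidar = gt["origin_voxel_state"]    # lidar mask
mask_camera = gt["final_voxel_state"]    # camera mask
mask_fov = gt["infov"]                   # field-of-view mask (Waymo has no 360-degree view)

# Map the free label from 23 to 15 so that the class ids form a contiguous 0-15 range.
voxel_label[voxel_label == FREE_LABEL_GT] = FREE_LABEL_USED

assert voxel_label.shape == (200, 200, 16)  # volume size from the Occ3D-Waymo table
print("class ids present in this frame:", np.unique(voxel_label))
```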
diff --git a/docs/figs/network.png b/docs/figs/network.png
new file mode 100644
index 0000000000000000000000000000000000000000..cd540c06022fa5477a54329bcdcc7c91e5ee52ad
GIT binary patch
literal 951961
z=n32N!%{0auO|S&cLMZkKy9r+JqZ79*1K8JGX!xIBMErjkB4xm1peYSoq=(H(kK}q z<1sXXUfU7?tTwNI<8%WMN%9xyqYWeZFNi^u-LOtZIQto>_9|y5z^U!kA zHG$hy?1)~k+ZH2pp8fY&SN2}lWgkX;Gnr9sVqITUGq$$!L5Uc%HbN zghvrJcW~x#6|`nmp?VJ~DUo-SSlDSl^KV7}CY??r2pdrw_wPHfB@5Lts-8lSbRT)P zR0y%2Gy47{t*#HR`q9h)_ZVGX{>TZ7VBP$Z@5O%o`t&B#oARvkkep>iV4VX37vy{q z!%yC~+VzKk`^w5w8zOglowdoeq`jLUC`7U){DrOdg^d57$LyQUD>%-Sq{?xhJBB+S zW1aDc7#UE#c_Snwtf*f(5iW;on|o&psxh^uX?VQ&q4HgCSTKPF%l%j}sZBc;EOwXX za;;O@4Bd^XHM4|cua(T|4^IQ-yth^f_z4gnh1>_{?Xf+BstfSYUxzzX%+~)58P2K- z-M&N(V%oqwW_)w}UT8!L7dy3r4f=v$6KABeczT^`O~&Ixe9Ic&dA4AN347LFtM7xg zh+tAG=EN?8v@L4eMfqyNxZFB``IqCvYjl^U$eXd z$3Hi<{uU*;`)-!qYO?$BtRdM}G$NbSaN*O1(9&0!uG@*sou?>`<6~^O?{~uyCr7yW zy;4gR@0||d6fy2G?|%ds&O6wST}Ztr{*BO(OC5?Ed+OF@dlf?cwev%I;H>qTM;~H7 zX{JM{nG?o6_TrcQ9P2I*LG5Q>-N-ZDr82gwep1$q&4f{X-&cp8O(L4}R=@DV9m-#2 z93P~J#VtYWi=G0X%X4p@ z^d1-Vxwf2M!OwmB5|3xf$z}x|>A&Q?Oqs9R&il^xvrge^jP>DY?D(6t_u|xOO~+RO zP}y^?r23uN1`6p&N);MGht%lwed#Q6$l}YZ0;OcO=XbAOIJ?nxti?#CU$|&A%szjS z%%+OP##+1~Nb%F|Bt{ZEfLIvQ%s!u1T3Q&jun*i$$ul!(jL674ne4)fn8L!NRF8V` znVHwl{N>CoEK~`9?emQ<&`9ww76$0DJdvICY@IRB$6*V07Yo_p~^^OqS(r zSMQ26xb6c!T92ISuqN7#l)Kr18cwMOR*wY1g7SmAHF&W;(pp+}Y@-oPZgYCzQA*L? z4psTh1pe3M`-@>5i}z{H?jS=@kJ)c0x`cne3Lk_IIC6-ONskEI0((dRr2B4Mhp6@5`&x_(l= zCz<5M_GNO%DW99e;&`cQ30P zo){9KNB(z*h9ur2GM6rg%+N_Yj;;rY_xSQR?wp8f5^!3XpPi|+diRp}XnyeuM2 zaq0@hN#y_b;LDWBTsH9S9aZV8Khu=hu7T(Jo20@yGMenQcm~B-NgV1|zkXzAKeQY#8a2kx7~xIn zrgub_&b*r_1}r6E^)$=hX#(G@RI2il;X2QDa->&d&&bSwRHra>;&RjL#f)n`&AF<2 z+;73Ai$`$%sXnVsiil@{^tS{#Rocvimn%&f=YGvgZ!EK-sWl6wa6?`?&1C?CmekYJ z8;y|!VitcO$uE7{L(kUN7L2#}J5&Hp{ z#p3w;`||;L!QK+hEh)&#%Khx_TBf}2!qB#o@;kx#c>~oeXco<>&9<7}g`W4A_SK|i z;=)HWa=qJg@8e2AAtH#bW-vDEMv0*)<-UfpSHw0f5NknVc%w?;sg;ok=Zw{2l`hM# zgnG3%#YFGqbmd0#A5rcbVA2g&id zc|LM4al7T|E3voJb{olLCUoaEtP7V^-CoJe*v)OTfxj2mly^xvD^$nGdt4kpm>={C z&wKM69lW*CbGK3BnB10u3&*Ri$e_-Gwk_|LJ*pNrT>|Hcq~+3j!RG9iKZzn`yY9FA>Zs0?v-i2lUqcjy{8ZcwF41^N^B<0WNgUFj z1t}?yY8H#?c4FtYXpvLJrHjpC_2s6dBev&-o>88FZQeoBbS5e3=mxwo`|3W&>Uj5|9l?mvQG@DZ*^(*R@;S6W4K!Fr zRCFk|y%dJt-L0`e2LSirh`vqE9F~Fv(k0bTG;z>rJ+DKztBV_6N(&{1nCIVL{al0A z{IeX8dM_R)nTIj#n(+wxEEkt{OGjY}wLJW|ezQ?u5b8L_hcr z>(^kaFD^wgjr#DZnaH37Cae=&+N3v`l{th`Y(GL6x63kM;o8C8Pk)V}PqxmbS@h}A z$26Zdo(f&voHnVahtC3!_|opp&aSDWaFR3o_rn%kZ{-Gcq`BD>;MCRo{(O{I+Or&O zY@r<1#MB(=Y=gV+`B+Z`cj>Lgq2g%_QnqwHs&u{XINVsW_QJAtGp{nluV(F(X)KbJpjJc+BPT}hr5@QcErR|M2 z{k&gc2H5|6MLhLo3j6q5Id!7F8?wMj1f$`9fRj!tyw1k4q+QbM=guBs3mN_E&9}W5}nqMG9~lIKYqc z*J}j#yZ&o8I&>K=tN03p@Imd0>*flk-AS!GTM~ujQ==u^D5r%c8~$(-T;@?Be_2BQ zw-?#vCd%){|9UO`_%s{LKgT)pidmiTg{FjfR&TR1lnM+)Dbt{cO2Q__Ds8o+RXm!iz z^GM#+bK^&qIDp>rj{AF!P_ShPd7R8ki(KGCZRR3@c4vt@;~WcU&D40Peu4BKNm8)( zW1eioa}H{OyjtgF&RsTh8tP5h{7vD^*^rg=Uh&JFwq5G2=E5lFC--`W&$;SPH?q&p zwAAL6Ogv65r2ce@)jAaa^YhCkj)#Q;DlF_-+jTL|)$RQKM$3f7yu($)kzN6ahHh|& zLuPN*H0T^H@|xe@;g9`fu3(U6@%QsZ|5e0Cc&tz>r&HiKI5*xmy>aww$FD_ENFwcX z_s9-@M2uvri5cJ5T|7z3VtVq?Fk8OxeZVW_G*WI%gRolr5*;E~GBP zlMxcbt+_dWo_@zkOi+nKb1<^V6#d8j78f@%1y%&@gw=Wl1=oT1a{DdgY;B{6fwuae zXeU$(Pn8(v>drZDHWS*a&%ZlU@+GJ)h5r4lG2er!kB*P$*RWpitC)O~K6+uwskofY zRj0@2xqw3`36UiD-iGhOu9}5`kA=cz2BR#{EJpznY%xXxyW)2oe4(RLQ=c=gx6IC3 zNg6*Xc^|}`0o-HoeN>!Z?!{bA(T?H=*bP)4cj;H|Q<%>dKdMUq_?vo9r_!hkr}9eP z#)d=p<{)ut%CcBeT--ljMd(|8k9z&$BUcdY6m0xF6bWfVnkA0~e~Rg+L=)a=sz#9R zxc#KK4yEKR_|*Z|t0CR>IaaOT9}Ywd_Ai3|b43R0>nEeYQW-o@t{C>mesJj_7mu|3 zbCNXUf409Z{XZCc%c!cp{a;uGq`Mn7o09JC?k?#PL2A?8-J4Wt1W8GyyF);b25F=u zHr+f6&pqed@r--_zsq3of>*HDTyuWsCt;=yWZhp9zkd2e|Eb^f-y7KH5TJqJ&%!s< zNKDEZ%3EEekIV)68sS9ci3CaX0bqq*O=)a3)0B%cLS|P^uHC)dlsIsrwvezU9wJKu znLx-2u)|&f1?@n!(kk55W#FUdG}6R-;Z_N%T7Mh3-I8RgqM`Ilmz)<;(O-5!btwM+ 
z{!i&?Y4o{(w|Y&*$p=@rLO*}mX)DP3bIm0*6V@U2C=ej}Isk?!>|RLO#0lYhUkp?_ zuLB|~8o>2mC>Z_Jp7~>W`OxldDyd%wYg6`ePvgOgE1yqWG*+I;c^uvKr8Yhcuh3cM)})Wl5&{NfGME<7YDa2Xw|oJ2N&m$HJ2 zLI=g$n^DUiTdAy;_UJ8+9Ji^JN6=A4C?e+iQzN)fq$$wo^Ry9j<@yr>l?pS9wk_GLq(^L zEhz>a%X<%sa~M{w0P2Sdgf{N@eP_MYe$Ev_Z*dN;cNQk_v1#ak;-(TZh~JfVuP8d5h=W$3uG5!&bUv$kzTB|+q6JO?P?qT{kbRyL$|+w?0=uD7qElt0 z-uL!0zvIA!%b23?4RmI@c6ZW1~?I6A^`eQaL%M7XO^%WG6b7`w46O2sOZ zGG}M_jexmBPr7K&5&;}ED%faDmO;90%?dBH`OugwzxTc3bKH$Xg6C|I2jLGda1`=B z%NFY5&_$rW>{CtGS60?#wmVvqW+aA2f{qWj>(vum_CEgEJbk{&3>U3UA>v=JSoIUZ z)iEj3hk+_WVlDC!9COTM?1KkujbYG%PS~#3c#byB!hV>2RGp7U&`FanvFHaUU6sy# z-5RfU*?1Q;ha=#7H~S+%#BwZIP*+3b+%HZt-=t=rqQB*LuweLFysSA<4n>1}7IruhP zb}ZY&^ccO`kh5ZDq99t}Z(D9*TcT~kWSuh6xWBs52-!Y3oY*uUbn=jrdxp4RXg;C_ zW$6&pHCcuP3qL&YQ1%%V+q?15%W`D?Xu-BaMiq-b@j=y4n(gjtPTpqyir`Hp>dBD1K1ywp8dpz=v`zBa2;l=R) zYY^*OF=~aU=6&Vgy(1{d&K@-Gf9QHO=Gh)ftCU{S>;v?_l+{P|%PXJ%8r+ax=ypi- zSZ~0r&#z-EzCj)#>^)vC&sdM+IY)|o^Le|Zbis-lxE3M*Z?~h#zo?Mr`K#xDZ(fCW zKk74^%;qPek(8?H#9|-Yuh|b*7DDs5U$Kd{DR`G;{=|av-kmX<-utPM2P{^S3|llW z_Byp`s=KL-HS1rsH{?nKjOxE*UY{*zWv zm$uq#6M4#vCk`_-AR`DPF;({k&f?e{K1PkT$4j}nZGzo9`?>8ky+9u zl-46vc(4yP2r{YOfEUY(tck#e$H7ZPEC*`#z;0mMHW0qtVH^w}F*+Rd(5*TC3lT_G zk(`KMec7f55Q4;^O`Q~(&o2Qw_}C{w^j%!6CX-nolbvX&(aE!$#hj#HQc+!p8uxBgs4a+7jQu?; z-tA?rj%&=T1H2+?KKA>sY+a`m0%6KN zHcrA*f_C^(*IBsymjZ@&Q<**j~UJehfIlR{^_6 zpX<0tBp|ge9lG1j4Ws}@YwZ&N>@16BXue))@vQo2CG5N!ecS9&7-7U;Gj0T?~sf$y|y%eK*N&9;0miNoRS z%w}RDqsMamGI#~M(#R{6_&<_#w=@px3ikOG**h>MJ z!RK#tG~^^o-tBZK|wvlK?9`Ob9dXmhJv1Nhv+ofOB zLIhmv(%xo7Cj8o@D2l2W;cT>a3lW7AlAfj%j0#MQNP8xH&lb4wP~?4@+|U$Iz_I+j zm^sCo`8L>?Qr1-1uDP6l48qJOm?A#8l1b~q5~SZ#aM8hfRr~r`0wZ)XO^h$B-vS`Z z1IEf#KOde(XftlGxeB5Pg_;hXuNXZgBUn}%Yzw+s-p zl2v%$nM7iZyw<`O>g2Jrs%@};%9;h@q;C!k4<9wP^74MsPn4@Zv_PnVo&QH8v|<#KvBWyyMru8hVyKjXmkZM@3-%&K+#6755iOZpJ zFHpCoIjU@Lnvrsu6~8GIoxKX)b;RvBSCsg+`Azd*5OtXeZJvW>z|(2E;4zKpL)>(` z%er*)mi~f5#iHj#(C-Yr*lhl;v02o3*idXmD{`g2Jfil0TdMhb=-yO!GpD$~N*%B| z8OHpkKvY^<+6DNDRo_;c^8!F<;SdP7Q_V61H!ko!^T`o{6}Hy3i#p@!AZW=mAH`Ia zp*E`JNRULpy!uT%+5ipk%)`?Od=8ykz2c~1B_E_RTZW`|iSWiY=5QyxN0(o}GmCMKa^$*`&XJ_Rdz z1WHBKF!bd+;t;5U&6+xPkVo}xGP6aWS@B z9+UT;_c}J7q6n);WtKUfqX@Nrz%p!SKm5|$$8y)}7M`B+`qN0&H$qmUlJ_4?h?abA zKOM254J`Ux;UxHe8TNv3Y=J}8`huP{#&_7f&uRqe!;fiwvb-T2`OLN~ zF>Q%sL^@u}wQl*F4$wGEK0Bdur|!I=;w8WSN!fYh>4(`6@TgWZS*&B;@8jsye*MPp zZi7s zOze(wJ$zt`&$_>qA%yp-Z1cnI^i?;x=h$-K*_)3+4kpYr8mp2GH6y-ZSoLY?-|Hxy7 zb7{mI8$AS~*|v-P*TyB5z`X9Yg}5y-0FFiS8v6w-D?Ek$M(?C|Xj0)Dd*}#TCNw(+ zU>k%auz|GwE>uRl5j6ZMii|&{34^*fHp2ts<%kjia*1jd|KQu znCSRwEYEbDDEhe(Z^ZRQ6r81l$Qp#Pd25msHDJCsRBO5ZwP0lTmX16DaodIA6N{OH z$Vz|q#FmI%52-D2VKgqiY%~9nGB{%T;3Bg}R4z=H!n@#ApCm)XHUu(y6CJl=aZ&zs zyTHZ#ikea0>@Y6n1#bH(B80GMKV`2%g@duN#KP7UZj%)m&T>ML{o?OJ8OGf9oYNj3 zuxbnWVK|e~en$HQk#$`4u}%48TT9sy2Aq^$uCYDtOSlz925!=@#MW3qP$zF2HdgZ% z#ho=;)?fRbdh)HR@hMpjA;z6Aop{~@a|Y8>g_R5*wRs^9Kae5{5DHo`E*JAQ zYN(Tg(nrE0wXmx((Yy5=%ei*8;01lGZ3p|_a<=!0fM{{-%BG#@?xF42@Sjdcy*9IX zjL*!yG|$lrmG2ILWmxy_8KJ#n_sH1C(BC)CG9xDcw0NF2FBoq^{z<6zm_hcSQICB{ z?m44ZTj#G?BDf;m&KSKZznAm9&(Fwxl|3*)Bo+wOT?2|m-#zMai)!(*H97Er`AwWN zLikcsAlPe(8Z^g>+|%uhn5kU~v&2&i;z#h->$j`otwy&q-~tCi%u2#=rOwaKW1JTdkkTJ`KKrYfByB9}JDjm8VayqlRYbU5n3;GXKFWlroo8IeeTOV^two(qA_W~C zlz?rDF9*n%bN^o3w3)1Q40L*L1I|0qpm&u`vT66y=IJE#fo?*Ag4H(T6yN;d_cb`Q zC+Px$DP-h-EYw&=`sBnkMbNxTn^l|(TW!fuIwceK!t$7rR;s2L*%WDH=0;S419h#} zg6BfgYGmFj%3>eESz(?Yye#y^nX9%SuWbLL_}ziQBv-NTZDuvv?w8=@_kbhv)3UX6 zk^MH|61-u#wRvm^6Jr zfF7v_xoSZTZ`7<2JLRm&*Jn(GeM}Z3DdC!@x{Pg#X8*kkr~V~3)64Xb|DN7YgQBr} z$QB9usU;(vvA@C6y>7GEWekgDv9E++7GOvZ1> zqynB$Ac&K-HF9A-d9d4)UL*oOF7M;BGvOGl@S`AD6h%Og2D*|-STqBp776|HJ1DKW 
z6?1|7w_tBDN9480JbX+sc1MZEuXOwek z*|-(u0B#A$Yzzv#8wl);6g{K919)n!u*m4ngb5(ErV0rOoCDp_<<)QVI&5z_KFepd zLJcgl9M$*kw{E;}&scBen#s4G+MgVD+t^)n>~U@DIZJj40)GHZsyf&X@;TJlB{-`` z(Q|13L@{a9eBJF9G z_0GQ`Vs38Lm%n*soX@>atF0YGd=On4?_sQW$Cz?IX0kQH0{_LV#LvfSzbfj#yQ)cd zB-DNx&UHz`MdlYJk40qw&c$r%p15Db4v18iTo27k*~cL6ZX2_4mPey3WT()REes!6 zp4|~!5HRM<1dx3yohdaI$GOwPO#_v^GGDw6^=C1{%+{z_Vkl&E_@G2j>cIOl( z0W?dwo#v6+6+E@kZtGq;7aGfIH9JVdq}S_#&DqO}zgr?TK)0y4%g$bd(|~OQLyuT@~@1j8_H8jd1N}SS-Qk!8tMj z>9Ur?g2>5#JO$SnI1q3azslFkew6A8fg$W`0FIndLz5Vlasu=TU-C_kxhBNR<#VKk z;qURQnl5KCGKLc^gWBT0Cm%MCC^H!LcXu^LBA`p|06c-unqj@}!nauO&;wZgL+Nw8 zUI1;6NECj@5=+486k~6I1~xr;FumW?^IBY7{){96jz7Mxgui*|+W6Sy_~)HUK`Cq4 zrEwkFYzGkVZ>p8}HY{HDzt1@((U~D=I`$px0pLKBxn55-*?3^Cj?uSFwg2ZKTbs?* zf3czNe~$QB>4OooPLB8re5J{P9+04a&;3N6 z9(s?0pEs3+p~tfi%s=1jg+V`(*ccFs(oPmb9Q2-DzMns1_4Z(X(;I!#$uxGKkW>7c zNc7U-xYq_3fFcBxBgg9%!~?fV03W3{TbU<7fXR z5h<)+K}M*9GgyY!QW9aAYCXf}N4;e~P1z9IrBmf&h><&TYwUh7$z@dxWCay|{rVNn zztlL&rPmM{z;6@2LO7F6Kdc1Y9y9{ggf))81TXj4hq0?nc|V}^$9DX^;_!2PM_w-t z$&q>Q$STk+tS&$Nfy)MiwdD_|-a9*nH@K=;0Y;AFKGh0FQz#KwcJx4W}KT6e}Uh!7)rhc4N+|_yUUuR3v+21R@cAKI&U~PF4n|(caL!WGm z`gjDK6uI262|i{y2WF(7!{rLt^eR!LA;@q17}II z+R%W9P@99D`cUFPb2p#V^_H=P^F?ifm)!dBuZPFNI7j8Zy797K#jb_rPft5(eMoVM`pEc)!L=&DzApxM_09>$^EJJ39hj6sT z6SI_((j7l4fLpHWPpInZSf8?jLjR>14THIyWRH|oi%!6G zzx}HjMW!#}F(^D?kR`aD9}kFlN<#re30m&chLWYu9e5w)1JBN-LHVsJ&fW#EY9PV6 zb>H|%lY&vV$|U~rrs}cVM)Z{U-2N$8crWW{FmuQ6?xo*_mTBkXQUhgALM}Qta~oFf zbr&1#L&mtC=y(ub{jaOUtK*dbRLnzivI&Y(V;6$u%c95bj1_mfz1E0HJoaT*B}}Re zBbVrt8xF0`)+8!PoF70Dx3`h&D38oaN%8`xu0mfslnSwS1nvKUr75S2 zRYnh(p$GJj&jLtVTC2^sd?x&_0P^v|x7wFdmOC}V)*8A2tVzO>q>L8yc@c8Yo;{u^NK0sA^)N*B*O3=xSz%jK%v6~nm5L+51Z>Rie_xtU ze|{Ga7eUyone@PnsS9m#L1>Fs{ zu!813oE2XIQA+uPpr6FWIXVvUtB12dg-k4pB?jJ#1xD0m zCO*cq;ctjE3|Zj-B1?>?O2NzTQ(gRXIcpxwj2_wNF(b37FOsF7Zbj+4Ampg*B`Q)a z>lcfzZJ`27XJ(`U?oOG7E1gO+eCui^HAv+I(Q(pgvwd zCV!RKL&S!%ASIC+alpx$y*$)?vspIE9QQp{T(u<5i%|AeoKqk4GOkt2Z7WQ0K+J;c z$Z^4G@b{-f7x7UGh=zR=XmKJVlKpph0a1eStER%}@gG0tCGsk`d!YB&B z9dhe@v*!ayqGRcFE!6k&P{(rPtoA!*6}Lel_mx5J zf03!hH)1ar4tu(69>wPTG4tt?(;nUuNZ4K>7U8}MXF)#;a7jy7-v_F;w@L0cpa|!}a?!&!%t0f~BuG-6 zXEo>*x3v5Mmiq&ToBvEm@Xs%y@16*kyXK>$uruoj&U zqJ81d1RHE0Y3M4GEP=*N*_8#7>^mqLyat~qDzDQu6+vYq9unV>Ouq74wZ4$75>?qb+aJ9to+<@*f>E|{4&9wSNCbDwjv9r+tsl-pUyz&velEKr!~*W^Ow8bHB}B1-^=9B zpqRU(UjtJd8M)s3BLwCLlja3T8l|3=wCZ*y07dhE?=MeSOSQ`pJX+MyoN#h?w8vp0&J)UqMT3&Jter7S+y zw%Crc*v?|ea=}n|eailYSyFShXU`x>e`}3*a|n~@&LYW&w^sM1So;_J2C?Ntx0(JX zQj+6~47UE5Yo0O|Q7gLKwQV7%`mTRaUTuV6&H9nSeSg zT_mHAm?*C}L3nQuZK{`M61H6kXoj|`ZtOxo%HTM2+AG(}bAFJcJMa>;wU1M>$V&LQ zUR#ooQ5f11c6-L2yBu2B>}m;ieHvoe+#cKo*T#sqtK0x(kXs>|O!Mw8A2j)^rV>=f zs+{?YBLSq#%qu}twH8UppWP0-G#BnuCB?{0I}Q_XD;)IWx8!PQFFa;Z`o8@nFlGaG z6r3loAWiHKo+Y4TBPyfqkF|^l<=>el8aQ_`@n~X&fmq!uARMbyki4P=Z!f;V)T?st z+Tt>9d7D!O5Y`Sq6qx~Nehgk0ZO&oqsj3d$xbSJ*3Hv%zg+#0KZQ#0-*nQ&FXuasP z$FX|d-D|)7^xUXE>pW{uyeN%bL6#O77zWtK0c@wY7>!Ws3#&3|p0!!-Hg;&4!&+DZq^UUxWdl z>r|v=7KoO`Yy$>OZLIo;b*PD6h}omWhJa$d;&GNqv7r3&K>S{$o$p}$b8H_!9otnW z|0qi=+~Ou}c4UO1AX*r|6*ll}3OvN67YKnJb%H19a#F1p$@5A=SA4RPZg2||nNGkn zOnLI1jF)0%EQ5Ovoq@EZwxu8rTZ2@zhF+`RD+im|a5;=M-Dp?-7uv9~#BX8*C7=3> z-+B&}YawnLbq=*SxGqiD5)w-=^4X$l(X*Eif-3QUop>hbV0e2Vm5K5f{fXj4b|>@S z{$NhNExNW>DoAxX`70ssM>;dfHY2o7v-m|BCr%{>CByQ5wr&z!>0-o`qf}AG@%k(2 z8du<36;E{m+?LGlV@F3!+sVIKz;b;#2=X36eHV=VJzoskJ$|$YXnQV|Oi>!I&fc`1 z*UIQ~@_pW$BAUp?YHjBGXG{Ilz2oj{Hn8TNiNzbMvIocz*me*`=G^k0++%(Uh+% znfDBbEw+qdxH-P+bikva%6<+q&&!9dG|3kfx=JOIho#^WR%UzsImEDOK%yIlJ4*Xl z=uxtr8Y;N)fIiF{RAEa6<(^n{LI;SNYy5@8_3qdVPHcch)=0b+Mp6t+?p8qjN#>2I 
zgD0H}$3HkA-GmT-s@4^fmRaL=noFe{m~NER?P0e9`Hx?Bt(W%KP6~K(X~}PiaC|GO z0SDkx$!s^kFnj7)c9Udu{oKZMvnPoz+P_#k;Mn&G8ee-`8)c)G*t-T5RQ^e}z}BkE zpfz!taQSEcQ|recH2wN{Z3;L|fV2`H7PI(xzw+3(0{mpMSoxGF8e=_@-n8&QHG2;V zxIN14vuVFz`jf^x&K{!wc=3qY@NjuseR3s7qPs11wRbBP6xhDyjYc3y2wk~%T|Sw; z+7o-kXEiqA>aV)LV$hWnId0g$I~^ZLAQw6=Rr5!A3b|ZjqQ?Zs`u(w_>_O*STFrqB zj~P1tehQHEQQc)dwX#%S2yGBq4mOTVY5aY6J@@s*s{n~3C%zp0e;Nm8c-O9;MQS!O z1upG6@i^v3;8~4qC80}oCrkN!A-d6*z?LrBz=#I6BrA}{z78@gW|rW zA5qq-E^Ka)Sm6D0C75`(?lbCGE9?P>C>BV{RJS%qUjzejK`x*2+;dyXVFAOlI6ow6 zE(TxKoIxIq(@RimB2pBcLtetPF39Th2ZJ1!F{H-xL!6SUHp>Je^k1t?t#jA2rEo4| zw6UAJhmrs<=#t=Io@T*FsiUWTg7S0dixh!|H$M6HceLK5?52%PLmdluVYi9!IdP~= z>rzaKGUmR}`r!6gz z0bDpF1oYMuTG!*7SON=2rQT9G(Vkh)W#-U-VB!HOUtW6tZ!)-Tk}KHke=hKk6lt=3 z;+$|JnSWe`oQ8Et77qytDOzt<-4MJ+cdZCLP+KhndvED&T{g6g=}oj}$=@`+_<1xl z2sO`=hKhkjs!jK^Tm=ow*l2yF^2i5LN+m(Whgv>D6pcr7KNM12Rnkh4M4FC=cUtIB zj~=?6wekG&2X*nGneeaF<>uqa$DQHq3}-BMKaNzNP4R(r1M7{*GZ_jXC!MMS$RMNT zppN}L*MET`G*3JC4gp%CTt+EytUh|ZW zd#*1vxlRi~mD-cOc6n|Bg~h3tv_7#qA$*r#_Ew{vu zDpHcEkHx#$!Dv3Oh#6f1y6pVXd{x8c!*37Sre5}O~g{8r&_T06toyQm1Ip=-%vO5??Z%?rI0{S zYujFR&ymr-I*ry;%aDH!-jBH~)X20=7QIh8Zp4F3zSb@TB_So-(5`VE7MWHKX|b&r zx)6cm;n^EF;$}^_5eai$K`3Sd4Ch2C% z@&s*kj}a1AqPcSyo-*7j7D8C=W!y)bL|qIq%;yeek%f}&H(Y<|;Yht_O?cGO<9YZX zIvXcM!e zMuGN@K{!OCX=37_>>~jIg2x$uuS9ax(3o{20W5u5zxrnDeaGRKDBdi#m~+q#4;Oi- zf?O?^ua6I&*3V3@pFiHCT$vQg#rPhz5dw+ahX(Pu&q=$D978D-{_&D#_`$>X1lTt1T zm+N0E`zMlJR7ej|dhj#AKwB@Lqt(x9AdRPzKp5R9Wni}y3>YVPu@R)gQpVrpq{avB zywuK&77H{g>eka>bfv480-bGlemU8r3Y-qYZmWMun|-(>&=&4IL}C~=;@g>Y5erSb zbIz0!6;Gu3p2T84C@alUE@d8It%O;fe)fxTOe`|1D@~3cal9lEmyKPIKR^d~*vgT# z$YYzXlj&azzk}ledBxzc(&B8r*|vfk#Hx2xQP$!Q6BhzAf5UOA^NN)DVFc>PEg$D> zPjmRAK3q>Q7AT)xQgeHX90y?^EfEq_u-|J-=V|{7c-}@L( zEmZL*|78g%dF_mrN~z|v5lBK$X@733P29>Fd_&K&H?h%QhRho1HHv*ShwmXQ+*TTM z$CwI5RK1LbjI6A+r?$9{>q=|u$$Lz3JTmZmxLlACqb|F56diYRd{Q!F40Dy(<>ynu z{&c8!9_;To@6t(^@}e*oWp`z0Br_BN5@va-vGSE*MAtOTnzTx?!dxWoSu}_~84F!Z zq85pM$>VS-8t?;`(=q^DCOnu@1W2Wh3F^Hmf~->^N5W(KbWjZiRU=*)+~r1&$?T6CD%6Y}2i#sQ%P@`7LmeX8KnQ{ZSj@VsmWG z*{-`4rpLhoPsE@82TOnUSmDF#U^uVe(F4GuV;lJ5kI?bzIG|3xtQ3CdWlAp=E7KVe zZZbZcqFhreM7*;zx*B){1fwJ30@#==cSVcq-xbM)Ms^Ds2qZgpCB9i$9bgzkWb_U& z=H~9Fp502aip!Fhz2I`mC}M?SXv*E;+kitmYN+QE2Tin*FR_u7d_t<>B2FryuCz>i zHRPLCqa)x&BLRA4o;o(et!t@C)h%i$Zl#L$(n+KW>u+c>*9pi7XTYVQXW%oAmV*N= z*qI-ka)(r-G{?pog9S270qufprHj<0$$ht{yfv}2-sRwzk-oY_;CwYu(HgT$(+^Sf z1Pjx|WAPM}mj$v@sljJRpeX)I^`G&U8$#E3@XCMX6HT7l{5zU-?1$J9#SslpA~)yj zEX+AoY58Uph+Oi!##AQG^46Lib9sAB$PUYgTZtyf);j<4MNIgU7<)pWvcJ9!j(-xW zx$N;DhWZZCu$)9EvRk&nRhuEK?ug8LTiDBo9z?U2Lq0*wfN}JoI5=!;_OU zY(us2cr1STRh;`zusRX^t=VwoTW!Hij_C<)|Cy2!X)C$<>k=bOzJkvlBI&GcFO+6X z;y*_4XigQVOM1V{GICy@I5o=4hZLCkug{(>*`a+B%kBxiZGl`at1dM)kb3?(z}`*2 zXN9WHA9Yh+T&`jw`a(u>JeJxU$UP6=_5A$u%j-@akwVZJt?eFeIJAp4!>9m>hJ*_j ztf8@^u=(Vl1lk=ai~{e^?G#8^1G&=qVgL%K?46(9(Q@0O$UfX3?q*D}7~jLii026$ zw=vuoBDe4qE8bZ5S;WWKZc-lC58GZ4$*?d^s(1hze35q zTx`UYnE+X+Fs0$lu4mX2UD*FUjL-idPQ@DsczrymkYYFI^S(w)zN`g(KuU^vF_ag@ z{b{m5H4!{~8;HU0QqHF6Pf8g6f+=@6G57ZGc zh9972NOC>YUt`EnqX!`^@`h9)^?qayx-2*~0y{Efa31jKx8dsvG+*?a-te%tUs-IK zOI!XfXlgQGSvlGHl(-H^i#UObkf7$~X5?JL>14PW6%1KorV9`;FDy zquoGh-yqAMWsN-hKw(nNLd8#04aa?FeGrcAg)~1xC>JPJJLRfYl|~1RGM6%hJB57 z8xAPGJJVdCH9kmQSGBEjTS%5wP?M9Lv=27}y*hZ$?vnmI7Iz~?T$4)brHjny-H2`& z7;P&N>_nYC9&H6pyRR^2_{{(rvyo;?;oG9XWrP;0byvSnPnFl4OwqnIDEw2QNO?)z zux%Yy`L<~Z>GFdJ0;8Ku9x;vv?xOcsoXFRjwCY=X1Pn|nEt z)?di9G>5R>i8|U}0t$vMREe#O<6fQAW%4}u*l-%^A4uC68jnjQ1=A(BZRO<%wB^uv#84TJSdeYQfm+rNmSefJr!Z1c<5aC>6i#B}-ol4|t?oe;yfx?w+j|!8cV_?m$?UHh$-p(02=V zZ&?d8Vsm#ViDoaoml_)kuGG0cS7y)*z3ErcufN0BufMVD{ip^f364?5_&4I;gxB2_ 
zikSAp@_ENt!oWslMCgAReo4>&Wqz9`+}WS`;#!6Ai41mQoiY>x!L{r&Ep62urfXU7 zJ+A7wBIgA5uP%1ZWQ@vBV+|1Ti!?2V^8|GH8w)%C-K0B??|2r2U3>f?#&U}sjc#y` z_gA@Q0d03k*I^dCYqb;&%RWp#zP^yHG(C%lyX!qaTKjGKE3!%4zIeXnsHg117&i`F zSTLsFGa$J45^}R!ypb4qx1oNmjPGshbxz(&_;{AXw_$pC;IV*55yHXEJp&@y%xm{` zd@-l#blZZ+?jlk5?044VbH7W&z{qzE%o1)mI<^(t9%g3GfSmn($dl^GXIM~(RN}VP znJV~b#RX~nNX=}zfdAp98DGC?&|6J;`=1$uy^?D)l*H_xD6Lx2chK}25T2K0fwU@^ zmhbWdZ}U$BDECbr9p_{{Z+zZsoJ5>K(kKM}H~9oBNiF{35|RSRv@AXfxc4@L2+@-9 z8G}6ahu0G5gkQ{?L-U2Z3kVx(`M+^QXzhWG!J2c7O{3z%SaM)WZOY9sdyF6sEPBp! zJ=J*$rS3W?v}4E~1#VXYY?V1?Z5G420bAWl-_fpi${M%*wX!v+^6 z$S%+=N91@Jhx`xTLM9O=N&y%6|H)g3 z>OsYAuqoZSIc8TIwNnzUd@*4)ay8|jY)gX^ME(^DRW+uglxAO!TIko&Z{M@lZ$EU8 znsQ7I+Rt{T`;S!S-xxj!01sKA5(N?t4qsjun7ax7mA_gYOf&MVD;d>wtI(zg;EM z?rqUafy?1PFBa|94*v9@ds4_8WtwWX)z(U{4WbU_$#}^=5cIPRRuq>!i(hn&_dd15Wt|} zlkO(i3lU&O`q=1Y%f&nXPMX zPUquXQ4S{(Dng`8pJs!-hu)<^KTxn!)U)f(-<9m#!+_WNh0Ux9kyhR&IaeC{y^HF1 zJXBeZ;u_-EZI<4U-`7#$-4?`f=GT161dQofcHmfcOtM@{5e=LEAm2wKYw6l3ABIMJ z9_z!n`1Ud31oE-Gb7vH|`*cvEJi4|tPXBJ&cvO&FVUN(!l+$q1btL=V19hCc$!dRo z6y-^4n1yEj8lL?pM7Aa~(V2>O&_BnoHr)E@KBy)rl?h^52`+tCB#Y56OfN@XSC=Ds zJgM{df;r;=S4-G#!>u(6>Z|sh5fAMFkN3Y;p2xj~baaLmnK&=H+;`egBD%mHr z8{_|<>?Xk(N*9}j^xjB?0GmTb?#xT!d_LtG)~bWvMCN5w}|^5I_rw0Ikq9IrP$=nCH8t=RQgnaOk^g$ zQW>kwg9zCh@#y-Dsk#uddFl-Ugk^v_WqGc}lkIP|gB%cn6c`SaX*`4t(`C#hK3)&% zTn#?9vQLsmX~c3Z-{apMj~sRF#_lUV(2k#phB~$0+xb0QehHV_2qDROIIIHP>t~Kx z?Io2~Cki!!!>&(x3-}6!<>g9kPOPoRl9zjbo-e*jM_N2)qfpIjS?T?q=Ki(AS+5F$4&(G{WRHu-Z`w4`Ee6FA-v>FeQIzB%yA-0(L@7s`G`A5 zGXxseG81L+pfZ>K+DGwRD7;zxW%;{MuY-PC{=gxlsbO!`c&^r{O%8j`6*3F+0PoT0 z6fl;{i&~4Cur%3iGrj~j@}91XPbV!aq1?uDVVe|A?1@Rn5hFDWKKkaQ33`lQX^?3W zLI&~Fu~I@vPLuCSylC0>^oJR9(?k>Am-1?c(w-7nY)x+I*PAig^>7T)BcGh;4M%*^ zG5JW&{>(;5J9ny2)9};J=ifuM`c;kB1m1+4hh8%{-nH%Ikh-3=jmm85&ag(C&eu>qn5l9LnraD5FTe3V#lPEw!WG-|nOPArPI}AXK=?U}If@dNe0-`{WeXnB}|tw*TkVXnAra-Lgh18maV1M1H>F(Q+CI)*PyY_u-$5~ zJPK)%5pFVoUpT4Xb-?Gicg@nP-R4=O1KfQL9h&q$2Udm53!cG`5#=GP>7gW*M4 z?T->Qt-&AtuprPVaw1TF$@elCL5lynS3Nv9y)LzmJbNOwpL z9Yc3FN=tXBbi*KxbayB%0)p>_|NFk*Snso*XFjp`FsxbZ+1K9ZdH#+g5V0I#A*-c> zCsyW9b!gik^ov30`&^`yNUYI>Jg0fcau7YH)L7(@KWB!5PT!KHbhUGUbe)1?-Fq-$ zfLazUX**b{t%TTUaWu+2MF_EDZtCK!sFa0>)qo;XP%?kl$uTyO7c;H!K8{&xe!BF| zff-W~ZpbSQ6d)N@CPaU7&Ju}{#?e4c{#J8RiBCl4jiI`-`o|E+K@ZjYPwj;>9FIl$78v|_O;r4a1sNlHG9kqpOIrIB3#+xQ?ME|=_H8os^?M7SMlH>B$ z)}y?{9d_Xn;Jj0#K)Vwq3H3`T;f7=&NlLB;gDs`bR?6#VX0rnT5xZ7#l`e~gS6fnN z7Z6jYP}ayrlhRYAjdg?#UmRzp`DELEl`+a;zej5w3CM0dPt5DMZg81RTMF=T-;>aY9>z`K(FejP$NNJ|hu>3W zFY{4h^w*yEu(@oMZ>1|M(RvZLNVHOhkXN5bxZ5u^F=@!YJgWIkyuEM#(!$C8N$w@b`$S|${>(CdSObS+Y`<-ud|-wWbo^j9sBN1 zYySIcd{vd${K7)K8M)!&@D+yOm_n@wXN(>;a-uwZGd5yohJpP@h_foyhSw;wsh$gF zCz=$dgx>V0v<(a24X@;vg0ch^;ndYZ7JUhb@I*Pib5K|+{&OviVX&#;txA2Y(t|Wu z3ZxeLo*a*j3v8GqTOSr0WJKr?vJ%DO+PX#d^7dnMEhvQVeL~y~5OTFipY=pDs6`GCM9YHkl&JG@*;xaW4-4hg zB?U0HcQ8#lQXF2PVfU4>9%_NS_6`_tnx1iWxpNu9Ta20sqIZ9{*jN0gC?s#ujjtvA z0NWk{3E+KlInTH9EGKO!)-K_J302cX2dy-^seLJ>>ZenbondD{W|#@~TA^Tn>&Kc_ z74Y<3ETwEvvfwQVuXY!2O;uGt&{qVwJ0FJ_05}+OHGo-4?;vK=tp-MgSp!3iOB}k! 
zMC@|4XSg$)cQEq}=J!Si9}QqvW4q_3K+2%Sby$dGUQB(fk9T_=Br`4ud;j)8D8Tq87h-2NenWmYniu z*F9ShkE!14+?p%zU*+Eo*1Bq%OI))Wq|4Kdx0p_@vgEfHaaDMck5Ct;Ft?}*1kOe5kh#^qO?<$`y3 zdGv+TNl?s@1<^)v;&9#G1q{+3i+EtHgv0|m8 zZKM%hq-e0PFkwUpLyS1n8UCt>gjYuF=|cbkIT2;TlfrW~$`Of&TrzuhZ(bt$8!@$k zk<|B0bRIaTNb6PxFl`!S(2gpcAiFL}V_tWAif<}H!h<_ROa$H8dW{avr+AlA)w=p^ z#I1qNAfmx^{yb$^tno`ZLd%#zcBHWtH5&R!Qr>t=D5?BeJ?{IrJ-d8`c^1*in|j>u zOAH)y5c_1!qAyZ>6ZD_8>TKW=2QB~1ROR06nsm8}NW0|*IC#i8UkdQrQG`VsDri^k zFQ@)%Bjic$wwS^PB6Ml(AF60Z**Ek-dqV(s0i^VQ0mB{iVY5qPhe^X9q zKZ0{GNkErgr&B+ru=j=aD285IvC+*5K=!Gssv5F%4>vG7T$dGWp9UfoEQEyd6&6Vk z=0Rr0n$bzmcnIo{F`nzHF^?aq3#1vE`dhlLX8ZysMmB%|f;4YtAD}A=t}<--qRWhe z?SQ`$v*vK(K467SpkI%~CFaSO)+~!1Zmt|s#I;!a#IU**hmul$%_0B(Y{~g+)O)we zV$Em-fi$T>&t4$|Q2iiu9Rv3&Y)yg4L0E|LL8e*i$=q zCDI5$uZ!>lKT%5xG4D;2A~Qvzhf#R$C))X-tB6i!bxk|8d)2)N!n2{>lfJ@`%O*&5 z7>#4FlmqcGU?}?tTi5?!-{4WfWM<#^%}8To&Vm9_Mum-9xU(1iU?RGEz8Af>TY@K{ArI2&z(x%&Qs*J0dr?Z zfH>WEszE^y8B6hxJ(^d~);sXQE27_dtuS(BEX=2);NsI9!b!d&RjYYHcEqdggjbv) zZk$yRvt5%B*wWRZg2TyLUxtC9i+A7+rE*jdtf`1x)f6|;tawa9YXI`<6v2_5DjT;$ zo=_gh;KOn3M<32`r+VoDH?^WD&Kr|rH{x>tfItk1or|)(OEY{Og;f(vP--s&F?tGp z!U`6B%c8Va_RRSHonWgQSP}f$o^(Nl^s@?{hnj7mK|z=-=_DockM-TZLK-z1o{)Kk zC@2$tbMfhqx3qoNPQvg~;Jq)J)jpF0IE>X)awB0T9$r0mLNh{Hj%w( zi~MaQu=e@~8dig5OFMA|a1D#z>##+cd9L!8FsF#zJ^1%eKy2XKAl%rp;49Wd=qC#> zRC4Od#+Lg$4P(Vw%(ZjTUO+3&4Pw@xo~}hHH9wa~FNdPQT-4O z%QKgl^1!<>p^+zxQ;2tdf<{i27ygv7+GWLWAU*xQZ3%7nAaFaRXUAVDF>pKcU2R)N zfUv?jQZF|rh3FMLU-zuFS|Bf%0@ASJA|U)`benLQ&9z8f6u_>-^n2WCc9S1e%n+E^BH2dB|0R z{NWpd&oMI^HoJVkKJpK-+H;l@t~bk)N7f?V1z+PJu@Y)jM`qx;r?Z%@tsoT%DEsvr#B<`OxqOIHa9PSAO@BBw{Tg_8<@>+ zBs^q?wGQgXaD}_Fnd>^m+Ax$RG9uQTCEjgFQVkdNsAM5B@TA4IEFPlth}eTYBL_gB z4lOHTR(UW>*Bfk(e##CH^k3A%VP=C^J9IP;!)sfwDYJ8mcS?~N;IN~bzI6W!Q3C}y z$J7`67QcJeLY96H;yACrxLaQxT2>Gc_A69y)ZMl8#S({ofM@h@@ zzuXu9YrurieIx0&NT@EA8hb9r`02{Aw-yDI5k!EtgaeaU!(5YBvhUmWr2%@i~HcK?&+2*!8daC`A+c&?Py7j$%Qe6zPp zQf`-?;M{R~VtjfdA}Mb6W2|4Jj1B32F57EShQ$}FcC2}ZXF8$x{v`<{Hcp#>fP*sj0&fgcUrG$enR7(YW#5W`@NXcxArFtH(~UN4Ux%h zJxo>ww90UO{t#%iDF}|etBpX?UUU{NviBxF=z6G@nV<6}s1kD^8^Tj+;E1feS{{Fg${7W}G8sLSr2wO->v1BwZj0 zJ&=a6*q`B<>?#)h_?}GptMV7|3_cBBrgmJ$%l!RZK}!E z8NO1jEVr`z)VlLIREh}`lcADva1OQR;S<&?cpwobdYy#> z3a3yNxiC!r#J3#VCw}C8*&yS8&ry`1{(1`hT>j*k@bgNUX`)1V4gVoTL}OJzz37@z zD|#`EM9+6bxz4wjBw~Dlom$Di>El}x;F-q&itcv?I$LI21F<70!2{b}LHk}~lDPe- zn-VpdEei(en*CbmNayN#+iCH%S~!3Pp#Hue|Ievn6hd|$uWXMBTF-hOL{RxL zs0i9(w31tYs$zv4-{N6r^%nU}L3)iH68NgX5QTwOD$p3yUbx8FQ8Yn;j>W+u(YMR3 zok57_=4PCn5;PCnE>=w%-!&=b$#myZbb%4)(NrKZ86Cs85t#(SqTA0Y-F;-k)Vn7O19(RreF)k3hTI#@vH&)s#8j0V{m?DwA7ckBF zGl5Y}k7F}kVHIKyB5g}CG5%Gh@&qLp5|uIEA`J!tY6CW5 zb?%3t3&nBP<@1b>v7ff~adY#KNgHKdx_6mIZE(!yHXwPcv*G6Q=l`1f|DG>4(DxdJ z6x{q9>Y8MWlNoZSU)~(k=sV_;l|KVV#i+X^j!#QHS75M}Jp@(j2g{WjMZC_gk*c)7 zCdgxWaZsZ#z|Suq93nwgyZP*Ig-(@L@;rb6o+BQA+Ed%A)Ve7xzFCfp$+W>J z{}El^AN_0#AS5S)Eq|F$e8{r0z( zM1(d(=!;s=Vvd}aGEAp7`$UoX>VI8xo`!W?v3aWZ7^tRlJg7QKx*E~KR-T$H!Cn9#8qWd1g8)LWtrVMJQ33Q>C*YTT9E zhQ^(y+m$B+v%_CWe)z|E1>T>{X1Sq4WJz0adahq&tRuYaO@TC(cJnh&3TMMbSC_8uegDM!cuc~K3TvH0~N zM618}=XUQ%A+%s{U=l^R?YA#h_^*c~g0A&{ywkYbEh;Q4;dIzkM(Pdc?)>@AFs+5-Wn8=*%d|_yYxn zTNpBXLmd2CJDYHql2G=e`b`D+8EEYSbMciMgkreGqE4+Qa(#CHcyOjDJU6EY0fp8C zLPd1fAyj*Fh`j=&I{)9X-)5l{0@)QuNXJ_gERE(X6piyI)?d^L$rC2XmXZ4?ku^t< zM|AN^gONPAbH;=}ime7Z5`|L;##~_pI10K$ z+ECDm7zh%xIbLOT-^U?q1{we+iJz2!_aRxVj8B;f^wXKfSWLKg@aH6&1I2{NA z*^wkv8U=}Px))mNsR&ysWu)!v;piS7=z&dNf?f2_D-*eQyoolZ_#iO}qVIOuC#`P2 zl8>Q7#Qv}ZH_ZXav2+D(!d8zgtX+P2;_ln11_S2}8;(!W<9Qt+|84+)=LF$A9deT; zU}sqzzHt!P^F3=ICcLqFK{4Bw&Ctv;q@8_snh@tT(n;TGp=0d{OZZf3@O0(s 
z@0|TT#d~J6nNIRse;VW}^x?gP6tk$j{Ci`$6-=9r%oxB3G&-^{wDB?h`7{tE4A|BJ zsteC);xdK%rZH!5F5mf=(4H&_;kzd$^9{ZJrsA-7)Ot8fySC)y)l2zrTKW3=k&+64 zBm7>ONL%wR#rk$?|U0UOHf^4`uvn{j*p|>rt8G_WmM3{{&uU zCDK`@Ieg7>^;1{JsUMn9SSbyCc)jY)o&e1_7f8AS*Z_si7+-`~VGOIMC<93VNe!Py zcu5wGbgElai%^Z1?w){9w&?SyGap0{O69NkJ+<-DlQ0cvJ=@`G6n)D}5?3Ef^T)Ru zoBf>)F@g3Bp@Ws|lf<`q#V`%PHcRpWOtH-!##G1F{rO`$m~0H*u=p2}yRFO0K$bPx zixpyAD{Kd)aaiz_{l(Ze`A+f6K5ic&b@Vp=O+_CW<0`C}L6Fo>9m-t?;dE?xciqSc z8?#ItEwRX!lJ7I`jF$z~WP&lbs})k6)kMreT2EVNS>VJ%nAA}-Ab2WY!hVzUr6h&V zTdJ-^6jmHJM6o>;{Y*q3L9I-bNm5IZcxHl?OqCiuznmpuGuh5nOhF7Ql8(46S*dL+ zf$)=q283=yXag@2Zm2R!H4GM(BhW;EB880Q>b1~|zbk@?k>G}kdZw`^xn#&}{qTE! z9Wq!gh>@7h`m-&-QW_h{Y z0|PALz7H>bf9;g!iNt^ErxpIQI#}G}Fn!e>^DZ^_&Nc z_Ca#hA0<-z630BKj>V%2Q5^P%e}SJ3bwg}=N9cjVEe{`*ZTNXSjSSeO2x80`o(zV` zz#(ys%QA0@x$aUL8Lzb=IWak0#Oj{xXP5OQMGD7xW9A93@))#+EE{TzaO@E}W~z`N z)Hf6n%J>>ZbaDwUXDU1{qB@_lQ#6*onlS~*9K}70HTA~f>oclHJJsee zi0sP>Bs^?j%cOX+h@U;*qJ22@fP?mB+q;C6UIfovM(eMASvbsmI?Ntrs0arHE13}{ z6oL})%!97^Y-k)`1Po_?@gpQX^$U;BHD&QtSxLg<2IEZ$oL4 z_P|{3z4;VnzPu+Mc`1wvRJWk||>4kd40+V!` zlTSPpMQ#Va8o2BwzZq%%SF)m@;6u;N&7E)eX`!IxAC8(8I+!pA%5Bes4_l+1GqteN zvo+yv^ss8QTwGsmv4x_UZ z8PBnf%mflM_BFSQx%^lk0BVT=Na0cKH6HMA(F(X$jAApxA3;iw%8@$1I}dif+9w|X zl*MU%db_<2hlC7diYae-AwF)EeODmVW%%IRSW?h`J|!O+RmeK;XGO$7Vh2t$?Lq&A z0-?3CY=S&PrLS210yi2F(`_<2j`{n2MAp8*`~9YRvfAw&Rzu;n?B@5yyr zKS*~9vxg81mqN0or6D_!@KmL*J$_0XSJL@$+sSX;U9(jniBwdw2EVGP-uFEyKQUFu znUce?dP?;<&8jyPVOPEZnF8A{ymljCL^a$QYbBQB$FV&{1Ib~mi<=dU5(^y&p0oR? zL+&dH!b014R0^CUc-ok*xlc-h9vs()I;ik|q}4m%(|;(J>8gAh3I1$OQ-)*+i7$fM za&uj2HDi_~frq7RWbmGZ+}Eex19ScRjItkk?q=BaG=?doyUMY$G+U=6;YV= z4MhX!SUi?4I^s0v&=Y~l*VSZ=G89e$TtB38KG=_ssn4Rc!NO3Cc+w1f~qk{NESZ9@Rb`T7c3{By`!2nUj+DJ*jIkGgA?*rI z%T4AEXDsb|h1ds@dV5rWrpQou+y%w|o6Ro70o*+7yO>Z(mtggCIRGNsx}`9Jj3g6z z(ND6_2vt@;`1gwcp6p`a1o zOKm4>rnPyialr7P1D+T+TER$B$84@KGrno<;DFPNadb?E`qzWTs|T8JIA|_9?+cSa za1Zy5`XNQ6Zi?2SyvE_m6igE?gSP1H>SgW|?*sUX{_`p%uI&eEu9{CZR#Xp?S%V{( zUE^puS*}Z`@Ed*?D||wlTqMcp+?Qt8_vAR-&54)%G>j`%nAI&{ad&$=bL%vF+0-s? 
z(Z%(bl7eDlW~NA#W)v-9K~zZSQv}#IO?-ADj`qcmo_LD4ua@5R*rj~> z{CmodJU@aD`vGKZ_>O} z@6p2k=XS0$4|L)ouHd-}D?LweUGuvxQ;hs4-RJ`NsKAX(*X~zfvf0u#-nxb%h5H#D z=ln8UPu6qt-_5NY1qNzX6T-Czvk(T`gv$j_IbuS;27-SG+RzFqXGL^S8HDlm|BRrD zj^p^^Vf<6O#}BLB@HZPy@(+?;k`mR(5mlGk9bwHQZP*N5P+vlh(!-1dre~i-o@c*@ z+`_vQuQ^fjFY$lLc2oN3k%)g*n8~G_^?e%^`g~>2lPG2>V6BjqY(%Afm`zTMIb0`- zpC$ttHq^o+hOwM$!NNiZ`-?P6GrV?0mE?~Uv*a;O*jhbX@lu|=#>8?op;f(_7DbMb zXoCB7raRjA1e1ERBl-V@mR|BOC2RZ_TQ7D2SB$VMU(9Agl)|KSvZ%Bwb383RqY24Yae~Ri3(ju(Rup*R$ zrr8Xa*7D}`M`@rTLJoK=9F_;PASSfFNEJQ8IR;ki$8uM{PXg}Hb2=?h3i3xdK5NGA z42ZrKv4O)A0e@fn?_VvQRibyIKz2CF_l)-M8Eq7S5H%vzW|Qx7`ojY;0Q3w1vcUd& zxp%;5KvCL<%vf0vICW!&j364#`DYofo+z_H)0NKSay9b^{zk|+!WI-kRg2s8UNlW2 zE_l&1o@w-%y}6R9cbqyxqkgvTrJF`&7dBh4r|OS* zabaJ6U(0jrPqG`+q5fSDVlTcCSqHN zHOp#MFa0Y0&6_3*1&$MP9D1D=GKB8ixasjPcy@xFAc3pwqIezVcAgG2wIXHU|G-1p zT^!Sjik61NTScabaP%zXU(s8!$Db|+|EYk_sYP*D zPxAV{5kv7M9SHG^_wlOl65?8#iV4f&t{0{R3~^88c&Gm}1_bz-2mp(*s)_O+^OgYt zwU=1T{`0z9hxp6&)0X;Rk0(I7OMA=$N);J8$?Hk|y}FaM^uyJjUh`p9#P<(q@I!Y$ zVBkcE9^Pxp*zojnU9uiskrr?)W&lMJ0|SE@S5Z+3gH~C5Mf#a7G2eh>>mCcA856@$ z9_k3xohO~aMSc|m0ihqmv*$W~H33%*|GenaLCR7WEpE4adUZ!vZS<%?9@ zw^aV^zGm`)5)1az>dkLz9Le(nYd(Df;-}Q*KZRbGT7Q1^s?9A{#gJ9w3rVFWc9xlt znFBSLUZ$xIUAyGJ_iwY8A!a*No_j*2@)nQFFF3fl)wa)PGuS83`W3nm<^ZfVzl@s{ zHlcy|X8uD|g#oUiit83BdKuafw07|IeWJ{t`ZB$0# zT8u~cO=m*h*J;Zd$;fkn^aFVbSmO7o(uudEt~6NX_bf^f(;_ zANT*&&{UdnY;ea-zg%wjy>S&7m&9iP^3>f&Pk(j{ zkg+r+8V(QCN{ffsZvq-1nsh(|6ef&h&T1T}l-anI*|+iPi_fxG5AIdYN?9+qq{B)V zqpP>{xcPkd6WdL+ecPqx-&b5$Vf+AMMs?I}M@2y>9E7Lb15r!u*gAd@GYqMi5hb2A z`_pJ9{_*>|iq#YCsjT!17{e1~;$stx?%z~1g4MlK7U}O6zEps07mykBn2}UQM=E5+ ze%+B*pow*EK|U#a{xzZqQMM*Fv$>^q0>Gd_Yc8&}MTLbZKEIANf$FSx5g7nQQbM|b zUG8g&-tHFy>XK)#{y<_cfBh<`sZr;Efg)vz>td6q8Zpy*K3buu?KZ5sBxP(AP20G~ zCL`J;l85OiVxLp5B7D5sfob+PRz_`O9PTjMP;aBf;0u4QO?|^Vx#yxOOOq~uV`)taxP2Pw1 z_~GFpUlew5KBQ>;#X1>?svYh<70mnp69P@o zGQmKcaE0gFA5#Z6;GxgH@_l5evZuphqw~|mYe8*w`}pn2us?}I9=}k1Py(EQ``os& zSFp%|#lf(6@KdUghzdeh8i2FsM9_o>;Mj~WE;O3=pxSdZpR0bUpDR-X)JaK zBogcn;^y#lqE%2Zn2dwDUOM=#2u-=m9&<++N%rh~%qGfU6PI4JaNJz(#KXE)=pmrw z8+=a5eXiEoGrDV{WSH&XWDxptOpjispsGln(Zz)lPY!azGwNX9-LnhJAaauACy>`z z5zbLxW$nh7%6)CBwT!)bh1^b#`Fa4tD)j4}7>F%OhtgkR&xNjDLznRR03SP87eykw=oEnFH6Lnl_R!tPm;M@&4 zo65rpw?NdJ%+(oR|+?B%4)@^sGQvsyrD(zTi`2m@;$p zcKezym%6yrjyB`y{n1C~PJ)=+ZPamb8huFmw@C9@pdoN$S)ZF={x(@|Ae{F*I{;qx zGg;KneJ)2z?~UgAd+x&zU7A0;f?;*0pZOj%aQr(dt%%fqokWEn7?J+pglfjia(aCD znzz3+0H|gWz^5|Xp64D6ZFf2UZ?{D9WzaHBki6!nI8Dp}&PPKjwS5IX%HYrbe5+2} zuYc)h206%rDjWNYk<=LvN}+F;4;nDMblkXx=QtBWTFZZW}+9ahITJfq-e0z;@km7EEL&^ z&`U5EgJ`E4J8WsvyLS$GC<_fOnbi9(=#@c~j+?{5tHlk=nNh=VMg&(^(rev<>Kh!g zZu!sQxy_NWy8r#P-HAw+=0eSh*^TF5eyTtuKLT=M1PI;2Qv;Ddfc3leYsqP_Y0fy> z{pkB=-Ehq~ulxF8FK$)s{@zIQ-DIMN)ootpV`0Vdiy3-Q7Xu5oU(Wh<1gP>oM8a#8 zTaQ_h)8zLIvm$&>ES@B%&(dg_*(=7NThtj$N!dEFh)gHYw!i3LT* zci{P(X)>QTh$YS(Gj~C;nY-5~bYiib!+zc6=99eMHg@-!k&L$HWUcdUB3*{=sxoD6 zz{Ab+H^hf$6pAz|BHi5WZanJ7U)oy9ITgtpa~0mv-vqnxiSEudH0_1=nKt>8po^S0a1`!w$glti1t4hrzx+udNW z&6}Q=U0?crd2rj(4>sM68IXdND-yV+!99S*oH!`b!__6XQ%)94XX7u!9qX5^x)yhpaYR%%|JT%02L2s8n}m#l%~Yrir~bNe^V-Z=lkPCI4u`7 z-V6yQXV-1fJ5O~&hGgM(o^o5^t;*3VRv)8Z$(fklL($x3ZR|_8Q`g^p^3HQos19GDc}IcQfwA1Zb?X7U`dMxHMSE!Y&1 zd`a{%C@KWonuy8YIbC3n$5K*C>#hQphiqzVcB`LFXcKR5&`L3cbI`acd(i z6TLO6$M||;WLZ)I6Cr~ZqvML~7z zE8{<`#ZC-Ny^Pyvu(r5LblGw60TME2{&mzlGAE7d2C=$3^Vb3XNn=!zsG7 zj2Sq=#O>nS3pRCBNVEnMwxF!MGQB)){D9sy$SjwuPw(1iu)i8cc49!UG<&gr@a(Hq zy=iLHqzkEI`@I%-UWcc{<<;3w`hhPs1giqxJ{zvjJ0GrUNNzR&j`HVoW2txsh+xdK z0eiF`J$TO|spgE{<32(h*MXVcDsZgt-@7$k<$j~b;u0&fw<|q)crHIDhwO%Ux_p5Z zC`s}$0q(mmL)+uoaY6lW~H+hZE%25hHU}v4&;SmHdQO%Bn?kZOIrm_iJ)`PEtA! 
zJ>~KWKWg>K7a#PA1%_KC&n{u+TcvHPt1#K>t83;ka}&u;l7pjUFUI2ze_^#Eo`g{o z4RgBTdw#Up&-{d#dFmq98Xpd5R1|N^5N+EDO4(D)OqoTs=&$u_PvqD1Z59-*+>c1<`ngF_+|ZR@;4hxvLvf+~1B!JAuQECr{E`Js_bp!aqTV9?Ily(f42V)j}{K z(>4cO+in3;GG_eqm8MGr$wXv!F2Q7uD7s49@!8;l(<~pQLSHfqv$`+0dx0l!OWFCE z;!V1`GVQ42ivTOyv4nh2`u~Ix^tjKxQB8~{M2q>ylxNwVk#(m2kSX{fP6gY|9=aBf z2s02&7(Q)IIr3NtvN*5c@esoDl19^CVW{Yi+?Dm~FS-@Pu@P#t|1SSGut;IEvZ~Oq zCt`|^Si4tq?@Os%#m=YqI8700M21rOF~u(^%cB| z{*@(3KwHp6C{2@W3;Be)CM)4dLBwUfxSq*6jzXrgK?5g zau)!V^SNjpAVJ>2G~-R~1U$;Z_R=wX`bESOX*rcvi~Ds!%5BbNamhjN1nCa|8IRD- zxLeAhevy_5O)Jg_k-BJp+IaqpM8FhT9M!rG>zv4-vephBf7yRQBmIs(9de!q1MiE$ z5$Kbvkl4vxdqb_Z>z>G&TvxG{X_8SVUbFmmW<-2>oxPOr$PL%5N5k-d|56c!{b(bo z`4anmTBOQ%+2@I`A+g&g(s_3==StltgYVM{5mr8zcLIG}2*!AvaRgWT$NKGisrYB2 zzwEm_qp0X0CzyHIr^fF6L0E=r+eKR>X`V4499H_@BEz1ETNZtoe6~UzJV3$yQ;aylZP@Q(ceYTFP!h84g)}7d(%seXk}Gs=$3P_>^)Gyzp3QG~^dqC6 z3gu);EjT)3K{VJB>UZ4jJLMz&GBr38@qB5rBKX$2o8T*SHP}?(RIRd453WR`8%La0Wd^6ryh-as z$H7wL4Hy!>=W`9v;e<<4P8qZJ};B9#=x*d}X8ra;n*f@CX1Z62q?YUeu7yIk_ zf-L zAbNs*073YNd#2I6J59>ev8R+hf9jv_<0QDysee97#T%R#s(ZdM!(wwtYn3eOs*jBl z8DrE0ibn@+KS7%2)O~&T0mYOGN;MnEtkhIj$lFw3%TC04K=z)FiqXx*5DD{8@-x$F{t z?$x~)eZsBAWcvzzfxD&*$FKv!q|Bzi*uk81t<*_b%J7f7lVP(ZA>ssN7cMs^ zCnrPV#?;(AJQxy{pdtg(YL~9fmoxT+0)fs#MXOCwP)TXDNbK|RG!Mq!ZQ<3y9ec>{ zt@T8l)>wI}f=ny=yghWBGB>5gA^{B9f*~a#d-d7bx#90NhLc4xd@O^B8U!UpRZb=v z&615JH2F}Do~gN5CE8)aou(u8?)uLH5o9IH{YD6<&_2E|=(^SCd*4w$TpHJ@I|;_; zjTK#Pu=8`;%Jz)XC-WyB40k2@ThURaLc%82mSR;3@P~?LTzTm+xu|f!#H33K!5IxE zoCdou@*BS5qsKQEmVL&=el{kaDE8Z8!-z3WLwaM z>-fI5fa)j_FPR5$6eV7;znJL*5+XI2h-X|qZM*U_N=u>cU92_3vnQ?#Uf_SK_Ax7+$RI78?MI}NH zH&%LnBq*AHD4qTI;gT;{3T2}uUE$#CmqN|*SY-`iyyxN%iE)t*3MRYrWda1T zg9J%qT%BEJwTEmR2d$b-)}n1CFN4SKdvDegu}|cOU z*JB)^r{~Q#J7gPp?>@rk0Ymt5g|&_pNBBwne%$Q9S266FFP04fiq z=;M=>PD)kPZ&AOU7s)BrC^X?LmdTkw`SVNiFa)5>mbFDrP-3TRoK2N$im}YS7okq% zom=%yi5;f?R3hSzdQ#e_$Pp4plb}oSjwFd)1gRv2qWe5ipC;Cu)9!~c+AJs>%>*2 z1A&kYxr0#Li zeIpAEUf>=f+Y_O99bulRnys_Jn4LyPvyRFf5FJAJ)?x^pvu=m5oJ<=jyt3xfA}7M0 zWA6*hp**IM-0N1$Ajdun`#cnGH@WKle=+t}QE>+Cw`YR8yG!Hl4#6Au;O_1T5F|K_ zySqzp3GVLh?(PI9)A|12IWu!HYffFTxak*Gud27I_OqYg#v7@k{6w?MuAPp@MZQW> z>R3Lss&H0oHb6lnI2-cWI)+id3GW>;-`1km>cqQN5;&@S%lK^GH9LCfeEtRXPrDP^ zi9cW#**ex;;{B;n}zaA%RT+9D(+CZ8jk7Qs)kmJhj2O}0Tw9LlVJcQ zm^LMfM(jU<@^+Z22;f=&00aIJly{d^Izpj12>v~vi9;KZ)#!A^vEgB5i#oY69fYXl zD5aLwZi=w|*yhaL^Z%3OBILCF^W{sdX&2aVy9gH4cP#(u>hRkowPVO~1ZUtDea6fw0_#S(3U_ADWORJAG+7edE}fIG1rf>}XjNZ(1TJU5DVPMJ{P zN&Q_}^!#F=r76p=+U)~lc5BUV=0=8c}bu`S$Jp=26&34q^AqL0nM=y?G zr*jdvobN5HS?3a~qI?6|SGDKGU9AQRZ%80RL zc$lFxbBX;2pFIEUvg|<5s(?vMOzd%(W|^p&j7-Q)i=n14akE*^X(9Zd4FaG_E&Sp* zRZ!0okd!+!dl>aARUpQH`dzs^FddEZzfwkea9zP zlJpsIRbg-GGJc`Y*L7mOa3kb^??Djv>?UN4Yj841m(Oq@0JEs^FiiOrpaXAB8)o5# zaaXUzW0U^n6?UFobEnm-Q7;0ZMy!@1w|@QjQzEJ6cu|7lme9^5*H77#d?b|O3n0Ww zfr$bz7aE||XhIJDekQEKh8?-~zyIzvi|boq)S2@>Y*G`*4?7j zjxoLxevVpi#;9%O9sha%z92Hva$m~RMjj8gna zKqWykGt#;)OhVV94sOoeC5@{kCTQ5?|A-7VJIA+{d;Z;JvRvzLJ%QaW&)Y90@YMPY z4rF{LkMdv$)+@(+UeC30tLP+>)0!RhZ487D*W+4LLYtj2n;@{pu>Pnx9ZXQf1xcS~ z6474KOs1jBxV553XIt($D_rQSJ14I+mJfv=}@vHr~@DjUV7vvPTM&b5I3e*(hz zhQ(vh!5oWI+X-xjuccIM2w>5hAx{$loEnw}P7UjTx&rIjRhwEY!`laKR}v~^?b%@p zgJS&VJ9%w?c0fbf$+%9yQGQy-^;kXU(AP}OJGA+qxo@a2_GT{dLA{<04;MjM<kZ=Eyiy2+Od{9M(Iyev`7)+`rfqv<@QU`suR89I)r(N)(9DVey zb=Ttg$jy`F#GyrzQN=hd~%KUsWcdeX^qN2-*>Uh{t)lt!<#mEhHxA~k4e=)eABJ2kLd-2A5M8m=%qc3+{6T%dO_RS7%7 zhoZ|zG=aC=QjZN%$FZH#@+TJfv%RT%B~fs{V`H0vmeU1c;f;cFu@TrKzJ{*YKxwHv z3~hr3FO8LEIx`#0`&wjKV62FV^1CUoDi8h+XMCWxiPB?)ucQ|R)(BcYHGV$RoGgnw zP6>9}*$8b)qy;fh-^b+lHzHdqZGgWSeUVq{x$RM*!Y6^yRxP}-L!P~chYF|7Zn 
zbZjlw?^UN99`=2zb~|}`+}UtRvlUQaTLKtr`cKN*J&4P*tqTLQ1Lf=BM5nZ~eE1sN zEvMCSduKf}y$Mi;|4-y7eu9C^ihsG~B4=vXUjA*>Y%sEwKa;9UG5p9V-p|0DY_1ZA zz;D>Q0kwrnTu}O>$Ub~t7^{|e$rKsb|cNu@DEg}iKNzY8U3$Y7y znSfi6?QrdM$bp+{A$k3`CEN)pqFmo1Dq?(2Pfy1ZpCi0ylAyYkeX7(%c#BRDX}u5n z!UC7MoXsJExsw7s`t^TrH{1g2Sv=N)1U(xask4=NJ#PUiCPxVz%=E zAaf8(;~}DhanQH70>)FKydu(nI_wbxo~fZ`emd2lf8{Qv_U9#EoZ_xZd{DqsUU=;R|aO?+=vZaYC8(Oa$4+4Lh$8@XBdiPBkEzJ7M&C0mp7^ z22h-{qzBgSUtnjD_7@7bhs9L3HOcbC~J zzB??dgB)U-VYkhnpz!%Q3T}a%Vd=sfg&}34FW``1Rzi#RtuOo5nfCiL$Ak|SqLuBH zv{d9eX9;?x^jeMW*9Oh(hOW--fK?0jA(82}7_7i$b*KZ-vVqg}KW_B0= z6AQeT!H*51{6@1P#o59bWu9i@sEaOzGj88gCyB_`RfO-}$W@Li%IcR;xp&ZXqgDDZ z+f3iSmj{BTMVk=zK;Ok`x<6tApGf8~4qn2jo@G?&<3MB!5hIsK0CNJ0X6%GGEMYiP zs!3+PcoOdhM?`>&_BKFJ&eKbeB2ICW?-XPKz~~B>n{ckcJk?WvQU8A#J+>{Sy{nrZ z-1mS>=(T-hE}uPVU_6o~5FS6+joeD<%OrPA3Do%kBJ1oL=QOD+neBTmZ`540e3@m0p725she9tKF`g*qXVk;de49X@X@1?TG9`uY`_UA(6Ol0Z zWwu{m>D!LxW8;XUK1l`c5*f8MebCm`;7U-!d)uz@|2OPZc0ycJ6L)oOEl##*@SaiZ zKWWH@44$WBdYbt7$+yxoUoO&WT;6Fr zhD73D8iCzLt452*T-2`{=hum~nv1fpK?UF_84J%lZpv6kiDfGsYZVNY*Qh#dcuq2m zsuUhqIIbHni9YB1@1t$4e$c&(7?y^nP`q&V>BYC+ZdV5a+&hnJpUO=u58}TQDL~&jQVftKmbIBRt0eE$hS`c)W%NggbDj zx&$+Ix$t9OnN=YzTQY;QCxwyzP!S0wmxLwN$H^Ua)2+hM?75oI;?%T~D1VFY@0~3M z(Ss!R;DQU_f+*l5Oz4^a2x!3LA*JZncGcj2{?m0SLq=zJ3yK4 zi`wDKU&kTiLk8ssJHLi9_L^HAa?J>2h768C9=##QGu;AMuMu}L!{~JfG;SM4h7%VS8*4j0xBn=>64WN!)^?AAy>9XOMrsRE!-;OW={c06czRO#q_F4?B;{v^IR#QqX9k(mZ} zYMlT+f4wy#JU+g}C2swja*g#|$e!E5H!Uevm{gC(YTs}n5H6(qT{fdz%8xyw(+ve* z;1kEyp-Q)*@PTnKDQZxFR zByL%eP%~82A4XN8(C?qxNK>->p=(~!jsI|k&-W0N24|H9bK!-`$SB*GbuA|G@4uYI zDJ|u74M`Zb1sipKMlV1i7k6{bJWMA^MKM=$8UB~;x#rM@T$1U6VDP6drL6VjF&;=j zF!%Y5Nq9MVcb(<%w**?TE);T90;pXLRfb~nu+9dANAaZ^v+kgd{hrzc>Ue^(I;c?9 z>n7I{=_4d=Pn1^k?l#+kFfdzsc7@aHrFCXw4zvgcmd<;jW$6>oqKuHEk!}3k;k!|5 z?ngpL3fWK1RwTNm-+GU3Dv(2^jg1ofC<;LCK~3RCO`N$RhVk)5zn8NeGK zTbfywoEyCeIV;f;dKoCPgRtsGkRakMspAYJmIiU7%Gr1Aj_L;^A`8PG0o|1$(9%Y&PTL~oN zKej|d?Wz2Ew5y!GMBoYHzaX}u}(&--pEY8CGG=WqQZ z$tM@Rp|tBIjtxv6%l*t$NtLwGLSV#b9ThBJP9{}}Yx9=yT6`a07SWa6n6_n(`&S$~ z5QQYDkoFKM;FIXjVQbga^^@vqFR$<4YCnB;!D-WLcWi2EGwjlO7wdW~uTZ>uqM+lC z+msADsmV#C+}LB3pikQ`L|56&kL74Fw~Mm8F@~v~vY4@k=luD2h{z%CJR;)`X+&W2 zZMZbZOWN_47Ryufx_j){H;b9t%2QkVwEtyxI70g^jF{lG3FC}dW|FXvE5jm(-h<>w zZ0aEAWyTMg6VWY9eBi+pX?`adHTw>8tm2s~=u zdZ52E9}{XA;G#C8cl~&D5WF3xEd5$xPPesCi5>uucC}`Q4KQi0FJq#>ulAx{hh0$*-Nf%#e+*r zZ}7s=VHT?FdKnV0! 
z1<25*t{?=}VbV#Qb`@*ZUJ5kSggIo8X>}?Dib!Hd83r8 z&()S5sYh>GC})b|KsiH3C|(jOn3WNKDc`5Jbp4iPv;sHnIS(x~0coeObca&>!~hDT zxa2oxwadB3@_n+M)dG)dq7 zejx%38`{K^gzIPFy}_bft!8uXD`X@Cq~$l0Sth44gBxz%HL*7}QoCQWE}jGwpX=ZR zGsL5eGbG<=uFmr$?d4QNijk#ZZY7N1W!g}lJ7tlLGu3J@hJ75)VP^*c?JOK%K7{69 zQvY{#^i+tj!p ze{AQ<`qha|BYIO8>3Cg7h_IkV{&*?2|M}Mp0xlqf%>tHx*IRY%(g0llq`gy z+Og$hFXFpPY2qf>49xjjXZXR(DXcIuOJ~@o~DIM z^Yu#mg9?Sd1kM}xBr?!#(SHx%RxpH0;U8x;p0;9_k)GN6wA|cYP`JQ^gJYH{!6=B~ zh(UFb{$U~tyU^d0I?$D_h2^NyIe^+&FyE&SlraC5pl6G}{N(Nt(Ey5d^ha2ect!~p zfp4AxyG_K{m%^()gxlYXf18{gF3>8{&8SJ1!{{VP5s_u$Q=tg@d9FaLZ z|D!=7FlgSJE{uL|XgYZCOo*B97TZ+r4#{ZQrwslIcTm*7k?IE-h>O^y93I}H`e-9l zb|8b8&1r}bUE`RGx&g5CNf2=_Pe4$g_D)K#!Kf*pg@mqV8kEhC)e0-*r5W&317gN= z=LUdZd0MM!8cDyrYJ^T;^>MeNNYaA{mJYl)plCA^w0avDg#tQcQXLinJ9w0`z@umXL46fCG{vp;;v zMD7#L)Gp^S+oevqBkY{ybYAgbd{!_=hGBo-K8$%7EXL;^@3!|5>hs*dDz!0@(3wC0 z|M$W0FE+Rn%tj9Li=^g_Uz0Svy-GI+MU=uuz7}8flRpDM zFpk|rwHTOzJw!uir#ArTEj0&v(y7ZvS)}U;Y5w{yv*W6otspiCw2x3>Hh{aeCe&w& z7UVF>2ijYib^8Bzh*CaLTl{-ZgWuHhXe@;{5fxmvi`#vqAFNG|TPXGLetG0?D8y8@ z>CLfHt-L*w3vqGi>z+qh|8bL)Yjz-zK6S%R&&?G)%+0F=9z=_2zZ}LPNR{Zl&(3xm zv7yYgQL5}=h(zya zJ;(GZBZh}0X=x~$Qm-PS*cdstkZbQ1=TMm+DyEr*;mhgNlqvMz#x|8(jZw|atb~9e zChgB5JUj7nUyZ%p@G4+ir`I-6qz%a3K1=gx9i&~iP!Z4TIW#~IAI(k=Q8;B{)vf%x z4sH+2CY@6%VZY6E9X`;Rj(1cMiLwZt*Nc9Nd?Unb_o!J=E$mD2QVQo%tI2o%%d-g? z4V7YIqM#Ixiy;vAGF$X@lAr5mhnlO*?L_1Xz}ivfP~wGPr2Rs!OUfF4fsx`_)f)8W zrR~dVLA4kqR<98z1zUR1y=H9Z&v&vtrTGa5YcWIx_x|>5>Q>3$*|?EXG7!giiEy-L zr9M??Inhji6I7WI9&V&JYko;&;?T8Xs%?b6QFhTuU`ElEI3Bu^s4(KxT27H*?{4b_ z1W>*{UM;Z&J({u_5*e0y?t^HuyLmcqkhwgcr^QiEh|Uqm$T~s zSf;(R4Fxk^teu>_K;kSD};&N<&UD`-L5J^Mr+Wqf*hbYe>}6M{D5Tr@*}DyCo}ONRcu| z1%Ff_Fjw>wPV_HSu(p+hWS=db`{LQK5|-h44gseYwL(euq6s;cftK%ZhnYp}-$Y_i zVxXNZNm*jl?h_hJ%(=9ujg|wH)^BIXs%8_!Xi|Y-Y@dtc6T7(Ip&n~`yVEYNt~ymg zj^BF4xU=GIUGlR-p7KDX2rJhs@m=eshj%;dXpP}Py@W4wt;#WKo-{`hTi&#$e@iX^d*7lj(EJ;7!I8$Rh03 zTi={i2at+x0d1tJ>I*A2UR;y4ifsrl{ED6CY-sRoR~>Q4x>1QIeX{uFRI0f(iF1rd z0G8DA!l%s)x&HODx!%!6L59(HNimi<=?5wwm{%0(QF9u*6OpGkkE1uTjyN}wnYIL26#p_}mQpsjl)Q4AZ*yyUprS~hH66$SMG zk9N?I2ts6JboA~&21Iy>tL>m@{~q~!JYFffnYsd@9sFBqF#(E z{db(()A<8F0UqaPJ6L$@zcMF;z^}1bbw!G<9D=7D#h|NEugM>5E6DT5%>BZlI*7$t zg~6x>H{&OA()g7xIin0*hYzPg6S_@LV_&?20(aY-Mb z%Gljq&bHoZe;k#+@P_FMajbHHFm}`#ACwtK`MFEiUTrb&kXkYXP?>TDocM7_*F9g} zJ@G0{eJRVQ^DWanBnlEpAQ5FVsK#M~U(a~}+hc&iAdgg9If0r7o8YMPe#$xj_ix-3 z11~y*kK2@rzr+IHa@*S4gfGVnv4=Xm9&GeH50c8?uhVU-dR`vRxUzW_wM;$JUe zTrZ_7X=&_Kl}?GN`vJ0yrdoFeMCPsK@&HPDEbITy`@-(Wnv{-n^W{482yGSv!10fc zo`GnB?2h-V8`!Q_XYaQ?%UqGCAL}3EXytE_Mku|jXy1JIM6PkWc$9dHwgyN)s{Q&l zeuTJ}8-q_UyxS0PxY9bhy6(n(MxH+n<%B+-_ISPSOPRw|62d$?5<-C1Ol$pY@-Sa<3})~YoLE-Qe+r5Z$>vG87%A+rpUM8bhi zev|CQT17qcbi4N^t-`g2bki24wH;$nHEnI$h4E(b!bR=s)9y&^p{s8D8?V_Wkf zyI-=)|9TDg)y7FxY*M{?IP4lkiD78eGF1wz$FLyLa4K{O9NzIjY#?bJG#L3|{R zaL^SB*8=msuGc1Lf~xpRj=&v(O2`HpzKBUB1WLv_6rek7OsXB%q$ef2DxC7Wfax!g zC5TC7MTX8%Xl!1O0t7_BYO=bFan1&%T2OMePF(SvswFYSv+jZ3i9vff&^)wGlh6G>wq6a|PZS(`ixn~`Ty@UpPJLIF z*htb_3#W(vQIWx}=PZv2TZ6!l4_{*O-CA<$K?TvoC9uWX1578V>_?8!V=j4gM@le_ zJaJYR{a*F0&XM{i6L96jPXhqGa-t4Pn41v`f;=Zju}Sw_T^crNB9<(aO;=}FT*9*o z?pg=Y;tyori=-H$>4*}{y_4A@1HxXSb8QEZ25NIm8-mj{(dmb^Bn@Jx=FpeOPZo22 z3pT;@XA_(R2TonI>`{3?3zS?}N=xtiWcD(^{Lhesp9fB$5|*+tXI~NMX5*I~hE_NzNrdLBwhHX|J2xekVcIFUH7@5DqW29%) z+^h>1<08j3uA^7S!T25V$$O+k!Fl!daB$w%@KvY%WuaP1T3WiVx7@#HAb7+Cz&M*+ zvg&&G)y`?B$0vPQFAzEhu$0_RxmwmhWIxRazhv$$5_IHz7n-+hsrIE$RBDs-P7KR; zTk|%Vg`{8Y*MT@s-BV6RnTx^!<+@1uqW4o3s_yP@JVl_8>o=f({PgU-iK|ys*lyHr zu|-0<95No4Omc?fgVmE^l{Hs@4ogeNAP$afyNW7e%d!M>o&8XMLu$p#BFSB~7>|KZ zAoHrnUAg(ZY(lAGR34{rz(}02I`yCmF<~$dALwRB1{6+MixF`hvMPc{sxXPRp1OzQ 
zfClsou3iVA_}F{_=6`iop;zjuPTGkbh)3>Z32_mti^s8}`8=dZA?c5qLD;oVs=zSa z)lY=m*S#(IQz0IvT-Eqz;uWGN7$8oyBCC1@%wu_&Ody!}0|0+B%5{qEIf z%+!W!ePq@wMalw9>|EzF0T)ph;pyqi{iRG|vYYBAndih5m(u;VBT?~AigcY{Rg>s< zzoV!Wz;SiUlMYm$GRL{vU^_FOh+{X@k>k_YA^aJM3H!)FjuKwp8F%A9uLnXw zb2VFkA#o836eD%?z01%r9Ue_$aJLV9bZBJE#4GM{)!6nOh+dJjm>CX_Prm-M%p8dnFgC&uLac3)B<~B1nL)d7Pj`q<3oo zrZ$y88hY34*Q`%W#N3&`o=n}%^xSu3g*St))y8B>Xw NH`JOP!xAayIimgn_sa) z22%!y2>-jpIC9f^-d`Wjq(if9n7Nv#r!~0(BZ%?On@|X`M?yrBb@d7xvt=tzoRjG3Tv%q zCg82DM(f``Bv3laVBo>#apF&SH6?f#xoU8@i-p?*zE)R7=z0$xt}r7zS+f|g{LBfw zI^5?kiLl*MD>)TTW|i1z!?J8?Rz zvykTGGjvl)0P#9nV{=;SB$=o&){R_GESV7=A3G`pGA2wbEfx#Xgm2D{XDI<2eG+|m zQGEY|LhFk~Mmwq`1T9v-(aXAfcpnpE{zUr=Ob&B(Y%24ZWe+CF{4nAKL!mcwyA5=s zDmoWFONE+RDGp+362o=(8(s8hTIs9IygNj3q_J#HG>)Ky8@ukK9q$0oFJJrfh1O*>4Xhl{=PQqcf z$gju*v*4`t+1U_J`Cu`ds)0bJ#2^NyA+`9gl;ruQb&A~Hd7qjZD0NTE3M4*bkiswO zr*~Q5$b4XQ_K%UTm7EYB8t4=?h;Au%E5V5E#suup6EQ!>ihVm^*P2_1ZWJRvN!%AW zC3Y*w5!1mMEk~5EpoVFw5c{D(JxGWcnW;EFlU6!I`udo9Dsm!73&cST2NaV2HrE?2 zp0%3#ombHz_mZW!U;kX1c^i3%U?wllFeLaM?h*Aq#Zyv5c+@$MhhaNkc!=7{gW&3< zD8J*DJtu+uu2ujSyyzL@Yx?(I+Js1tCXW?fq|P{XxqU*9V!6boi+<(c2^iN=`q!~A zguQ=2Bfi1XngHEoCpe#UYF_24Y=u&ezHq>6iO9q|PZoMoDlzK`H_2AY3P|rowx>#V z(VcKx#z%($GBadVc-4b)T1k_tuPvn(UV>4*F=N9P?Qp!flK+FR%Y$;Tt%-k}e z4Tdh+I-IUo_+`paZz0r@YZnU0QvXLq>WPPF43CVAG;(oq`FNBLgofSfL}K>hwKA;5 zuWtT;y$<9t$Zyi11Wb24BzX)8bRt%Fxd^q^HC$+QEx-vxL?`K9mlHbn>KZoTRJtdq zCeMYZ_)M|vED7I=>isy;aXaMY0GAcCkgh9m5pd7fTQFf!Nz~i+VOW^1@ljAv%4K3g zNxXoD`OU-FEIJdvb1iB-wtICboVJ%{YXUNT^$%do&DInR6Lp!5dC#a`f5Dw`oHx{0h|VOt+@RJt+Moie z1_HN?VVU7KL_fwQ$Y8zvTj#wv>YsW{sAH3sm>!>rZbf%xsDtA7r5ij3k+p5ycQm)X zLk`5ylaDBKfCU4xBLZ*}@ZhHw*1$vacq-P5citL46$FUtL0jpOgwL|FdaxO-nO25d zHSt2wo$@t2T0KdIT}~QNXSZO1_&HN(_bDzAv2gNoGa6j2r47VhAc)W}KpT*;Wz5`|2ItJ&$2Sa>s&EX7_R z3YFGlFX7*VKU}R}aAid(GRajU0a@~SLR;bfm%_gYv_SsZ!VM!_jS)4Z1*`*};F}ga zD%@jXj;W!f4H4?DE5;C_s`utGjj-ycp#Dx4lC@o@C|{X&>8^X+HrI2|6%WRhv@bAn z3_ZH=RHAZpUs(d3)Dk6Jo6%yRR1?J$V=i?`i30>0H~Q*Q@5_j~r7sgb@6&{stT1SY zZ)=C|dQ9SRyRPr)>moweetvP)TU-gL)zyMoN!*KT3=M5qiXXYxRSamKpT7r=?;CM< z@bWeZ;tTkf|38afGrGU1Z#8a|Ra7SZV61Ci_8COIYkobX*NJ?uuGg#y3(DSNmx(?=j!_+BsCVY}e8%?bTy z0HtxS*l<`7!Rw0z`)01(8Q;SAoM3>hN=*oE=fea=G2arALL^zD z@;Hy5dmg-ytTKW=+TI=}ZA!U|f@PG}1};oJkM>?-eueUrNsqJDAAbnuUdyRq!;0Sg zZ#XxiYODlOx09ATGvm?M8%dO72~C(b7Sxo2^3LM%!5Ds#5$$lnag8>}3oh_{LsFYf zY~JTsmmPP)=icWskw~aYj^8lm*-HtFcT+M-z)d1Ag^YFcFx!RxW`J=V=f&>r0sn`v z{^XOu_b-JE{TksfP?|3&2yxJ}no#G`Ko`Z&97?C{h(AbN0L;?6MIR^*W^%R&GV^X| zIXS3YA*KQewl)g67I8m)2zBf4f5h7qssve33+>w>GSpey+1n0iyl*tSNb+zu9nH|& zNBXEMh^YtD&wjO?^-~2h(2WCSRxr)v=T$gEvCxN1(KAz@GS0qnr@xyzr_O{%DA^$XVfPJU+A&}H03^f+?_fh`;B6xB-sXdMW^w-h@M{U8ih z-*s~Si#y<`fSBBGs=qGHjSx33d7fg;YFA*$<%uJq!($N!f$bjeRlc4PN{sF&<9?~D zdfBR3bb?7IvRCvi23KwzbQN9Z4Sy`wBMt;X%_g0)O@Y@ZCwd zuCl*~eRTf!;XqvY@`<8k7>8cRk3hj;cgx#_Xelqa`%Rk}=v!KN${v(G z$m~?4pl(gk*y+li`0t-}J&dTh`gYqC_p$G&C$XSF%Y(?kH2*cm>VXcImLX_DMEter z@sC(ojATQzyEHbfM!gWjyP5Pca(tcThbLz5KXW-lrl>;EYBVrxTpG&4lGbwG>_
    upJvR!j;JgmH^PGj28;zk9@PQ_*E4M{d)oyueqpV8-K zCOYS@650zPS>iBc$J*R=Hfz-|coh19h8Wx#v#MLf){e7i8d?(FQgUsIiGOOS7Stg+ zT9BH4|K3Z_CefQ)0?Dzx4g3Ukh^<1jGM2 zEtbb|Ay66-yfG{<;$kta1nBSC@_vuptQg|bR1OU5>ocT(&wX9Cv}_-oP3M&Fw|YcRVi-Ef)bXP2STq}Pg@dOp5^1qgzo{r zuIZS62j!)s-$rps21}mqy)gn?Z6IjM72?*wTFtaFM~ZZm{@py=|Efjvxbqe z9G7J~xS8#yIHeUH=PSS8GQtJI3qD^s?sgtU=K&JV7I@wvd@p`{9|k-RbW9V13)Omd z+r8Kz!}&{a9?CzAPhR&*@OJ<@|DMMJA@>fA-*+<&h`R6N9P$! zwkgd?V6w@ilkXI#@;Zw%E;5Um_)UGRHxb{M_C0u?{1r+(*yyir`H9&p?HnI*{>!u3+Ij#XDOF5{5U>Q>*#soLeviix{WVg{Y#{d*8$b#qSX zV#^Bw4_Y#l?uJCxE;#aPMsK?E_h$G%vEp=4l99q7lU>Wh?H*Fd)_|9f$z1CM zp~~-Z;i+56L0ofdb{6W++wey?pl-mT&vA#sejLj)5fPq zVXo*lQqsQ9o`%sp3`j`KfaZ%{68_7g6+;?2JThsU%-k=->%!g^u2ENHM34~8oCOq$ z>!&hm(?N#Nue&jKQqOyuy*G!t6Gb&Z=mi&a(Ig$#QKt&*`B4pZ1`@9yFlg6u<_=(j zrF?`nyI?h?Kc+*yR4?K@UcBO%7NrP+KC3A9Z4pfA;iS?#}{W(nvN{0EjSY~40oPVxPDPnlpwOdc;>@L(v=kOgq2M8 z!-sVd*RlD{_Kat=-7BUP!(>d>cUn-mX{q^xXWjw^-1g~ zeverd`Vn}DPVO#pxo;x-G>ZgbR@8PnWm!{M0jc@$ZMXdLtKMq%pbZV^+lZ=cn54;> ziA)i6c{eaNv5E1$sL6B+Q)IR(0~i@&1~2K87mMH}fdwCNjh8djf22xtlJ+*8^r7a$ zyfn*2TRY}Ng!n8^K>Ymm{`k*)&u(5XQdR6;$%7Qf-NdV|0{p z%oIy11KsHWZaX>SzG%Q#yPD}$-}B(PD3R>J zwas^z_KWw7naz4{oWf@R@2B&0|6Cm&$9bVO;Mj$KMUdmOvpJSa@c(ouJ%D_J`kQEg z=Hu>@gP)aF*C;2emb_EBlq%6~?up$fwhh7EV(haj(%xDOjLk(}tB;^0ek5;Y zHRxa@y!^@zeQ4!oQCU~UGAKZ1k+ItD<)XKGIf>31KOhR20b~RYP>vK>ptlrT<2X;Z zgS{LG{PXXzCQKv%WU^_!yKLF$`3*~zy@52gHEB~k@)0E~Ds;G+d8r;`CnFCGB~s#E z*Fa3#uYwyy(cDgF&k32H+k2*+Kc8WoVU;o8t8^(+AMlg+pXCA!7{Jmwg365+4MM_re#p}{PnO%BAl*JtK(6lU zrs(Q+`T$TZy?y_EQ$SQFXhv?=cZ>G~(G{=e>#vnuWJ zj!ywY#?D)U#HV;(k?6FUEB&;<{5}FQ#V(fymER!Xo~R!Ew={voCT=|2oVZ7 z?CJYIcArf6F%R$Nj=7hz-YbfQ9240LO>I66$;a17lcfFW=uS1cL>HYU!j;2Lc#B5@lg4~|CRuoKVzdhUoQXN51i zOc>^&?x)O=d+S6`y5STa;0?Tc-X2x3e_D>QA29rQqiS{&x7}FyA3KLp+0Vnrr7Hah zaQViki=+8E^>5VbcD`h%`zdy7E_6k22#m^NQ=2v!%RB)00k{ ziQm_r+%?(4U{dfC7@;3sARV*YdRC=Y&rw-v(KFG#?cM#Z4;eV{>35*kJ1-cr7fTAG zN=nJ~K?04oFVz3&EkPjG90#})$ET##i%q2D`JkvI1$Pc70J{@xm3H9|G9|uaPsW-? 
zM54Me{9vY+mtJoM0XdNLKAg`1HV)X0vI;!wndG|&3wxu|eL!TS%3YK)jQ8K7*8qyKCjUpZ5OUE~MZ__zn~HKq$RURsdzT_JpAo zu_zv8)rIN!?DVT>9M?O;%*G-}kG`_kkCFICQK=GW*L;hc38=V$H;2V1Ni&B#lk^sG zM9mUJz_#@+l^*f@mI<iBF}%~=+Kj)Dxe$PCgG50mayn%o^D#lOS)n#w&;njoE%~ZxM+1h1IwsJ!cuyGYwW4+Toli|@2k`CIb}FDTgylNp1vNzm>&BXhCP*!7n0;% z_>QleS3SDl?#Z+Ek-*Cmye1>?FD@>$>MfAHyu8M9+kS5EzO)1o;(%H0|I*uUz|i7y zYgAzdUoP_o5FR6TO?EWccJnW&_IKw4lbgHyPFvS&Tdnn6u{Ea5e+n{kq~78?BpPIM zLK_%Gd{}8`p6NAjLNYmWjce2Ghwel+yzB8Vd^uc;}G<3n!vXhGcpx!4o%er%;Was8=iBht7dw zBvWmdB}GmL!goQB9-B&pU0P!&)T&B04|l3U*4MVwgSohzBwRzYSI#SWU*odE44OD`4zFpTuYo(x+rj&omj>+W8k1qUzg8s}(i_iHH(FYD>9)l6@hDug(u zMkHz!C9N7}SaAIQ81hqeN`heUu-RPsNMHr~42)SK>Y#?J50Vm2?cXOnl$`YL8?`+= ze3pVkn(bgt*e?oXUusncuCD>bQV6stN_p&G0|r7Fy^*XHdntOf(#40G^-7J(j65;BLLbVILXmr_De{1?I4kH&SQp=c?aAL1?lA2( zRpW{_K@|-g+4UV$LE%81>RG)<57#s?+PQZf4qCb!cu|?##8Ps~q0Y!ad!a0k=TV1t zZ%M+Y?`AdZ_5}*7AO*)={QxCQ%eCmto77l#nAu`$)rvz;r=nlk9xlP~697XxlY&EO zeo&YPiP|}Jf2aJ5nBgsU^Q@ul<}nFw)=r<3GP~; zxNC8Dm*VbDfT96Ty8d#@lUW!@8n&e7%1jtL{NLpWgP*Oop?J7GOEhPQ31@iyjQz~DTZluEtZ*Om}P1+0sgDY%EF~T|` zu8l%p8tQ%_+c&FnvWYdSVU|U<+;tuKd;`iD4{KJ;T9&mg?jim@Iesc>E(^(-zc47Oz zyGqqE1X9ct4wM{%K~RD#w)qYW+g_Qr(I_yY{;ZJUpfVi3RraU;R&vE-!8)R9`47^} zk+QJTT1AZXmz8yEh;_5-Sbp+%RzJ4`T|}mAJbH7$Pl_Gt>`-PLhV;yA@hZh$B-AE* z{I2VeBRTr^E@0AhxgFWF`5F$%Q&$)WL=i_qI@k#$9>SbP+C46#$Q2ZdnJvTU2KS*wX}E{H#jl|A>~&)r0G`)j?g z2BrfU(xt9Mex3+zouRtW%Zw!|&O=i#OZtT|Z`QYdjS}zm>CQT7d2TT{t?GP;yF#F@ zkTc|?T?2EXv%xBI(?|B0w<6Pt2Fm2bdezFzBY1iTGU6!OH=~p3h4i>0E$kma_vvZL z+ov+QKXSQ6QpGZgz%f#fye{QB1IpO}Y`GNC5l?XvRq@;?=4l3-l9>!=$QsSK^28v@ z^RefUkU_n4jFeD{2#5+wS;Q* z1OO!i(N7lz6MN%rWLzcVoUy*`1CEjgpnx5?x>Woc_ClhVIUAzTm5pU(+vMDlMf>zC&D1AJ%i0+_7>VE>wx}Xifvfy>Rt&5aH#t#l=kqTPB@0o>$&1zGjbX5(>8R>; zQ~Ia?ApHz!)X}t#^Asv&5(&w=a8P?L-r(^*{-0Ll|Cxbskk=_!x&uYyhy{P$`iCH* z`Y>~|rHfG{h>L8g*We}8!gV(bymAXme!clj(sgnBcp4IcUfqgu1$b8i9Q-nFl7cWR zYa4q8@7#V@&SfMo?3;)7?o#|A6ZiGwI`|hXXW5|V$;;LnABp2;6XJBvnG4v3|L3*+ z{PS&yw+p?-?||_@nB3&S>K{|>I#CKOSf5GAJ}v%1yo>QB`&Bq|vBJt`JYt=-aT50ws!W>uy#K9bv$ zp{RnSVe~)tBUYif>LQufo3>ykB3I6yBVF+W#QQ4 z6sV!yiwHx?G21=Wop>T{SYA~zsiD9!jR=?HjA3?u9y+FSEh9ak4;5)KNmtE&PB0@P+dqdI+fxgp@BLhNdO<-;|{aZK9z+Af6=pWZV6Q&b7Bh)ZGHyecaEj7eix2P*UCh>?q87-k@wd#W_LnTz5o z!s!9Fi=oDaE=HWAy1R>*lAq7+5G4 zuG?t9f~-dc>1oM-xiI6nhN9a7T%xk# zSXTdgFCse6?Hk@s!YjXdt>bamF~~!wcA*CvUfve6pVpz~;)m#Y0WR))JwZ@>J~u_V z^IS7Sv4pcrOE!v7_=c(jpC~|e=;y3MLqjf0M&4ynR0v$15cb|&oBRc@_58@cZC-KI z$?NZLOVjM%GbKxl`;++d5CCwX>`%~=Oar}Ld7lUgq$8cEy1GlBg!tWDgIs?%5&WW| zEd-)aA%#ho>WeB0h|4|hCS|E{XE-F5u8N3V5O|U&JPM~QNhVwSPn4HT*U8two2=VI z^u~4j>lF(wch~4Kz!(`TYyE1V@H9zJ2=Jos5^9)DyY6~s>J@U2xtBfh4vW5sigE+|TS zGgzv{y9K(HrT|N%5L%$AIT(vZM!AnU=2H^}MiWCKH!6K*8op?_3mEQCBTemZ+6cq1 z2wrv=_&zXtJB)vWRUKjyaqWO0BcCp}trGgo9!wFK5$k!~hHtS%oHvz8H%NN0pMH~q z)*$HUcYPnz*+z6FV;32{P1+DH3^VytTZA}6#wU6zp%drz&xzvAxN3b#Z^OXP(v@v& zqfF`rN!IAxPN5^Rz9;bkF76J9)y6Ba6-y%>Rl;E`Cl&RHG=Dwj2qMbS)e63U&xy5{ zs~1OT)2%&_y&%S7aMgu5(Uyqf&HsjNe-J-+i%WWxn|H#SmuJ_a`g}k}-+?vt+^nuY zH9vpUFiPHCk$6E;EVKON=mIsog(L zFD*Ubhn6ZM?`*Py37vfGk$yAR^ayVGQdS#p!MsdxibIf3DHz;Mh2q2jXnP!gT$Ec- zYSxFs!os$vfMH_qV`(AYFR~z*n4n(sg1R@?hSPy4%*QdEuAisx#;>P3dw**JI}y0* zstNG$I@nw|O_fXt&S2}09l860V4@>VNK77V#Ib+P)bt;e22=ihNS7A-h+yDSX-mX< zJmne`Oo4U1{Tso5CU5|V>D2<2TvoWNK zlli=HcXrgX1h7crQWPC4d`x;tH3+$QmS=4Db(!~c?zvYYOxTZ0qmuthmQ>7ePb?Cr zu_r|&z#Wt0o7+DRx!Y#}5VdR02hWUuL-Tg*Aox8f)eNsy`{O@1 z&==9s?b0TIj2w?qTnAT$1p3%@_l6T7wuDlb~pfH>=d!I6AA3I*3 zCBay?Iq!WxRJ{VloO<>524h9;0(%WQ+J#;1S=tM81qB?PJFnfbE-%Lt=>$y-J$hC3 zk^0c{$kL~vRZI#wcsmVs5rtB7>@UM24sNPBDId5bxJvc{v&Sei0G^EO z4!X~mqI|g`(5z;RMk<$rn`4nUloEo{hcefq8;#@R+d69t7{ca*TI;0PKgwlGjZH;^ 
z$pX_&$oOk?L_;mGULPCKkY&*6cM;=w+2I3^c`k#~9n_b+9+ zatPSVSRbOcbSQ#?bt6MSu8#R=vc>ANXP+UU_v3dH9NPHF1Fh{IuQ>*)MERqHl-4SJ zY-NU^(K1&g6l8^$S}US4@|*(xOj*@XkZa`}Naiiwu`dz)L{iE2MSYzvyDwJHNtt?o zgmsPHaW-*!XYL|vCR47klN$4Y-p;~w5}|UfjW{()#=u+fPmKu_ZANdi=c}&hO!!nx zERNHgHf^0*KH}N4V1mLk#*#GB-9te|3u+ZHv2L9`^|SU}uiVkh+&s|m;rsryw1WYx zUlBWx@s}#YFD5o94!-kz_v`L>ggHH_RavTxi!P7bVo0&s;&rDdw{V>VObCmz<-ipnDl?~g@JlvU`hG?ZI!hz(T1I0 zt3Ma=WYs zjW1S?BQ&m)r^X$QTCmODvIa>a6;fl#3H9!TLUJ19U!^cNV&sRbwkPW9zXtR zz4P2l+N1NGTdKeMi4#ZmF@iu|wkul$AXx?iD{|$Lwy9;wibpLeh9u|XB-#CpAD-|` zJfRHopa^rMi0#OcvZ+R96o9R5c5Xt5X|&NVcFqSxCL6?gZcwQCq&#+n!K!aYVuD1? zpDhDy*}}ik2R}I#>B>_55(o^P&W|bZiMW&R1jsdD5kkWZjd47h5 z7opZn%(uetqD!XQE4V-y1%S*)%cL!Jk};I!N-Jf?>Bs8wEsI05<@lB1m40^e4umrk zFLzJ^wFn(O{u;=RU5h`_8L?^Gk<%B(XA;ddiwztcsS%$>f+Zy(4IRyELT<@rHf$V< z*7XP{W;%lpU|!3Jqj?T1naVZJ^QzcD1{Y=WaKjCNqmV;8dGF1l%VK4O6gXahB-!xF zjTugh2)w0d6s^z6&R*a?HqJnte(2qG+m>sXop`p4qV3;l_nn=-jQui`JLB)!MWt@aIci-uH=GGo0DxC&c(L=l-HMO{A# z$8jA>3vm#PZ?i4rD>EokK`9KVpz@$Wx=gy{<*{50X zJ(mT+?5=RPZN!pe{z~32u$ZjyQ20an0ext0biRIg;Yl%dH$9aV5@IEleU}gvZCGxu z)A@2eiXczr0QTCF0uf6GTaW~8xU1jKP)v!v#I%5oOH26+AASWYY7v|N98+P!*+%EKHRA&esU zhHf=U#F)}-sd6nw@X`kfRkR6}au3?D$qKpJa51=|*{i=AvP-JmR?D15XEd z){i2apns>4@8{zOeBp&u(H>>1Fz2p2CG5uL#=S9x8u934@8Mqol_s-s<1LOuUz}O% zGq@7L=7zj?y4btaMGf8aH@}&s2@RM6ql1bkFrEBff+Fm<3#t64N98}M#KA1H4rfx{ zQkEr;vc)$eQPGjnEGjspXI9rPOyc1F+8ymKZ}c%aHEV=tJ{b?x|6X8@YTpKUGt8qb za+H&6XZ>m>Aw#gC2WxafbwiIJFH3k3-sR+|;Q*|u>Zx-abr!!di_R)sjagRw^xH!M zwSR61sDffXkr&MPb;<@^*eOb(i+c}@+fp%f%emtrTHsWv;I~_;HWv7o95;v$M(9WM z4UkwV<_iRKI}N6s(>XjxT6JeWwm1L34@kSnF=o{>|Rn${^e^Ax&t-)A*vp+P@vw+n283c^%;HzASb(NFwX{#|81} z=6M&$T)e@jg(!DW{YdU#myxQVhugok0tuC)Otr_4w0?1*fq()eM;h7zGk|{~mn(sz z)M>{8e+`*nbGZm7D^;X*_kCs*t-G<%up+K=x}WU)rfd9BO7?R$S8lxg&ZpntNS~_5 z^M6Q)<3@Dd8vwRo-|GHxunFF3YH=mOKp6fCNP7bKEo|!)y zL~_N}jyN(&ZqlLsi>%y*7^?IdY&~mxcz7KD8ZFnV@Yu6pnZmg;zb;zQg(K-+@Tu30 z^y#DQz17tX_3s-ud2>78>@?us>bUa1>W3&*7xRL8!}Hfz@ta}o^m@6UhXA;1joRIe;oV0YAQBiRDghjT8 zG_QZ_N7+dVE=~z%%4~pILxjQ(vXiCjo;LXN`j_NZe$he~I9`OLsG=fRVf8P?4GoA- z)yDg4?FkIn33PFV%y~lqSMgOz=nbRSk^ME-y8(JjcYY6V@7^-RLd7h}1SFU0gOuoN z2W;-ZQ$8#l*KN5>>O5=b<5VdHUljjN2p;bw<@-m>r~bRP+}fz8d6DWsXptSQhVA%4 zakj|N4=r7M`1z%XKdIV^*uU6>xMDnLkTc-SwX(t#$HHl^<_l6JZFEo*%aAx$h?HZM zcdtiXEjfKW9xD4Ft2-28(WsjNIQ=9mN81Whm-hgqIP5yJH5#NmhTW0bJvsoi?uwTt z)kRNEumSxzCmq}ilVQL?imlyQ&&bGUq8$rgj-FIx#@Utmkl{Sx@4~P^GY>a}H0R|= zg>S31<%q{q?iQ>A{_XdBqkEWmQUt{WGh6M_){R&=az9l``nQkKeQc_Fc7M6ZuOHy! 
[GIT binary patch data (base85-encoded image payload) omitted]
zw_mEpO`^RKwmTrIbcuC*sy_Zaa`9doff|VjCldZmHr{QoXGLkx943pVpNnqvn*Rlg>2cD)d{CcQwbloKh8epw_g9X91IMl zxff?=)|&;fVu{pgcurm|^9rs-q;|T$Vj{d>P)6oUjaazo`^WE?-A?55)iwsw=`Zja zHM{f5GRJ48mW`oGqP_DfC@TSOU2+*^uB^LLd<@32e;{crzM_l}!r2At|6PtYjcS{b z|9ECQt~w!lHlz?Pn{QElgd;~rU7Ynby-EX6JLd*14cC1TilQ9w0|bW_sGP^q-P7I0 zSYcjxl&`~!$wHc!AT_An+!Lved<^?@6vTTgG(iR}bf7twCS}FIz60q`n^n~u9d&~L z=zFWdt9*Z*NlJ?|DbUN`TifU|WIfb#iurn&L;EQeSylrk@Ormd2**8Ej%IQGwix zaTk_!n-?>=BZ@Vl9?m1*T+E*mKGepFeU7DAaw2AfnvN6!Lirj;w8rwAg)dve0gJO*rhr(eYHAC z>>_K%--*?P^Ru|o9{Wfgc;PKJ?TRt$0_IPbu7I?*W`|Oq8e${&ebc79bMsrC#2_zz ziYUclaP=8P4Q?uD_JSz?xEbig>TFU)PCTGIJW1aN2Lv}JpLg8_JqUuhqqM4id z5#|esUZE0<k2 z+Hs7`8a)iw*tbJG4q2DaM@R3Y`)NI**C`nPUF1=ULeV8#Sth~HQXv4 zqf_>*4uQV#thYfN)2LG1(EChL(7M-q;@#*#fVyH|p}MaKn-m`{(`Y{bLF&;Xst07* zVn5OXyqu0vZ8>Y`1Nsnp(>2M($~?7%AAd0hQu)L#CP^^nkZ^mk0Bbb5=To@-tKydc zI@)9v_O8nOdZ*yLg@Z+TRY@yB6bWZGik?+Q6;~O%0b+4}VWSCG+nnzq z#s;z8&pYJREj-jY?G_=KK9Vhhh7Knphy2)NMAD(Boog0?#*RsamU}tXZQCh&=a#bl z-?;~`1`LcKWvyE|4m{v~>Kk3~U!^CB_S0LeA*D|o364+kVy>RV`#uk0=M5Fl{txH# zzIgoD_VFH(sYBKIYN@FmkIU!o72f5nHY4x&P}kmWN>Sd$J?#2D73({8B06_t@DnZS z0?rYyUs|oe2yarXav15pUhK^0+RA;C-?sqBXLYw*OaHqf3bDLj>Q&GwUmOx2r955S z7AlPYECp^pz}2p@a$g;DflivjHglUbUW;iSZbSVE=xYayR5Fx3bj}e?eXCemvK*>J zQ1%ia_7Z$?2l3{D*F_2D@UY%9q9FS~wpfL4b1_x@Z&OdPT+b*uRI<+ZLkWKlmaA7PojeX}Mz-R$iU`(5 z>R(XU#@pV%n=44E^`%PDaIsz1siH;{9|$^9pio<;WXmFhlRZtr+v(Jyb38*TT*Fc( zver!TCi4uHmn2EM)G@pui;AD`b-*&xev@~z|f=IPEmauplU6+pY+;# z%EQh<8~ZRCjeNa*fN zyi5^ha>}6eW|hPGK9Qi=_b%=G=?`ke@VGq5JT zWgyoL+0d}Ca5x89wp{=;+;VpTVu(sHpke5AodI+_A$kkgK5fFsjom~GTm#%*HI+w| z z{PDI%Q-RNDB;P$X-saC!9xRu!C6WO}l1R?oa&7(n8$Bld3nIat!blh~qNcj&b? zMm)H06zTk{7As~Ed9!4R!~5t87VED(Hqa#fQPS$Wa0LW&08hD(sDF88lmdHl911wT zs1ls6EB)F9pzFUy#-{#dAwWU<3%TR*bTQ9C`#1*>qGiplYGWLWcbn&@&1{om-;>?c zL^tDQxKU9{GC9?Y6P&D3khdRpL2{}>zD;E9;FyuD*)Jj$u3zc5iZbjI4ZN;~*5%k! 
z087Qk6v`UqgI~~Eg5HYDHvVjS=;)Db%kP(ZY5WE7ZN8uPZxve zpU*j-f!I=^z66E(6VUD9$BGo_gLy+0t=`RU7OTT;PwLi`pI->sUy%n8$%noY^7D(> z=OFLhtz{(X{T3{`RX0mcz0LAbMpdF-*68uffdJ;Y^>V?UMd z0(~VruUzxo5UvhKzCs1uzqtDR1UkV22*DnA!Yt1WXoFKX&@s1e}avDKB^8BVbD^#XtV7oTj z0;#n~U{#j}-jJ(rVl4*TgQFYJ!?@5jLu3LytW>Fq;IB}(Cf2{ zqK-6lc*PiwbjN1;*j<*pnl!8@RzYm3Vl5xiNrmc`G<2F7>6HlR&CddZ$@s+B!$u=L z7B#wP0{e(r$ck=bQP5y_-rCB#J!!jzO<>YSN>*cdM49Zf25S&*%> ziExKYu{J?Xc?t;b3p^D(#OR-9T21ou@`}PV`}aF+l&XeeS6gVfC$V%VD>*7H{Ku>X ze7eym=Vs{ldcusJU$?ea3%3D|1KdTJce?-pM$nZy3Pcgz8oM|2+#}JwjUEr{gWN4Ry!uObjAla{nne=VjNO$2n8e zDafF9M(3e6g$+?fEvj1-yYe@*O!+ey7I@Hhaf^THDcOk%@a;~44wymbtoMLW?~L*s z(DbQ+&LY&l7h++V!v{wVxvjl;nidCXnl2SQtfpQ$_`kj>cnnW=Uii1UU0LLWsSuJf zWPb9H>3zgvlmPX;!#mHrZZ%ke{bJd!zJMkr5AS>oW_%q-oh?nZ!@V~g+>$m|%#Nhg z@o?49kqq@Yc6o6Z$~^(OE~5~-dQZhdle;eq{Jk;Z1~?uI0gxD;b>t1iY)7jcY|J=* zCF+TjvQJH9vVrw>Qlf!hds?snK7b(`AJ81y43O0-p(sHXsl{^xAwihmV(i~pt07!6 ztA*W#0ZIB=IM#y&4GOc_!r=S9MR4wcwY5X&A~!6Vj+Hu4I3A3ilKIN5$1 zSSn(`7)1{GD!MZBGhki@IRr_Ra4^hqe|#;ZNhO-QqfTe1&PsR0-*iSxsHAN^_~wJP zHOh^7%nnaQQ?IScLQ;v&a9+P+a#`khd4THzNnI;4;?QdDS$e_yA8zB~D{Og{2;4e@ z*7L8^YMF*bKEzlgr}kmU6)+Jcgnz9kfl1Ajp6PXC6iPAvSmb)`B#UZ?5G~~!{glgl zkaAutOX9l9(W$Q(xG!<`?w&r5EUvO3%A}HYF&N+h8v`V<6t)78DdIk;3d*pz!8E%a z!+eA%RPCtMC)MZFw=%1DICXMwHdVIj7wvh<$*CiZqnIr%!h^b%<)r2~X4}r?Z}&1% zhC_s+dRZ2Ikqh%Y34rV%l^=m_(79=!1`d zR@mv7xhQXH%0{(91oDh)T@EzxXdN}(%q>dOEurZ0_X>l*gNO@u!WssjOG(~{`lO$4 zMJ7$2W56!0Mepb+q7`Hh%945do1lGOmY)uOJ9o*GNA`c@!#1gAI4w3!t}PPGorS&A zoFt4^vOLK>Tw12j5WX5%h}JV_1IJ_NZI@W`4Q(D|$G<<|AJ{2dG{0W)nY0A&F*r}t zTU`a4t~Jd-DhdrIDNt9`IHKl=F{Q+AN9*`br_YPM7AP>dhR80BbGW)r)4J9Q*R!tG znNxs$+xo+g7sx$aqVIK7cMd2RS0Q9fA%Hi9<<7x$`{eoIMdJT=+>E6D@yNLbvBi!FQ@O8z4vj6;CN z!ikfC_OFG?L#9!6Nofy~8SX)>f;T61ldEbvX}YMafC z?ep$Eza-Iua}-5}_=$7G0_th15^2R|ELWvc4h9tr748bV>{`CMk_X8RrF*(mO&Prd z{PAkU_`I3nQc6AM?_xFrr1Xw$MW4AHV+8I<)e2Uew!SC!7;F9zg(pc!pcv<-$Ll+? 
z@^4IU2nv$g_MZ%vd@j0c}VHNTFwJmkMTs9K~ z|G6Hafsvvx#;%dGvqo+=$M1tEh(69cLxRr75l7BFU7-LM=o$PT6X)-RyAA=2?*UUl=?2c*fo|KLwe;mn+UdirIj9fRT!-2^=ocIkLxo0?GiE9#uKej&>6(IP4+dn44Q!YznRiaoBY=ia`J?%BN-1Q@?uvs>n%YeR31Tm`;UYKjV+C+iG&~0rIdV2C_ zcJyhc*d@slZr%31Tu>tFV8Q7_Vtnb+ei_06mp zQQ}Vhr-(0ij}wh#d;9TiS)$cGfZDgUO^VNck@|q&F0l@3pY$GTJGrW7zFRIjIW@}> zA+4FX`bax{q4bt5(beBjsNJ&T|-QC>-NZ-TzeRtip?jPV{ zEoF{p_HXa!Ij;=~kjXv&-a*^UeU{amd7F<4FLboNuB>&gXLJ|!a$ofVk{k6t41op#~rSa{$-f`ef$G*sJsch3bn?ZpE;fsf_A!h&V1PS z6C>#2?nwZ@q7iny)*kd{+JB6b_B{9G3Yroj?33?~zx2RzOJKG|3EpidhCmhZ5Bz37 zXq7;e9Zr5>%qAk8MBEYvVID@#*&o`CXCo_#e;tEO(XZXNsMWwE#y{E)OeGuZ_FJS2RTg*a<(xl9 zvUeMu4;6Bl8{)`AjHI9Bi6t3DVhf(-N2j4-E<4tAf z3A=Qb$iu=Is#pwHw^ra>Hf@JOYo=h|TaT;NKY<@QS)142CX(VQ5%-C6Z5oX~{9*Mq zT2|fyWi<>(K5*|8YoHg`RTli&%7pL4Z@xP+c(FD|n0mh@U||LT8;$>H()~^GSwvdr z9e&XTPzvS{`pRw?eW8T#=5np8q{sEp7fL0Ec`Di*k46U1XrQX0zTjcl*{gOTXC(+# zFaF30M=+w^pc)>O`iKF^JS2A%^s`)o!hboNGY%3LD{vFfv_uNosrHDJ*QJr$|4j9) zD|vkpCb8v+yz!~tC!1*haKf&YlZ%Uhc-nnwoRiZ_=BIFa*o~arXNh!9-$IW{(*WiK z6t1C3>Z_eg6veoiVO&{>;hn4ZUMy*0$C7~)qjgp?CpmIB%_1rp*ac6 zWA+J_)}kCUmwkyeqb<(bJ1u*e^kquI`Y{GI5@wKOX>gs(A=k^9pM87<@PFrq6!2qM z_`j%UeXW=N&D#DH*f-JBAtW@}z{)aKxj>QS#a~orlj3r`YK^+u=hJAwm>MBtW1<|R z-KMOl=TV;NJ7lfpyA9IfZBpo7hP6}T<0ysG0;pm;f65R?@Y42AZW`~pkWCn652ODO zz8~X1i;gXxqK&~I*_6Bb_Ny17+< z;;w5O0LVQSH*$b4wC>R0_Sf7X05x1oFTy#g&mH9q=&7$|*I0MepZu<73_D@i` zox`eC>a3c#ckqy*BEMU!yF=VHnv$ki-Q9n22Oo2n-4nagxoozx&b&(w0?7;JI-jr2 z7VsGKLS_H70@T=@*BM348D=>&TT6ZW~@>b}d}BFGk`&exX^{)z7*ED#m22jE=x7jW>;O1LIF zT`@=X2m?-|bBgfH*HJ2EiPLGcLaZJA{88i;G1}~G=xGS?DL~F(vN~eqX?NPvn)j?O zihfP`Lf*i`gx^Hj5Aq%F03el+uVe0=j;pJ!vPq+mV6V4>$b{65W3L+tL4iMbj6#u4 zTQi{lg1zkEW+k9gk>8^9|AWVYM`w|(tkVTCsQOOP!+g|$Ds2+6jhWuAf<7E>>t_MY z7Zo+F8qpdF7ebW<%tze{=`!^n?7(Qwhynb_VRY(dm$lQRvPq{(5Qhfssp{NGUTTqe@JtK*M#?8)u$Y=4 zQm*rx+I?CPc5FCnf#dpYP)6@uv$lHHw+1GmgCdI27^b&F;Tq9`z`lse+*t1*%w#}n zq5$?&{}oW0{5yU;Y5nFC8a#gS^=l%Nl<%Z+1j<%OMPG==sq0~{XQL!AyZ0FL(w7PL z1g*CMv^@^ex{a0-Liu6WHBH);Cwh;SqR5@YVMcZv2hZ#?ZK)zN(5n0Ddiq?+UhC-n zIXEeOIxyg!_Nb zN1`X8-%o*5PC)Am&q3F{C~r*TkdQy#d!Zyt+i?Ezb=HXRiOpJE?Lu(#QJrM`?B8Lc zQhX$fM~B1|-`x>x>_Csr?A{;+GRJsiV;HG|w;91$UP~1Iad6!4H=G_tYBKURn4n*1 z@La`xk)H}CXXhzUmT>VBq{7T>$y|P{Dt2<zl2thG%ANrhouyrAIp@T9rMK^i z(^06RIZczqVwnJL8irq?mg~f~c;&+IyBjrhEYmMLM!? zRCbWw@jYc%O&|2cbkl?*VD^DI`6IItXlwXWYpy=5-lkK-Fdut4FaZ?IVxz$X>dRuD z_CfbTliXx~KIy-+Wqo;H2i){`zds%puqN9)KfxT#XpF}l2S^Q}tX*rpKISOOB6rD? zAJ73Pr0MtelQV$cG7^+>kvn-}^=os(73MuB2Ie_kKNsNi!w%quyN`&PVH=2L#>xuW zau3eUiO_fM!?#FfPnfV3=B{$Ogx>+SD$|r&h0|Pr?qWhpfj)$!Z9u;kKS#mu=4^Bj8fBuW?BeSz%znY(2aIf&14-Nk3O@Bpld_z7Jh+V zis#?|@pvpH9{CBVA>FHvP*ZbM%ONzaFc4<{68qST2I>T$OK9}Sd1?d!($a**2#vo| zR6cSP4B5<*req=`f3dvy*~Jt#31ikc?f91JU6RhR{?C+M*dY+9n%%iDDi|ibrqRKo z33>xPL=>ToMpIb4xR?+2eR*k=sR#Rs4lm5CPz$vSVJRIxarkKyw9i=i$tHB97B&ce@Dyn6oF#d{|LUqz}8l-mGN!B2su1Ju;jNEW&1ULm? 
zyYCJ>m6;xh0l+A33=jLH^&NFPkfj#>As89y!+$^M{7dWwLG|kAm$In-sD*p|lGFWQ zJb#DIr*9_JBEZYG6!eZ^!Tw!m-Zq+yDS*_yydrc+s`5Fp+0E8{Xlxo3cz!3L}(0cvZ`eag})X8${uN#%z(WA57jE)bOw`P#OBG+$K-TeKn~j;`i*2vfzE8ar?&t$Yxy zeC8A7TiR}69yqm_|DIC=r3M6&XXCbjz|nhR0e7q=TX;D_zpIQjPC4q$1%LnPA+m}w z?*m#-8o@npw=$!qRQm6l!J(LK5$n;WKff>6h=Imq-H?zNpm$_3h980lUcZ167{3X^ zLfGXSYw9(+`Tk&ce{=s=e63hP5LhUM41fs(;QF@%fw5ZGelLdgcu%AZ(@d{2>0raC zqyJv^;3&jpo9#Na-9R|>ow$K_OEZwD7df99m|gXW$a1%9EiUCxE(S@(*Z=zh(;F6V zGKjy`2y)0}D}?hrhBFu3$La2lvnt?CEknm=`{CY4z0x3cEy9mcs-~U4IMIbCBL&Na z6H8H1VYf(b#g^4&-8sO_F&p5dm5+o_jP%7j1|7$r$6$#SZU-aWw3x&v5be3;a{S4HS=VMt-Zf( zWukUjv?@s}3L-mReAc9Po$0P;`;|}0L3swydm5L^e8z^e{Yt%$O-MQ9*5|bxQO_?G zEbxefce6#5X`}>RiMxd`CB&PG50?!wJUo7RufMq6qbiUzfv6&3)|K1*^Gd%Z4K5!B z_dQc$#+NLCa{kWvD*HOEP4{c@9!UwS3h?}1O5aya<3M(;+v%xVk1L!(5LwEV+&Hk< z*9l;liaSK^^FkrtK0w_+!cY~gMcOGZ24;gOyZE^)mbn!VPxr^Bs`n$3*Te#3fv~n) zzZCYJdoMwPJlXA|HU;j87ui^6o=-WusWY5d?BUh~PftF(J`=c+vT8YZ^^GRBVy2ON z5g+3o??gUR7_4R^Zomns@Ke|Li-ljm{{a9b#n2|)#bdXa2fFeu_jxf=5VMUExr?CEYEUv*J{FH#V9~LJl%^V4uXHxQw@en{aTlNN)71k1vcePV(Hm3x}03PV)Gv zRwwj6;9$!}6gcgo29Y1fypjyHinAKOlMFs!Bp`813w|Dtv3v=(D`!(fk@TZ?kL685 zbP>$9I;94=q-NA_)ilYCs*rPa*DnIs)Psw| zJaofffFdhRSz{G;WH*R>)bIm`oTFGwf~ktV=LZ<8t5xs&gOX98?JMrt-4 zJH8If^FY=XtBnFZXpPFd-zX9ElB-3Y%Ph9xRye*hDVwn=R;9r)`LiV`n)44>vRd8? zTQRGx%1^ypWP*<6bXAFg!VK2!rTh2P4)uSt;l7N;Q4ssWX;iciNv2M z_YI>~M1m?Hyb&&#nFDemPAS1N?(H5B=YBXFjb;Fip=7S}B#|``D(%YAVSoaMVgA}^ zZ%EJYXNdXuF7B}=tvDCb` zP#vd4{?#T=$!wR^?p~}skI28H%)VHy1B)LGR12V--B>Oz-6QVA)6UA(^Ux~^{cE;e zssFUOki^N&C9R~96TUV%X$i^WJAuwPP5vx^c1Tg-S;zU?DtBC*i6q+z+zzrcqNg~I z38=(qa~o(A67-Uy4!6G-WNBKyv_MAK9l(@RA9@zb!iqdeX@77_hq+Zw6^M(Muoc}& z|8mzjRLU5We3k7SlIh+3POH%{hBL%N#IpTzG>9LfGgoV-ikQj{8eEeKlhN`ii4acd z5Y--@VMOG=9l9a*Dk?A$HCC~ei(Z`F9g0ZqA4=%Kl`bSNJE5}6*2}RJ;m5@$JvC+* z>O#2cr%F4G?>L36eNF>}LrSq!&PG7M3UZh*avD*Q8V zIjJT1P)NORp~VotvRKJu$?pTp9&Qr@{GFd%E$%e$NDW3%rr<{m4m5QF2ZH}=S1L5| zlaIN1`m*Tlb!pM&W^hrbbEw5ao4rDM;u$I1$0%r#Vk}BZiUzG2p8=?z-X4Q;`V=Yp zo~7jBzE4L6JWl{&G#QM+3l*p`c!siS3=+Ovvrgh)-FF~7!#h1^h|n9{O}1{gL`p4& z+8pRbYOJ}nZ4o=&uDA_<)X+78%+ZB@*E`@>V<6%F1i=p&kXu#-y417+hxdzf9kWc} zXy$XkVvv8pI?4J8VJrE`^zb+P zH5&dQ_usEg_X3!VYc(ctk0TT*Onje-Nl%uLEZ1!cq4_qcpssEzsbCoylE|C^zvr(P z5(3&pBYHn9zfLDvbeV;Wr=(W{PbSbICQnA+aP0fPAO?V-%dRYAG)G#0V49dfuuPB0 zf0{%r@J#l!qBOKg(>Y>?iZ=-lj2ESFE(Y)<)pvh{Ce$*L4>_626A^yj3#UBu3w6K;e<} zp$d?sVZ*-7l9m%^kl?z^igDk`ojrP!aRZRK*yXKORmz9ML8{9tKMUm}#1`vQUipZE z52ys^`V2gs2MKyyW}we1b_)~wO|}chcu#avOg_IsZ!t1{`mkDN&_%uS+M^u-X@xlr z5RV=KaNtm&qgd{q*#tu=aMu0GrfNe0n3gUk%Jokkk=83HtKmVgi0F8M=~*i9|MMs- zD1Y;G3=FQz*-zVnu$E^IgJw~K8S%58Mr619VgD=E;Pc$8%3_PWSQ9~4Cv`0E zvf;ovd+qnLd8Gfx9v+)Z4l>ba(x-i?PzWiM@I78(o}EB)Qahrj=}Jh&5F?&OU=YSiTN9y;mto&ywh@ih|r7`49hs`EA&KV zvi!XU-(8PE8LIsh{|^1|pg@qts`mkDn9OInvI|fA4V^Pu(nW8@$%|Af6W9Ebg~k^5d`1)kNCCpQ*mat57(kG*vX`(p!(h&gc; zgxs?7s*57lNi4}lIJytBqfR%@ic=lWgSCgvl%^YxI3Hy-JrjlR4x4#*|@?F+M#-{RZ)!6`JUuRqqo&wSD_yuQv>lAvWwA==JKQ&QO$-%|UGQ zfD^a!k^Hq&$`w1DbJi>c1ntNNhq7{P;|x-y?XvpUhZ;j3xY;buyZ2N?&;bgJPgJ;A zevpb&FwO;)L`n=%-#12~kT+B-lFQ>LCnRn>M0@ARh#m$xmMb>q0-2 z;tv1yd2JoRR-jHN6D%|J<(Xln^mq7Fv_T7ahALsCZSHWBsOV>4Ec zC%a9Yw?h6hR*iawN;Oos0AEXA*l-CD3$cb?~~`1qA3k3 z9QXZIuxA9?41(M*wtr{Lty&rDS$41_(`=_3Oi4BKjqxliLStw5|0t?xNVor0H- zB#ORo!5t;Va?@(*DqW@|Zrxq|5J>tcTG{#RS+HmHy?;ECmdHHCb5(DE!_ar@BG4z% z{I4Z60ma%2{m^ly9afuLFnB*Zd>9V3T@*x+6UQaSs$lVFtPuH2_yrq)n#+4=$=t<6 z3TMTD=+foqi3MRG5u9k5WO%m`+(ngqs1ronVGF`MMtwwUV*IQ=(=zcHOnxYK@N=P( z^Q3$?7yc$q6&xdm&eW4JcXYKgv+4vjs7r=q1+<*D3)BHpWij09BZZ~?YqK# z@BpX8FeY55a&x?FcggOwJ(iPL|Bka@ZljC3^dyYhJsMT*jh1)&k0|-YS(3bQy)%)k 
zo;;E_3k<)>v<4_);ji(dX^m^W+(OHBS{1WBt)m4mdI_{zzqPx#ULRJF`m(qylABB@ zqVbM6_&A6T9G;xSGiNl}5FCw?ZB8i(0Xd~z%8w3kwJW@)pX3rwXW&l}RdfQ5UXCk8 zqgu5A2MEV%?pZD^sh@bZ_c5~8%QinVsa*W0E9NQS-!q?XMoAOU>9(ENIV3s&T+_?6 zF>lHL^dkw>av__WUq%))_Q8rI%3I~n!iis+~zOhO2stbrY<$@jw742ddlj{@V^ZRLdE<{?QB}z zKW|N@_#Di?`aO>^4f>W8(p(%_g3j-O0a7h3Jb1&y<8`EH%$A?I9qyl!;z5BqmSRz_ z{1d&XyXzu9q<+$FD0#4O1Sfdus22*{T#HR47mO5C?cXA(A#B?Uom|Kw@~d)JJD6Et znck%BsZdq!-12MP2#i3Uoi8l#w~~QK?!kOe*rkKW8fEzdcE+oWA!IyoBHRB~wNAF= zU7W=6CC|6t!(Ckg4!D(B6Dsw zTd{A3k{=M{P*X5|mn_m*t+nrSK^G?MtUm_Vb~SMN5Stgk*i2`k1+80 zYl%<`vA6Vk1aLa6YQD(n8u!)6GLmNC|AbG#VI>L3IYxM@UCOJi%&s3r*UPota9ZR zi!}H3TosR(ZYbJd6Iv%he7rwCI3ptQh1!>J$e4IVe5#TA16+Xy*G1Kqs6Vs|QlwhZ z5~Oqn=AIP6N`Ip`{KHxzts3?2jT3Lma2tc;M!2Y$Fh5%gh{9I zS&n<*-w2@E`av-J0y(($#BFUp_H) z^gnX7KY7)a|MM8%fNb3Uavbr8l;l%B`Y< zk{8c2Rla7}dCjB0GldhCFs&qE-%!pay2Zu7D8>T^YU$Rb%iubaUU z;uR-TVoJ8i3j5Qh58hh{;FK?@%L7(s{w^yArTcIuyKfAYDC>XtB0Pgu^f?hYCOA7y$k`b zYI=8nR*s5V&p-0WuvPvpRJgy!u$N3+l85yf`L8GY3WL3@bZZ~RNSe?x*M1stlZO3; zPDOi)sZVpB)Gfhth##5Ii;5j^8N@wVESOB4dWz8+}B}R_VXi0wDa&z zY|O@qDNusGckitk-2}j(BV@3#{oeKiFd5`!>JdlMj| z!x#&`s3kn{iH{SAr6Nl;qJ;H6oKS@&7L-`_{fl5$Z?WgQoR@{>&o2W~!W92$bjk&x z!k6R*jLwWN32vgXb-5ALmNad+CQ&3h>I+X!naFC@=RH9@~dCpAU& z@%6sXytF;H-N^`gMwg-LPKd{dX)_6h8#AY(7-f$3%<5otFFwa9WCk1|1W&)Km~{)n zxVg6e&=mAf-bcM%MKfYXIYXAF$`zx$Lf_DfpScmhQ`+aK7>fcvVnO~2w?N0L=5d== zBp!f#W?a5f0NkVP0)R&lv*(5Sq>XTii<60q_R@T=y z0L*F}tC_z@#0fP$L_Qm_%eUei>|V>13g`aS{^%8G&U=43B(av4FOBW(>=N&;aHYKj zBv4ruVrsHDN_^F1<4~rFTB?oH% zfDMFWTXtyK!MSS7o37GrnWX!-lb-;hhhK#m`DR8I$lc?WuR9$OMJ%W${w=eo2m9o@ zv?Lp}>X0fl_7HR-wh|gCu_a` zg*E2Z4j`WCKgRlg47|Vt;8+)uCYrAisKfmOUR6YoTEHV)A^TCpi96fJT1L3Nw1eBJ zoM*Ml)>Hlh5n@^Mon0OD;$M|GA!@n)zGQvlUZ*Yncqfi|3L=remd@t$OktSFgLt~mjhfQC7{na(el8loE8a3=k6meFXVK}&w7J;z%iC20+}9O2;#r{0ao?Q=5}s(epR z4W%60`TEb_PEOVCf}rhFEEDb{Oc$6+y=~C=z;`*}yaVkOMKUqEyc#$ob}7LqyRtT) z1Bb9oTIu^RhV1%x`u=P3%^xc)86#5!G}8Dj3YJtkQJU#|5YvZly=$=kabdh;sn+A7HJH{i&bXVBYq8Fp%nvm(Udgt^P9(TxtKvCox zTdVe~zwofp1))hCf}PHDu0f!F+6>522X!Bv#F5NzG^S(sNUsa!MmY&R;#Bw#RI$awdPXlec zs+xy!H}^5G)q=BFut>1UFcsa&Fh%VK#9;74*8TH1w2ds`c#(n7ZEt%ZQ88gBQagIC zeDc7`uk^vH<)SPnx$5a2^Fa1o|7vKbXoi7S_mRP4iBO-0mZdz3R<{j*yvgGHo|@Go zT-Wi>N3+B##+7V_j}9B%!F@*v-0e<+2usj&{=XnHXv;Bgms()kM3bf}@g+JO#-a_o z?ZYOKkM5#~oaa_c+FpP!i~f{Mf6U;UL(`>cOHq;}SXJ0D#6}kv;Y<9ZMcDu+jBx>a zL>~tM9IoV@>RhIQ4+7-hcm-8IXK#KpUiz@kvlBuUaO(*bh35SSII17lyC4a$%N%^a z&noB_$0BLtR~W8Hn8z`aE086Hu;O8$ zrN+M&9^fDHijk)q8G(?DiFyMzGe%Hacxu!!Z^-5!rl{AN^lm#$4YIK$^LGcFcYj_v zcO3H}j5Dfzu%U~60EGMjVajQRXI93|Pgei`aS3`%v(G#MjJ7IKtWQTQG|_`~^W$a% z(3cq}IM3`4<0Zblff6f`lIPNDQOW_=1Oh-q*+IYO_pr1^;P8cK`D;(=*%?szMEtBZ zC$8+^BKDwHwDO85>5Ch^R*z+(724GFnF?qOpTNQW>-VpRG)nRIr$+k_6Mm?#?So_e zd*7u;%^T?bQB{gfWlP`wyI_w*CBgV4A;e#1qjdlRL(FkR1pcIJrEZYh^)Qbl&luq#m>|Lzu#g*bTD)5FBHLYGe|SI|ZNjO+3eV zq&XK@wc20OID2v88zGW>59J)&KpC36q>^77XDo*j)#&&ja=6p?iWYFSl@NJMW3_C1 zlnmlql=oJXWi+}BklME^@qfFP<=#r^0XTq_m5TWCsM2l@DXUj7%y_rrC`rT8R744X zPp8h(G;=QA3>K=P<7Wf;xO$wdtl-DQa^VcLjp~jMZxh0b zsrgemPk0DzlXRP9Y8P<`xqRh%>kvF002=LKcl%Yy1yBo>0`#`AY5p$vv=~F^!I!29 zaiJa8<{mpzzVrfHtG7;gMp{lcjN3k>zMXA1j~_u*6*2_Zt#C%18H!e&6bvg%q{#+; zXD^9WtLGLAi#urNiL@R^tl>;TI;%HI1`s7ZuNtPxIOOjgA4ukB%e5%io=)9f>l0Ui z20YnTX^Qwg{(4$W2YOIY_&!e}Ga;}pfQM#~3r^(_+h7q|X^pd*N5P&^d>ix;U;*=E z$dciFjm0k!b6{XWJWxR&P|M)913g)_U3|?t(a@1^>AruTIj5qy|5Pll>*UoLIxWte7c|=`{jh50bTI>rFcrmQ| zXNvWCFee=6c)6u}BJxfTx05ihw`r>5^!z{qM{~*MmUwJDXlMA(gFt^H?{+My8sq{2 z)n4Cl77m78vLLBSY8Zd+y@iAbj&r?9cjOpEsg8CNJMZ6)=&E;aJY}DQaQ%d(uE_3c zWYE35(FM`ZH~g|Li?Tkp#Ki~)-WK2gEh0}uu+b;W)W9q{adY3a}xHcLep$y_@N-AE?#a5j#AuU#l|m0IkH5|Jk> 
z@MSNV{nnKFFncb^J9$(i2>0;Fw%SVwT(`=ogt&jML^r0h5?^Jy<1@wBz^LQf4l&Ib zv%DLwQPp~axe}cj{^)N+BPsE}HsXJ~vx_NCgI_K(cDTp{Q$?I364IuMy$6<7cQde_Uu41rHVXov}2r);H9YWXK^;;ggMlbt2?IF;~$h?q{U>!QM<}${b+Im z)~W5bzduR_aG<*)lOW`DeD>LQV8)6VN!*I8syAM1@)T?ZmUFTQ^0eNCjLe;o~HeqlLHOl9SfrY)gFoJHlBpb$iK4l z?kQ2{jqAj3t57MNE{{a$&O9$TLVbTy)XBnI*l$P(4+OVWs)tgH?dozc_P-d7C5xJ=TNGW~%I z5NAkScu-A7;gjaoQ}=rf*s3XqN(6ye{QNw32KaW$Ko z(MsPIaz&tpvJ+`b+WS)uQeFN5cewQh$5j6LTWsRZSEA{zNB7sUd+5$z)l_t4MZp}l zu)x}5ew#X2Z^~4aUb6ixw^geoerW}~xFOd)-kGm!^uSWwI)8lqlL-EqVe?2ZG`!5A zBQ_JZLDcj2zYq)LsO-iy1Do}%XbZZiO90<%J_>QJtv=~PI7Q?5#`pJF8g-sqpPknn zeBry{DE(Z16>afff)=tqe7xS&`E=5kTzArme;J|==9^cnH?X++n;MsleNaj=n!uhO zet|(oQ2W!soD8(@An1`xY@zJUr8B+yhI05suPm;(>=yT5BA01VlQ$GX2!WYvvqiactzpVe>>%6xn>D}1d_oFcu#wOu%!K<7R z)stR3whSn25lK&g4gZc>7?>cxgDOLs6=~)ekfit7%CLo4aQ|y52~5%q4g5XA<}TG3 z($4#Tki#_hd?C5Cz8U)4bk%u@BtJeC5eR^Hmp9^be+407y{HgL!N6mzYW2B+V29>%vj0^6RrPt^ur-p03-49}3>hqaxyNXgkC zbC?$UWh{G5`3-e?pPyX*N@jYO(@;-IpY-lcP$f7p(FK^e6$dEYciv!B zk|P6y$}6OM5+C{OP|vs?weEc+2A(jU{rrKCI=sK$^%A*|w>9S$h^}s|^zg#pN7nI; z*`J^8+!d_kp7F^GEq~Z&dj|z0shC!Sf4Qx)5@*<_^!_H$n#W0L{3=SJ^@V#%fq;1= zMFZrYR2U#5bLWUSOiFjE96j4r+rMDVRv0;9Bw%gp;dl9V0DXNEK=q<51$^KXNfyhYywzp9` zyGwaa(j`-p2d3js&ktuj?|6^0SqwV7C6^KTFM{odi4O_e&JJ=#fM?L_Y9`gBS;_PU zzPGshOq#ii{v06u&A^@EmAS3sqXy#Ep&`%OWT3UtmOSXmtdTq+y{BwIU49Q%H09Q6 z5BLUN&t8YW0)V-77xIBp1g*;`K1_>$HGP;fa8BaCAAb#h0?InwOgrHF*E{Uaf4VHd zq)}{_(4nxjYtj>Bm>nENZZ3)=^O*+S#uI1E$?1z5hq)+AW&$l^dTKMfW8H%#}_$hq|v-P4iV~L-jQ#sSV z-(<})Chg>}?Of(jwOb(lai8kF@nGpV4(%|Ab%*zp?Rmc@ryLG!>C^oyvWjz9H3&P+ zg<3RpPzdDn9mI!eWrHIr)0#=^YQ!vEcmU^VhltVs}%__O%eUfWM`;IX-9i2kL zTG1xR@TOxJBb8idO3GL!T3gF=E)N7)Kc$hOT^E_JMk;(`j1P=_N(Kbo1AsnlVg0gT}Rosb%>3$`DkQn!talZfHl#p6&ka7iL%!xP*`mlLc8YbU(I8i^mU3G5kFg$z z$|m^#D16I4_z!ACGKt}_u%z5(;O%|9YbkR4`&Z3o588x(?C|3q3KfyPaxh0BDP0vR zT?qzz8T=I|{(R+K-(Q41{CcK}Nj#Z1xUL=}69abZr5*qp3+PLMGUr)?*F zS;`HlK%k8rh&&xAg%EoIo{?1GxNUM$7evRVQc?K~1wn(tVKh$MA zq8zkg^6~PHn2$RYkByAbMd^>o_6!7Tl$a_yqDv@xSdx|4zK26>(4k> zD#~&1+nI+^#|la*Bq;4T7nwKPVVQL$-(Lr5;zw?|te>Yu%I$pF(lS+txq*ZAg=#5^ zr5#ych!@;>&mW?0{WX~wx-dM@_D*(qGieLoH8PpO`oo?PYJZNPp`WSyU=>~9sPI%_ z_x;KHcLp+NLZH!OQtH)R`gYY|zeXC3+U=RrJyrY#{-rDkGeuc6=hG+!rV_y!ZJsil zh)sf+b1-CT7b0C`Hs(*V;FGJ-c*almAW#sHsDwm0U0T&-!2j(e(Z4J8g3}msXOu*o zrGGU{ab_xu#Zbb8nyjn+7-tQle@aWpo#n&oq70HE1KsVtkV3w{N=tie`-b&zKy?x% zB*SF+pnV-=I10Pk%T1oa(3gV`moam?OpG|(gedp_Cg`Mc9^QG(NxjHadY#-kxqptI z@WlG9|4tek^jZm7oSa_GK}uJQ4zhEf2HHj7zk@tneUkK{I9GaVj~oOz#z40^1Hn z#L6NYd8&jCCZikXG|uLBw|NiXx>f<=y9>I(i?Stgw=A2Jw_NCQ1#IXit!=3BWMjtMg zY%_~X>nBldxHUO!xZDg%}MfTM&tg|Dt)-Scn(;wHlbR7>o%l6j(phlyBY&! 
zdm#J5;skvw{{uClbzF>)kFk0Km%WzL*NH|)nSjn&qwyuo(1~xpfV#9(DD5OdwzOe%`jEO<>5blT&kYo_~Yd;D~}K7J+nN) zPgHNfx}xB9Orli7!4m6 zCb0{KljCq3EJ<31<{{5JrNUWiS>!K-wZq#YmBImHS5`{H27- zlP*qWhAq`OqF@c+aida{e`NnOtdaaO2mzw+4P=nrP1c+m(UyQTyD7-y~(&@L0OvQ%E4broB#+SV3 zobz3K{RxC5M~TXIKcU8`#{A4S34?9A1l{jop+e2rWhd%wdOF4vMle=ZA9&j@(bsbf zH~aaZ)BW`cfCSm|MQq0JX4q84F95HVMTx;B&qlv>EEeVY%Q?yvC~2}R7Y;Nh0!}O` zc)Km~rAgrDX&hq0Y#MIeVU~4y_YW@q?QpaL28JTT;T9>cq^#H1zys^L+B8wEZ@$};N~gRu{KFX$LGF6H+wX|mA^)|a zHqWc*Jy#O)nH@EE4?H!n*y&G!G)SEs89|mDJt<{RBAvHXU3C%&IOP_pNHZ8pe9r$# zBj5`AnN`-JA-l+NW(g~m!0Wm3EqbQ(4|k4c-tU@<@0OOaw#yCjIo>AOe76s{OHB@i zCo6L#!w-2Qe3tD|H3(s5s<>KBRFuP7YAWGrI~U(jVPwGV$cHv@QT}I*kqGeZvh?p3Uti?I81Rv(!48&xy((A+-^JT1qOL zLeH}P6N||pOYm5!n))Pi_KqZ)ndaxNHE}&iABsP?>F*+}tAVqK)aK%X9gz=5fG~{J}c}eMSA3gT0c=#5`fiDZsLML=SJrRoWGX=;VBn_sGwu(Kd|uAr|c7*cQomH zKE`Xj1lGwh9aw3O!hLYxVW=BZWGR^eqQr{6o1P?yn`i8SV=0~@LxhCwLhks@@i&(F zT;LU}|9-h?)aC5vCc{VgwcdGe92AI_;>=S_gd5G+(j+gkWf%#4{Ws{jZ- z6-OC6rQ?sXMVp~}`%HJ7hjZkYX|jgCqOPt5h^Vxt#6bS)3$N48HAKW+aPf@ppC1CF zxEjW`6IO13!z?%GPU#v%!xn*JqXKnSb&25ext#+1OZj$l^buCxrlbmgw#j zGTcx(qXE69VW*se{&b4qky3e0;3|ZL%(^?FiDMO4Nw5!&4BaIR5NkdF7>S=!Q1xPHfuYHuYC=hw8Q%GfI{8?{n-Nz5L4k@W?WCG` zxyizuclbrDxVl*rR670&10j+yACyLh`tOPT;8oO_-zU&9ug$ROWRdP(phkk{-VKL| zYKSXaYB*=@c_%)UFu5^SknuJ9x%m9Az|z$ik2J6dTTx~m_)Oir`i7A~n8mZ&bvJM) zTf(Ryes%zWk3W83Ln0&?dh8|-tQ_RNzgT(zQmHFT+TfpJI#?qt<&()+A`p-Nuy!{( z-^m^ytG~w!t~sQxI!$&sPU`!`3%f8I*PV7+ik)s`b+xDL zjeb6V*vCksjnIrAtva(2bG$9!4@!ldfBySm!v?+#Ly}8%B2H zROzca$^2?pDfsk-P*bY}njm;T)#`_2Jm4>S zfc4A_D`to@t|FWPwSXoMa<#J*W#e0hCS@XP*YlGX!qVqnbhcn9sHY;oyp6u@XSfaF z?QQd-YDLh|V`rzq;oMZ|0s%T#DbZ?pXpT27z}TA2M&jSY5Bc8IB46*{ed4~V>l9nJ zeC~Q_4~#bpyA4KPYrC~t@$rxF+jEsl>^mPVdd2*J^fV2fMg?=kz*IdmIM_5K6Iz9z zpz87F59WswR#6-vqJ*EK*>R+9fea5hxn>C+Iua;woRtzcX7@$fv@gE)8^L@_Z!G48!>W4H&)S&0Yt>eOH_$%6%P?6S4IAOh;ViWWd_3vr)aI?Y&F9Uf)sBRVTG52U@ z-$3%>)WIs3GQrbI-Z4IU%%IlDH{u;*7`|Y;L8!eJcZuLFKr$oy173;_f$Lc^$z6w(<#bva+GNI z{A8}Es0LzdB&VifQ!Cz`j+=NiV=lg@O~2)Hp_~oD4p8WH08uV=0kVYxQ{qI0bWpgNOu?dR016#FcEiFPL7%41=(IK*>`ervSyvwPfzbX>k1lu0z) zB{)xw+t9_7q^L3<&iF&>6+`TqQ@OY23QQ?ZDQs6FDXp1rdOuW4-Q7PsN4(ar2|?yb z=2pG{2m^1$VRtLhs7QQ|QIA%VK~O<~>kys$G>UDPd)biZnA>v5x-^HLim*#0M!-9K z%-$x6`1SlHIT-?k=&sZakN^4t$eDVO(+Mn)+B>Iz9B*^JQiYN6LB|nZFn>0NK!tsE z8Y0Ex{Lb$4`hhyU#2hF;LOGzi0Zm6wftT7bPxR3 zi?R>dKR?61K32Z^{J7Xo5zxez7{F6VM8xyoKqiFoj^hrA4~@kLLT!tJ)164pJQoA+ zL%J%UKQVcj>z`P z!l%&bUQrm~crQdp>hT}6-i_x{gE1E3#my2e83{S&KRPZZ)SFEN zB$JX@OsAZYt3BbAhq@ zvy5uL*JDRO#F~!NdG0>Q8&y#;h`sW*sL<5mgijDJOk$xp^Zkk{1j^zfWV-s#ekZto zo!ubgQma=rP+E~;?G{5+r&c%I5rBFDgAEDL!Ag>A*YWg`e74);{6pc=Pgg1LEu$1X8k>`B83d=d(ymkBn6IbLfRfbvgR7sBJs4XlV`X2utX4b$x7z9a`{Dd`G<|>J zZRFj(w5e+0JS!FazSxe3Wo3#H56SUD)!ey^n#j1_%oLyG_U{F%ZRNu=Zer^<_ScFg ziY_Lgqrs~a!~ZQ9|DzWz8Nk}yp`C#zLQ8kPj*at;S@6-^FUlWyy}+~Nej`!w0z)8e z!=dsR+Z1@kbUHdD`;?H++f81bNKoM0Hx!4^=S%g2anuKa==X-S3~j$Uv2MhP!(Vy@ zUjl2_x^9Y@lJ7gteM-*xl>hya8$gToMJn=mDhZYii^58z!Sk5K#Ig4e`zfGNwU9cz zW7Ez3RCD8Sf4*TqgfcH6vw?%7%<=SwU;nz+V`<)SGfHFli)&}2_*HqC=?U_*an(yz z0UzE>k#N0b{^tpxeO_$m1WvJ(#AIln9a5MK837CW$&^;$c&R%t=74Ia2r=4-MC%0% zTh4Gya#Sciwo@UWT!auMA8)bJhd*GX61?f#j^$vbL_NyCKsnlInMSHyfrsS!mVYG3 zirhVnoCc>t_Fjw(>3xKUhb@vhK2Nczinb|67P;(Z(Bx_I5$X8_w{`M;-+x(Bn6m=D zJa>=fSo)pFw>Pl0duf~U1!EA`ez@f*B%Gfnd_R$QR5mi}r>NPG5K6Q`sGqgzND^DM z(_3?gM$-;PX0K)69r#AX3~Vd)Sh!Ra1TK<-Z@xyjT145&kD3*v`!E{kHo)FEeFlqF zkAyeePK7X^{%gZmXkA1~GR#3pMV0%e!2NA5nKXLzW~`gZqq<5wnc9B|9jF`6IP{C- z`=^IF*M%Zo*U94zm9L+Lb+9cFS>uk)xGWsfiXCr4u<$k;}A<$@p8o-db!c$1_Y6LN_J zE+X9Thl+mR^n&Wl5}a^&uRpSzo!t$3_t7phYq<#v`bP|ss<16?pzg9DxdYx0t@Rb~ 
znVCem5&ccZxbRbDL0)1Y^v}eeHe%SQ7YZsJA&vVa2rBIcepC=wW-ecCX{6G|!Phm{ ze)|1&<9#?7%fPBVb;);D%Q_{~p8Esj7-2bCPay!c3QgoaU{Oj(c@}Dq;CybbyqMRkMK-!=I|@# z=l%wd=eh3^djuX?W4ew-_*1VnfFNMH&N}0;!tjpZAn8D;3%|`ae{8`F*HOBo8Jc_( zURqb`_!o~O$9C#wkN8ZwZIaKLz*2p)BI|TVT>}F}baxcr56NtJlbL+9_#r8iG_6tnF@8G8gIJ{_>9E2oJB1bFILvE}rjz4c)5XvZU1%ORS< z>XgJ3CZO(0qTRR+qs`Z)ZoWJIFvcJ*9=e~nw!(~ zAL6+rMvbaq=jFnoP*plH6i4r}!RM9LJ`(+5CmyC!uj5Ei}FqYmVP+qh!W5ZifI`d~4z%%>? zQYTYf21Bs2fSgh|hWDwSE1lJ$J=;=lc z4=tAHBFxYyYqG#q?{)xJhk=x%k<7E?{QDYVsDvUk9cf_QGZt6(2O<=dRYLLZ49F5ME{huSKHH3`8a3v zM*pTxCT7?(nh5s`L&qWQ09vU^J}c<%eMWuL?P2ecI%u$qDCnNK8yG) zhZ(r^g>^>W`}?6T-*Zo#o3BGhY!eBVIZO1in2d2s#MEtfCyCsrPQ|}k#gF4sAnoK) zPnYNtqDJN!02f98#Z1YccOhq?o^g4{o!@B1ycvlTIYFHu(}b|wVHP{tD&FQ{F`dqGCe6fv-^>l>`e0XNEsfq zh3Ay`xGw1lu1h)YvjfxJD0Nvg0~;v?omRaRdMnM=P1ph8^$AFnzTGn0SF)RWU$14N zlzyGB;A+mzX(v-M zFw^8BR8#2ecUYuS%b@RZXcJ%m7idZNOv1zRj9>cwfb-vqCz3FxoLC^eBj7+80gBUz zw4+v!n1!qE{hNPu$RmxMg1z^KNOu>KT`g9Yt?d2~nyFZ;)$w!J4PloR)!2Z0w$(o+ z#GWRd#dw+Rr>s!$)EH!BZqJ}z%1TWh9~-=A8D7-@+nC^2nUNyqoYYWDsUY$mN!Wni zEbhvgbpcI5iFRt{e^Kz(TU4DHz0z>LEL$n7jnf**lV zJTk@FD|R$dA+uPx~qJsHvVE=s-U@K1!N5#fu30jdxHO29x(m~RN`g`d(MCyg;j8`|Le=kUm z1UT{l9w;4u2YAAa=R+XLCG<-OwaE~v1qSX3oWQV|Dk>tVU*ulBTFi2cV#ZTnCTLe_ zS%)HH#G0=Qzw?Azd+HzkmR45~yNTj^UV{};n@=|>Y&pGZdREak%`o|`j-iH2X3!nv zMOsFjGd=BiJk%v%^`S?#%^~X!k+8P0OB!Y3(O2OAo<3$5BUMPavcONxx$aqdBP&Mw zyVriXMqY;`;{m+RYXi)Bs(rh*r9#i6qJcfA`0I9}vx!oZ?NYvUbA;k;*%~zBknw0t z97usnoyQA`B4tcw&|hKL+DB1;hWyZ9!Q4+tau{%gMH_A}cdt{)bkR+u$yz zn;?w|t%F zoTK(1qa5~MCqSsYZE4TG;Ph9{mYIqV*AllH9#X||{P{+^sH+?0&g?=(T)ms4wO{l_iT*r!WCyx`CZK-@xY4U?1B)Yjpjbdim<978s$y?5z$GdEsJ(&gB2 z_oyOwDB@)Erpm#TQY*ipmPF)}UgbZ_PNprAZq@!?*%j-P5wH`53 zqkTk*he%Jrd4(qHeP?RyL zn)cEBj!blZSiUCK2^T3j_W~s(8TbkNF*J0mD%Df6VwYu^c{K>^Ws> z?UjZWG}WU&Iw`Qk9U^fYBVgHI*Bz*#@iSw1_G)jbpR(m?^RK%xQOsKog^9fhaee_&CZISg1} zpr!Z8b+1oSuITsgUp=mO%Ty=0^MsILf}|;7n5TcTKxG=?`D%%o21bKHzhRr!b(=9m z$zgKCk|#OMA382)QFWz1E|jj{oSm!#|JF{{&J6r4Z48eV9$nnjH8oB5@XAR6p0kr2 zb`YLqeg5?ep){r>K*cAnQuBZCdzdT z)+W1QG?5S)|C)B`D5{ldl1=J7$33+drfGhyra8u7e5p6oS>`qGEio-NU2+`8lBKMt z-h&%y(D)Y{kd#!U73WC8G;Fn65htcX6K<+aK1X_LC+%?sVoZ{q;Mdk@Ja(b|t~-7c zwY$^320p{jOrGwnG4fdO=QK`#Es(VW^dcfaRhsLXAVJTxpW_g)Gay;qTxtk6a+0vl z?*m?Umxfksx8$knI=m-NxYQ|IUrCNdw7_WE(YE71jx_r8wR~s3?L74|-Z#8~F|pI% zc-Vp!b@s*1-Y&6zl1G>E&xJSlc~)W(vp|)@zxHWn!Q1|gq4hTuZeR6sJx`U?ti~q# zO5C%~%R294P0rz%1`C}P9c`zRL)Mgm?Zdp2<24##ic)SYtL|h9otRPX(p;)RJ1&U{ ze(T~2{iR0NU*@Mkf*Q{K-k&YRgTl=zTLgiI#4z9&Q*&{+;8L;9W^%I?zKMN* zh;>C&>`rUCdwqm);=!z|3scSciv&IJ*dlKINt<>-3zy_w3Eu&Z+*`1nH&dkdrp4h4 z!SA}TNG6YCbiE5S!Y?B8U!;iOkQ_9Wt{24vv$yl8X*oJ$Xg{sC* zul&FLIQ~q0b1Z}SY$VPCN}iakdaDG=hWOlX6|N^itb!4lxet9*JHXJL#duNH;0o0P z(9MyTW;sS&CjE5#wxJt0^^i}jBA8<66dkU{sl$G+wG8EzMoMb`h|s049k@orH!Vrnh?0srmt%$2<1??K=IVuQMDazDCapM*@#+|kPD}dG z80(C>gsJnamrVSXR#(~Ah39-dk})E5$%nI(h;|C3Q-9#^9vUj?Gc}my_x5--z}czH z*nT0hRBIZljr(}!)@9=}VgjD{Aow&LYkQ27gp{TvffCZ-ae_30O@~82VeISAa6?Wi3LHQG8Kq!&#B4w}J*{Zb+<_^O(=QZqLF4qqQ99GMq1n8By zYX;l(_N8s{tT4d{P+zvwK(m6!T67zKNq~K@he%UEafR|-B#%C< z>wbmW=r}~3nmh^ChqmD|HuNOY($XT^sIh%=+C>Jl=RCm{t_W4V1z_e^Jc+Q)t~BTo zGs`n21a!+OmAJXoS6^HvWhvikyz;{Wxk!p;jw1|+*bj53i)yMd8?$L5=$%uj!^k-;J}ZANxrM@TZp zTnvG7`r%vC*22hRz1(niziUM1?o)p^5+_F$ry=2^veQC{mH<8JtsT-`s>!JQ!W^cvC)j~c zkn0=It)bnrztz7IC5noTJrwPz-mh($$Et&l_-1e{3psA7%5JQyTkIG-m(8upen^C! 
zu>NLDQ??-Dvm5@d7BirwNmV*o6v1)bdz{P#RKN`dHI1Lpx5cwl+3{;DH{+)=gL# z@i1N>Du(tNxzDid0y~NS`xF-n)!s+YRmGK8cj_nrv_ub}n>4=M2h2Lgf5_EH{VeE3 z^1T5608<%ZvS}fU^W)kY4gaT$a?LBs~m2slYZ zU9PR7fJ8AJ*8SdJOR|(ROJnTQkTyBZLqx!@b*Rtnm+Jc6(VFD_nT+*h^=y*G@D;af z1&378r>F%xkw$KT=6V94g`UJbR!I>!2R_f3rD6l7F-Gb#)kV+W<0_F1$;d?Aq;+ZO zMwfMq80IN29W%*kE1NP(Rg95#Yn)i~MH&kocg_fC=bjFE2zewZ>rtwT5@@XL&!=Kw zokU^sr6k=eDCj-ZVbr3IqZPguixtG_qJnj!ogv3NucB(?d&$Iif1wqz z`K)`Cw`Te`vLIfQRp;emEFyF;UYHgqc|qT$Lhp4OZKrm>s%-sMt1Pe5`PBTh@709F zJ;3X{V*x@vFDFy9Sf}MUeL(z-n-9Kx0a?f-dwTcOWDK+vcEiQ&DndO2VjIyqK`;|3Y8`SZHK zQhTA~-A%O2`40(eIl}cnx=p7?wG92VxbMWM$tD!Epr!VkFNiC5T;dfaRs>8mhB@<> zY+H^Ku6*$@8mbAXu%rpd42GCe#gMH&oEOiN;wH{`WwCzOo-+ z^t*TSqXJ0D?a}5nLzlpX{_;Q;Ji$=I8z5@Z{O~@E5V)-xYoKviwh8S#6W(@bLF;C5 zLE59^DgF4X^Yxl@48$DbaLq^f9LT~v(n-%@P-_xQ6Rotbv@GOL*iV68Go-rba5Tg^EK8b1H5rz>LJ1x0`!4@a(#}^!AqgvjBWZ)7d6l$z%{6nE z?uD~`Tx55bkI$mF2));IxIl$4q1LA>KJ@He5QnH$_Rn%%T{{^?WvxPZ${(=+R~pTP z0{QGD3MN!z45XeAX@AZ7p>0(=J#L=-gq7vsiSo+IIUbszunG3dEs7wgt=41WaIEZ4 zl2F4F1z{_u6C@!&WH)%7nW@v#k$(ysE3FpR!`=7du|$%Z@?5}Ri1PstB8Gy)M6YRn zKDSR*=->+(PVC~_L27QZih|aHPRDUA_NIGK|0gU>cLoSe7|4Azg*ffejd8J`4@Mbo z;~8HEWc`bmV-hXg2al_}4F1!?H+SALv;xqz9XorkOg%sn|fOOr&58Tc370h_c>myA{07BBE>pmR`<2JsBvvnmdT-ot&mr+To z1i0VDWNL#D5Rhb*uW_=osaf$yQ7c;dZgJ@tJViUGVgrOgNLQ70>$#_rv(mHq}jPzgRte^5icREsP!B`P9oAc?OL3E99QQAVDC zd{QzzV+*h`yH(3X5`GNnH*0e}tqXT3WmeOjm*0KpBzQA-f-zZ3++` z{XJ=w0$rb&K7t@tf+jmRBsup zjokWrx3W9Id)~khGGsaOg`#}9QUln(pe5haziCl0XV8C*_B)wYzAg4VfES+y%HY zyQyDYTv#ElHv39#ypVp10#C&I@H7?AA-Q5PrI_gJA8l-Gef~@cf&S4%+*QqeN`ie$ zoAL(ob7nX5&7E@^hCbV9xy;ucY4YY3JJVd)sdgKw@sh5k=0;{))qk(GUW`5y3U7Mf zuQU+Ba9d+(5opHNC_$0PiE>3Cf7#-HnO6aSl?c85pr%=xWtf;b$QtZibm$u93Y)YGdRiu<>vu1B%qV}BmIyC01 z1T=V1?dt06(krx^^mI%ereQMFlTGsbzJD#4PVQVc#v#N>b+40mVxiTRpZdW;cUZUQ z^vrsw)hMjfrt#}N!$RIop!D#c0C$0LA`(fk;}^0ksq2Y3&NB~pcXxdGygAuE(vtSd zwa`J~&mIo|_1X3j2)Q!iaoE;W(!X5`zN@}%GrjeovtXiP#~P2Heb@@3yVJ-&%q+Vb zzz9}eIjEAu+mDNPh-kpvcFAdidw&Q-c+a)MbJ;MfQ2$~|7<$TC292MdXr#Aq9`f89 zJ&eu7w$U$QMjxFgG|3I0q%34ahlXCeNwz3LXdc_TLvyw{p4MBSS3@4)=M8^h&k(Z4RA zV7lefL2*J456Zd7?bhGFrM+0bAOG;m+KMN^!69hq&X8E6=mj27++U;nF}BGDj2h+$ z!cZvoapU#Xqh(L_*VeYZgS7rY|Kre9(r<$}gtx+P%uU8)KQ}im1)tLaHv4&v9WwH# zTIjEP!5bGLoQosn!1gnc@+EMQXA4-&VqfMAUz0zI7L@cR;s_CDFqaN z!<2hphsgHRM5JLw`@`ywja6A?Z)B|iS&ie!={TxdJdzy80xDp<5wTKzcsne_?@s(@ zAPSRU9r|e@lEuDsUzlO~jpX3<_1ur?iSP##2fMJRS;%o*6KB%LM6?}!=&PD=Uh9;vqw`#Lrh~(U}Y@&$-}H#gbhLlB)-Z; zTXNHerzmLGdnVEw)@oc6Q|dq2G1}--Dy!gz+a%B(`6VD_yNU9#uCBDi zT~5a1f>;PmtF~+mpEEvKL08Gu-u>GTNu_+^oe%v?8QYABe|$;r|Nbx^@`mvUG3mvz z%rf4MN#?7FHli^#GwX8)8~7V_y9K*Ss#k+O_ER4 zp}5+-9e>vFMDT=aMn!HO0f_Z)T7ym0m!{GX;=VD|Y|N>_0Whridm9fi*(!m9Z$BQ7 z&@f+&Pf+9!@ zy{ePF&R=5ComR}mEwz_bS**jYdnstb58t&KNNdHUNy7`0$RPj8-h{?c>S2h1I1C}I zeoQBrO-Yb_n%4dW?DDMB>7LEEfy?dJ)`82uM2vnDI$c|X@f(v zId|;3w+cRD7k{}<(oADm=tL?oMWjyvg%Za$zBy{n7F2TsWb?XbIRGqv35A7L#`gf& zWlS576K=M_kRU21>}4VWNlYwEo7~Wvh-5z)-954|QC~KXhqA!3~J8i&hxqEix*P zLFpXFt)67G!5_M5X#;q}U~O4qi5W3L29k8B(N2QDA4LeQaSHqLf&xUOkKrQ;3Ql4I z3PPVO=EeT$CgHnd&Eh%~fqBM0%dx2aBY6}1{rOakmd14CImTQfJgLug2+2KVDi0Mm zw#6*ce3)8Y4keJmSVjdVB7qKbOt5}OnSS`-pk;@REa7m4qSuQkMg7#A+rlT5-ab}Z zQ&v0Y;lOE^LX%%Q=l*7J2#twfHOF_ia7Q_oM|W9+DOF0hoSa=jE~lY^Qk>OV`WvcD z58*CGw53R*D6MkI^nP+0I=is{)n%~ItZuk0-w0_^5PBn_x(u~)vf22bxw>zU-(c&? 
zrgy#~v1S-P$wPX6HAv~;bsIjI$`g})T19tAB4weGOr{s?eHzAWEQTA&JZc_*?HcYX z8(3O*=s()p+HyeuRNQXcNq(p|Ji(9n<%a4ac;jJ!b0^x~yD*y5?PSq762{>5NOQ$K z%mKx;s~57zL=Mhg7T@6QDs@GJrtixr5ZSsZjfTEM+i`+^G&hAIcxNdwKTa@><<}S# zi%kG_qQfP8LA!iAA=I34=hW0SpRj!MQ`m8U`|++YgApx*QHoLMwJ&^X z({Nf5UDkG~+F`t5>adc;l$g8ucYpF?BDFly4^#y0J|IaFUJ%T&xO`Pqb}c|mJpVXN z4V%;^3{SxDmA0?;AUw47_bxEl)<|Krf*b}3EM4Y67otnrvBls;w9Bwqp5$vjfhUVE zPSUaVDaoTFGt^|26m->%at&=_z=rPC#dd}10O|)U^=kGU) z@8r3!CzkR{ppUXIK+XD6vI;Q`#r*gPq}db&01d~e;QO^;rU^`n6l3Ru2{%ylw?lm> zH+=n_ejEHBD2WyJ-uMqcaCAg^SVz{W9pnMgCy_vIKHQd;B2^#zAtc{KDn5}R_(QRY`aS#vOxif$4c>dCEDL*YnLzK{`asztEIC2d}=c<8BC#} zNgJQmQ@JW6wLjO9`T`OS4Wk{!CSHYt#B?cP)#w26$=|qsaI#kcK0>OKu^&7D&Hj%0DP}-iV@|Cc%1gkaw&QRh-+bkO6$6-8%^yF}K_$%kl z$ot#3+>_ILU26__Ez!RL%aQ_DC>}G3Ic9Ydm{c#9MDMkB))C2HG3oQD`H1o+d+jw| z-(U6xIgiumP5#cn(*4W52%^H?h#+MeVIUj_dz=tsOkqy@XYK&Y>JOyD{$Xb49sErr zDn@aKG(#PGZ(%Xz;T?5blO)}-Oxl0=deq}5!@VI96aA5Nceh} zA>k-?7~U=fe7Vc9fGwY|GSe2_kj)Xj4ChdHoF(@${RPKAyDLlzlu~y#f*7Q`G+wJo zYo<6nxqT*nL?n~ ziewl~5VG4HAxAL0mtd4A&Ci}=6B0{yoUXkD)e-8QdRA6edecHO+}!qtCh|tMFzxm* zw#(+0vK4Q}@`bIAfSf|99{v}hbW@(-$-6G1nVtHjwcXwy|M*47f!i`|N{sT`Jh2p~<rCEn)MSl1T4fT#QNAhw-UWfH0?_KF=$TAGTSAk!mw-oh(ZMZ zuK)EzI*wM>K=L5IA?lL@8(Dic4(UM<`hGAVFpM$cI1x_ZxIP>enGx8(%rQZQ_*HSS z3Rbn=wzcoRM(e6Ylz?( z=EElULf=`!G$0JJX&>TYr>uFkncoA&W~Cf~RNxn?#%L`TAq@xyhy?r3%uyL{jQKRh zI9yV%FpyzOH3i#AH%ieLyd}dYC^bKdD4SdcH8eCyYaHwTE>@QqkT}f}pdgIQ+(q3% z(B93rv%$p7P&=m0L+E+XE@J=NQdW%`T};l;K{=|Es50*c{Tym;Wh54X%KQVlDatU?7unyHbMBY%20onpjHV-*%@U?{zsV@U*Ph8! znU2%SMQ4+eZXV*2*!81S3RDuh<4I_&D2sGd>pUOfv`c)fR6wn6`>x~14mi{780?)i zNj7oH2kwsKiK)WHZchs}O`n@{>7M@UP zxnG3N6t&_{2cWk}bn7_QA1I#~2;{&UuTqqpA%L5v=M$z0s&g^rgC5X8Hjl`H-zCAM zW{v41a0D9|RnYG1=w;hitn_Q#M~rIqAWB~baJ=7S1fyvM{#{oS;yOSxiDs_=P`f$v zQhKHHHBs19FJupSY~+Q19z7IH#7S5~x;p}T*U>1FBLQ_H>w|3y9D6;#u`Kx6k7Bqk z$4z}OweeXC!&%(EWDuM=D=jGDk{K!)6-)s6W00yexT7Y0cpkn;TkuPg=b+O_hNryl z_K^=iR}MB|s_Owz%+9HOO+V+;ma=01qBZESfo&I2-Zfi`epR$&rQvfmLi#07JDtBv z=tw#4Wf60!?U8Dx>)6e+Rk5HLg=T-CIBir)nKv#VXW!|Bil4Eyg0Pw)jXx%o&Mr4E`uV(u`*63iM&JXiF9%+R8=cbBsn&Sh;veikKt*!sY9@ zq5X@sU-zY`zQ1zseBJx}+@gD=eyT)fwKzNX*lOWZ0w3!GW1 zV*Y}x-twue8#;1{O~9azByR`YN@Qml1e8WU&Vrt1p&t>)5$~m1Z6HE%DIZ~-Mufa+ z=pys@qg{Exzf#6`)D5kEasn&tc7smbq?|A&4zuL%VHkt<7~jD?ZwW_CBTgftT;BWK z6S!o&d73Pq!f51b{6kwl`0oh-Iyr%IX=D%V7z+X+ot_eQeq~YCVrNKG5usBD_VK#jX0kXU6R=)kw$;JB z!kqG7=1*?9msxKL*dYj|4Xp3LFyc`CL%shN?iH1873qE+gr>l}wJV~5=@1~0ntKLw z5L(+bFtTI_6t7*+wI@AOD$T^x(c(0c#FdH)v&plTYcjIuMO_A82a?1cXV{qk3#WGQ zcd)X?(AZ*6|29V&r>d}y)7M%Iqd^M4MHm+@PuwAu!N<)LpfWsF+kkB@%x7$9Y@!(q ztCN~e!coEur_$b0Xe#ddVK%a`mzd!8wB%Nv2ai@H&Q5`3LtTB3p`1+lFqS#xwAX$hNRLD9dNDexT zD&o;G*S5^8u#%Q3pr9x;wYH8dwyjUiuVo2O9!V%GGS8h4lo6l(7lJAkxidqiLcL3& zg2?{ee=pbISohLQ`T6iwY~%HJX}-mXqnsPBYYJmd=vJ_de%RT_feX z5njX6N3Pyolto#}UiHCidYZ>mcJIPUKJd}YG7H1`4e?XHWr_Shn$E&6s_xz5Qc6j8 z4BcHycXvs5rZyf%mJNpQR0dx1ZzkSD9D9hZV%D%!Hq?KiOfAi3Mn7;{X76lN z!rhey)$h*~GeWl-%6XFnk0f-OPNL+A@Q;$56%|AE-J!FL2R&Ecj6AHv6MUBwx{77bS)?J|)&)`4sfvl%|7s&m}dqF6Au%9A9J!3^ci?l)v$#e}i6<`eE zxBis*C9e}K4P&KYG0jKMGZyp-r!RRlPbzh-0B}0TXDH}qu>ae`+OfWGzra7s& zQP0-JN*RV=+~NiwtqGl?dv-DSbVwJ4^RKgnjxOLPNGZ{yKdvW;vso)u#dDasDl0Yr zcNn9F_aj4w;)jYURjg|>@2!>f43tuF@pdXDi0P!Byxv^`gWx>fz=Z`=sb20`d*I2d zAh`K&dCyDsK+&2!`eO8n_IDjBJF#v-I5>t{uF}B%rcx8T?L%8%I`vE$$YyiI19+r% zTU_2Wg8RkoakYv23_nv+Y^UR;6Ge6ktYJY4Z~I6PDX$)Nl<_F5oI&lJ0ERW(lNjV+^AVQH%=lsPNOQFq6mhkl={lxSd;YkTk!CG>E0&t)&IXy++nB zCeZswPU}`6(%wv*Z=(X3g3*6jl6CZ$9ZS1)bD@`HctPOr)Wn8YZl)lWiCbCP-i1%I8UpsTby8&%OlGp$xa9u{!;tTIC*i?CxPqr z;%xv2TAuO65{?AX`7IP6Zyf8J(im^CM)te(_16Z5qP^CX;z>dKu5ZyedDn{=*!0d| 
zd2XFT%yc)EWjhdhJo}rf(r8a19m61}lDmed-s5&dMZRnn;Qsm74%WQi&1##Y0I zq(yJBN}rDDv>M!5slUjgOP!&o7%DkpZZ4G;eb_KN$481#&Bv4wczw27=``Qk(ro2f zm^d9RU7%NYost}(bjXoNlfy)JWOtq%S^C}GU3u3qUY?AC0)Fdin_gwgSjN&Nv%PhX z)9r4YWV-S{Q7p0<+-hh^Cr z+c)i0S%sckl|8^>=?BHpS8DTwj9zY}&leBTQ3(`>Vu0zRR^b%zwid}c;IQQpcRO#_&Ox5E!qrY8P zu{sM9D!7e&m!|v3kTG9Zfi=fnC&cC)-rU?OphQVOyOR0qFjzs)0_AgwQ&wm0^16D3 z)8|XUrMZYNYbzNxYrkdT%z^q+UcW}pb+0|OKe?UY=Rk~tJiRcF@tfC6Imhh8_oHpQ zT3&I^@S8vQ|LXHC{mgsGM909UR~7$Is`Ej_m5h1M)^V3y`C_CDH1e(*}27RDG!Ki z2it>E+rjWK=E+e96|~U*zlT&B8VRR6ny?SC4S2_8Pm8~MAL?e@yw(w`-6soscL?1t zu8j;_>Eux5b>5mBT3*u8EBYEKINy>p`(J83QxvQ|*+gC`i{Ldz{mAoeuRb#|7x{JSB=Vlsi|BQ6oQDn`nch zxcxS9@jD$0Qb1lg!IAb|2lGZBOQdj|1UwWS>+_pH;z70w2%w__EEPsL*Vv9!by<%~ zFl92C(oyRNbM)o{nWknLX2bYa2#Xd@Un0EjVsWyvU|%MDszji3fllbCaH6Xse&)a6 zA4@ZMknl2XoaIL282;mIwGs|&%+=S>A+qD>BJkR=l)R2E1~1On6l{Is<`OwZ{H1jI z^IGHRVh;Ay#a8ho_Nqg`kE=R67NTb$n;BdGx9ZF;&fMFp+gcSYJ2`f%A{nH65)kx{? zc$Lq)1v@eA6tP52uClh|%p>-Cwy6?}N?JIV8N>fZO#)V)&_0x|i}bfwA&`J4DSmx&I;B*YXde7ztvm29nmnw&Q4mf%WXB#X9qk z(>%MaMf%#ssdy8KG_+QZC=UBLL5uBK2n_<25=Cc#DOZ|llw$>SoU1z@hKz1YKJSu? z?l6w^8t|w)u^Aw_g99N2Y`44w3PGAUJno#8f>ZX3Bc^>ts(qcdq%(L=)?Z-Z(A2)N zN}vT3(+d9St@dnm`JN`7e}T54|7E%i3wZ+{C#S*3AS#^eIdoE1{IMIM zmpP#f>z$&K=M_~IkC!haV+$-iDj>JB$YXufxPyTk%os$~LnQCjnjX>ca1)Y1j=K0K z_MI4KC@Fa3ot~G~p0T%JgxVqynJ}b#tHQFMV1gY|9J{g-(ay4id?~KmS-- z6L1+ANh{xMvV;HESYo{*j5ca;+d+j0AyV?d2(gjB;_iYBKQ4n^MrEXYSx>`3)q+Y* z8ev{&YkanEJQ>z%z9^-2BYZ4Zzz4D=wmLN{b5DU>l^u-rWn|nJ>6pwZ#!Gc~IX7BH zmz+FAUQ*|VPmVV3D}Irmoo*J0FsXaPfLIJyVJy_%Z_c#nk+JBfEN?DzItJAx+}bW7 z9dLU`LS$zZq(4$MFjYxxebi>o=hXXjlSO`>cH>iG!d8pQRb2%_xaL{Zb^IZ9Y%Vi$ zip@j=3HK)bp2|O-uY>$Q2i6tb@CW%+!BcCcq@|wM8_4v(=jmnz`b7~zKzkj}+PJ3n z0LUE<?6^7n{j7f7ST>so83ey!fs#9m&1QIAn~ zjj?33^g{NflJn{e21TipC9`Kge#bKRz~a;E1|fR4xie}I;g8ji@Ix2iR9ZtYB5;}l zOjA$>i`AqP;QKLQStSHeWm-p$v1~`sC|gghZCVmCB1$`UGpkK2Mr`mTgLO~;Qp_X{ zGJdVPb~-zG(DaHc!dRV2&pY$}RB@>Il9^w@D!mJnfI1j&C3&5F>Qdal$QiJK8lu4Q z`L5t8KD?GMFD9EWSRy$l#GvHpSM1N`ai=pIN7yIQ$DS9uzj(TJlD%NYuEXu^`ulf9 z!}KVeHO71VC?rHK+v(U@d)(ro9&{XQQbnv#)n37NZ{5TS1yKt~@H4Lm4zH3w1)u)H z)q@d@Nh(I7xA$#v=$<_vRH&!8Y1p&!vH)wzAXD zXu~F5yVA{2HJc{bxc$5u*nuku&mr(TVZSmzoxCYT`-glRmna zXXn*bmG!Rs7?B`&6R`ln%APeFek@FrD(88_-+W=6WWoLKJ?wP*)t|mLCx}>6UK8cE zBP!|<*SKCWW=ofCI!$@?OW+5(_YF1BN;n+%uqw{bT6CynBff-(cVm_nVNfgWuEZTQ zRy~!J^`$bf`FvPFLu!=eea}&XQBv?4`39QA?H^OS*Dp@KEa2toF6K2qlj?RLytX?Qvbq z$4G9O+0q9YBHbh=u+5OHc85hZNOJ;GgFQR5L3LVfpC}7n@)>WwW>deKeKSZ5n0EJR zA)I`mT{p4c13ZWAfQ=R?b7p!QgYCU(+|OGdgyIbWQ4Lc=g4+^_pvux0a1zypp!;3o z`Wc^_EPq78{23WA(jQECry?2s5uXBFxBuq2u(u`K!=;5|V;0Zt6Xj}#=`)ci(>nRU zIL9WSq$lVV%xWNZ75*5MI0~@cOmy``v@rX4ku@k1)Ml8CPn$Jl1`mIX%ph+TF|!5R z3pHBX};!7(eyx{d%+uW1U>c?9AFZwTm$VXy+MEb!AiIS>I2^rD# ziN5dhG7k@a z)tfA2+N)_Y=O#&~U8Ps}kgCK;jJOl-T1wLC<<4kuL0rVzFFq}rw|05Kt@G|Gs&F*# z^(Zg%UCcSATIZWGW&a(g@(x#ZQ%(=A@!+tS6JIQ1>IkALHQkuGSem;L^~8Ib_ayjp^qZuQt-Am zjZ%#Yv|&aBFH8;|fU(5hf#d4PE^9ZLEPJcNK=LRF>_68y# z;(CT|9xJJz8;??~QGYSqhjx1i+>pW+ABeThNOeh^w%L=_ku*0~=88=b{iA6>49A8O zfkmk%JE6}Kbyjf7s!L2zOcza%Zmnd^=-s_dy3Dm4$>H_l5f})&8jQhpDRjW@ zzE;N#cG{IxV>dEcnn*)s$sCi~N>d{cxy%H2z4=3BlspsZVvKxXS#c~y5K>9vJ={ys z<2=ju8`gN(ZL8*#-IOtmbv2sncfu3lJ3;((_A*RLOMg9mDIXkAR$S$TARZ?Iz)qG&X<0{ zQGfWXCO^2$OkMj!`M%6))-UP2a1Tb|g$$m$9(~&U=c~IN3B3^} zcumF!*5rFQ3cp`cwFkKv#Np6{e-`&h(3k~bJnM5}XsQk?HLx|}s3!8SmcwfCelgIv zN^30I^1n&IkfK@(y}JPsnKFKzHr=Bk(=kMQv|N;$|Hr<9?eDgbu-{<-K^Z9wjcAI= z_y3~uUAfPX(J>!6dG3zOAf6!z znC6Wz9sn3!$52jdZ@=HAL&E63Srxtym8z$1gpwPa$a9~_8XR5rbr^1O?r6!)NqE zEr}Cri3!OFLoG$XW62~mMN(nLC54@>VDRP{WaP>Qm+Qao@QjSD8oGkBo#nj%6k6Cw z04s^979rBn?-6ZRVHsFkHlVPoVZU6-ry~2?tjs}aPmkg4*}5$lb@*7ZE$eb#HI>kP 
z^5ca|jiM17EERgQWjBzc-3K~yT0DbI;G@#wg;dT_KIHM|`LcY;$8!fz=h}o@ha|Dc zMqUD`$Y_$k@ItD5OA7y0I>QvvPsSvU@n8FXfn}e@Le%R|;q`=3-wlyKcB_5u_%iGF zv~Eif;&I5za{y0aZOl->Xd=t)=@!_!Xx4U!c{#6nDBkqS)4xg3FY+5I6k3s(LWVBc zX@z4WLn)pmwlpZ8>E4E@6nW#gYs=|(pK$okRFQ-*sUjonI^AuR4)Wgv1G}T|8mi6# z)0F9r{ddt z{43$HG}}LVK_5*+@!h49EP)iFBDb`jR(-gP2_r}SDROGYf1Z1QoDGh!cj3Ps@okDE zbZsCnA;sEDtLR@vP+#Z%ksPp)=xlEC-H-_CD@Msk*c zB8RXtj-K{rEvv;)tc^xwVv=eVZHhZ71`?$I-o7n|S zctM-}NNlk0sV5K*Ptxlqr7+!96_=r+m6qN8+5KloRAoH#LxCuUM}bsKls0CBBsG={ z8mQH~mw+acW7zVh|ej|DVkhPt6KSlow?&(d0-?baDorj=Y zK^fAZf@rUd#xU{8HoHf*!=`Vj!jw$!Qiz_Ma~-<HD+rHk*~0v;O?|JvTC16RO=gdRJV6|FXmf{U{7d~pUtAq4WA}=k{ zyqcsML#|l&wjNz0~=kFMi~^ag;~ZiPlKc7F%&ce zh2{7D{d+qkU7>%DW?MbURIn$w3ivP)L27tT)FzeWs7r1d*j$qeO#~O-ky#f0tdoAD z-g)q0?h=f?*;kce7h`)@$E&G7gGldu^)o#sax=8G?X%E?wY#~U%iJ6U3uf{M#H6u7 zZ74_5{*DP(;2ZQq_}1~cZ{jv^9v~?cMeV(}B1?`fX2MrM`gSmiJV zfu!Dh`~6p`#$fmm$u_KM;8F6t3Vfdzs$)4hB2r9yE;h)Mo$78rrg{UeLN4kVVBgz9 zgTSL>f`5IT{6IjAv=w_B=`)rCfiqQIVLHRl`(=hk9qd0wJy&_ybf=Ya4-67|n+HAL z!gPPRWXa<^CQa=mFfYk-nOOv=17@Lg2i~cpMK|&~=Gj|nttBYGM<4o@c6d?K{QaX{ z!E1xPLCSJ7q)g|k7efjeqPa{e!zqB*C_wa%y*hQwf`o)JwX?UenFH+WIYmxF3`W zvYFo#^iXWc?Se4VA(G`)$IYlzO!Pn3 zuV?(v6mu^(jZf!ee>|>Itbi>r6Xx@Qs71`|Xoh93^`d0@{fN!Z+nk(j>y5Mc1}1a; zSlg%J|22F1elBK}A4kZ>gL@nw&aXMM?cpG6f0)pLlYxBxWAfDU;;1%80SfVv=hn&g z+OPF>dmJGZXcOlC-3u6$c~hAHNm|M~4((nn4}Z0d@ME=DOy(c)Pku)7ZX-=d0}lsh zZ7!@YYsQZ}d(V|OrtSYYR7}t0J%)U@i3jxdXqoe9HZm^u-=%;1Y;0~OOKH(7&K-t& z?W8iLxU>|qR3KcbULmcF91;RTLPf4f_ZH*+DZJv=4vGm3;PDCu^JaW$1>C<@KL^Zy zEvkazN7NZ__zON-4#BQcUN8wBtHdSx-G}+H!u1ZJG%K~Wcb<(ZavC|HwQcNu;)IE zDfyD;J}>2c_Hf!gHXf!b)2lA2^9-_6mcVfTnlrC|6mQLscwsucKH+YMK2pf0EF~%kipB@QtDqw z6D-{3XtVT1{fZAHMJBQN>8gNMuTi=rJy9#)lx|;};XxxIpQ=$uht4V+DRIn_s`{p> z7!+Ebh1efuV^+YeJ0Q-(ue}CIHLQXABnV%?u9n0w?a*w-{8ZS=)SN z(f1H{WO$1zg>FW@!-F^;O^kNd?$UiVP)|jE|RI)Vj%g%;|xxJOqRu#O5n_52sV{?=9jjra8)TXM#xu7W2WWs$Z zp0ni!DUjzMGBvbi@ zoP0J`jq%Z)<~byqOTFXv>@EHapfu$k-l`i;?->j(5j|GsaxEq=RH1INUcwEZR+e+f&^mefIPPzY=Na4?uy)@mz+#k+tm@(sPPxDhX8kne`}i+|VB z1||C>fFE_r-)IJIgLltS?1a=)f9gP$r|}l>3(M)%&7;SKd=CU~>2E-t!E+X?x&eFdagK(TBtLu28{_D8W&H?UQV1b*0u zC5cFc-w#WKMub)nWWhnBgs@Q|SW6JBE z%uYEyqFjVDGog|A_}58V59V@iQ-^OH{kw5R@N!t88Pu@KHOP{Zh;yxg?U#s}wAi58 zE~6US0NzWGnL2uZZkF#8sn4#MtmocZ&4*$)Z)MNn_a!VG#fP5NcTaPkf-5XRN9gz( zvWK%^hx}DMqg$IHy>89lJdge%F+j<)?J%RzHiA-JgktkoZJSN<{#>ADRrXB-NQVo2 zt1oz~n)^Mtttu?XPR-J4($)D~J~9gTgRyes=E92E3RiDc*e`%J#r6lk6KkOAhKS0? 
z{zi_<8)XwyyIu|Ux$;}VK*5@$k#N-hPK#(N(U8EgQ}eYMg-Mu|gnvM8Ht7ujY>}B+ zZDDXzp*dzs@}#935!xTAhGT8vrL$_$b~Q8`J4K-66Py?vfg(FTke1T2#J?21K(ghA_v(L~ zCtk&^lOH;)k;U;eOTBJYbvbVg=gjIHtNAHcjF^g|6iwC>bfSj}xVoW^QUG+qpZZ?K zJwSCr^|^IA8)K%Ca!9M#qY0i;jFUQv@)D8L^nw7<7fwSDtU14nRVM}e8~&=LOvx#R z<}48zaYVQ=WK@~y+*7daAJCI&qT zf$zmCY(!uy6RzL5AHY&(m2_&17xuPEk=$b8W66Z|w11*7Z}90Fwnw@sfGx|cQYt0}PU5;McOX)Mn$ju_CgGUm>+CuJacx;>)@ z1SU)1=M2iE%fi{JVX|j^HS(NlL#<4!a|G}*V+iY`NnFOX*aq^LE3!}1EQ`}^dW>0?}9hm`3b1=R6_0OCYu z3#wnQHCh?P`Kfk@fnUnx2d%SoP-w2N6Od!#q-6*ffKYXBRR<%L90n*R0Q(t$Y&b-# zgpx_B>;YM+W`yPk;qvr^C*Mc7hx-vhYLYC)y!*^&*OPdx(rGp>glHx$GRSON_dG0= zUE9Do=`E!JqVSBXxu&WHi=3=()F!0g^tF$kk2c#Zmk+IYu{+8iyM!ya4&G9GEQGwA zqGnw{!@Gbnuoo4Id7KlUin zKZJa$78sF^qG6JGuF~azNXS3e$$86)!Bq(wvl6|R(Egoon&Yf8 zAwx`g&0b1_m8DHghfS%&oSBj!Uiif@PIDqnQ7WRYjhTTY(h!@mA1z)~!1yFh+Fyt^ zL%bMkkIG=STXlUX$<%q+X~22kTEO`y_;ePVBuR^s#y&CaJ3FPND?ceN@l$?wU7?gt zp;jZ}3C4ztByvCB;FqTZZ8N~f;gj$wyV8RE@4h(4XIrCapG%0zh}`LXk({%^%)sPKK&mNuFk za|eY$HKzyP`JZq3`uX)}7xt3FdH2219r)bNI0d0{`^>%~aq3H9(tfz--ZMy^u5Q_| zSw&p;ILsM#>u>1mF_zeC-HqiC#1w2yjnfU+8g;t5NdH)W*Lf)LPg&ruEJ%_smSVHd z>OLh9YZfr$99qyFy2`Q}%!Za1avlxlXzXJZupv_M@p979HIc)21_vfeGm{`{>*zE= zOG?O^^WJ3|+3vCj*oc2T>&(Ckt@TM}#nn-stq;XWVlxJjUm)l!_V4hcUtY`uKZbg# z*|qxbj_q;}_Ms9%$7%YMvVCnbTBKD<%*~5+?dKYV&sC^N4B0l7U#t1f#F7=NsnB7o ztafms3LH_lXwIq(=$tx;;-sm%bryk}U23hM0_8@D+t_ru)$QkIthFhrbmZPO5Cr~2 zJ4J9E_TD>q6+ZK{2Az{ao?79Tt7K4)4+ddIr~ar1J0>)7tI+*cki{eF$1}+mVIEa$ z0U{4)Qse{4z2^11vD9@w%B(3HODT%-%fg5So4|{_m*Z2t-H8en69wHU3Tk!9Ia6?L zt<+*py-cpp!(6lsuNK;S`*#EnXu60qDDoqsqa8s|9?@`!cv8oIyy;Yl5bW05zkI7= z?r=L#1tw6-5P-4wGg0z62@88pv=GcesXVX~*mU)O`Z!d4tD#q6_frZ3~T!TXbM=7R?SOkj>9{ubDqcyp+2_Ye55 zv?|6)3A8$*z#fu#WB4#xB$f-!J)O7R10Kmm;-#iLy_}wlekA*7<*d)GzzcgMuju*s z-fW~s0Bq?H7abj1+O-hSjvB^eM}xTpi5*%Nq%zU%Q~ne!CMpLMUvyZz=0sTQ2lU97 z@$IvgP^BcA*@k^{WzrP7A@*TOV<=k%q7qGXaUj|(q69s`%1#gMLeD%Vor{kAOCtsx za`cE6>8k8B_eLx4qhWJv4+u|@^I52YjEv51uf9oC4u*Nz4R2<&>`!qb&ZTHt8BJfQZ-CPw=7CJPy|z2raIeds!tW$E4GOw zeQ)XjUH6m{$d_X_ z6sWk#K>?yD2>>=aAha(}2b6^@{>Q)j&Y`h=x{XsGUY`WrIr+|0MYS2P`7rF2LFW@ zazgS`ouHV)(cCbGBJO;TNOK=2zux7=E&NP68{!PND>=*Sv*yda1&MTz|G{`Gf+Ho6 zVIat{&)^}OL3-7f$?Q$BFW*Jsu#LY>!L!4%FVUu;#PB*`TbB=Ikm9mNEbn9go)*_# zshQuL4eK&DHUk29g1)!Jv!yk%Tf!;H=2%?Hbg}Cpdd%8!HV8^Q^zt*2nQR)yK5`&H zePFWc;@ZbEkS7w|@qFwGD!vfsV-ql;!l9R-kR=i`DP&wr(`2YMJg5?Smr=3xs789!6_P6OBTp#LD#Tmi+fbG8%@$2FDnnngJ|mwE#m zmgFIC5c+`M&3QvS`}e25Pr1(-?IbK%{K<~bS*Y}8oGy9~42KZ;=x@pVb;0d=8trkv z|8j3^&V>5CCGxyVtnn{G-`$&a(3JR7SfaF&2L5qw*j~4QZgCwsb3KQK?B2CG`488K z>`yZL^h47+R0&CZCHsvjv)>+IlX(>G=iFH{<<&=j53yJ{^(1PHN!fe2wM8Z zM>!!{5z^ng9p9v+;E_FI(Nekwj(X5cc>OXZP>NHUF~|VU>$ADXiSDLN;M!f!zZZz6 z-;Jj8aruwTy7Fd{?`1dBYNo;1(K=wS%ofQ}P{jiQeNR83%|IBr7+it3ItFiU^oWXQ z8Lg0Zvhazud@XUSQiet=YFPQePmD^13TJB5kw;^g&mvW7qJ&pDx+ zxfBh^OA5O&>7Dnu&S+mDz9M-i`qrn?V_9kOdwE%uN%Sa=k#myk|kCceBMu zySc+_voWf%`X?^gbw(xXqSn+jFkS1jUA9@sA8)ORAc;%&DV*7b!!Q%ysnqf#vPgCx zKN64)PVYAn!(+Q%5Vo@4?2RS;C<@T{MzyCho@D5IhNwM*PhaN=*|WY>p)kc8Hj@9< zaoVWOIK~|bCkbSlm7?nm`dqna>3dsxPdH7Z=l?v#wAv1SZ2Yw@;{Plm0_&ULZK*4;CI!muo0M{~Q(`QKqiM?QBzK@Htp=ohuaPF%*V^nD$S7E_gKhTYTAJ zN)dwm8vF;=P=v6@>3tSXr2BiQ-F#12<&7Xw;HcLBvaNsPVefbo;BR;E0~lH&*P*2s zS)U&Jhe=WRxAE>qb5K)z^WRD_k4|j9p6x>NmVC6&RxiV`$OHDqJt9hXR3pC>2X)13y5Nky^)MZ?p=hIqo5byz)5N60U#P)8=%lbYQS zY|}^S`}MqOVf!iq$fBx)?pjAOzyvtZ zXwN(sr#Ir!1 z*#=n!nodSQ1Bx|&ivtyTM+p5eU1WTeD$p(?UXQmSxEwl5GAXBdHB#&_cl-o_# zj5M{YoM+QOn2-dV@3SQyEsQDgi{g5_U#4uEH|auG@rO+ju3E~LLcCSLZGn3bxUu8A zK#U0choW?X$TwEh?64JY{0BFH;ifxc#>j!xunXA8{%j8yjdmvlbTHY}B~EG~huqrM zc4_zmzYpQRI*%(0wa5`s$p#OaxZ`{6E9{c&?ry;b^8RLFyQb)Uf@CS>V;%rgZvSN8 
zoFg*x;ei7}`nl9Vo^+}kG(~(7Fgm3DcjxMSv=>NF$jpDr_dH|1v(-e^6qDl~Eb^=x z+K%k^O&9BJnsGgxJ173i$!52*aU`xXw7adr4}YacGt%%_hKqhicNu&2dpA4)3MZ(R zW14^N_My`9{nS7asEkkk)@XSB^xu&R$t0in$T&M*v@DrHV!esZFp@Q0aunc>Pc}ll zB%HRSG(@0GnwIsn(`!CkGd`>rpUed8bq97|w(DDk^P&)bW_@iROKxKBd3Lt#I7Bta z_q&jS_|8Bq;ep8Hk9bY&P9MEIiPlC`} zxwna+mtb!G{q$QSM;?r3++@G=aI4wg1@#qTt`z$B^T?8G$@f8KEd{Uebz@wIur)_J zHebFz0F6Upn1~#%BEpk12>1dh(aYnb$;8|?_`whOi%5{A1ym{jfA^ZSdsO4M#k9`i z-}Uh0GQXoY3(E{{ODT^!I;cM=ehjiVj})n|SQCC|SdUqR6Bx#GbTS-K(s~U~MZ%c7 zDvoU$4~dSBL7w;}T>E#mLGAYMS=vT-c5HPcv&NvIY(BZQ;>E?MW@Ggl;G>W+;vQFXWE z_vas^B%>211yV>I)9daRk%** zT`nx#x}8U0LN^7ugF*{U zWO2xqV>J5(dZ`GPG2N66epS`$VKX5>6GuDpf;eM@vFz0G3 ztRYhzH=ZLKwMf+<@~4sxRqhRIJL+7NQ2A$Njtg$F7j$n`i&}@wYitgO#`s z!2j>DwQmQWkLea+;c0I&!){Lyl<%bYO)ks|UAOja^l&>gwDyO?QuHk;dqWAs(Wff@ z0X;U|pL#A+Tox7g-)p^cI^T+}08GbXa33=fF3Zl|ijv!OTL$X@7i}g6&JRQ=-}H)a z^0B1p(vt9S#FNRji10~L*8b!p-~CM~@aZV2wUi#76HrA2s#n`Joa;Y=-8vPV6L3C~ zmK&fa4ogA1jwK-KE{ZqRl*XpHpHTa7ma8i{P0xxhkn0%_wQOpTtvNt-5^BBSOg^!A-bt9G&}?eOVal;$}k;z>}dFw99!>NOxE&p1Le#c*6OL*hWy_t%dg~Qtr$47SmypK{S zTt;F<2emr%FbaO1QPS|D*x%K3|D$7F4Lc>0BZmyPyA&{pW}$iP66lTkNr z(gdwN5ejLrd30}j5?eY436Y}WH1Yj@xD&&Zj^AxVUdmgjg#FVe>NorQ{z9#9Qxj%m z%2)55SkTqGDP7ql_L$*1dW@DwiC2oP>Mu6)i4FhC)xpO&67aY2aWedUUgO{Ty^i@x zi3)r_&;}ZbNNyruuj?r%CtR}O6A>hfGMnOXil3)|@#&&tMvZ`gTZ>QB_1Oyba-XmE z21Z(}_~7x&V+_OI@0cvJ{o^?^*59;l=ST5i_3DgS%D0N;iIHvR%sHeV(vw`=Zp$A2$(gh5lmOGP9vR+wg! zB+lT|N0JE^5>P5#JGs32P>NHnfnxqV#gfmtCD<)|cWkm{ ziB_3O3OYkHAb-Hv1!|9Y!tZArH5ij+^B(40j5NMbI-Taq|7-h+gbNb{^LScQ?rLIu z&ryv6t}8{q3=0Ocg76pT}93uNl@+^ z*yk~~Ktz+p3TRNoucc4o-sldX3b-H8zY17ca|ARAQcn9IT4S?ZLm8r{ zF)Srzwpp`2+u#;=eYQ?B@B)Lehct_!G+s~mX>^z53J;K3sI|K*CYntmq&gZ&Y|~~` z)7H8g^v_l{$YI*d6XS}D(JH0cFp@+)V$94kq@R{&>mQvXs5Qf!er3wrArkz^@p{@@ z^!ha6P}3vkd()u{FrJA6+sE1$D)+fRI;>}MC5jr$oYTW?7kTb7!b85 z2dbCEYbkdfloUEP1Z&x4&;KNIKs2{!bu}jnO@!W6PhPEO4{SoOI;j5z0l;8vws@n- z31a+^QHtj9n$#BF%_{X_&C&S`l_UFl-E^$H-ZUIGiw}f5&}b_n5jp5N z4yYl5yd^wdfy~DSXO;i~4D!1mcX_VRK>N6dcTDEmt2*H>r8uGxZUU#%rlpmZ7Ly^w zH==BX+abcy6y$_NpM_6Kl;aMC=|nO4ilt3mQD5+-gGKI+X*11mgHNL?SLi?D(GIMU z{+=|HM4OWL4(9)Iyl+V0V2}$oMiXTbPTdT#KxRx`v`RsK6{3#DkZ^gidFj6H zEX55a0Ubk1;~y0j;q7=q@sohX5R*1a4<8ysdbxtCJ9^qrRxk5U<_!l4@iongG8Io| zobk=Y()0$Ym)lUw?fb^dZKfU3F>$h-%j6Rp)kz;ma!F5Oc|@i^OHxn{G=INC6b)4pX*nlD2>nlLPuc%6V9d&X(zh`q^{q=yM`wzof+c$Sn zAX&Gid)B3B=*M?Kx=>D-fjjxF{C&)`a(9IgImTT8ZmGJ_r4L>p!`!#7K>p<%974$YJ?w_}7z+JYe&9M^(=}Vn+ zz#68gH7tIw&)vGEbGRXZO|#fXdw|T;-GfzF--j-lsn8?ker?@;-|)A~?c6}EUqogv zSxqq^hVXM{Vt51Pjt3WNTnK{DNkeh(){_;UIEi-{`&c$l=;@!c>_1t=Jbm;Jstj@{ zd0}@kwYKk)JgR7bBLIGWbp?tx$WwmH2a=SR3`~IbA#c#y_YI(ipk^qw0QsN5zD}e3 zC52wcazd7hxFBO^)C~Jp%gm?{^Qr_@;QCiK9AcFb6s>Hw@8glHpb+iEul1!drLXH` zrXeeWd{ie&?Rp0HiMSI~KWdlbj7L#7n1mjdtJ(RdofCKCgf^%{&}m!wc`%G11X1so z^ju;9S>rqb7zL!1^7os07n$aW^OMK%xrOi_^`|)R3^)ImvMY4`J7CaL6Hv=r~NMUeV@X|S1!q%tGATBICjuJSC!B6~79ExA=& zT2Nv);p-n;)>swh51vA3r&uMEsT@Or>WCh6aPsv@4s83M<=AkxlJOdQ{W+2O$9YN^y?r z)r+}CS0x&*H|cfX7IExz{BvpcKKrMb>R{L*bGGbJKbR(9FgH_j3D3Zz;FW*l={B9e z_Oyiq#Q0f?j;hSS9%K7Rs-u=nAI%jee!Kr4QD@l@Wf!(<5kx=`3F(sV?(XhxL>PKV z=`QIWI;1;?mQtFbq#HyUq=phC_RX{3z284DALhea>%Ok@JPyOb!9kjA)s*4-3AMjC z^Y$%k<{eq(0k~28l^?LW(tlnQYa0I(fXIECF1#_k{I_Yq-Rq00qNW|4_0`}Z5-rMu zVF;_KthlH|MfbDE$l*>EWPUgr#8gsLBPF;Z1Tq-nJeany_D%on&YC_EzjJ3&N}VZh zYoLrj?Plt`#vFDa=TfN6KrAqaQYOAho6yHAu9h0DNN*auU(}sX+3^Onl*Bf|TXLLe zdO8+yLotDgiKQza)?soA4&@f@w>K-G4hmeRA_kQgCm zdxJP9RdDeK(!{ceQ6H0}vHCHp%|MnpH^c+3hoG6hU`w7n&_nzc4n;!>>K^4-P?XW; zPLtVO9B{=&3<>9Z+{eHCrbYB)8O2Xd^lK~28lTg;!+oIPknd_}gp1-y^CPo+qSdr2 zSw4Ml+Nn{C0|zK;YD&YP*)BE54?a>#4>j*YZbVWLLucML0e;;L-p2oq^REyTQzwt9kmQOVc$ 
zWsf$O#kdQBQ94kMz5(c`D_bIzsyAzolUh@MoE#Y1>^(9dd@$^HyXyp<4U}$z@zRD8 zjcW+XuO9&}U2D{`A$tbs<*Oq~<@<&?#1u@&tGmx*xU`QSP3ww^`e->b#vG93IMv=F z|HbU<7tK?C9HZ%^N}X@-Z!n-wL%cn>Ca%znFgqFwH>oL?45WHs!b13W7d!MD<4}0Py*dQk86?N4z~^6s-TcsINuO5G=AL z0b!nfASPspKJ7nL@#luMUh;=2@^Q0v$ldn0I)CHtGhqBL7J=Iu-Qw=L$@QeX{+AA4 zaVJS=XTHNj{@bog39fuWn=GsMb;@?}wX?BHd3m}l4EqB3rj%j7NNepJST5?r(0vc+ zT{obQA+8`LKHT7dyP2<9iAdpOy5!@!sx+jhj;9l_@A4WrOw;Srou9c4#dd4RSj0)} z^3*3T&#k}uw_F9r&?Kz=B|96xyuQxLuWdQb6p2sqlOD$TqUV4)y~0J&?%?>F>~Z#y zQ91EQXr&cU`}KZ_BOVt!6dglzB(R^-Mf`pEJH|}^eZki|pwB`1%q3EhxB`yPhUQTE zxONVdQ~gC9L0s;;N8{D0FEb0%Ctm>|_I$ibV4!F(7>TQWOVmBD;k=0KsywP@GRIf3 z0vBQ_oRX6FD6zCML$~Imyw@9L4k3K$ue#Ka17z()ZT$1QWI0~aVCnbe>;DuiyeYzp zI`HK<48NvJ($qjRZ2c1Px}>8_?!r-igF(U9H)60vYG~O6YIW=*athH_in-I8r?9}* zQY*^p=|_m6c_o5Ov;uvL_{(T&#aW>=fRn?Fz8NO&lY+iIKO%459-AguVn0snJqAiw zOs5r!2BTE_*!Yjzy7@0WWbe!ioh~$0J{4nyr@Lxkk2rG%gJEAHppQJ%wKw#9(_lsh9{?O@l6wqwSyMD<^$D*woMJczGei}w zS`Zd%`@e2;6?;eIB9|rghA_)%HPSgE(@7=0#H5d=0i@EH*NCH`+C@5FFkm|V6fwl) zy5KBvLIPVkFK|hcgYYJ#0nfHVVSW54z~+mP3K z-RYR*p_SP5KM^8TOu4R;Dd%IGQ*?Pgb9<6&kmQr^mFcPTSM=DoiZVyF27c40Lx&$T zH260efXOdABGs*( zvBR{L^(;R+NnC>`N7liF2WvWzb<$MiF>#43NQ1o zAgvoJ{u$9B-rnn|E`Nb`-_Z=q3!3)dA*oOoccm*-bKYH-qLX;q|J~@u2(`QqCYN-1 z*>2s%0jA=Iv=w9o%|*W^^ZH(EBK$r0KG(jDgf#P0(-*`9Opd{DJqg)RSLt^)Pvg|- zy$ljBJROkY+G_T`c(FfHtJ0hE%f!n5w7Dx}3hlx^u zn8x(%AF203EwDuiAMg&u8os75sVgb&b!t8X}j3cBA+~sN*mtI{GEDN(B`aj)rrorhT@&QTAJL?!ZqO={1ba z%28%>V(DeCCBa08KfL=~z6tr3-mnW8G0Skrzb7POF!Jl$L=B~J&2raHIj0_5cGd{m z^t&q~|0b1B{LTH8139#OLn8n7#YPMHnQ@=)4T=o6kEX*^x}jQ1Bb#4tj@NFZjA6Wn z^*<&$?jz!@cB;Mji_GsqZnRH!8`)}4;v!bRybHzS`}kp9*iGBl zzkagRjIZzO!A;plmHW{5M5?Oq9A*;_2NWB|rW}9uJ3MMsh93b=SbL7oVFeBj_Qz6J zkos5{athgV=j4!aLs_nivns->LkRaBvbSOYr@qBc59;aqM0hb7=Ri*9EgyNM={V`Y zW9;_bG`7teD7mN@!>(Nu2>J<`tozPoSJu9LyN$Z>BaUy~W}fgl<+0F=?N$swQUBih zHs!7dNG|?o*9C?%`(L^T`Va_mdv3FP8dA^uQDyKTq^3YEby0d%dF<$Utkgga-v4ao ze>FEpH!dp!HwvUa4Om;*PU-118>GFviTq6ZWOF-;>94TvHpke0QFWS9wejH69zch@ zNlH20u=&!vZ>XE<>9TW6Cc-TuZ0T_QIfCN-Z)JlO5fQPJUs8bF@$&L=v6yECaV~^^ zCPKaC>GbJAjL8T|UzJ`nX6iGA)8jrOC$}jCU)UuVpxExWm80tKgSV2zx!_@(PtSr< zHlL}O=emyxx3@A0NyE38>ur|=-yDWc{xMo*OUdlhm2+N_S*DR4H&-A}IQsKrkEqTQ z;f0y3%S@`kyRG)Ef)*LG26{+DTjT z+jZ8i%js_+w&|1VMIlo@&W|K0ZtfAB94ou*yR@WDPtB*3dTp zor|r#f_!`83_E0K!V;ntr$~dXNB5oqO3A@hdJ-coznmGX-!eU>tu<+*q>(7!+(gKM zm5?mYo6dEETAkfr#Fj4tb#OoxC7FjlH-?5JznuEMS6gqBg6t?E^x zTy62bM&^B}`#Fn7d{z%iq}6_iQOZ}V59KZnI33g34o(UYW(OP@7Ox1=VRy<(nisJu zzSvo{Hv0Z7Br$%C5^lqmB1=WV91RH}5x&~fg7K@NRhAVLL)yS5Stmw#2ciSj7_BoI zr6+nb8@C(8Q^^sPre&zo%*rvZcN091bI&~*a`^IygsRU*=-6C$bo@8%KKvoGuBXVf#h2&M>*ggWO7m6fX1 z(^_maRYi6!1pFm{V_jVk#Y_?74Bmfa`Tt|(tbx!#70 z&%q$XS;$ozcm9(68tMTBg(w?H$5hQ7mDHMF5~e(qjuau_=BDJ-7+Av5P!ciIh`6V! 
[GIT binary patch data for an added PNG image omitted — base85-encoded blob, not human-readable]
zf6PC3r3i^#1()j!Us$#ZkvR@eluTv*BW8NQPF3o)J%pOiU-Yj2MI$rVL zo;%+Emf>U5@`-#NjGSN2-`~!|t*$O(dn-UIgn9 z)J?TeIFgLmHNNK5<)WQ3-lhn&;YRF__f9|7*uBL+YVzB1?vG_KL0?Wg zo}5*VXg-!zlY6nF`9T7wt6xFaAhNx!%0Q5hl)4gGW%J_zOc9X&435R{Wv@sHVdLtU zjBiE`>^U#Z;Qn#hr*zDco8HGrnZbbjkp-C%j|@l6YaycYA@Tl`LU+|B=9^6{0AtrS zQAUw|ayBE0C+*-!FRXy0nEd=v`bu?Ih`fCnuu&#|D$~37$VBgGPZt-DBE*nA5iXd`w*9pB z94RfEP@E^@M0gws-tAXwF|*s$PrYzjQdZjE3U#gWMJFt)JiyI+5B`vMIw;qVyZJeI ziypbhfVLwYh<4#-#j7Gdtr4T(A;BOo(pu+<<9q)~_;A=^)ve!O*$ltwjr}d0wjhCP-Nf*%`fk5j)~dE6 zlYLvZ#68%K``!k824POYy(COz!H1lu<}8E!>j+OD(}@$voeUZUcr2>V2cjG4i>Bre z*(Um~b>ox>NCf=;nJsrlb|n6_{4t(lNf)4n9@JRR#FdbY00Wg13*fGY;@w`zmbCJ^ zL`ppXBwJcNVrt`1+e$$;2^}Mhe=JHTIEp-@@31B{gk)x96j>zm3D5V{n{c{KcyrwdLpAd2oQ9{igWPbzda!IR;4W&NTkp& zYs((_U>4pDK;!3(T%CmykVIkExN6P)G6$um+eSF{$p>g*dXt--MXnl<94#fY^{Zz+oG+sGOo@*zDn6Y5L|S1PP_;A z4o2PJV3uhXHiKathkikDT(<3^8$P_zYcJ_*so59u-&5k@QFKl;xIs#W_ivcL6L%>~J`^GW1bu;m{^{@$knCT11SQ?MXA_l3ql1S~%)lZzB`5 zHz^LI0QUOUTrCNv&|ihOlniQkW$b%F(lw-~w^H%e}1LZJ`CA zdQ7_SXGj@9<0RNB>DwFe@1-y|1ThG2w3wZ>u+*CUyxd&%xNEeZjiU_!Pd5P?Wi$*1oX_YvzDaQeuXombn=vR~R=f<5hDJP1I8iMSag_?_&_U zzqv^F<@KMxNilJM)5#$YuX)pkgz>%u{ScV?GBKB>cHDv*+NrOSaghWrpp1M??cMEo zY0+qFJD%=5&X`(|!c*$uoKe7xqH$p@4@i2um$r-49Z24$*>Z*JwP(4}*$e@p>2Z4c zP}A|qUc?3VIWE1>CTLH$V@Y_9f|44WyjrTTtC266xSQ}Ycnx*XnimMyPfEz^nTPnT zkTYQ=qxK4{;o@D?*fG_Kze*DC*Mx}OSl5J=kpdG=?%1tTDXfkt?oL;u&ff-njXPDs znn1ci_!Vs^V|&w*J#CBa*3@r%Lhmrl6)WM*`&eF2$mbI8xdIP!wnRPg>Fa(G*NkKOcT1I8xYlyr1X|ci%hqs&dq-ayUTy@1)SZ zkf;~Jk(eL*6WJAlNI3rn90M_^@?BmCy(7Lo$srqi%12w9HXfu`Y?%?K=C|AMqgDr4 zUiz-P!@o_i3D9J_V+!8+b_7qX*1(!RJ@1%+OP~K{mQ1GzBjZqTUwj*>dzNArV?%ue zgIZZx$d&Lq9&WE~eXjBM^0Ex9gGEVATaEbf91%=#x*P*joeSlwj&L8d88LK1X5#QG znEuZx#Q02VkTwl54sJ{)ZH4+>3zO1#=CLzsrqE=-d)tZ*kqra7ehV(lR8G&VM4Ya; z=C!oIOv!&%bj9&g7SP3v8hq#(9Af*R5ZEd=l~Jg=U2m46&dg3KxJMn+mD5WM0N@v7fU(HUEy>xnE?T)Xyb&K=5nkQu%)l zm--qRELLgpV1k&ImX(zIn(lXXDg$ney$lF@oCVALEozj+k7LT(@J5tP)_&GfF;BG;r?m<^`i z3F(D+6c+jxCOC+VbRAuVwcomjdo#u% z6jqpS*)7khVj|W5N>0X2dhPxgvZ`bq=F|Fk4F7Q8`^dY#x)Ec#u;wT9{>O}^Mus|~ zk;0_-hO1cnOgy|;Fo%pXWL-7|X+mbXpAx>55$;WY%Z)^V<-7%dJ9HL%;BlwXvslFOxAm2_AcxK6rymI+e3KZSg@q zU`!38D~|l>M34#L{qUyb<0s{N&+zmNbD8UTCJMCsP*JWE!zcJ+r0X@tVQyI>6*LA_ zF)Be0L=`;KwC1HWijx=(0n;dg>Qjz+S#`>B5@q$M#mX^`g;N;Ks2DQk`4tPx!Dxyb z!~c{$wF>TbF5e&cy!PRRy81u56n{8-IR5ppM~#)0!Vn*2G{F20-Qc^4znJ=~yYT#7 z=wTUKFz~uGhQkn_28u9!Et^ViTs|>eaWpYKl>d<|DcBg=M~~Zbp5eC6=s&TmLm*XM zmtf|-p9VV;{5vT5J5jDa3@Iw@GhIIuFM%x6ytt#~zm|C2`FbWHoV$g4py3oaI5>t2 zAI&wJYgn|nv7gq#GSFvdvq)EyA=>r(ka$*}gCHSIcqMgs2UGPydpMXwXryH&%J3JK zQMw(hA+nrHK$19^+Cs+(%Sa@mE!VRJPp+o&8{u2n8@V)s3%ypuO3;3hvp7f&1{=9w7^12lDT ziAj@Xm#U`*R%GX}v-5+#=PAd{{r#`P@Nc6jtfEL&JUp`w6rMo! 
zCYjzG81Qo(mUzg_v|{cL8INIXk0z-aO7$hQHy$UtZ?4HLd|h_>`{S?LtIe)j>GoKA za$Fky&_BOkFnNCLa=AFu2Cy<#@ec=1+{KZYs`5F+T_WH(E9-j$EkUgLPT~_VfOpCS zxS{kc2^zDIo}*x9wQ^NOIUYwa6D(oGJsB5u%^1+#O=y11q;q3rWIYvQ>CK9vbZSUSa&m(XhEj{7UW~{8L=`$`Lpn zDh^Y1HTU&mQ>>%^dZIlvV!Q>msvtPv)%)SXz!wdq>&;UdU`jK=v$c}~x|#|L`&Wu< z|Lx3=C)MGhe&Z z7;a}c;U7))kgx1tBuxUY(*fE2#TmjeItJ&_90Wp}e+`ScSeM z?}FxLv>LNHb*-0nHDvdGcg}i=oo3SPJ>R7svd5 z*vXFY8f)I&K@m})I+X7Kko=VEut;DdVVG-%%7iUeEgkLzm1=pr_r*;Sxy%=UC8oQ~ zni)yoO$-N*sa{u7YALF2Ezq6&Ikosy+Cn!?YjX zBw}9cTA9llOMCgiWVx18p1#J!#S&-cB%tJT@&_%*>2eAO>F49-4;gvH>7$lBa5vIy zECgWKbuj&5z{+_tr;+DK+jz35;+d`D5lmVCWGBnEOw<17ZtT*;svW;CH`{8bjBqs_ zp+3`9umG_&&3o`0`D&p(>=p*5>ZPCWVyNR8VHy7fQ!CJ3cY0pY!k{gKfjMAwAh;M_ zDk~D;PM4voY$BJ#|G+YcQ?+#4SqLftaEfZwnJu6hi<1Sol*%`_fX!=onpV2<)`RSlBa!h>z<|32mf zHU$Mzma-pcEW5f>yY_<@sD;|sB+W*Kk*^zHtb{%Syfv?Xz4tR~5JU3MH9lks^{$Pa z3{Zyul_0v+ploYRpy08J(8>fYy1I9}cdB>x;VtbJ_MB6M7*mA!>`8Ke^!f{vd?;Sa zU14TNTmlmCFfzrQICV@^OwQN=6Do`IrIGKAOP*1n`H)GLU2%@2xnc=|$rA$k(tk+# zlM}|V=WPODk!(L8HP`}ua;IEq&_vRLwM-eOCa$F{j4JII`lpc*yJqRzgc-GKVgGWU z{JS<~_8odCWRw*f{J)vjJmg90ddUeeHP`8ievm7#C;xIuHN0}ay{+@wxLZ%c)UMb4 zJE`%Zh*#H6k1fqCXwOVPOM${-sdM6*lJyr{G34jx&!CLldt5Y$zx=$rJC{0!ZMD|> z8hQ3uz5;`|FGVo;t4uI2g4q5(%)??nYDq!LI$FWO$DeCw)pLL6LZ6P^rf~d5bu8jC zwrR04V>g<=p&Ctev>roDnOBb3Z~l&1T*~mnt}`H~;OL%v%bri18el7f{q3zl5;rro zRLwCtQ6V!9y)?r;Z>&mWtSC{{TG%0C3dB3N4=362i6v1;o=u2NHN#$7-^kDh zO_M1m{j~qk0$4{u*7J&Ue6B&t%TM!YVJEZV!GG2YA_ZOL2pgxY?JH*iaZ<1Z5(lB` zoD`@!)R1b%up1yr9E%RMsH5!Q>%>)*%wX2F8=#q6FW9p@&n)<_M-U7E71+mf&e zh9d64%Na1H%+Zc&6~E(HIEvC{c$jH>;MYLJe#m9cHOWQM-4E*txyF_$CWuSpuMj5_ z19Y5r4JTD>4^e0LgT>CEg|ELQ&CFtIAq2u>X~>>1p``Lrt-vKGph4W7FVcj-JO6xZ zJ5~r1L1IX-3c3ZG$d^=$BE*d>>%B>klWZls zE9UJ0>JOoOzSk7rMToB*C7!p2D_GwKt8{-quk0}hbl{%N?DUJ~hR%Dra^4mWpc2W0 zg(GJI;WLFmaN-evVck_#5tb81m7KP(OB@wG_=w<2DC(61ukEB9C#AbYk%2ZK5<}jE zKrjg^$u-g*S=_vM*t&$;a%xU8jT{j{Ktv=@mM(pX(H>#QfMqAcBSMNa@d)2+vf~E# zG52%8>vwQ}$b14UyUNpu0ez^cMSz4%_^To?{H88$!>0HSc|!96nnlD4rC%H9fVU#gZ?kou*Dz8M|cQ+BELUWM;795m;8OR>W@9YQ86Oj;z3tu7y;FK!TxO7BaC{9HKux7n2|Idc;B{$)tyL(-n$y3wN`~B#J|zKsp8v0n&1~ ziBbu*gBG;TiXy-m+Vd4009kCEJ&*E{kPA$32>D{AjX*nif9_Vr+Y=kuL3~Fwc$)rt zk&yj@hR<~>ao$OSrmcI!aL<-bc(S1z^Zv5$yFFs?g#9uj^vc%$vRASF{<@%4Wii9K zH%HauL-H6KiylUS5_iML%bR#W;^DjXTIoyT5Ah7c!i^&ptk1G|WhDnlkaml$GCy!r zyXF%3Xn#c3^d3(G+`<`jqT-!(F7MBe3M^wsA`}djB65fHy#!_VW51mXZOIwDptaxC z`3^kYri+6t=CX06p_l7Ci@(47zApGC-Ms+XANrWL@i!i*vV8ZU5aga6oATDckdZpB zjb35VEWcWUZ50m{lk82otH{U+gkj)#DvvAlxR9@)=$=vH*9F77R(A&jq%IL#{u=`F z32BP;>RykZjPmN0)@zDr=+51F|M4h*pNSxot%nSf4)7udjAyZCH6Ocdz;1}@M)bnv zsAGkpE3<(QpR)IxUgLXk(?#ShMiy@Xv=ao-;XUh-`i#LfjUnb}@%6cCm(bv5UFl zh~>THD4;)~Y|IZ*SDFvP4rXL_;Z(7JQXlPzze&VF9TL-*rKEkB_@uaV$^69l8b^r< zFRGL|0h>WHXEi6w!h_o1CWk7QCjV1Z>NQ5$FV#Dg6V>wlxl~qpw z46?Dgx%4e@eYX8r=Mflwg$cpG-e60d<#i=9n$D%ki%XmyzZDnOea;qXc5O5p^yd>lVy%tFl7@OL-wk80{)^T`e+ol}wbsu2eTe(ZkU z|K0E1ZUcEA@wdDOqbebNH8mvo>~h*QGSL>g@pk<65X@`<(nWj2@J+5|iR9A{Lf(?QhX zv8%%Wx_f@n&8B2BcFa|2mK}NB80@wjeYS37rTYwBU*?G}pr>aSZu4oHi;yB*Ivtoa zfN&)D=$sv?2;eCVI+Jf0ulQ+1hsl>Nr%& zG&uueBg2M>u(0~Y)fa;zu+=vABfd{+oChQT%lX#@9oJ|7f=B=VrJnHL5Fkg&LzSd+ z!^n-Pf1wU;sZT&*Q0RsM13LjLeHCM&4BKIl24Ms-8Nuaq9eqkfT0yS?!mo9{fkGW- zNG}>n|K=~#cn)=&NOfS`GGuEvIJ~E=;Os$q&QqzJ`q^4ixz#UoT^L> z?J8;wE!a#vaPpxA$1}5!ZaY>#rHw@uti_add@irnZUk2j_&XdkeTqj4KkcDiLHL?; z``7NOm;(tB*J|@%;PXOS%NrHXdys|7_#-M2W%y}`JVuUAi_*O<tx|5zMH(EzNL0zQ2x{57i9&VXPfvq-)Fl{BY_xbHqfkjV% zT6PU%Njb!CN|v3ixZq=T4d%6(LQnV746CgM{{CQ7>P#9-eiIVo?VNeZPf-O4+~J;& zklsXYlnPAes)wn4DN67$xUkXi^+AFQgoqS@z-zyq6biI%k`C@a6s3nTH z`IE%cFsKcqGc8s+L7uYG`ym|LTsko=G)tBH% 
z&>wtE|G1{-t=Vc0SKfN;z@W_z^RWXt%uUt*EG(!r&kVju#&|wUz;kM94W>aTuY|6$+z^mO z8EC1~pL%^{igRf<>;sG@KL+lywXjp#X8s|TI?wHYXkfwIvR_frJCpP-l!~kWU;Zup z?d|RGm;IZgnSqzirS9IID+X1_NbHZ)U}Rol2!SyS=jM@_et&&*%9HzUx-Xlc$ARiY z^kW7KjI|*;(aZkAWkT_~Xu%&99gVDc%JM;)4l>S`E2{1Z zOT9OgPwI#l8XP_7rSXZ?(nzLnDR+7D5r`_JvMwL@S}A0zKaEiM z`3_0Y^p^CnNmjjD@Hd6sFaU`Bg0sM0wr<$+vi9E`m0e4rH{|da;kd_>wAp|+u3wFW z^#q7(PmUX#!o>PMm+pb0A5G;QWuF1=zigF)hvLDJ zOBGHwfe4%&FQbkvR#5lDn5|EKdu#J){fcBL8TFN#vQ>Mrc00P!pIUC1!AGyjgG~M~ zk)abmd-cI=QcZz&K4l<$s%{2?u79sl$iV531=Em?6L2Wgd=?Sv(1;>5yt*%*r!p^5>-h5?T1U38GMO26edUR9)g_He;M*bfZVMX>skJhzVt z6ucZhT4J=+0s)8m9%>Gck8ua783mM%=76kNl&^BevAYM3o`w82d$fUaRzM)XhpElp zl|h-146I%zVW<>&e72$>>ET-MnNx0FDYW2E9d6y=W!c~lbfPvgpe%_3hO&T2Ywy+? z5{C5l|LB%Z3M<9P)9$^RTt7vHBKO&CwB2tnTQWa*;*i( zfe*Q1&4KN#^_ptcVZgcZF`CbA?bLN^dBwITszEPe#kPZpOV5`hIlBR2EGp_k#@E4a zHU6VGqMYaAP-UW^a8$yJlUjh(fNPgbkUzb@3G=ZOnF>jP%UsY7y0IrchCW_cx)2ZD zawE;)m#skY?j1IMMc#KqVSjagTqy>@K<{T)lZo}r#^Yo9%W~JX5HNaRKnK5mtJ6;r zcZXEhy)Q|#(Jm_z6`!|-uhM=8nj@+8K;XQKIUt#KMXv0VM7vyx31eq zruO~Mx}GZvjWWyU!6y}6;d>FvUvd5E34;_OI?s|XsZEAXehGVzAb<1lQjt zVlUB?`3i*hJwk3o>VGQie>Mp#WyN$8&CC+E&1Z@th$ixM$h{an#Bf*0{37}^w-e#? zlB(Lhl?AEMJ{NM^60CyjK)NgR#PsKs?La}`|GE$=e~}l5Sp34ZLS&X0zAy*eWYzG+ zdshJ8MJ0NmD+Y@7%MaHWi|6SMMiNAKYiV5CZ}|9M{_qmWf_7T(y>QOkDvNr3<25T; zI6ARIUXJbh!ZE*}dR8o?cNqWcLQ4aNb@+#0$u(}F z>@xlNPy|wXi9njCZSY}av&723!-7dv!EqC5p;LnVYb|jVc?;`m6ZTZDJ_S15+p$^} zw2UaY9x*ky67rTy3aoy)%7j)gKh0KohL*%<8;82+gMl#ck5$X@{5WY4ZkH+?&t=nk zDQWt!&`~LfnHOrvpweG_D=&QO>U>X^fB1*zn!?+ssz z1kNqTo_TpiZ*B1RQS+Z-&tUl`b;-Y}rsyb33g|21iN4#w(#UF}XwT+xkfpRKm0vRaDYQ&Z$87cGUGjl&?)Iy&f?|bC` zIhJN};K3c_w{TrDN@xaBT1zY@*rzI54o;#cCX15s2D-6Js>HcU`=a3XNuanh(UUt? zW)c~kQZUJ6$fMO~^ePP`MUHL7{f6XNr?i#8a8DOyZZCxsaAx2+=U??2I8AA=qNV%( zeJj!N=1gSl*^5(6L-yHS57u2QWr-8!e*M`-*ZtI)Oa~%n1coswsS2YvPXc~wagyd} zCHJRJiWq8yHr~n9Bsdec{C!q7qxlJsopUa?sR|3x*+Ew)fR2awbI{&8-D&5eI#dIh zo~(s>stl&R@;M-x^3uPv9Y9yY#}<}>Ffak-G1-=Um?d{3Ts?W1^EPysawFxRu|PRn z?o*IGZZO?5Mei{PPy7`tfAqNV;)$jyE{D3$zMJSMjurEBlF@obQ{(BrPUZGXVaZR} zA@0}%#Ar4Uwo66iW%y@Ev5&Vl`Q_yw7xa7~&fE|9SS|OR5`+9z>lmiN8ykvqt>>*E z!Rge`A<8=H0sGC8?DKOnIvV7qAQYyD zrFG}(O`!IBMn4;7n5^J<%ORV{fUuBUs4qvbTP}JAkcZlD$p6uZz!r9ZB$3 z9UY(dLw!`n4823!C~)qPd@zRR(SD!M9yw8rE>8F?`iEqtMG*MAJ2Kw|b5i3ab6JkFC1s;w;ZwQ14B*meZUk1Ri zD8XVG@DYTp^%aG-Madr~jbH`B^g?}}Tz4g0Se-4yj}AyE2uAtD2M93K*QvM_0Kr{l1`$>8AN zbUIE56esKdUGIVNIVh1ja^{FR0qQPCrR{8y#8(rujW`CW)sCI>nFXTF9{=yl5HJqTnVVB+KVLCz55a!`Fsl%J(lFp#ApC(V>41F%P`sqTN3VWZxO zCz2^auM17F%Dj=K*aJV<7=Lun`ulj&&J8tX)gj?tp6^8Uy~Dw6Awt2X z`_VK3_=De?(ZkJnKylCbSfC%UR@cBwF3KhX83L?-d$>$8L9m^(hR9gK@B(jrL_+I|VB#x>sI$6jm(V6b3D7GyLcox6D(>GGx1GeHkmgORlft zF-mxo{vQ6_iyhcU-pA(+_fT}nLFu*MKMO=xWLJ+8)^FtRDH65YKy(|ATC%v7A-FeM|UHGQGWU{)qT-@*eLg!hFA z#J|BUVX#zNlCqc7=whK$RNy4@B-x%f5L9I}dPGsnLIV}rN)`xg48(vVgH{O~Q9J^o zP7yRptIGCrU|CR{M3(;;H^x#S_P!MinZ%S!A$$h@l? 
z*Ux~`1NEZue~@(!-f?$r+rHa0ww(zZ+isjRMuWyqW81cEHn#0d(AbS_Yof_F_w#(~ z!+QUPS!?$G?dv+v<2ayBinV47NPn|njhB5iJ{zaL$vRppS`#E*?tRtM5w67{>DXbw zh#2*Mi9Br@G!XY?ND8y-A83b76nlAjsWYD@IcWK7WbyOA8P6k_Hz5E-xtz=zB97tv zt9lIe#+t9B=i4+ieFCz2KS_;5+iz&>=4!aK<$(*70hu6mZ?qdQ1>U7laKR>pwB(E7 zL~$~Dvi|&Es%tBK49RRR{w~{EHj@fdWKc;BU5LQl{B31bFAkg#B;VAE3gO78e)(sB z^fetu>_`rBS~&k(cqk4$JT%h{9n}|RqKWpH8=y3BcgL&ke$4cQRJRCP3hJwhPkhV+ zSvxdhQ1%L?MIIp&Y@}9DIMY9()BRW4?wikx{$udJ^cU>TwR14r_QM@BJz^8H<|)F^5E zY8*4)gQ0s-078{!{JFPq;FD3*8rBl;y zJ0Vgut6w5!^i9%3(2!3+p%fxBPL;Hk_Yk(1p1ogYp>u=xY~PICrqhZg>K+g85x+VO zNu~(X6bK3Er`d&YS?HgNTA04HHbn?h@J+Re4@G5fRELuf%4Fbrg|ZS&k`mF0Sy>6v z(Fc-GAH+0BQyW{-wvxdQCXk66p^NV+iPMHO0aEV<(G%h#tUup~Bm9cl zEOI=3YMT;L_9deXzl`~+6aXZrcv9Dm4RCOFMCg{Dk%bA`rKcHn@hl;$_ZNAAdzDc6 z%TLD_{&kLgy32(Pb|SyayPo0RvuJ$C`(d(77duv@AG&!*{oPQ-^W{yd7SDT$=Ts!B z7;a3(x!)`AcgcfLb+{ZuP{r6R=`XW8JrAr;^wbaECGwGXR%o;hIehgH5qwx1ZJ(jp>m zs`G$K#ez3xXuw0UR-7G0fsUW-N+SNf1}ITmp1ctCtOO7Ar@q~3vjPs{HFm@n$2Lqq zj?vYM&q*~j{P>dcrAZA7k_D|dA#fNIpmms6fhiF>)rfUUREUM<|LW8&@aBfKu7SRo z@SlM)&nGsAdy>CV6z}(ce*OD-T7n2X`tt09xR<|@#$;ET#%@Nz-Udsb=Lxwd&r<>G zw}jm%D&7!mmpHP1;wWloe|P0|pIdJPtcGg=IPz(SS;!SNwK=&zMPpUQCGBLGee3-* z_^!WnF-@(MBo|om{;NsBPU6QX`!9xa&1J4C-A-t+nx6~7NQw-`gXg~7DX%wbwGfGP zMh><9`u+Vq4W#Iys-;yPEm>sf+LJ`}pWwv*7)F}^azkK%Cxko#cM?9>}=2iI3 z@A)!XJda0oEJdCXJfisb0;NsRm#0f;e_g08hz#^u#NEf2wn!D9O0}LRQS4nB#g&|O z1&Wc2GVZ}a0&TW8>2P^L>+iurkL^|cpv>pU?CV!3IM|@=G%!OEI$S1g+XTgXbURIp za11qWjAXsxAPi(z^^d8ksd=^@S+WC7Qhxvx#2NEo`5>^gwDdcAFj^n)&zJeVG(P4Y zP+`TqX`1eDq|>51IzGZaT1umzJwm~b!mOH%_1}U>T_&zNgJznN z$D=S=(+&y>a!>4O;~bwiq?nNx56BiY#vEp#@?=k^O_D90tFcbY@v!K*l@0hSsJ`Ke zUm;b{1=bAau!Y%;oLMc?USUo!@t{tmGD&6#)7+|PWV|*Mma0kmQaqs=yl0)wuC4~9 zrmE|Bk4adz<8$k)n{qlw`aWH3#fcG1v2X~4k17FPUOQ$s=3_3wWcMiIEG$?lPEf}u z$0FuZ+%PYNkqn7)zp*1iCibOM6Kj)GWM}F`7PZRFgr^epNkKqNwNvzb zjIVa+AVoWml#~6SZ%AZ(trcGmDbBkCytYC{s?vF>jDjEW_aeLp&@y~^;T1wQsvh&Q zu|$=B7N9*grN{c>eUXtLtv|DRgz`2Vk>+~4uzSwiHZf~P%ce39G`-MqraF<2b)6W) z{=~}S?h3KtedWS>zUFVdT~8F%zhZd=9QP+s1vJWoe}Xkjt4^dmg*l34F~d4AH5+~465L@l7%Rn9CudDFqH-o8Q~1*i{sR;Ri=7I5>0ec=hq@1p&AWKSq=WMr?zg`qKf=`g>K8wc@ zrjhG`aMcVnKEE9kbZf)p=c*cL?DatteqObkQT9M*N=N5Ak^A*Kc2X`X9gf#?AQI(1 z-&~E5$R_G$Z3!;HMB$}%T|es2vRoIj^PB6xc6+I*b5w6;6;)OCTL@h;IS_)C(0Z5g zbvuoF#Tf|&?;Y%qGpX08C2W2m^?-2m29``7AauNDO8^L2$$_<*q%Ve zkBipp-!}L{x9xrd`rWT=9A8da>ZLc3yZz&;+eo%nfA?dTiLR|br|S_Whf$JlwnLq9 zU5V(+1r@mp)#wQklRSUQO@XbR64b7D(SSA5!aZDn^?2LP6AndO8vwph3KPzRq8M$A zm)=}6lMGdQ<+Q!qeE`7ce=8g&3|)J(#^2p5mLYQNf0aPqE4_Zb2$Ye3GC5`6XK04R z_xbVv!}a)CwVn~kTnEC&L&oOtS5s4itj6EcyR>iycr;G=GNkc3IP8Bv&`NB?lv3;o zAX~h#Ef1_g*_HOVb!?{%6S|^S+Bh}iPBsHKgA{S8=ZU4hn1T|`&Qck9oZB$reP_<^ z$uyM_^*uP*m}*B&6J&-UNimCNH9gV9`Y(=-*3;v>GZr$^k+*qY;Rw1w&9yIui-)2k zhmuQ|fxQe!X>WbZwMfQO_c+0CB&MSVGkY>vDdEY2AJcMJ|Hea?S63qdA0S}c)27$# z+??ac$&Ju=rO^DS#mOvgdh)9V(+MSAT^~7}OWdTRF@t^JTgGOMZs+y;m7l&xNgITJ z3ZcHG7j%lsrT&+uWux!&@IwN|=%aueaX@-c{N#o}j0}V{&C%}5K1P0bG!67P$|=p= z%ZNos%BH!vslhJPgz)ZNb4$s&Ni5P2kcQMKIiKmtm=!Q!fRVQYq`zhqvZgGge{u9# zjq^(h_MFR$K8x z7-6bY^OaxHHg~*2n~GOw*pS(e{z&Qi2p&#b4n8BA8>`DAR|MIHr~moHL9Y7qO3JKt zcqjlN;T5&}*UVj@>UhlxY?s+!hsN@5f>Y#FOwHvW*NM}9Z@jAbF3ehdE=@7nq6#mcp_qzMCmUwK`tLCQ!r$+m|Tl3NKG3?;C=g+kmPHl zIPNrxhH|wTvaIh8rT;dI$dk#^9^lv zbuoMi;ocsgXBt_@ce%D2lt#MWOG}%dFzE&V9zg&VZ}F;SeeZPepyvFF@R=L>wL%`6a{soz zPXwlZA;l@cLJobxnU-_zpq(Ld^c$|L%~o)?*P|iNg~-7P`5kdNcs{sDTUlV(J?IyTd!n8b$^?us35*x*ubW{kHwbN>?jX0+u;{#2Jm)Ylz17A^&E; zgjuW|EJ3F?Ezu?jLC+mb`*ztnZFKNAh6fFP*JsNUf9pBu5xw@sxL=MqD+7mB+41D~ zpT{ac_)4yek;`iBx;+C_j1fjjB{|PriKiWX@S!b7-F%*eT+5{bgcJ9h4HDyjquGU^ z>`w=Az=yx?|n} z^gdfzCqA#M1U%!D7)?c_k0&3rfbJ8IYEVU_wM4y$L 
zkw*990NCr}#>(rZ$Dq;VX&|*9hz)#`diy`_jDPdR{|SoZz)NGScDmKFDaF%>x;=Ab z-_Tq1tg|B-z(n6BHz<51s1hTaY7$B+=p1mCTsCJ=>cQiMlGJK96{cLl9TUB}yc`(_ zL3B8)ZlzO63Mofrf8A^kHFB2PqluY>xadY|1Ygl&N>g5a^vj5t^wGRaQx&M=WkwDP z^gbUwblEu(V4h2#=)cm9#)403-;UB34bqHj&T3^w`MThK{OHGqqX}AA|MgH-30Gcw zaS{;Bh1RL7ETM++Ht1(oxB_-2=Y<^I-Y|s7xqIFc9)nI}$8E#=9&B}TlH2duPmW7x zn0cTt5a2>N;R(X6P=W#i>*9ZaXcuy0EyU z@2_g7hA@Ao?wt4A{20o%W+5CT`OEn~j2O=r(Tclpf?=tvTU4KWmuGEnsank?lH=^;$%-zuIz=UOP^;};SXeo0%x7n?9CBveW9 z!nX_55B)}U|M`FgklBHe<_U6{$1f?ugmNH+y{>kcvbBRGt z=WfKGIGh|hU*cOxcE}*ix(EEG^Apeg_+uLt5p*n;AVE3GMa`XV-NVDhbF+JuI~;`i z_IbJsM1Gj(Cv@3!C8XmroKl9YL6&92Zx(;oR%g6LQssT{qssRLJG<9}*0%c{(j!tE zIC3Gw#vdvm)`kRhy;09x&qXhydDGb|p(lUXB0&N=m9<@g)nOEH9Dn*HG&aLenwv)@ zidGtS7_B!uhzsnYFWb%(*4^hhEo^?xjFD?G&3^4rJ^KgW1pTs7FKr89Kl$&3*SbMm zi3W?rR6O6j7+eJU{cXx(B4it%e>4L*?dzdxt5<{5w$@?ve?Zx`-AJx`oN<;_CcHK@ z;j8Y=NiaaYdZwCC{*vlUnplXD!r9Rxwsw<$5!kUmNaGv-Z5nN1|F?~y**CUcji_8N z)e4$e{@Y9AmktUX*08KiH%i#{+*D)ER~ax$LSt0T`D&YZOn)xPP^`S&&X`p0u$x~u zPI-k^+xydI%xg{WKX=-d;9p;Bmb0ozVd9F;Sz9vL z3w>bgQEk(oE7DstA%YfAQTLEkNx>(J!n2_h0jm-MFJ*O2^|s%t1*S$s!{!eyoNU#~^ZH(zk{e89BU8y!%wkzaM(*E$_`erOoJG{Gdxc*jwmQnrVhpqmw5 z!WXK_$i}vn2A=8y<0X%B}4f-a)YX3&$&;|q9jypOd(o90^)TAXZ8 za)!itJk>-hK{+da*DS8J9$)m|-!!g=6nu+G?bQrVssVk}V2~~ZDuX?rWo0tuLZY#( zLl>DF3$@bdK?OeA>W%#HHDiZqcxbzf`w;d%eAh4r|8eC=Z3pITj4cG7P=JxKAHvb*i&PI1r8&kcTop7*)` zZ}a6cXrT1t^On|HxbXW^?OyGc3z=oR!ihKqrqogF5BK}hkC>0=NyL@9J<^w=U(XI^ zV)nuYA}4pE=F{HYP*rYEdctx--ZhFArzfBE>Z|D9z2ypyz47nx4QO zNY(lWxM;%Q{LY(|rLm?yHhh zfrvJZ-@zYy$Bi+~1anAVUP4NPo6BxBU^9S>_beW;<=b~DPM48*6kMF>w6ZTEl=TBf z#<#V~Af6v9UBl29-_(^<`3s8h`VQR}y2^hD7*!pbet>2-b)ah3;7ch7(%%q%mXszI zuC`6gczA(4*PL5ij;n60f#XOlK4Aa-!2-WD0Wh3XsLMjxJaT~Lt((;f@dx?_C7TU;`F;qt?cX9Q zYp8xI&4GDoBImL{!I0)XkWmku^A?#Yc(g`oKByvXbvSW*dHa@y81w$9L(rWQc5_Hr zyR%A7GDv7=-4{?}=!{&-V%*QR{xIbQ$#k?9wf9J(vsgO{t59MNf8HS?kC1uf@`O%k z4}MWKNf3xllu-MX?5!~W^!$LE_fPQ?1wF2g3a)vFYW%2M}yw*Xca);-P;!!CB*$3mnQ|{~S4}s`B;a)`W$;9=PFz6rK zZWHvs6+YzDie!3uSWHpXc0I9x_g7&GP4$3hT>Y54noh)n=9KGw3~6V;z@dxP&30|x zNa%%>{Riw7upuM{&NGR}`53O}Zcu+G0+$=YKc;H?@Oj!vM8u&duGM~`^L^e9ayOoo z^nSGK4JWi~gXdf{1PR`9n3|iGVXB8t%Ofq2VQ1M%k@waHT1ve~{$XJAE6$g>Zqsd$ zFYIhYTx+!%M4T1~(a80~2_n`#21Mq&9)4bbE)ecd;nt69Nfpo%pR=d?oN%1}ytZmy zrs^X{cv0#|L>6^yLP4BN2U+|YtwF@@trdjt2^^bFx)f%u$5+N%|2Hp5(}QyRXQmq+ z6@0d}r|;W<(Y-U=c7^E|fx79y`qygkx`RiX;AG6XoQlQSonO!M*b8*)_l8${sbDY` z28I#_4_q1hcE9Cj(eW*H)E|aCNH0td%stPe#g~_?G&O0W>OF7f?Y`%E{`1OH?P<0> z)%!;6ebV@3WV>zgb~88eE64pj3ecbD``}=K>xU!!Iz{c^V5NDf^Z%LA zqqsA2VVSnU180B47p3N@!k)fkMe;3vI2Sc3n7l?O>y@aV59l8x)oe33WphUp9E}N#gT|hCVlYi61(aQ6Zjl2z|wDm>k`& zCEQFW>Sa-vq+6XU?qfub^h8re-%Zu$KRQQQ+4uySs6?--3Yx?f|9e?1gH$7jVcPFc z9(65dDKv1#N>yewpI+c9H=MC4CX>2eX9xue7O?C-7vV)y1MeWK!v24045sHtPI%M! 
zw;``)K_`iIq7;u1FKBFND9l05KpPj|mC?k1-y6v>9bfx{hDaX)u)62_wfH>Bp59YN z1e5{x8HN9PeY$BncO&UzPq;%h(D&N#{_UK8t+d`7J1_?X*oq!JT8vKrqB|~4kG9a< z`c4WzNb8h4aiTQQ9d8ebz%1|iFQ19iY>Q=V#nt`sI@m!QpBN~MSHeLlWS~3}`xFf2 z`$b~p>WR*dR1e*nux^KtBF_EZL-eCvAj ziDUJ^5i|$#TI-en;K|T%nojiL5{v`^1$-ckPiofFK=!(Exz~@dw@u~i@F&BK@Y)xX z9?yHBep3_o-pzU!_N8jA5U5T!X+VR_^F+K;+YGMk*n1n!9S-Bbn+H2O+?V7`HOs^% z&zupiY1qS}*3$i=7QTD-)2g?{pv_(rjc;~LTG$$aMDfw0@3*UgvWqtzU&+LHe-E|I zSu=!rlEeqM{e$>St-}@KlX`TF4;(R1s}hh|agJkneYza1(9i?6hz-)%Wd|TK_298< zTPmSnOYajUmUD1b>Bnj4d853#S`XjKG7aBO)QGp$=ruzNmXwe~tu*@(dcqi$+2jTg z+I6Y>&T~zqI~VG|44u00&#dMR6b{QeZ2U+w=wHXT?3(c!FQ6dCOExdlw(Eg*GMiL0 z&YjHv)lo_scC0JNA7c@X&C&FgtXI)KE8&siSME-BH#I1C+=A>iMJVPc~J#9 zX$Hi?7SV@I2Mol`kEq@OLMapQ^s4fPuMkr&NT4Y=jtVV0C703TA9}xM z(%ip4BfNy4P(kkpj#rF$N7Mst2T6oWecFnM;NJ@TlIg z%TKEAtoi6>H!}xs<{XZ(2N}@mg#R{Ii_8+cu@k&TKRwJj&$7`BP`s=e5_HoI9{-F= zX^v>T$k~2m>iq$%CFM32I^@!*KwY9%ciSt{Y%h;CI_rzG9N4MzteuFnlJuJ4fFNO5 z3C$i#8-+hJCVxHG0|n}}t$T(|+(u=%3#)&eXMwi3DatbZcp6Nj{qd-4TZ9Q85@5G9D03doMll+bKpQN&WN>&LDdoU6PV3fuI8O;xO1 z>`suQbR>X5vF^Jf@j(cquN~a_IGVocL+y!R_B+9BGgb!&h{BX`mTILlXmYC|Qh(N; znH3xC@p(;#MfcV6S+i-SWhUoHy^)|!d(1Z{AQ0Z13P8<~#JhSk2^qMR8)i|@h+#?X zLLW(FMxsulmz#)}Em6^Iw#5Dy8ppxSz1Mx^!#pMY;cbew~aY+zVHnlMhD`$u%| zXww|q@ha`LcWqBZ+Sj{Nhs4xq9@%<5DZWTqg^Mdu?A}g+z2Sg+ltXMEMAh=Ql{P3W zlBxke43^7}_wyn$oZ8~xAu_qK8=0)zxdOA%2}7t6ZL@Qb?dZf2(7t8S9%1`^=G%@h zYren{2?bghDnMNvo!}f|s9w0!G)$+#GbNMO+fL(?O=aLc;L}RyiN0hOFv}xtKl;}o zJ(OTo+pX^5w&ye1C;phF0#x25JZkh-I)YZK6f!;SbWF4{;|TEw`V;+gtLLA1+Qg2b57%D$TCE%xe5Q`JIDO2`;|fG%Z6ryGkj`JtD3Icm|cN8M`Xa1 zkT}d-qARrJ+3EJ3S8)u{wJd;!9yPzY8GU~3Cq#wHbfo)z`Cf96N)y2)XA+anU4Jo( z*-x5>MH)8JFkgRPpI7NdhH}^2!Tv+&e4~+2VeB2AKOr7`m;81~97ehnGgo&^92LXP4`tNH_JlS-Eft7v$0I1t)hQPVfYo@J^cbh3M zgwXE_s_kg}*&gEDojY4e_WvXHm!+lF=M&_E75%;RPFv$}%HYCr-2s?V_LZA*Esl{; zZ}24tHi4$iE*8FBs0&iGs7Y&gdaiCWm$(s6z#OcRMvm-D7dGCZjQ2fFezlyvtJ;WK zqOTcDc`!Zo<$pHNzXZN$_qHC^0uvxf2R=G@1{1-19d9P)$z&L90_u$ombwB)q=55-?sIy zXR~mNgw6xjCJh92G}8sH57>H5N?23$_q4fn5sUh9L*Ei#Vz@W^pil+;BO?(Ggu1)C zbv+7LqcJFxEOdn5O@vn%G}FIboBFxAasOLG?0WobrCDzT4GD6O7z;$u7%K!AP6mcJ z;biwx&0J&01GaMw(0Icze`OKF@5i^8$2*uD5DlN-LMih`hUp%9P>bO-q=4k2La8=U zOe|IfZQUz=ldU+D2rM?n->7}(6KcN<96StO+ffEW-EgS+~(m@s^gJ(#$Cx>*) z@sC4|(Mq!w?&;atqQJp}=i<5=-q*QnJG>4VO|t&jCzrkKkTKwKi-BJRaBsW_Zmjd1 zoiY+_#3!boRQ@LqXIZ-q0O^BFSZn-%JYclw)EZ~w@_ki$)UG@VwLk34}Z zH|TM6|F= za3CO60zjD3z0RjGgUDz${Y7`MK1IQ5 zoM!p};puWS$~?~#22}_oOT;@D;Bj}f2SFNtiy<(k#6KI8Nvd?3VD2SqU3Z~Q&F=mD zbA+rnpaGAFNYY$`yQ%8;s2}@Z^8v<-$u^Wy!s_|8Yp*L0?#)-Qo3W>M)Gfha-X>F8 zQ&JPE77U+#T@q3;6_pE!#Q^oHX|+fp~&({Iqp;{N(T1c@Mlbu3F%D3bWzGOKaVgQrLcf zlZmZLu0w;g1v9z}CcUj&DfAZT7vJ}o4m*$n83vBAf)oZRIG{?YMhXY<>P-G?xD?IziJMu?# zed*2%9x&BxvS79C-6L+(I%Toz_x>Vk`0S&~bQg&`W&%_1jx^@{-39%`woq|X?07ar z=qIJ2#I+$4b)a%x;6Msb%VpHIK42xN_k9!M5sraIObQ~xcqJLu=y~g1$@wP`xzYYY zx7FA~aI5t|Lg7n>`I`;ycIq0FgqRi%t%up4tj)iGq$`j_hyq*;Z5JT zm&Ui0CL}s#Zp36)5^dTej$0n(cXePh`6(5HN*cs&?WNf|@Rtg5gLT8H=fkN%Y^oWo zQ}B3qsPGhDkUf|6{R9L1^D~R4aH6|3Q>(RGE6hchHcSmq`;O_6teG)9K@2UB5b-)B zH-FmRT}RBA&098UG*B)QXNu;LJ@ux-X`6;{&x4KtI#}iNKaGsP6_{IB3OcEdvB=x3 z>WXDZXUCMq`7_cWapwTu4bki|N%+g8D}V~=KaHGc%(GUPz{fl^mmNB?nTiCz34ywE zyTYIeYIN(8GQRDF5;>hDp~rEuK^9 zH()iLO9K z=c6ui`&%Ol2^EglkHOuys?SsQ*4ZlC-*R#B}fdRSVZ~&eNLL@S8<|%C_)@gpED9y3oF-Uw|1({nEI7& zWp!ic2d|rJaSFqjRab+FhtSFSpLO2W9BuQ<45!(d-B8E}wMXt|u&I!mouydmFlFELInrhD`|!!;Q4Ut7klLA>sOh_&H8zZj#M!Z* z3n8M)ERQEmOjL~*eYb2{F;YP7U8YW!$h0J!*<3?_U~Jc&iU~9 zG=Z42;6Eun)eVPe7T1T*m}X(j>TpRe_dPNo?4@?|r;q>0XDKVF3i|KLIy%vba|jC@ zEd#@^zBwohWFEwfWz`Ir+Quo#30p5_P4jI>*5y(@^x6`;_7(z3>lOm-m{(obZe91Y 
zBpkW&cXRj&Jl>ybx{NT4e$hbzWzJXX(4U86t{*e43C^rgjn`7&%i?lECXS8<%*>{9 z3`}v{yx1)($0d9tWyh6%tcANOZhfagI(3{zrirO(kyQJ!R5*MB9~f4MXpT61ZCV`(UkU`OlL z2vA$GP(CZBvga0=AtMupLYNno%U!k}oYGoX--P^92BE_p&}(uOUJbuUFLz4cNnivC z)d25|S_R!@Ia|&L=)ujz6$D?%qXmJ*dOA$enkGT=EWo=jsf8nu4qz@43pC`p~&1g zgvUe`fN8#8i%X!_F2Y0&!zIoBlQc$yQ$4Iszi@;=uZ+drSppg0#SX~GMvMLY9=P%q zD0xE{wUPW&p6{`~a?47)3qQ)Ga5$G2p}_v!!d&2hG0?$% z>l0p+=bv1DSPjN@6k}jaNJe#2e3xc9MZO^wCPG@^>}?B)kgxKjcEcC~;-@%53Fzk) z(P{Z%Y?sk29&D#W=5OD>voUxI#&{%?SXS~0rtqfJqM_^F83|#=bj*Cgz2{6uDRAjK z3QX32#zRm~hWnz4E}>{Fqr@6AFlR|g4j88Bzk13qX~qwc7+!G$QF5g6$fdJkV~)fW zcbFEm9mGS4lzc{qi5pu7h6lUGB$0K}(V_XC7M-s&7!s@3Xa;bewdD6q#(@?X2(@^vf;gO4~2`*fdU34|ftA z{T{r*xkQ6t1`WdjF+1{%6LI^QJWj%PV@Xv2IwIl|?H@&(-0G-n9I-;Zhzxae zc_WOQ3G^=;qN?Id-3tK)fsuT;t^h0SNb;ysDpGbW@Tk87_}(K3H6e9Fi6*LN?yz#h zzgOsnh^xVc(1~G0{|?*R=LBo$q38Riv3n>DJI4h2dRzW{>de!WL;!iXVlr8K}+?KJIL(~&nL!AJh$jP-9fNHBTV;!`ZQ zMM8$e`!76+k#`#Hfd!AVI=?|KthH_nn}m2a2OP85Nzr5xl;SA4&k9&}TCuo#`luIO zo!5PdtI}~?uJ=xtEd)eco%mK7)k77EOU8Q;li3NiRlVV>NHRx90r$K&z0gn1JU(pS zg{xPz+8RE8t~_Yfnh&;`yiC&l$?l5QYLXHOXr4AZ07SzTO#uzfqYO)FKAZgwK5lnvDL;a$}V{GS*f`*0%rqczFZCBH9mg*9hXeG9JB5h|8_0DjVIAllWYg6`moZ_Yj zK6CdiU&=7`ifb7biVc!V;@)s$^T=9gWKrk!*lX?1aGE~wmE-V|sVsOc*+03MXT`H_ zp})`=Ixn=r+j{GocFDT|CC{e=2H?|#VbyvLl&K*7ef))zO8h}bxML^z<-|%HY+hOC z`pU}T!4aP8E!g9*$DyfJ#ES)XRZCSX5X>(%KbT@n?+C^9-?tF`)z9SpI7SU}emB?r z7?X0p3gTSxZNrNpAucr9Lle7;tC0=B_*;QkZ#)Rp1cMQ(wz{yjx$TBJnyu<>=TV4g ztpcUY#f*%Q22$e+d2xC}r@bOu%m(q&jPaS0tC)eIN zM}^(P$GwG70JPsk9H0!MBp8}yc#%J4+H&mf_rLSlU;nd+m|2QNBq5On!^)A@s+2J^ zs;C-Am(An&&)PCGkkGD1G=Ks|tNatu`@L@0+gz0U-h1fs)ZyHvZi1P;xU8N+ZAdeO1h=Gw_ zm%9Zbtgn=ypeD&+ePf|$GG|j1tROZW3_obt#mA>x31J&|r6TkW$|DGLBdzGt7o^Yy$R3f9Q^+6<0s^9a9qf3uIf6tbemoKZR7$}xW3eNf~jKYxI zF`zUacywspezDyjl+I~|GZF`2UKcA!U3*4k;?J$QKEv1)3K}z+8L(b}4@t1dg1%wp zDjtGV{CLrM@yqd2l0qDcoLEf<@Z|E?NE(034^*9~hJQ7FY#K{vb@}`?4j?8#!D8vo z)xbU%pPCumlx#vdG)&6Hhrm^-MU#ZLI{u|3C?W;h-3qy#D`(mza(?QOGrWxV21JgN z>FnvLCxbRVq&&+CNgNK}?1hw7miHG&VAkKaXxH!mrWz&Ag(@mPgBAF>qo1o0X~&@@ z<8QD4LO0F#-8>VQ*M<6imWj?nkdT-dd_VMMMUz+D-m-vm1|$KtN)Vg|8-|*=P)`+N-!YWA#D7Wk1HS-V54S4N#z?_Y(i-Bb!g~s@aN=3%OdA2TAQjBGT*uWk|RK3zs;aejKuLVXHRV8_MQGHAS$y0f6<}6jR)3y zKPy{^IE~+pt>Cj-f#@)4ASyC!i{Zo=*R1b@Bw>S8ocQAMP~UJF%)uyKo*0 z$_=o=Z;gN*?%GR#LDw%|6>=gYid4|c{syGwg2{TGCXVV)G#igmsm!5KtXBi~Vmymc zCq$9lZfZP`#-q>(71|Xp4n9bgE=jYk8nc1H*ugvOuqqbZP@~Ol8A^TY6@ogxZBBE z2+h_H5%pug)!k-8nWMq%1V3QJ$Q-fad#ur#*Yr+8tslJGrUNVqXt9$ApKjUzi9g$4Fct@KS z0t>s}m-p^HJPGxA+4Y3_fluHCUXt|pQe2=dEUkuaknOhJ;(P$Yp0~VR?>kcNeyjAL z*DC<9NcdH7^ANT2U}LVMkC~VDJOwlj@C`fK+6*M1BzOBmW1{)9-tGAgH2g#|G${Oz zDYT;c$=tr|92K+{S@*Jmz2e&hlS^_28#7&vz0$l(_4b_6yK`ZKJwt^GTyZEeu;Pz) z%X^3J`N+B2(eU#__Ra8_bvNx z7_W8#ejo}xoq9m^Z;ogJeuQ5yKdn99r$Mt~TLTn|@0eC7i+zhF>)sZOo1q`z9dpnP zU{j==i+Jz10&oGoJ=`&;Z~VwF1n>^xc6XCyJvU#6QA3g_OO+V-*2Zkgbe_iT%YkD( z5c$xlyvYz3nHufkzKtt9Y@>=XBpcCVf5`TjzdPgE^)Tf~ROU!#Iqm{l!es`QeX$DT zGSaut(}^hcyM^s)KgIOE9Xl#+UQ@y}+(8O%P&Tx#L6gPKZs?=eQRA zWr7_4OrC>Y6jmvHd@ya^Ha3mVv$T09p{_@vOPQj3I z<7#jyvz@Y2M;k0%?ar;PG*6x>kom{m-JOKb0oV#sS01p0v&wo(*1J4IA?BlG_&hW` z46XYE$rb6rmHzpCw{m2k2kL8VxTtLhr~A>l1K;XRBB|5$=X5>qld69ex|&$CKaIAjNdWRKRE*k$WC*?z^5yaK{YSf| z$T{(heXU(7(-kR}V3tq`}hvosM0NeELo16&KgqaXOb)=p4rC4JA;5j&E zP@|vT+YU=IYm4yhIwsbz!qmj&MknMS=+{-=?sW83ej#-VKe6 zmZGb@wjOI0?e2;1<|a4DR{{< z`D2+<>lSGf;re}Wwzr|Qq!f*fO|U!KQQz1o&UxP2L5v0t@HQ#o|R5W9E$M7&&S*)qfsuxn5IxS<{2+ z1Cwm+EI@M+nw603*xHC+&q_7)oY7E?6Zwa+Zu4$skJ4 zmfI?i1zZ+lYZ)PUI#Rj1NS4R*_v2_mA=)K9#!jD!oKd5w9ml{-^;gezOv7>_Ph!aD zI>_UWaOog1OW~8?K`vJX;N zF1_QMxO#l7JWlk2-ooQ~1NUvpT@Ce1t>t^L>@QE@uOFVku&cj@A3ppQ+^~2qrc=Lm 
z(fm0W9p^y)hE-U4#hTY_Di^U<6z4R?R}CJg8BgFy@QWm!Z6y)6ORqiKxGa$(2EpCP}h1;(69aL?Tfk*+L+ zX^e%!x>{AH$ZN2XN%T zUL3DCW9*zc7!lXEHq$BfGaSnzQaeg_uD~lFA4lS(nV6K5&`}4K@O+Ls99Z=tp84B* zXc&GCzWMOivGlTem^phcW>3n3^XNuw*nNb?#uKoc*}~aqBwKaxw-@4r-~S9he{Ka@ zCf$!`|NK9=b@41rnKBKt=FG;Vbl~LP9rUce8%Nzaxbn(rNTvCU&J=^p(bKVT!30D# z9jE7J8godSIISw)P z`|!!OgE)5h5X#zl9y1lgVuBUWQ-uvL{1#9Cb0;$I{w1Dy*F0T$!AXuYYQQiVF~0YRER2SW{nI+c53s zZ{nT>*=M%*UbyhtQB!^rmDR2Eh@;1_G2@UOWf2d(;lST=0vngVhxgt)3wobA{GvvL zu`QSi6)IE=8t}x67bAG#ofjhJ&z~>8G-f4`(@h$9!DHC4VaUwPR3~RD&I#)3>abzM z2C>M=3L=+WatU&Ba)i=IZ*@>vd{d!fFu`op;`U3^@uh3BFnx4v(6#>|FAh*!A>tOp zFV8Gy6QZM{L_0FtVi1*>j;NGuM5hizY(@@Z(uc#EN^|(c4CrH0p)n=Eua6QY;{6(( zSOkNXw>j`on10a~mG;Q-@md928sRB2FRrll5|`H}tc&pe!1DL(GXGFQXup$@k%8H> zXN#8{iflp`{m(Uc>n^q&#@4|&euaALM^*G66a^jz%b%4?*m4$cKf{&~cp;l#kSX_- zV<@Q@oakOo3Wb7u3Av|W_pZdeLaC4D+Xg*vQ)8h1+9;Go;$m&Wq-#frcA)BX%jJX$|TGYmPaOG>cnxFFgh1G87W97zpNx;HL;ZuZvUO> zlKbTI9Kr5abi+iWXwG=W)mPz`TbAOsrMKbMTW`gZ8*jv%d2^AGl>wbWUbf=ixT6H< zM-csqewxNRv2hHSBaLVQ$$|EkMl?0Hz$NJsmzsj3nkBGtH@jUc5_M_QqRSw(QMLdgD#J@y0SD>#v9c#{-9XC(=Ftl?j!xOt0yu}wecN#sij=u`wnsfbp9KQYbLZV=!aZ zTwHVA^|;}N>#^|Sxin6t%eN`g0M8dFJl zdb6-J$Vzq=p+wH(T98N{ye*4R6rmtBEGN#qn8a}SBtBmNEv@aSt!skE6F@>j5;D@$ zV5WA$iI6Rj$oxVNNQhLHireiJih=x0{r1~$QyD7dCzod^y~#x7snAt|%a7Stcm8Dfz-Y#4@R zr6Vc0{e?zrM08>*GPB8@?q#RNq3=&+Ple8!it)1-V$Otg@$s>{dTv^evF?t2V=7nf|gb!RBZS^{QiHJqeMReOYXiM^G7Ga92Bqd+)(L8*|d-&Ji{(}uCWUDV{1`RApNl3OB`d*WwXX&Km1ek*J4~9^v96JwJU9}i< zCkz*sS$r+UIB=vDSr^}pM}G1M9=iW4_}Vuf#*e=LOLp*o zqd)y0Jp8~{@a6j-#1DV;T`ZlG0j;YR1qXNISXpD2{P?{!_~bvo$E#Z#k$3goSTb(} z;>7+yI@r=C;@aD<#k6D_jWKo{`Sdlsv|>M6&RifN@WWkGfOQ|Q#LAT`v0}wXSpMEJ z{NLYyk6-=fd8|HE30IJ_3;b?jA?M8(U&e0NaItORtlW4Qxq`ufq%n(d>lZFXj-GcN zID)kwet`7{%7sM@Z}}dqT)P+5?OvF3Cr}%(i8%m6Yl_3@IrA}hd^TcqPMp}f6Zv+k zQ#8gJ=w2Gv*UU5|M9bUSXmlpTB&Q*Z>M}n^XC_m7qVm?8XhhV$qzWFOIigDwk6BB;hKC-cHu{4f;}=gni6?*h4J^zt!_|Bo zD_?jP%Z~&w{o31b^KjwWo&|mDK3p*+ z9p;{u+*GJgp<+;hUncQZMC;bA6ASOW;J$eAV)1gVH*MXXz+X)Bmv1Um^d}@qLQzqX zc&OwTv26Kf-n@B8OiYwt;P-;iML0(HYZWR63OcH{{1xM{R10FG`u;*E(oohB82lwE zZ$%RwWrM+Jf|kv`MxroOo-l}NRrcFYDcjnBQLZL_DQOB^M1-T#Q zzMwba_Q_?hD09&jk+3)tR2+%6nUD|@g_Oj2WTYk`Co>JBa~y3iB??O;Mw<7_Ytu^oQsO_)pd_2~!j=?RSOi`(k55cO z-q`UpcbqRQ^^6-k4oQj0)XvPE{c@-G4q3}2pKy6e^dl<*N|1bRG}Tq3xxR_wvmz;d zIAY=wp%K<9+{7q}F_5jk%aRR{oN2@(IXfM8RFsxr$M&6AzWgJ+_S!PMwd`%Y{gzB` zbN}2AZrg$T%2ZB5Rnxt1W~==xkr~? zU#^4%P8at>bb1Ee3_mkw=Z?UPx$|-PWtU^#oXJ%G(x_gs)iPPR!(g-`K5ZB#&zy(L zueu7CU3L-1=4Bu@Mn|%zKsY|F0p_?Aj2t%wb1u3VmtMLEb7oIQc6u_USDqz^K9Y|j zZ+h6GVvsX(6lTnrjs^2(ifx!ulM{s%9=(q1wA^pXMEMWB<|sL@L_(1yC>7pPx}l+& z`r&H27eHo4hFB0X@Z5*3uQ3x_WaEU>1
      r_;dYaig@nR49wQ_ujj~GV7$bJLpYu zd?$q7dSe+r`fxc89N3S>rbZDaZ{5dv;Cu@u6(+i7q48MY4=5I?MA8MiZ^^**kBRfC zLWPP!2EQF`tuDFCCTR3#m`#ydQ1GF(ay#C8^*!X@3KV=Lm!omc}wrc zC1cF+ww7Vb>W{E`S0UPD{S`=!wWw;7;I$WF_s*ke4Z6$FB5K%NT)AjG5~BKfAw+9L zYTh(le9=^7@-}C_M(p|cV{9pJ3+eX)aMzb(|CVhiv*h8TSrd>ML;W@1ava6G&%cOm z)d3`rorif-(_rc*3m6kH>BjqT^UN6dJ*_yl^+UY5VmIn~*t+F(;4vc9%1h6nem=<8 z9U2-t6)<|eE2&gh+`qZ(A>$1Cn4)J3rVQd1wU-H9VTZ}!McB8~8 z-}SXMqrSxnue$=fw;n*F*mg&O7UrZeG!L7NQ5oFORH)!up+ZGOphMF1oAKBm{zVVI zui?M{{w0>=#q?VR^ETF_MrJHPbfNo+XauV4A=v2!$qA6p4z8!WCCsAx!!6SBqzgaO<`T75$BNmxQNZDZ-Ecc@%4Q)dyV% z2KsOM`I+Sqt*zYUW_Ug{NgV7@-Dy&M!c><&5yUaVPo?AWw4Lhx0l9|p}2;@ij)Nf1!8PuD_m?D zLfL*~Ai;fLb#=Aaa*nseQI!t{9`eGod}Qlc+qZ8Qb1`1fW=k}o3O%9cDux262c4Br6>wb22byL=Gm7 z8;xm`#$rO=NQ@YkiHy_~#KuICe`Ak}73V8t`H*m!2t3MYsZH^gTQTu*B%@f_>I4h( zj7A{h=YE_`r_;aR=Ri|Kt=K+9Z;nNBMlNC#l7oGHsEP=sfqr<*^ZUu&?}c6)pgFA- z8JU?Fo=fF1G7n?Mj>otOOye+~Xkw5iDCgtpoM_y*vB=9Cg{-VhvCRrwoRRa)v2ngR zezqv0r7_3{N4pD>+k=24(X*RGXK_y0Z+~q7Tt{QMMC;B$u;7Dw-e_vKsW6+&;u+iLre5k z3j|b7e4oc}Ev4UNGNwvMhNYTu=Vj@5U`aNHrF3pVqe8{G zL}*tltOJF1y=Q@dA`p8n8sVn4QS zJ&5vVS*1Xqnklxg@Y;*<;h!JF?_Sw}l9mv416>@ZUwl@49rEw#E6#G8gfWy`2{yis;RXmpvJ-Im^>6yuhG6fZ11nAkD#=nH* z1YvzdYfV6Ab~@s1Jf91)5RcAa5*9C_iO1)0!{K(LQ#ZhilFh5IxzYnyb2&bJ@hLp| z$Rl_pgdTn373{3F%d;7u8!eTEDEU0vGab;tI_7db_#+DY(MR#)AO9FXef(+s@vncu zFTc42<9h6i{gl2M9Ne`7N1HroDEJKj_s5^(e?rpvKmYR>e)F$&C<`hx`R#S6D5vLL zyAS%zMR@S3XYkkOUcj&Ko7QEw15aBG4s67CDIIUfO!9Ss%5sN|393A*?sq{rJx z4lUUE!q4#NpFY5m`j9%Vi^8b6*Wud5(~)ZH9{a$6+k=XML(jdX3KeGz8pxv(k)1nA zn2FCDnSrS8Dy$KMj~>#iP*hq5R&X@tjKtWCI9Nio3{MBG!A65@vUnl)`H3EAX<*iM zj?5}ls8BKJz+V&}Ja`ZvB)Cqsw6tLD z+O=ZqnJH7I;L=Mk726)2iBeN2pZcJoVz7We{w*kJ$DtE;)HJ$-uIB*MRcm?A|6rfZ zi%9`$7yM;;3@?U|i&#L>umOGko>#o45(|kum1k9K@fV;A)&L>W@G-bB75x5}{`mV} z{tA~esu6Erh@7Zv9MN;&W%pK`7!>+q51Q#*F-5XwAr=FAae zo&1%Wyg+)c>yC=SfzT3H>Oco|xUt0uotPUL^#);C!D=>%?P_A9t%#3}MoMBFGE_*V?#6aCM#0XGZ7abFWv`fxSdmE9A9VeOKA&^bry>mS=rfQ%aogL zz8QDkc?WL4{dRoewl8#{+rMxd?jZVtpj!pqcH2^1d+pVjF>@-CQxc%l2e+DG5_zUE zS;NW$tk}TH69#goGG-V0kwp>W^K@$mlmL9QZCJA8dd!+N14)UA!B7=&B~s=| zGAI|pU;fiVqInn>p>U@>@w|>n5d_=ff@(or48;eiJ|V;Scf1kG_Zd@0^b;Q)dWg4jQwg zl94-d6h@6=8jaC;V=-m!mALPrZ{dyw6OnLA*#uy3t3zSY2{;1=n5~?yZt1i|BROjZ zZu#bqWQB?Ee-$^(8;MvNq&0?EjGBKV)#IBmgSVgwNU+xyVAa2WiTm%k9p8N7HSDiy z7xMry-VPu{nJdNEQ=td#SA6w*vMhf1Aij9R45S6W zv(qI_#kDsr!l+a$+_eYs>JwkbEw_9LPrSSiMU5Wf(7>884$~%%Kyp9r4sj-6%jf)D z{CPhXRB`suDlaZnQt7-Qp92jwML1bnM-N1LWRDz;+=Lhy=s~YX{y{`M3iRZzl1lwe z44#MxZDsKZ3QanC;8SVn5WmFYh4&RJR^W{{-oTAF-iW13mm(=CNz{R!{G5&$Y+gSm zCZ@~che|4)ElH9jwwvL)T2fLXl$!VAXAfg8!;jtAs_7%PzZ2EZ!=879W|E zmw7$;KNrN7D<)2yC|)Lg-tk|h@JT6a1)(x$@X_UR2@6_>4sY3Fz8~=4cqob~t`d~ra&&c1&_w2k_EvjIi}KMRJ0Df(_c_MGPPf%ut+ttt3*h*OMH9ER5iuhW6bvU1ea)WKjhA|pKs331V4oTLO0UuT+>a-;!J=vkU< z7G&n+VBzA$xaF3camO9E_Cx-y3fQ3 zkmawWoBWIVE$3a_5OSk?TwmDoir0hc+B%e$S5weNdhUo8N>^;bhxsJGM$|2i)1(W) zWT3Dpof3y>&?7Y?4GS(_fLoT{j9YJ!%T6Y_JeP{H>~z05NH^YiJ+8UxD$JTW18J!# zLg9#W$a&{_!*Pg@(m3B#Z`s0$j%3Tqwr-CEmz$1q_C+4Kx^fu_J{`-RuRH6bN&$Ez zKU(Y(n(SV2-V={XD)lSrVv&)VM$cZdLb=!NKvi`WJU#KAAxZ8^96weHFNMoWdt$un zfsaXRG$SE52kF!)YcyUoHP)lq-Y%X~p-sjmkNg8K{_=KAPBy~ZRE+KK{T1K7{W@HI z`?vAe4|buV%_sLy-2HS5TkdMSt0}EMwAP)# z#&CJV|xtdYm&?GE_ zb?5Clzukz(E&A>8UI=ks=%mw4gwE(genAe==l67P4cC2(5Mff zwPG(``^`hR^Ok$?sSU$btR|eTU)qRHLG@ zjo1uG$eD?y5AbMQxD9Z%>^U9hin#A$B@xAfT2DMG^)G3~N68sv7nQCL`rH{W~{Wo2c!|Ni?iW5$dQv-myv35VG4#TQ?UB}i6DzPndJQ>#n;ndh}?~Pjz2BQ%NP2J_qq%$T`21;CZvvWF#`sl^3iW!e1)M zt&-=g@(;hu(tuxm1&JIwNH9`}epWc4Baz;(gCU@U&QEdhbe9)>d~W{a4Mu9uY|*4M zZ9E-T&d)DWAW0I&j2VMF@4OSy(b3`_&-c6Y4+Jsnc02a(-!GK!7=tBk7ZNGYe|+R; zK(_d{ckf_!-^u59{HTpMQJ4etY*qdXH)u| 
zauOYk2HuW|>vNdT3Bo5QM5vkwS@OK3s-gmADT zP2_ZS{>au_D7};!mIH4Q!|Bmc+36MHc1UEPbA{MlD6o)kv(ZF(q5hxC#Lt#aWThK! zqhh>pI^m)+^t!#o&++)+q~|8d?SwAih2Ja7lFjD?h=ybw#mHL;A7&UGZ(rjii-}AUzas1eEVdYgT^HAJ1@c5j09MuIviN}JRZLH z4t(wB|G~E6MjCH>>ObUuJ7T1Z!=!na;-Yamu<_XEJ&E`J`yNiTJ5W=w4(s>(FmlF1 zOdFdZtWF6({&uvsQU6Gp^V6Jw-!<_@s;7hGfb4*Q5O(-ob z71qopi7m+W%%@WSk^EGpLdAK50Nl+DD9S&Ia<C=TG64zT5DpW)en&2@AzNAM%m<&2Z zTTL)&sZaOw#F`f>=tUyyinf3KaU!fnvf zeAr?#Qk)^ms`PihK;$b(%7Q=lVNp?0!c3~7K&fD$$4Mov#l^*9&KJpffQr*Yk-tI; z*eV-uSG0NaX3U*CS8NNYLPdW->1TUFM<3kF{qqH!e72GzE5ERV?9K~BJa!2uy1`=; z_uCSYybapvz-^Yo@U!I!H{4FAP`=-}V-HRgm!q|<4bJv1KyHMN3rmt zikvQvl>*?-KqhkJwCMH0m-xJJ5F~9txu%pV{DNof{nr8E}g7oer7nWzyb3JY;y-vQ(wK7#VnGB_yhY*~b26mj@GUZDsR z+E4Wi5vd?$yytUH5?c&0QpUyN3%_GvAp}X(GvZ;Z8M1`rYat}&WyOggPe=Yc5R8j5 z>?3DZ7Li|}v8!JwX$U!;0uh=_9FG9=5fOiY0`gLs*40#DEA_?4jupY__M*Mrh1%L$ z)K*u)<&@vuvEX@OReU|H%x(Avze?%$K1HN9n1`-J_daiZ@GxE-k2y9a&qR;dm;}V|bBugfK%=)Hcfnow`QKl`E6+cHuPZ=T+CiD6C+|m`eU6HHXDBo zK{EDw(N+AlC7Avp!{Hj8KvouKhiXM&M;APLw}S#|=IbxYYmohXgVr1@HxF7GigBW(8lFC6!HT;shXd|Y%1wvZ z(U;=E-@k&lUi%9kx$inmAD#@0ry9H7dKSNU>R(uYv>qOs=L{rxYe0T~!P~U*++D@F z$I$HmR18hf!=S6B3P<*C!TKFX&@7&(=%KE)0S(P<)DiN-^dNxWjpph??Ax{ln>MV& zn&q$Kxj(*)4VA4TeQtXV)-U^C{NGzEuy*}MY}vXU`;S+mt^Y<~6)MCa+t$`5v@=;# zQ<5U^K%!#MfM4+O!WC=au3NWGSQVK$bEf!WH8wUj$aVJMczCN2{vuYbLn_V&Y=S>O zKOdjaLq5Nlz3sN!I;`KEEzbAF4;3mRijL;Mi!sWg4Zkjc_}Z0~Ma+6$baB&Ufxo*7 z$Xq;W<<*&v7Y4jQqAX?vi5ECDe)u#2_$W|6pVOa2^K)OoLvs%|G=2vRaKmOcATG)( zOycu$k({t#;yThec){~1vDipsAJ3(D`w)Ja!Qd}=6eV2le+E(^m&a*NCm)%(pG`|k z6I;+R)c&#mp~yZIe9ej?JU`=k+PHD!Fn|7hur&dBp_^J=kW{Ep5gxdiiN2Y}CbsIJ z)$#TyI-$fU+F(dKlrOus%P#14OSD7^*DrY>dE6)|EyL#RyU}WQBQ`D$M(X!VPE?}2 zv<@DzA4u@4WJ-@v5Rg;Bp$PCjZr?h+L5vx)V^2gsp+sXUlWcK<$6O+EQQ&mCP+e7x zL;DV4=hmIryL&$>E308NnrMvIqq?F3dv@)?&KS3TX@fHaIat!Dw z3@s7;aoGrEiqM#tN5sYR3yPOWY?Ht)TsD4Acj&w%2=*sPaSypir>vOaY`3Gbq71ut z??iJ;E8-JVC>1(-&aFa8aVgyNoFY_PSW!e+BcU`?RDw7hkq3FkB$5;gCCX6uZal)W z3@gg;4bD5o&iQ3HolewLS0VrKA?(_@13P!_LQzo}+#d2r^{2e73VZh#V8_mU?B0`) z>dHDQQ)&ZxBgvN1+AXf0p(Ict6)IGm3+Pg2VaZK1kz}L>?{lEGU<1}|IROWEWoHBl zE#*ZxS=RdX%%46ImLRKuE&;hCGGOAa+v`SC(J>S^_rBf|@Vnq*tqH9K z38|wnEK64AIa4ToILkMzUiFm0qo1KqKzc^3$eW|N8hf{F#>v*+R2|)iT}O)0?Cz~Tdr@C>2z&M)MV(7R?XG2b<}a^dcYO@5 zdEh%(GBM$Em1m#evL+Hm&gC6WQ8Lr#D07WIiJs-7c@~0}>XS65+l_L2FXcv)l=k{k z?Ax*(#dcX<0gnT1b{8amuS3x4tQawGDZcyEU+||V9>n!CvSHJ-BL9=O@%Cr=Xgu@I zNW~Dtz`e;*q2df7d0MdJtv}$Y-~An{4m8MvuT+U0@BSNq{>S?`Sl=$*X%9I3%{aE{ zHT>p#58_+j{06@Ez(e@O3!kFAU7m0Sylp60^E@8?*8OANnqS`|>&zH%j46 zC{(EU9N-r)_4V~Qdh{qNDk{{4{viZ@QO9QHc|nLxnzy&Ni>+dK;oWMbhrZwxAT^Il0tM-W<>c z9Edh+keU#UXp0#-id-I%IudeTfb-0Y{j4amYu7IE#S71+lm&hsYnc?q5wQ?AKoEBs8f(j3G&ZCx48X=NLFd3X#!a#g{GNw$Ofy=JA5(^hCMBb>8FdL0HPjQx2~KN*V?DUMG=wSyw`&$>$~c^LESpYJu~p{As961o51L z<)A0|^L8uT9|mZi;DtwW3zB&5LGiJogH~vHkOx9XnNwc5?1Ul;9T{AXd@Ta!h}kVj z3=T$y5=anS9+YQR^k7RUDJe;qIcqkqxcWNGnmre(Dap_iKbJdi`6HA;C@dB?N6o}> zvl}a}@Vr1)U?8S$|UjpC3Uar4v^ z8m0Vbsm{l$ci+bL(pI4w7#@7ES02D8t3N}2RVyTF^EyKeCe53NOcUMqG@$s{ArzIi z^cZHqC&As^h*n1crqofGJasHG;#tjv#+1^{c=OZa=xT^VkLdJ?xbD7h zpd}x>_Eq(q=FmPdBG4jX;v!r(Zxmw9)CmV%IK1*jtgVQ~#085nC5tm13NyyhSUu84 zdGfo^RJITM4%hLMZjbOa*Q240#_p(OjF>hX<1*N!#hF24Gzbe@44=ylcaI{V@ly29 z^38MpOs0*-_^DG7$0Bqzpy0C)@WFnaO*~FJx~UR)Ph5sKEuAsX882k7&|T< zeU~PchCCGa3@)#pl27Bc-l!MKSpj<9_R`$%RD&GsS1EH2ijQX~3xZZ3hw+mqB9>&~ zYc9v`H6LNkt`fA-{Hr^7yT6LV8?b8q5hyx7Qq4iE-Fy($EkTQE0$P}3vN7+bui$|% z-GB)xG%so|#*usq*P*UWUg)4MLWPR+1zK1Whhyf|_u%`F{~EvkHPO?*z=L;QjtSZE zu<*lGkaIwxu_ATkEL^$t3%Kix_u(7geH6cZ@@bLYr=EIBPWMy4!cTwn5We>R?#Au6 z-i#||=OMusaa~6hDpXtmXl`!C#*G`r;(JO;3NE|sGK?58LhMi8A1{RYD_EZB^2;$5 
zD$WG_LR+zZQ&d!hsr0aV$t9O`eDNMBobv~76)M7jQLn*`3sP{$rRkV7A}Z)QP!P)> z;w2^T`yXR92xSp%K{y|czzcgS&JC1BKk;IasH&<88#ZhZ*4{3^{Bp5<(O|1LDpd3fxW89O9yU5D z^jjL)*y%J)PhsJgrNRP)!G!GLBeD33 zYjDFYx8Sxf+=82KxC{&CPDW-%GW13R{6R%QRzTpz12Nih`}cGF!4NskK`3!_q=!!g zb>8GlB^&ZN<%@{NZ=uLZXJ(5PDQTIQFlicQ%w2#*mtTeJZnzP*-L@3B-*zLG+;9~x zSvU`~rcc4>(WAsxk4A&A?G6o%39N6yd*6rKABl8OqF7mkeQ48T(bBQTzCETxZ%bdal=hFV8Mcmkeib&6ogo*MU0zVb=WhbKvwNNJP|msSr7Ha^*6W^UUBpb`V8pHd~OKnu@$JV=;Z^EL^nc5?p=L z65RBK+i>&Jn{dN*mt)bzGcjw%cubu#7Kw=o6d>0)4o{(QaHjlc3d)cm6)IGO!aV#^ zeB*on5A!n32)G)N|H*6k*NY$FV3oaBg@#T@Xsy_f74NP=kvKe7wk_7tF@2Wt#Iw-eQcMJSh|F=E<$ESNb43BeV0U)@2x_0n?Wx1H+G z5I+LbrjJKz9Dm?L{YJ14Q=s&7evcO}FO32bZ2nr?2wZyoVvI?!iECe5JM<}&uyD~d zB*}U%Ah}Fos1iVL zS<;$g5g%s~{kx~G6czQ|OHx8h$zE*Uc^u8+Lt?*pH`b9N8UW;H20oXadW2vI9~@>U z3>bd#b(o)Jpc@|47H-6=&;1Q=ZlrqR=$c-4-C?YFZ5b-lXJc&K8Lm~)9KgrQ6C!hd zNIu^gjs!GD$ERW9+gc=n&KV@qKJyuQwI^n2_m-Ln?!_Labro{JQ- z1I^8?@bZ2%-WrsYG<0iIKG^H3P*K|qr`Xqq<}2Ozx8Zus^AbAsyFGCApa`PT8(@iv zh1IHs&)$HV+E%zcq4moTPkSAX?b(8ZCtB%>m1HG({u1Pnp(abkWF?p#pC(1z|8gy6Y~UJ!0$Q5)5%h@|Xz&$7&MbLatX!6`I<#u%j7 z*z97Ec_5)RAufFsrd`xSI_J|wtee|{1@kdAFB385uF5Wvu6L2J0n1ne+x9AR*p znBwE(k(rq(6i52PK*)puuu5y5RD zlG`w;=wA%=cB|8AFgnYI@x!B#mSBQd$chloq)6NRDYFN53L;CHk_<8~m%ph0SE6f%-xksNPBoXrZe zUKg|~BBwzF#}H{(E+U6Bkx5VBah3b52@@uC^dtOIkLON2{;_ok9s`9;L&69*-g5!A zCbxh8eq4R^)j}zc)21jf^o4NqFxdIRL4Tx;Uj&wwm7%q@730T`7h@7DNwSjB;>C-F zk`b4K!mm(IIFoq8L8SS@!C=orr1=H{gIXr?kkJG2k8&=bGI+e{UK+y?9T;sS4H^;g|Bofp8gfc~jha&I6}`c=%6FH-nFS z9daHDz7^!ZWu~yif zJ}jP=fXw916=9_;PKR*$E+j-+c0-{oPMbE35edUaBg@!`qX+X*Qd5W0q9Rna1`v}< z{ZRt7Y2L;w2qipT7_T_A9ka z#!Q)n++-W|Qz8h04{epZ@y6et$9B&c+;;yrar>gt;@OSiE!%{D{_S;?r_RAe6VqYg zPOt+KY7VT$2cPUlHOArg2fv2PMn=oG0_`Y2ycug&??JU&hm>)%F@Nev#OvEpwEkUe zwdY_|s_C@7K<~2CAu7R&x}B@B@pu#b#u$vd`hGn0#ra4zo$5zxPC`b41N+u*#>oad zwfg{4$IZaRoOrR7O$R(Bc<-5~@K&J(mwf3FeEYT;NaihqIv}BP+dFu5&0*O6W{kV? zi&!!vSyl)Q1vymh`UoHItw6i89fs6Nm_9xmab^uX_BtHhx*RXR@hOg-IEEv|wP^PQ zkTL&G+`b@F-a5!pi-Q|hW6joMXtj;PWj9}m={bquq$o0B#iC=|*I?ym`Dl$Eg~iug zj#;Bp=viFc7JLDBD~flo!uy{bMr+h?Ty)vxm^&^5Y>c$q>+nP~ZaRP_7t-*i)`8T|KGzqsocpv6wSo$ot?q%k8mSg)zA7SmTlVF`A zUCu&$^$YV5M{}icz~O0hCRnKQH5cv2{^PZ9IvP=Sya4%y`-Yo_2(mX0o(}JSipJCm`!>DuXkuhxnE}l3HQM^^lr|)CGJBQko z3HF-f*s*Z~j#jtAV@$!=iQ|!*#BI%w#*;g-;?4i!y>&-WT~~rq8hIDP~b_QtctU|H>QK)|QG{GxLzB%mD>_XfD~0 zPgZ||!!7B!?2fx}?W|lx8^bHBGePfdyDNB`-?p|kVRbq=Il0$0hj7EYkPxm+PRFMz zi-dtl3Hw4sq-8f0WIRwZ8O$;sDl&;AU!>zyl|@d)`J7j`+l_+<4`RiN6^Mz65sMEa zM~)PW@ZpC4d115JkeZqrmgSOjDU(q2w+zll43zA6(To?$wrtrV>TGUqE@sV|)uC(@ z>G%|75#E+bRTeqFFx1;Ef1z5@;6jVtEAna)8!1NypR0Y-DPQs;0Wb78Tpl=Gyp;^4 zi*E>C`DMSK`S|mofFBwrzmLcZtJp!&a#2B^6OpHNJdLlCD7#C%R*`SA> z7tlB!3XH>3h||Ce7c3k8lNW%3cX<^;tez-oIWv4NnS_8}QgENei~T%r;;q+s+Ylae znL_)o&@jS{_wzzl*6AI5U*Tb8QtpF!%ahRgL0@U?Dg55}!_DhJ@r8r_NSh={!ipAe z5y)fR=FOYoa5%&kf&3-ASX>p0obm!F%e=Sn&m`V(5NW<}Fxc}DX}*ELpq2@DDNM2! 
zLFcs~{$WtRr3oQ%vlASPAYK9Ik0_aFVA4={A`LofM|9++ef|??e66LBiTDdqzLqKI zhyFP@u~aVHWMX$2bizswymbnVV0te9D z`4mb8d@Yn4M9}hhClVmCDqQ8G2#8FmfkT$_$w~_pi-yvsH_)>^D-2Nj z=oz2WXe2UGI+=_#7BjKpDu*EiA&A50IO!nHC_y59^0`Q-IEn&e76RQB+6a^e!N~IG zBB7%!V8O$37F>K}XPH%;a2^CHvf+FRCfOfz2>(Q#<;KaC&t>HlISfvR05iy5l|$z* zA4DL;OGhpX9>iCnJ1vQXmYALS^{ z#f5y{1;P9LtAxkfCsDktT%^-cS#dcUxXzjB+#*PCWHO4fwHWmmtnW0y9r@9XUp-*TpCTPMp?Og9oES!1d$HAUE2?Zyv}Xp0=Fk@a5` zr}F1_qXR>pQoW;!+|3J9YtC|p4%AgjKnH8s_Wu0lwO{GCb1>Jj1#{=_2j%b4k&M!n z(zf+fj3USyweCCQV{#g9gh#6y1o?bKgl;pD>K=F8JKLSz&v0yx?%Ty|0p#ml{+JQN zL_!ATEsk6!euu`6OCBU>4uY~M6Otj}{u1OT)Yrvj9#kL4U| zq;!)5q|4s3)8q5CpjY&|8rF}#Tf2Hmb5Twm3n}m-KK^l0jZMGWP%Hl!fSaG3s@HT8 zbt%xW+1lRX@0RuUd7x0HzFAS(U1+{!0RwZqs!ng|SRgPS!#2Hi4#=3~=`Mz--gRKo z-;RtCTG*jeT=*7iGn-MUND3w2sCf8@+n2y33>9Ikr#MK|k$0zk8K}qgHnWP=dTmIb zVw77qAJsOPV%^P9D=kHe9I}1YgRH1CI$uPaad~1+<7ixP=!^qt>B?c#X;h&x$>Kxz zvS>=_h}3a zzRRY~B@S&pFEoXZHCDpnh7W6io-lX=&);Q`#{$U#tdgoIKZrMEcaudHQcXjuF1HuD zZOPF{R^sEC)_Q_oo&|3OBDS}bY$QzE2|HpKp2^ncCPSBWSQ@!fD;=MKI7I8G$IxfH zl(+1|`({VggaitfT0R1YKN{qJD2B99kw?aUm}13|P1xSZKV5*lXjT6K(dyLxfr&I| ztiKjY+_)9cFFs?})3+M*|Gl}b#wbQsg}p6H&5|=8^5n{s-(w4;rLXB}MGeIM?rrG? zi_lp&zBO34e(}Xn?H*B2mk|~Fhk*k=I(7_3bJM7(rjv+}ICw?6Q2ik(8M@@1SkK%i zDuefUhcQk(ve$i1k`ns5WcfGNryZ*&az{VpL}4x?$vO?WTsHZmeKrO;Yc z#plm(w&-hc zRRekWpAdJ`Xg2N_^`m1G22>nVejI1L%}WlzM#>zUE9|Q|z6c`Oc_4DZtVokOAXw1N zzTR6Sg&xF5gYze&`oV`mFvGdISEBAye}`6lDpiycDtPX=zsbem5T*2mWHOgy1XaJ8`yj?lpa)&$V3OfbV2y zf5&-wVZTOh1iKflK-f<)_teB*p~gv=0_97IXeEs2k;CK(A=QEKkc~F}u=Yrki|nZP zByD<&DF})C%&ht&*KBeCleIR+m70F>+f^dT1KWXudSr+DYWPCsmEZw|76zD3>%6W% zI1uJS;+AclepJ2PnlC$XLMYuM&A&0|0mBo0twc(9iyVn>ph%8kGXm=wW;x_US5#Av zgseQh))zHOq0(i1pI)uFa>{zr0p809c?3VETNO#>4vy(+5`#WX`hH|!gE1MQ8=4At zOZj&8fiEd4an5pYLC;8l39;=^hd{E+9p4=t`Y%-J?=qDHS|Pls*K;r^#6qZFZPjEm zeuOSfQ{j+^#)bO(m~Ag5HavB0Lhs^45|$gNVp>zsI4KsSZo#<5lcx7_@uiv*!>L4G&b$p&leaLdDe+jf>l$@ zra_S=WAcg;4m9;zEve_6)`CL;u8l)~BNOaR&J|1qOw~p&EDgcypy{MnRep?Ij@Z0vHU?PX(c#VA9cHKCpGOYkD z`A?2U{RK4SGO>Gu+@;YAi~emOqDtAYTjOV36REfEAmqY?5wEB*l4wU`*k(teu%ka8 z(n!SOo*yqYHWhleI=T|z&mcI6?}^XXmY^##x7fXxw3UgtkmhnwL5Ja)=eJd)3@u6s zpik-sn!!1j1{5B`?j<54rVqj|_ua3(#NYW!N@tO~I}oi0X&IO*LwTvu`?0V2n=b{P z9@>I7(|vdgvp{e2ZL~A+wDjU9%l0XRIFR6oj+Gk-M?MK-+PBl;6fj5_eG2n#%fy2*k`G)tX^Ni01d3tAZf5!w6nlXNu`1ax1G4&^9=%0y z4t1i%B{_Ep6e@kf#=hqigU2$}2IU@=^rHgwVX1%E}ru8yEf zSHArV<~}|m#-w)(QEuVCKF|NFiX12M`%YzayIXBJDW;v0OHPqrpuw2(^P657$fa%p zhEpwO3*(^Yo4y#mubO|Y7cfFuKqGyTqn}AdxdNx zCuX~(@?V~yy4k7EQr=cnJ4x#YaM8pgnhB)roiL8X9FqFU(U~3R3cvLHW&z&2Zn$IzL7j<&&A7M z`vRk?puKEc>3ltfoi+d8xG~KrCI`0Nt8VR}2NSgyD%6C~edO*xvvsaOdRVj*d;j~% zV@7n52O(n3^xq!{1F|Qd*K6`KxuK!ZXKicPrDyk%c;vp86sDPr`@qw$V6EJcHS60XWH{ro@{^ytN*HlZ)E`(oazo4* zI>l>kL&~q@J{shKi(!;nT2-P>&03R_g<>`G)kc|7L{Cv zF23TqVUXbK`ufUmbAR_TN`{?>(>IR7FA#i2rIT)Vfh4h0Y$KVoXAWlX2OFS@ShnS% zHRypsug)a=no*bSHvGawdc|8wq822de`bX<`W{9#Omk&Q&|hClViLLZ{0^OWl5@Ji zJXval^-p+T_@h7eMpZffD^ zBcGYL;~zWaZadM>iNp~*48BUUlU6WblO`Qpvn%>9^@pw^%KBPvvtF5I)Ack9VT_4g zm>DNe-Y1}kmV1QxZFtk%_=7V-1gJ>4?sMN~d3i&1f_0ag%GwmQydmq}2uCvN_rJX2sSGl%;Z-e@h~HCc{h|W*Q9;kSO7bvUNxW z=G1R=o6?A&`B|H|P zsvki-&G^lffOoLG|859+Iz?drNs@W*rD7~}ApNoi}d%RVNzK}dsq3}@ea zUl<3``!}>2zuX=Me5inJ1>&jC?4eNH%$Hht<5TtU}`zF;`T~Z zX9Z+K3tqZ$02%Q)UdE3kGuB9sDqidRH(Pe{?ulJ$Dc_bE(#;4T%1vb}u%PIB{T=S8 z(t0-TzY?y*5HxLSFY|U0RjT~uja;tqMn-04>>&Efbm0B@W>w>eZ;Gy{k6_+HcaWIq z%e`eBF&~<)X~4JopFP5xVfc)$1`8Jb%P!sV@MIuy=9jyOmox=agv{TPeQimkkM9$n zXm27=iNg+`fjKJ+z@Fc!dQFXbc6{$}*0ScO1V~Pau9Qv75@pO-Z^hca61q9}p0cu4 z9T}>xPbYY{;A@IQo8uV|e}RTN;0et7Ml`TkEw36U18$N57Y+2j+hn4l(7D;kl<~|A znbaUg?K!>a+jNc1RM06QvtjyVU>js0US1BbeXBNq{k=}ePu{zL-!PlK>dTjle)1QL 
z4)0T}X9B}_nmvK7bs9aJ48vVpsGaz#fBZtQbX_(cb9r^i9^$6fV)8?kZT5hJ~kch3rFs_*ZnKYsc#Y^px9(%DAm7aDv)8OIUIc%Ae8dn zyxUsgTyTv%cnqZpO~)PyiiCEXrP(X-2~Qj+Gkv?~;nE|_{yS(Gl%zi1~-dW4Gx zHpIjKB(|pX-ZkNf0@23pPjYv-Y!>pV4#5#m?tdQCbZ|Cp#YNwZLqo4c?Da(JGzmRt z@o}IXMql!BC!bug{#$-U%s=VSPv%IC?++FXN6I6HpXJWEVK~VXnIs~e)u&xQfQkw| zbBBT$7Q!D(e--`a$36E@O-ig_JNtR=mm$NTO77BnSYnbibgp$TZJ?$yeAtJ~_j6)U zZoe~{w$U07s>|fplA-ZQ*?6>mePkH@)=rzwxdBv1WJsOAU*K`H8d}}&OFVFa9G&N? z)*OLI_`FDfNXZ>9DPpdIN!v4nll*%;L~>iI?Ozv_V?gtaCOn{s@~`^DNydmhE>+o3^qG0yKW62j^~RQW~H_)I&KBFZpvU)GxvRdsOvZr zEG;dCF;*KC|E?-S|1iN&jR&LGu&fXrJz)#fs-jH+|obOZu0s_54@8Gyt z!@Py`(q--?l5*m~rR(02@Tfm(>^|6s{yN2s)${Ar0mEWTS*peTh-3JQ=x4mIM1z*u zue+sgQhI*Ng8*XCUNnf39^T`{MnHJg62Fy1m&3&6IWdFc6JBnBN^x z#)1b_|-l4K5fJ`&J8f0r-QAjBPVW>2oBQB}i8Gp;O1}UQXMRzn^mR>Q*e{~O5~8Om!+nj5j8Ucv!P}G+EXUP$T$4K8tmOUEP|%d3#NSm4i8KS|@)pGE>sVajG`;r? z=t4=KS%d-!N07h7P-(ckH`WLC>%^tOHhRFGZA-QkX{5{iL^nb>^=4;^on(2)RlWaW z!}bF|R~6leEYWrTcoC;oB5T=t5c5Sdf=8L3A5F_d_#g}`Q62B117q@F6Y*g3=$!IZ zn%4V7EMiGkh^7Y7xnW~N)}R`*t#T@hoeH~O3memt=a+CekEY-w2HKtDBhP5m-e8iZE}mBu0V_!qIJ0I^w2?c9et23wu#!o&*Aw_ z%G`6HeZ3(i5z*|6&-r)KM?h|Cnnu$jlRv!^(07UlWl0o4vC77-@K1S~KX zLv|uQaD*X9_BG4YJHa#tlI)hCn~~WX5VJ!d?CpJz6!Oey zKGjeHPTO7#(NuGb5r(1s{_(f#*)B-%%v61hs@IaCeJ7Mq)jGwxtd(gv>5`6K13pBN zw7T?r<5nA@$^r~B(@o4NMU)I8rDK#}DNWi$HiP5hNjz8yW{loQpe@`z+e0vl0X49> zr@uz?xTNWxktL=P;Q?!mN>;FHj7kdgKY-g_)>L_b*<6$|AG|?~HqGKpmZ5YGT6{^ zFoky!f1g=UU#TxV8+rO5ib>&tAg>q2MtKmT=$5?~tZQp2Sw})z;oMo|91(im&$p2{ zALpk~YD(~Qf|we&l?hU_!`!IdlX3~zZh3U3e++*eb?^bMUBIws&z$*I$V?#6pa!AX&aTaxb6fy26BV$#(H!KiW~Js@ zk$VDu&feVvjPF#OVIhcR;Vg0W>ro+8+t+TAAmMAR@mw_3PY1e#!kG!xIF!?c!J-w+ zeF%Q(SK^Uq;n`+>s5@#5V1&Jv3e#-HF3 zx<|h;^(R{D}PErj` z>FoIqM+V%8W3Axb$rieD$on;)kl~Gnx^mj*gzJcAPPp}rc2?}nbjY*jBsun{F-(>` zLy-<_?rMhOcd;n%|Kkk!U$(KTcd=km=|<6uY7@YX3eZ^H&Oi2;zFd(#RY&z8SG|KH z*TW3`QG3Bl1^|+>*v&)q4iwbX#P;!s{GbhS&1LXJt6E)q17dz1;@o&{55?O#IP^n& zuh<>tn5E_9e84I-lHo{%Dx+itwA8e#^S5&+77ZodNH$PCwl=ot%LNUH0{8J&VxBbgZvDCpsSTRR< z0aQ;=aKbDup}=!;iuQnPfNLIXJ-9sA&}U*`U@-aq?=ONwDx!w}!dCANZ$`nXIbVBc z4?v+BM=`n~Z&awUXsGZtgpx(%Pr29l4-XJi9KXSw*9*nJPp0DwKc3@+a?Zme%QH#R z{Zj5P*viP~tXP*hy;X9xk!9T61xw*n!8VM_SQFghUhm-`rFsY7j;-DxIN-0QTKNrS#mObw8B?9q7V?}8+r zxP>}Wr_>uoC6TqA+)P)P57ByVp%ZP-lSWD<7{&#JZUomSa}p6`@@hD7-=z+(upYAb zNxAE+!|@aidz9?U`~#_q_Dijd|Gi59hbTE%7*WMs47`KC@AmC^-(xT^faQ1d0VGTY zz#q%91U%@9i;F`@LZMk$SR@4syShXVink=8*<>5tM}VVKJrJllFg%Q0M%VmqdVWrN z|7}%BOiVA*SB+R9Hl^Xry6=70kcGlyGl_#One)yrJp`=Gyx$MLQz0b2zWr=?L!iaP z)!iBcK}Nff-23N17K2kVoQ#>`s!^zJ0W?u1VIw$Vnmj~05f`zI@a4@gC8QqP7@4nh zNbqBZ4Q>;dJLD|V9uCAbdoimiR%+C70sEpcgl5YLD z=I(ai>ao9JeLFa)_|*%bFXQo?xQ7-Qf2eNVU?e84ge+ogV5TUH`qOh*$zU9{+1r(Q z5P1X`nEbxe3ZxlJ%zUAdH|Tc7zl(y9N$38{6QUQVzG@RGs$ULpT4=r#O5Ga5LBhkqWmvY-VD?ovK31nNudAjV zTua-)JsPcHvv8uW)eB(|4cEJnDpL267*O&M+SIMgB?WU=rXJ`|x)h%u5}3tOd7A!z zUVLOb>JR&#EkxT*N~11h#Q&)l7-BI3v!H27NffsYEqcrnXoSz0zs1kPLwnQUO+YSy z@wnEwV_?3gKN5WynAc2BOPetl?BbKwq}wcRY{X)NoWFJMywQmx;;GV&r4LmY!WX1s z5o!+n3XqzMW(MY5{&r-tdwSnEtI@0FYqt~$OcXXRPb7YIS-)mr5Pja^>SAO1$c}is zky^Z^)kZH=k6r))LFGYKdSj(_RHvre@X%b|M8+J|UrZv-DAL(&ac*$P^_TPXZun3y z>Br8c4o|6@NS|O8|I#dK8S33e>N#*0_w#&{I<#v(ea|7U5EUF9DHG%hrK-v*p+HK` zc>6U&LHBP7-Qdqo^Jpf!d%BuHmPzh7Ye%;Es=pfX8UMdmj~EQBcoY*NdTBNpF*w14 zfEZlpOEqV(TD6D+wCmJI&EFTr*+%*t11iiA)}P9b$ze6g^#+X`J%;n=cF?Zumkhwx zpS3lzso5^fy?4J8oUpaiZPu-1HT?m@X_9)VPzGl-wC=_;wq+<;i@rQANJcO^vJ=iY zN>7mlXLRg$=<^%w*sj?sTxS&X!K`4Pkg?4DeH2G*eb}{8$LOT`KkeEA?P|xL2h1z8n?q=y7Tf4O! 
zoNc~~t3DUw!`TostH4C#4vgC_HB3bej2)}YFIvzl6v&t+fbAQ7t@JeltCj3Nm{B}4 z&mG0>gsl9{W0@2?>mnEVD2LuU?7%Yx`n*E>pbXQM##pr8CZqU-8C1K(EEF8qEF#w| zJoYy&=m*S2uP#$8i^W->R z{3P#s6UBSX?mI3&{z(+^P~n382-J`uju{IZl`wOWjK4)ik&5x36)SZJn9e~DyO|AU zy>E@YX5U+|7QKtTj{AEy-1_$jH!o3P;JzwX=mMw66awr4aVRNqXc3nJ%4)o%phZGa zk&S6s_1`xZE?!(?#wH7ubv_y-+>XF=y?bjzZbr$OYD$I(8n}omtoSV!(AXa$&xAc# z$MIn^nMNM+L5+RVBT-%F+Lw_{bRaukKDL39sW(Qt6K_foRrw@+r-W8B4JM7nasbb{ z5qWfw$-;sbBf8I{HP7x_OHo-Ts&{$l-fEiAS^rHnuDEaMc~&=c(L)(ASbp)AugaMEn9=eTSC~N!4dUMmU_t|T;WT3_Qz`0z97kxkH2Jm4-9mr#V;A9j3VaB zH@I9&rvq3qQu0(?_a5LfTaAwLaNfEp_}Va0QT>wc9UGth`Mb<}l0@8t^jo4J{*u8- zQibY=w!joe=)kH1tVp$+gnc<}Ud}WSf3>q4l4~xQ<|Uz(aA)N4I|IgD+9M8qGR>${ zPC33{)zx(xLidE!Xogt>QF4-%jAjmOGYks~@8zh1W(YY2PTE3)yFY2p5~TGp1|=u< z`_;MpT|7P(oQ07&%3dSE32;OFcPJ%A=qgHr2NG(i5Z}{1CvuH`sbao-z!?2g`YtvX zE*a-GJVO4vGs6g|(26t5T3Fi(OQGu}O6zZ5Klmp!xiKq#0*L?>GxvGOqYS=rSW^@C z!Hr{WI@|%m5V68Jz;;eEgM~8rc*OGZlCpZpj=@QI5*2nrf)-iv1#&ZzAMJt%5kb@n`#-RKmsHp+a>c!o{Q86kyl3r&b6z;dbS? zXVJjFg+n#s=8IM&kG?P6OcI8OsEFjMq(^bVu=`IFjOU8}2eHQVGa;dZOBd|D$r$U} zJ-pFI#&lI<+@W(FgiWuCa|m2*{f5*>|M|;+1@HRT#jH;U$vO!whzhDKD3<8Mn3UWv zzvwG36ODb3T={z%%BlTLRjvt%QcV06h{|(j;!`~?7Bni0jRN!+UFg_M ze}7VlAn6L+14FagF?39yBK#jS{7*IZ(36_&;1pd|vET?s#cp&1cK{a~#dpO1ju!9h z)pW8AFI5H{E%K=B1Rh=#dSG*`B!?CsNg)J=+n`}Kj>6p*7Ncb%RL7R-i0D{jSv@gh z4zHEQTrHf+Qfw!)#T%Ov(@)W-1$;r9u0HH#Y8H!8gt4nwn#PvN^E@K=6Q4AWNQBvs z%bl|^2?$*%J>bv!21fvlmYiKRGm@Iss3Lv&RmtVm7hUtIfgF=u>wK{|H0V53<`=0j z7nXr)afVM?{>Q|0mXTDQa(Yp}Tl~*n2N6jkv0)zy?MS%4yThm(VTkW4kQ04a(H1)? zR&8uVV2Zd0n*dXbA@j$hHQQ2Xw4!)c=!hrkc$?%2neV}^>61L=9n@MzWJs9gWwZ3k zvFgSc8V%?=;LAxTHWWHk&aWlx0jiu^oN<%-quHh|uAFcfWOd}HtVgj7r&Bs2ad1D; zZMhl)w!_2zF7UKLvrL8mYq;p?uK&}9Y2Gs`N6!p^E_HrDO($i;4BJRvS7Be?GE^;=&hW;%E8$dCrX1+WX z+l^%-a_Qzqy)()tgm`%0Lr#UTk{%Jr^UmLfvX83;K^O==&ezGjG`s`XcOaRw0f4J62phG1P^`JS#VJ6nb(ex z_IQ&@7eCyz=)n#ike8kk9;(1yXY&cUWZ35cEe;<6Hq?1?PEk^R)% zP^G{7(OZTad4BC*F`eluBQ6zEW%J09Yi|;D)TH9j47PX_46>q`Q{Zga>S~TRW{)>H z39JE&DtZaJtKUI4bnerdNyG=9K%!Z&G)Z#xd2jx=Kyq#q^lKBoVmo1_NoZ^?3%Apg zsgcGUOtoNx=)`oH>yrt!7k%W9+ZNWCRMAhqP28$j1*vB(ttS=s)iS;+upf?(C(rEg z6=@v-+rIDRA0VVkRtL$jRM#psNKlInhx>qGJ(C4cC1mua5ml5fT=f}unZ}YqpxER$ zWim=9)$A5iIqc>;dOOzf;vbASjgQSKqU{;n>7ZD#k>nh^eGz7cobPSIJet zXlEY1Ge2EIUtc;zdbe*9z9v2lT~&+SBBdqb2<_Evt93<)(hvu;tvF(N_*mvlo~gEtmN;79=Hm+wa`_Ca5!n#w)eKj9H$cW(nPse?9BuLNqJqU2&6U4#(+t`l?{VOd6 z|8?tHEN7#OJz>zR=Jx#HTW4<0=lJx4G*$o!PLgi3u(DEtdEje=%ZCpiCcoLDgvAj8 zQsh4LPi?vt)7Dkzll5+Lz(5wf;C?Vf>>_PrW0SbF8VZ|xwzp?PA_~?6zn?hNaSJr7 zXfbS=PsCk5+PJ*u;NU|_l^cTjoht&@=z=0TCPh7-2H&%u- zZ)s?ciZ;tryl54?j0c^_LnP{f@^;UzhX6H?GJ*k#s05XLdE=`zri@hM<&-Q~2Szy{ zIs8cKB@eFyYD-yO(CU6=&7zYgy+!SI)XTAuv z6q#E?&Fz?3L+NBuT8JQ#w+TFB9U1?qu&b5yb)x<7>1gwuAJZ%r(?*1wP9*PmV;;Vc zu#lEd0w;5>0-5Y)5|I<5;ghOkK?x7$#RYfiHucrJC%Bq(wiQMbQQK{N3nt8W zY;|fO>-33>3W(A2QPFbT>5;(%19aTl&64I}mz9VO@31(Pv3HPP$~rE6SIVkRWetGc;(tkFRS5QKj*^|jd8-qH3 zD%EGX~*UMz$y10SaAF8kL5Tvj+}u5^2Vg5O z*RJkzw66fWb27_oGRMj~>6e!JMW})X%war(Q69plbZLfQv=Jo&Ip9Gjli2`#gi(sr

[base85-encoded GIT binary patch data omitted]

      &h5H! z*#sRrKkMnTs+6&#yW6e>VefTb5}XA>ZN}CK#!N;rw7c&30{r@aG+!0row(lKG{2Pa zLh@%-yAx!|f;Cv%Nj8Nn7;S&SnL8y_g`j${cu}0Mw>7OdHZ+(3>)cO6>Os)g{&XYD z@t>iGvn8u#YhIEyoUHsia8{TXu=xndp;MSf!I8e#veHGFC?0USvi#W70D{6iRmP0` zGL+TtEil}zsDMD>LCZKAeK9QTyYq}4W_rD)aEx<#+b+XwxEl17Z4^o_RD*yn+=Nse zs2l(1xVbOik{Gzs6m@w+uZq2j`+LW9cRztZ2RBh{Ze<55`FMJClV!Wr+0(m&%`QSsp^9ebB{cknrv*OXDcx;Xg)* zq-yryF?w7zyob1N4xYcV!~8#_eP>itUAL~4A|0eR6#k@3rnU$EoT#cDhTY*hv4b+hf5bfvV6NYzWL_ws~m}$--a-{j+`gL5WJR>at=?hV9b*e zU>KVn%S@kf?sX~)9H+U7)&+vIytZ<*rnETOq>)RL8aHqbG^UzQ$*)Q`Fec8Qe{C_XwJz_BC^im z3BZQLz&A3oLzIz%!VYyd9G(GHrB*8K=j^oMyJegfzMK61N28kHnsg9Uo{VI>a&vv;en)B8qMp+*|JLX0 zUPYbDekWaD1M{Xw=-YtAS{?*$+W>jEa?8Iew-YL|`Km4O35e`&2)(rlu2^7M_#5u~ z9iPFNbPRRaJ&#?HvIp%8ewffhfE;P&eXjVAVS?M=#AGzmW4J9>i>c)B`f{-)|5cc$ z^)y%Vbj&bD)6K0#Wqs*wDCj%tn}G*xhMdGWZqpLX3~@0#5HW?IkKt>x4*Q8^{x`{a z-Nk%g8>PCvC@(LPU$hAM_RV_RvF%X4okBaRAGbRo2GA|B8Ze*zvTp0x(nk{+nlh(g z{kFXHdefum^2H7g|GoVn+~m5(QcMoH)dz?!KOlr&HNt5b{snsPQ+{3oMW%YkP|&4@ z+CgQ{wiqYae#*4k)VyU5*3VjDQ|j$lfr!)M&kIq|{yr zxH)cdPC+7ESk1=ctv;9)Hu=y<4|V0#E<&gN&_QQwZ&59n1YgDTM&{6F#RS?!9`ZR{ zhAWnyJzT)}afvo<`EyCZ9S=cN=ZT4LPBmZ$;DNwp7hLzY&`TNbYh7_WcF7UtPt(ee z1|dsEwF_b4z~3E|76`6&Ud5Z$L9a~G(zngk{&WZ8L1k{&U-lmBFlf&GX7}C~lwGd9 zd>8UtxqxJZjesVc@&^63UN`0K^F3C#`Eb+R??kJy=Fjye&6xI{C)Mf6Gf&_=m9Pf7 z#mj}^{deyzGW>cwT8Pk#=MQi-R52D^?b>=JnEgB%}q`7=1WCIMZ3|9{?8V>I~_8)sNH&FL#bG; z+d?SQld)O_4ZdgEzbwTfNV?8g!*CjjJWz z7wu$H)f)6-=syQz+=!=XQ!+CzW%jfV<^|0AZPcbEXUOey_d?U)*BcL32DwAz` zFGbOzptr+?EAs`G2j6?A1NwMUD)2nhPPJOUw#Vlp_Jf|q z*LlgdntqoDk-;i(-|XG2hrN8OR8rx)lGKOd9I-QYVNxb;KiGo4F&=Hb4kk5{IXN-V z`P1okXRkS@TVd4PA9se?MVW?y8%(MWhdx$qTW*!ZcChs6qto|1N@4x$Qnl)TEMF@w zq>V=;+fAZ@BwpL0q|B__oH^QWW|h3C??TS9_8pg>C1gjjoYmK3CwcX-JTugB+dOV^ zH1l?ulM#hYJVpllPL%T&e_7qv{`>7S{ucgNb$nx%gdogc;zVq!th7g+Ct}Lo{9gJI zbU7y@6^Qur{-PICE_cFcc|%8;1VFHW0_#_p(tNFtos~wsK}>-PoH>`6 zN63o;2iIr2N#6v^&y!s*A}u=f7aY47WQkOerY*}?P9k64;(h&bFmf!Km&i4qE{N(k zg3z%-zz}NcDyw7sFnSNR2n9AVcMuB#n36MrG=^kvW z!ts;ZuG8>$hUB2PgcTaxL83kgb#rQIBngO4<82CkxCp-qY(AILv3&=MO~E!qyKWu3 zpbZx<`2yXEfxkSA-K8dLI9x8#UhuvAMe9I}{<4EnAo+TrFWu3xwACo5{?b(K1s4Qn z6pL;KGm9!78nT(q9gi`LEXp&Kj95c!)Y{Lmif6Jig=+CmsH>@ezHp$m+3Lh>B?F)^rlC0 zqGxUDaQwW6jxt`>dt=?s+O)IAown{*cio5?r-wAz>OHY-ag$-X$2sBLpkI2f(KW>rV>L4i7B-y;gSt}Vb$3FoF8cTJ z5aI44FPWdE3ZGj8w8e8R*fnT=xAX+ru)Xj4Yh&v_nXtNBt?OoqdlYXS6Y}N2;1KAp28a3`O;pVC|lnQ{u8U~2(tNdFnI$(9T&vu0?2`rL= zFJjhb;DbSr-8IC@G-u=nDFD^zMD=uR6^FdwbBkEIYB?6C}?-dw1t#dG1}+c(w(Tk&xttlI|_t zC&df&Pd)rPc*=wimp20NOG4vea~^G@a?hjWL4!jt5^7+iZLL2wn#u6rw)M8o+Fk>u zi$)gtnbb;~S#LjGgaDh5@&NpC^$Ej1{4;$%k&Y;3F1vUX$~rxeEO0MMwMI)HowqAh z`HTh5AuOPv{*aDWBFbNIOC{2(Vk#cIlP&2&8uMDK`!R>bab2-M8#ErGByrt_mt9mY zBr{g3_ZG-gURJ zP9xMbW#SyNiuLBBQAo$r9hDlEe)B-;xF+0Hh2LR60c)sbl9ghxq^jILRr1mp0QUJ| zaLoX?d`6%gT%2*CkdMI0aFFf9F~zCS=bXR(@e9O5^lp#!t~0*i+xq%&!(vYj-v(J;+urN=Nomn_T1F7=nt~%%gl@e8jimIl0mY#5_7M4j^Pvg4V9fUOEeO)z1;1Fqr&N|0v#!!@|TM6ogICv zS7j|SCwDuU1H|14a@4{LpJicwjDTT>Cm5@M5h*kS@;qS?{dZ~MpKXt71m*vTi_W2- z?`RZ+1j7ht(i5yS-RotI46wxvb}JeE{#@ry4g&?fjr#ZV*)!SVE#h!3R?r67?!CeH#k~nNyg(@ z^4u;H|Md3KT{eY#%Z*>7rfh12;z_&R*FPgweFI+gzL&A_piii#ndA+aMFP+HoeD|! 
z+YIwhc5Cn}KV9CNW*juTI8kcQJgUvEd1Pr}9`GlZJ=&-iEVC;GiW=U|q`o+ICa_Zl3o@gp_`?+~#V*kwwt>N(mne6Ld zDs?kp@I|y{L-i>{Wjo1o{jOHvy4w&Bsz)G$rDkV*;NG07_F>>GCG|#EaP5cK} zqD`l_S0EGdoGyxvA=T3<&cxoCMMU!Oqo`*ehAt(w>V zEI9N9%b7D%*=j}hi-R<(Ivt6jfqNO^Fo_J~_gfQ%y*a79|AEckdZ&t<{XVceOYb#VK9{+k^s>KBAf4DPFG6H?B=pW89Ub;z>%Nn%B51h zND-1W*nVE|5QjWlUycEtwzq5;H13}w&~;NPk4+GT(|)KcM&a>2X{>JRgKcFt=$tDV4bJWpI_vy&~1?OrC*X@{yY$`vcwq!T zpWry-yYvm&#m3Z<6suyO>?_>l6;%Nmq5=!a}FC##V9jsR}XLqonrkMDu&F zxv-adDTz04zI`-i9HQ+jY~ILbn6J|ccSG;E^Z36-hb7Bkzoa!khGh#-Up=Q4dG5a_ zoZEZM`z|caeDAn_TVgKpS#58(aadRuiez2eM)JfANlWAKM=`93mYmk{}lQc z9^*BdpFX6#AK0nG+&=hruSKhxO|iI*$dNdnacJ|@P@4@M{>D14T>5R*Z|+l4d}@|& zfbv4>T!i{3QG1G?(r5jXV~-gK8JMPYZ>I#)xoN8ZM?tQyKiiS@a%F(04G2X)QHV@F zQF_m>(D+pDpUlt>Qm9J3B=l0M*wkX}6>0jL13J1SMxJ>$o=kz9xfcxHYrpX;h(A~2z{vj2J?4+Gos7K!9!{;MnmT%VzMofrMExtEk)laq|z*{&ZPU%b4Z?w!-B>%H!R zXo&^-(Ow^dQ_5!gv7NL01+C@CE+Msfp1Q^XP-~0xLF4(3_-mD$b=z)BcFvtq=CGFP z)A?`@u&li%MB${x8d{%E>|XV!jxyqyDX)opbx(oJT5N`{dfCi(SMVOavMi3 z=rv3L4S6bO1L}KyMPh7@NXl82q0@*Y$;Tcbcp^yb#;V$zwoUq3l>aW0^96&lJ3h z!Rmdup@-{=#S!yh-QNTAjOC)Pw60#Z(jfxHogubYf1jw(kpkPg<$MePHzXQapA9-F z@u+v#r+TjxOrgmU0UXg6ekcbo0^?L>J@DKJw;S6aL<2x|4V!<4HH-1CcF%`hDuda( z@|cYjGEc9X>>G%(DrnicFS||;g>_J=SWtMdTs6lj@)@1=gk?QTucRLIAjM+XkLL#z z+kB8)TJI+v4E??4y$mtp~^4C;Xkb<<}_^zi$mU zRbM>?R^CcaYyY870jcX_*B}t@#ju+ONkTb7dUJkb2ROIIy^N#HfQSmDOKo1K%5Bb> zY})~OH*e`#wU+~Cvr+jq`?m9X=IrY|Pnb1okb0HP-kw5VGK?0!J~Dc39^d~_JTDfF&9+bvyPclV<8E768c8P2%P89S8d z)U`LdGAd#(B!o?s`1KT)W*}@lZo4yTODhmc4K?*t7eC=J3jR7;*OTjH);_85u)Bf- zSvxRYLb?O0Z!K5e&DCz)|1Ly>6SsQGf1ZY~OWd9lA6~h8(G}-T$UAIi@|$^xNiCJ; ztef7R{KH0e{ILCoVH}5vYD#S*jmU01>Ay#Km^9y{9z3c>{LZSZn%Q{9ql3!bZRz!l zND)UG7y^ybe7CwjFXT--y{|3-9VS)3Bmea&MW*klOsLvsfBC1AC*wjNcZDYJQ;?g< znrUBeY;)7|Kll`msi$n@?)eH7P8-m7>7emSHp4+hk%0nw@aY*9Y;+I9Me(C<^Q-vf;)~L%n3~ zpE~#R z(doN72AeO|Z;Y3X^3(vNZ7?izEh>3z>Y4fazVqJ&%b1;Y*xrP;3IuSb|Elb4M(P!O zyEy%NYvS+s6qSn3N!i*e8Zmobn=J->YpqU`E~bpDi@LDN=7r1F&ABJxbzO2(SHf=@ zl2AfA_+s|w>yjEXY-m6|m)#-fbwlB(X;byea&FVkI_`6l!F`Jz)Q!)KRe>+bmT{UT4<8+-XMz;k;8%Gff9gC zCYw#EP_T|qiP*(o#&P{HzPTjkxe0H>J?6FgE@F~obe;IfQC`AyGKZq;q_{KXTnb20 zA@!hs@Tf$>rk5DipPvibPOHmuQ(QDPdXu_c!zmoxo8#b!7~Fmp+~mv|u;voKn+rOr zACzEQSTvQ~;>*_wK2gvwoG^GGdxmEFkvv-0VLF$b>ONiH;Z$4$m>)l)`t;ch{(G>R z+B=iOc8mS6Po|UqV%Dk7C8#$?a&IrE$Wg^uPm)zC{hXn)ziTA$c#y#)tRb$jiyy{o zGxj%(G@psAU6-CQM%GN+F{~v|UiQ$WZX-3v%L5)B_N;op9g>3hUd%H;EAh*WFCo3d zrfP&jDti9&G~VMWKzj9^E}d_eEjCgQHk~d+lR6EGJ`4T^LU}#llhI;)EA-=FR;#CW|gLGU&-PK>57ZCWSe%vzZ z`lD}}wE{fRR>wuM=p*|n^zK&E{wil+BxR^F^gt?MzmmBqO9$l`X+IO2_Dvy%r~L(TG)l@vC* z$lb#%(xI=BgRYtx7iFU%W>ew}pW))(wYg4YLP zC1;X}bl`v6`+jii{^D+6B&Fp$vX>o^)s{siw3>c-AgVz}CB<((uIaE^~6U zADwcHHr?)GVM1hrkuzx50lNo;gmVV&n+&{VRg+?SI`e(pZF{(^+pd7N3c!G*01$*k zRW2lumnV;6i03)phpYvTW?7<+HfX*7Kux8tClBlzelWHFv1%9lIPpslJ^1g&)S6s0 zNn}-98avjIPQU7R&*LD8>-O@m06%nFg67+TFBTV0 zlxxD;jaT#s9m<{q_3#sxI*M>0@$iI93Agh|v7b^akyA03i&cjcs>N62y1tl<+LV=@ zKKk~mPG6_6)21M{?L3S=5Z_@iVESj(ri#z#^{yCH07YvOTt;*Yj1E@7YQw0Ptq>u%IHKDY(hMRaTmbkneznNUALJ+z7oD3DpEl{ zOswd5ZRe6u(@-`$uP`~nb4ge{Ul}bqn^S3R zJha(w2a0|Quo`5|rkfZ0s~~c*nlSR?b)O!?+t=BrBfeHV0>?^2Ns(*Bg^W`+p9c)7rX{dzCVsnr+O-Dy+7eT@ zr!Xw%ORp#Y>2tuc@42od>3UWtsRS)i`_FYC-4oTC$pWX&gB{DvHJBfuVLG0-OS914 zb)7}*Ogs53Yx{WfA=n*LL%Mv>ZvTd?qnXKf(uJYJlF;ceZhH%jMXU3%O$=PIQ3wAN zm~meC1pKgk+}Rtj`Rk8}QKopG0{39b{AXTec#i@&Pt7hYT93z>yWY{_JZw`tq1!;d zs|pR^l4fgixR$SEm#Xr2F!CB4C)w^cK)l%BLuw$;_pP^od>@LqnpbnVR{Z)jEpS6? 
z96CP+#!V>rZ>HRx+{Bn5a39i%ealF6*mNq(g2l-D*;dB6MMV-e-wn_n@chVjlKv_h zsNBF9cj=WGFbWg(`eEO_&Z9V?pk;c9uVbIX+G`%k+H*$Gu~ws#Atq6YoHd@JY|{-l z?9z{TD>MO0=cte4kYY`b?i#(@esM)zXM)u2q~UE1udhQ*1Jev;J`&-Ja^0psoa;`u z9^G=<9j!HL$#oEeA{zii?AAMi%{hlujq~;^37_|QO<6SC7Phl3jE%Ug2DRkzA1w8M&A#5@Q#SchNlu-9+``47fY-bm?d1a;@HqxZRML{fv7SV2b zn`IkUI^5cLVaE2S1*+q21E1#9nVc>!_K*I0dzmNLbJ|Q^O{RVENpJVHu_L##j|elR z!M3^Gd!bsP>ZI`soyT0+Ywe>kM|go7yPeC?Xo0^qT!U8o+&yj z_SqQ8K52TXr4^~%EHwLG6P34nBsoIjgq5;XUrig0nMB|1Kw^qSu=~D^xTSV)p||1l zw8}IE`g3#vwtP@AV(KC8sG)(d>nP4TuJC!0n? zoY}$wiw9SD8V?_FnG;n4J;q~k^M)8pGtL5fFRh$%&0HsWq6cOdeo412sQI{{Eeix`llv)Cx z*u8JgaP3+p{5`2sBU_p)i0di?%+b)9*OEIoeE1T;?$c*=A^xMZ!&ERxtQ>f6ciG%I&5E#E`zZmS8W)K37gTk$T%hdGAJnZuw9hx7`oD^*q-uu;aDu zW)R(^zASrK$(9#c-S6*#C6&!mukG>!Q-fsqCpN(ucjubF%eeO4F@$vfsS8Y(lgu03 z(wPkoa1m%IFV0k$F;)~6(&~8an07H~531wugW{doQ=)Ump))LFYKfzp#i&VRGwoe% z$(i6Yuf88eGJjv2fo3zBzgK>IzXh5GDB665JCmEPzMabO2Twv20}6K$x_?yKsM>fLObEY;y#d^sSmS(oZR4_E>x2|c1=f}qw zMoF1Roj5DzX^1tz|GXs-%+5Y^s{_rNC52Q_UBInk#-Z^oN7L}$z1)T+HSFl2ZH;>SYUbp zGClpvCteIE#gtr71)k?9?oUJo;dW#Z!O{B4w{y013LJ*!tP?2S5Hb4jlLO3B*(k_$ z{H2@H$?4hjdcB`3-kym;Og6Nnsv6fN4Y0(q>UW2nobDg|ri8EAO`%tMFBl$d zA-k}yOi4}0Y^C@~Pw(aciJ^LehUr0Quoyk3_@RXtTC?jt5#5%a(nDYTtOH8cDsKo%xtC(uKkLZZ1;tozy6!&`4QMX$GgccuLj3oRS zP>7N$YctU&umI0{gWpb^J~Q?I%Zx)62>g_(xCU78qbjXKGYIqI*pdEtPdWRtyT599 zBupZNiJZ+o=Xvncl4`l7FgxrHO?Y0S5!Z53?Az*m2I4}n*smI-%8P@FKwG|b8`BPL z`BLC%D>?7<)hh)|=pA;F>a{n=B%#Rvy_$ZmlbL?ayuN#P((taKvJv2i5vyYb3>Osp*>cws0UysNZtNULWDp@B zkoTbkS)qHL?Z9+d=?&(!h4;JpOK%VvdWq=byM$10IgWPUoCL#GMLBnWJ7W_=$7J8- zs#8u5)l!7}x;BwGVf@Ci;K%x_1Ls%Y&x_lsJta!tBwKv*Xnbd4=9v8SL-tVDS2NYF zRxQqg^a8!+>AC6K6(`R+OE4Z@XPD{OR_R+?lMC$Dx0n> zY?Cgtpz3mCfy|8MA2kY=c4u!JX9G5QB-(NjBqNo63HV>>HvbBx&BmIKpz!j{20ewM zziGr^oBq7}60OEx`koW=OVuSr5l`$s2Q}%=UQ;Qa9f2l+y)r+Apcz^GgNFG9CB$=G z3Ini1^)(9Z2NF9|(-w(oRc`hrYt;Rv`=L^HGs_i9rdxeZ8eFRB<+{e1`NhbN=P`OU zr|!wkQikZm=t)h+6Qc}lh(cWqbS}=;06*rL4{(xl>JyO};&~FTb`sn_ zS;dz!X(9io^tkk4`2|OR!W7#&$I|0W|A)hYPQjzS#woa;DrV3r>aOnQuzvJ3xH0kF zVdp2urlJ6F-R%?1wv4)tMIR`?F=~Lkulqt^el5*z*4$H_L8gW+GoLQ5i*S+>%AHHu zXnRy7Bz=&N)RnI@lcdOGhD+%;Y-&&0Y-k&+1n8V^dT$7d~YLN!FtqNy!E5A^z$&pk;)jNGk0}pI}PuH7tnXG0sMEHpGyqbcC%-cM9(KW@KaKSZ>@iUDBmo4=nS>lKq8Xw_tI?0C^^81KFk`+%y!q%U8YUsZrzJ zz-T^KO5b;H(Fs-kui>)C-Ko?8UgAEvVkJc~VQJvrKh9OngjImxf8kmjV3$46BwrjW zk`K`dc_^PoOv_Sh@UC6OKlL!Wa5v`bM#x*?B*OQg$;+-X#a|XkHiZR;jb_HpM5 zE_OUU0dGiA1_d5B8KQDNUSPlVvUXio7V`O6I#{=c;eHzN##n#}JlJdykO8rud>L8h zsLyjh`y7W{hp=95UFt^C!Hx=XwSk9@{@Ia2^@+Oou*Z@-P{?k?GWMVx-$j=xA)|6! z35-G1*V|O?T9@>rFV9xxvGY3085L6t*j_(wmdidsR$SRf+Bx63)Yu$uPF;?@7#Kd1 z0o&@AL4Uay?9w?!mO!rM8bE03c8-MkE^Xj`JQVD3@n0Flou7O-_z|f%!Dw-?h(}of zKl(T3|Mc=d9S#5xk!@k#)5xF%BycthYaWEc zg&bbqwn%<=mAc$Az<+WP!U$ZKT@<3N^h{XAZQIF^Os2Ie5=|Pt-3X)^b_9aN zpTLp;r(>|?j?a4s_rzw$jmRqXd+|7uu`?QY)~k;q*&XWg%y7(pyf{C`l7vb4)S&ge z1gdmzUHxe}&Vw0&y0$?964MeZnB{#ha53PtbE?=FWWy!zZ1V1%W561=0n@BVTn^{$ z3T~ayv!KAdXxLSmnRPa*D4&vfc!P7jLANofKTNgPp>*Y6T!j)-(XYC`Ji|_^<%2;G zZ$J+@=iNhTYxMHo+nC`GvjdYMA4YCej+5~@oOFUp1X6~F0zI6ShREMg69)V~RYC-R z2yQch^7#!)+FR`GtGtwK_gt*Gds~Kk-No#%E?e@EEqpIcR3?e(*QIy3PFPI#8v`(^ z|9#kMCqc@5bn)-`diO{y2Jf$eA1D<%kT1J$A`ghyRZ$u8qg4TBN!sZ(R{xFBH=JfB z5hk!>;7EDQ(^Ks0pHT=I@e~uX8*1FsU3N;7P=0_?gkM%x+;2Bcba7=|D_d=^=B=-; z+Zz7+Z9wcn2Fpau0(p1lC$1BA>g_NT~*Tr-5k)kgdoyw4X`S><{(=Nto|^vSV|_DIJ_^Vapo2Me0+LP@lVuq_APctn+>KQu~2bsC}Z+!=e)rs=o%gI7q6OMB3K+>_^RXGsL@Sot>FpN(j%1Vk$4a1;| z$O9olfv1=0rwcmQ-a`COh&5x#4gLV}V;lgTCM4RD&0Oiae(KVm;E{gj$L8Tnks;3jx1SA-EXNP41RkD?#P3 z6goQ=QGd%Wsq}|UZ{n}3*39Qi<2F{i_wfyT-Qym!FC2D~QmkL)N`4r*Gch@NkDlT! 
zoo%`F+H4x3>ZZpN4Q*|zwP9G?H8=CPG@l9l6F=Ip zGeFAKwX^GUvu*xIRYKyruY7aD$ajMOoSoR=v(8?;(~tb;L&m}4I|o6oa*lSo>jQ`A zuGsdGY_*r9m_nsTV2z_B3>sCW>gkD6w-Dw~7h^+q8Q-t6CF5zvz*7DqIW@C9?&?i9 zIUkM*hU!x94hmt1(lM3+#n;-qF_BGPoRPV82hk*H#c+Bk9gD2hNR}VS;PTyfG9&aL zg1`as6$(*Sxwx6(y)-hYCt7~8>U~k(u%E7J-dR5>_H#EHiy`0ZI;tD&J&4QpnNjrK zS-|h((xY2vLffM)T9gqGGAdKstyppr6As0IJ7At#I*_Dm{~?i_Z)^}w2~*-uHaio^ zAQl#~nN}Y&0vw0<`E7m)3xxtu-6Q%s?U69H;J$CSgFVESPdR%ZT)UpX1W$l=s_^;d z+)vNC^CI?%VO*!I3!KsdX#eOO?nC%~(msJ6ZucsyV-w_+qix#d0Jw#47%f5KfS*b2 zZ;|4UvFRG+EM=F+Q@91MvM!SMR4m5Slex5hjf_Ch2Y_^qEn?V|Z(oc#zIe4j-oTCJ zHzm%ypsU)uehMh#bNwk)d`=SKn+G2Iy;{L95{TyGRa08s8R7lS2l6^w-ps&;lXrxr zNyYts3uI6_0>a(wapC0ssf+9=y`C=rs7%9s=3P8l=2MMWj!+UI+oYSJjdkGO@`G=C z!m4v5MfnnG4%JS2t&SGx^ERx6XFPpc>bCXUBZplYosy@WjQS@`7u6YEPU5Vh6JUh< zWVBonDC=51qkkwPo(q1|#4=WvH}6F8Ao^rcjZFEicZnV7B2xD2e-u3$Gh zC~t07at`{wK_SDN^G)CpPzjHTdm(fDj4NRO{ShF}dYKB+kyGZ>lFQT_##HczZ+7%^ zr({;w#VLWkOcvT!xDE@taFAW<1YX{%B@orW^PDi7`fRegwe0^ES>>S0u9N>#?}kVJ zLKj%gVl}OY%hklKkJzYpn6DVg#Dc4`ng6mOE*azkXCvj7djiCLd!EIli&0CPH1Ub5 z+Lr~fDf;N|)UL}N6+O@*ylWLHBT5{x%g{Dpnt&&%VpsC)9lm$*Ieqx(toCDZPY$bH zK*Hj%Y>7>*<*Kz+m9*>+ek4@`uiqoRbTC`jiAe2so&tOhPGQp&ZZ*c^mo5d>C>v^g z5G5pLd30ZA-V36`JwB%}-I4RMgYcTIZfd!Ur5pV#__Qk*3o5P*q5*YT)fVE>`=I>g zT&B40QF7hRKlK&2U1WG4kwM$D{B~oN1!q3g{rnWe=y;vu zUK<$+#=RF{mBDh99loHD>1E!Ov6?gcV;SW6{wKLyX3N?@YVbe8Jocxwa!Xf8<|SIe z0p;y4l($v3IBPo{(rJ^Y`g5c+Ymxe=vQQ2aF*P>eP#Wkybq0*OJ|37#CL8d}2+ zCp!GSCgwinh7k%1whezL7CyAvgwvStYLO-I@aXz4wk0VK)zJ!c4T4z}nYIq=mcFH zEw3iyu*$$gO~O&t!v}NfTOLX*9lyN-gAbuZnJk*PmUxRs3f2IuCG|5uzs1y8ZNIpV zkeAnA7=LvB(jXiy^>Z7$W&!!)VsY}oH}j0|ztW{tE)#0SaU+Iq(;JLcvc>Jc|E9l~ zaNz+#B`NTviq{hH%9m5UMFp(KjY%SE_|`CKl&W5$2#OTPW9LacGGdn~i* zqg1Qa$MjY)JwlnkPI50}SR_+*AkZ&jF%yKe`ZVLzwtDx81Z~A@1q0x^pt3QHt0?zv zxAm?a(Pdk>&BZod2YQjwW=qbH?{nxAGIS5=ECM*-JocIIb|AU^8n67HdQ@~pA))S) zh{w)j0$GLL6#p4c`Nz-{C1i2-{NR`3S%%TKl8odYf%%-05*kuqa1HbxVOb1vJH6NV zy-~TAD5)AWLzFX`y@>g}#j!oCA57k!EmBLBE$zJvi(9VqG6HQB40jqqxhnhRqTa^b zy;m~$fhLXQxl49M@<+?T{-Ay{Gu)=i{4OWYnEK@oFOl63Y)Nn47J? zPaU<7#PgA)4P_-<>R{=psB6Erl&`3T#kTY?@QFG0+&L;Ll98D~J-DY34)`YjVCmp? 
zhBWB!@7LfkhLg|+iKEg0k~0DJ6mdB(y7|OUY{b0r!J#C#W(didX)jNdT|GRW6h?X_ zPU(Pxr2c^F2W%P2?YCSn8BgX?CzU-UJlkheaePL$z$rjHV+>k!S`yzwU+}4Ii3iG{ zkA7M+ZTzqdO@Jk|X4LWT%*ws_@!H^eiJqT;qlTZvzQ@>PFQjl?%u%J{uMc%aEvY-p z9o!U{_ooOL&ZtVa?fu#)z+(FwSbh)&q=ErMw_ zI!mc|TwI4^Okj`Me@@GxtxN+tkG(%&y-gt)m;GaX=+jOa7tCSegUR6l8hsTaT_d8I zmsTGAim*7r@Jr_O@eE@e*m6c&79B0{F(cFi>2$t zQz>&$_+Xq4yUBaWIkhXzdrH_~Q5vrRh@LHYxt$X)M#Q!sA_Lp$_LJitVN-b*Lg$8O zneuuc07cEF3T~Wxa^-!^)e2T&@LYSb7u@wTDd_@W~lQVTKm zZ2i^eqrH4IwM9LK#CO>s>8eRBpsUPi)cOSL$$c|TQAd=tvK3m#U1@EpC$C>YVu(25 z!R0;RbUE}`V2)l;zfk6fAg-vaIqK|i9X(;F-x(>G^&#`=27$i=8s=G9sUR$oP_hf= zCmBv9sQz(lp6hU^bA0faTh*97^#3sIBVgko9o19j z4v?cx))TJTy-N(-{`#U73HHp zJ(<#=2A#N&dN8cOFw3%Kx#cbV{zxM1sWIA=a9m)7u2)>_=qbCnhQOalQJ0AXl_mT_ zErX@1tN0lEo1ggy-Fjt|oHM;G!|2q}ppOg^0wCDSzU#H?V;b4ld1Po4KjjTGHc$_4 z*P;h<%37m?uyU*EvP+@VkA%Bf?^s%}-kw%NzWqJ)KrwhC@6^2aqZa%V$cZl~fVu0*5l>r>pEqKHD-s zKomeltXxIrvgdgPe|nYtr##s?xQd?n{3nuXsyk>k4UntIlZTpaE!X;O29LY$tKdz; ztAZS2eCj(3R)v@I%{pa&`~QBLxr)(*^C?8vRP#yo|NRVya)(y?<%|5))kDp?l;|lw zrFE&+YN;&c?XC!q-8ADAk4l6n@iGlJ^E&-ia~)vzLJu49M(L6wD5lI0L5-&FO3wII zFSmn+<4|o8%J`XVq?Qho0nYhPDfyRQK<`*H+?bn>9XN z-tIPwKsd2M(aqQZ!b(2Cx;Cn0che?xAxbnu{eSb9ttd*eLV=e!3FQ8UfMFo8&2ybM zmf^LNH-3l;d*(n4`<4^b|})nPOzR0VY#zqd8c2>=n2A{z6rfojK}=i^W?3@IALc6LM_q%Ndy1C zlqh^8y;((O5A(;x{}H}?_FKu_2BoJQ!7OJaAMQ{pK%>}y8LlVnSSnE1F)a_=c&N>ll@-Ye?`g44aG~P$C*P{kg>cu<^H_bk9r?xFCT?W!@|Lh&EX$ zPiVzA_m~+!0QKqY%Mjd_*k;e6TR$&vtFmln_=U!Vb8^nJYrM~YYgkY3-@1d0I2K9* zU3AeO^eYxptX^m?e?{we+`V{%?i3_OPN|s2T2|fi3m_KZ#>@lC-OmHyr;#%spaS{~ zs*Y-;B-_mF&)08~Da1r{P~X-YvDEnYaq6E#ImwR1>lvhZAh|{L=ti+gtK|r3&3bYE z(^FmPO?YW$R&vU9Ga^G>sl~!9lw({5RW5f=gj0Nda1+-JQBj>C3NC%mIc;m%8rYU` z5@;mz0GfW8Zc%gV^)y=2hN;}ZW8@js9?g!v<<(rwMwc%Jg)g;SoVEvrbH~|->#M## zqu;5V^7dXAV$>Ao&^YY+`?e0uq~QO01NiB_2F-5Vb3KoWdE85L$y3SqubtB-mJA%$zES zXSJRRyB@O8mi3c|+D8!M@qd%IOlpp^4HvAiuNl3c)llAy#fg6{)TzU`5PGvXd z9@5C%+?iYR>2kKg@fC~L(jRer=iawVid5R7^;Mw8+#pwiRySyIG=Q-Z`bqsk#KB$s za;3$Ef=Z<(ZzUh}nF-aK;mo+a)?3sZ3b)kACitla;)tM=*4A+S_SS|e?{^XLP*M*J z7Vf(41nh_<=?r-ZTf^Q?MBk_1x@(ii`czVUdj!SHWuTsii>*`uL{J`$qAQqTJg835 z$Q|MFgCZ}W8#Mxgb_#~oqN@>?G~;$9PyOzHF2D4m9g_P&yS)%_3*v(kDW>T&G>p)3 z{qYZ=lS7&F_nR5N)kx@7(ZmWbzxU{p@ZuuH8!$Avy)qGEm@xDXsr6*4{cU z>TcT~S5ZJE#h{T8M39sk5Rpb;Bve2`dO#R@=nz3Vgdv3yr9m2`OFD<{ZibSU)ZZST zbMDb|ea`pZ-|PB^|1dM3{n>l%wce}Vi^ug?-Z#-h2)5;yl$3oIgpOnktmz17xi8@y zMS&ZMDfw~3YJ%{^mxrenymK$xY17x|Fp4 zfT)j%DsGfam(zW!m<|cWUN?sJTcM+Pa;>LHY<0Dg3Zl5GN4+psQYHlVWQ5 zQs}RU)jzMv4M4l(NQe>_M7Cr;{gl-w3y0r5+FuV8yZJVP$IMXo4g^+!^k4yCA`@v;^epH0ZIPuP(>H3=3+$f>RvX#)- zu){(?q=~%yEzi*&!IOhItIe1*qRuA|PuBO0@SEkijy4OeCgqSl+^a$&C(~pxWsll> z@O!RNOIm5XQ)l<#GaJ+e;LA_4Tv#UTCy4#$fBCD4w@f4-*EtA*2|a+XC>B)mWoBNI z@FkK$AU?0?c+S=Iyggy(>3+(1JAku}tmf#lv4d4q7`@;@|K+VcPWLMkY^M|N!x(AT zYezMHG@Lq|;t_i7*U2S3xaJ%5$*ZP$%z*l1(}3d~MrO02LT}A|7iP7c_N}S5Gvv+~ z9(;+7Lu%qf7^tJalFha_okaYgHtQQ~HhbtrW8e>`>s=x{oFaRGP0S{4)6%t@`)a~4 zOvG7@sdBIY^LDle|M@cyKTPO@paI#hDPb#(En-N0$!v^`lr(+2PuYFBjZZR8)+4 z;8E=st*WCF0wypG)1BX`f;wQ+?!r1FIk$710w8Y|F}00S=4I9a~8&s&>^gsqfNQ>2YXhHdXt0o7_CWjiEd(uzg2_>@6_FCWm^DuKrw#AnH z+s-zPm)WkLNy^IVAmYL$AkT}e_ZxC9q*vo_jl`yNw()>NAe`wkA+zqHSy?PcKP>pK zwCAm#J@n=aHlF6aJ6O&Pa~7L7qYmG;{#z3(+K|@$K+knacnXYbkYC@~!_w&;GznCP ziL5|Hmm&?>mTndJ3j?4P!)F#X(Dh9a2ajm2I7#;7FjE3>$8D}-US|c-ybE^8{XwB) zmQy$E1^ptFMjLc?!YXGtsE5M*xo3^eeZr-Q&$!2KnCIj6$5O*JIY?;=c&*EilUq4u z+s_nUT!(ISyzZT_p3(j`>w`@DFqkYvYu3D7^27U6wH^0&jPSXk?+0n6cB6(72u5Lx z;=txaeBamjxDxm>4ECp=OzugSq$W$=k-E*1D-C~IhI!kuW!_dt6532+YANofDiht_ zc=Zq`J$>U%X2EPHNL!fTXDce25K5)nXa=0z{*)#CREjkK<5oap_%wmwzx=~X^u*pa z1o7oTv>94H#s`}-fSIVp7)IJO_j%HM^Es@#9e8I<9>rtCPxy-1@8kbGR-Pdl>c% 
zYUyqcbbp`)V4l#J2XlpL1!@jEf5ZJ^+OCh~(@5MG6}IdsF;g`jB{WhkXmcd$0we?!_|Y<3 ze7`W%)!@7H`u6I;4Fl;=$**W?DOqLKqmc2-G9LQ`@zzcD-kkA%UeYYoQt3j2Jt0pE zmvrTdE}h~Ae|dX0GLuQ%FZ2Rl4q%odzi0CHwAy-3y5s%*F$;|BzrmZ1WuvmqOV!b) zAPmY|XItTVXmjlKBHxVR!7EoLTA6y^uLe*r;?HhA25af`_)Nrm?AG4mdoco*w9n;N zxpe1m6RZ>(+`CL~Px$)FO)&n!OEHdLZ>H{z4f#KkUH^a%1vdxuym@i)^Q#0y7y_@~ z6*n+o39zM2G@!AVwR((}UCs=#YKHoGgrxb?n~Ut?bL+=xYE&r**RqumDi$fM;{Us>mVWQfb3^=D7U1&7e7qi z(KdyO{TE!@fPVGfC}91~S3OEf$~?y{{R+bK=LVGxu~68@G2bjPyi4LW)nv2Bci|fk z4@hr+l5`pgVM|pny#|LX+eZ2)Cb?e|b>2%TT|!rE9++M3rVcOk11osQIdB#ScX;pX zGS`($Z?f>%U|2S1IaYdmkt{E*g#4r=#xj|Iq9`m@*!J2rGMQ&*D8?I%E-vQBcJ4v~ zHhrt?u2Mkk(%meRj6&?S=h?&68oEzrR1YwU4{8m*^3l`s2HOwTY_-t+@FyPf4(GWq zNBW4(SAW(bNolZ@fb;b5#=#ewP<>h-{gQkM<)?`&4BiI^z^3l54s|(ydiLzubKfwd z_NWmG>3g-;%3je9b=DB&t^#_Kb)jL*yHbuu9%-pe`_|AJ+Ay@rL`wgi!;oJLfVv#PKaZ)XPp)yKx(LkgsypJ$wYFKy0Uz2uBTk zQk#o~z5h~A{2(a$g*yd+1+9#~;?OXUW#4E9sso8uQ2X*RC8ZJKPe8>_V9*@$xiFwb zS@JDIUu?Pn=KMRBIG^xrt?y(q_r^V=e58vSandt09vW$U(A1H_*4voJv&GF9248Mu z)@i#YPAkiA6Jh%DrMMuq^w-HM5P1m>4z}+WX9@4$DIAC+ENfxI8n9sv^SUoQG*Z<^ z1G)MOZIRf~pTP;=chh1-oNQF|m(tSAS-jQ$5B&AR)fYbk*m6qGVZ-eYsV2-v~^jVEktB(lEnKuf4@4a;f9*b<92h5vBH8s1v z@{Xr_Lp?n`dNr4Sxkr*YGljeT(bdlO91ySeA3NHY6I^yQ1aLgER{N9s%bD!q^LRIb z7^PV$i9a@h1fKVLDD0P!o7)VaLB3u+?&@i#XYTSv^%86fNrukN&X#8G$~}+oI|=zf z0}TlO`Y4^Z-)&xWaY#Y3$OyD-7L!6n3m%{sc&)zcIsk4k5JjQy4Fi`@+^_Q0>Kq`+ zIF}?7Dt(hAk8>T?^*;C-+p}S#m-h~?>C_z+nf5VbXFvdUAlVH?F1I5)ED86ZliaU2 z(VKkYCc}t1FmGOz^X!x`)MA?#A^DIb}}=Jzy0fBo=^rbIjNY4F)}jJ-l`Dj z5~`n1i8aUaRW{Rkh?K$1Hp*8yvNQ^%_n^N#B1~;FHs!tMCA`54qFUMMCu14y5;qhFM4~`N1OOTx0&=c1U^3G~ZEczp_ZpgU5+Y|o_&XB18)&T^ARm-) zIK1_~=1vopLhh>F2)8k*=WuP{8wzR3DJV0*eA6Z4JWv)60Hc-J@0%Zh*EF(8zq}Zoi zj!vZD)<2I0{%hEL(g^jzm8n>3NxyYZ)veQ$r3W(LpG~MRFAUuPSfRpBn16jv89m|| zX}+0+Oc)rB_LHE34aBXqe0+EVH_2(FpS+MP2A(RAbR8rl?;{I4o-h@^`Okzpkgkd# zzsmH+{mnMETwTC~+iHobACXd@`t|o2)RJpXhIqHMkye61SZ5C;3hl4kyXR(PxSa_yFt26l&oaB8#@hC2B}; z+aH-%{^b&T59hZi8xDDW6*@4`K)c@uy!XOp_8J&l-=v)>x+L&2=!o3wbQq-LnihzF3XX@&2Kr zM=iL6k(w4|w^+}viqyqQ#?KL8Z5vd0y(szDqZMCM@1;4nJ6^%1SJo?+oSogt@uh32 z&yIXXPZIpc=FyM41m9+V6qr^ ziuO&Ugzqn`62PuyWM+=u>E3Gj5?jZq?X!CS(l2i>nInrJ?^zptb1^zzquNzy((Rje z*7MJ8)!(Lf%s2$*PE4ZqZcaGK`fV`$Jm(8%VZm4qBQ9Waj|&pe^nhObi!YtKA28h? z!IV&<-^?MsJ){?fvzt}Hr7M(*ev5XX2)w~r{NU9Bu~At+!ITRDov4;P+K=|(PpxtZ zmnaHbm4a0@53i=_U-qq6uW_$J6$b+yUGex=(FE+GG7?K95bq*D$h$uJ*~HSaaHv9&u2!VuBNWWv1UXzAo5v z^~Cy-fRpa8M#Q@X26AzF(}Mo@`1!G>R2`HS5Ng@@9HShl=9)uG9{4Q;t%&b3*<1(r zOxen6ni~In*0;MbN{V|XB#$0F>IQ>X0R$My7zdSDmT9)OmG+;Tl74|P;PB201+KYT zHSK_b&H9*LK>lO(aiZ6>;p;a`j>q50i!Ge5R5En4i|!gAm$jIbvKzfYYPA7Ahi4sT493d{qO-R}tqXtI!k zyG79pw7P!H<@}_-ER?nnsKdZw6*6Xp+Rd5zbq9!_`EcG@Ab5V#N%qrCh;x_)w%^2c zo(j+|Ssvtk{P^+9clF3Z|Gdi5WP)$D&vw6f;f=p=Ktj9gtZZ!cAT7Y)(ake{(-CWRbrR-KEE#Y|KPynOj`E%Z5f-1gZ4aq#U5cIss7S52N2 zX|LuShz6Gj+Rswe*a}e+?UqJO#|znJFE z;(VAmgudeSvuYyYFnwGa+TOWYb#@{C*4DQXFi;bTKP8bzfVmm269vli{!&Z}Gt(Uu zAFNp^b8eIl>9~y4p4A3%J(Rz(5ZlAG4^?V7Y=87f%B>wp-p2gsbhF!oWDZ zF%FGFK#wMTj6ZWTydc8X_~!4fS+TDj4ClVtKGA4+*H_fiVfzZ;NiBvE3We5h2{NVj948jW?8-N^Cc=Os`YyaJHeS(NX@0f-}b17 z!a1taEg9l1NJKl%53C}E#4^AXny#}|uCL8Ro%)Ijg zp`xO~a4g3a7=~K1C1_=Q1s4*F75j}OoQrtx?H*aDCeKBA(zX&%H$YZtAR)J)N9jnM zr){b}1s0ov3xc=pjfJi2NUKiU0ZT1! 
z+D=xCVQy}1BaC8!#|J~}c457kd8DbqSf}L6xD^GKo!TO&4CGi!}jC&dBK$B_Y{5lR~`L-a-kFb>5+XIq4frc?VoZGza4 zP`ptCm)tQ6MDSEbdk-f0E=?PE0-Ddux(t&{4VOlVY!n#?kgLHU7uRRBBv_Fdv4VJFO?&9JzKp39&2Biy^EM8nj(@6RA1o zs{(ULuCP;qRf={|DpU}%c>NJe-=d&J#msW@eMds06HM}op(w@pv~^Q695%Gx?`oVm z)sKldyAzY)p0;YJZQG8s$4!z(Fg~A-w1EoSBjpELcO3>pXi%sDzNW_MWxGPW_At<; ziEcyf>uY)2pd!zD^*Jc9NW9d^K=K6fK82N2_cauSnIj;@iY!lIYKXg3sG!VuQzR$9#7Say!G8owKB z`zxD~6@_dw3&9~?7Py5V#I( zD#D;<-x^`8T2naCTB@!Sa(~>)u7DsWM**TygW8`QaJylo@k}9(s$WhhH5ejnrrN=> z-~{iGX6b9HOs;nexo@Jmg229esfn&G1W)Si?*dixnxq$>3^t7=N+I8p_YIlI>Y1# zvdQ{VSXvUzcg4n%g2r+&CUgP0PpFkvA6j7!VOG~;b38>o{C z*p4zgY9SrogAqlR8SWLsQE;*uMsF`6RHC+$zoi5KNPWQFf?^IYK|=Z^eg*o=xwM$65Bk+cKwU>GFnpoVe*Jlz zsDR)F`YN%b64^ z5M9xN1}tK-Dm~h!EQQ&bFtE z5m6R-ojn%jn*A7dmq;Q@$B`U-55h3_dinIe470UQy{Iv2M}r3duU8m3rIr}r6|=4w zRRWI%;f7yp0M#}S>2GzeJIzqK(H?M^LeRGq#Xm>~amuHgslQV4J;0imBQ^At622P6 zA0=>1KX_}Ob4_@Bt*J`SJ>DULu`(JY?&HtRO{RXaIf9A;6ggm zXu6+ov>~6SB=)HHiO_yn#7sR=v0gD-)I&&CAiKkU6+2Fe8Sb3nL@IS(5pdr& zipJg;!!lub00vf;CV^5A9ur3NT4cBdA=OgHOkK=X*MwEtC&COg^|gvF8>!=F2--kj zJksuuot$KqL1j^~bck-kqej*>vWm0t#sUbrhBTF(9WYfX1i*D4{xvbG7=LKP=65f zI0_GeZM2KipU|U2%CW4HtlnDH2g^bfh>SRt4p`#to2?nhrdCJfxfPuD&_>vLK{Phl z)mA#5)Wq6@&i6U;o2PoYI7Ts|QQs<^cEVjsd>|vDPMx|x#H%a>-B|mYO2?1b>YSSj zU=*9qO+50x7-NdO@sZs8s){dj{jK)3+#b$TGqw8-t5*>!=qI4)}uL^=dvoi zP22lDV760@M~_hK4uh7(843&Nqc3)OHAe9mrL?~0Je|C3!*UB%7PyRC|7t4IsMfyL zks@02McXbHZEE%>uxC7OXha)4OUxy&%V@%|eEf*7Nydd6-rrbYQz4*B4FtNiT)F~i z>HEXE%J0|jtJXB_qqjuhk5CrtC{u$fqf=vwO-N)$)sC}tMK^8i5npjx&c@b7L_syT zqB$PGJGrX;m?8%O^^KEjd}H`V<13VUar)p}c~!7purE36UJBpDM9`}gtMmwCN;W|Q zz0_bg)1#Oc&xKa^K+`U}(8s_aoucn?++vr<9^dC%s-md;bM3eL?pLeU2tBhq;u9E# z#yn09{^lHOXA{(3T+8#}{`dzD9SMybfQj*m!%Dcy1s?7^PQhv?SLBsmHhhXRL0~s?7 z-7U($n_puj8V{pT&@883y?_A5(OrkPH@k3ZfLT%Ty~xODlr?v52X76pVBYu5_iqmkJsMpLPE>IE9=jEx$R6~? z5rvkmKW;WFbCm{V-GFuN9}_@|uPI$afIP7_$P!G$b7!Ii7%MN@6jx5~ zEPCRoFWsW1yPksA)DsWCZ&Co-152*Nh-&r?j|#`txrLFlv@pzXM9?aPx=Ll~DadF(4W3I2I zk{_fPiSV<|rpvgFYcX9Orj*>!1S_($21&|;iH z>(NkRsc}Pcb>HFQ@s)REj`77t0F@vb6hqeiAsP=Hd6&U&m(32jB{h4?whbXq4);JddA<qE7lVBFQ$*U40*4kh<}VcJ5CzbJiL`c-L1JK?gVu-d!bPA$qi={m zP8_k}rUvd?(b|`$1fO1;(;qW%(H90fURV%l;@FjvJ`Z6?nPRja)kgyO5@_Mx*LCW% z$g(YpRo%c~+WVtZ+k_w^lNwQLaT9hbH`-B}O$+v&y1Ep3G=h)`pkV|?6Bf{pCNBg~ zS;Ek`M1|DvaYnVGK8O0JvUhPNTnZbF> zm3$-?iaSVGj1$GUnyh(hOpTU*gU%VtLar^vO0RVEH!G#tx*2;cq$0myYEbT-iazD> zcy;};89Cq*Iq4q(c$Af&}Yp!*o%yHl25b;~vQqge)q4@+R!h;D4dvWVA zkTQf$5Rf#&!n=j;g2W+xM(Ql_+zew+UZXi3ZEXIh$4a5qp$-Fn@~HPqSTB_+1I8A-yq!Lx;g=$TzVfvbi>}Be7|NeBg+4 zD~pw2uv5x0>b|Bf_D{R3{9dXu1uLPG#k$H<}yu>i%2O&e;DU-i`31DxU4HQJUk zV>@6|TwtTazDmH{yas+PyprFz7ZY@C7|j!WEgcDe<92IIHI-k=C0`_#ANY{t^T3+8 zfs+9aMGWy`m=gkN@Gc#>2YlK+H3F!xv9PXodw+-%p%I1Px?t5;W8LpDV3V~_6RXOc ztCa11owWVHvmA&n=u{3lL^NHpFtyDY>qz6}P>cq=Iuyi+0{7(h{0%OSD7NNhXxyV{ z+ZD>Wlz2^wZd~A=?P9awN(L<9{Hl(^r0 z$}*=nk)oJ^>~g?1$HrMUeW6oZ2brs4kyxOCMu!wx*1xz1Lt=|}-Ggla#Q4Ia>t>{d zS%3ojw$o~(TUO|vyka+RqPjoTuZRMeBC5i6!dP@-l=k@A=xazUbFr5liXcn4Uryb_ zgsiT(sj?>(da9@EGA+93>&Az(^H@7;Y>gOyz4NUvrgcW3yn%LSJCVWp zg^^>CSjv3eIjs?<1h^KqObuJvWCKc*$~wFXjizr#dKnYh3%y;?MIDzRL~TEG#fePr zT76Iu!lO;oS<+*|tE~Ju(-}Y|vLPZns~d~6Vn6b^Xer!~ zz`9f)>}i>}IVMe#XIr*X!@(O6&GsQwLiTO!ctShOAQ4IT4|sflzq43R2tq(9X!bfP zr0_;5jxV*1-zl^-RDdwZSHRMuo7NT6V5iE`-cC3f#6`?*+thfk90Vt-9CxBFLM-xf zfc)9OL|msOwSxi|zT&Qu^fgnlO^U%@7aPSYvsj`*2xPIZ>}+Q;(g7b6df(nW%^jn7sof zUl3M81fRa^OTFD}tMU>0(|GU$1%xt+-FA)g)bx;wk8^F37%Rt`4n)Cs6=u~q8`|Fd ziNYMCI}UAv#0wO3$H#W@zF}wynkby(e7hLtHsCqcW=uipR;G7T`ddYNK~#Vm!2>U% zS>@ls7!#-E2;FM-flc34vHi(;V926ibygpeMyJs8bE?GbNW1r(f zbxhgPs(8V05n=7s&u@hr$N3CnYMqWZ! 
zGIB4<4X^!`Db~YzKdypRl8Eq}BbC(9eO(ak;z%3MC@joBH*RGKfLfQFAM;zSfomTU8p3mBEJ z$Y&sJ*l~(06yO2C4BqU=Jlp7(a|`19%!$Z3OH^shmCzpxc_xe1Pv5xVu}7Apkzf-? z2eWCCUzBL<_eT@KyC8Bg#UNs4I%mDumrbCuN;!7b3!L29To`bHo@@lcKy)y|wolF_ zuS)ejM3~JX^)6s!;UX&b%vqsapiVnG%Aoq9%?9?P`6Qo!=d(kU5MtZ8`KTunx{{;= zJQMASIkM3xrv(5I1W*lSUv1WAe1O7nThGc70>;zRO_>=rqE(`?QTd*mDD@NA()IQt zqZN67+fJ*tTEZ1zxe0Ff#X=X6<&N;yWZpOz*EDxqc@*k1>mm~F`z;!{B?R@w(-EFj z=+cON+4Z5`kKn_mbEihB*5rKy1Bf`t2uogPo(OO7wHSMV!Ke3P_C!ACB0dnbdqQ{z z_@a`)dNf=6Q?dSt$EtbO^e#;Zw8V@Nk@_tK>x8UhA;J>vgVS>H_;P7fmPFPP#-<-L z>}8LzOKn#M5s9(>eDQMX_HdwZA+2yo21LUHi=c{F=k(oGn=fH(z|#0rplJjuAlR@E z6UwVegr-4L+w!YH!DT``oeXKmbH+lb5h^~1K&C^e;a$M#hyb=n z72I=`+%e9|&2u>w8<178#yJpdt3G}Id?=>)N&LZZ)Nt;Wuu<ssK&9Q@+u0CS<#``|U&W6sYyp090J6Mat!WqY02-mZ!93mgr8Mq1# zM?ztX&yyR0+3c8UVNJW5|3&Ngxi0m*~3&e~|EI1cvs42vSmTVyr2D;_o z*FfvqT@a1lx3W_~1I{i8>sTzcb?+iGh|qpfMEeJhp|@i*`~2I3?NAn694yuEdofXJ zG8`bBtpx(?&E;{KeS{@NDSb4kh*qYOB)e_iP^7utm9)@SX_in2<`Q18Wwr*e?$$YT z&e#@d7VCKs;w@5U!6>j8-DX2w#0FhYpF={R7FE?gD!Fl5nb0<+K}GNY?3#=VWXjPB z`?5oFWeTS90~qF|)IwgN+4(+24s8~>XxrIYe!hg*c|WAPK*0c}VTcBWv9W;2F_%3- zF+Jr#f&g-~9LtDKnK{Cyoqwf8AwiS|@UG;eK1Vf!*zlmH<>@d)E>kNLtzd(~FokHT zodR7&+qPxPEYuqb4S)9-Z$rLOPE&2Z;vYa`6pCiho=(Yk;QKq57K{x9!^83lhrMf7 zejVfWSMhap@z`h;7@+%WwieWr zNKV$?sCL%l@ZO~SnHBvlbw}R-JqIHI8R5{tztWVyeVgI2T*6aHY$XEc&pN7q`|WvF zY_5Oz{LhU4Z(kJm6Jegv1DRt{jz9iUiWd})IRBN#|M&I0#x6wXuTGdVwhgTDTnzp~$ffiNlHJ^`NlMRqKy` z3^4xNqe7j>%Eg~wBn|r(l8==PA!K{ZD6t*GLR|HK5}^GdH;bKE1z4a!)n)JCpijj{ z`Q>OW8~@WuP=Rs+%EX!hd4O@vjdS;ngy}2A5xK{pb~f-=E~)nlnsRa%oxg1ai`~?% zN$W*A3)lbEsBFv@ec2HCUIs*)KywLOe|5!wXOTcKWAFG(LdHjRz#krGUK( zDKpW!R2gpmll;O7ym+(xSgS_z<;r{eIX-|s1F&zkF&~=GiRb%A+q`*;7tZ#xog2GL z_SfS|g#xcq7U8Y;UK?q#$-m_LdeFFuzh&Lifz_@s^#9I>C8`Rg!`9Sc5l^3G}!B55;DG%o00pc|xSHMFWO<1IkA^ z|5dpA53&1S2@U&o2~aaXn5`K-uIjiOvF^QjX`pH&X}{_>evHaNCoqU$bg$k&Uh2xf zhSdGPT7yBbGOMr!TU24>NN4lo^xLmAga5PzNI;z6Xu76Nh;0GkM@K4dk2L*cBLLkd zOutULJ@hhX*{1p9>V)8<_e2iok^6=JoAt>#TtDoMuJ*>Gb-ekfUF0mg_PbsO79ebC z^Hv^Kd7TK1Th#}*cGbNx7uH0yjm`c|CKZgX{duYWwk-z80-f_ReMiEBsqfEWpa>H? z7^0`date$am~Z>%$lreq{`}A8H}Hiu<`2FUXzpKoD*MYdXh?f-a;H>{+*#+fB zGeB*80R&aq9XF~sFPvZ=dpx2=uq5m}Du~><@(*4IPI#J;$?s+5g(^)x}Dwz;0kG@BVDyg)jcC{RS`v^%QH1|0>1){UyWF zfLd+P&!1?h{=H?_d}1HEF&%Ar36&mhmGk zmbpO^D_c3@{M#cf$+&n+wntn2)6@1@74+9i=Ac#~!ei#*u!4-{Ub)Ejs|Aq;vXP3X z@w6(|!tZH|WkO16wkYSx35A10b>1I`S=gF9fQ3%e+P4m2!j7{n`OOlg4Aa%MSM)y zUluDD#?kBJ-c%;*uqKkclIYhw%fDGjxL6t~uw?PQV;bLlGWviz^wzof+S3mwQE$7= z#M#3Ihc*IZinwF$jS9wm@Q!-_YD+pu=eTP*=%%6Qn=AIS+)3~0AF0OZHosg9?Mw*s zkL~goJM`8zXp*+J74L{A;d3&mRA#InjukKigl_x1>T%C2dawK2!av0S?tokh*^QO@ z*X`?-CF06bm8g5lSl4Cz0FTbj`2#+RcUe|{pJ|@(q{Bm#`qmZt1p@{vI*Aq7lFOqH zwmxKQqKsE%l(!vKqA&Jzo@3r+lJ+1)X^0any4=%$oliohOCzk{_ePNmk0K~LW>KA8 zUzKt;sz48^WP#azC_`-fu9Io5fR@F0I=bWP4`xy8xC7V@+}`RzyH7K<48`x4{wnSB z0H9|0q`1GwYa4oVIu~MG3nSUi#|+uUeA_*>;J2=>Xbii^Vo`EX9;oRo%t&>((I7XD zZjBhSqp`5Dz_eVYv);E9^5wuk(J_Rzm{^#}+y=}}EgH&&eF;{N#TxABls9%`e@Inl zBgjb;{EyM(jdy?14T#>+V4<7D7CH2mQfG+Expx? 
zc#Aw#N_n31bxk5QzBI{Ys@5w8xwHCY8A51_(%T{WjAX=dj&<=uOf%&A>t?e>KhnJt zf*l#&SfB5hs^;S>c6IIWz%3rSj%u|qB9llhRRvXd3AA+9)oJhNn4=%TAk+n67}H__ zLW^Q6*%CRJbz0jxes5otzgFw|BhWZIr981J`f>_D4@FSK_17rgFJ>q#OYVDsE{_ji z;SkjV@-wf$p!5G0On_S3(R_u`FsT#i?uL|s!kuGw6!dSQPs6!q-G($LFD?$ zql7mubdFaa-#@;jFG?41Dwdo*LTw@=Wy<}`a@c!Nog(vPrn0IwP7Uei@#*c0SLufb zExeOVUYbg4=GCu#`$C+sw{V;2ypo2pKcL+p0|Sq1TY}L_fxVLWITIO0B zy=AQMI_^W_{UY!aH&YmMAhApmWQ;NPzES z^+#Hl4PJ@_(JgWX4?Skln8Ndn%tT+_<0(q{wpyyX@E#FlN+=p}cD&Cgxjd=qle)*& zJy<{*rgYx@1?ltWl%yfv9mN+qI|-+++`ILi+-EE^To$5SY|a zZ^HoLr#ZZ*m@U#JmlYukn+&_udU~qZ_r@-L)5e#5?c(gNJ7kWB+`JEuZNZKc`+tMy zoricdOfXW3zd}XWIrIM+Zc;RB%m)ih=JDXsxpL_^Ekypwufm$XFzLj;=_!T=(h$jR z+F*L}Z}hMA8v^9veg^@{+QMh9RpBOa2r&xIO*XCHw81quy^{;MzI@jqP{%F!&Ml)NImma*s=k$b0l3xIX2(oxK{`JL-Nr zXC-i7j4)uuC&_r&FlhAnOqK~Dy*K6gM@Na3Hm~ww6p%VG?H``$%fcjI5^ZcNi9V`l z=Rdp)2{I>Z>sk9GR#?;Knj_|0bA{9(I*--sZT`jfFHDbaJo-%!>fi9>^n3$)d$Bz^ zl9YB3E^Qpx5*PhzuF*QU31RU^#w$EivnGVTJ;St;wt^=60!bO!XW<{8od-;!0tFvs z$-~Iq*iJTYMBgNtCSYK`YODFOI7M2XJ0M`0Nq^T`!JwJXev4dEkWo&RY*cs@ZWC_= ztD7Nl6@8%+-#&kpp8UeQ(RwTkP~DT46?`?p?Pla=8Ok$(6zgGvj~K+tlvVwoMR-3; z8Xxkxa(*Vz{^mFsdNgQ^R5G}bKo>VC@bN{04s$q(Pbm4T9s^v;Sm)i0yI*QWpWfzk zV&~gBswcflb=(+yzaFl*Y+@?rVvEB-lP$(%B91p3=BGXB!&`HHJ)DnOkCOb?Rwi$f z3l}}OGU3k#NssvIsZWOzoWkoAOn22?}^m8s*$Ky3i0YytbofADop4 z85)@f6Y=Yqb3$}0dU`@Y4C9`qq)a`eD9%y5lZwP*#Lq)CyWiVqQPIRCL&jEyv95>c>WxOAH?zF_9w-sK$fp}T%WrX?MlkbX*rEA-gVV-A z4Wn?ql9Rz}JnQ(Z^n=Ua9{ke69ab9X)dTQUq!P86A?f+S+@f`xaN zS}HoQuMo5l9-f≫Dq&kq+*|4gy2$XrAf_qkXP)4OK^C!!x7^;G8y_+E8f z`{F9s#eK$JQvT!F&Bh=%eNnO#u`K+2^$v~;S1-N^4i+Yv&0=7tk=n|W3k>^ai=mBr z*0rDkP}?uS=kRAI;ZN``iF*Z2>0UF1o6r>+9v<#Z=6g}JFmw!%U%Vg|zAP(9osTA@ zFHE|BMchF1@^guGlfd(x)#Un8je`^oC;6aYH^3)^o~W_0QCh}yHqhV8`T}TG+Xc<1 zuOW4}`H$LyHf)-7_nY;t`vpGhOSrKmE>;K$k;)J&J(=fye%M74l9*CW3c8BK<62>Y zQ86P-{ga+1E-lodj7l1w0cR1n*R{R)%vSTwdoHoRyHB^!M09J5;UM@lkw$vZ{In>$ z^)gJEb^FH0cS>mdjhd&VZ!n9mo_5<#~?H{JtR10PMO!<)c<@>_vGtT zU8?624~y*WZjuveI6ynN>SNJ~qT+c=i2X@w9CPwJNBer&K z`!>fQa)Az}{!m?cyyq)%BzYkW%9396SL3=G@jqm<`lYwZP;(!gE(*eQKca%yPjB72 z&3d=hSlidFq6_4fi3YBG=$?73p6Vud;o9K*{ULJoa zLkdb!yW+1gapTBRZ>0ovk{i~=)lK@wiw<{1j=(#Hmh6QKRb3cSxlzoibb`te;=Gw`bfqeVTj$+xFvluc`WW|D9Xx-txugVMF@e9+v?txeL(-d=jh6$UE) z>?uDA%j)38RA%JV?Zh6xU`|7xYy4ZbxYuj)gS*ts*WTq_hTXgvS0Fe1Z@rRl9%a`1 zOJrMEp|~~&wv&ApuDDAM~~6+^o46tRo8HKi@U&L9zT7&XXD!S z-!xqV$LpOCYygJ{8-uXBgc7VHC40@?kSNv!i>gT`w8RR(<`miUKRwvq&dor>yJ?p4|rK84{Vv{P$-vzOIj z1kT>WdT%3>YNV*E1z{W0M6}DSUoRq6keepM=XY52ue2FiW~s`2=txasCy|g!fGEU3 zD17Pi9~|x8Y0i7-rmUuVAtqTSm|O6ZY#2>TBPsrYIk>fAfbt;V^sF%Vqp;1OB z>iRf5;C;_U`pOS2cFZe^5YI)qudgOrp5>5Oed6hGIdT_>6P%iuZSf{6(R7_?yjz6| z3wCvwWwKZ@Y%?n-5)dk}GYkk7JT4LSO_;-%nLC*20`&DK3v!Zcf#T>B&v^m{(FRPq zvX&u@q{8_@Iqx3~Ux=>8&e_XdFTBgfSt6fmb^S_>P)%m?mCuoU>9_AnXuK-4Yaqct zAmt`4_DNjs^WXwKve~qd*x{nsQbuLEN3P7;aK+nuw#;EX2W_87SU>Uk->LVPS@UGv zr@cs)LMpKJ&bAjmBV7(x>5}_Qvm~1l+Q5}d=(t|x!y17QClV{uPMO1 zekF6=IBB|})__j9fB;d&M#`~0H^u4t>PCVKG0xGmxwntk{kL3##45@0sc3?!G`V4m zlvz5xuLszolHBK1K<8_dNeZvr8Tfjq$ybwm`zA8!RWw_9respq1nGC1+CxSC%C`fc z6F+{sUjJJpttI;9p08q@g=HvcUcNb)OPp9HWPUWBT-!W9HyUz$hwXH^FYPK0Uest; zO3Fo+hxC=YF!Pebx@P#)8^2#tNFaO)jpGNbzlX`EuH6suX5PDOd`Xr=u6=uAV?>s_ z@av!wY-Ds@GDqonL-bDreUA24t+!iQ<~)8JCq#}oOu20>##u7|GSTOsq4w4JfkYB< ztC3hP@h>-B-RT14AFTz^*`1rQxueC;e3giv{L<(xQ^7!d8u3T%ZCPG8+^28vH|;Dx z(T}#`)@-w8zJlhzGS|<@e4a$c9}{`vM-eN@66NQG7Ei5z#BsL1)Tv?Zb2H7Fknw8# z5?_2NldfI2a$~f@S(lz!2IQm_o46tsNzRMQMSS#=Jx%_SN-9@g)wKBL<)4-4n2$!L zMnt_kEqZf!gDXrDBJQUNi^tbzF2d978Sm9yyDBXsBRo2$+0)x={-AH8*h^rmn-+9p z-#`y5zo4r-{wgqyNgJAtRO8o2$6wFVqG(tdI3#_Ry;6H}m<`xQ6dku)2OL+koy~_y 
z?zx4@OtSx?gZ!X!9y4RAUmt=N;Ooxqx4oCI!VVrTg>a#+E>kOCdBv$@9{s^zx=cio zeWpma`uNm6A~I_9E*|G#hUXYLjacauMg1sph+p@NH!bm^--yi6WwOvis!JDo#{1)~ zct%9dJc_?Tz)Y>WY!W`)NrrkD7WVlCS;75tNs22)&I+QC=W#vHYtF7y4SjIxyyU$i zkZ3f=TvSq22Meb#5>==UbaS_b_AmCuo1)}gdjuJ1^csY`0`fW4Mn1B-xg&3Ih)#KpWAK3xxI$lsF>om~_&Bg%+~ zL!?A!Xn|VWm#A@0nGF?o1@ldx%>AweXH1qWQYLC?GY>&*QK9+Do1a4c&C`lKI)U8x z@U)-I6hd`Z_M-CiW3E=f^>1M@EQTqexFC8*H(_OvcyVcIkqnxw;T40OI@zb7C`7a@ zHsZ?TfQVq+fqH$<9NkY=g{D~x@)a}3J*3R*}$ z;^!BHa+^aXQrK-j`~G=CxR%ZJ0wJ@3oX6G&C`=-4Sr}vgtLhZ8zcF?AdjS6T+!-F_ zTmpz@u)+d1Zi> zn2-gtLI$j2geMQ~gIi4Gq$#2pJ{a zCeHP5Ovs&cSZ3(C9&&wI?eEaNm1+_b@C@z0Lnl4{KkzkzVfGYg)p!c*DR2?fKh>`&3gW&0aM7btvS)Rh7 zG*7P)A6U)%((VWnJ<99om$W()>)0Shrp%dOQ5J<%im`Sn8Ng7=tuHgz6=;p`rm;RS zjXx}^0?>eUlGdCuVi1+?G~U%Z^A>;KM!+i~4dmA|DgxL>EOw)r)E})k6Bz0VPz|?P!=l)504ArOGarS z!W!rQn(}H*$i>A9KqD02E7B%2y&seQElC3{i+$IV!2C8XbTnX$g{6NtIRV@4q^3)d zzHs+70srcfIRqW@1CFZn9r#i@-z9jj#H>(Mbn(c#wN7_9eY|MfAfyAm2;zbw{2je=7IDg0r+A?>a7QUslUb$F;eD!MHCC%2U3 znCzRw;pWOhd^778WKpK4W}K^Cjcu;o;uR*eJ$=z)$Ji|Yab0hUAvJ&g&F66ouRp|k zmKsJEz9886W%uGHLiO<1l+sg#O|Tz{cFORyJxTpv+UYV?~>q>oSQSEgd~p5*>ML3^M%Q>$2!fC3&bvf3wh+gB~?= zbLhj%lpg2Iv<92T&&ALOo^jeEbSptNFYGM2nDr8&nr7$0Dt!+ z@U`a)M;u|3*Wi+7wl8$<$MoYDei4~n>@(Tq!*P2fCmb#9B~Ao+`+lHR+xZiK^Hkj; zCSZ)sZFlVuw9RMz@P?UpXF`kKtZ`iz1))2zyN=CxA0lpj&27t5T>TbfBPf~U`i&Mo zmX2UvQR#EFHgV?ZbKQMlG?AnW4`C7 zOpr(;Y!q5yGG}o{Ea~nZofX1h^&=W0g&wvDuz1-mNhpes;r_z@Y@9tQRV%Oc6N#3V z07e_r2xIU(#+wWWn>RUW%W8iGD}~K1$`?zxL;oXqhv@$^MB(kqLro2Ep#qcbR!Hu| zsxn(N85)O9m+3-+uorW z${KqILT!NtbBM2)wxGxSQNrb@ZgVS*!kmDa5}1ubEZ-(F?Ha@wt2U6{&Rcw1xbZ{Po&dm_pp;+ zu+FL{?U7YZ^?*|&9s9>NY(8Ue!sEwd0ruZ-7`NMhp;FAUL)4madLc;U?fdp8RJVqv z+;%@|3JSse0uVgn%r1)TB}EV#F8J~HGc`y1ROTmiJ!5~}_2W*h5>c#kS3lX0{*Q8E z7h^It?|gxe2xY))9${NqVKV4LA1Z%S1JLG&Ez)f;QbCP~8B#G2Cd-aXNO-?3X8j^B2)@CM z9L%TjiWQ)Dxc1=_BKAFs#=eJB|N9g}hP!V=C(1kE~v)_`aX?N2(fb z*5lG*GxRc?6bvBmUbOI@1a6nxh}`C8GDxkk&$bw~?C#;Zkdabp2fAf1Bd6-iV4!Gf z+JghNWDiiSM+#tn>woq8nKEpnm9W2K?u=sZ!FHGa38GR%~(+kH0wht=@aL zS-%gJ5UcOL9czEC>Zx_PFQ8hYiWs zkM+p84409~+WBykC|k@2bJhEVMf~E2Fyqc}jl-&3=gi)fY9+vb>R6Z2z>3NhZdCNb%6G zA3R796BD{Lylc69=+xj{X0za~q4*#GII8A&a9^w*_6B%&hE#Eb+~NO#32dp$iD*c3 zfX@c1lC0BHRC{7`*F_WhgEq8PS+{~aKB(6g=BMbUgv3p2SP!blBD1C$QNqWaF86Zg z6jx>v*R-_EZ?%c$X&a0?044wrd@6+9d|8#!#hoQ0OxJl3tuH@#!J zy)&0`Is9tDR^`8?)qJZpxBfAss$Zbmi#ox;qrgIJo>#nNQMsRZj*$~;Ox49|Glk!RGY4srrBvbuddP?;E& zH@?f@t>v_Ph_#<=8Jy0D{p`gGQ+`8dpt@yw{yZ}~v-42u2`6?EB&KxXd6dfd`a+P* zY1gTS@YlF*-CE54n&4N}n;b+#HPleWfqHHN;5X)B4EDud;X@(?rVX&M6Z;LJk!XaI&Rs^Q7h_zpv4W$xl57;8T}q3`KR4Vu(OV&?^VvUB*TiTj_BS@E5h zv=MK;c7-v9F~z8C+?-Nf46acf(`Y^Yii$ST@!G9GqxN0Jg9&Dh4VL+4t_zi}IlKv)aL_riwtq{WfH* zIdKP1vWa$P>0=La;|xA%uok3^%ZX+w`d9<1ewuwWW$Es|zh_6f;i@rjCQo5>g+^3J zvWmsn@9B1apvKKCwbq6vgXD1Xc|1tTjHU2K=k)oww_v_KkKp-7*^(+ z>8KL_AdtxBui(54wK$li(kx>L?xL{+E{{l&Y)nldZwu+7OpP1maf{Q7LEGhDhiX(* zS&*@+E-g{F1eeQJ`5K+=sCoSQYFm_*pvUpW=o*q{GjCzemAA1}qhI zqX{M2HyYRGYT5vI<;(%t)1I+X0eW_v8~9;$*}-5kBs6det8uQ5F7EJ9R`%4yKeyZ3Ev&`afdZ&PiDm(^V^^%Zm=7X+vK zVl|IqCrH3EOuHP6lcueiJboWyCOP>Q-n&sVtVOI;pv&5MDNVw{M0A#FlSf52B(-iw z)10OtK`*1;=j{J<&Zr&jeIu?ZGjV@Ro*y=&`EH^pnz|bHF9=Tpw)w$p0$&94Er@Zy z3a6W&Td3`WE_*~`zlyns6-7}9%OBB5Y+evl|E z`99kCz=Chrv*4g`&S&LKeS`bg>()*y+qX@~Z>UWC95y?+2tH6Y|C)PzJX0{& zx`f@)3!RgzzWMW}1Mkxo<-O(Srgwrl8u971S6^Rfh-vfiRj0^*Iq`0nx=g*d% z6C_ugRTQ>~etUNpuUtIiWmeGi*`VE6>cHYC&%N$yH~FWO`;m7edr7uq(hA3_)$2Be zPzNU^r{cu5W+U<}{G^qtFEjaBUywic_e_#p!|+sTWu<$=##<(t;xqUQi2=fU6&W6KNYVm2HSzx1B#Wu2P}(Qhg|h=1Dz#uUcVrn; za9n4REj`5Z?gvKU~|{1 zEC;CLvbmLT<7lX*UFwlYsb9S@p*8lO!ns~+bL@n4KjU8bOS|Vj2_qZ7AmA@TEWzzF 
zOVq;;N93Mo6U=oG(gG$GQ=bd^vJkK7GQkmN6c$@1a_VZ_M1}rlOAC2&i(v@8I!jNH zFFE-%v4WQfQ!C)6A}!p_Om{O9WX>h4kE6yHuKR^Hl$g+n5h7x$X~a3Mp@A#xR;8w< z&&)eNhR@pje3R{Y)CJ`9v&thaHlrC#y=ABY(sqzftzRAHVe>UxGr%(7kPJtPB)@9cSA}^949bW0m4SJ7Uvni;vZm_9=~3q$Wc1><&jM zP7=Aue|thy@4d~6Z3XGY!&mPM{dXDbP4l~=djfyI$q4V|8NRu>`Sq5`Z_8!>$A?H; zAOEgj42Ij;TX9G!&Z@EJo(Bf25hjn9CjCeqlG2#D0vze;8*hw%@799U)~OtSI~yDJ zCv8#Nj-Y8ck@_#PnjnP&*&X1XohL9$(e)`c|Gu3PnW)JLf5H2KY6xx4&u&(V^eb*}{l8O*Il7B&1Vu{aZ-<2Q$3ERHYwi zSn6bUQVeT@t#N7kOHW0F1;)9I_MI4Koum~}83F)GFX&+--y%3I6kyb7GxE>fn@|-zL2id73OnlMASz5Oyn>Jki68)EmqSVh9Zzjo znw3NipvXQP$aX``qHsF&1`Yt*)g2F};Ii4tQEp9l-p35k*FhBX;4t-db)kwN%X!{6 zWkcE`ak4F+%YqhKyu{BBv6zYEhcJ5I$;Na13Tc+cH8O0biURddos$y{5o@wSlCbJ> z#^ntP?G0j%0#yOMFx*}{T+8af5EuZfS3DvHN61iSuITGUKPr<+8 z&~+!Io0Fcw>CDrR8%^mnmMdX)AFO>B`lVInw~N+5vY4mHoqtRti8`{0M#B zQ}XN=`;L$m5&Dj_75Q=GzN8o$e-^bMd5pQb9%M}7hOK5A z<+)f8B#RzjvtB%R52xj9GLG&POPUI{ov;N))tuaoQT*lwZ(3d?P-LlX>i*W@q#jc1 zu?{9;yrBvFQQwnNg84O4zl#v?@ubY?W|<4im+{I3rb6D7>N2w z-e{oq!XU!0$p8yQhH zgV|x9pKAT zFpct6XnHBnW?g;B;&Wo;WBm&9GBah-W~ISqweSShmXQZ>dy!*JwvzcpJ?Vdn8rYmK z03c=jt97mv9W)$pEN*;Rrn8=P>{A@^vcq^Pw}C6r_*(7I{4$D~&3I?-$ZG|{M!;kOv#7Qvnu zVW#?(eseVvR@NFaPXefLcx{N_vXft9|GE?cCVF06s0OmqtQT#Bi*AcNNo~mk56!MW zG4$JG0P=xr5D&q)^fas<#KnwO&TBUXp7(oT&!4@Zfz2~`pBWrtPDVrV`lVQ`*~^XL zHFZkh*8!!K2@HtGjP}gl%#6IqG->KO&+co`<1Ew6r^i(Pb-w zl4)K)e#1va3*!;zjkR^k z-gkV|(RfDKo#YszFvh$Zf=@+E*At-eS?#ltCaZWA%PML&@0;l|_#E|xj1qmmT93KQ zXEz*bR!vR(Zr#HYDRmD-shOGM69$$b$(Mm}nv2ml4@2?bN#BGXXWdCdQ#Sw~P8uC} zl3j6x#Q8B%=Jc}|ZSTf7P0wDqp5o{jC}TJ`E*&hA3U1Nu+U!BW49U0SN-Zm$p>)_5 z<+B?%*$>?clSYe5lgdwwy4M&3S`>-LwZ4UtL8sN1yiHz3t#iOH@m{3MgBX%z=>f5k zxIh4+6qGK$MvkPaCGKo_x3II(e>gWqEs9xYw}U%YHJ<(BWO2%w<`W7M!CbN zNj9TdSHpb?g_@|%i{Jz!nc5#*3?OX=!?t(LF zl2=VDSre@S=nFHM${e5HNYSL2vZ4_IKCCMw5n@m?+W`w4+7ecYD20)1M_jN)WIwi{ z|D~VL;rs952b^uX7U&ztzqcqo57(IctawXO&V~>{2AlgzbBt}$R zG?Kecc9a-r&}9WO>IzV~EHGsLYvw8Y)&dX~!jV_P-xgZ7SuOv4^IPtK$!p$p)!`ws zPZ&)@%uMQ>smp+@R0ZPcYYo{9rD5sM`*36l9lxq&!3~=HnY)?_N%@8ZcFmu6pM&3SILd`|RtDbm-9HhR?3!<8LCzR$Kg2&mF%K!uj#k;lmDc z>azIe81ASz@n_h&lX+8eD&|MEh~uT`sL@HGD}t}HgH{u#|Jj1M2aQh7I^N7a(`R~% zSDEPgA?RIyfDY$iW9xNZ?AhopLHR6oJ@eE%m*N1Zo@6}x5&>lhz>b_oJ1ZcD*YN!ipcW1Oa*=B0+)49SaEWF#_#uDC@ zZKEd!_Wx7gqnR*eBaKdeSo>{TpCI#+b$r+orDOAx$LrWfIMSXbzq5ttYf`i>*MGEp zML!$r2Sg?mES*Ri+AQX;W-mNMF=CdI5g3vSM?(f2i-T=ct?MRA>C$cDSxBXN8L@T` zm>X*>`ef>MyswD%81V5<E!+s~mZ$0&CyAfM^!1tduAu}g(N zZ6Q+nIz4XbO@Q+|Lf?!@KsZ>P6HP;2k#QhXWIHZaS!F!6&!Zf118U z6Fh}Z*C(`y(nG8Et$insSoD~Tb7TYE2XjZ+4YjVURVQEE1HLX-PGM_HKlaJhYE(x4 zU6+9#BqBlbMWGtS5oFYrxw;k&tS=8JndShR0hS>XbJnKg*}|Ny%z0VRS+aj<0Npk> z6l5>^5dH@PF5V?F4=!_cgY|M1v|dPmcNGJa)mQ;zcq2!((rNkRAn)-y5iJ|01(qiZ z>6E8oF-JFZTpg|WCoj$ckR{5lb~}S^*LQVoF$Vrq)t59k?#P=S-17<%sI})ve(~|Y zTT9gJ<*&q)W8qV2o`-6R*`l0%%34Y5rLyIwdtuEL+@A@wnbrm(+U^ z&nhL7=dV^-Rig3v-)H@D?VzXo9g6=MlYii=>!4TPMnS-j zAoEe1LZxh}ly36*8vHkaV{wE~Sh2N^cg;;5`p$0iZm{O4a_`8EfU1eDfH1IbDY;3% zA@QAvTX)At-f#~uQKTtcjEB$B=B_3Uppa`6bO%$kj(^Az@TJ^=y;`tKU3k1dw zivfL(z6jWlQySD7SjNUz6T zLYlyLzST7>f;}Pbyt-vi|I2>a*P4BrZL7NL?RP)BBbj7O6)4LaIYL|04)Zbl-8F*= zWcB!?_LA~bLTcGP(qL6UK_9*AGH0szVom9~4qA`iP=m32<4R^dU@Zy|LBCuU4QoMH zncQVpAKQ^8b+?UW*b~ewD+j`R64fkytg#?Pzdq_7JoEJ(*f}fFE|(W91Cpq8@tT*U zz?j3S#F}u|zH|9Q#UjyD%0aJ%#NS3I4^1eOKGAotGh7(;*?Cr3=icRgT1nbakKVQ< z+SfNEkB5bYYrmA(O35UIZ(}Y~fZFI3`=gH%4Y681=WE1fe{tslGOxdg{wiJVUPtcs z1>ruKJ^s8*M7T&q(=zI)lxuddAMuKo;t7W4EZ+?&QkzLk!uaEX_{M~@U{g(2rd6Pt3skdS@c7bOAF?&-tM$78%x6XCt&9B@fU`v?FP3_TLg6Cp>F)k zXN`C>Ex}b$#YO&hKZWNt+{(?j904`*w4ZK87q-&9jj{<~_z}{B&vmZNONknPE(TDy zaY=dGsE#4%qVFv<+aiU}*=_J<$Gyj-Kp9*n*35fbuT&`TiADCc!oHu&%e;9d!4rP7 
z(XTdY1*mG_W=Qwez~w0;kXKqc198%6?aR}(6e@08;!%pD26oM(er~9zIxnax+bAhY zQyN?0`4MiKm%ZDa1b`gvm%DUV`x21t{^B$FuzS^hlybZkY<%FzcTYlT#PoIh05zki z@}RScYP?jV_yiH_NAzpezvIV3lN0S9cdx_mNa<%6K|z>7xa0od|(l9qDn#8RmGcqrr&aR4GO}XCPv#v#7F$K!F!UUKS=X zdkrVAexHn=>i9oe&O73SNJ(;xngx^h28{vlZx@NcbAh&@niq1R$FXwh%EE!uvynXF zb;Pj}ZaE*9SG)$d$l^Cf_P{k|dRsQ?6<)G>W<4@Lm}eBptTdJEMFny{wSP#2h1is| z#zaTF1?h0g>65vD@_`<~pnK8a;pb^xDv7Cqy*;_n`A%>C^4&ve#Qx+mx^-vTjGFDc zEs_+$kW{8c9K+AKqr@{C8A1@z8%Wk!HEjas%+XQk#>T}hD*^RN8IzwvW!*Nkd#kFy zH6n8b4hv|JlRekCz+8qb-bV5BU@hesFhBER4gak7i}#bkA{g(&7or^?%NG}0S|-oo zOnUsUV)>~7zn^UKip&$oCsrm{F=*JPr8Kk|zSi;#yUTArX3IIxf{?r-7Nv~*$k@-hk)acAs?^CP4%D#h|(@GqHcP(qW4f6e%ux4N4Fx5Pl* zA(oV-!F8jJ>b%*FykoY+GyimDe{m9F*aYI`YuB&Coz)Q>Z)`biSU&*3yKbGLvWAV5 z#FPQzZXU%{F$b&}V2DbRxV+U(Y~Af;rexFEbDRD$x1vV+_IVsX+IUTZWaCY4q7;|c z^V%LcJC~74O;|d{cOHk|@4R_Z8ZjTqzXICZ$2pdf7ZzHB@+^%$6ZN;P&>XR-Jtu!k zCR%T@#OF3vTwX3QTIaY%avyTVr)6(WDER;@-E(>1^&xbw>!35`H6Wi+K20 zVjRJT&#Snq1R1K5Hxnl4oQ?D_|5bE1(P7EfW!0^$hHJ-$=D)%BIVXB+j0FWdYKZND zUjW!g7dKuYDiYqcH&rr%eDMApA7BhtrnEQc*6T_A=xs?mZoMY**8UmP-1bngAdjFi z9-(_}VTxh`eM=_>Y7K^T+`lfT@7Tx-A+Vb>2@|@%dBoyLx*s8Z@z2n6`g zBEOw?erOB!+7AAgI{xkpy#IhP$3w;fj9K5;=dLhxH#U80nv!!~u8LAdA9B?BZii6* zzBPgKXg5T^xV49*po%V@eoidV@RE*e=Z`%ct_Ah|D*k4w40=$~tgyNL8kVR;o!+tBa{t+F!a@GDo#pe?G9oEAI%!mv+@KTy%mcc7H zEat^hqeFksbg=iSP54NQk8&g9ZGvr5B(cDe1EU!$Oisx{_M$jGUNVQtG9!p0zH$cz1@iSybJ*?XdMjd$xO=kI{8#4y6MAo zZEGlJL^lFPF9r3hejlU2P^-BOI_q}75Rg2J7gaSWFDGqNXN1Jj1+jJ5<}qKT@7?yhP3o8PTSX6xlrn$Mw&Ct)IYq?fcf z+9$dPUTP@W^N1MgGp^kyMxv4ESzGLr$v@{wZ(NnzLD?6kQVCi^7RGzy*n|QsGTspB zNtUR?`GCszS1$y3Q9vYW=io!nd6Q~Pb@C!z0s4CVY#9%*GURiCF-td1&g-Aoa+*6k z_@v|oW?Zb+0I%Sh=)%9;R-C)t$mj$JbpHBHPv2o-QjQs2H!t&`tRQ;>c<~h z-=YP1_Q%llEf0b(e;9CmG0cMKUL96M%g`Eq>j_?w{aW*S=5Q(X+goEk`lG|c%;H+u z-${AHXL*&B!n-nYG=KJUdD$PT>T1iMc8rbBZ0gHNGxdDSF_#rt=Psp#y8# zY91Hy)_$F|M8_)lqfXaPdsILM(lr zUMiw$&p?CKAcYc#1~UZweq6(#(vJ>>^>Cuz!eqhMxwjk8mJu5MFjqB{r2*2Oecf9J zo$Pm^mSyPOMKFvFLXEVH@mw0E7pL2E8v;3ZQ63~->G}UfKp2GiWJXpG2-mUmGx^>I zN4f3{S#H`xuRUh!jLSPiDiWz+6vA)iQc*`IhOtB0zNXuh3{8Y4iuEb35uhCVUy9JWLgYdTKC9R?R|ztCU?nn7xd z{Ly1{Ahr6PS6O#8mXu7y96dh(>`^&gr5a55NJu&n9--T1Y1b9CZ`wtu_rRszGv8?W zSLSf8`e~L#4}Xd_gonR@I9!^FSi~u^0v8kse?l88V@YoWmOK-l{LZoDk;lB_dgC=M zZs0Z)H{MW5ZVO3baDdC-fz+!_d+?R$P3J8B${v*!bLg?i#gESk*wV3hp=<;r5MqUT z8Esyp*J%dP-tsEE%NVs&0Fdht8DYmr~!O;V9KIjU960x zAaLmTzx9%KUd}A0HsSmKXs(T`#(*&!>mj7DvX#u@ud4C#BWhC$Ty_txUK`lRx0!IZRk7z-^mBncEb&t#~~7q7@%>@FHVVGKeJ z73((XqvK_t@((xptUQg_ja(p+^sBCodU6D0sGe7AoY&csM-BP$l%x)rClv`0knQ5&I<0zshwS=bt^Lxv*Ub)$g$^;fE8#lfFK{Xe$0Y>Yg5yk}xth*`GT# z%MbFzbirToM?S{6J#`Bx;kpvCcAKgEIlp0Gg+5~kp=PrNuuQq{VFInU=Hb0HTlp*V zb?>--bO)jLZj#gOWxHt~zP~n&z3?t;NdB##RL|yS7rD#L{=iix_{HqWRgdslu}WUb z;0xA0m-z9x-^&ShJ{+}FqTvJayU||-Q|I$QVKx=662mW?Nlx8!J%~SML9R6|sz*8e zNjX=$8PbYB74rza3M&JRZ(0z9H+_8~zcuZ5lR$^U|)8#XqsYuAL+I7wWj z`WpL#d0`#zG>nCs@!cnJ4?5>?R3LsrKt|uQmYVn_dAH#?g$fx;)B?3b!aMt%`4~_* zNg6~q!FfyLi*VDr@!WFeKe@e$NqZs4L@n-n20C4Y$rKV`M@BG`hd zDan?pP8bTxCnZsuaV`6_y(tB|aB zW5D8*>cH?fFH^~}WI#9ry@}>NzAwGTZ~e8ZS8@vGEyg>kWbPtWAl5I&9KoKzXiabY zT3w28Y1)L+C#CQ3F-rM39cT_|CFLihWU=X3<{Henv9Y$~i8ss>^TlpBR6|U?;5{rt zN}eCDm;dlY>=lV3IX``3C2c4`@7!|zRP6BCBK;3b5L*4h+~z&S!c^>9PvSeEm=_Az z1rM~xO@L~W*|I;39GKu=^%$S1OFc;WO1aRHkJlSG=Eaeo$sI;}5T(?5H*;>-I*Q#7 zelR}7RS^*vZVd9_jP+p*lP{I3MWh~ft3Kl2YCOPYHn_oVS1IDWzjtmtVD1V@E2*`F z;n5)RDA55SUw0nK!#a*DRa}qWzTA1TLJRiNYl$Ix@dQHcG=zT7PlETd=>!fXSz{^c zcRtq4g|ZK&z5orkZiZKpHw2jJK>2hEJBAwvqd60gyv}0e%st@6!wY-LnF1!~{YEjK zhc$EbSY=>58=bKWexge)i?4+&gv0DtFFIMM#l~_p15v!|{TlRErltSkd~@SSnKX{C zzmh1^e9$h(y(HXmA+^qMPh*o->QlHUYjhT=NtDC;U%(dkNEnrA`^TDd5G9vl@zn=m 
z>hC^2j_S1XpY!Uy^cq#%tZW$t``C2WGHJl47s9H{bgZ^1yRk9!Bcrp=RgLl+<``V| zxc5A70Ld)W9eb78T{yfHo5gcJ31Jv4hrC@5)X8QvQ-tONqoc2Bf+sOWskFBM{b8|4 zB4-GCJ3qS!Zj2Rg^RElrW$!~WT*YBs!&~oYEHpQQa7Kb?P&C#PELg0BAWuLwwL}D3 zU!OB?MhJ&TTxcKkFPZ8;93tw@C-fb_T#e@)9~E=~TT}!%dSiY{X+^>C4*PmKrTl3}iy_Eqk!ru&dRj8N|Z$S=gYyRB(Fyh))1H9N{E93=2#I~eTS$}^^a5_cn4gk{xCbg%_;W6Zg76kK`f-0;oRT_U4Dh98XQkZp zRP6+|PAh;i)+>|i;YkeX44~L)jL*wT;@MF3ecIjCnDzLhU0SLD;%JaP6kmbfbVyKs zhtT#FQIi#a_jhe*> z14xWZXH^qx_1G`|C1ZxV68`Y6*|Ch{JyD9>uee@iWk9EE`vx4iht_Nvj0LN)`HX^f z5Px=H3dQ1CA8KcMsAhO@e!MzDrYKs1ATT;*il$QpbIR>WNuMRQ6RACEk1Ue8i}yuH0C-VI4y z7uu5}!UT&$`@91|Mt>4ejC1cBjubZJFw%^Kg7qmpExIHF@PYt59}y0)|5h6IqOp2@ zLyauHS(ON*s&{s-80dk0foEFdUrs9m%7&T%LoE0Meg|;KUPZr<>W|ZX8t*9;r4A*L z`<8IFaZ1FFq%A12gvDrzMz6;Mu(5H0*>Dh}kFUCi{{H$IZwgsutqn@JTTW6s3Uje3 z9cNu3J%_zMI-htc@##4{Q#>Y8Jlvv#?5klTBkGhQFpHvdVuBMJ=eV!2@_nuAPSJd$ zFDY^W6n-S;A@?;%J#>JMm;Q&G{ZidC1DQs1PiHLD+fX6|z~VJCl8(~qPlk|I9OWT{ zlChM!S~$yIyg173mSg4E!eKVLR9FBRG@2h=MREiAxxYNuPKt*rYK;wC`O4y%1a_`R zZjr8Aga}R}2`7S_&Df(>E;Tg{q2oIB>l73cfnh^k!ZB1LAq!^7$6f;b#x!plIwt8O ziaUa8Njn1Nue`A7hsgHLFQjVO?A-Wk@YQ>CG_AW%%JZt@yt4)z(w4Ld9P`!$%-LMz z?sR_O(pizjzE-Icf$juu%VWh$;zhA&adsM5MyTDW4j^!Q4P8U z&wR5Yg_tX=(sUVz&r(qme6`x?2WKD8$HPmRZC}q2Q$OboOcRxg@hq_ z`Lu(h?2mS#Xmcfbj?J(RG4!xt3rb7{+Cn&?+$3#79Mc}BYz%PkvotgKHUa-opDbot zD^l9D@GgF_g%F(j6Kvs$o@b-EpBw{Wrs9Q=Jp06i#FND6mAqN66o*<0-U2(~^a(z# zzkv0rNpG%Yrd`}vdXW%A9G5BzSdZMnT8MCve95=r(E^WKdMZS5d^&ij^o4Ofpj2y% zWv5ef-^_${RCDuHe;3`@x0&Gn*y~Pmkq$g#j&Pc{z{HC|=RGpu-6Z>Xl95RD%f<+O zX5z@S?2k~v#N>W~gNx49WI)+9GH1J(<}p6pO6PsJgUYzPv6F|0{9(_~P9%IJ&e%nS zV~F#=tV#*W=SxK%zRqh7Npq8eXRStPds|{m{#|=U zgYs4tf7Y7M<_ey8L|Bfuq;G}H4rZEJ{v@+d4|9sIU$5GR4s}G2Zi>SSL6A{=5qyly z6qKLI4OGk1r9yZTq%3|i)iX4PDk@o#zE32~$n;uE5CpKz>P9dCp@K}Iz0S#JQc?+U z?t^e?twP+6LzH|T7O{^ByzGLiin{BBEzOyculmarou8-XwQs(l-%6NnV5A)(3Lvb- zNld#L&E=T1(%8RbqL5bMowpLn6t*JM94ozEmu29hq%YwSXB`*QsvRz8RmZe$xsmZQ zl!>^2Rk|W^*k^~v%~69r!5y<# zH(mM%j-JASSz;TLIZZkIBNk=C-VM{jtu=sH%E?)>Nn5Wzn+t5p)AmX$Mi)CD%=2?B zU0oZM^n@^JsXpM@7bk}R>m433iSW`rk&&kD;ZzH3Vi_RNyzGYbWTxX5bYK1Ifk`|} z!Fftc43PT1)F9eEw7qt2_p`EKP>fKvvIYC$2$1je1tZ=g*3g1AA{7=dKMmkHQ2kB1 z&l$H9;Z431+ZM!C4oJ9<)Ff25Zu`Vn;L{$p@ts^Gc`9u2^w%D90uu0GC$h{m(Ls>h z-QLE_^PJ^Ava4ON{mbIUD*Z-{ zF$~Hh#W|u3X3@a-tZM1U6r{wa$ytuXr+Lr0(_1j|nTpf8X^gux7&NcVyS2?{ zc;C{pq06jp_AM$jgr^ocQF)3mo{88?+#r9pwcb~-*2KeTEJv^HRao2 z6)KtWj|{1@cD7`2NcU|A7QvBD37##AHZ1raYGwDo>kD$s|7HR0JeV7 zn)`{eYd-aMFhnv8YpM8Ue~Mq2mz57h!0Xk(@}h%sS?;J}dq%hT9Eb^5(?rXg-C3RU z)b^ipmEXu`FQpQ1@1kQj3!-fvoUDM?8iS$3gaq2lp~_{EW0l2Zl3Ip@ z!p1_ubJ~A-A&SZdYBGYoZv(MoOpHhQ(uQemH%603@@#M+QYd~j%QZKf&rSD-3!9g^ z?XV1uJNmxHV;L(eVZbu`N9!jQMn#lu+U9=?zhvUnBsZ=Tn{KjS;9j3C#bV=p?|bhZ zai6DDp>Y0Zug`|Q4=;p}dJE$jgqt3`tk$#9``NVzY3Smh*v!xqw(sWP3-`<9+FIxy zJ+n27+GzA%tK~OQUL4# zfAr41BLpxqDH@i))`p&sDH?e`2e*k&IBiW%KU~_2g~H7s9fU?*_eTO{eyPdm{LPL{ED2Su$r zBKo{ovKI?oY!V6rOy0#M;}nN4BD#=_#LR6N%sA*%&xJ^Y{!%Y6qON@@p?UTe9X>9e_eI=E}F0 z66dmjdfpp-7B_KdsILEFJIw0C_eQx4_NzwNvl5~Zj9Aof_v?vOWemdB2p9|A-{?ZMa)eVx%6l^Z*9w}W*_(MM{!JEEL4O+IKEuPmc&{9K_4~R~)c69? 
zscVMtLxPF$gCRn`9bci3_g(qelM!R_+j~-Z)Cjea5`Z1~*r&T%0ar{p_$?^w?o`+_`1K@Vx>%EpP*ORx`W$en!}=y_(%Ozn-hNojfXX zq($-$P$*(Y($-kq?KHiNm?*8jy7@kLTXI53yMzNaJT9Ur{kU%(6lqLDP5LzV73rn| zrei!`?*jPk{|HBi=h8&A;r+)*R2Ik;TRCBD^7xTV{8HQ862qA4G>?7S(KgoYQ+bDKrUIPO}Ydkc<@!3@Clcpu|7!e z5Nr0J!R932KcR9DF8i^}ZQ)*|VrNAM#{897BDq$1ZpR>GIu+3lVxvjaS34Ls*AR^C zGmRdS6CTz@swc=<=(JOFrvk^vaB0}vGbf`c6edun7wUc6YO-c2s70Cf-blUXJwWZ9 zu2^i@(%z?B!aVq@pD?9y-Gbh}`)^vwNJnS`-GC*}`IKjCxNwy)}<) zjTFw5h!Y=ov!_+XcoK?W>j zzq1MRW`ot*L#qcF02E5pUU}UKvQh$uQg*fo6N^06YF~#-6b{p8hZyZd9B6Om%||ga zlFnj8n3-mP+zH{|XSva}%Cwb~m1$Ck%l6Q!_6Mrw-hJt`=x%YPboV<$Byxuk25hZ_ zApYH}plJX7_wzj~UzrM#;QrA}RUAHwGh#Mr``m@hy!_Xgu}0!QA7IM8)I~YFo%Zy4 zyCyEV#bkxW;oMF@KWw-0^s3@PDur3sr#eu|FpcN}dkFtV>UNv~&yPB< z;E5`JPZ{S8ypo9J<|6_I1_YP`2*rrokw zqwWr%E*{Za!Hfkn>4nY#w!AGgJ%4H1;-S1}36H@Mi0tb-HIc$*6YZrl+8^5#w#Z;K zE49tEvheli!IojbGaSGrl^#AQLn6O273a)DBA4Fp!-<*Uqv>1=np#D%rM;Q?HHeoZ z$XfE_7Bf@-@}VuyxfyWjxTJE)oDzCE@qxUd048$CU>}MZero!h?|c4UBw*Q2;9+jI z8fa;+8&$7s)os6!DkRO|vC~M}Ju)g4CTN@6Yo0H~$bb|F@VlZPTDbmxrz7WS9 z80x}8mh$QEj<~tG1!}9;J0)kP1me~gRbk)77Rx)_uTUly0>v&onp#s`>=+=HY9Y32 z(OjQ7z@C}_xfg}|x(Jb*mUf`!O9(dPT22bhq@H*F9eo6Nlas}{FOteSI6m^b9Nl(? zMF~5Bgd3-{D8l18knc-GXaO`nY4XOd7xX%RF4fS2GP4mzh+d9tX#Y?j$@o-skT{>z72tU#RR#a94_^7`)zQgs8jHdoyW|Di8?eYK>$ROyp$zkTfwtlLdR?X_~S3 zFtX=p=zVb7#j~E3RU1`h`ZQl?`;Sru_O4dHP1$?r0y2)Zvqq9cf~yR0LTa&l|FbX|ymx3!BQKbi=O zF$3N=mxgiRO`7b5)9vl+DJM8NkE^R-gHUP>Du&{JjD8p~PMsE*05A&YXj2h&&W7r_ zj+8W8AB<>}=8#D<;M%2{YuM&^Dmib@H^p?X zr**baXV^84Ql4H|C{D>nenj69pcE(_eKF8f+rt_3EB%2jO{8pYV)9X%Aooh{$`@!S zz&R{+D)a3_ru`;Nh6DQqo{y6tUXP4qO-~dVo~K?(ySvzl5LJT#Aoi$)C`ky-Q|xuf ze4heCPlOxHmb3nbV-^&0 z=W>&$ax_Ro0U}Gk3)d=LmZgm5L!WR;==-=N8qhT#jU=;F5%wI+!9r$p^7y&V@7-k7 z4Th-1g#w!+kPlF=N)#whrC5>*XX_*h%Bt5L1^2F$KoTv0PK)7aDoT`ml88Kd@5ylJ zR$bi?9*TA8_V??4-(wzeqxlTIAf1QBdDzoUu4g=2iNyiwfj3c>=rO48+H}u^{73}7 z_-w2)RD-v3bF=YAzaAcZ?rAXW82{>?inhG^eN_e6?bOO8M9t9p$*Wvi*eg|%A6m|s zXq8&5X=r$4HMjs%UhH6he4G)PYYekSzn`6aCVe?L!YpGKYq~v3*`U@-7_^v+HPH5x z&!vA~uC3LNw6|S$BA`(%enG;iOjPuCHk!%QuWUlb4Q>8FmK8a_QiP}P`sbB&L|UHn zkQ@~>VWND&IM_D~^|BQC)3BsRZ2UPcb%*grjrfliyq~GyP2^Gw`vPC^yXlyM2Mo#? 
zdy;|5D4`0o=?H@`kh@>ApMT@P`S1&`yrTu_DRLN-8MZCFvh}c)0mA1ZAguR5=gono zI?#vYF|Qk$QBbKxj&~_ICl!}9cOvA*J8fbx1CWH}R-+lh<^cFrp6HLE4W^%u-(~q- zezc7dzP}}WWe?TJpxyPLJjuOo*ygv#yjey{+1hFT8n^%7R2K!>T#H{R5yXjSeJS`U zIuUkyd#1Wk^aBAq&gJpkHbX?ekKAL7Czj1NI$Fx{YCLGMhGtc6+Ou?V|E@wV{1D3N zdlDBwA%2Gl9`xi5?*1VbD-KOu8wdmYaBh-yx=Q@*-)?+G62GK;B?5x)mYYu4{ofJx z3yD|hM*bIN(&M3&yZPABAI+g?(XZ1%ji&yOCQ?0#azLUQ8v5I>uP4sjR$w`I>e%V@ z%{Zrt%GANPZgDv*0ml?`I}3`45a)DvWobd*S@mEHVpoz4FSbI7DdMb&la8P53*_CG zeqpYf^Swyqg&O+iu3CbAe$ia}SBMjbu47c}aX~HKtl&4lalg88v(N5xm@YRs6H%l7 zn6*kczwaanxWMu|+sPr}H7=;?=jDgKV{3u>A;1XF=)VD&aH3CFb+#SBr$EbZ zqlcSB9SkrZeg&;3$SWi2Qqg?)7+rg*$BesnW<_LV2Jw5|nn3CTHY2F!`t#h>` zqM2c@2l^O;L9-Ky!vC|QDL)Z0QwE@}r=w(d!1A%$KA%Eb56#jbv!gD2s7h%HL8`Lr zDP4swX&e#gAz7xPV$VRDxwjLq&bM!yN3TOXEvXEOghDmc&9$F;$}>cg>={rM&6_kh z{XYrWI__%xp@Fwi@qBc9z*f@o-_W&5AAK-xCaY(O!OM}zOqh(8`#WRw#YF_acX3f& zX;LZIT3GI;P}@g@5fzZAIn=|Lx|4A%oI!-WBtmL3t-YwAKu+BUzy|pqBrp2ilxy)o z5Kr6ig*T;Fo|9G0IMVUn3U&JCK`Z{}8p4mlQJ{O6y;%Do3|Pgt_9AP!n!TUTes+T{ zcf-Xm#>KsyX0P7`T%I&B*zv}JQgVz0W7uC;C)WwZg+tFCwgUQIq3agmLwd!!|HZ4L zs$6AVI8uqc9d!^4MHxXw#Bx`mG2wCzAR0OANvMfRB1{_gr9eD{qvtKo{&aS@+VWsg z+AeKs!2PX-0hN3n`SP7J&*+R%LtTxA7|vY~&*|?Y*AdJJW$D&n6qbi3$q@So zq)P;M4K+l&VyY3O4FO9sC%ALz(z1_wXz~tj1WSGoPguWU(Z?{?-;nTsw;_Bz-_k zHuGXyB`-F|Rzok1l5&_WTLcQ$2$m>8_Da&;b_{MbUoH`f{;;5DpgeIQNjnkeRrJeC zP^cQTUz60&*CTbYDn zAkG^m?XGvu66|ipFy`-nhM;3A{~=+rCl7!&zJ{`|JjRpxI37n5tx@0%(>B=u4efaG zpX(oD33ob3itN~6Md0XsLtA=2oe)NyKpYdKLK$kpw1@4_YDz+oSJc@T6Nw@bq|fq2 zL4z<#+)9zEQ+^M134!Zr@5@!9?B!66?%h=wT{40fElHro_YwJiY_q>?gxcPjp+Z2; zkR_hH9>U~LjJmOQ8|8nb8THHw;Dx5pfuv}&IIBb~bS_91%QB_$x)ALoEi~Nm&vH+I z2p$|i$;gJYmACBMn@g5i-PhZAw?_VgplS7PceSwz8ew;wkY+(Gl=q%|G!5s>(uBF; zLlmk}#{k878J795sLH0dt#_cv#4w|;se*w%9goD0)1Y@aP&u0`737mZOzheAv1O zt}TW#06^bowb}v`5iItaKc`3RdJE@jckE2D@E_$x_nWF;>)RE~AZMTCS3=DQD*RF8 zw^}W4L4WkkZ~iI0wg#EuZ6!h<=n*Yl$!>edtoz|4qU{Z;r~!7Ii-XhY!G00ff^LeB z_zjS8E#;j6%f~DXFt9;TM?0B$FeiXLc$7_lW--LjI99R^|5Jf=+(ofBBbpayAJ~cN@N|PpyR5|9! z+Un@>Lxx2@+oj(HwJm7-)-xCr&*x`oz7O6Q5Qy^0JE48);5-S6{Oqx@+>FUlm*!ZL zSt*6>=*S7>gmc74>k|F?uP`DJ#;z{?FY~Nfxy<^9pKat~I z|Jiogx1$@h@i^@xWosEyZF*B<)Z!zU3v)I4AM)s!m+xM~`p{Tfd7)LEk3PN|zkRS` z`sXiMcsc({LYmNoB*SF@M5kwx8All@c1@;u3v`$?@l)>n^R)C(3-i83%O-P`K4&-x zZsgD9D-Lqa=Ly}U;IJuQhfhmLKu$#Y6OM1 zm{r*@0%sNUR|SRL^}@?)FRkTZd@9)Q>R z#XLnH^Q~QL@Qe^tp?}Xd_WbvliJ%+eJ>cga4-+3vyaa>#@N~_v^Of1St&K2>9y9iV z^HcCEt=Mf@RJ@)u^ZW+hXQ%Dqr}IoiSl`(fyKqb(HZueM(PQKLLI?So-`%p)EWRPO zW{57Q-@f{a&l;`(r>>3}sXFJF6(w8CVY2h{YOo>#x@SLQ+x_)hGS5dWdKdhDpem{q zB766^gAby8hpSrq5Z&}tK5PO`e zA9(zwJ@9(VvpY!HT_XvW^51yVaJ?jFNZlujs?Hu+ZP|q$5rb7a;@Rm*K9Lv!o`!G6e^WysDb5}JqaRtCOpH`Q~TZyp`Ic=ql8y)AlxnnfXKURfPp_zNUUV!qI*a)c%1F zLCV2-&D#a_e4uz;X&eqV(!qfUYlPpiMLZ@yW%HL`*F&CElm{s2e|! 
[GIT binary patch data (base85-encoded image payload) omitted]
ziQ2A<{8ErgQ#iBluu*}SaK8SR{ZrkbBxxkd_kD@Eh=~Gi^mO?e+V%d{AmqBQl=Vsd zvi0poRD;Cc_k{TvEHPDbglCk`;9g9;slmMnX^RNy9og0fS(?~zK8NHv?Aja^06(Hc zw=adE5phxPQ@9rtdZG-o)Ql3OvxpHn=t2I=aUQ8bYPNhRq{hS_XGrp3NUM_q=|d3{ z8wKmoVqx+v>#&iSb{4XIO%>~`pS-YL_+b%lGbYx17R7X$_qCkzy$K_h+LVTyNsq#?m~rk0_f8_7ziKck`?cb-E%?ICE!wyy8COhecOe9+-gg#wDRTk#Y{ z+|VFvcd9~8b7&Xa`fL4IchB&OA|7a2z->`m-*Y^^RtGfMRO}_~K9B8-kN{}(0|xxT zut-vXmzoVAsnq|r0@d~Anqqd=4;r;n*SGk0VP#l&Dx_iz^_#d)U7&xc1eH7+65Nv% z)42G15k#nHDM=ptTjGnWz4fFA+MqdB33e@fjnmAf6AUpxZC<3fua8|=C@M9U0o&qg z02=jjzfPKTMGh+!MXt|aD#Q^8b z3Q~c%R8Lih7vc5KBAG7S4e;q>U`zcr?-hNSTnn=t=braiI0*$eMGlp6tUZserL4KH0*g5Y90Dh+rY0r~e5<+x z`&>HP?L*i=HgEQN!+(LCsXgu;Px@Z}Sg&huFKedbSzkP6$_#=;o@zz9|G{ES*Yh~% ztA=NKlv6m;l-3Ge#~BvT+04I77Q7A#xX%t699f3i|Iz6B402f6`>rF%|9ED}Ls@7S zDsrvMoOV*7ub{TjGgOe)E=_SQI>1lGVbNGS;E5Q)>_P-~uzFEN67Q1^{iKpTxtNn~ zMm@Gh=dZ3g0y2B6Eie?BjTAwd=`QEn9PoeWeFQ59pBlY;N$1^soa?~VbpU?!hoU4g zP^V4*qXXz!=D@1NDikQkZdJ9`>*6VZCXrEBAM}aKD_{TU1j@q^PD2!83d{81G zo3A`pDLDi$w(&?66c(-;`8{z@9WL=pF+{l%KD3>AC3UVNODL9KbM7p@4I zuS6}?&aq_QlBKKna}=xNdFQyyFUY*(<@-JQ?e)FF7~WqdkU*1MBU)IMu@A544|^iQ z%h%3=gos`Jw*K$QgqUMmlPrk{z`v|?lhaNNYA|j;Ail|zLLOo|BL;%Su7ZR%!(evf zw39}SX;D?EW?Y!KlTrZXR`p;tKgAttGk8^&M1o#FB{}R>BlL}1)@%ED|Mq&DYU6Ik z>zg_X!t1n0c54Rh*akR)yp(zZfI@pu^stAFu{EgRnGmRlNIJXmI|+AtyHxatJ(yMt zPZ23tcM2ET>LC`8+$qXqXx1mp=gfuQyvKz9de_lC-|_aM{5ZB|dfAdq{=(n)x;4&m z8PL&n9{nm9^}HX?_e_@Ech1%xaXrGvs6d9;^Z{`&;o_TV+JjZfYhN~C3+3e9O(PeY#t5qH%8L_{aA_- z1t_d!qnn-+EG8UM9cw%?IzleYHOl8AJ*q&NoFeY*8Qx0wcWEJfu5A}*|LBAkt-RQJ z!geMhC9P+EgKB_%MMb#V6*YcN{FqsyPICGD=a9$4ji%8LUnb5$D8SIEuIeLP=hMQQ z{tV*fwu_rRZnYNj>#wzIQ6Nm*9_dZTFOgrXS)^Og-p1=gZ9JEyO_=g~`g1S;6LVB< zy9h2$e{s0G^@T5j90KX0&FIFxcX4y5)RV&0T*A{CV8?qM4EqyFsXObW!e0}y98|nN zF<14N?f%g_|6NzdU~tFSLH3jQze@*Hkdh+)v_HA-duZkI5Si-oADVN#8>ZFpFzpEO1Fq)uhbH3Kh56};3Soh86Z_=asLqiJ@LX zrYZWeOyV#2|1kDeVUa{zlx9(A;qLD4u7$h1ySqCSg+t=*Md9x5?(Xgm1tjj1_fAiL z(>?t#Px+D$5nn{?v(Gwf|7$i#>sq6-?GRH;V+$(W7LAMpL0kdq45~T6C~so*Z$SZg z%eD{Q=XuTNICgC7mcu)Uq0&$EF-NplK=eUP=(P)W4xqWq2LQO;7I7oGFpd3Z`)&3e zvoJ-BW5OufCC{WZRa*V!N5n{jI*Iy8FA|4fCrw0w2mWM)~Fmf55- zv^$4Pc3R5u9Vh$g~J;Q%7u9JA)CtJ!uU2Iqs>taruf(FH0EO8_~v3caU z3ln5R*b5rK`gvU}-KA3jf~`W4&Lcn0KQku%+0(QMggJ+^iHa zF;qVTV4-{7c-{}8eRnt9P}V;TyhQ9_#JF3k%vnBX6>p#Y`Qk@MZwi0k=a1Zs3i(3` zz5PQh@mdT*GSHAH^IUOvgs~O6S(Rac?8-?IJ_)DkJcd`7l zn>%|>zV$LSYjy{Nr6*eTt@gn16RG=S=L^v@@J({HuWkMD+7D9b`H)3BHctKHGt)=@Hm~Nv1-v^5S4W4V+?%K_&O@ z(+1{qGW3G$Qp+mJrOO#LVw2b3Ax!TdU@1-pMy&x_2I7-V*#Lec1A~cQ665B*7b3Q@ zt5U?02B>YQ^XgdR>g9e4VS4E@D&))cMh1DounnVK4C06a6p@=_E-VBts1;e?CHF$R z0ve(EVOj+ySHg3bF{us!Q9eF+8nTY^NL z$k#`r&r=FeF?F-L)ygf>mIQ?!MKi(DdXj5 zGOLmRhogC&frTZMECF5+Y^uP`xiIiIXcjj!z{gN{_qICB%bFrSVQWly0gTNyG3(_3 z+CtK)ZWnIFao$NCA9$-{a~M02yLiw|Ofsp_RK#r;#ING#fGLZ9(?HZYNIEsAo_ zAy(RYvR?1)@p6O!WGM8yUmG}GYs9E%)`P9>b_MLrcVOn&HWRBllMLWAO`z?z?y62r z51eQGKLf5}XqHgR?*?D&UqS&;Hpd!a_lmbz$W(Nc?=Prd?1=jf1W5XPXl{1D?r{ zWrQB+&z)n5-3QgJY1d)7*>< z96u0XVBkMiw$87Q>k+P=7kN1ah5h0zhkj5%eUQ-SfzWVKnt=_xua zxh|)&vrtwc2ajeGlwuPM)`J}mx?1T(YNLCU;3;c`l+H-4kF!5FGx_SCj{fZU${od) zC!fbi6>H*&fiyu_HbL>p%;=cwq9R2=yo@Do?5x*O(u<2E)J;w@ou!k!N=3A6WJwX$Mw?mIBKL^^hc*2t{T3UhhPosD{s@ zU*Sw2Jw4%kpF3Y+B~(6jR|JlO<(!DUOFZ~(Cx$oIUq_10PqEgm`l%2c(;-vhO? ztR^Ca!w_)%DF#4Lsy&wurJ~>JWy#94$MN#h*i2A_as0v0ySFc~$3y&nW=!Y6{uHKO z)pFuq=yn6{l}(4_adWc<@5biF^p$^0CO^E7)oC$z;h=W!fx;IaMi1@oR@avzikeis zyuQd6hR}|c;Q6r*cm4cX)7V4RJuOJef#1f~aJ+298*la)!NXlbWpH1f8t4OnM`Uq? 
z#rk`qn>5{`Rr}~iDr;WMPHp}GG!YgGB#58f_#*76KP98{&N^l{mMj38utnch+W1R& z)mXK2H^oqMMetD+A-$)4epd-tUF(O7G)Kt!rF#3MBbeK&!*;$Cz^Zs;dykGvgt0JM zVmNkPVcxrYvf;X}zyU+!;cUzAv{S8FGlH0QThP}i+DBd9Vw-*{nfjO9Ub68qdpUhu z^2;=LC24(SeST#UXW$FIulC)WrZ^9iR9_~**vpJ?2cUxBSWDx2YJ*KT%EBD0k-hD7 zdk8ImnSt;wWPIF`a(yG53k?<=)t&!}tgh_1fS8yE-eUo?$c$Nf{!Ju^_e>+4RAdPV zRV;()X&7mmUV8i-(coK1sbo15dwD%C`prY}0i-!H#Cd60ao>T!IQ369bD zQnh;&*KYF9sCTCP>Yi8AUBSwp7}NZ4bE)qY%%};^Cdjs4YijYGTcI*75zqZ0MoL0f z^IvCm-ygV-4=^!OpEq|GIXQH>oGkl&gWa2#?A3aTdNYAM#A%D3u34wzw~!TQUVABcJo2ExWTxLEIS7S zSc!e_kxD)NS!Jm>K&30#=Zl1MzaJCQfh?%06B{bGAuf=+$;*CTyALV@#HJp8ea8(b zF~YotfRH1g=VcsJI`aibA%GJoZa*R8Kgb`;9$O(1qhUN625gF>jr;QwZN*ah*+f>* zuwR@juAk*okys+(01&gO_<3TSLH41LJj{>7O{SDX5x#H1@%q6dMcvty9{ zFESvHzWPtC5_O1t7Uv#l`JyubCZ=J7$L{SJ|A&e@>~wfx_{EDzBxuJUY_%m^8zXEN zj9jz;!X!?4yXSRAMl|MJKR>Ywt`Cfp0v!nkLP>YPtHnnFOvg#?R^WJ&Ddfhl*w^0T zejhbg=XwM+D8xP%FNp5;4k{dn^sLFg?j6$TuLJ8cr!2el#z@lvo@o{^eYUd*Cc+0@ zd`-Q>wAYTnCE5lB$)T78uetcP4|+75MQ6fg$c~rAQ7;wSY*glf$#6|x+EjO&Wj1-e zk3DYt@$rHrB-yGDZRyMkxIfHQ%hp$eKP)p8VKg=_57{Y)DV6Besb#Usg^6@lYKG+< z1L>-LMs#;O(oNlefwxcfLWa05_b`3?AJeslW{GrZ&JiU zP~aC5|LXvV(#{QDvV&AJn$kC@iez#!6A)Gs05rAxsuNfIxVKz&e(GAiTSWxKP{btY z*FIsiey%u(S^he(t;MJks45+P03#c@R{?8lqajBnwDUV5+LiBl(5Q?78xx%|G2@r) znv7nqa2>{(!po45KHwOlQ5}*+U$NbG_1#!vWz7i)vUh7^b<^p@thcLuVI-6NSJKyxZ)t*EWaM$0_@INg$?r%E^{yr~i z*+=O4>hL<_=sG4?P}yW`nFeJTI#Qi58id@POmwuLlQ8jT)2(%-*Q(SX=ovQ5P^SJK zY``}Ljh%)58p;HWcQbE}4)U@^P0SFj#|A#H$1x|-jQ8fP;Mt+bZ$duZ88&5)kf$iD zsXDanyP!w={ZaO2A%wg4SUNi#Xaq#gksQKNRPMCm5awYW$qd;A{jH)t9!4^CH$>|1 zBj(isrKX7iP(t{9yfzsRlceFV_#*PjME#7YUVi=y8#w4wV+>tu;{hlFZu3i@2^)vj z2Wc&En2-@j(-cTAL5bvH^0SycO)dzbG8h<+$gXYpsAc3bXYeW@DWIB z>oOmjx%I?4WHgQsIjlratV)fOP?I#D*_<96+S{F_;@9*3iD0NLNJ(l-1G=aAQOla79u@sf@B#c_jVcs9C ze-etBSBhp!A<78P{;ioFDK2(CxVjEzT8`5x0m`;)x0HPY;TeyDHF}4wQJPCy; z)q>+R>q|0BJfaHQR)PoQ}{48J@^RoOYA!|AT=o1!1IyFqczHeA~1~Scb*|m71 z-^t}Y4VhFOL$-BeCON0(p>-<}|DiPrq$ks)|EtF+{D#FU&44v9_2yTo#~Vs}ozSOA z^|pC`S=od=)t4_(R9xEfJvpKCZ|PMVKO_1YWsaSrLtK{YI&I{UDN&h~a4^m&jQsqM zhPa7zQx_9d!eNX!*3Hp6MU8R{v3`^We+8C2@sgGB=|>NWXuhx1nIP`pY+&?{mCiA8ufoXlc(>Xu*U(7lQ zU5=eO&05T5v6;N3;e&enEq{mppHlhKS~QP(Hg8{0a?Z%Z?ob zDQ?4&Vb{o%)Uij-*Gtr&C8@8`+f#yUfQGkO#g8ji6Ko?ju%AIoE;zheD_j*)K?Ec}&X5AeAL) z|AsKHdf0Ny_bR7omWkxU)2o=5^HbJT(agUSO1;_B1BddZ{5IgFN|qj8QzLjzSV|VU zq#s^cUt2aKaA{E|AS!eXif^j1+gx6|+*ddE-^Wdmn{p(wK znN;IY-KM$?sL=r|BP-pH9MeL1gNuvk^^3xxAjq7a_fbkKh;v_SF@?*Pc6D`Su8XJ| z2UHqKAc-3&tZieEhDLG;Xh7@mxMJOyh5A7;PFC<;Pr(we)8R5@k)GbXRIBlY)_C@V!OQ1TT0@v;D6 zBxEEDQRH-0(q8lupWwM_toi5Vn)+KQw&U--5*!^x?_V#Fw_7t{+XdtIw=8S7=n+(- z3LuYZbYsQgRlUd6p~HqHcsw)!Pb*RDylW|*`Y|m&5FI$hDMhSh!_GS9DRmK&wt<(f z7y{&1{N!CPu18F3ZM*7MH$l79V7__&h>8Mr!7~x0<()m23lg2jS1kwSTs2D| z)b(0NUA@l#UZ3EI=omWS@)@V8GA0jQS}$&l;VF5ft4xdT^bw~ew6O+W&}yRQH(|o^ z{4oy-Xck4TPTTHfDs^`x^uunSvla2zVnq4El`htQKj?DhQ=D^%KALzhms`Vcy_oaY za{a#8e!;9X#5i&_e(6+aO~Ur|pc~)yfE}K1h&ytW^k`Vmxzg8 z9Jp>`X5ysu_+!;&i1Q$2kc2GEX^@`VS~ipRKrRNMpkHpedc&&nbZVJhjx8xu)Wikb z_RR+cF?4Rt0w=l^d*o_v8O@;(&2{y55RKdI8D{>_-hT%dqSQ$JIT>E5Y?87Bq%3_i zaQueHWoM?XpDs;_JY!Kxr;cKzQ{RmwX$l$$CP`v4aWnZI>N4l&tvhsjkaF@MiP7Q-Nr?u1&QI3=Wl9olm& zc>16FjpPQ4m)AE}Q|LOI0Ug?mWcemD{+v@;@GekQb>j zL8wX|plX$}0{y^;@6!NY6fO@Ag*(cszhzzX5)92y|KKQf_p9O;z&GI8RH2=67Z^VG zSZ^r&HnIJqkjnXHM<`N92TsGGf&fRWT4jkk=WvFsB`X=DNDQ(wOyxw}Dsadb`Ftay ztt24gEWA5d#%9@TvK9DhdmFU#B~aPn#Ww!n(Ox^1_1Ha#J&UawD~d58{9;9;xKUkc z8?z4tA>Pj6ET~e0b7EEM0OAI>%TJ_>-NV?hLnTyzxd`?b+Kuc5tU+-s{b2t$qHz4Z zQICNmw}j1zAztKr=$MkM!O3JAUCuO-_%jXduiRAXY;`QwNci>9JWN`0MYjB-xs$s@ z%_#mHPPhn#G?pG?-MhNLiGes!dZyxpxWpsk0R}ZwFeNLgBR*mrl0#nSJ;a^`A5B{Z 
zUx6*kWWICIgPM8PBJp1jbIi>2GMzTe05dyMQ_9e@3tUqz5g_FEVWpyemrF={tUWd{w{-PArQITMEO^9mf=PgH%yyrkAJM84J*&xwh$- z!v~6gY;Gb`46RHbV-~yB-Z0}rwHB8Af||t8vod`vZ)+4Hd!av$C)%0e#ru1CXW=Zn z?V!Z4JMHL4h*fi8iM?==UQicC<<1-Re_R!hvmA$K0v_4){BDPyr*_`uJ0F{BhW-OJ zB7HOQV~-Ixc(%fg2K`P`W!IPO^<3_}%d8+^^ zqpH;Y*yi+ihbo$m9z!!;I2@k;U&pl|Kj=$5)(azpT}dzTH8ZPWFfpShB}3Yn6%8W= zlA8FUCHyP7$7lg%uITIQ54%PH#0+kuG@T4uhp?4448$;jq|}?kCVourc`>2RGw8`9 zF7t*Cokzjuwz$!gqBPWmN$mfv>X+i?^!>WWG2+j@AQDUtoxqWW5&dg21DZ;13T>xc zG8=&@eZA{Q11Pfoike6NbhQxw;{$@>5pSXP&(1BC(?6GKWbe>o&cZC@TvuGdE`G!6)p0 zF&&4stF4`gdb2XKhf#SF{A@0*xyi905qVlOOdfR-yS-|hOc=MK%>AVcE9)315@SMs zO@he4e`C;vtWa`K(X-$sqqc~UOsFU797pUZbqNklS?xU94}5cf%r904iEzABxNXlO4O znuUuGI*{3}+2vLR<-!J`phY0er7i-3%33|_p>b%I5Tx9rN^s)_ZCp^yRGnHmN=oWN zziL)VaN~#G&hM_kxzW}gs%~6!H$jF^$P?P!JO{sDjVw=P)wKu4tWlO|RDse17mxm8 zJ)kG+;oWrGEXDX01LQ~)&FkTb%&&{!$A@4kPxea}Wz;w)PXAr95+d;%taLtMc%_G} z$geeF<6y$_be=6|&24NAt=F1$A*;wVrB!2ksfLm!w(fkC>D_HXwUcqHhew zUMU(oeF-nZYPb5i(D=~!eWc-#g|B@m_NS~oier>_n~%%=YpnP~zGuv1E<(C(n;Zvk zBa^I5F&}!EK^d*-;h2UZ`y}K!{xGDoiRUkWP8*db|NV;c5UJ<%rFF$o!K9iBrV$s1_c{1+v>5 zdYxOA@Ct{0g*s&!@Anw_{9aa(qPfB4ms!Ka@Ruckf>zdjT8yeKk0!kYOT5Wj9o8gC z4GCO_agtRiv3BL?+L`cmvja{3o-(1UZ zR5M#BvCzC>e7jL@{af>JHJv_UzPr{}p*;oA!l^xpIi}h&7IhQC2KY;v)^9cf;>`(} zEcACI$gs9U__u0N@&&M11L*&Nx1)mE#9xego9HT-@C^gztEtjdN>fzvnS>HHK$!W1 z@Pu66-+DH&A6+4^8{!!+sjH3^OQ_pVbdJJQ8=+kh<_`p8kI$U*tKABVX$o#va>i}m zxfNb?K{b9yblBL~%!D5Q3EXUWt`>LeG6MkD3Wgpf0@&HPTJpV(3)&FmUGXyS5G>mR zdyt)ZS;x#+pZkykjG8s9Y;D^Zkn&cg{eY zFSE|qGiR-4sn|$O#cVFDXb#kpb~Rbhzv~v3>H>VmwHO_#qCDF-X0u@?j%F}doSBV% z^0jutif3C9V%&p|)8QOj?v|X#?5)(G-C5gXz!nmzWVlXV{;_JgXUnR#z`H5OwZq~u zwnGYBI0kj}DGd(GJk@~!3~F`ETWB8!4tfMwxzX%gSNR6MQyeSUaYX;Q?HJ937(6aq zQA_=X&RQRNzcc6Npojol1_}_E?%{lCwcVYry5k>j=N^J%Z^!lnW9PO1^#Btn#TpM} z9i*IVwXV54HNcb2+2CG#kjR$VTo`g$Fmx6y&0F4gEsd350^Q#0MOzSW`_kG&P{Qpm z>71R6iRG4joDj{8+4#N}k|3eVz!;L~1GA${#e70#=mc`oMCH)$i|3QAg|_eZ0H)G` zv|zC3Mg)0XK6f8MB{faU)~#&h5wj|y{Sn%@;Y+ji^)Lx@ZVNozEbQVPO29!Q!OT#2 zkznjX+dfX)*&u$^y3$W|jo|m|rRFw{v^7dEGYi}tyybn2mYuMRojVWiw~i^D-O$+o zRo$B33_I?R@RZB80dwsj9RBwKd9VM(TB*JCyIuatTOZ1}=%9++0EX4tLDZE#74I5^ zhDp?4&@#iZ;#^EJEvugi2Z{}o#$9cOXLklO2Q+o(m340~tbCG&a-r6c+5_0LtuFrU z3miigd^;OzK>wr18Okrs)J`_1VT)3cTL;f!_Kt!O?!6Tp4-oGR@bb!Ap+SOBpHu)C zKdCDINuTMH7B5YgCKF@}YI`QnKKediMy^u$y(UdYoP2a9Azzr*jN{ZDc>fHO7|CPS zkntLQ10M$c_$O151V0}7H^0R^&TulAG@KuB^`|*wM)nZ-wYzHc!^;PZxXI1y05`kS zH_#}zC`%ekF6{R`OfPU{x5sL6S@tNaUq|qg2~29W&JVcH0s#>If+79F1t_x4PvL*% z*dXGVMp+@_$5Kn-mz8KR{^%EW=$JTZj7wSTA{sp1e5TeC)k(qeb8v}ZK>O(=GbC`8 ztc*^Q#E~T%Gv@W#w{(N~vtlMx#_57JCA-`I_{+Q4=w|1G`T6r#PfWQFer3BL&!yIx z7-~n}T4Y_H@v=EWpA~%dwrtQg-7akN-}G#;6l)D{1B4+ZxF#Gj@DNk}@!dZJM@G;# zdxR=fHVpbVPX=ZHalLNj@+9z|e&`0pBn3gS)e3s9Te7puOAhe(AwymU<@hms>*FKp zt}23zX0(tYn}~LH7e9Z!&}$<8Hmh76lY=sRfAvgQ>37WVFVka(BCcEJbn|q}k7rO4 zWROB2>Mim&_tI0CB)<=hn()Gig!ny5g*sXv)4tZNRrjWzzuAG)%&vMollStYxWGQU~UU&6X@d2r`(BERR;rd2O=2HO|>2Oa0ues8cbW` zXni~R+UAHDuI@dSbr5}8U+Klr8c08veULln&Jpauo>uQv9qM;eynl1shkaZzW8IJ6 z3kQfYc3iSX>bNDMy(LlOf@W;}Ta1igf z87r71JK(~>8{FXav8A^q*R*NbZn_9CjXJIU)%~q@zPCwt)X6s2;GlRb!-cY|+og_W=cTrdw$Fb+KYQ6QYi z|3sG>2&E{?Cv%3M*svglr4kWZnRW&`ybJY13Gjyhv!60<$h=6F}TxmOWPQ8c1u z3AP5-U~MhApoq7HFGc$~BC{1FjZRII?%o*5J5pVc2j99u@uU$Mq=DORkZCP#hnkc@ z#Rb#4mjP&{H%_?0LYx?D8-29s5QbyDJQJC(}p_<+Nyq#bjN-)hrgnx9~@G~6O5{(U5 z-Dmov<_ojFJlVQnY2mI+gUow zVH5{(jKq@M#m8=@@#!=S8uqASK78h5CPwa$Qt9{d60}kg_xIkah*szQOxKw%oRe*| zlYgP28P-B?&{kc~Cot!9tWA&en z+=*^i*ajwlusD3H1jgW%Y76939{A+&+CUfFhavGvMn)r+s@oSz+C!P?S|gRmCN=2p z1&MJ^8dHWx0$FcVn$$4kOpVSD_zA|1=?E%(I(ta3)?{4(n%4QNw?yCycrDe9YtbCjHUtW)q^+F 
zob{Tqlbqk@B#Dl=Tby@x#`w2e?ff(qoAcWk9K@-P{JHE!2Uh-Z_b?u&^&%H#C79gE zJ%qJWDUQM!vXv$wnws6?)qdTCv$S6a&t6$uOoVl&>H<91EmurCPPY-s<%0@0_^>w5 zUvp+*4}ApuuS)?hTwWIbnO(L&T((7-9Ikx5kuDare76TIo_tU6j2jwX5ynjE&Ky%5 zE{wLS?M3fs?|nqiLdH1RZzBuIge7V{7dY9!zx+tGw>mUMZ{k20v2H>@IH-0MB_mpS zc*ZPY@Gd9kt;>&i%{gI1ywgtfPV@J@=}p@CJ%3AP@-dp#LA0^bbM1eh<=K(X;DM@` zXm9n;zI{}?ck+(O*nvaL!|MAH7}`==Zoe#3r3rBCdO$SpTHNvc%Am#;Nw?^N>K{ff z`Npi?h{oBr{3+Ens%gT;pS;O!hE*xE!haVBzAJwpBM^ZZOoG9NFEK*>B3pa%?y@v= zwsRfH_aQmLrv_}8JRtwgGod=wOnAnWk|-*j7eO>!gO+wixb@m<%QcMLEhlmG=pwRh zjCtjB_Q-Z5i#a_jJ9EU}R*fYDvw2MGpu3Zu{=E0Iyk0n7sgo$DbNNLL%~^-V1Wpb! zX{rtDMqT@KwFnN{6#JT(eWlQ+GNTtTn~92>59BjDl++xJVM=;|u&}WBdN?f!+#oMJ zCw+Kb+{-_qDMLKP2Lm#e%)Fe{kFO8u64BoJ?M?~|rC2f;fNP##`CXDMZC?=6J&&}h zjNM^&(}O8P6rCrBqH@tpDEV=zJHPQDZdfT=_ltg?2;SY^(r*87?zfgq8U#aqQwewsx$SvYM#{Ywo+R*%!05dYD)0!Gmp)NYf{b2BJlEJnbii_LyjLE zJH01g&x(6h)1>1_Bv0K$BL5=eNVwXu%!A$2u0*lLR(*iz6Z%oGTob8@KNwPz`(ViC zOGbU0*OwZs0*^dFnIcrki&(}d^WkXzGrvn7*Q=~UUC za(E+OH^t%2@U48H9{%*cyPI9oE8z1Lcb@BM>-5syrJ=kK7Sjf&6XL3(@v#z-TXSYZ zrKYC+XwOu8C7IrAF@AE~!XWeVPYJ{*1Dasu22z z5REg3bfR#|yp1=@k$97vW4glarP&RTrJUd3WEg-Qjr0In0gvA&?CW|`XI=2Q?V+3H z8nygLYF3gZ(bo`V$(bEawEh%Ufoh@X`%s-97~Qr8{6i;tv1M)YbV}WIFG}ajYsGlh z_{}r9Mw&(Z7-lkwUWfL;;37${{&3INOJD?vO$eN zcT(&2t-{X6>F4W##mPlnOZ(H>OaP{DmKBPB)99kalh2;UjGfz;hI7mK#g$+Kia%4p6`RX8 z+(uBxT+cN}r)uuxo-CEn_h4(Z``&2R?Nt{ z(bRDdI9{6Te_WU-2RbrvJMJ%;L?AuT&=oenKCsvC z|9!yXBh>S@`u;rZdTs3W&KP4oJ5 z-M6bXX4-f4GT_>e%q~@FRB@Y?+Q*dGqov- z0i7wzCowH96bmMceKXQbsAa+{F^vM>aZWb2)R^Q?zchI)xLR6q?4$3)u>=y%U&2;d z*hBv$>eur(DmZo zC#`ir;za@${rx-JDP_IFVdd=TffYTsH0JbwG0!3&W*(-^zvpU>YneU1dz;0uBSzOn z;Qb|ot5f@>k_jfyRa@WrD|AS^DPlM zg_m%B3X`68GSxV~IgHrJsKmXgRDTHwQwbkS!1MdIGn6Yg0M?-(e^Wrm1M|q`hqkWd z#O<7v`K(P#iYCMw8u!4xxh`CbX&LuaMc=F_yLNYY9AW4cRx=rGK zAIXtVX)|sq*-v-l9~CDxrNEf09FTdf|NWgb^8q*80z_#Ay})@YGUkeosT4x`{1Rd) zmw&+H9-f&@00t@2ZAq?~lhHYK%uxV3LfQ+Eh#LkAobZhAN zHAP%cK1|wxb#kNo^~iPm0x}jGU(t4Me!G17K!%O;CxT5HiEk)0DgPZm_)i=CzC^Q#Gc<_oSxn~jQL1Xon8G&V=(di zXF+$wQi}JZ@FVoND5!7sphlw6AM3KKNk+O(K^947v9g-x9Tl`v*T<4e%Eg$Wz{mYgrD12=28G;79(lfgfc z0$z#VsE5;?3TBwEUCll*=~GW%Hmj`L{>wa>Up!ziDO}D)S{AY!n#~1evJ;U$M;+}+ zqB-Kv#&Es3M$(5tjAUhAqwHlqxkOD4xx)02d{<+WohF&{KF?qwy-dfrs)seilh8B7 zF_WRiCob@aM3480)nzCVcjLSwdbQz8aQunx`MKr;UO_G{`X)Z^GME;U<{^fa@})YZ z<@lCS6{yhnOL2IzPg<78_{PMKD!hLGSC~4 zW!i!bI`QbYyw&Y_m{G2prsdHlf^Y%s?Csx8^sbrY&vv1NEc|9|+?l?Go@Wjikpke&tAczfH$7rnk03(wDA2ZnnYE0_ta zci6nQ0sQwlJCV>rPC`&o&GW%Ntt}Tm1pn<76s|N&b*lV7As63?4pHHx)=R z*i57a8DrC})ni}25 z9eskXg4=Vi6S)0H+Pf38T;bQRDBW#^zABBS&)Jcu*a4G_Li&K+0u^d9Hulh=qR zJvh$9swQMwmL_D=gs)e}K=8b;MXJ4}8sbqT;e z(lPRMEgUQPavRt&OiJqMx&-y9DD?CtKGncHDoC6w>{k-`GJ%y6j=Pq4dNXG_PMlxR zkekeNTTPNg6@|AI`Kx_lxNVK+D8^#u6k<3$JOMQr5tBkDR4bzgusn`WI{w$erj#v@ z+N4X%K_4?n3v*3(#WXL$p+H$9M7BFp+CdQ2hO~0C4y$^xji#8neOiXm!qv<4$`#?Ylq0iN# ztnB(4J3NQOC?}_jCct?x0wOGoexmj3lqMv0V1gZ=C_Tp;+?xgSR;2M_Vm0vDIQ4)v z;41Ki20thH1E%O#yUhi*%In4H(Z+yJ+a(DL<{A$(%|zgV)LU^P>C}j((&D-uJ+u#?DXu^-;{Xr z9hsV5A>!gQ47}9L#s=7#CC{@_YWp&}T^y_!KAkQv-2bZ*FjQ=rW7B+|ekJ-k2pr@& zC+=dcNl|Q@zSB3p-tHQo7~KJZJ+cn^o)I24tyjJLKs{8m8i)}i?Y(0^OnN~g9T|l& zo(kmywl;4dCnprLzbg$Oc+oij4xOQN>F>yP|0ZDm|JhD+b_AAK`L?R-2-tZ9`s>)% zY3qE19oD7Xx5%`(*E)b&QjXF(iQmXTvH1jIGA$E&kuhTZa3O#R{oj&>2yPEgGl$=e zgc}N8B;Hb8=2_={<}hefvBuAfET>1L&~N|XQN9eBHvf%0K3w|}eH?E&SViXyqGgzV zsGhUdFfh2AzUEzPIB}h~qx^GkkKGaZIQO2+Zi^!31LSulQK!h(+8-37y#CKkGh}WA z)syPL!1K-L$JVMZ+{RFcC)(>+2dLC@o>DbB z(f3}IjjUi4E3DER(HA(;g{qW@+n(^bcp`V;>22^toPUbkj?~S}-N8gf2e5H6{{A+< z(eAy?)JpFF9QV_MXhC(Ytq~O6Z`v)Tm2P{Lj2xvxn;;xudosll1kKweQ@88n#7t*oN?XFCzbKD2EPSjic^|jOdHBEUc-g=LCzJKpQ63>>-{iqd)36nazjx)~%{iV_fw!~01X 
zSx>tFVWL-e*zR?EuCAtiBEm~|iDBVUM0{LlM7E{_$Dy19%j}!qfe^!rnV}2|$|9T9NgN?{|)WySRf* zNNUXf8q^f%RF&UKVWzHf+W-=q+m=(e?hH;DM1DB(Kl(?aiQ*nP4=yH{wiQ1T4TN&- zm23U+-u77Cd&<4EZeG=ejU`LskBMp-ZJ-G^%bT(901!vcE zr4olRcKO*|tY!Z2#}J(C^AN@b}K=)fXfk zUBgy-2Xw*|1fFSwbD%)8jAu(-z@)aXf62AI1l2(Yw$`{Z@om=abhEvjoZRm9=T=X2 z-fLN&%5cWT!qyK^p*y+OVvY-K2>eCI^kHK{U;pDjn&Q^SFzIdR>Cdf|CR^ks+QXRu zV&567VCrNfCjaXEi(ezjVB7E)s&hD5eRqlNrpdolFgsDux+`C-zsF7%p*WliI{>g03j=;UaHy zQLFIj{h<}ccx2#`XDw`axDO4K`V$09H9lhPR5TBd)62;*_5)v&BScGjn^PX9M6s!m z^}0*JnlsRsSZAMeXo+V`#SnRI_~`7uTQ0VToTngt2nhF`!8_i-m6f1zzND+0;)9Hu_*FbU|%3GYj(m7Uqlg zD~7T6g12vy506}0W;GDHqy3x0yJ-U#{!b{f3|uY#1NlU|xxul!9FvdgiEuDK38}nG zmTywDxf6v6Er&VPzR0f_elm%^elVygTMmAb7|-(>VP_eiD@&B-6~){yh*D z3f6qs{S3zVK!;})y^~@Aeq8HtAO7>A=gpn}p-i#XA8VEd@aK6uF8_J;iImM<$gScJ z?x{taT4ZT3Zwlu@nVm2h5R-?H*VXaQ=l!+utt^pbtzSx%8yHgO(~tKlL7Zq9GQ9pB z4FVz)KKOJT+S@wax|$I!v!b)Tm9KZ4=L|67q+mz0Su-JP4m`VS-0VzfK2;G)U9ixI zceZDGU5RKT+;-qAO8rMPg)Rd?t)k*|)I(Cvz(AZr3NMqiB1U|2D!XrfDSB3y;pmpq z%@)T0(fW}=C`-}^+iBgNI~Kf!f{=YMOA50sdn#T%jJ%RT7P(eP$LRyc{ZW3$p44ew zN|$xcxK<9u?l{L3N1iTP1ATXXlVbc0753H*8h2k^jNwqsre3V|B-Bqsaey9ifG|5G zfk-Wh!p21wXx@WFvrwF(k)&%pF7|)%_0?f*_1%^QiWGNum*5n4FHl^IyF>BdPH-s> z#frNXcMq<`H9#p83Ir$)llR{5-udp#Jo9&+Bq!&|Z=JQ*UVH5v9_PBRat)sE&g3Z~ zEmA}$cNa>PW0_9athjqEP#5^c=-@(ORMugVF+-0UiEdEO+x-_g*6=TS37*hKG33t= zBS*>8M_kk50usR_krNH;+hg{++@jvtXuS3N`+a}=AgkB<0Fl7eDEWqrKQ3m0Yy3mr z^EYTa7Z^LAovXucuo1pF@y8V_Bte*hp-h+I5_jPZLaK3l{k*eAKFEIMZUVqe;y9<$svtCRQ#<2-?S2Xa!}B~>{AKYKw6+OHx(CC^ zo9i}Z!-s@IAN=mTtNL4^fgWIx0QLT=F7%Ws?`cNjZa4fc^A&0e2|V{V74SZioCv*w56BhHlZ=O0q1A0?q0dmHDX-wiZLAOEqu6(ZssRsv7df8%oh$ zF?4V-!+tyuenZFf2Lb6Iyu(M}-@%M8KUyzk*0?)5hiN1ER$`}i*X4(om^Z?fOGS9$ zj_31mPk(B+^GqiaR3$yT8^*==k;7-1f&RRn!pkS3H-Ya>=#yom2GT=;2*x>7ICer7NUo0h8a;)aU zmBRY=I&N-%aS3KnveOd6Q<3$5-kBPCR!Y_UoUkTmz{?DPD~4x}8+E`<*75s&{kt8w z$6gBFdhM4XCMCD*by!ZA9qGo2G$ofk3DCuNy!0y+%}D07_z>IA)Wh;4WbHD|ZNRA( zbNY6F+(;%|YJ_XdbA*_YwvUev3ymMX9e4#b{7lMT0&m6{Y{nWc!ov%7x zv&IQ?OjSHAprZzL`(2=HU0lC24Z7$ZAc>fCwPv65WwBs*ych&zN5Z5x#mZ>51z2!J z!_647u>pgE;DkiNzYoVcH3UKLGFnZKx0E8YdicV3UVQozXJMHu-+G!NRLw~bZv!P_ zlLGONEBEIX5OGHraR)7LvL47Lvkt1@>E!M+*4`#SSKN$bJ<8{SKk@V*7S4wZd&8V) zl90C_+j$7rg0B_zq|V2G-9RVqZ7lmgtz+`mL}KzdjbWXMpyo!ROs^L1p27auQSL$3 z<^(&>Z7)oAYf|y56#duZw}pZoMA=^xzLyrQIhk3vU@o!_G(ppf@WVyPd&&hDNgGNts;+!P09esoI*6R< zRX_6Gj2bmL$!P?7a zB}KfdzSI`^DK0~?q+pbZj3HepSzq-N~Ft8XSsXXbGh9Jk`BW2>%(y0Jn=mj-gO<(Jzg2x`qQ1+ z8ZiwuAZz2VDNk=OAa&dq=YU6)-UShoH5Q#DOh)?lY2|X|JSN z?x~|!#4K^dEK{j!1kMpj4f|~ZKT_z5gPzA=IdybNm!-Ym-7LE&|6P46zqG2{8ZF2d ziz7LC9~N}fMX*`hSvTfVxPjq?eVzLc9Xk=upN1g+=~An;-i}*U-R?1*Q`=@3rEQav z$nG(0NC7Kut&CMWHpwngYMj2D#?9_#20!EJYmd@GIeUp_Wi?}t#W|ePsFM^@K=)Tf z8V${>{s-8iuuxDRGq*zRxC;7CzZ=v~XN-*HWhn&p1lqB~{ogv9BJ@oC;1YhMyHH9B z=Y~p^^0O{%}(zg^-ZJ+K=PRWhrGp^ACE}e?j`!0It^l@iH`$Zu6v3B523yN+mg!UDl{ zrp}`BkV_1iWm>XB(_9F?2Ebk@Bcz;NQ%$Vjz~QnnGT}~8`MuprkG(dYQTFeMe)ZO8 zVoS|@mVJX<>nfV6T6K;!LKf~ND7QmnTZ0F-A_tPE+<4Ce9FO-qtv^PbJx%*mEj74dWY zP)kL`xlDL)ZBWk-jUDOR=3Rv8=COVKIXL;9C+*V}}WJA66?Z z_vMp$qT%FcKf@XeFMiMW^0)jVN+`?3<28M4YxnE6Cc)2MPou6+NH#V$+8vRuFn5q* z`C?Xn{wQ6NXjoFm&co&9iFye!@n<%}1C*9%nfJ!S~$D5~t`?tPadzDufE8 zt#5_*#`v4<(&;Wk+D|O${YzYh>Oo$GG6>iEGH8?tC{gFxH&&jf+D6fi=k?Er$oBbm zdo{GdpkalY1to842Mlwb>}@##X!B!beBBcoqw~ilVL@7g^8UECDR}mH-j&e^V@c+x zckftv=`NFW!<^hagSa7O+tn%WTMf1Jdk;I zWMS5YvZ6N>#>?@n(v8yAp|KabWO7!G58tM<;mG%j9cguWV=g z9R#f`gCiWiWE1ft%sU4)hK#$E>9QrM0^=kq7_76Qxw zBIo5X=c)bR$G#_L(a-Tp{8u;>{%m$gaJ}-8KCSbkA$bb@(5zSkz_o6IoU9jP(h=wM zZrjU#@Sy3-B~=kib2A;2deAFYu_uJg9P7;7+CxR>!D^FpU)OT|iPNnrhCX@|bvA;Z zwqAeM?pXBe$}9Sn>C;D`-VCiwejf1L@{a0dtYA2W4!H}DO2s2@YVtoQ)+Z22YgIMy 
zwdk36Rx`zRn+L9+@VPTyZnzoT7Px{{xO|P>AEH4#Ft5-{qCoYGJvE38n7B^&;4JD0 zpiD?N08GzZ>dWQw&;<_pe45Br#Bcxgmc0Z43l9;^x;cD~21BHbSnch>$DyQ#1N+aK zgtS_E8|B*aqs)_V3*8y-&^a#%YW2JM9lU^PUg13K3Dnzyr{6IaRq2|VPq%3_r)C}7 zxJCJ*?(eHFa&*u$kdh~?m@^q;v&o^6a6bfQ@_q5DmE$IUgKFi#WCtCT{={F-tk%I z|D8vZzmZ9h+}`6{b=LH{`BQC5S8e`z87neUZo-ZyNc|wD8eJyCM;L)Y_(#p^(Myo) zWU%_xT64hk3Or?(SAO|c7Sl_lX?__fuZ}er&u6N}UZRZ5<#lb?cy!k`#G1d02cf+8 zi`=m{HA?!wV23KU5xmxXksnBabmNmPlKSF%D>T~#?$0HTyN1(5BW%cg&si$xFZHtm zemgNij`qDC{mm{{(6qj6>wkFSJ`si-(ou3Vzk?CE%6N=S3GrU^-PXv#i#KJ7r30vTOR@*l0z*n>m3!Q*j=q`yxV99gG?Hgfs<~i zdo{k)yOQ%rVEqBMc)<6UVoBMf$+HkjiWLXxoymp*%EcIBQqiUPghI33PmyJH!^g9#S3C2 zEd>Y`L>Hipj*)!k({b>}w%W zPG!QJ8#&QsT{nD?gy0CH%9SU)@o&90QnTqW&t9}A8r;6sCj`gLvbLYK4OdG_13$>3UcHSqEjV3KVQsTD##$`m!KL)J%K zogFS@&6MhtHlQF6NtsT56)I6CkZfCfH3D$F=bBZo_WvA+k;r5?R02L(odlIxtG9x< z#J>~45&*#o@zfK9Tmie{_+^-MVuDdqyVy;ZS`G4lY2FJ)ZZ6TFQ;5PP1TI^WwX8{4 zyPG(+74h0RBimFX<~Hffl%_yQW_#`1F#&BD8_qP7?G^`Sg^QBo*&v@ix%MLySwAl% zg1<55&Ed2gomLvUyr-||kxrzC7K99xyyVqZ)y(fH7Dr{_uOesdNO&6INfynQ}0 z`!oeBC}2sa7g`cGTB}_?t_$ApaOo94CrQUk9zHv;53d>#Dp|wbm_Ee?7ZIxVJ$CiM z{4zo$=XjgBg{!fY3x?5YZR^Q?+DzYCKHNiTYWJwB@3!4=|xyRTKV(5iOU z!ZAF0@f+jjAKvT`Z5hiVTBe8{_a9*m_VS;5tbcAuVlVF^Lv=2A0$dU6`XVws2yT|G z`PTJz-=dWmSc}0E=1u6u9-jzVH4&sKhXZ6VBLn#V@>4fEcSeGlWhT$%Y<>yWKyiL$ z_sqr9{H)xYIFvII4kpm!oH4iehO1_4w&SbgF&a353e^C5A`d$RKHp3=w?B<8>I=To z{+ty{GBBC#9r^q~GSz!e&e3;I%aak+cpTUOTe0Qn4?HWiy_h7?Ugi)4x{wWxz&pMFT_r1) zA03En-?r}j-8hqnl-e^_KVK*g23qI(?VhA2yJi-%_~cL{bTicA;s&HBGj)yx+@wSE zD2=QU4J%$cW`a?5k}O$kE7Zs$4J#$<)kC-v1xE9PGZ{a~n*^D09v|jqSJ(o30ktv zm6cE@J`bTg#6}058%!4G>?8um^PWmxOtPG!q0eWouAcT&Z~W2;`co+ziUV6iX^T)k z@igISYxwt}T|cmusC5B+k;U+F{f!EL4Y+H- z-JSw!+L~j*c_PSOW7HFNA4kOPvj!$@NlZG15|UXO+%PgFYk^EUhJO+ zIiJElNp7~enl{;G!H((MTS3_!{l`Dvug|ypR4+rp(WbC$%1!*iwU>m)KyY~NS>1T`z#4786&UE*p88v1|xhTbZAEm~D<6C^Kp4``Rzrnk`?VoIJ_2 zn8PKELG@%8o?5={(+TnBvoVicpMRlzpP)92EmEXae23eeOKlh;A;=Tf&G0Po&()(M z@sfNcrBwC2ff0h3M#Zo$)O%f%R_EmU3+skd-&L5^q{)KPdx^Gg)KO?6^H#&>_45t= zvDnTCt8j2>OXN~#W2kKen3lS1PZR_Xb z1$5GzPN)j?;rWGKL%Bm!LKJ{6jtH+WVXl_Ojx$BiD++1fA>J_3&{Wdnn3Z(0*}>e; z&hsr?`Z#aBSdwkuwZT{48%H}mBK4UKViF4LtdP7Jn(O!RYNtcv3Oh-uI~VkrVo=JG zCWG#4NHg$$tsIA|&uCfGzgTH?YU|ntyFABwL;e$<9$4F@^o-%}@4IQd@M2Y^Rxxh# z1LS0mq=5z}eF`ia56rc{I$7I1Dt(x|cG6h6T!Z}9v1b%t3Q`bw9r{r+%z#&1dA?RT znJJcr~dSZpX=60Lw2@n}Eu^#b-ny3~OZgOvwK%QQM7e((B3*?5rR zy$u{s2RxGeSPGk7G6{KvF{DvH%$T{VIA#2sWzNMkO3=D2*qe8eR34RG1z3J*@(Q0Q z$EGNdx%VS;uV(6-SnxZx%$-5#VYA}+@&^}$gcH=95|Rmo!>=kSZx1XgM1j%fNtMT3 zEObsnlq}6@B5cldspE|`G@J5xgvZv(tlXmZ{^Ghhm^q1Y{q_T{@3HyFqDj4E-~VMd z3BMG6EIIB-BO!{~h$8R>XNT1Kq!q=cpHPprVj~XoW>vJz!%}2oG}$`<5yt!%qKJNPf!0 z<^I^E+RM1aK;O&CE9vVms%JXU`-9Xjup+%w7%}uOtsjYLI!Q%3^SST0;^^K|7S}JK z(cP6p6Sm15+OR1cyez@L6mwT&cd+_+dRQIyCXuK=__DXjW<@_d!4)s?H>8iX$3W)Nl0Iq>f;>+X(+Rh<{&-T(3o zuv#S4G!IF0dZqR+g)(o9NQx?Yb{Z;FAX<;cxka0Vm;Fdt0IK{j7}U|M`BvOQOdAmj zL}M;zXvoGES*BHH5$t)p>$k9}LC*Y!WOhoBvq%6oYsnXiV}$9Ca?MqHa%O?=SEy$#$jHd6|RE@i%t zD{!7LpOa87rtK=Qq=7~o&oA7zlP*eUUj@4FrwWR={d2nQH5F92V*pd0m=s;y$ zya!$-Wz>fSPdlt>(~+nab_4hy$wvE81})xIeoy>tP04F|$7}R_dT#c2GC2B>=mq^u zpg`L5#Xf?lcj5wl$m4|gt3T4&qUq>~a&SyX{WEu+_{~Oz#BDiviG%JV|L<%#eFQC( z1oDkI=aHxFWk;|28nFveGm-ISx>6^Oy zC-krDtnRv>S|X2{K#i}dMbX41ju8%JhC$23OrwHZyT>?T@CqR+sIbAP4beN&KVOh-=dfgj3>tXvzNJ z`v08tdgOE^#Me8jSIZ~R>jl*X*2{oIKeG4*96=`afJ>O);z4!u+ciqjhUL2D+yu?6MnDVLCc9rY@_x$sauW_PDuVdHyxE&&p0e zpwKO+q7M5l5_TWiJ>Rowfy?99zlc*)fC^`|Pt|O8HFbmbe7i!4>xuFz*upAoa@O); zGr=rK0;h#%K>f<)d3WQtCHNzXyy*J6IpO7Xt;P0yBNuei>vI|twp?85vfQqNJkoYK zGM}*QH$W-S^sz)mk}t%+WEuTs-pRXh);yUI+aO+fC<6>BbYo))0PR5$i8eHuoLVxe 
z&pAIlbE*t$hDS#6pxnAl3x(r7sv-CHLfVcxn!mELHu3x~&3b!}pph7#fv$g`hFRHx z;c9#-x>2n8>zcB7;th$~2sm8qrt$PyK2Nno9=5HaM4Hu0!<0yHlg!{ukOs*{T=I8L z@mo2-^w-owkA=?9pD_3ws_j-4$w@jc83RV|ysY3YxkOu6P~hN&m2n3si7mQsZ;*>b_ne z%QA2oB+rU6AW`V;&)hB*nhHMDvg^AiX}9e|J({jBygVUN4?g77@W1aXzMKe_*)M&K zT31#pdVpkV_#X=KZ37W&+e~CX|FM?+p>x1y$<>4?>x{*s&`J;G(w^CyAET181}a{f zG-zb6j8(GnYXTTpYT9WSc}d7gD=J#m8u=|G8D`&|>E0JoPuccW2#W`uH=7O>DTv39 z_1$;5JzcsE?;%r;%+YU1oD`1of9>$gckYP>+e_N+Bu}LaP{F0fSZB}2^OT4J>NnG4 zkf$5WN_oLHs9$RfQAZdf^_g>4Tm!6^rEWAkZdv~MpoeOBVi53cgygvP zVvSo9z{wn51O%GrGxAR9Q|PxQ%4ZGUH-xNQiwVZjIyCHkF{OFzVSJ7~{1St+bGz!Z zCUDp7LezL_clw_HT7^=6G-{+6$N?42URP~&Y;XZMj&aSq2u(;%fB9ZgVve1;W<{Ya z$4{U@{`X^?3YX_Q;YVr)RfCT4l-Uss!i^GalGbu3w?lXI#Sim>oabR-A| z_Z5!BHWR;oxG+;91B&B`Xp+ua!&{H~FGX9m@FX6Ff+@n&@Isf2tr(Cvm(6Q8HLO;)gY%;*soBi4{zci!%OTgR&ymX>{&s=zOkhoA2*%vIFX zn)_^ZHeGfvr>*z1@LT3S`BJoW)DQlO=8(+6zk%)Uq=}R2d0_UtCo3=(IWzuJ=kJHt zI`xI@S7*$X`Rs453$FxLjnO~2AA(>lVQ;k^{?<>ehVzI^0~Prbpsl6%hZHI2H;xk} zzz$5qDeuX{-;KW+D+GH%nywf3>%ZG6dX<~kN}ao~Dn=B9ye;W3()E{=Biu_k3JnPK zI|8%TAK^}gE}xe&sS4|6dK(eN8g>G0%NL!IAB;i#H`)IzmIa=*tSq$v=kR8gU?T5H zCn#?!HOO+~fc~3mS9jRwARJO;X@|>|hvO{}I=RDzWY(8^E=+75rWF?&QGd)zG@>QK z2F>;?CF(13sH^kTXuEMq5Wi^n4*^j zwp37qoNA;um5Cep#2g>mlryHj+S>Uiz{&7+8}VOOLhXF;(6pq4h#iy zy$;^VIW#TeEWwC*u`#ZoU092&n@S6$Y7U)4$Pxv98B5I)4p18TNM%l*gsdtW+cQQv zuw4u*z$?exb`LDG#9z3g{!%J<1F1ASXsl3$&=a+}dTfdPu`n&H2E**T>>@F9T=1M% zLOJsh;8`_`J|M2hPfQhC;$zzjMfkm==o%ELAXj$kw_j@L6buL4fCFZ9Ck(-TkSNfi$Z~w6+aOU4{Y}TFPThyawl~vc1B7;?H>F-iYazrO>@gfz%{OS?Mg!`lMk{ zwfy;I)3AbbmfAt5vK>KhTNK0A_Z<#_U%iRkoI@DPz?VRmL2M0XN)_@lhHmJ*1OHwp zM~=2O!j3|$Z(A<=(}n3HN*R-U>7K!C9n{xw7zqU___PkdK0p@^;EqZOHarpG2FbD4 z)h%B>RogahE)*S&B~uYweu5)ZlL%j4J2HPbXd^~zZ{nJLxUd#xZGrnYuzk_<^HODS= zDP)WZ{UHwx>yED~aC`lHnsmt61QT-}zN_9yopo%MJbCx3AI}Ho=1_iXMYyav80PYC ztX?5Z)Pndo^cJpUM!B82?Z$sLRrQ*Hyxuc&5;|rs4>0XEQ$2P@fm>53D1LfSw1_1P zFfOn5fb#`q4+tK+6sxluNYZ?77_iMR4OncO=fhk`Cr|*EBw#DgEF|kzJhIl3yMO}_ zxbch*I>5geJ=~S7$hTq47$)1K&1r7>GEa3+jD*D27+(>bd)wUN?!iXl=MVis<@A2` z?Arz=Y%uOt95x>^tZ!9rE)bvaJSwsm^W;K?6nwL?xq%_t+u=l>3VZQAtPe!tBImroNYM^h(`L+Ug%WW=K;Axi`5tXzO9vv&LOHs)49}$r}7m>Mh zVZo&y*msaMI=jRAQAR^8_?;2%$=I{QTWl~_bbB=_YbYZAescSh8^r-LFM`(^rrup8 z^%V{-aJ2X-6>Ak92~=K`C!f}6j)h?{TlvW4eM0|TDdr~ri1bg87|dk+Zy;lpuTPFQ zdgTjf*=T!De9V;O!=v_AB-=$`NovluGsvQRNvCf3arW`9PP6gM@wP2zTXi(=r*YWJ zB?qiW505L=BgS9d%zZGi7$Ov% zDD-9rFsOF8l)SEO2c(FGie zxdh`CC|?Q5gD=P$&+q86#Gl@jC5czW+m>M#3?GE1W(j_V$Ma7zcb}C)*N)yz_d#|q~zgjFZl|7}MArqqpShUufI<|MB}qgYy?td)i-5@W@QxHCl`#R8UmVUulcm&If(6Tx}-R!}leZd0brxt&W63*cOz&Ax>n?sX! zClpCUtG^dP!EML#d<485*l-5$v<}gxJs&1hNOUd$*%Pg;M5Sj6)(chvCjHq&BC{k=7^CwlqSvixs_PK+YgP$UBNf)zkv}rCj!`QxVL`QZ2=i zk4~UxjzT~zGR~}dU=MaD{a}CsBlvu3O zrC{0LAQXR9ScrsD`5oz3n!Ii@*V@k_J)D1Y{{wAcA1Ck+O@cvIlp~AhyF+RzL0mG! zVH{`_Yu^X=G&g5kn62F+5!|Tf%>qg@ov4OaHingl4u7TQC&$DuUD?MxDCw4IngDlC zZs-g42V)9ucY7f2n>hr<7|_C>uhwB3mKzVZ9FqSz1Y3SZ@0gRtLAZwmr+W|2mLJa# za}0Mc`ZlE=d&^L~eJ=aWZn#q$kaZo&~=?yVs#s%c*p${`gW^gn=yS@_#j zbgYNhZ) z6dxZQCQ`jtHtjbR5-_^VY$BZi@NIZ1Z2j+vl**~^o5MYXhPWY^_z_UXwpiHona;4+ z6P_sli26YJOO^7k0cRZ?;KyG@NlyI8oV4tdX_LmVPzNylUBXS~)t&H3^!zCKrv%c) zLiZg0y)Ngwd_b00^7xTDb7snT`n+f8;UQ90lbBj@s7+5Ol1%E+m;CccZNhAh@Pjoc zzWs3pF@!~q{;4!|QerK%+qFS#`LVM5fYbnGI(9>k1GENZhO7VVLnFjq?&u1aV! 
z?W0C?`E*yH)E zECp)OEqvLNmZ%m$YP9drk&?T)hPfEeCO*l22U*e$LDXapEXjJ6{&bZdVN~6SF-p-W zjY9)j>YLtFiX^<+ppmv!Ou@L@qdJ3iyFn48+V8aH$i%yO8$w#Vq zQe6q&j)=`{nGK>oNH1unvi8=LXaL9DGs0-@57ltxCNjVH%%bmgn9O|Q#16ojyqzvI zsg3Ekm<6Ja@2e&pqai|%rnwU;wI&L2H>u)mhF!AxXza0By0Z5(?(x*1k;TN>Cw*}- z6m!IGrqn(2ia&@42`ksTKXQ_58%V29&mk1D#Ex$9jY4$|1REc9)ilZ54pzCD zc_9f3y`rs(GJf2IjXWfR9129j(RRz!@CeFiDt~IUC^ zQJbzOjf?F>!x~E%mXE#J!Vq)Z*dnkl8i8f`?MTpnR%$Spjg3?8R~X`~d}fd2C?cmv zQJ@EFXUl@gPGr6x>wRRs5H{X41g%ty&{Ce8vMrI)85nOcY?zF$!|CWc`M+v-fG z3*x`g8Xgai%s=0zWDD* z#*DuJ6f6r4_M?*-Z$3~6W#E9{5`JJaG0WvC(({x&W9 zBErWLIdJ(*oGlvBC z_#jGObP(m?C5=fz0LmihkCwd(m1yEI!Wg%3BUbtZmnr4P!^VMDXP`N1@8T$kvJZ<6 zfng-9mMm^qsa{?A(a0iit(~g^%zEg@OSC*}ULmxu^A)gUz5SM)d*JB1DEHsduIU|xom>mJh3S%ts*f2zmMAB*B~Wl zaB5=yB>A(w1AlQgh}V7&dyuBJEM~o)9BBwX>!j6*gO;F%7vsebIum-$p^~Cg;J*r; zU{9o`j|s#~F`(=;n~~HV2*FV5i{u(!F=kujnzCz=j_I&uC9||@hbQ)fv*5I_Zr#_R ziBbWxTBCv0CtsCD(|k-#yWc94=C7tEG-_*~i=5savdObx$)2)Jy`?BeOduhvQQB=` zarLqW(8`m$bk(-Xx_>&&1*n1D<+eUF$206BKK4wv+7@`9U_`KkL}ONwoa~{SW^*_A zDo_KbnD6+9ImbCpYHK{ByB-?-4?GeD$4b5oY9J}}ufq_NQI_`{8s1t{oD4kQ8WZko zW?F9;B;MCGm~=A*XumjtCoz8m43QkuWbT!i&HNl5KsV>i;;0hL&3V`o^!ai~Yzq^# z&Y~_HWR8bFb$PD6538wG9r8Ei+9=HMm|*3S;J=qk@_oKdWWWHewWUWrw6pw1l#F!S zl>EkM_3AmXKFa2w-+frl#V(IOuJS0m6#izM0l~0~UGkTU)CMmhjyRI`)ZvO^yxf7z zTOo%4DU3?ajuKuEgEUa0+4+F$SbniY2;>5}LQ74{b>Hwxcd{Aj|HpD@@-I8a;^nL` zmD2MMb#dGbbao}Oi{r)O6l?uAEq|vqgAVZ4Xw-kh~(h3Hr|e|*Os*rIQba6g7Q@U+sjMg zgQoxE_C*H+9{<=0$ z?xRN@A?gSmYd*xDy4d6^dWKjn;JI$)esp~-)#Y1;H!W2hnup!8BiPnyDU)co)uH>S zc)xAlZT%t_gD>-^x~U*&{mHjx#&iJs<4QkU2t+_x?Lnkr*dF_Hf;Oi`=Xzc%%jsnmT}9RGbXOexQo+_NfESX_mCTv0v$aUV zjjyOx#J?*j9@a2a3|dzrC_19CBB6uIw=SVnhL%fHsr8-*#bahHS^augca7RcD_`#q zNBv`wcT;8qIAdVB{;OWFEQu*u zWKOwD=*h-m81un|k!6@Tax;9t^)qbix9`NP&GVsi5)HtA61n$cfm}M3lMv8W)JzrA zNMOBP%}M9CH+xyD*BhY_p2^h&!Q6h=5hSvv!<6QU2J{H_ReKrVNStDfr00^4q~$%)7pH7l-9XCX$t&UsSw*!( z$cT>Ky_ef3+AEHNk6MHyD3cMtp_q>a%uFoVMzjU%(~LO@un?(_zI*RPCrZ!R5HN*e zr9kVftg%d!C2BVAtXp1dY4M&^qvIUB6sR$ET0E;r zkRQWVRyk1TFJFt(J#9VV*Jfhw{UO=Ik-x{6wJCt)MC94sycj@0b-WzuV)Sa}(r`K{ zmLO$}##fr{G%IqReKWVGeZexuMk_9|7rY^+SOViQl5m6)kaeI`52y^UI?rNllxl$V+}F zL_q_{z4(?Bz8|zX#hqPlzGsghA?rxNAqQT-q3`&>SMJfgs81+pxpls4O?3Xf)Aiq8$XS;r}JqKwN%Eq07b|^T~S% zkymM7aP3rE|0X#OZ&UIY0WP?1vQzfu&U0>;3PZ}M2g=%g<5ChDbvm~~5@h91dc{qQ zwJvdfB(u`x2YFBiUo?g5IP3qZ0$Mxu+gi?ycRU22Mar5O(Vyw}#24U1OEbuxxWy2a zYC{MTm<_e7D$G8REJ3Q%7;yJ74C2%8)LGVf)F;D$T^f4wBxm;?7o^H|@Aa_nC_g0- zvZhO&7+!kgcr6%M^q4YN2)`?ltvX|eA$5% z*`<~wC8U~VlFnZTapfG;>@~xip4 zk!fjcT9Wig0=~V!F7rp-KNwSQ1cEIDu6}e){%Rsn9~R_BrO{flXkU`)wZax=F=U>N zf}1>Po)NA|jC);qJ#&SG(jo6;P_jC+p3zW>!!KV>kLZX>^hJjn>85KG`LCik1%l+Q z?bW}6%8{H0~JX`=bz;AEy zZtkj}-`_(oMT7_T;Jgs2hnqFWwXN1c|FZ#-?vM@gQg3LQyN4{;hzLC6LlG<$wK9f1 zP-wA=Hq1;bzirhio15qGtBBK$N-R)m`XMhb0FW|%rp#;E5kT9Qye}bSuC!*A>t|)q zKPl6i-e_{|&RFj=Xq;`6_a-%&&!m>48{B|FX^aT^QXiM@I;uk!p=e5FsR zs{UR~8mdSaMI9FMmSkB&bPk~B2+mZ}FHt$-;|}>UA3dJa$rGTJ7kQ{V`k|Xno}D5? 
zuX$KKj;O|PEYmIKJ?f{syYm+Xa=~`JCSB_|0y?4^7TdFr}9x zdZ1sKQ|EGuw|Xl}B&lnli%tOwyq-tpA>p3wA|c}7yUd;imucUM!-9xJUx0}F``Ip2 zwU>ivIzN8G#O;-y1Zk-;1qtC9Zav+!M^N6js}GO&clb3ahqq07yg#o(s+0T*EKuX4cRmY4zu=p)cBU;DxF9uK_in`*N;Z9Y zyOfgLt-U*xAU`|@XG*2PGC~3+P))DSd?9RVe`mTee!vLq<8`3k$Gnm z%vBlsL{#rCAi0o=>>A#tB|Z}3^?jvTaiF+&(MhfXO3=IZvkS#F)&&28xNm5(wN3_1 zaJzRg?>cgq;u@MSGCD{E^x5CJsWtmoN%u$ASWj2?!slgXXW;zx<-EGx^|94;X!iAK zc7Lf->*N#6)%Ty{oJfn|JM!1dB<-9%NIlyj91lGHw_8Wu=%nO85gv;-F02Xao-QXV z4^Y}oiC4h)*+5L8Y!Qn+3)V0{7oX;Imk7HS0h!V2jUOL0u{(hrOsWcDb}Kvzbe$bj z2&COieIKA@tio-x$m?Z@wx8dO+=_-i4GcM$d^|qdpc$&d<*BPOeJ2ZWBEPGif+#*) znH)gU**+i8kz^CiTZyZO&kk&)T5^DTDt4hN*i^Ie-p4l`rkN;U(ZYHXB zt8t285`JQ@fvz49)9)t-2(pbjnqvL&%D--)`0sHDpH56@O6`bQE+aH5)SWS$CJJst z_tph;w)>&at=#u(Vc-81*p}o=>-LSb?4pG??(U8aGeb+Jad7o8ll)Mdy&nopNC?m_ zElSg5Rnb5(UP<2rm9R#kS0<{-C08-0YiFOvCP-V}>H5d2C!5gA|Z^&%f=z z{}JXPY$)3*8Vl)X=Ho3Ejs0HN8=8(bTWp*sQ@kzRNFQfvz(VSffHYZNq|L$2siPFx zj@c8Y6<8j{gf=fVH)`nGGr$uGHn1n0)}yj%F+kce|2(1<=Crp5Wky@8W9{SdG#7SX zDy)ulTw9k$ms1Sscdq&%E+!FCG=MA5PNoreGi{A+Gvc8{qFaYTXB?s`XF}L45=BWi z){vE==yXY@PCi zS|Qo?zbJdluqeZ~+gm|G7)m;&yFqdU1S#n*>F$O>O1eu1ke2RFk&qq+5u{Ok{>fuWFPjbM9!fs zr&l4-JU3!_9gDq`iHVtm?1%_{WR8k|Ga?s#dy?rrFRSnrh8YougM>l`TF7^qu8!!L z62E36?{m%6g7`FyA+S7Urj>5VB&$*fnPkZqe~NS)8#uJeR98+O*Lt~;lW2Y$%q^0@ zd_4}Q1Mni9(veegFt`E(x(3+4TfwguyKd|O+Kqe;q6?zEB+?aIOB#=Wh-=`1(n#U& zR+uxg^t*7zsXVB}H$mL$2SM$qkl?-?uis`{fkkuZUse4A}8H|+E!wCvX*M4s) z+1)&qUQ#z25AUdz=vY@0Ic(rN^gCr7Qq2qJ6Qp&iX)WXa_T<062)ah8hEhhdKjV)Y zIjF;c8ASQ#{>Ox-I`?LF8ocAtuWlrHE|I=G0^Acm7EB`ili`c}+nguVY)(j=82L3X z&*6M(jJ1xO9zF#5pqc(gp&uLu-xKyMAaT(^8CgZ@4N^k59bF_%qZ=m;`k}JiUU{La zeSwaZ-?oL@|r$-?O)|}-|ck?n)go!_20(JUnxiNh?Ux_r+}U5CMS-7 za%C6hCu^y!NyoM$DAS}o>+1*OX7EXS0MQx5_Uwn?+oFZg@jIooH(L>AT(xaIk)TIg zz799H?V2g5f($?sO1yxGJh#NBQOJ^>GkzRqIT4`|6nc2bId{EfPTS*87UXJ6^+`x;jFqrYUl zlPF9@g*n6(e67DccPg`$io!&cu+P4XWbHCz8+=}}6_(}+a_$heRLKi4&#&{p>Uqpo zD@bGSSI9^qry2VGhqgK=;bw+Jm)lsX(uX)LO+e)4PEXz57=@o8-O>4dcQfXdSnM7r ze`}1-!cfWGGu_fe>FmY0NC-JHe?ri@U-h}mgJ3ky6EjfWdB$Sr9a%2$BLc#hJy`fV zZVoraDa^y^bg}>UpDDRLD5j=iKDG+6q+x+^loazWoOGzD0QTasM`K|FHormX;Gj~P4H?-fj9 zQlbkSdC3{Q;lzbaG*Gh9WcWwEPP!q6Nm4L?jo?M5wWNk~Wo$`a(&sN}qm3ps!;wPh z=;(ZsJ>N;+bU*nd-I=VA9jiy5h^3H{L1C>KpI1z>ZY(=ay4s*>0c1X6i`1(460!1l z{}_sjalxB0pkEyIf4(2yl_B2}{}u7pLE`VSNYyd+t)SEHxMq*bc;+_~zO=@jcx(bN z|0`dCAqUIlv=h6EwA#6Ji^uiJHBG{EYj#ah%-2icl zAIht_vRY97Aj0op@{Vf%O*F|KRk}{RoEfZKS;ZnN(0#Kv6={yHeS~x>-B|WCIN$q| ze)(sLatb*Sb<~)(O?!*){(Il`BJZ~!Bk=3l|4F?1x)}l_Z@u4vk#Ds0^C4E~p#n z+7@%a#s|(A%zEPupTbv;pGUrUgSS*dJFucZIUv zuSaUdhdykoNk&Qf-vGsmE*Xq6L$_NeD#lQbtluW~PpB_j(_7z7##mPtX$diAs2*y_ zhW-$Y6HfG^Nl%H&IxQdS};YD@5 zM!;!YTjYI=MPh^v5fzI)(gE9;hEmV`ij2`M?}ucVX$axSv<8h>%s|cgz%iQ?f_1o~ zME$iyTlNH`6i7-FtUV>1*pEV!yg4a*t7&+45DzIfik zlt}j4%7dvG+Zw8g*=7lFaaJz+GUq9;)VHJ5%`Sg3?&w59HXPWB`Im0G57=LySoTwsai(| z6eaRH^<%FK(@gr@zegxWI2g!)6WpN7h!0SFcWUZ69z)z$l+FEI`3;gaOd1Rxj~ z<{u|y$dHKNuxhK|mEo0y`PPRn?1_J(@%NA)@=Ln)M*AKA`FikN?HKs?-2X}Sz0X8b zNH)Vx?m8LJiiFvHCrx^JMA7Ginlfc&$0pmp(K4ffXIzXDwFo8?fRH{k497)7ZKL2V z5u>1EB<*DC@gDDV+cSS~eQ;+@*v72Hkc=GFlgZT= z8W}Q13-|?NN4lcwA9wFXZod1UUbWrn+@-yI@E5sT6*dc6kQJXyCQN$4VCfc!QN@96 zSIt?_)e%37z%u0EKw6|%TO6W)ql-!S(Li*8wfvni^oaJVj(0e3zh-b`Ds*+IeCL>E7`- zdwoY;e|(Es_!_UkUT;-PYwqqznkFuZwE{D>5YO*~-G(wn@;V}Z$Ub_0(lt!&S#4ZK z_YG6$)gP_$F%#+eawM7yYpIN?1f`v5p)*^CJ)T#u@pN|nX{V@u4dbPYbj%&WY37`7Pu8F!<2+2u2FbW95NP-?)k_WRCc`npoq7H1mN z!5<5*fX}LiNq4|$f^k(1V-DW0K09~zd0#eGgg19YYmw<$V&0tdMDJ%;Of^aGkKJ88};$>M$_rwq0OvCT|5+@g~C7r8orF|wFF_! 
zg{R1uzw6RrJ_(WU?RB=3g|~oI<&+LL zF6!@}SEJ_0wfXb>8v#-bd1!mw;W2j)A}^SG6M@K@B3-nN1F#q-&*`YDB`BxrZw*bK2wxB8}5_cQv0 zp&}=2hGkX$146G*Xn8b4J;beBJ<*Gsz#tC5DaKl zJOdTBShX5!{uk&xvQs#hhc?5K6`XAQ__IlJ8wBngkQ!CLH`4RQHpoj3_|zyaF$_pl zOzaODJ&=nFuzuaRncyVJv^8cmd3YcsV>t81Hm?+9K`;(vL#^h_=Fa=0Mc+$nw8rYn z=(Lo4|0fDrCQnAjTIEQmt|dna8I)`E;JE&pJ?|je8t3v2-~D%PwkNN^hL`0r@EH9B z9fM0*c^>SSvWskr-d{U3*vr0@Xf&WsO%+01*949UiyxE=;cd#V(L@ct#Dmi_3}xL9 zsGhWmlYg^zI%K&SOI`ZDkpN|O&#jNRfI8a!2qM;PF8=cpe#m;_&7U>O7j$ivueZFi z$$G-Lbot`z@La}$moWM-HX@;RUrY6`gTslPdQiR~B?R?8OZSHeEwMtf4W(sx_VnV8 z5LVY8S6pFmO~y~TH@}=|t`@N?D*`e$#9P>wvHiGxKOeWuJ-oUe+FPQTgZg3*jKpB1 zAAh~uE6hmiG_K^CLERVTwEYM$b;nRIry45QP_sZ>`Sx?>Fs6Y|KDx%@eOyKk#7mP5~;`t!wVa0sl98;jLo0xhSVJfRTrH@b0QGaUd^Hq zl5@E^!4%6C@vGB*GEwgr6(Ixa#rgjab4>t`ibwzlrvJ^>koA9bQUFsJT3SA`kK5)2 zk@wud@&^YO*cZppJ9SZAvWH6p6jyYOYIfx)7*a9Z*BkK6?N;tF{f^=^IpeB2d1jxL zntu`Uea;hjt@W4qUwhJPlanb>P{y@c4QP>GxE4SY2CQ{BU%eUUrO$(C#kJWlurnke zR>}B%2z155v^V=}xXg|xMagt=Q*VJafQp~t-ID~A zMNAyIxUY%Z&FVG?3WqHsP)4WZzG~XsD@%~}@=meh#M4J>BB?O=-sLU_6abAIX2!cbb#beCt?K292CZv8Gm0z+xs0%Sp@hOC%kopXeoEQOWxeh1|;9kA~$iH({r zTu9Tx8XyATv6^v)f}O{3xdx`(s;A(>&^RHhT< z?Go^~N#5XI8WRyzwm7o_L`AS=pz@*)ZKGoSEN@z-zeK6(>S*NwVA7cr>b)B{aY-dF zo*MS^Sv4d?(NE+2*Fb-q(_a4>k*Bst#V`We{8oa2DXYhvxK7g&K!{dUHiqfAv)9m99 z(Ibg&QicYAGVU?s!3ufF_zwba8-U@rFCS@3)qs%&sAXn07;C~tcCEVe%T>G08D`J8 zvFIPcy&tjz-!-w?3IRs4_g+!pqz$)7S3a+px8tw~)|6AHmjsjQsG`FuHotNYLUi0I zrc`0%MDK{(RSA^@cHAEN>?L;kik$p$w-WCcGakmf3xVgngcYfv^}Ssuy3s;q0G)iM z5SNj&R*~bTeG$}FggJSrS@|M@^S1duMa`qeOFDKzO5mhI%K6i-y zx5~~^^InS;;&J-PIvu^e0i%WRyfV;|hQ#-XL5m3n2CUIuPd?t8O_m@^dE8^`_~wYa zlGEsT#q8={6%Ur?Np4EB|GJGrmI?m8{dY&>JeQ@V=SE5D81YfcW=eP!%m8vV3 znJMjcS%vPe?@)xhn2d)bUF94{eX96}A`VQd<)cm(BoG$h$jf5x=?-{L$FV6ndbV5ePudBmgnX73nh}H@z0}eTCogXS!^@2e_(*5hqy?1iK zsgL)koCwuF7bZ7dz)hMK$5(9(S<&^X$Iok=WHwHH`(IXn`IEE-YIfi%-6NK)0W+>MMidLfxox%VHBe~>HI|36^XkLPZc|4KyVosx) z*jJzOep<@My=E$|X$Fg3bke1^dJsq0VbD|u(BuxiNH_7<;aV-jj`%F8@s`3Nce8?1 znqwL9mFW85m4O;U!f9?^kl?xyVquAOp8vSn8F%2!L4&91AiY|&a`$XVK2jUz@*80! 
zU+gI2=^g2urx!`@fRHNjw}fia>#%16m=Ft8IW~m0KTS%dooV}ba9S>ic^3)wX=n8WQDAR-46^ z5#y0|AITpqL#0MpBZQ7^aLAQDt;^`%wO&uq${$&Nv8Dl!u{}=@GkvU)T(xtN>4qrU zB9LaPH*FrgQ=*Dsc__&igoVMY*e}WB6E~_&3Xp>~n~PTdngmbKBAu4Tg^@Ic!J3L5 zo#rsCb~I=(h^`lDpCae6CfSIKKP<@RVdbIriRAy$itj2)B-J{_zGw7K$QdRV;*mwT zs7Wx0c!ITsl|&5v2)ZJZYSn*Jz#slA=Kik$ynZSe8%79qE8YSrWNN;=z!5LHi8ila zsSl3+4JAt{q#f~5Y*}Q_su2S&Y|Eu(T0R@bW$PJj|L!~EqwTN5@+y z2=BBoq3~nLlMZiNPhd>t;ys|XpSR<;<*uM+3vo@@NGQEX#SK*Rvxkdxzk+d7 zHR!x3A})1?2#I#teojJY4}57*%}VLJ3|{dy|MLFRwwCjJYE}ZH)jzVWt@Nh%L;s9F zd<4|}Biq{LpC|hanQd*bP5Dtu!&0g9(ssPHL+?_^Vw|2l(b`6Md-flp*0_R+u0$`x z{uG7?71(vI8OtRsH*twq=z{Wodjy$cm9w7u?)z;Y$tUuUJdqjQS8^9#|K`c+>y_si zkd%IDPc0$yodt-V?=AsA9~x~DGr@a`+S10olyE78^(kuD_w1^jxt`ORZ=HB4vd$sJ zre1lSsif-)QsGGcbtC8U(gJ9-8fzYc)# ztNS{`gQmvdA3mPsVem@aKUJo=6`Du2V3iIur@mnuhRqBk=jp#8v1qzZ5Q*PEGp{6m zh!GaoQ%?H%zw4w{dDVdl&WJCIQr_;w1m5m(Vx@NbDA)eNVV{Lr@u;byig;gm5|B~l zi*sS9@Y#J0knJ9%!@EKTJR15}FwawP!+!J(FeNGR2yjqWmXr8= z+l6DE%Xa;F|iL6{%XLGTZvPR`80x3RBNK-5{pfcb8!=S zdjk8#f%1q5w(j804fW7dd4=Kn1KaGNuM>f~F z7(B>J56rFYpI6YDm7S(Njo{2{%c8Ndtu5r`4c$lzz-PEsrDIyApighxs2KEef4ZQk zL9i{e-_Kp4!Z?J%@2%L92DgaEkDx)RL#tadTgvV{K3$akraHLds?l)q>v=$nb0X*t ze3P@t;ykh?uu&qYmrAY?#dy#3LIj|R~!`zus zcLY*JQlRJf{2x>}jYaE7K18xyB6DuB3Ib`N+;Sw9)sBn&E-(=ZOs#rp(w+;R2Nob` zg+OOv{tPmQF;YSjesz{%8fwB$FGD`T&1vJGbaGBEa`cJ5_E|t86|8J zxOpl0@ry{vcL=?evGogvlF^JP_-4qoy#MwGC9`r*f~qBM z0=8*iL1A3a`CK>kZK5(ZS{%~y>aI9DdUwMowb`iQx(?Z;P1I~q^F0|MMDi2SksEf6 zmj`s3Tujy}G10iSoSf^F5(vI&N{KsODSJ1n%hwk;<;qmj=1x3~s7SdSi#LIY#CCgC z7*;tKLQlLMVmi&^%}s8n_f4UUH1nj-&RcMo#QU# z4?a;HLR~YDYNV<3ch6y@DX$)wdOj+bM?v3&+@@dW2N6eIs3@s&s_8|3JAWT{?!mH? zJu+_C@h#-s8U{cbvyByiamZ;B7>BfP`@HxsrljmNE<}d=`R|u&H9NrxC*iK&C)T-u zTe-@qYwu~+zIG<@K0S|LHT*!SdwJszNe9uNG#A5I`C@je%0r!y9pEB<0_)#CQEfCw z*lY;r8(BIpxabTWcsU--h!Z4m)WpA%T9Wp9HTte9D_VkS+&*ADPHBEjhQ=g2f7gCm zTB!07XKUS)a#mpT)>WR2!5*?+-3h^}0WB0gWER$P#W%$&koS{i2R!pnlo+`@utXkN zmmj`drps)tS+gz$Fx=lU9krL7h`(spo>T0eWDv<$oSUD7K39p9U;K_yY)pa)eTrf& zR==-4nIDx+q)fy%uI^gN9nzoMXhZkY1_%zEq9Dv84(LW5Wq=3V2;WU5(*eYk8p8&t zzT%PsLD?6@$V1+@;cTPLPF1jur(K)DKr<|CYkXG?EC9%TNM$QTesV;!SwyQYxxsj+ z>_Gwa(^{tneqH(l^MogF)>bt{bR*9--bu=wjBw8yo5%>mvKubs?&{-jvt@#`H*SWV z>6E(DS0*u2f?dp`2Lq{ zveyWF)u1zEjYCJiy`9}0C8U(Ac~PaZVpE*!k4}YD^lxhLOQ{ z1On@DofS9#1{t(PTCJ?Z*-fIh+`)5C!|WScwc%n8;&>xdmtCOS=K?Av1<=~C)lMKJ0b13n93z!a8$^1u| zO&bPei?RkRw*LL6DyM7sVN>0jK)yfC3~d;%c)XBubd|~po{!;g!9vGGrs@ik4ZpWZ1W~P=<_+T)vk_|QEyyE7hzHgZ>Bxs(%E&pN zXtlS&J)Sr&+4@!5tH(|x&Q<-O3eg&9P7u57%OMNz@w_wvi zph@NXS^>sua=rt?CS9xlPkGY;59&lk(c{#2VqW)Xk-W{99=CiM{I z#53m;SEUsP#X>FAvgcyLrt7t7YX_ZLMBU^`pOD10e-E;Zr&^|GuN51)$p25O{C|0Q z)@|jrD{Q?&*69HwCc2=PQVS-@Cj)4?$*!TDG(3E+=`Pbo+^|3f%S1U z@M8bqrGq$VMoQN-BM)0rPRy^a&eW=A9rN|5EwLbECAM~Wi}gWCQ*Y7S?q)j;Vy36m z8bq|DE3?=rBC%agX;|5bkYqlj(Fr;L75+&-zTLplAOCiBSL&f&M-b*yVikQ$EXyJ| z?J3xuRAvQJG@)4kRU0}v@y3q)7b()sJvc#G-y#H~_|H2JNrP6q8&F&I4Zif%d5`o= zsd)6caK5Az5s3RN?oaZeD$d2v>K^hR0(X8&S}vcN0BElZBkj|^g>I;n}Z{SaHepp|>pL-BAQvC$g{ z3+=(MPu}pOYoDBRm4!?6iG-h$X!6RG2w_2NusqVBB;hiNZzmQYBjXc5ly-X}Q%O%_ z&h_ax(1ZoF^Zy_{{;yuD_4fLrwQ(eg>F%!X&L~I1lVB{;Nu1#(u{IGmO_UFjJg9Bp z$ffdfxWe_?k!;=*fKEIqwC9-~5jhoZZ;LPc)aGXZF;CAG7=dN+>3lt1z$*}>gGAyQ zX%v6Smb+__AYKe8_=v1e@iykvohBiPOpDwQ<5a+ME=!Q>!j&ia639o7nKFa(HY9Ez z%Ae(pA=eR(ibSjwU@XVoorYKHC5&HbM~@PhW^(bGvQKPAaXyJl2?e<(i^YE6z&=E% zff$VP{s8?Tp&kl@D`!^Nxmk-ZhEtCGvR(-~jB?KLH-8UNMAH!9XH7Wj|An!VG~0*e zkYPG7iOhod!*#6WnbqfelCCK$0k1Q`x^``b?Vt<6ERMJdX^fyM+=UO8k1-e;WJ^jV zu;NCpJp2I}G^K@CBq?Y?DrM`7@CkkALcepYpv#p67N#R%NEmYW@afT6fh_HEjTrw4(*-k*L-k8L9J53P#I_9+ADb|)NYF3 z=OBU>eWr+i4x$ 
zs}1ph^tB>B(}|uLlE+0;rOH9s3T&lbfN(o~w8%Y+6*ihF4)wUw!_I}t1mSU*$!d!S=F1&HOc80#I{UF! zW1mSSJ-7n5Mxy@op$@U)F_YY?XEl}sZk-MTTxXfWkt)7e-)O2%#f5#OO-8_AXbD;)+QkRCMT>^gF|6#DOMP8rhqfn8yMyzG%i z`$ueUq7OaX)dwL)>;)+ZaX_;k__5oqB^z8YkMG-71^gFjC3jvUg^~xML z*Za;7lcON`*B+@Ny437|^mmn<(8$^2%z!UM#sGISuZ#lrbCfqaxk9%7|@; zu7YkaUT34P?{(u^azT{o+O$Tw#b#t|?Ize|Ws> zn8eSVWcpa5!;fA2qw25@4nGH`R|f&y_1;$UxC=2kCU1&_a07nJEHz zq^pkl(zrgdGB_-mlI#+{?v<>}_@rBFJ^ErRh5ir7T&jt`eu|l2_cukPOG?yw%G+}5 zIb)iQ$M_u$RiaIL2`E*~cwFn$_hsp)WpYDoj)rYQoobV{ulrPX6X+<<8xE2U?AgKt zQc>UMn0~ihxzd}-+pJJ;4OKL^3cyRu`#>cMpQe|4*_fMB&~LG#U(%@?DX&gJS>$9? zId9=q0yM67jiyr|URPpDEB3x|k5%)R-xqGCJ~~4W8S4OEFmYdmG=`m`M*~7_}q$G}rh# z!Nw3ep=6nBMOnq`_Un0Q`(XWxNE9xD?4|9;MF53;?e4_#$@Ah=( zI;SPyaii6tKSDgWFz)W1hHKxTSLzc@*55IDT zR#}<_yo~MNP++Sgc}maMzN%q|!@S?-Sk7(oW><)uv@ceA7t_pU(%}}sP-0%pAAOH; zY3^Y`RboL?YW>}lO0bgSbAGmLk%>&zlIF6km3W*2K=*Shr}tif2`L`+57YPhK7u&9 zVl~G#Kh73rn24NFZB0IqVC$_&H~Emec#;$_*vAuGYm^`OZpA;5f1f#+LOo6IAf6-?qd_EXkxP zWUxxFzEKck?k}y`!md8F=VnRGBETy7{NRHz2t$2zi(taezq}FHINnD4g`VeI_W)f) z@r?CrcnrzK0YbG;I-iE7gH}R@iQ$o&y?e>(70gI z6~J(duZ0&Oc%f2hY=z45LD>nkmMGS`-C(Pi8hdcVcs>gsC_)D4&LkQ8V&I7!?GgO~ zD-ZqBowkxsUD~gBvQ;mu#~}Cvp=%Ap5Z$ES0CwD>m|V%r3Q$4JS&HM(J6+D`cuu+v zFdv10W-yb|G9d#Ae3`+rFkYC#zWZ$C(((&fZ;R;biw%Aq>hm4B>a=X5kqPmmDpS1H ztR0_U&KPgGw|9)+ckLrxBe=(c|8$L5U1c+eA;!ve19M)(_Dz;x5F2l0RU36AZgrt; z_;*efQfiAh_AZ{HNTgzOTKswPO#JK0vDaZ_L&M6hu$m8QlBg)0=UKxV-WRt;Deewe5S6z z=bGyGpx+gj0c*Z|;1-ImjY!!u+wTp95I@ZD2w;=9YsF)m9US24ViBag2QkO5_ zQ7xYm3@;4J>&HZcQq2SOOeF=n{PD&NbrUge29)L;c3>SdpIb|xy6}IYO8LV zGYCBd=nj0gJ7#xK^0d?4SryDkw#}!ZA@6%Al2@l8dt1zG4Imb?m@qFkFLu_?1T-!H zv3zJsyq=HR2L2othXY7J`)Ypqd`CZhdPla1I@VFhaS_w>8sl!D(o>6EP{FG4p z3SJ8n4%L40lDldQ9=Z`#{p%gROxChhlCr{i==-=eIVWxs%6l&rDWZJwPt4Z`zEtgR z2)Wu+gJxH+aIQLWrK6gu`mJ0kOBv3=f+8Q-P(}5eU6E!_>^E_9hn|f`@g8sKO^cz7 z64x>dDHl35)z6q+4*p@l{7c9FpT46?P8Y!;QqNH`OsIvZaEd2;^Z@If=?uu{=hgNi<;zl=Rx)xWQ-;gD+zH zr^pt9C2?P$K2@d;!v6|o6`bXp;1mgoWImYjkZGpgX4|ssU~e{jVQ5!1VROA$UfZf( z)4JB)i0N!$9)z3mvomsN9iVBBQGf@cgNzH(r=Caj(&mW_)G5A;DBWPd6jQK8ATx82 zboP3LtX*V+DkJt3pt0t;z3xti8~nE@Cl{B1WNx!HI8u(6S1{J10VVq!pNE2Nrhc;a z1b~RhuC<4L9GZS5SiGDlFG{EO`&EJOQA|i!_^V4H>{y?$EAIkDu@SYzxv6E={8#Rd zcCf^-)`NPL1=S;FX_Fl<)J(6AZ}Y)Xv^1QBm=dEJ+;|1FG`7uxC@DLwp!Sc5kA>d! 
z#jK<2a7J;VP#an~I!+FmZ~jg~4*G_lha)^)*A9*9HrB6&d4>H6szyD*{jyu4R`x;Y zXYtwjqETraI~uuN)$wu^$Om3eZ&k=h!H7H_pk`)ghxN6O?A8;vV39wn5z)~7Quw*7z|7P<_4nrhcayBsdu9x+=(Xa z<{6k{L07Re)eGb2|MZCeiw*zm`3xOQw{y!^6##7&0*6 zd$nL^3E`m{E_oq@`j;P@sCv{aPT2nM(D-`I#Rnpv`#!S@9`I3M>jzV@57H4GqTqY6 z);YgDM-QDroBnGqz5oXF^ai=1?TmNEwwYYB=iNeshxK}{c0k)o`Mua`!MJL{Z~^28 zmNgQ6tZBXm6@A||;FdY3bhaCg+h~t!VoB`cOc3NrZvHJ6_ke4gw$xtBMUKsrJY2%@ zCD{ahYM>&Nz33_Gts4vAOLdMk9vh8!_$EvK9NA`$;D;#ZUeCC3t9T+0@V5X%t;;YU z)FuJa(|sczzRr(ZH^4?F)FMJuGHPne?9V?K&}#a@nJSeG{2Z{=w@^2^oAx0`TzLg| z0Oq_T%EgWX5MHlrVPjpq?p)KH2%fTD6-WW`uE}tA=Nz!_)_=~gfhjoJ>#xg3ASlsr z&s-4mHZi2>=WCSySgy|`*US1%|M@l1h^&5ICgr};g> zLfT{nHMPhk*{3vCE4XP`Miqo6ILk`JUxTzu~d zmNBJi`X{0ot)a7Inx!s&GA-&yMN`%6uFOhj*r$s5Y}*>_BqOAgRoVj}sn)|3+3PFA zyt-Ol0ucA)0zig@?wZTmBYU&Oo)wRCOfL5vZXUgZ#wTX}4WcVqYW;=*&uSx6`(10^ z-YVttCFW2hdo}6-OFGTTlg89&vhDD@op<3*cnmOiNOz|(guF?|W1{O*stK{{fVZ^f zy8|Z$V#(iQ(z`AL=eFAZd|l^6@P5AB`5$kclxIExNtf9l397@G$NB$k9_u0nj<(XH zWrn0;p7#cZeV2C=YK>zEyE#XNhWzF%E+TfBasY0VYlPj%taJ)0K?@PNOT!o-!P?45 zDRatcBS0dFBZH^lryyf6TdbV{x#oj-E`C+qnVT+&wqh|x^hA~qdRht} z7$b6CZ4xRaGZKd>=vOpJ=%+E5p-ZrUY^VYI<|*|M{{b)}skAxWr|H3~Y1=LbV8{Do* zAODoOyE%Nehf7>TwqMZ8Hj%g+87G0yOqOOX|Etpz0(zKK(gB|Ra1h@9x#4np3`qgf z1U82%%gOa*yRS(T=3oDizT96Pnn;cqepzQ00Gvwr`pN`UP`UWq+Zxc(rop;r@a(Y}?`BG1e}_U6^qt1Q6?7Wm&nX7*!- zm)Dp%T#kU=R*II^pQ?RVzzga<#wm7ZOQc6SE zTgW8dHo8@(>(4A!;q#Rf$|a+VoyhKICI>U|x_g_(6K!fkiW12RP6_;!Yb4?--sCvs zxRfI@#2Y_z_(8vd1zcFZ8ipO;Byb|hE<}o&vp%TldXp(kDJJZVZd`-uUfN6R;A_2F zD*{6*@#JW{T{gC~-$@5tmrTBBI=Dk4sE%JW;|1D%sCnE4hz-EUrF6j?6hq$e71tT# z=j$_z?V-gidqaDQ#sefWKO^3f+-O3x6=j+4MtSPzEN(OB9OjKHfjP`xh1<6&Ylfe3E7z< zJsImRv)F3Bi*9&S`Y8}i$K^tP_KnrIVH7@x{piaCRuviSK1I?}o{UrKxV z^Dpm|;U(G1q{b3_Cgh5cU^&I=b@I9h($__7vM{A2?am@Q%dmJ6vE&^^4sR&$=hBK} zjLoEJy^<#8E=S^nQnmtLADmdL8f}Fae7T=cMSIe53#{5;+g$-4(w%!ttJPovK&9wxKKg4{(EO)Uq-es+N7%JNiJ6yA%o02kl8;e*I0q^5Q02EW%uV z;01h(@Md5|q8bN@j?bt&>vQxkypU@fnjuiu-tHZvvgvyk-|RMkW%3omsy<0xA>23w zGGdqo5AiR%{s`}87=rONsI})eIIGU)edi%PN}tP?3XXT#!mpXk*DzI{@O@4*{8r-Z z|D)`!quTnqbzvwHG!P_E2o@+-Tm!`k?pEBLBEecH?(PMOyHko6DDG06qAevz@!}5U z<2`4b`~J?k-@Rkpf08lw*kdJotvTm@=A6&777NY(+3s!Zm5O)H>E-D2SpoLNZTk2A z7c;-7m$^xeZ5#|YlfN!;~+RLn|=OBlPqWSli0z3{z1JQXJ1P2ITSw$QcLpGZ3J zs{2@pUvzm^l{qNh1EM)fp>Hy-xw7C|Z65i^(XlIq8IyfKYzBJH<_6`~TBUrcP~yi! 
z?CV>2S3L7x%e*w!v+caJSNZwA_C;yP2KVddY&>a-cY1EHnRm&%Qk|U%If{4AjF1@O zpG>LqSvb7b)w2bH#Dm@12A`AV^B@7mYVqQ!=TreD`cUubwOQA7t$N={J@$Y%(C1sn z$x&mzj4x?oe)T3b5iF3ipDf6J@g2Q_#EEcfX;N4_e*HV&AE(^8jR9RPAKhZGY#-sA z;ktJ&h5G-ER*am4->IS_>t4IH&%O|&=Ol=NLzCngi2lb{vDDMk{f}$^yfL{8YE>Kg z&ny39gb`B^j~*OaLHGx>8OEcLC6`=O&^~eYT{P`dP2iq;;@nm7fBVt@7%sWXeAj^9g2r+4LRBX-lXp3LmCuC6Sb2pFQFODi(-En-XIO4y`&x}(Fm^9W|qKtRm*7Tx>V59KTBH`4*ej?NM`ZU@{3SlPnA9n z(aAy>A`c?33FcbYVa0whXL)1kxZkN`B4o%oDrDDRSHX(y|$?4)7@yVC-{w3OMnpQ3s+#ax>d*E6wKmV74b^dr(~AUW-ME zc6Y+MpVxSa1yFnH{%ybW48Tv@7!Yn_n-A-}Yj8ILCXxrkeXqTa+kI@q+zwMj|JLwoJ`>g-^|CV=3-|rxg z{o=$^s^rpEyW3&2F-Rx!aBfzyd3|Pa?R>IM{`zx`EKHem9((Z%XUNy1KP*l;Kjyw; z)Y_=%o3|UsAp`l$LM~KJ%di;A;co6-vrdzPfqqFEsCZg}90Px&PDWyJ}^tqa;{z=LQU_zF81KGZ=Kq0jk!h zd8Mg)e@d?S$_R*-@3$&zKhi_Z;b9z7;DBH^M8}P6C*ZKvqs3T05_Q-%h+m+7czzPA zt8T!4ap)xp*s5K&dwhWlApp5^8gs{Wxo*YjXt4jf(#2sF0%aK@NI)aC#)<(RVjb zg%;3sgN}r$iz4-S#bjNz5Ffo z#L00a3+h~f_}p&4_|(D%t7mTV-6Jfr$V|F%KhGx5dlJR}uB$MPxFvj0NBi-`u5{Co zI-=|F-P+P{+v?2z(5s=|B<8{q2WDCI153fPmeVCR9_a_|-%rOi)0JnQ5&v=Pa==@utBlPgkXOx^b#KHhNu%Z4uSrP7 zp<4%8$Vs!x;FI-`J>il~In0=0vn1lEjMG&;1=hoqjJ1t_yopN$No_{;$|?n^APIG& zW=VD0%R|Y1Nu4RmWTzK?2inW;c#(S9J7N`$tJ)t~EOUSz8C;m9Gw)O3x0~1WPY9@@ z_{geu_)J~Fc9Miq*xKu0V~puEs!LRZG6nfXRKsUz6n;q^6?Oi=4vc3e!MPq3{7XY^ zct--!B_5A`!_GK- zqr8~&M#w-|G9a9x{EFwbc~>cnj@%#sNZ`|)HjKdyF!@6T^nrUgGn7_#+FToruYrX^5? zs9jc7IzUqBe5-aTFmC~bIAMf%5yO~<=%3)d=AjCh005T=AHy;yIGR~A>qaU!TRCm% zNA9~=sRCLOu{0~(h_?s?`pO@S?y0)slfgo6Zc!IBi4NAvSbcE&9bSq7G8>M+6=DGHY5UvL3$9SMgDK7GP> zJ@t9_P7rb$r373TXt(_KFq0hmz%*LU2?S&BpiaIu_(J8P`F#GJ_K^`$kenl6w_JO> zPqzz|Cg5nL;LTj8)I(zYI}bVYU&pZcer=C8kvsw7?#0bx7bbXvD+14F zO9YLdj5CGiIU6)YMwP*4)W`BgMWdQcaDv*u5qJ83nqK9q2SQ|!HAH4m)1F2ND0i#M zV?4ZYRi|bDYdqA__1|z*_}r$K{G;N&4$*GPM#A~XNm+6ym<`M5Z9v}%J*p&yMq zbdDB^q%FkGIe%tX9aT`k8lI_#^?jP~cN3qM4|cz_xprNsKD4~lldf@GQ(BV}zbJn1 z`wP9`a<%X+s_{hNF6_3IBq= zx1JQZaneO6v#P+(AZe!+iKN@#-{@+prW1R)&I$23W4-Dr_?!5dGrf}z;mAPDWTQ^z z(l5xmJxp8Bvfg0k_H2w231MK9{q$nARh5D!$$sN!3Cod;;omA&@-xM+ve}_B zo#Ye0x#j(LC>1ROtq`z~0uNw?GIfO`s{IU7IHUbVn!ZEKi%Crn5tdKeIPK$-3Uiz{ zHD;0!Q$ze7D9eZVI%%m8x-M3P4z*n9!Pzr&*h?(HoSg#-PEHpGr+xN1d9s>u`reH9 zMzYHp-zpvr(@=?f*eXg{HbOi7V0!_Y$wq;16$hCH0Hms_tq)}w>06ixR_ zL3qVCgn2m*v;c)D2@86UlDnMmT!C+pjPMGdPzq~!jabRuaYVZ)iQYm&9XAEaJMCf) z2({ui&uVdmKDOdjL)pil_TpsA)F;8QU(PGE#>80uje{@yvJ25GaY<}hRA^-hFDclv-sLrfeE@!x-M8xe$jU;_@KuTq z)*Q9ekm-6GJ)k%BI%1ieP?N`l!!{r0Ek;seA8QweCT-WwCbDY85yO^axvPw`%a)^t zp#-s2A(-)2>(s;tc}N54U5@KA)@c7vDOr&eJNpYFg|Ic+kR-t07jTJSP6(T%lK+V- zc_`K#EfNJ|l`c1|s^(+K;CINeyBi2sa*N`S&x5qB6v{7xzQPtAab+YoEKg%%K$m)n zeL~vbIip$avr_ml^A&aF2$qIr!d2ln#@g#D6d6?ZzD^#pC~!2^_ZCMAl74<5virND zMtA5ckPMzZgH$4TkFx6jt&dn(cMWp_Fxkc#cSGW z8oPrJXvijzH1%lZO%d!P0<>vMJ(0WK(VMv7l_$CwpTAb)7%IVyk?M!Dx9;w^szO`Z z@Ivm*GmL_<*-r>cJb>fe_XYal*@Z~jM6TXxti~DbS7H=YmN|Q_>fvN4Jgn{Gy{Obd zD1a#PLW244yZ2M_aI^UB*Ay|(OMC6XLki5@q+@~oDC=D#>04wk!7UT9zn?}eYls*WS_J5_lf_UU8TFEr}M5R4~$LG#Yo|;xTpbSa)S{U~rf$s!@Hu_M{3K&s~FdHb3^jp6_Z5EP& z1QSt`u&|EK1zo`f3|$W;-dQ2@6KM}O967>_fUU~mzL{W) zOh_#KzmwMFji`XUeZ z<$GB(hi%*!J8Ws*{#kt)?;@O2T3?L?Hb<`E1FK<(H59kTZ(8~S>C&tzAf?qIR2sbf zQ=!e9q`1Og$t(N@@Aaif$T4jhkSEw@vZK#LQk7X|v0@Bxu1^}`YjeBQJ|v)Hcn3j5 z!r~RFPF=THtXlklD5nQ&ZZhcIk=-P-Qt#d|Ik#>Jh+KDDE_M{*8UrGxR=6Yo#yfwM z4&j3Bgbgai7clPlmI%PR$3NzSVs5nwu>oN!B2>xNzehNQK)QGRqub1ap>To;}pk zT?{*J1EXEvL2oC)Yf;tsZb{}^|0~lyVQ!j~>`QM+dWd?DXL~n}i~cItLP?t3fmN2! 
z@c>6$L_~$guY1I={99CmdMDOYchO537DpNMu_s*Q9fTs5OI5T^??@zxl^glc&X5&q zqA<8Xe3+h|a&1W>4q;fA(^p2(MbAO(BbOR%EvE}*%3bUv7${_DH&r@{>?()(h;YJW zpE1Eu`mRs`BM1Q^&cKke1a7lq51z)uqpN39?Sa4orrU#w#bj2Ek6-f61%(zN5!(Gck^qrZtP08 zsO+UKYw3{?RZb=>c(VQu1c94l@^%xd(?h@Tr=BZlgSkuM=t~WzWe_MVQ(sZl!lcvC z($ZfHuwN9liR9oW1T`2>k=w;WN#Kip1dj@kWdKnsuLRlgOof7+SIg5#J>=iS=q{_J{Ns3CGOTB;0ZD#0cu#i8yh!+}58!U@bK&#MA z>sKib{NvAn9`w*_oP7A>wXrdk!Lki~a=9ckL?-ZdvzjW}IlsA;w1JFWy0#J(D;&NS zA$d8_XWkFx4uxuP58#!n*4KcLR_ui%tVZ5X%(U^BH4i%cFI)7+#%3?+de&~Z->jqqL6y*$SZe%zj> z5VkEC!~|p~HQ6pvt(hB%kUVu^4z=xnQ9{$CvSCPkiMC{UA#0LYHQW&qx`9?;Bp%GC@X^IFFT60nf567}G6 zu-}*q+1l1BQ;HY@Y!9b?py0h=C@nH246@yzXg@mx|S29 zkLtl*VY|GVk}=GF2pMzKjTj&}AMu)$U6!-P1GL0ixvgPg)`?OaL&&Anq3*iMi;EC5gE|R%NvL5|nL~rq;q?+B-acyx=_HmI9h5ib5;#p%3CRE~F zVYkog>?{`F>TJ;BNmqY`luKzE8^-X?S4+YuLO^)QY8=uvwHe9hyMv(-Z7_I872!ZApXpyep_SH)FHlOwOkULY+i_S9j+ zFNmaJ;1Jb^_!E|ko^@+?$B^Wx|?lwWVu9d418lvDU%6TbEEXk4N z73_$Y+#D`JtGwm$N1ZsEG-4xd5tBZj-gG#6*D~~I>0P!?xIfG&(r*Nn3yj}gPemfc z#h5Nbmjc$8t#(YCaG}Ip`4+z9VNIEtpXuCMEiem?RcsaUsY%l zg#Dh`XY-^3D|sy+4>(f`JML==TdaBU>}CIlr+poIiGl5eK^P3#Ymy=q@1mivux+78 zdvTs!zUbCxd~zYZyI@6v(HoG6cs5xNm}hNmzcgr`X_D^1y!BOdw^)WQCWI9dj~aM zJ^#$EHi~2iJLjIsmp2w9sR@0M*yw_o_4e3Qu1SD04CaQ}BNid+&Lztqb z-z%e6Tx*R*qJSuf6H-IWLu&VE`q{Y^aS7s%uUBS}vdg)04<|7pz(lhc z@xO@YrZJ3*XQ!cZ!J!7HOi=#$b+)nNdf_*YD?S`DNi&h$r|)oJ0lGn&Z(;(>OEjm( zp;Er&GPT$}6{_+(A7nX93AecujQ`=OoWQNCxBm#*8Q%ZuE^T5UqaAVYr=!v5(#qj0zy(?dPuEIcIOZdc8%BfHrFqC^GcOh{w?N?q*!|4^aEfijKmM@u6rik|74< zTPpHsCC~mMpXs#WBcy>9x*E1t=o_$k7RK^7Tvz*_Tb@-YU*!)^=jd#J0~X)`sdx|X ztaH1cO~d~Fc9TW!#xRIu55cj_6t4Kr7o`jauj+fVyi_GsLeh={D5U)v1!dQGq*>C; zwo-)OVn3B@Eou~vUN0(kwo{+swlL0NX*0}Dw%y4!F?69>cVTRTj;9o{Qa#3)6qnr* zHg56NTi1Jmx)-HVDWHOVBA;%#^g=pfx$fNPj zlTs9c!--N#AlBXudMcOqG(d)@mQm@@eu6;@8{ zeNqMG7wB>cwAqe<6p9}l^V_fr^3g+UO_B~QSG+1Ijtay8dR`f{{l!pdVmB@r>B!RT zG}DHizCCVEW#UGNjSZ0Vk|cnfYvAmdnQ9be$4Ob$5>5k5@PcR4(9%njftVe71!(hDf!W|))SVahy_2ECXTcpEMIK5c!x^> z>hk#AqopV*x4bHQ2@|23ueJ^q{ta`BPp_}y{;`4?H~;CBTJLvg@TB7ed*i-zt29c` zVb4!W7rWBURq9I=k}4*L&VR2I*co9At=iN0#Y3s$J~h*bg&%jOvA{<# z-L^DErRlGW?(6_1KaDP*n*L}j?vnTJgFbO^};IuNm`-^ItIoxB%CyO8vQT^XS$brN9KjR5DLoUxdL2LTjU~oN&#!y-Z*CF zZS1_hopB6Y1~UO=mr{EYt6@r*)BEdO?p}HldM~zJ5Lc`j+zLJ8TAEYXKR(VvETJdJ zzZmmQV;DHQh_iFBC!=;ms~mH`#>{MxUlLBcL%>OsV3$$U*fSWhgZ+JvixYupX?hkpn3}L(vT%5s8W{k?@_A@=VF^-VW4MtWl)^JHN)*5EniUCPO@S ziMN0x1=~bbe0tHvkaYxSwlYmM@-1_Q32i!9QXE5g6&V;~VRfMkNtd zBvG*LDAe%ZHuT>J03#X&eyPl#lo-t`e^p+;`FlBFWl8fCe8gBkYq8MKIK4{QJqz&( z<8^M~pf>5E0z@d&l4Cpr5gCN9Bha`P-$#zpt4o8Jd&v2K4r^s zLoOVaoPQCD^;TVq+kpuco{W%Ifc2mofjMvo?E~5yl_g&I@oQp)dFV^5umN{J8jz<^ zhfy+Swd82F%B;MmMIDRm4vp^&lH?wcpUu93)&`$~9?@VgE|7 zhGfc>5zCTPKPwei*goZ=qG%rMAr<}{xt3z(NxFX0GVE*tb^@&t;@HXe7e;EfDhg+V zUQ{pH#XpyysneY!^t#Qn(k^P_hVOX2!fC3K+xbu-8yRBGCu{@)=0-uOqi83mS)g~Z zk^+^?2H8Ku%0>&CNzlAk!Up2YkWZUo&w9u}fk1R*APq-@qfEB0j2v+feRN@8zWdbM zjzHo)6pho=1-vGw&|M>jF6%R9n_0xS;9<8yA*-(fv617bk{rDX!nXXlC;5z}Yur1F zEmm5g@selNt?Q64I}oH<{&J~6e^nK-{?X3yJHE;knupO2tYz+3wRenMDf{|@l#?{? 
[GIT binary patch data (base85-encoded image payload) omitted]
zqa{2ZfFTrF8Odlqo-*S(cQzc11o5A6Q~(I67vrI8-5sX@$0@yW_NF#CPA5J&wgwdU z#mm!dam-UH?WHL|B#4T9{CcC}3S(h|+ROh_JwxKroPY!WbVRrwM?#*b^@rx=qp~Fp z(9)f@8oiM@bAJgo;bM=ApihX;{j<^3-le~;OyJ>+NcM7t{MYkZQTbb1%Dc|Q!x#my zWkSzo{$KFQvNR%J4(!fr5q7nIH`J=vi2VXrY8K%HaszIog!=}!6J+uLgZ8v^E>csX^tB~Lb(>zSg zC8<36J3Ge0EwkQ4E;O%en}g68nwGfv*m*e)S>Dk8_sjcK3PotlWS-25!!tYgqRDM_ z%V`Sa8hTsZvX$$3GbvsWa&3I_#h+vn5nFBi0!hjfCoCE(&CTXNUw(DvFe%VDz^la!OlPOb%!Y_ zcE>g-)^Ha#c`$;^XC@qqE=~yg3zq@oE-M8r19RCh$g8&))s1EAzr=3l7%Eyd1>~qS z<-IOaPaB9&{EMJTJ6y!SM{8`M z6`}iTExouw4yU z^R`9r)Z02L)cBW(9=Z&{!b$l?zv%pxzkCa;3xU+pijeTQbrQ=yQF(BPjX7C@U4_&@ z9@i4mWGSI`ZwK1>L*HE64F#z(#pLi(M`R~8_HpRmm?-8t$+g59Bdk4iV``;I16)M- zL4AbJ&1He7!wK!~fQx2;>Lj?0qx*hQFN(NE9Pk^{vmLli4wOH*FW;%WbsMMb*%8xa4!HCr zpW~7QM9IaQsp7`6rbnt+x%6eYYj9Q7>STa_f|w>w z{^K7xZCw)gQjc44iE`u!4Nq10<#x-6L%;uYChp11*D6>|AVy^h$2lph!h zS!?6vI5;*((L{ImFp8Z_Bbx>i!07EEFC?b%EHzU_CpndtG7VK!@n3Y{bro_1fGhn5 zKI$zp{5R@<_05TAIJj){N`CNra}alaL)-3&kPD2t5Ch85?81IWP3TGZ*Vyn|gxs6> zkkZt;s{i?h>fK$Bg#b-@+;r7=m+zZ8Q%!9C*Q+$wGj4;s;zqpm>_^tFp#GLJUueYZ zyCWH8I^BMssQpDFq*mV_Za*k=b0Xhw@XO~HRnIDY$mOp5!^X>L4e0`b?lLwQMWw+ z7u`!FC=8W`bwgr8tkZKZQ1Kr+11LY1Me=Htv9`?8Mt~jdQ7%vh`suZwc~{pD7bgE| zQ#tS7#u^Y%bL$7w%nPtPr0`Z3kxvb_V_UoVF4Dt9=b<$gtH~%CC4Mq=Jn+lv@$a6_ zgq$7y1umu_kI5!24@Ohxw2hdp>ob^uGLEPm32PN5(NY&A_TVQ>eh$lzxP<)Pv>act z@~px>hs$#R29q)@?{0y9LmLG}aLcxZxVATiCyvEcl+r3lmJVI29FJOt^nNd1oATQs z?MpD>zmmYz*=M9J2jMdhKA{XBc>YU7JnV5PRdz+qRUnQF(*-j(x3R2qyuhq@7jeaL zC|)9WV75*;&Fv6@no4jjx0nHHB-m0Qd?s09F0P{0za0;KSR-AbMvmk4{=(q0S$>3htk6oIxqc)k z$vYLSxLAi+lKYjZQ6vzDlO>}s>PorDTh_%+nxI@>{$Va1;@?*Kx|05v9N_-rt=KM4wQS^kWrsq)5fQ=n>zITeL1$*;l_-tp*i-1n>*+As=a2g+d>25#*)c2IP zjeyPWu%!_r=*uziIe6TP&||i9q28Zxxy8wR7?ORmVqHlc_(i+Fyw zJ!rtI^j|Q%-Q!|=vJKGW2iV-%X_h6Rh$ATLN!%~(qvB_ah{Gy=>hj-NwF^tZwA zy5d^QRpw6}b$pbmq`>%Vye8!F!i$6adpK;eed74WRCg-&@KN>J)eR*b2ZW@CLEe;h zU2`8!F9PR})@-7Dz~&y)Cg1N1Ee64=A`8lf+mn%yoC)|218W4)>>@-(8L!SSv(0ga z(??WeBYL|q-#Er)U%3)kB%M+%SPb% zT+j6W=3%FYz-(lz4<=X?R|(1O=)f>(L|0U6VMeP%md3Fu4vrFo%sK?OwoG1+(VFyK zYFXdv8t<-)poEE6)|~{VOJlahC)-RKf}){yv1>L=FgALRY~XWn%Z~c%BiZPDig;yW z0tIcvV)gX)4PU0gFRNO;EZ^)VOmxx6gg#5GB%H?Qmt+m63XJm$fkiq2b)on?3qSOI zWB*T-u|(oZsyrR?G=CH~o&CRYxu*wNdz5`;f8gwi5REf4GTL4rj~~FsuUmh^idS5Q zi4`(=#{V~2rypcSTcC8*BF6^ZD9PhQ6y%E}^(__`47eQ_xN>UtU$NTSgaV{q)>nO# z*?&tEyqJ&qJomEDhB)4dL-NQIC3{5Qv~tS)qj(y>mFdg!?e9k6P~aeBgZ+npTfWgJ zrITM5MTVUPq<)0neO$@_dUAKd8BL4&O?_HU_dRxpNG|p2;URui>k8KgPAOc$ZiLLX zi7-c+sb(G~-TK3ybruBQ4OpbC++;yBIzD%^-3Kf3HH^gb9tM>DSmsWWJ*45ZAVcHf zjX>tYgBxxbz3nCh7weyBrhu`!-tT`*fqEDoFVbA~G{$M1?DGfUqLqExl@;MA^-9bl7V=r^*pCdaih z!5KG3h6{!=ZVoe^#FcYg>gBHQ&!EQ+W&tW!ex^vHp0dv3S?!P0)`@&8hd0L3AorX%0J5v0eg zLQQ$bLq(#)-g$#miLM{|;N*<|aVLrm6}{y_)Bf|a97ud6h2Gs58gn6@-pv%>p9N7SpQ%Gmz(Xh;BIq9+IQ-=?yE23vjkzcr!sdc_j+ zss-G?==D?U=g!c8L)%tzI6(PtQ^I1{hpBc#_RwVc8ot=tp-4w?*rA-u0Y;{NQbvqO zJD+?80r6i|w>H5{)X=m4Xdsmc4X(SVyE9T zs}}`njgRAASzV+zT)9BWtRYC0cd|}XH0>H_ zI~i5_7sHM@zvCca%a0K$n`ir0=P2pS$=hCe0 z;}eAHAE7U0TM+2twh8s~f(Nl2X@W|S_?M}9E<~nW8{&hbmYMtGJFJ;6_Ec|~Q^AsF z8wQZ)=fkY4_C^r%m`6=2E}y?1T`GaP<(75D2>;1JFLBPOrTwqdtL#GqnmZ_KH_Mlk zF7-e7JM=26XDbz4&1h$-2d5a+P$udGQy(8<^NyFf^EW^QudjhVOq)C1yd(2`^bzx`c%0!SnPGc{}38Yitm>+r~No>X>EbFzw6@*k5nMw!$ zj=!;BzAI?`C>NZ-rMxJ2vp|5sVLH&@64KpI<oG7w@5T)^v>MmL|vQN`dnK&WT!KLDsfis(kw@>2ukg4AWQ`6r(w z&bn)$qrCRt>59S^mH9-Jzn=(|>AdESAD* zKornXXy_{JhrIt3WA$fzD$udjY2P~H!|dOpMDgYMkiWVSGO-*j_bHsJ;ngBq!?mgF z__l{I3VEyXLAs90lod73*-QoxJBSDc(7qCv;Gv616yMVoUp?Nr!1vMJRdm8JGP z9sjn5`(M~(jvuC}{#`fuCB_0wv*zbaulg`$%a)sEgz?Xzuc2OfyB;LP`QN_p6&Qs> z{Q~PbJA=oBCWmm?kRu$T6ESFLxwsQ&)AH&U&9t!yb}NKSBEOjHl;FS{3IO{aX2uC( 
z`J4WzZBwElISJzQ4_R;^q;$#~5?&~YE!y*HY1?ya30_DDH-8_z8c@Iz>nA{0lW2=x z4)`V+!ZM@3bsAn(+&rTEf*Vt~dt9pi@UXoQ@RQFBDxsDwiD;85dr$Qn1G8m_8&h4U zn%@>zB%h3o%iHn{0l}D&-S~s5FoVTVeNGYe5sky1S}tf6h9TF{yWSbAMR8al?!FepS$76gV> zM?1Nt+zTL0Q~>{#**5>vvKD7cx-&*m+jWUZ>i7GbTUAOvKHh)Y)`63!arcByB0rxm z&5Jt=bXFG9;&%$$O0q<3O?5}XqnH}qP_zJe_`#Vl4$-=*yfRWuu)uAOX_J>4vPRyf zdIv*n$4#73H|Va2U(WIZL=0uolHPh+VQ zn#Eq^zlh^wR$EO;Ek+}mwk4I~@tNuT=l&;O5y}H!!%}nFXlV^{d5f)Z`rGYL=_;YH zgEWkSMG-T(MDLzdo%&Q8mrwCqLSq!7TJf1Fzy65FK;6V>r9=J2vmkX93|l}+0dQ-q zca;yRinXy7;a>d6WXo*I?o$N;Q6FBJM&3yZkRw(w()ks@qYl8MBK{H@lbQ~O0p*c8 z-+Wo|*o8R`MaLA-`y9e7)eeaG-k(#I(^`Nwh0(i!inuIk1}0a{Vvdn&cgv!74}YN` z*T+D!e3#EFYjDJjfB`}^qfs!UM64T)umz(a7!Ptw7z4UmMvvV-ELa)b;}%FS5HljP zA0!RX=kg)9?v>CSQ0S&88_X>0;r&opvXF6bb-h|_cVfKC&7D|&)BDjteXP4aIhe7h z#lx%v3g6rfc5MKt18>ozDZhpqC{-}R`5KSB{O8Qf#=o|SoZ|j3(%!>B_7()^EHEZ3 z#8O{!=HZ9(^y&^u`k-9tgvBp1<vKw=3D1bklj7tX~mOEsPI1X{PmZACxnooZ#1e2erz$7^S)y z$|7t^ezu8K0v5WVUXHC%XD>L)cLobNtnio{VpkMM6o*)TWcPcQuy|ZIPH??Pevd)PR;_0sf+>l)FEktv zZ!x7q@IjffWo>V4DZwyZ>Nc@?VVZV2&K&+N5%Af|?9oApTS|vLTa1c~8RKWjP>DIb zzPc>x;M|#&4B3wSsK;iZn@PkZja0h>ZF(82QcLUn8BYAL!~)Lasw09cjw1y}51##R zGBcNU2UUqdS))Qz#P$_p=C<{91wXZq)dPrt^*Ve3Q??+lmdVS7_j5|53i@(5Ec}9s z3fIkn-4k_pJgV$=c*%@Baa_m-Myysp^Jj&+w=lGYej{Pk`x}vLZ2<)LB~4)Flv?%FiCegRYbSPolT+&Ehedgp-p2V z;FNFu;kWc_$F?|yXA%?nRW|{yF-j1a3&Gy#6^9h0pIo?DKW--bEtSwpY5!c*FEg1j zLBcuVNd0HSS%0lw|J2em>G++7!=B1KI(Q=yW`H*W%uLiE_n)jU zazd9pOesD*D8XV6=?P}~;^#ty>Dmx`xJ589>4LPrtdNejB_6wcf1D||;|+Z3mVnUc z8}JNKK4Z)yC4$zM#%iF8-lAM#D_idwxJuwy|662NMMfbad}gMaq)iO)mQ{ zi0^a!SH8&qo1zG`!SqV<|9s_sq4U!OQG882#ACdb`m4)wefOk4xBQ^FG+Don2`$Sf zdE9P_x!+1V-xSOL1)+594SG3DvQ{rA-xAp0&%r`_kToueBulk$DOzb*5YIWTXqU-k zT9zY;1OFJ=H7xLqIu^O-i9Nd~G_B7iOL9kW`-A#Jfzb34-E@jJHAlW&}~YzY6sQS_RmT<}+sjhB6ac|MtC{*-z?5<+q# zCs1?TCQahCcAnQ=rEhpDzh!gTeCTvC$8UqcuU`-% zZ}6Mg+?E@@Mf(bu4CNr#fNsmkZdR&314FSBDkqY>6ly$>kl;)wp?KWG56{^WKo>^o ziUbcYr_J|m3T=qt6UOa9m4l67%Ib<*?;7*~dm#1mhIcWr~J#i!>> zTwLvU?9sj=A+x0+A(E6Q&jc!0y57P&M3o>Q!(+#DJV2JF5TPjIg+G*&@J)_mJ*I?8 zNX>Ep@Zftum+NY|*F|Ns7iYB@gOMTH*O3w8W^xj*qq92+~oI4tW8 zR$Sx3-t%lF#bQH1p^E>R*kvH+H!(eAns{92$yj6BBl)7ry}aa47LShnbn_`Z!^9L` zNqn(*xkbBeDds!A$GbQgt<9v`Dkt#fCmk>dq1fUbu%5 zS!Hv?rn7Oh0}j;UM4gNqL-sS6lzP&r-9VDG1It)y-kpM}ZAz2Lc+(~{XZOhsFIP!l znjj+fVC;BaU9lTc{LXNFY1UWHia7v9c-QDD$D8X>YIqSE<|8|>881tkJg8u zQdrgi>AVYFijx2K+1>&4{5ZnuC&zLY{-}?u+O#Pj!RsdlM7HnE`!c|E1isCqd)F^u zdq7?acOf=H|NS0&Suj|shbp#9DN*_l*U|XkFHAJqQma;K&j$nOAKs~p^${T_l(pvZ z0yd|I%;0s|@3$wz3IG}spJ`DL00GDX5)!C=aa{0xi^jnOlRk(DGcU1mULh()lihk2 z4`f3?Q5~|B?{pn_?!Khn`xrF=#P5$ovO?lSq-|LAn4Utq{G2FI81%kR=5oFJHEb5s zjuW&?QO|GafP2st?+cwKw3$(*ItmQiqB2ndTsj#%(s)KXA_e*Gd4H(7?9`<^a`PV< z=5WT8AFR(zMr)?X)GAEZiByF_Vz)wP(z>w6KGsSA*!%!bTk3n4k{e_30zwyMwHtvf zwPKHX~-#2a<6?| zvhbw_@zG9yvR>nH0F3T4irD38v-JoF2t+?|O;8NyaWcCF{Pq18prExZ5^PjyTyBv> z=arj!m+0b`nUUZUR(w;TP{_O$%Di7FsMSTxXrHFSCiY`LzsHZX)6HI%=nF$oBT?AU zy6Gy=?Yx4ycU{b6%zvEzvcW;Hqq`?RoVPLT`^%{LWQ zWl7usHQLO?pO&=-PNss@7){WT$Q6K`2yw%n&T4wA_TJ*sLPWnVYzOIW zG|YK@Ja442z#KzM0%q{V=e(rY?_qJ+?{U`k64S|Kv`Q;Nd}Jc?|>sehJ96 zac@zAUZ%MR0*4OmF<%;zZoEMr!HLCH#L0prex#O;6OAKCtX)4A>dj|8K)|X*NJ*nd zl?z|S8CMK0?c)RUwBrlo)ntPo)l#DE;#>#vIu)eQrRjPJWAWE>AYCv9a+wkSHEiRf z0@wF3>p}alqHZA>8{HIJsg$B`(vybreJ$+=nU}Mvr{Qk{h{4yeRTqZg=f7}+==B^> zCsk6EC%0+net8vHB_{F94w}RPNth(%kKn*m(-^(KMkvFLlf;Mjeh>uo9x6Lj8iBD{ zh*5bDK(`@mupwo#UWuJZZV|*Yoz>olz(Sqj*IA>iZ)gGL;Sorot{c488!?RqsPf9_ z90BoGZmt}naQpx`^qsW z%{8P3fv-K1lMmih5}}wanORPjj9$vgKPq>p5Y1|DDL=Gt@kLvrDo)# zu24H!E4_pc>n&lw8md{ZP!E3~bCXw0fI5CV(?EUPWj6hMy1VCEOzH|SswD9|7c!G= 
z#pH@CpuQ-|BcSg^9G}Q>^FX+$rxP~3JGhHb=2^_J2Y`*jm&mG!*V9XjIs6Zu$J+{I&t%VZ3 zX(LT-vgw`xhDs+3tyz8Dfr3JT1>5T?UD=9XM{V5OFG!=rVi!rI?9S1nMq^4HAh~TP zp@AwZ{urpwFWdo~^v8HUf*iC7S-mw$E6*<)Gd!E_0vE7Fo3f2rcx*a=Cm@Ee?AVFi zM3qLtc>K0<5l7Q7kYyfSDW$cAQleS1_Xb;9IUzRsT4njr97 zp{C0WgVNWE^YR%9FNXpyS=0dfCtQaOl;rQ^tuc9tS*^4LF-+QoG&$rkuRLjn~DW2Pn5_U*nqRV`>7rXrv}Ob}LhsI=|vQjojOu{zhw zfyxY)DG*LiajXXaR@@RIkdZ$p8;`uyZ1J07HXh}jJ1K4X0>>jGM89Z0nMt%IMqeDh zjk4u^6X$tFB+ddRLy@0BiBCQFj5eez{<>tVQ=aM!3DFq*0}wz)l{!EX05lqp3^%r> z5n#@26WIocm<@g^WMHLql6x^px6r$kA=MK8HuLrP;WWdTqcDJl#9+m^aJ&97P7gN7 z&nHhEKBLOp!}I%Rq2mhH?c~53>JvFEYLu;(V7g9sKJ4BsG4m=p0MivEhRtg{sNBbP z6O9-U_Z4-W!NzTc+*Jk$l^)9J#n|&p*R&cr<@@MAxr9uHF|u=2Q5F+;)QkE z7W0iEm0%&)Yl~k$(Dk@N?}>18p#w}OarP3EPY3X62?+Rp1py(DBI=N=)Sr`oKr{~w z1Sh3XWiHMzfw;gXiQu7tAWLL{CwjCpvV&~OTT8QWoMnSt%qnAsEnsMxYA_}XE_hXn z$5J1QoQl>J{a}bjm)_TQ54|9A@w@Vr`e|w?y;(0^S-g?7 zg}K&z5k%^uxLR{i15gzyQ~@^2UFy-Smz)5_WVH?_#Dh7Akm%RbU9;t^_fNe+*&{Xr z53h#`tJ!$U%iLT7@p+9Y3D4O~Pg^J?-TORFZD#^CiJPH-ty=Ab4TJELoDs(B27oV0Pb4I zb2@4b9aG%Q=wT0ECWyl*1$y4$a#QnHmz;hk5OVFC~fm)NRO$cV_qrRl%_FHT)`MwKy z;(RyW`VCU(l_QlpVoi&eApPYkDE4Y_;O?SPpXI5KFd~aC&3qb9;KDAz^L#+)Zsxqr z+nWYm9iG?_(wQ4;#bol(5$&+pk&aTC%JFn{_pop5w0|9i*16X+fazsCk=!aYxP0ZM za;i1%8zY!IB^keIbHbrC@r0I(aIlee&?9i-Gj zgpNL9{R&x9gGRcQYs@pUnx7_;S17gZB7~;7A<(`k9LUXR4+UNbmIBRpT-2uQqQl7o2RmxB>b%!?hXIuNqA)zFnQ!bLD@pN-igS}YMU z5)S2KsgFqDu$?HIN!r)4dc<0~ch3lVE@v=Qm>e9+wzb?t#rzy5WY7I>AP^deI8bU{4)l` zyLr>k;X|%PWjC+|9k-?<8_hVBm8;&}g!&H$IXJQ7-Us!xpYdY`|Lv=G>lhFwj{LmB&0m$(oMGnT-+G(=_3v1_ zxzKeu2*MIHv2XtR{NS+EBxiU&S525mqewwXDQW1u37C_!cH?MD#3cT^E}}xz^JzBrTw*45 zPt~vvftrYny&8@t^MkeFM_;T9Yq59|sbFvUlOrV0SrdR#E{TLQk9n*-#zQ><|DiwN zGvJqK8mi&obdWn+KIYTFPc}yIe8TqFfb;bfH`<;=>?eJd6YlVB%efuXE#=zQi`fJ| zDE;$NF=zpU!ldaJ7aitA+S$Frcsc0Jg1}YR6`7dG#W{2PldjQ z+jaiu{^&-8j+8tkEvYJ!enm6oD=v=BynF4)tu@J!Uj2jRu(bsg$+Da}FJ`>0sYN*S z6r*h^eKiz?>xLR+yzjGS`t-2o%E+d)pX+q0-|E())@Fzc=@aw3I>W7V<7!fY`SZmX zE!SjN{_$nXQ}gk2Vil_4ax=LkhIu&I#!Zy*w32md4qlNK)f`&0Im~za^fkMm! 
zJD{Htb$4=bh*PmPrtQYemMNfxxm!X0nO;J=ERULyp?V^_WncB{{w!~DJQ!MDlCcJR z&5px5dK~4D(=MGVebr;wl@C}Ka>1z05-wY@o*`me1$DiIrU@+`yTdHpy+GY zuIcvLtkzpmgy;+#V@o~9%*ZFhr++jb^RDNvaPkFUA9zsdY z`pnCZYkn1wD4dPHZ_xj+4Hs|s1_gbsu*Xe0|M7xJX`FM+?5Xy2Q)<1!>z7%UpFi~w z{w{OMZh+zp8zXUCscF$&4L>5w=0MT2pny4Ce}KI%PUyGL+4DLSafKgOt}cy4&HNV?M&)u{ke4&dB5wXO)OnW+&xR9o(cz_A+zS1 zfu5a(GbEpvpHy;WhMsIJD+fyP`oj^}r5;y%$ZU#aO_S4izrcQQBV7TwzOkwtnV3fv zw1^NOcho3leejhB7i5V`5xj4@53^bHXuwEuD@kJxj>efC5%&!dpIH`fmSqh85~QL) zFA#_?ZOroXC#wp9mc;cRQMa1~k40ToxhcL$V2R$8cSl$}0>`!#(WWMcatm{W-TqVp z%Xm|B7yx12N&N=ZDvF!~aPTuR1%y+ITB867yFeSTGV;h7aXS+Z5hgV}JtmWAJ0o^S zxte;aq{J?TAWS312ik)34}xG&M7m1CdC?A~q-mGRn2dtNWs3Vo1Zq%$2;?a;O~`!#~Gjs|NBMhus0T>-cu$JUL0t(h}X z?hco*)r}@z3+!^*F6ezq(y@ycB6|n~*P8XFd$?81H#}aNy`p1=(k_cqg|aQc7*8F~ zp4K|2>32mzJ?7}{TIy|Xa6{Kt{eu_CW8qI1-QnB|fJ`7>*nw2(WHra}meJ=Z-S2`_54t2%CSWn(a)*;K8#K*_zWk88xGk<)&|`rer-c=m z3K;Z$>zrDFZB8kLez<=;E|=uS8AyY9+PM^3d!G2~R+>4T1j`_VfHJ@bz$#2nl}*U0fd(>Azlk~huC!q{2YOz7WsO1f16 zBDA!^@^KHA@Bj+cq=da8V=;(>@6^zjb!tajU|HCugbI~Oa%%1ft;tTV6^<#NlM&jN zJi2c{ED<46t}wWuSed?+XCIz;ZSmLqtsFAt9h!0_@)I6H-#O191--sjT34Ms#9a0) zqTC~#%Q~jGvw=V$>bK(Ii9n*(-xo#g@LwFCKy{Eohebb~lq6huP9#F=%&h}beCkv!Qy_RGaKT`a^u;IVX<*`mL`ZZX|%tt+ckdm5NKo# zHGO>F2g7opwO_hhPzxjQ>*wDm>@UqG-sqttiTRG>cJ7$x$tT8zJdOHs1B$h_!}UA< zn}5!|+TVkVn$4wQgdCj!=UK;r$0BLY@>s8mHz%m#(krS~2^tBFM2K|*559jI9nlYr zSv5f+i7J^~tx|P0VLl}k6@cB5yFX)XCkI5x!ABLdp68Jj99;Gf`qP6fJ3XVRvAbvt zhPn+;T7v#`Pnm-(s_UWQl`hRs~vF$!F z6gPhBzl4(Tk>6h1E7`12ZE`vdFgsVA=KuasS^F%&8T9N|LqvT5HG(5_q%8b1PZnb} z9D0v@3>SKH5S%%2E&+c(fA@IW#h+qEe(C!=lcewWL=MFOo|Ko5eOzVC?lr7G1 ztiIN$yl&gV1>-@|hyjyYgKtyMq7%hJPlM$ews27pVM_p-%rm98GqVdRT>_w(g_3@V z-i4-PL_FM9^sAGGoP}hT{ul0C11i~K88uakW_L6fTU>r$a`7W3CuQa5gMQ&$=PZeQ zvgGf>K^1utc(Ow?#1f8my0~Ypu%Y zZGK+QwwZ(}tIkoG13a=!Z<)0B5#CRo+Ilkd03#U=?-r5w=xh_}$#HP_c?@M#Z-}O_ zVy5zN8Ret7oo<)87uEDGGdg4Kj5m87%bVF;PTBW7M4giJv5!A>D$U(VTz2LCW6yqeddu-%tx?;7kJRTCDk2N8%bl$HDay3<91rY` z{9K`XtOeiuZD0%i@~2L7ba35X9a9sL($((~jjX}TG!XRFoAjbT!1bic_PB#SsLlp= z)J$P3@J`rlApUzIAJAsPSqB13>_6YVuhd5 ztj$Pr?)sN^_d3Xb?mLEic@NfZCni7&ahS)cHGR-6^m`UM55Mk8?F#XoC%&1O*9wCF z{OKr19gGX(zpWzR9o+Yu&wd-jO#VT0K7;YDUQ2Gdu?qp%Wt?YFMSHhbP?vT&?R^9V zM0kytxsVzny$I?}^^*B0nA!li5|o5rV~1z=Y7OmqD^U_fpbIM1wp4Y6etrCJPo$(i zY+3z;w?B7<+}f_@ack|m<@YMr#C#*0t72X6VD%W(<}J$AAz9u} z$2T6`!AQWDoPQ$645`0cDfes}s>^-I`wCb5gA~r%#+U-`J*=&9 zC0%z2AT?fS{Tw{|+? zVtVal-71W_FO6C=U6FB7r)o@3)gz7to9Si4Nj#~{Z=!LypD6O~8PcG4QmOj9QJ~jL zwiD0Dv?e=Yl9Ow`7YrtW22%LjynY8BVmdLN9mDgUyR_OuB5Upcd-a{Lj!oCAdzV#S z`$pN_r_dUwU58q99jD>q)i8H%;kMnAYh8y$7fx&``4|FIhoWdh9p|18PxA-3t1T`C zB77qbEJCXbP$7vm9pRSTr!1YV5-}E9#N0N^19BHXJ71p3C`nDO_3NFOi&R6!NUdBr&ly&0CdIH+gdV8~kX5?tv}z zTKGzqi8P~xk3;~;o+<;!ryr6<^3&UuN>za!l+-9eClo5c_S`?Pp9AYTc9-o-MudX?0nRKP1Cvt z{r5FDp*m%xopbEL3I9pY&}LoIdHIX^6nDk$>o?fTSINu8$*sUe0OSGpb(sI^D*RA5 zV5{R4Yr;QLisX^~&Bq&|Zg0YPBJ)f6xNH1enee)5AMX*J>9{%b)xPjclEx;8-eeCU z?V)`2oW}UCL2JLLSlHk5;w$Htaes*m{K)P3Hgp*d&g`7~*dhqy^k%Kf&%BsYHidZX-6*&dvZ-%aeDd)xzpW=(PHKL z$#8`@Xd#KROu)Rap|K?D2Jo6<4thgnU)Xi}W)I_B=rdBJDrEi${_X*Jn(ERRY06M! 
zQ0q_TW!I|T{h-L|ph)11?5@8A;1st>F`z@-^bEGlpzr5d`k_0(!qj!Bu6VkoOgirq z9STz>ymk1MGKvy1yGzkUrgQh}~Eyc5zod~hp^<-OeG2B#rVH8i`m?)s)Kqc+T+u$jH{DL}G< z2M9^&xZ%Vq^JS3~Xs_U{y?fQZYrKurcepm1pSJuObMboyWrm`Q7D~0#0FnABT`!0u zg3yi^_bHp%;|z#;F<16psK$85u=$>>&gd@%S+9>0B-+d(tsJ#7d)^!lK0HYw z4%1bFjo#Gv{2ob2{T{y{a$$k{OlJhy;aAC020ysIo8LlS-Y%X2Ajqy#vWng5=wZ%fP3ae$O3t0EymWRRv+kx%Ep=n+AEC*my= zI%}!jn6FuSu%OAo=ZCB1ddq~??4+dre3^uyp&{i(9iBMXndMBR(;acC*{lI5B-F*v zof7r^DsRKT(rGVe-nXpYdGiuy)0Z zaoDjWq}K;9)X!vy&I{BvxwILD)a-tTAH|5QF@oW=#yL>N|MKz>S1I1)gl)SR z@=7Szq24b{6D4PUOsJsoGA=J>4qW>8Ls80DnNd>|p|!PYinqvkA@Ze=39r{oo=gRm z92NGkg^Eas^1u}Bw0AE!aoi5Wj1#Sn;iHZp7FQ8rM_t^3Hur5i3z9)6%3kIYo%kr286O)YgWHqKJw0wkGi%b#G*Y0t?>HmnvMvjd;s@pz z^F_y7Jiwx3S*B8;*nM}s<|5UyQz{gEk2^cBzUhAj^uJmcjO&&IQyKA>m)qlAJ+VFdkvnG=qn(+a9)lIVJ)CK-mbPbW^e(;q{8jjZ%l28^ zPqzlhw_<_qyj7Qd?2}h>ptmvhH;*(8VeMds7F-ywB13Dx3{01E!^>IV#f~cQ>L7vn zVrKNsYygED|0QHvaN6gh&_~=j?gu=BkVo8CQjc`hl-;R+xV0k8oDbIbkgP?|o81&o z+efZ3t(Swv@91@5Q(Eo$Z%@Rh<*prKvL`A1?3&2ZQXlon*G!=BeHgIlyFn9s%%4E-SOZQ- z$>eqdjo*M?muHaq8CD`35-)h0q+TP#(djBGDJ;xyb>cn)l=hL6aoUD?>kqIw0%lbt^ zXY-PZS-^qR-|JPOa4fSoQ)@Pj&l~f#d@CwxLl=d8ho)fyXG(D&`mw1 z?wBMWXgA@zm{u~js;j7GW?$RZM{M?wVwB)=uEu+7Rlw~H=J}I9egU|>W*-3Lly z-wx#hlr#R_Eg`et=W4WF9k?ZaDn<{?Z|=L_OGzZNavOs@Y`(=03N1l--@5Vd7-Oh< zNtsN+nR0#Rj2gGPk!(UH?#Rw$e3zC4hXLSX5bq$-&8se|4m_4c+1anXnDTsS!sA z2)Cz23scc?+pw_?((E_!PY=dxE{noT`BDEgCOd}2n)D_f_VtD&_G8GVTQUkFal)(P zXLe>=Y+P_1{fM;n?T7g`ypDhZuPbb0=IgzPldpd=nA&2mCip{D(hS`gGY97Iol|Kb zOP$;)nd_^|wH*wV~{4&{bBCKIJYf9f%{ed=DpfT9_!fX_FY8H8bbSfg_o=;^75LR^K<)I^)(k zow^Yaci=5CW5%^dc<(cAq_#$J)(Jyqsl1DC@ecSrh+y(=F9txQTV2l17pulK49JPe z8p?Vqq8r(|2dsafFPP+KPN*39M&ta?Ta{n6a9lZof-fvhdY&KmV@RVq+`clTFd5&B zB-xZjn)VI7=1XeLv1JY|)?#Lz@2=d#6xF#pu^rrtFUq`&+W+B*{vRwer*8CHw8XnP zUJia_J;cO-?YL5m3RcFH3$wPQx5@c)~J(P6GWqC9}>w@bxkUse(P5_Zo82J6}RrbB63~z80 zm8;9o)34m=5gzLKB466^wK=Ezhx-Etz(o9!0eTcrJ}N`j`n0U3#+CCqU`agt6BlOE zi)t}%9!+*O{E-Xqj9k!I*FElV$_^Z##a5X2Dv|rp$~zWhWn~Mr?$}Vw{K{#8_82=r zo=aWxQ;FvQYg80><|$M~`cOhI$h723MV@6wCLEdn_XvAjFDlr)66RZj)TRE_u#DD# zp!FvS!IbT~omj3>nVxGl4BTDJI>yjuD$l;}No04iXl*ASR7UUOs!dQW%VedO(aAmA z(2rw2>>*F*e+TA`$`Uuicp144_t}N@u$IBs(S-@kM_3jRTB|Y@o)A-(Gf-PAzP}d? zGAsI$GclJGAwk60eJ6}G>OrYlal6Ht^hEP^J^I3(0ZxoiM>W)AS@7~$3-quQ#?H0- z6#t7!bN}AvVvO6}T~#&xQ0^)1BU(K00oN_T>+?#{SlTVmcAj|l80&Sq2d=;WzVC+r zoA2x0^Lev_1?~=i{g2_FLiY|rgbPdt8%G^#3tU^!@EA>Jmrd9#-pEn=&Sbxt95KiE zE@LW04clBvmkAUq`JsK2Hwd$qeo|j--GqteVimGcfGNYnPJ(ASfF1O{UwE#)MPg%R zR-9*Ps1ZoIw+9lGgHCNiUvX6*lobar9x?l zOun3n#%BfZ?Cz>q9E>(}dB=hjj)KLsr^7@F*F*UYgkzcMHp? 
zrv$qfGFyxFb%?|&p9Lp{oR~4cnz8)@1EANvP3&w8m9||sI@`lK{q;WZ-TQ%(+_%cr+|skt`}l^`(l%~BUF!_PT|XL98~N-E9%?UC><$u;P`Dfkd8eK zI(?-(QxT4=s=qf)qi^;aV}(+&!6KqTRVI9o*t8C%9Sz*}xEL>ye-W?mx)U;(y`f%RPZAviPnK~ZvJ(%MjTcwEUa_>2Mwep1u5p2KPfe*NxZaZh+ zy#jh{ow3W5Zw}zTwS)Nmo|u!*%E)pQ!L$h|(+&m{=Ryx(UmLJ0??=7mAFe4j{Eh_+ zC-lb3-_{g_t%6(GeZv6ytjV8wVG&!FxSxDCbn>>-{e6b_ zDhQXWO=;w)g~!tL6oUF(&DZ0V?iVqYptq958eJ?|3OZbfX+&HTFNCNZspLSPFrO*SAdsduD8!^fG4mOmMjM+!fE1RTlpuZaf7yPC7EzPgEQ)&niyo}x}W z_h5eY&{q2Fbv!A(zDyeteBJy@fx7WGm4vQuus<$#CE8KrIUv-}8?)E=g=7A4nCI9| z-_M_bUFhxW;!QO9PKEjE{^UiGKV!e+Le`q|(*x2;y0nid0nEe`=cIrKHvR&OYcS#c z{X^H~*2>WkQPC!WW7UPXg_Tv$q05$2_&*njja7bbVc0l3V7Jpd+e}{sM$-FJAoY5` zG3w@+!J$(LcK4-e`zvIXalE3>aqk_R(HD-#4BCJ|WX^;#8SGn<7zS>7j+jhtcPz`R zu84ea$Z$U)Kc(I+s0u(=WM`P}sr>H`C6I=q4!(0*W8!$~sM3a(FL{{ZLQwW%6xL?hiJ#;)DEZ^u1 zJ=JNdw!LE8@VuspI%fYISs&WhnIB zMYw5ff4#}URa z^-K^*187dEd!jD$9K!RUZhF16V-n^>%qole_2!*yPHmsxO|gGB|Nl2Rv3Q3Ex`ht~ z#iMuw{ZJde-YPfw6Y|+(BEWt+fD0ktkjU3S zvS;-i_vRuSTe!lei~Nh6i+9ET-KiKBRu8ZLl!7*fk(>Ur3+%N>omGxeZEAP}-HC2T zSgf4TU-SnoM{gp@v5vO*oH|JHMb3)c(7wXvuEjOh z;98bBy|4WYJXz*7nHmVqdc-q9wo#w{c9X&K>1b>VNqxLW^>3Ur0hc9E=O9-3S8qWaA7`b*sC#QkZ zkjdWz$0*Fz!Dyl%lA>5fxP_IN&BwXIm;1sSV?$N8q@9?xNOVPd{kz-b0F0-R%m3^K z=9lXTy{0me>RU~n4B#z;b0Z!LQVqe_9BB(?ec`ZsdkGY{93qYq)ZXavqgQ_o-F$<7 zFAaG~5A}D(_bqV8PysnE7dc;!Z2ny&x zN$iDe?&CtAe3iClpuRFLsdG+8{Xgifmmb!9)>}-{)=pMiW9IpZ-DLN=Ae#lfE|D2Sqxf( zj?iuuu)QZewanwPCIdg6@_0^KZRHzN>sFDjmlv~5M8Z0tV?vm^s$hVa?oYiC^9EPO zp);Zki656aNa#mak4Rw|`vBX&+OHm4N*YtAaM{ir9G!GltMU6G%c5$c0;~`(Fh}xF zL&ARIt~SsrH8S~zRgVEiU|-$&KXVO~Uh2vaPF|hm>AB!zzdJa00AKtZEjui$^aP$e zVpqEvmYZWiX$kL-jX&{X6O#rKtY2J(Zn{I3jc^t(tE$+K9+H-$JN35J&ILA}VX#Pz z7{ODiZD|s63ke!kMVl9KCWAaEUL-1`h6;t}#dSQc9@GN{yvK{S|9-e8NHfRB)VXk; zzwBd1Zu=1y%iO7s9y7+2Hp~1F*P`W^5e7$P3$BacS#@Lag)iAOo z#0$mjhsIYgN9N*6`aE!9rQY^HVAyefRZXfZ=q;tozZQt@E;umju1L9#)3QJk?LeFC zOzN4ie=d-+5sq{1#a^Q)<*;r~x*Z45pU5$B{ByO-c2fad)}+ykqT?BLV?XX_O3g4g z9V#(rmApo`Intnx<8cL__Pg;1>YTO5iPTKR)W1k;C3T3N zr{K1#_6Wl^uGh&%#@34$85brB7W-XY)zZShXrl7}%P{=sVH)AVM@t2Z^#nQG(CSO& zk$_Lb)6_8*qf7_OfNQrwJ-CV5K<12L;5Cr7@U}m7MwRi7rg9LDDE!cX#Q>-zY95{p zV}M>K;)?!tS ztt%AHPdG^8XGA)OB(!Y82D~TM&4^4kk{&kdj@brK5?10m9B{GuC`b)L5mVkfJr*4` zMkYrINW1d5$r7J$7CqkNMiu#>D`-NPJOZ>nH5Mob!wF`iyXxRF8u5q@X+m3AG zG7o_d*W67pAlH9q5=d!?J8mp2GO_S5q?~qvwasVDxkej1>e``c)# zsx=ckp0t=HWmcDQYfQ$U44VM`mxwPVT7;WLw!Ju8-WbJ6>4mQJxC%NvYi zjTmLl5Z`nuxq@x9TMeec=#X97F0AQ9H~-P8)?kElTEhvOk(GZ!ViM*BKhLR5)zMkc zC$%#t)n@MB6Y%_@W?1iVv502cmVPnT2Vqf{6*km|OusXWlkg5=t6lJc(fxUhIrc+doh$I^Rt-Rn!DxI~zibny|Dmrm6Hw133>2+7vnwyi9nR}KcqU+Y zVSb`ZL2l%(vbJ!+fC#Xr_I&{8Rw6+=cz4TH@nV;~BB+U}<*?Sh@5m)|fl=WdJg4n4 zQG~HxLmb~>1Cz?L1!b^SpY^$K6e`68bkka+{{=WJ8lEiGH9P2n#0t&eAJ*s8IU}xA zGGo~kL4+w)H}E;idcR~hS7NN(zASRm#S>?+oOxZuMTHoaeMO4!R0lNMb}K; zTdWDb7pzBfY6Jz6k5s?Qte7K<+WRxEM(|z}o|^AUrzw}avf1`5Qh$azJT67`7VfeR zm^n{0EIPkhIF4h7^EWY?bjMITD%>j9`l0|PHp6MCvq*fsWBrCY*aGzHC+&-sWR;aL zC$AjcIxFaMtpTr(r(^VAc;78AvE0fBBeu|lnRBGpThI?(D`p5JKD?0^?uo!Ld0#oJ zD?8*;7byZaPq7;hXZt5>A!iWGL2tyP$6te3SXj31s=*=RZW6B9*%|E6jS#tWG%@3c zF+gs&t>Jc~$!&_%Huj+hx@CBfc6I=PMC)|+H?s#LQOc4ORI|?EZEVge^5OwzBauY8 zqN}oy&uF4msVtvuO3~%}ytci?O+;A3zWD`0xET@XPDxG_{Mgh_Xr97(k=pLqVPDPI zD1??|CG|e^=VR7-Lo^-RIWkpDvL!a6$#NO0qJuhyO_*fB$GVt;*?`el zZ|d38SNH0q%D-}^dve2^dA_IlY^+)#n7)P`r`zR_6F9b z`9iLHpS@7|qicF=t3zb9+8p5d<&G`+kAV|56nw0I@G>m0u~#+gJ=m#kHmaWXx6md= zMBQ-JFt}Z4*NwZnaB|r*YKAP~k<2ijG6&L%uF(Rc_p<0Eh)vBF@v7I8W_WjaAu&411)h#fK@*yF!uBev*B~auD+uN`NiyPRK0A5 zl8S?h9t6{zp7~jvz;-(FIBMNawZmrEQ>Gw=QikWmY%qNu>%OqtpNkTEt_O-z-u8hEu|J`b@EwPe&?_EL@RmnO=!v|l6G}qb`G)f5 
z_d59uiKyvQFW=46$o`kj??2r8_8ipk1){w{6xfF*oC$i~C2faJtUe!(bc)&|d{iQq zXydLncr@AYyE4YH`QB5a-7nqIK&#jNOK0S^LWd4nA|WweR{nZXwNPTpIC|V@CTsFy zpv@k77iw>;JDKWM>HVo+$#_`Zto@Ffw91X`_L0BkCcuz#g}=8?8JPDZJpeqEi?7Fl z?$`NB@d_!ycqcO($tB@kNJX+y4xqaZ3FhKw{Yee7QE`X)1=Pts(rvWckdt^>luYDX zCYk%ET>$A^FFm z2>4})YfhLX>$t6+^uHOi{~^s1un _v!MS&e`NQ2#mJb9Vu_SiIQj~uVABYJx&~l z4+1|PjUrsM9T(GW$eEs*SzU2WD>nx_sKW0lQ&e91dSuNP*V*A!1i6+155ri4rDdQ` za1Xjh)kKM9#G(;06u(D}BpnmpEySsiC~YRuR>#!7f;Af{!oio~J*DnIbyWY)mzr7K zzc);`{?4lB}>QNMxKov&CJkvybgEis)4z11F_v zPm0VuzGG|fKnvy~$4ThDla#$p@i|lW(GO}c1q%AGykwR?TSm6VH;xBY^NMj%uyFo}B7>Jb5BT#q5U6 zogo2Zx@iTca*Ar||EyuR4Qr+McDg=6=O5sC#p3=Upu2p5f0U4b6O`Rf3?q|G{XHqw zdq)0fSGl)h#s4KCdlR4-@VSn$+mZd+olDn|@_Ccn6j7j4v%J+a7G#@9M6K`1 zl1|uUwtQ6XA6q%xn$Bui|0}sA2Wb})L^p>V>SRMPFVa1UJXQ2o3(vvDvr&sb8+J6Z zF4XfK75e&3lJ*S--0RytEFNPb=ML{#wU#o<)XW`Yxf~dcAF2N_tPB-Z3H0YF zb@fQqsVC@cVV#O2;V^=;nVN&yy&G>e-z(>OEDHU6Nl}a&UDDGoXN6~j-VU^4V)eHC z#&&oa=^!bjUv>CwzV6{7TEk-=)}QqUvs=dg5)dKV=Rksy2tzKfhvZz1O5WTsvs zpI~Sfh^&h=g~z_b5K*tye3o+|*GoQ5((maq67DEUyy(`nhicL3?$NtP}$R^dE zK@>9an~E>^q|a>sdGn#fb-9> zgt4))c?5r*tkq;tPbWuFvvX;Mgt-v9;GL?tjGrmCVkr~Q(J>V&t{Lm3=j!WL^>-QP z@b~ejyC9a6pHb|ZQgQIClY>1T6`IBCQ;t{aK^gZC9jMU0y8k55>yz{5YI~O*kN>4~ z;{)%!{u|hRGvASO>=+IKFGl8su~uM98P!^usAyCNhXh1(>{Jvvp0h#Q!X5)S4+4@7 zqWs86zx}g9%awUaouHL`aauulwn3QIdYO_aK%3Hvot-KUG@_E8Iwr?LB6?X?V%RbK z>1H%ivPR%cCI*p0tGxlM$e4guCF6+Oof;igV!|fF#Nt%ZLXl?-a!kdX5753VdG~cg z@XGKKfG+2IkF8&j+_BM4QH|iGds9$w_V~ZKej}w?5_fXzQsn#)?g|^O7b|9@pEV?x zuS{JQ*T?fUE%`GTD}jghH{f~5QpbcBNrb|z2wtv3gi$IxmO=7jH66|+VUj?e=MyYe z87TdutC~D*0asb3K?k~=sQ_7AWa+FmT}S@s{bI>PixO9u7>LDB99Jq|H@<0WFe{HS z%Yi%xN@_uwd0+OXxS60SoYq?13 zfXh9+R$NnP!c*05;lABoa*^GBNY_SBa1)^CP^-w%Y}Z3k<{e-T0e9ZLh{S2!}^y3`bf$zI)F`&nq{L%bo{}KS0&{JCPPjUxLl;B1ezNPwsoo?`4 z4R5r7d@VrDGj2B)-$B&e$wL@?vK>3$qoOrv&t%JXzjX6)Fu-X68?%OQ=PFN{Fs|Vr z_D${C6{q~`6=C`FrcL9xCQT`syp>m}4=trhzFL0g`|z@#fDfwDdqY{##pYV@9EXXR zyu;{oF%I#lgmX&V=7}DqW=pUZyTFmrdvST$Wg3wRv5Oh~_&I>#TgxOVfHEy7l%I zCi0kMi=Ez%#>u;=Tb>zqc4;?SwNtI(xi_In%*n2a6bARLh_i{<+zy$>22g-L^F*#8 zg*0oBWjP&{`=(#|3M=33+?+D*+%}qRfqT^%It5|Bf`li2G?U8LN1jKJD;4+pOrzrY zzq3ujaxYwE0JYE+Khqg0q2(Yf;b9<+<8hV!c4oJcvPNxM)y69!@D=)z-=5n zXd;srIOcdPOr-+|XE3WrSZi6k%SNP>!gw!dJAd$>`8FZAk5!wHQDxojb#(k(nmSCP zG?}}d#^VT4=}F7vs};69ZLK+AjOueE9gMzviD3vCh`3?dSn*LGOB;Zk&iu{eM3{gm z$a~&cl6dcRF)nRWIG<3hxlO*T`A`i3G}SpgwyoQ7m1u9iI7_esDZB(&<$pCoon;Dj_aK?xtk_d&|X zYPADt_}CNse?1}50%{NVT_sr)cy~9WnLCF72`Bk!z4zs1TtRs z-=QG;foZI7#j11riUZXs8quWY4Iilts2Byq;cm+GWyEpB2GB&T!>a4~ScBjXk(pX zwVmoSl<4>gyl&XgAJ{dlO7C;%G_Gtd>wk*)|p>{|XyExGz=nrJ+PtoWU zs9tDzhL>^1J6zoF-m+eFFXs%2&zxsD&ztQLQlslPX&RNmfs@xwxe`@Q_dvwp6!kv) z288k4v$l9XU@45+dL*0#mmSy#Wt!S(vA)k1j9zS!`JW2xovY!w9`+;2)pL;MPUB=v zi%7oBrg>Naz!K()mP5ve;9on*+-NTQEO%GCb2L##Gx>(Lh0+hOv`py)9iPR>1@*#~ z#?Yhi+4qVKuZzh{2I^&%?Wv51G!;oVLMbCbFLLH={_a^vPRedxYn)G7U4}G|uei>m zt_gU{ZHPGjta9ex&6GMg=+W7bA8a;%$)`-`9l9N4vIrYs2_*53`JdYhTwh$t>o5Me z#n*5+W12d*dX=-LWx9t}XI@P+t946LfFc(vmVd)BTYukEd~WO?86a zKG53m_&$)be;PtX9z>4Z)#InDV4A4WtQeb{i`VB)IN5-UMD3)3qO95o%;F6mpVg5G zm^(`y7Jj|_pw!ubaJ+Q1?e;G%RdHirU>v1Ot9L4= z%nk>=D7qMe9&2^I(BMNA?lY_`=~pC{v?{!a;>0C~L_UcM;awxNP#~G@nxTt=b;%wm z)sir`optITKj!G0OV-eo>dOhh@*^);_~4rfKoLI10lF0P-wg~T|I+NhNW#EEtrM*h z0586II@KyeUxzLh9CS{}{nYtEv+Ehd2w2WB*2H4ZZFBpFaRMZ{0-NyYMfWsyjHBBZ z4C1FuxyN6vhrKM(9YOY#-KTkz|7^T~gg=zkq9$R=Qiu(DL;LZ#SHm3#auD%<1;S-z zQum`#8=#^^VYt#T{9FLm521%k!^EeL%F?y(edx!c4?jUKn`O8-=6_d5%vaa%9wv5kA^!MW8-!1;yjh;H(ozt1)_GW{unFtfGn(Z z(`g5YNWJ{Pjm|5BE)h5uv|19KgAH{B&pP!nLMfdOJ5r!mMgH9l?f@}07c5DYJHCWs zxsH#a>xyO|#z)|((x%Ewm_j)E!|b%V2kB$^5#7^b5Q8Ha{6(`k^m_w*~t-Hc^tsCjO(KlUbR` 
zfm#3!gG^t7cY2EJC{+yuSL-F-LAcbbEdB=O2 zL&@6c{*Cya6uMQwMO%43cV0N(wWUO+?UKjzMP%M7LV-3^6vx0to=@OjjHHEx8qCQY zK>U>Il2~_XFyo4f{NbagybeGj1)Ab$_jZ01PBD}A>zV3gv;co>2zRmKv!I)MNo5c{EBH?n1TBzu5tJc_xrUES%8cgcV3H_+i zSTV_xQUo+vGD{*URbmD8(k9_5MU-c&&==cfz{t zeLgq7t~adFJaW{eFQ4#XJ%JgzGCJvS&RI0m56Po6hXK5tOOQNl10Gz&bgEzo2xx>l zdxG1qhZ5S`EQHsYUy!>xZhTxHb4N9p9&{#+m*4zwbwUw6qWgyd0C+bv`;V zbv$dIJ#UwlSDp`WcD*qG*WHmR>Wp|j%GmobFc~xg<}|QtDQKGXszlIxO1&3KAx2hw zm(%0!5~Kx+T{CRRnnUNkhtDgtJ%HMXrfl!z#lmtbyBS{rmwl?JleB`ekZrU(l4a*vE~JAj7&nwujB`Bo{xcK$zi0ca7*AMLO+YJH z&m)6=I#{5Y{`RLv^;WDhh3M%Y$`M_HGAB#gji@&I&{ha>;<>i$ zVT?p#m~y)>nQ6;t3DY`t2#_f*2c=57uBPM@z?YZIGL>AiZYED6QY1)`rf6m%GOkU; zcjF*oc7#^Q(d5U=R1a;;G@VGz3C>R;EMA`^=2}Os5GryY_RMwc8Ak)5G@^;~2I`7zv>$ zKCDQQEGr3L2Ld|DuPS(38@=ac)R7nPO^aiO&ECS25@hrTy@mG_ttwY*!yIMuLNak1 zqF_C;F*&z{dMDCw&xZVpF`CY4=IcCVRSh}UU<*<<22sES8|j~UJ`7=C-tp~c7q8&h zFlo*y`0#IjVxpQ0(P|;rZIFwD79Kw|jZD4|#sTPn#Q($CIYmd-b>aH!PIuC=ZQFJy zso1tE6{BOLJGO1xwrv}oq+;92$@l-~=A1DO_NcKh_gELT=9+8HcfL=s_OX0FV}vZq zNi6ns0n`T65m(iqgpWE_U$oThB4T5qLj`AK7H#v6w+Z((eFU8|yv^A0DK zhOP<9>(!ySw%td+^oNIs{nY?wtV+Dhe61PENnQy1kiGG#=z5wHC1!^Am$#wYqNQpp zmS?9PGY;UiWE3GJoI8YA2HQGPLTM5tZj5<_F>lP&ylNhNOC_eY#5rp)zkQ2w+mG4b z5`*MLt%-rSrTB_q1t;CgVml5((xp9TbGY315$g$hsp#HrpznLW*V*j#3ywOjM@;_F>Y8qh=w=aXY1&!-A;tHTYmLjEF4rd? z2eVT*;;zx(fXFWrf)Om4gE70;U~=}lepX!L6H!a6gNq0j0fYNRmGQD{mb5Q zd$IRZ(M``ic^z&Nd~NA@U^ghL)+q6wYFq4+I4*m{tVh9twqMhgb2bKufvWQZmwyTG zI^}qOt!i?;*!xuD|Dp?Ri>;6@yxdwUtZg{=z(O)N^Zezy%(1I0pp)r0#1mY{h$F97 zo^At9%V`eP-1d=SjqY2wV9iXP{pt0=6S z!A2c9kNLyL&yW7yN~bpd%vk(#1{Fb3zc!;K%X4zF^#iev?t9me$8>O?SsgMVp`UJ~ z{#C8Vj0y$gim)3|ngauG&}p+8q=mQ$T<2M&N^5>maE@(qt1hl)4Reyz19ByP&Sy$T zZ_;yZJzsCSO9Z)@xO^_tyf1hDKGFix#$9Q<^x=D%l3!8VQ_1r10(YON|$xez8(k<+%C%Ct3TSuR+8Bh3=@W6S7gmK`iH4joPq4-#fN* zebQbFcDCX!OFMLH@^zI!;zP1>NY2hyrFQg}neVvf<)5SWJvgs>Z9hWye(u?@Q$>g# zzRHDvf!NQ+)>a5N=6CXkkaIdlSs)FtP#_}YYb011Ro@3Dm*Pkl$NfTL(Gac9kzgbP zo)RQ3!rHqdEQAUPI5CZKyUpOP`5=%siRz&b-#U^+B1H^}@dfhmwenb4-_7&zH4pq< zw@ex&39)zwzdYc3Jg_=U0u$xZhDHZy&8|JF1A0p__p61X>Uqr>C-P7@nEI7_3Dc1eVI&eY^99 zq(I!iwb{km@}k4Yz%JVe^@z`Um4;n$+ho9q)EgHbqCpD^!F|ykdpu;t#$yen4cZ#S z$9xa1o8IJTl{&EBE=6+2fP!G7!tGd*=Ljw~8mZr8bXbUdZ5m7e%*!PK;#+VQ zX0J>!8DaajKXOe?s|Q#Qb5FQke#<%vd!kijO?JJ%VU+oKwb;%g#pcRR;CGwM2haOJ zsjrFN{-L&qk1g?=SK3o z;R4wG#~r5`+!2}{F4ZOzUm&9d6UHxqk0j<9XS+&+TQ`PcS!K{;@%)0vSIw!bd;swEm3y(eY z-Wd;@eRJRVEBfh5qDX~!c;Ov}RbwL}mDV*9w7qTy$379T4eMLg`z?BAn&!l%bQSnC z2I6SL=bLG|gTO(C2c%KpL!O5ijU57P#VLT~Fc-`DxFo-9(TCki_Z9n-H!}LA=(TqL z#C~Kix+%o>&OuWXr$|q|3z_BP_^l?d6{WVvK9OEON}9i4#v;iZ4tE*1jZP#x^>u?= zg~K%<|9e{psk^15w!EV!jtC?onP$AU`TE4Td|@?Eo|=GojwkH;ghc&{A<<@84Bf6@ zi?jr=lK?v*WZIp`5l`!FN9dx%6PjB;JMf2HjIA?5^>|Q6G0Sj6SG38BD!i7O8FcKw!yhf_^N1~^VfLjO zAvP8ufieaQ--J%eGjqk=U(f1)Or(~6LJ${+oenaeWLAo!6N}}P3jcd= z{qOqXAxcjKifx$~F;5p%#}?(_IZ&rKc{+fU zAj0V#a^X@$4m0l&r(2zJ2R?Kdoj9+!i=xH?2HWmr0BaUw(C8L*I_Sp=a9X?|gt$X8 z{8+R-2!UHQ|LhO36%ksakQFBiWTxzKv2^eVf#`B(D6qOTNYW2t0SquDqbPuFEaq}L zG4vncI64=`q{zOh$#vy^pl=U4F@1|aIanX~8;O8zYs1v%-@GoVsw)Gnh=5IXD9>o$-~;`EZIZ)_@3rB9HxghPZFWgYqM}k z>E^`tNjw*JR1L?;LQ4`OKlX*2?FO)6cFp|e#}id2|8s-#%sL%QmWSHrlsrb?Uv86o zb%Q~S?~Ty^4{e={2- z!J%X{eCieP-^Wz4ZWHC{NCAI(oh~GxN@8Xp6qKa!#2hcH`sHC3v+iT>e2TznL|91tA{P&3j?x8=z6QF;_7{DhhDSSGmndN=i`$>O^-0RJq0*!c@lJL1jHW7c-_*ka8oWI~{Op)= z6|C6v4I<_%n3^H4c#+F?{a$C&JoE!1UZP(H3Y;b~-&XfU%ERu_F2lYAtrmk@$CW`< z0E8cw_9mC(`g$fF$K>m-BW5gdvxl3eOnG8aAlodNSX7|h^7vVMILDlIw{0M(uyj)l z?SSX~uBa%P98yMrS;9|SI6%FVGO19>1kdp&x^+@q+_=#b5>!GJh!e*~N9Psv6eXRkAsAu; z>C)22>Q7cU?e7l1g_Fq2k3b51%Io=_vW~0-6=aR#P?N?ajdUBh5Vv;r9@Ju;7qM@x 
zI8qO(uGjBFJ^tndGKl|-r~YehFl0J)^Du=-WJ=ap@Y&E!;AAm3Yj@v@agVEHSV#5mrSS4`JpTY2$^OKJ$f_e8sX)8?!EB6 zAC8SLtNi?21XUfZsWOD>OB2gm2dAdy4~Wm9jmnnFdFL7X4y#J}BO12GrElKvFIx_M zDQ_V#2`nE2f|q}Jam`@x-b%@C@Zj(nNM8^awAERD-3hGgGVF7@=nG?(A$M?x+U!I~h`c2)q6nDjm+v3Q%gXsKthIil038?RY+ z(a^u7`XG)T1t0tHBxUj^GtA8HPz`rt%iAwiW$u^Jlg~rz@`%3JaV#p=cnB~GK2Wgx ziLvr1dW+`&FX7lvC{kP3>2GPzL3u$*A>6AIR{w7$OpZ{Ztcg*5b7q! zjIq$ig%IXEW;_&L`a=;4nPUx=(`np*_8&n1N=!R8jW-!(j>}+E82Ka2>wrA8wcpIi zZEzHz5H)y`d`kE$dHf=&TOR&Ke>kN;eZb)1*ddE8$&92|jZ|LDDKjXl?Z;>OZ5OTy zRs?Gji~s6ge8hFUKiji%y}5PRm+qG> zo{z6ze1kTuosx}|uaB-8^qDlUZa}mQ;(ISuSox!lL)reI~(h`ms z2q9{wpH{YFv9Ek9s=b?M9@I20_?|}$`mXrPb*WQZUUA%MWdj&B^nXc_;u#yXP$|gt zmxsw`z9eKsi9EoLQ;f63vV&kPJjk=NHJdA?E4|bGWQR%N&s?T*dHGq4uw3FeU zqFcTKi8;GdCLl2s3Z3#-Pe{EQ0UAv&@}~;uAU>{)7|y)f%=TKpH97X-7UqIK4S?g^ zjZ!83@e#p4NM`bY1oO=t_ZD;H*${-5k?zXNX`omQT^G#q^^v7#?fyRn+_)MwKzi*aYa{KoYA@ckrVE3I zO;4%P;{^`tntQyyfP?UCZBOvYM`%Ko-N&y?gX>y+w8VI2O;u4*O=ND%qMO;SLKHlX zSmG=FthmioB@RZN?p72R-h+mSc`%5Xhy@rWNldTlc%1fNW`-Bpy!fm$kCvpc8_7nY z@5N5eF6&yL4(${MNDuo%bRI~Nud#g5w|B*^zow0Jy`yu}wsGu5f(KXp0LLwEV+sBg z@Jg)3+jd74ja0Z-D=<^cdHiJDp%?S5gfZ#3HPw5xMNV_M0NxyPw?8zAKkjaI8OS$q zvMeK_)p|`}a4?e3sg4Tn#;cz+1H<;|#zcI8NalW4I z7J$_~`kjP>_}%C@-=3WZjm5%xYMT?#D-J5eeMTah-hE#P-)N@<-o7EeAB$DCy{Jlg zeE3^ZfJ)~U9r3>Za>6Fxi*Q{?QR??n&~cF(>-Q>e%!D*o`0F@3uOV-(iVbzm!F&=S zcr^yK2~2{@oqKV(hkvlWphrEL-HGm{f_(OJk3pj^QnsskObBw_@wr*9w?tf9q&K-P zlfOPV5HQdcLNzqU#-cKL2*kJ2=T95Cm>}lF5{=3^usdtOoD&?Osl;ZmH+j#{*YP2B z+4#WB^gh=lc-iG&e~!a_x}gkkab$P&v2COPkX zaUTQ?h6up3s6cbo4FFqo;U5S>pA|7u%SciU9(O;x;Qg(4!tA%5zJfc9+-tsn@%bA~$ePTg&!fn_F8pqPjj$qO0~vTJ?Wx zA%h|Qc%+|2xzDFwbdyln-d!aFId34ahYX&f|A$(~FM#ju@=R^H0CIdF?5u*Y_a(6` zOuwy~O-!EFk*qKz_hcd|;Hf8*%{BTa`Scd!TCPt?;K3T4H;CS@%$xNvwjZ1Kt2+fr zS|Pk(;c@R#BcB+a0;J|?&u1ywj}aFz5aYjTpfU$@LNB}+IfFr~6s?`=fQ7JAPSvZb zPMnw~zDojwoq?kT$2gA;;T8MhVDxb!QQL0-Lygr>9QYd{PjH+-YB@PQjItrpPLqKF z_iwKuhLM$>twOw{T!2!RL4K|a9)USHd%uJGyb*7{1TZ>+G)S@&qMX&BrMeKg@lBMT}$&%srEfnXD&>*He$Bx>tjDf({P~pDBJFXTS;kg@9vFJ z?F#eJo;c|&5tYAy84-^ljAe%?SaV;Wc*WGHoHc^h(d)7049*+Ffkzh&3Nok2QJlrR z5p+6R7EaeGBHyP_9d!X20S&jxAKW?PS1h5T!SQJqpeSbn1hfkAH&!RW{DMdubAf$3 zI!sv&svhW$E*9cjxlr6pedJlX?+j5i-yNosGI^b3zD0~gZ-G-TtjeDUlLSk8c~fMC z;Ju=ri7NU%7wa|J$J9tf`>>>yPNp{D!{yp6xCjEDPK-WvLTv9J%zPAPdW&3NdF~2b ztaxZ1jB@LomhA9pzu`zPGnwMdVPY5%GDXgb-I1@DaMgR{a{}u$u2fDz6OA~n^3TvN zx;_XUapgf;O4LHfVU5flt7wUPqeo1Dpz;$1icIG8a983rloCBhBQM!cZ@Bb`mV$Wo z`M>D+>|uuls!5;o|L>-rOl}-cw>fR(S@dpqRFQ1YcViU9qoEr%1EBf5$5IXECw7x_ zvu>=Uo?zC=?f6c~AlJs}6HATvZOZbhYZN$0CUJ7Zhmgp_9qw|P*%@-$xX1nL)=-R_ z9v_p~1NN9@BQDcS2b`4}YsZ=KrcVWrC7OvmF)pV~PBnwfgY?B7BWD!(jQuI*&-c)U z_J`|((>tt?F}(hi1KDgBo&1|hU~rLkVn!}K?uxQb)YL(~*#S>tg+||E2Jdpa4Yxy) z*KpSdZ^E1IufH{0`cewsoiBJSP?Rt?SYXDw6O&1{?2aL-uPSdzKG$cP1R!ROLCcD?4D1e~blf2&Mo5ag5 zM2zSQ1k_I#Z`evK`9)&=v!E+HjlQC+NmKaw*8rqFDlC4*Osb4C+}LN2xtoH`39KgY^tY5cjZy8A8{#GbXK_~g~2#Y z%Sq{y>eBq$YNr(Qb(Fg0-izM%D07Pm3XLxCtVJ^Wc~Ju3HZ=@#n*8<|^;ije;Vfwz?G*y7*d3Y5<>D!D^yI_P&6v-K;f1?Nmp?R~ z)Rmu0Z>eaP+=Y+J&WU6)rd0TTAG6wJPN}e+H--G=xIcwPD`qTW3QGi79`Jk3ZxcB} zDQ@as)-r|ljuqpqgn|fi_Z|$#5@%Es_J&aQ#YI$rx9OM3dwf(%TgTU|;PK38xg_wn zTs-H~9rwdB27`x6>+z3UFB7YZZblH@jAVXM0Sc;dpNUg+s#(744TUpaac+$Tq2S`7tC~vwW(wJ7xUZsTGL%fds@HSR7Y}CP}q$$W_D+N^tu=Myhr;Z zpA8>D)G>aZwOMIZA=fY1v7}%5JSP9_iaV07S`Z7AF={F@NWn-I+t_d4@RZQ3_Z70! 
zYBqYZw@HcIrZ`d}3fkft&G7&+s3Ppw=i@P=87E&7^u0!d>eLjciHhQ2P|Zq#ADI~w zVy4*w*5@nq&8>xPhYhqWng%Dk8gZD++574gl)yVLP1(vx1?lzC2qGXBWQ zjaC%&@`B>xc&(&qO>5>h(GEua#_$I1`|a@Ku>ir2pth>CpG{O9ho&lu$yt4^RRa4R z0la3H0ZfNy`48u!u`9n<&b!N9uUf-aY84JE%D;4#X~fq=D>!~d$fN4$3GZWw&=GF7 zprNWLMZfC|GM460#IH4A4zOKRRjl{mcMwP$Kh+^c^?ueqZ?mAIR^0b{AkXL#*0FG@ zjSZhqAjmeDa^|?GAhEJDh$SeX30tnQZ*69n%H{Jyad|sW(&1wfxw|Ag8PZo5+wANp zf4r3y(9jIlN7icS59tIzUXl2yrNPHdN~^{c_icRErX!-3HO?_xI1Esi?#nBoEqq2z z*bQaRd%91la6&_=N{a`NZgxxP4X5@S%5`dd;oGEsxbSCWZGC9lU?s`Mb1Q|SK504U z&V7yn#{K#KqF(^5x)gP%Mn^woNs=HCsQ85blkRzBB^T&ypZg!s)RhQN7jJCPJxHq& z-+HS_=#7c~Frib3No*j;-`D?*AwLP>sa1Or?+N`Q%n{s*^)-yS{Bcfu1{W^oXl>6{ z0h3!S<_Y;^)}ts`w?S0F(GcVLp_(XN9uCa751VnwGbC4yahQ?U0CH#nLr)<+LnsLlw5uYBKp0x=0AWOK~AF>oI?8D zyo{24j#z5_yUfZ9x3&&1bP&X`BlY!e13;N=PmPk+iB|&Vu|;aB-#~;^+XWEzVkOi6 zTf`k>L@wuOzVmSp3augzX$(pov50tGoecQ?4neRjieuFh#*i64BWQWP#%}n7J=Vp8wa% z79}fILfC{aGdk@zBT*!Kusf?!Nv&b=m$VHjM<&438)Rh}?zn7;sa>iaQ(r`hhNJUW zNc@#V5HJtV3Lb5)*!5EDZQj{Bm!uxCtsta$69JEOc4c=I^=zK;SPZOOkDO-aa^RfO z?*@%&FZOzLD~|=R=@US9c5pg&TIar80wuEq&+#VfvL$_0rjB|AL(5GOkVZN&(U_1G zQDzE%0*p~>t%H5bc#z4_tTi!N0EMWr!4ubt4doV3b`iI+q-*KNN22w=p5UnHHtOn0 zfQ)D%RRI_7o6@W2f{E??+U=j{vH~+J)v1bn@Xe0QWPr2*c#Xk=KC=d(A`TT5 z4AbKdZp&@`W#$O9K@E#ZZ?ITDmD1G^sfYO<+g3>+JW*;+w>js(X@-tZ97E`V z!rv-3)9izZyEG2&d*}qoS2&7R^`W6l>*%mK2OXX+gEmDm0F(pD_2~s^FeR1LCTpdl zbb5`6I{Fl*dTb#vD_2-JVauIPv}1xkNl_U4vt%7=hq8bn3M5=>N*WrRvRB7;^sQ3- zFfOB0A($pB18ujjNgEH^m3XOh(hd`ksUF1=nGAidjPZ%tuE+gZw@m+^`<<+4Y7$1NyQ?tFaUD z+z`1NP_`gPQZ>*NP8QK>7kWB6_;kD=w=wojTdme$n*?J#b7J)?mi^IG^HZMmNSOeEz2)P3xn?OoHr->Tm96U;vCmc zADsdXw(a0ZD9TE?UbeHX{?UoHk+n2&_>o&ej`Zz5QLJ=X-q8;_}@JmtIgNs?89*E^6!8W4&IQ?@`Dx%^VF-y zmX^hprG=F_Oku%n{Q~GbuM}Mre~L)ko*Wh zQD(vB*|s=uzp$yyWb&`X;50UFc$^pkA@T*BT(|{iWzN9{t?Jxr?9Lvs+7hi$D<;EH zSnagpW+e-oI17r&H~VX!zq1Rr?bEprN+(emdM3Oq*Bj!e-9zh|A{r!5TXvm(gZo1y zrVTf8WDbC$U8&;_s#SpFe?~M}{wxbl1d~iJ`-|tBhshR7+p|$9^YI6T1Lr?yI;F$< z@X9HD*4#NRmd85V@tn_A#r--EF}I!_Z1_y6N)(Z&WGZ6jX7V5KhNHjmOH@nm=D(G? 
z6m_(;oNjbPRJ4r-<9oroY&g-ziGd6RUG3m@sn;g7?9MhcQHYGX6=uZz3G!p~6ngN| zB&6&=oj01_ZPxk+WqFI|O<5%B%W@z9yqGX>2IR!#0bo~N$nbs;0&qNdlT6F;P{tWJ z&_PLE0K4IixIceWxsUnqo{CFxtA4Z68{$dNJ9BhudH4yP`#<9+g-nx#aKh*`s(-R# zm=BCh%gH&iZJ6URdP^L*h+j`<%8WJUoxN}j8_6op0LV=Z?D8eb5YT?%f9n92({yMJ zH|-nt&Ps^j*aaK2v)}2q+?LX1L5R;L5%7}e@q*(Fuks+DQt_EPgSb2SPgO!MaeFnK z)Zidt&$Gjn3jDp^A;^$L8rkvNR{qRH{{ULyacED6IEl#6ESlBGf<^X);aTWw$RdP5 zn`h9P8bD=0rlf%z9uNr@g7SXVP#G7t<*Z%d@@5|fXm=4SH8pph>#JBm>n1gq{Dt3^`y*?lA3N!Z zglh17ln#c_;Uu0~bFMck>8nSunvQ|gS8*IeKPvU<3W8diQo^#+zRSZsC>mt1QoMrA zs8163Jy#2ZN*>M=KpUEJ$lwx^d9j)KN#qzxGl(w^9Y_)OMY=u)%pfU@2m@KCsFDP) z&4pvsnNle_b4y#zl;16;qRS#XiG>metkGU)OSv6~Ju6sO7E!GB4d~QSq?wrnkhm+F z8fNvr6Bvfp9T&qcDQ;IFohdZZYjPs*Cde3Zde0pQ$jS14&)fVhsginInhKKZ`30i{ zrWwrUjIoe3{`sl^Nr6B*gB2k9i)>;u0Hj@OCPhM$BX{&&w6RoyOQNAFbk%_Vyqc;l zc&V7R1P3!V*Lv8Q@(zuJ>{wB8Y2eeB9K~9CUpm3+J;8_G5sb&b*s|gLQ3EE&52@o% zxj(dOnFPBg{D-iXcLaVZs&k~Ql-w!Jw*%z6^W>gJj=m)yT5UK1->!I&Nc9FYg7$#ky zy+s_3&F`OiWOqFOPwaELTl?KbQ%YCGwRR&j#_E+!>d`$hC_eIAK9?{k=%v!8z*N%#p?B5<*X^ z&SLr2BtcBmYpO{HPI-TkF|)z4nHa*kwAT0%V@!N>Qfx(7GP@CiH0Qp8)(IIB2XexE z!n@HVI9Hm%#Is9?9{;ST2gK8JbTK0!au0(_dCvjZw8-PSN*V%I35W;Y@&^@WJ7~J#hHU(OeH-oNZRR z3(s1R@k0da>a!!yXOoEiAXz=v6U>$Z{$wG z8cZnlJKp3sHLDS0pP9F3PnzFjm4`zU^jvx$ac`ck^?mlcXq|2eS2K$HhS-n!VhNJs z2A^?8VH{SJEw)cRGQl-%QqZ|O5;*T+%waJhr=_{(6uf7S=ZZ z_RWRCS>Jg#izE@M|8u_4igfX~nl>DR{E#(h5h~hM(433V)clr~w<9riDj79SmdaT2 z!RX)agtoV*q@l=Ic_EgIGIzL`hg)KYka{MhDi?Wk|9H9E%HbUTf4i4n9&+#L7d!t>r_N>02WBerXFP^c& z3At1D6l)j;C2@XSL{mNIFv`(<_l8x$KTAT3>@^drp zeia)Gk7_UGd}KbP2h!n14Nzn`0Q8p27==f0sS&rA*AMk$uFO|zG_o}oY%$5-D^`W2-RbdUn)v^PEAJa@ z_U8^~&g}KsWha7W{04tIs667`7B#~gMr(?h0b5n@{(luDt z{V5{1GPlaxFn^;O?d}+dfK#@tQE%-t&EN9Ofk zbLm#a12l22{O3Q7?L&+`B9wPpIA3fk$2t=U7WE#{1z-4I+^P=wF%sw%ib`7YmNGs= zHM!!l@hORq(uJ7fY_-{fH{Tb;oNCW0l#&Z6A9BL;r{GcB?~;KG>+?5a&x7}>3Dg}& z!;SO}Wc>RQ7nj~u${B&nRy84BEc&?J!y zCo(*|zlNmG0b+9_5AvqRa;zBKkQpVXgt!?MZG@?je&_C^p|I3tb~#vi633~cP4!qZ zxeh3|Y>HadQIN_s8!Sq}H3ZMkC4D@|_zdb3U#?+4-LCT_Cx-K-m2Qa{4ahzOdDP5k z)`mLWQCIj`f-#JbC1kd!e&t%CE8J-TL^INUE=#Bg)98vqvC283sP=6l^dFQB6Axwd zb`xApFlwv`*(ugLK;6|qU?-y&I1fdMnI1Q@?#9+LQt~uU;z)(&Gw>hMG3T2&QA#_` zF4P8kvZ-JwE9itVRMKv+DkChGpj<20Ka;+{ZDP3V_z-f+AO6Y17lnM&YbSUTZjG$% zD0{k{C_yvT9e_aG82Us;MG5Rin+tJk#87(U>&M}v?I*wf>gU|ZhhS(|Vur)! 
zP5|OP)6V`&^Vw@pf2hN@1`jmYrZ$#$(rjt4@5m}6 z9gas}<`V$&A5_T6RqTHt>AW;9e2)U7rPyJ3Qvc7$FeL0A6#X%F5G;!hb;*(Vx0K=M zL1hmT0?O=2(&_Av{%{~uaKDJ~qMw}NG%h_A(C0_obB~UUMIw)Ncq(}eXG+(e6YP`p zWZ5xP8shykI&~|E#?hFsb+_u&`7F8-YPdu{_p6#WKbUiUX_vDfB?6e}O%HOfU5C6q zjwcjG6jlQ;B&ysXwqT{)Q-BAQTcV!5aG{%Ndw`@~hi=QjI{#o*0BL?7%-tXN%}I|m z&ijw3-oIab4Xa8a77hUk$abTjnpL@Y{d`Ogrb*m=!UpF}5AL{EFYIr39H$#7#mzu0 zT*iy2CelF?k~6;0*eqmm;)ADAV;BO~RG;~eu)Y!(n`K?!L@+Q%T5%hvfv{6vs=Nr{0BKG|0Z zA_mc@fLYAmn2mF)dai&aarvbZsA>$q1C!)RwW8j!1d zTx`E|z5agSN_)@+7ko5y0kuAqKOa_uoePL5D8h>?HVi8sLu!$2@l<02CD z51%VLPOTqKdkTtnqx!(Q<1>nrC#6eG6(Yk$dy0#i*mLAJR&|rneS+)(;oGHb)CA@p zVyBM)R9E5|v2asGO&EFh-%qp|;Qnbz2eJm1(pn6~91aZZA{DGMrN^Y?%SB@M@) z_;0Wg1EOpi9!&T1?_`ZUV(#K0k}?I-4jF|-WJtydZsr(f0h350>cZvtW&EMd{b;jo zCdKD5bs!Ik@E-g8xNP{XuE@0m*AaX~wv1OF63E%_Cn>L4Nspy;31JMpS%#W5$6U(x zfD9_RYYcfqXCr-^W+0oneWeb3#BO)gP5T*Mz2)w1&-#=ik()N#w&(`3XMEt_NgkNRp-&zJA4*M+(Bis5oTP->hN zLS-`~tDJ^76tAI>Y3KbDdz>V&+}VOON?COBU;fx|hXs4jq7d2bMxM=XiKvPOc6H(A zygN$-9_H)>(mWHbm_gqRi`aLe^_DI+xaJvSdrDk*rf}f2vhLWD&hT*5yM_T8ekjH`1G40)$aCAk;VeT-!MLqz zA{cKCckDt}OR)UjW6Xwm9$yz-O|tdV{gfx~m=4}<$$f?H<_f9~Ee}@J{y^`zuRSPIiT!m;D>Iaqy)sc_+mJakfmDL z-N~iws!(LRS!f|}?1U>2sG>{eJ<)=9OS6;&2TPV@fs-VF+PzrG(!uCS4oMEHxEuLe zEAa=@KtrrR$!H>H!5iFHy9K-DQL)_NgzlkWY%$Y5WV|DU98W2BDc#qcNhV40*V$Ej zM_vx=m5ALo&h0&N%j&8U#MO~4Vkp$p?o4vTT~qlc!%ckkJen~AdIPL~KmFeyHsnbU zDnEz<3Jx8HTb1itfz1R%Os4HC`CO&gQN5)C%M}YKK{lYR7-w^5gbLe*S}eHzv{v#G zBSKtl+sqY)CVRDxD2^4%d>)-DHMHa@DVxiZ4B_^C=;8hyO;GUKl5h4B5f2(4lLaBU z_Und8Z#8c5kw9Rh0xVp>Iz)5 zemJv-DTOMbxHX<9;#1Y2cjo}!y>qc38ejkz+@H%ESVJ!Qv*rG#;p7E%OmUm%;?9e+ zwG`e+D>A)7M&?d3NROM#*=lvB?KUrR@{8wIY^WZ6Wc7!e?(NWAP&Gf0Rq`ST?L0Cb z_K>kr?-wZfbm@T>nq(-7M=ZhCNDh^rj4DS3$3|#9SYth8 z2NK6eAQ13%(&W7Y9BDraekk!-S^=tXknS-KB-vCJI5C&=J-c<*+pnvy~q(GBHd{6*7+DEFI9f$2tUy|IC-3J~5k4=x&O_ZR|9 z@6PLd*F5Frx)B#v?})Dl`&HWut4RjQIN<7#{hzFf933IMS`faC)x}jtiOE@l8E+1+ zPVaq>;8S@+@jixfo=&-CVmy3(EB{`B4)0T+N9b<}1ofj{#jx|cznzgRdA9m%7nc-q zE0J00S(fC8D5@CP4uvJV;X)fvY-NU@8vgm7{N;1v$W3mNtf&udx86TsgN#QAvLTmP zqE$3jR)h`pguIUllNL{t?8|c5(Yj>g8B5P#Nv$N$8ke$FtV~P&nf4)8=JS=yWz?DO zk4G{zRr#J~WH+R)B)AZe|BRa%{QGrK*Y0l*%O=9U!iBPGr!70Y^+w2F<=(hK$EnjZD-oNW0=LdCIe7)hLkQ=+gQ``3e<8l2 ze7x)+hj+cjJ@!YiMqj8fD6KIc{WI0!5OfjYC)p_bxxyoxIM5imaA7#Rs(nZ_wUMj} z13H-p9G)hKDTPUX7QYccj)o!sgf~V2jvRFixAIhnTQjRM1|$K4mWJ!>LZIT3Bk^59 z7wxFeMAST+T*S(P<8)F-B2+|05ugeF(^?>O>W?h!)ra!c-4nW(LwUYR+_V{@6yzuEEJS>^6O@}V4e^Rq z!>kcCT5?ev`qPEA{vBhu6;iaJCsUWG^`a)9Y@`3`-AH4t0Ww3YX~70?Cz;{DmrW8V zR>D?n@P`psEC{~Bjm5oaF>f!H;}O=A=?*e^)oxgfG5@dFPdt%=DIJe|dzqAlzDEu+ za0!h-N(O{ST6NGSv}i85zFaG1`GZ`4+GffnEd#S0Ep6|AkV0^W8?6PtiYMbkY%57M7LSM`psQbc60fUx8A}JGxx<%yq($8=JLNZ zo_5Cvb}z$gisw|lzYyOL5D7l;)#@vKwMTo=Mb(X5F1%m2VxTT9mdcjbo8XNn`D7R~ z{hUkiEnC%grh*7W!WPl&yqWvcBw~||;Qxr>U0wA2iR4dK5x~BY-q*gF46P&`qzsEm zdlNd!3A8pNlS>L6(K+@NVWF2rSxP_?wyrdKv27Zb(i@m7MGw%j4) z-}X2kkE%WbcgdS}^e?)g>c-Nqw)p8|`6B@mA!px8Vx_-Z#rbf+)P~Y1u$TVxCs2gT zu^i!U=-y~7)!6lDh4Si9|2IQ{X${>yG->(Heu0CdM0! 
zd(CR3Bs=DX)63+x$F4!Fb(~DKUF#az%71rQf({92R0B0Z`ul-VIkV}f0;EaHM}dqo z=FcR_kyRWh&I*^+&v1>ogk+=YLZj{KjB++92L!pFQ8BrVrLpC%Lod`mb_k{s*F&1T8=bb4%c)6Ro`G-@jkCtmG3y)Nh;g<^8-zmv&uDl$n8 z;+>To^AD~t6ST*Z6j%w!*m%-lbLI=xPU=O5(Qq2h&4pZaqXfz>(tXPh77^;7EIQ5( zMuE5q{^M4Z|HxDGW(IkuI3sIT3g?pv-*6fqV;>WNP)D zf2vJL^2;t$zR>Z%^LTgesk>HglhWwg$t{wJdo}VqR)_i&4jqLjs%6f7l zo=}9dTRWtkmwO|Gz&Bq6Ur^y%dZPNxn#XK`n1(%|QmF-xGrSoZwXT$UFrru}+M4=0 zs5+ziNb!s))5;C+*L``fC3{tFOWqe)aXQ~QTTO4#m=$}J)ZIlE_Edj@JWTl|frzTg zHyDEDd#G~jjo;R1Cs$${+Lt^XRCY;lnm$CP9$gYD?^bsdexL_>Rr@2jf48H|TF9rk z#FcB9&-A*P7$u=CS>k^`;~WY77Cruv|JZ4JTp*RMscIqY{Q1)}B(fqFOGpjhwuPUn zI5~1n;EvpLo0QYQvl54%KLPZ>h^%DXrZQ6SwNdi;M?HS;AbQnyfqIhBS)y*S`GZi= zhikM-vx5|~;UtNU*KS4gUF%aZ%{_^;aqU^MN{_?E`u}s2@$>UPt}b;3LJ_!jk80PO zA$vd1WdXN)0~VQM{!KVX8kt$K{ypht-<}Jdcbn#emfw%Cq$mZ$AB52>kwHCHUS0QD4J-QF&4$)WBs= z(IPay+y8i45Pr?oNn+!Ir(8G0;d%%m-XjNxi;eK~k3Z?6 zGS~6d@u7q{vkYMBtVCnQfwa7Zz)gG9LSZ?4MY!*UHkfbO17iJdwGnoQNZoB^Dfn$K&W{4L~9Z4*48j?tmRWj8%Opd1)y?y5kA6_b!K7hwY+L@Fc zMuoO8JoG)6!EtjnFp+k3I&;pDTMDa`#HyqZx$w!jcWo_*PJ#v~?oh0SVx?Gd2@WmADa8sDrxY#lrq7vk ze(&=f`M!6~O#axD$;_VYz2sWgwbr_)Be6|IIxRirTq`?CJhcd}U$)hE2B7DB(PH-{ zv?m+yFWaXw+)R2^GkT2pAj|pPlu$S`Y47x1FU8P3*PZY2q<_gSRn)!>W7Erckga}A z#1B)p-vsF6(F!veQAF;kk-Cr7&Q?5|h?*HM62L+Xg;YO}E>D~IdVz}|Hj44nGsJNh z=3U&oqsxdXZ8QPy3ex1Y-rEhiSU!zw5RMB3%xzECP&37GhT$!D=l@ON_i=Y$>c>8tXABk>jevmcu|LuOm^oraK$nWF1H5>DS!b~bj zO-Z(YQoJVXSekdN%TO5{clTPWj8Jj7|1^e6%1Emst;94dgCpQdyHhpLMTV>9Ev)MJ4 zZ#kJ&g;(;6dDEh&?jCoK*&%HHkUqIcL)W0>Rh%B`v;!2E@CRECbZV>sD53W79Bb8M z38)8wVhW+KB0jbI9*cekx>ssujCE&{vTnQg?SIPQw4{yUlW&B<>X6 znSD0awnJZCylcFz+k7NW>!Z?rCfzbXH+t+A<4{1+F5JQ{O&H7^V+5G4WCgC?HB+U~3Q=@FA8bax$H>vHi&khz}4jrHj;k%Hm4x zC*(OJz1K!1{0?&8K1Ltp(l=MH|13iG^^m7?IM6mh5aS{t=-qqf$%tMEyDvFxR_iXB z-_j@{{5m$33XUpu()1I7`7Wu>fBDry5>tF$IP~RrbLW4$)pE2b1{1B)Gi^-C_F$}d zzVpL8Fkq{#cmx*gq2YT^vKF+o>HTko*(Xd??l?SNmh!v2rq^h2NyQe`^2=p5<0Tff zdyDenC=Dv763?n{K?$!=w|mmyGKLrP$MC(oI;&PLd42(#LnUKUVOs;qYtG&TL+%GG zPdcPO>Xq1uIc8_@*3dkZc>rH3dcw(U5>ftK$BxGPbNAx;7vsfd!1zm=S+N=wvlg_p zjxR_*@*b*m0og#1ti+Wb<@0w+8L8;ei>b(05STDCr7K=rEv^I_99%%i#!!}~9R++j z|HjY8WSr*m!^%D3_?kUC*su|HM?<1-(ZljpqSDkf@gUqnT<{j_5xPRLl;iF!)L;i7 z*Yr5)J((L|Es7R|N9g>zP!Z|V$Udcn5#3VsI#QcO{iwHJp6wKZBMB}kz^^XuW{kxx z?$RArsCk12^du)nlNHbs=p@CXZC9=M-cS67;++F!zG1z;osSc8VIiu4ZB=j0A6e1eDkbLJlW09ZOBM8l$hpr zkNBwwARE#Wx=G=_I+MJveG{6zN`H-SI{p0DBx%E1n}I}@yZ*qa(T!CSU+;Ruo9SYo z60WJ3>E?Q4@l%{p6UT)Uif?0{-yBt#pesp7la~bK>N zGM;NDAdRmmDgJ={{y+t_%to%)XKM~yO<%T~d|DU$bxM2GY35pz!3P?SyL%}o; zpv5Ze%$hyh_^Z3!_7g#Gf~rW%a=h5}FqL6&b1GWqdbql0yzRU9uU{(}iq!U$2Hu6| z98y1KW^nwNNtoC{agiR1<(C{JSIg0bVK?E~65Ro^G}Hrx_q=pxux=6Pr!P^rp;;@7 zkK7^xSz3ST7xI}bGCL|Evt!(AAj4L~ug;>aRlYkHJo~tUU8*_r>yfn|mUZkDXi!A~ zqU6_K%TB5$z_J6k(C3X!7omtTwSG@^@~8;#Tt->dw^)<2+e1Zypk1YCP&dZ*oP^fr zRk7i|-=b%6njVe}bHZ#r<#*crP9^?2%ikv{zJQP9&A1CtM=f-dcC&Bd@>(KnH;UUi zA)t)c!D!``m6?x=l5rGx7nrN?*rdt}$Bs~EA z=My}Rc+JKu^)wiG9;rX#6&PXTxn%2hK-ZTBeBq>z6Q5?pD{3klt?=%hZADr#K zF7Aev=>s2?Yn zCKo+BQqC@Y%qt#E&r+TFl{B!@S6O5D$1_9?*3}E3b4=7z78-PWa%UjAM0?{}fB;SL zM+NA?805UxT=gza533b_p1mz*2ZzPOkwvAD^!%G+WkIf$D(S;Ma$?40l2dsqB_eB) zup*27RLv+#37g6izFAKusE`md34~ScVaaGSqeRbx9JBCHDDUqri7A&z1~uieB1O^u zfH^4O3)FhKlcX}A_Y#k~T?gbmyBLh+l0V){YeS!6;!SnSOS;60Sr=gXV5Jp9^2tFZ z!`{%>68o9VSnIa_4q{*C3woilQn&FXckdgX#kj8L6yz1-othIa?Sb{-x`bxm$)#d5gyN$G0g*_^)Ut@2| zmt%E7+M;#>ti|e0+71QEU|r8a{~;UKY4GEO2T5mi-3jZ%lzVh`!$XX@Q$F$t<((BK zd#~Db{MfB!>o`5xZfr+J8-f-ko?ObIuisa&7JLAlkd}rW9LF`HWxf4Tqh&*$&2i$zc|y8J1fGJ76@>7?6qixu*B?nN zS77L#=*ratb?E!ZmBQpsBTesv=dpm)HU616oL8XWILnARDVkhI1`G;l}ym^VA zizF$qZy_biL@VUcGlxW~<%%5(5Q#5rY`B;jpE;f&CBieNrXj(LY(oeDV;f!h8r)*# 
zxyq7(fn!$yPQ+EMYCt-2haDq^-Xng>4&Y1n4PH>?|3Q)ly?XA`8qShO#rBh+y5F}* z902%B*x^3^izgzqT)6A=SorOOF?IG}x{&3&z@Xsmi{s<9t|G1(Xe&5{D)vrcZ#Inp@pyh$x#!!7C=O6;OS?q9zQsZ0{z2FMHmEg=(v0M%$6 zFeh}HwH1=DG)C$vT8!J>M$T>;r&?Y23Z$$AJLMU^d`ZlWNu`^2F+bg?ekS0lDS+p7(mOiE{2;bl!Y|dO9YvZ|JEu;FP5^S5 z^>|4K)46Fgh}pSf(b%;MrL2Y;#@+|YtDDQ78G19R0m zKTz^0T&GVgQ`Tsi@nW9nl)X|7Rm}*(Vb^8Q6tI621C6YDa(o_xXb<9RSglq3 zTFVM>byJ$)Mfi&KgtVY1aT7+EG&@RpU545O4n(~OZ09UJ|@!3AfE73IEhaA zQ1dWJOKu+=F@c$%qv~Q>$+B+zFdtOrSXs**wssJ!mtdQII1v1m#vSr2P$x@{`e7+% zVkax5&7-M2cXYC|7HRhc&TfSRqNEq^Dc_{8CE0#RT;Y!`Q zH6f$3{bc|uA_-sPA#X=JMALk+OucTDOFZrCEE8dxM)>!l83R0nvE54vHJ=iL5;y(K zCSxAc8-(C`|AZQyjpe3z1HV}4Ltd`VM_zDyRGERv1nuQ&eQW1NPR64YUD5lAV&iNM z@_3Qqi9LHNPo~CGkr?l0wFc|lywFrTDPMB62L+YeTK@z|cDN7sx-{tj0PozJjVRT9uIL_2vIzu@TVZ@O%hH8um}-l|S6>I=HQqbhl4O zu)bq$Wa1TchnAf|sw{SMHu8Aq4%=qSMtyWT1r7H0p~PX3xnT#<2mKD34+dYutKY)v zOo&B7tC` zSvIW{(wa%~>8M&%o-&Alc2fiFz$aKf&z;n%mz^?Nzhz@?NcJQTQa22!XSR>T$~YvA z)zxH!R&wWX+bqRtK)L#W?FhS3KKD2~ck^r_!9$ zDz}Uf728(UTKx`>f5~J)mzYVN^A1JTn*Wj;^jI>nvIS?Aww@|940TWZkI+0_E`ToySIL@!JhB2TL zv;p0P@{iFyof$DnQ}$QqA0Jp2^Dw$erRbyz<6|yTssMA~Sz>qp3rDRloMid+ zq7{7&+Ce08n~Y#r<~9=8qONxx-GW>I`fHJo?pM7jkEj__X*CZ!8Pg-jkI!DDbhuus zH@@BSu<5XMK(v4s4x$7C9!JEpbt4X{2QxN4%lOweh`^Y9t>PkV=B+J3zWA%_CxJ1&JsQ3wAN$u+-Gu!oRCrSY}OnO zlQ^$@>|R~%pZV}8i+%rqe`Q`;J#Pa<;mXlUe8kg*X={Br#dyxXU7E>ibKrP2-Mr*c@F~eRKU= zQb9J@JHJo6oT<(_YwrV6wvmv69L#oTGP1vl%F+QE%AQ@U+H5YU_J#7(0_$*$i|3tM z#=*Ml4PGDrKR;<9DY&uDJPM`7qTT?$i>?F)hFf4~>#f;5>2kDhDcQR(T~USmm*^Sq zYZb5_&f~9matrWG#wyXj&DN=@qX1qmIlXlR-Nk%Nxi(!B4sy>8Y3ZMJ#UM&OND)~v z6@5KCoxDQ2H}!Lxb^l{9?)j?6g9C$GtZLSXrZc?gDe+ydM|vbRSz45-q_>uIvJP^1 zIIlcwu~wRX5G20(Doq!XAUz*nVi?LC6&sePR!MVarNcHv11y{)QyLy^Px&?a8hQBI zBVN$J83dF|jahwD;-F73R;t;(c)K$v@`eFzG7d=X>XfP*$R8IrnUql%W8_EQci6^= z7ppi=s*|rZAAOK-E#>*hwUSP7-T-^7YjGH$%9zlKKnS6QZ9Dy4C|m! 
zbe!u(YtrVz;4L2i7q6)b@54H7MHQaeA*6g!Ch>Op4Y`-ljygR;=c^hUilLFmbI4$Dz zbqYwKtYA>|hEXWS*yw#5n}}|-65fY=se?iJavqogUETm&Pm6vv^;FT!Te}V1dw)#O zbPTh8QuiD~!=fJKFy$%QR9Z<6bDL=M?E~n+3-FrR(6__V5QN*4&9_hQL2-kx`5U~*^q)OVcsYMk z@fvfQCdK_1{)g^TNur%SMK>d<{Z@3Hk&*CEBnchYPg|mI^HND&5&EB0I3Xm|K-kH9 zu_n-4WL+kCsr$4+mgLO~fW*jJT|hCCRUZy!;y8x3PA@D)AP#YYpd+z5&I~ZH*+Z}b zzQj|}CwT)YTbD=>v6h(I1}u|G>(1F{K>TA!gAmI|snV#RuB3qqNb%Ni4f23UvFN23IY{zWy0xunXXXczC;?XRtUa~J=!1pPO4 zto`hh`blHqEgmifZ&MQN4A`nQm#czkRp@2~CEg_u6 zsuDV*XIa#)2M!TP!gF>Wr!(x*TFr%*Z2Oh0N4u2pQ7ixO-IU5l$diDh$Yp_;slGf3 ztkd?fj-Bn?gs5(H9=wW!vNY8M<7YPK4b;g?$;9KOd6%6gad;-_nO)6ToULw^QHOfcaz_85y>D=4uKX+04H!ol&0;OSCov2 z1J|XiX8f!}6-xqa<9LTbi1s+_=;}BLsPWz}opfr8F&a~xA>M1?CN%Q*OYUtF(uYls zbpFk2BEQ`ylc%U&$QR_q-)EhP1bOaPtfgDn>{Lt?7k}56*0&!&VDB3)RHG0`*9iJr zwN>IWNQ=ucZ{0ASy;|8 z#O8zz84!7HH^!A6dy&+FwnfR&z|u{0g{S%&^cW%tG2eq- zdS(VPJVCB_XWtT^RN)V#@hsM_)h$MB-AYXr$5^AvTZ`~Fv(b4>SMKL+Jeb6u%I<#t zDUE&r=N4LcFg5dMCmww81a3K?NI51tPnuRIg==vv(*Tf{>WtJg*o57J^}Az(W>UtU&(S!b0})R`67sUZaM)dknVmO_xlS6a5(?`ZkJ1UkWN+993?vVS2 z`w{T>oY{S%Z;e786WU5oh&TF6$h%?UEpX(Z%IIWD;!es}q#CRlug`p}t`Y0M$kC!% zsn#smtN^_T4PqekOO1DFs~A6me2iDlSqWm!L1h>t z2!Qw%#tkc;HZoS;3IFWSzI%$89LF0jZ(VJ}HB%+pBym$WvkarSU7D(i3#nx7O(KdY z4c=jQL~qmFJQ@KH4}t+T)ZwXfoNJz)4DjtINF>SadsxWl$lpZxjEl!pDedqwP9*uk z_fgSNg{bEAQvigSs#TO~i&tvoT(oGX^&sWGo(%J@gJkmdH;44qs`fgJK?bH0diI$9 zW^KcNZ;r2c|2iG*ogU=W@4_~RrzO?6xCIxK8`TrSF_2$XOF*xl}Dkv zKER9sB0HFhZjqtg3H`ZG(XVcDZ~B#(!$WG}%cSDz)vy%u2+(4^!ho+CMZwPlaF@{I z1>vF?$Y&tg3!U(#BcWH}?A7DS=Q#xP)IM*-TxG(Z2giD7?r8xq zUe=szTS@RJrOj9L$%{uR3l!L{F($#4mW(BNpYj~w-kgna_BI_RR63K2a7@HNE9>@3 z<~H}iy0S`DPl!#fgHm?GN;LCSim)RMB8CYA{X0N)Q7-_r3CxH-b+ zHWC4aDo6p?dd`$|2_qBJ$I_F;)@K71tU);FNkQoj6xMm|IHG;TH9h=zUTb|5C|vx1 zL&iK08K*xH;%Zx=Q0)v$J%%G23l!$sNJHl-)_mjp?R7bYRpZie0sO*7co63~dfO8M zN{o)DZbjOAnH?c@$UL%S#l1V1b{neyrrY!)QM|#0 zhg0FLWyH3HF~bBkMW>`10pkogvF8h=(fI%! zC~2|hN{l;OcmD@BDD&;*~P9wXLJs(aa!Rg|dNAu@;ZW;C?NTZbc}B!oyi^d=C!9T zHE)l>RpwgEr(E}ooj?zZ0eqy_cQEn0C;?4G@6Lf^{rPhb5+m`1t5#B716iD|rF*uZ zABmG|FqO{8+0eJq4z^Nx)^ydby`)XWm9h*4v)rR=;N}dZt~{}JlweAHOv#?R@AaDV zVTAe7P(#hDOQ*h9j&kK7H$Nfx#rxhBnuy2*{wWpEx~4!@rC%)Xh>aH2M;6yCUK;b9 zsG>?hI_P7OY=6d;M-GOxMM#U{ED-8BQM~f=_-rbLWycpiW(Za{pUf@p#hI?CzNul# zj~~ApEP}&Xq2OT;!I>26b)B_=*t~(uNP4C2I3&s7#>Z1^z#HD^>~J>h521{bTAk_qcZsYe&aR=!D1FWe;0j(@#6@ z3!WjTfSYi{6)OBM!gvSi_Ee;2$~dm~$A^_ILx6bG#g0}Pp^p$= zq)VowU*i}WiZz>IP>LG|iDZ=JeWLXx|5qCMkHqo`BUa%MgyU?=J967vsUI)U$?`Zx%AaI&ZEcdsnh}Up#(ZSGPE&OPCEGA zuesRM8m;_B9feJ&V=XUT9y(`DCw3XR#gO1^q-g`z?z*-&4!83uTPtXqtum*W6wzDZ zc|UvYxp^YE$rPMlxk!G=w#QFR^MN)0iz5=Cuy z(rhnA@*KI`yEC0sx@$npPPT^!Ug7GLGD^spTV>*qxg~r}8N*;N4C0;xV z9=H(Y2ve?`WgGpotK&>}?;#);$>`NC(VxulpAtlbn)Su*vs5ZnvW+K)s}cv^{)`is zMA|4$(Er47SdYGtjt*@<8+GqVGUp)Nc+Jt$wVrml@v3X^?K;Fg$8}y9ouJYXK&&`Z z+P3|J8;sy_ydo~{KDA6?mRde1ri_1)Fr(6KFa9=&4(QA<4J!vzd_Usu-i&Nz-ye@y z*+UKBu@Rf@Z=EF(q^pNZ>(`SKT|2U51$oZ0k3<}7%z@l@#T{X?1EjxfVG&W4;AC37 z9=UdsPPZ-^e7R0ipZj1o>QyOH*$Lo8sZxAqUK4|MOIllO=us2H9 zR~l`PVHQI_wx9$OORZBa*)vQhmSlZY9rcx#Mi|mSD@S4zW*cjP=2^G|o0hYL|Bf|e zI_eSAJ-+$EZXAGB8%ChH^FAJ9Wob|6bc#CRwuc0;@7kPETuL>@&g7yv zOP88;J20F6{2>E&QjVzq#cZjgI?ssc7m%A1Plrf;Mq1pLf|T(W`=ndHj_I9Z7kAOT zv17yr2++@p@VxFcy+42@jwpt5h7 zOD&d-N#==cT637n5mmClwsFPW#zbw^8LnHfD8YnpQLayswQNZzcpLS&jE1XkTI}XT znj1-^!we|>ZY-uo?Tf2=sx@6Svsw4V@YhJq|)l?}))Do`A1D}gh$;@dh&&^9ewSs6$Fd!hRESjiA z11eK$OmsNYOac@>dMa*ygiVYRO!=`Bf83XR#P9KR@ONS6>t|%YS1C|J<3GlFG$_#! zoPTLOtelxR^Q^mQOG#>e{)hQeLBWKq3!=lQafNnGl)UtZegBtkbgVV_?c!#7em?H| zcl)LfAIwl^VLs&RIDSMxD{eL1vf`>yAic0S2$htSEMmvW)2d)e+lBT@tNEeCrs|fI z+9RcF{&Wp)H{^A?!xRZygQ`s-$&Rt}u7V7nIFygIoz+Tfm|Obg@w>~w72EmcwugI? 
zVm5B83JK?i*2;I>$W+eiv>S{siIogV^PfcE_KGbV55Tc^4_;fdBrF<*wY|~_mYKpC z0@tD@`~t(j%yk#Dq$Ke{TA(0W$mghx{yRd+kbSfR_m{a4I?{gSJ(q~Cb}CVAS?R4W z>?1Y(#%@$e@9AUZ$~I^kyI>|v4?acz-I&?hEn1EiNQDs@*nI#}hQQ_;SV4b&KwANM@lA-nn z*pegCSV_pn#rQatK#@PPlH!6u8y(hGnrp0m@B3@UhTLl{ajF4lg(JfxnhJ@p-*Vg~L`v^wJ zGlpR@1QA`VhTg-@w)jG+_GWA1pF9u|5xM;GE+l#~_|S{(6Ghi0#M{?bnFZx>S-51L zI0$AcAV-vJ6~)70Hx%0_983NY-=}y3>xYbcdqb24kw}l}$P1+H*~WKF%iVDkS4bi84RneHHHn0nrgo4eMON2AJ$N0oFUrfBh7b6&+9 zD4cgcWSX%N!Q68=L1IP&t)L+A6NS?bWa+g$D9k^T2y&SXwYrPr61Ztw68H{x`o_T? z&BR9-^PH60Q6amMLTLsYL|myX&m3sm*XzXhqk2rC;`@d{YpFR{Q`` z`X>%eU~b>*z4}{3%4+J_`(=vSdF8wi@mf98=IV6I`2CAlWHQVc66wyXophraJW>d7 zG0~GJPfAEoJ~|eSBlCe!aVY$2@a1jN>bLC3$v7tb;5A<9kW1{KkXwoqhNaE~hW#tn z&c=iGc?L=Dk>?U*-KGBS8*xr`EzpPh77@^=FxOqp#|CkWsBZa3^YIRSQuo_B6u$}1 z)k6ulJ{^I~CD6maQfGCFs)B`h3~eSE2z|yx+@Y^vrO-eDyznnk{%HnI&=_lD<12b`{318`&)kE1yBx@;?yJv_?mW#d# zf>`Dc7p#1=&J>A7NBtC&1Hn`WvZtdsDBddoSu+Mv(3Kt3tWxfJx0?Fr5vLML#s3YH zj6tHt?fWeJR-&t`s~xp$e&2N6w{JoJ02lLlonvfI@ce8iZfNx_lci$_8NjGSU^z*o z#;LBufr|cl5KfoOIisaxAhCsMW@Twl%EQbxrCKfu6qC{$Q(IHUvyMjO^H`j;VXF9v zuTvHA7b%S9kqd>YHnh`9zj$vc9S@TJsxyVA5-r~Z-zGCK^>SJS z?nDw(hHmAkFdd+GW)`96(Tytx<@yne{ZTG%YqGXKY0FJf5J~?>oSf4UR6rPEl!w<+ zP8%{!&{RidfY`vUX1%^FpluQO{D)|7@xY+Ec#R*V5?^&dv4}eMBZ`B^N}i6r%O!T> zw#l5ReR`xhsgaJ_0~HC>VPMp^hJj!Ye%6SZ{fLYCY%Ny~v&$Hx#% z)1(ikTPWkxIJN#qqf#r9Qpg_f>+PW*la!QHj*#@6WgrjgL|K504eA~6LYWnr_4+0z zWGaPBzwxA=?AJS-+^lpB9p2?nvCxGu#nG1%K9sHsLT!o zI5shR_Dsrj@4OEWQ(pb3zYXAJ>G3k(*nbb4P$0DE*`H!XakR+}MkqMK7(5AKUYnE{ znM7YkJg07s#6ZGc%y`*@1&q2vCYDSkk>T8!lC<8g?8@rQ4XX0{mr5V(5Ny~hbPJ-w zTT9>Oz=j3rE~zdriuEWeOerdI@{;y(S&Q#K8}%O50~x>T$fxVwUoQn7&&G4r9e#PW z7|9u8*1FAN6Whd`d|PDCjh;Fsg4Y?mOQ2!H3l(Dj|Dg z0@lACB_WT1=a^CK%T)oLtk{q!omlE9Iv^K2L}SoXmaz*VKKT$|GG}qo)VuMa}kZwo^i;3^a*c7m;8DMAUA6h1;!B-EVQ zxh1YC%n*J=;^$GqXgj_qh86vjwbYL@55lO0`>B$oiI`)FJ@>F-vb&hnI>*N^6Yj=e z_@9gQ5mo4r|FR-LjTek}W`9pqR?o3;GdU<&qeY^ni@$u#SsqV~oC{x2tL(#6s z=;v9a8h*ly*#(`gUuJ11Cn7&Hu8T98Bw=DeHN%H8qu+SF8{mO_DC$O_NjV#)0;>HF z5s*sk1sx?Jf>E`;bmU3YGIb?0S)8nV%!|>Oaf3$#COE?de3Di0&ivz|`noYiK9goz z>hrfNkrW{}k4y=g|=h3*g9c$S75TiI1?aFFS0%)Rk;|EOBMo67_NQ_YIBzU1&iJ`OI71Y1OyW z%5Pj8jv4^n(I0ToAcvVnL-VvF(6Cj!#Pt(UB7TOpc4$znxumgG zsvPG@f0>Q{W&{ZpD;*&i`n2=oN^A**U*Gg6wu!!l0EiAvS^sO8W-un+=epmV?9URw z0LZoaP!n?^_gSoUcVh*yr~sQIHudhAOpr=SVd7R8^Le1>!A@r_^>vviJLnmy<|kSt z@9o`+d;7`$dzzmjLBXeD0)sLW@JlOpd*-&N z_Z{%(kgKBCI)m+O-xSF%)^fc%BdH1`+^Ol2fZIq~X{7q2>XZt9vZL;vuxhyQTcH6t z)X)(Gdc;hu0xxE$L6An)I`Jo}x}4g@&Cr<|Nv9u8KZMJHe6H7mg9&NJgJ&CrVc-K3 zgp|_FVTyS5QQg_O+H9``+e?>Wn37$(!)M5(-&{AbSxC&M=YiJ~MR(Hxrf1AYZ5v3* zPY*N2$t}XHDJx}+4Sm?cm%*3YQgF4Jk zFcLL{?l;az^U}SSe0)` z&%;Qe6SOWx9?n508OST;^N$O<@e1%s0^WYG>kM(=>7MGGH z>+HUX!hzXCr+xy79{*X|h~N!t_L(eXsQ`P-j<`g096rST#1FkLaK)AOs~@{yhofZc zpI6>Ns&$K>^0)t*6_4Xd(z+Hy!|(RB!;ygBrXOqBeXa?Zz^9%(crgtYW(5Cp zH3roT+sBYIMmCrdBL!^(_v~>6r2!g9(yrg5*9;xhU;HfX2ZDhH2uu8! 
zBtT%gPzIY$S#z3u!{vfCzSBFnC8>hM!bHANT4Jq{@%xaZ5RaKDX3J1)LL zd)Uk}>kg|t-YCG+#04(_9srI%9*_)=s*^EhYM~R~WoNV!CMt;l@*V*7Y}r!A5d~#s z-my&fnrN?MqBEo(`nQtQ5dT~MlaeWrHs@YX=h`jvAkXSTi;(MGcdZ3N8bR;)0+DjTW#KZ5 z5jhenHit_t*3I)e-(|D4;26i397VC2JUxv#q=2*6ADcmb&Qh4?nHqD!gvKrauG<-c(ZE_h`ryGOV5mt<=Ai!<#prXe#v1fiOj&|9=Ny( z@gBs87{h}jN<5|9U{u?{rrAtP!9l~X6nI3LWGYT!UB5PPnVeIzk?wvg*PvrPl|B(!6MSL!tOHxVEx zRCu2oCRm9`!bFz+@8P2Vl>Gjm0v}+mG^CJA1G_g|247I+>aMK@*jp$St#FWl?WSb|3Kbm`?JJP9FAvZmhbZiz zS>IF@;ArL`PYzdYJ%OD2JnwXPi;!Et<|Kt)w{68_aUV@FPJZ-9OkXR-+R0=OOrP&A z@0Ih$;?$44XYaAaa;DXz*HKc?LkkXK*}dH`Nr9p;!IwqVyk)~Tp6g3gHJrJjXxa2% z^P*aR(Pbo)tEp)G2DVfMxS8P!SY+}yB`S>^zk%&N1lTGEOfF1?=DWqe0n80j*j(Ev zxT59I!x^BM=KR6Y;Hs#???vB!T8pgjGP62AdnS~e<`ee{2c1Oc+v|%Bs+s+zS%E)< z2-xFRpxJcykZkHGx$Ia2gxlh_(Q^0&&KC>kTz4uIBJUWjQ53r0vw7cLSQNRDajKFc z_b7{e=pRyX+LnI8{ZQbnP3;tmplWJ_gAT_O`3b{{9HAhdMA$;y+J{R<$1kJ0m98eT zJ*KKqXH&d8>WxbF)8ItfU@r7Dm3+#ZCNa;PqkN{K_4W!SH;E@Xr$qU#IJOwlL|$L!RIWV*cmdU|+KJdO5^gzbtGEP}(c=8Rxa3^T5L zkpBh!`HCdO0Wq{CcU1KM=sWz^TpDEK`5T1&RWRwo&3T-A zVkzxPlowS70n>2+*C!(~LFiJWt_kr>&g{i1+VI^ia8oyT2zgE-6zq4U5p!jB(Hyp# zwcJ6{5wDWyuLnlPh%x*AOpq z&m~|*nxWe-#PxL-e-(JMC)b;SAg4qDFnofPGE;3A7WGy{`Dr zd(9;4*fp?G2HY9=Es1?1w$NQ^$p81h|3@F-{wuEj-E8XJU+qA@d;5i16kqB8n=5b> z^5e2Gt;{idk4>q`qA{bXmVb+L{wFmp#S0;~gw`T8Q2(v1^RMLbR2|N6{31PFQ7&8- zFz3=S&Gz_KsS3xC+5+Z*VEHC7eVBe=o6@8A8zl!2VMnQiE01%gw~Ve07U|@PM2P{gjVdEcKPa9 zc)ZuIk2zO4OXUU05TUXTM3!c|-Qkv@rTtBPpDn@52ogm4^+B6_RNnOz!+3gXD|W=D zYh)dLHRs~VZpj#F!Ud9@*ghKje>jJK)4-&B;|`=JXNyTZ&24>XHL{SAT@R7EerpYX zX$F}+$U?A0*!26=wP*^+S8ix^|I>ji_=med{xH+$k4gp6$hg*^UI3((?n)sEI0UaC OFY(VZ!li 150*150 -# less encoder layers: 6 -> 3 -# smaller input size: 1600*900 -> (1600*900)*0.8 -# multi-scale feautres -> single scale features (C5) -# with_cp of backbone = True - -_base_ = [ - '../datasets/custom_nus-3d.py', - '../_base_/default_runtime.py' -] -# -plugin = True -plugin_dir = 'projects/mmdet3d_plugin/' - -# If point cloud range is changed, the models should also change their point -# cloud range accordingly -point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0] -voxel_size = [0.2, 0.2, 8] - - -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -# For nuScenes we usually do 10-class detection -class_names = [ - 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', - 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' -] - -input_modality = dict( - use_lidar=False, - use_camera=True, - use_radar=False, - use_map=False, - use_external=True) - -_dim_ = 256 -_pos_dim_ = _dim_//2 -_ffn_dim_ = _dim_*2 -_num_levels_ = 1 -bev_h_ = 150 -bev_w_ = 150 -queue_length = 3 # each sequence contains `queue_length` frames. 
- -model = dict( - type='BEVFormer', - use_grid_mask=True, - video_test_mode=True, - img_backbone=dict( - type='ResNet', - depth=101, - num_stages=4, - out_indices=(3,), - frozen_stages=1, - norm_cfg=dict(type='BN2d', requires_grad=False), - norm_eval=True, - style='caffe', - with_cp=True, # using checkpoint to save GPU memory - dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), # original DCNv2 will print log when perform load_state_dict - stage_with_dcn=(False, False, True, True)), - img_neck=dict( - type='FPN', - in_channels=[2048], - out_channels=_dim_, - start_level=0, - add_extra_convs='on_output', - num_outs=_num_levels_, - relu_before_extra_convs=True), - pts_bbox_head=dict( - type='BEVFormerHead', - bev_h=bev_h_, - bev_w=bev_w_, - num_query=900, - num_classes=10, - in_channels=_dim_, - sync_cls_avg_factor=True, - with_box_refine=True, - as_two_stage=False, - transformer=dict( - type='PerceptionTransformer', - rotate_prev_bev=True, - use_shift=True, - use_can_bus=True, - embed_dims=_dim_, - encoder=dict( - type='BEVFormerEncoder', - num_layers=3, - pc_range=point_cloud_range, - num_points_in_pillar=4, - return_intermediate=False, - transformerlayers=dict( - type='BEVFormerLayer', - attn_cfgs=[ - dict( - type='TemporalSelfAttention', - embed_dims=_dim_, - num_levels=1), - dict( - type='SpatialCrossAttention', - pc_range=point_cloud_range, - deformable_attention=dict( - type='MSDeformableAttention3D', - embed_dims=_dim_, - num_points=8, - num_levels=_num_levels_), - embed_dims=_dim_, - ) - ], - feedforward_channels=_ffn_dim_, - ffn_dropout=0.1, - operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm'))), - decoder=dict( - type='DetectionTransformerDecoder', - num_layers=6, - return_intermediate=True, - transformerlayers=dict( - type='DetrTransformerDecoderLayer', - attn_cfgs=[ - dict( - type='MultiheadAttention', - embed_dims=_dim_, - num_heads=8, - dropout=0.1), - dict( - type='CustomMSDeformableAttention', - embed_dims=_dim_, - num_levels=1), - ], - - feedforward_channels=_ffn_dim_, - ffn_dropout=0.1, - operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm')))), - bbox_coder=dict( - type='NMSFreeCoder', - post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], - pc_range=point_cloud_range, - max_num=300, - voxel_size=voxel_size, - num_classes=10), - positional_encoding=dict( - type='LearnedPositionalEncoding', - num_feats=_pos_dim_, - row_num_embed=bev_h_, - col_num_embed=bev_w_, - ), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=2.0), - loss_bbox=dict(type='L1Loss', loss_weight=0.25), - loss_iou=dict(type='GIoULoss', loss_weight=0.0)), - # model training and testing settings - train_cfg=dict(pts=dict( - grid_size=[512, 512, 1], - voxel_size=voxel_size, - point_cloud_range=point_cloud_range, - out_size_factor=4, - assigner=dict( - type='HungarianAssigner3D', - cls_cost=dict(type='FocalLossCost', weight=2.0), - reg_cost=dict(type='BBox3DL1Cost', weight=0.25), - iou_cost=dict(type='IoUCost', weight=0.0), # Fake cost. This is just to make it compatible with DETR head. 
- pc_range=point_cloud_range)))) - -dataset_type = 'CustomNuScenesDataset' -data_root = 'data/nuscenes/' -file_client_args = dict(backend='disk') - - -train_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='PhotoMetricDistortionMultiViewImage'), - dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), - dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), - dict(type='ObjectNameFilter', classes=class_names), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - dict(type='RandomScaleImageMultiViewImage', scales=[0.8]), - dict(type='PadMultiViewImage', size_divisor=32), - dict(type='DefaultFormatBundle3D', class_names=class_names), - dict(type='CustomCollect3D', keys=['gt_bboxes_3d', 'gt_labels_3d', 'img']) -] - -test_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - # dict(type='PadMultiViewImage', size_divisor=32), - dict( - type='MultiScaleFlipAug3D', - img_scale=(1600, 900), - pts_scale_ratio=1, - flip=False, - transforms=[ - dict(type='RandomScaleImageMultiViewImage', scales=[0.8]), - dict(type='PadMultiViewImage', size_divisor=32), - dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='CustomCollect3D', keys=['img']) - ]) -] - -data = dict( - samples_per_gpu=1, - workers_per_gpu=4, - train=dict( - type=dataset_type, - data_root=data_root, - ann_file=data_root + 'nuscenes_infos_temporal_train.pkl', - pipeline=train_pipeline, - classes=class_names, - modality=input_modality, - test_mode=False, - use_valid_flag=True, - bev_size=(bev_h_, bev_w_), - queue_length=queue_length, - # we use box_type_3d='LiDAR' in kitti and nuscenes dataset - # and box_type_3d='Depth' in sunrgbd and scannet dataset. 
- box_type_3d='LiDAR'), - val=dict(type=dataset_type, - data_root=data_root, - ann_file=data_root + 'nuscenes_infos_temporal_val.pkl', - pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality, samples_per_gpu=1), - test=dict(type=dataset_type, - data_root=data_root, - ann_file=data_root + 'nuscenes_infos_temporal_val.pkl', - pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality), - shuffler_sampler=dict(type='DistributedGroupSampler'), - nonshuffler_sampler=dict(type='DistributedSampler') -) - -optimizer = dict( - type='AdamW', - lr=2e-4, - paramwise_cfg=dict( - custom_keys={ - 'img_backbone': dict(lr_mult=0.1), - }), - weight_decay=0.01) - -optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) -# learning policy -lr_config = dict( - policy='CosineAnnealing', - warmup='linear', - warmup_iters=500, - warmup_ratio=1.0 / 3, - min_lr_ratio=1e-3) -total_epochs = 24 -evaluation = dict(interval=1, pipeline=test_pipeline) - -runner = dict(type='EpochBasedRunner', max_epochs=total_epochs) -load_from = 'ckpts/r101_dcn_fcos3d_pretrain.pth' -log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook'), - dict(type='TensorboardLoggerHook') - ]) - -checkpoint_config = dict(interval=1) diff --git a/projects/configs/bevformer/bevformer_tiny.py b/projects/configs/bevformer/bevformer_tiny.py deleted file mode 100644 index 78858ee..0000000 --- a/projects/configs/bevformer/bevformer_tiny.py +++ /dev/null @@ -1,270 +0,0 @@ -# BEvFormer-tiny consumes at lease 6700M GPU memory -# compared to bevformer_base, bevformer_tiny has -# smaller backbone: R101-DCN -> R50 -# smaller BEV: 200*200 -> 50*50 -# less encoder layers: 6 -> 3 -# smaller input size: 1600*900 -> 800*450 -# multi-scale feautres -> single scale features (C5) - - -_base_ = [ - '../datasets/custom_nus-3d.py', - '../_base_/default_runtime.py' -] -# -plugin = True -plugin_dir = 'projects/mmdet3d_plugin/' - -# If point cloud range is changed, the models should also change their point -# cloud range accordingly -point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0] -voxel_size = [0.2, 0.2, 8] - - - - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -# For nuScenes we usually do 10-class detection -class_names = [ - 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', - 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' -] - -input_modality = dict( - use_lidar=False, - use_camera=True, - use_radar=False, - use_map=False, - use_external=True) - -_dim_ = 256 -_pos_dim_ = _dim_//2 -_ffn_dim_ = _dim_*2 -_num_levels_ = 1 -bev_h_ = 50 -bev_w_ = 50 -queue_length = 3 # each sequence contains `queue_length` frames. 
- -model = dict( - type='BEVFormer', - use_grid_mask=True, - video_test_mode=True, - pretrained=dict(img='torchvision://resnet50'), - img_backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(3,), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), - norm_eval=True, - style='pytorch'), - img_neck=dict( - type='FPN', - in_channels=[2048], - out_channels=_dim_, - start_level=0, - add_extra_convs='on_output', - num_outs=_num_levels_, - relu_before_extra_convs=True), - pts_bbox_head=dict( - type='BEVFormerHead', - bev_h=bev_h_, - bev_w=bev_w_, - num_query=900, - num_classes=10, - in_channels=_dim_, - sync_cls_avg_factor=True, - with_box_refine=True, - as_two_stage=False, - transformer=dict( - type='PerceptionTransformer', - rotate_prev_bev=True, - use_shift=True, - use_can_bus=True, - embed_dims=_dim_, - encoder=dict( - type='BEVFormerEncoder', - num_layers=3, - pc_range=point_cloud_range, - num_points_in_pillar=4, - return_intermediate=False, - transformerlayers=dict( - type='BEVFormerLayer', - attn_cfgs=[ - dict( - type='TemporalSelfAttention', - embed_dims=_dim_, - num_levels=1), - dict( - type='SpatialCrossAttention', - pc_range=point_cloud_range, - deformable_attention=dict( - type='MSDeformableAttention3D', - embed_dims=_dim_, - num_points=8, - num_levels=_num_levels_), - embed_dims=_dim_, - ) - ], - feedforward_channels=_ffn_dim_, - ffn_dropout=0.1, - operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm'))), - decoder=dict( - type='DetectionTransformerDecoder', - num_layers=6, - return_intermediate=True, - transformerlayers=dict( - type='DetrTransformerDecoderLayer', - attn_cfgs=[ - dict( - type='MultiheadAttention', - embed_dims=_dim_, - num_heads=8, - dropout=0.1), - dict( - type='CustomMSDeformableAttention', - embed_dims=_dim_, - num_levels=1), - ], - - feedforward_channels=_ffn_dim_, - ffn_dropout=0.1, - operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm')))), - bbox_coder=dict( - type='NMSFreeCoder', - post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], - pc_range=point_cloud_range, - max_num=300, - voxel_size=voxel_size, - num_classes=10), - positional_encoding=dict( - type='LearnedPositionalEncoding', - num_feats=_pos_dim_, - row_num_embed=bev_h_, - col_num_embed=bev_w_, - ), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=2.0), - loss_bbox=dict(type='L1Loss', loss_weight=0.25), - loss_iou=dict(type='GIoULoss', loss_weight=0.0)), - # model training and testing settings - train_cfg=dict(pts=dict( - grid_size=[512, 512, 1], - voxel_size=voxel_size, - point_cloud_range=point_cloud_range, - out_size_factor=4, - assigner=dict( - type='HungarianAssigner3D', - cls_cost=dict(type='FocalLossCost', weight=2.0), - reg_cost=dict(type='BBox3DL1Cost', weight=0.25), - iou_cost=dict(type='IoUCost', weight=0.0), # Fake cost. This is just to make it compatible with DETR head. 
- pc_range=point_cloud_range)))) - -dataset_type = 'CustomNuScenesDataset' -data_root = 'data/nuscenes/' -file_client_args = dict(backend='disk') - - -train_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='PhotoMetricDistortionMultiViewImage'), - dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), - dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), - dict(type='ObjectNameFilter', classes=class_names), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), - dict(type='PadMultiViewImage', size_divisor=32), - dict(type='DefaultFormatBundle3D', class_names=class_names), - dict(type='CustomCollect3D', keys=['gt_bboxes_3d', 'gt_labels_3d', 'img']) -] - -test_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - - dict( - type='MultiScaleFlipAug3D', - img_scale=(1600, 900), - pts_scale_ratio=1, - flip=False, - transforms=[ - dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), - dict(type='PadMultiViewImage', size_divisor=32), - dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='CustomCollect3D', keys=['img']) - ]) -] - -data = dict( - samples_per_gpu=1, - workers_per_gpu=4, - train=dict( - type=dataset_type, - data_root=data_root, - ann_file=data_root + 'nuscenes_infos_temporal_train.pkl', - pipeline=train_pipeline, - classes=class_names, - modality=input_modality, - test_mode=False, - use_valid_flag=True, - bev_size=(bev_h_, bev_w_), - queue_length=queue_length, - # we use box_type_3d='LiDAR' in kitti and nuscenes dataset - # and box_type_3d='Depth' in sunrgbd and scannet dataset. 
- box_type_3d='LiDAR'), - val=dict(type=dataset_type, - data_root=data_root, - ann_file=data_root + 'nuscenes_infos_temporal_val.pkl', - pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality, samples_per_gpu=1), - test=dict(type=dataset_type, - data_root=data_root, - ann_file=data_root + 'nuscenes_infos_temporal_val.pkl', - pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality), - shuffler_sampler=dict(type='DistributedGroupSampler'), - nonshuffler_sampler=dict(type='DistributedSampler') -) - -optimizer = dict( - type='AdamW', - lr=2e-4, - paramwise_cfg=dict( - custom_keys={ - 'img_backbone': dict(lr_mult=0.1), - }), - weight_decay=0.01) - -optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) -# learning policy -lr_config = dict( - policy='CosineAnnealing', - warmup='linear', - warmup_iters=500, - warmup_ratio=1.0 / 3, - min_lr_ratio=1e-3) -total_epochs = 24 -evaluation = dict(interval=1, pipeline=test_pipeline) - -runner = dict(type='EpochBasedRunner', max_epochs=total_epochs) - -log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook'), - dict(type='TensorboardLoggerHook') - ]) - -checkpoint_config = dict(interval=1) diff --git a/projects/configs/bevformer_fp16/bevformer_tiny_fp16.py b/projects/configs/bevformer_fp16/bevformer_tiny_fp16.py deleted file mode 100644 index aa1e043..0000000 --- a/projects/configs/bevformer_fp16/bevformer_tiny_fp16.py +++ /dev/null @@ -1,272 +0,0 @@ -# BEvFormer-tiny consumes at lease 6700M GPU memory -# compared to bevformer_base, bevformer_tiny has -# smaller backbone: R101-DCN -> R50 -# smaller BEV: 200*200 -> 50*50 -# less encoder layers: 6 -> 3 -# smaller input size: 1600*900 -> 800*450 -# multi-scale feautres -> single scale features (C5) - - -_base_ = [ - '../datasets/custom_nus-3d.py', - '../_base_/default_runtime.py' -] -# -plugin = True -plugin_dir = 'projects/mmdet3d_plugin/' - -# If point cloud range is changed, the models should also change their point -# cloud range accordingly -point_cloud_range = [-51.2, -51.2, -5.0, 51.2, 51.2, 3.0] -voxel_size = [0.2, 0.2, 8] - - - - -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) - -# For nuScenes we usually do 10-class detection -class_names = [ - 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', - 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' -] - -input_modality = dict( - use_lidar=False, - use_camera=True, - use_radar=False, - use_map=False, - use_external=True) - -_dim_ = 256 -_pos_dim_ = _dim_//2 -_ffn_dim_ = _dim_*2 -_num_levels_ = 1 -bev_h_ = 50 -bev_w_ = 50 -queue_length = 3 # each sequence contains `queue_length` frames. 
- -model = dict( - type='BEVFormer_fp16', - use_grid_mask=True, - video_test_mode=True, - pretrained=dict(img='torchvision://resnet50'), - img_backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(3,), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), - norm_eval=True, - style='pytorch'), - img_neck=dict( - type='FPN', - in_channels=[2048], - out_channels=_dim_, - start_level=0, - add_extra_convs='on_output', - num_outs=_num_levels_, - relu_before_extra_convs=True), - pts_bbox_head=dict( - type='BEVFormerHead', - bev_h=bev_h_, - bev_w=bev_w_, - num_query=900, - num_classes=10, - in_channels=_dim_, - sync_cls_avg_factor=True, - with_box_refine=True, - as_two_stage=False, - transformer=dict( - type='PerceptionTransformer', - rotate_prev_bev=True, - use_shift=True, - use_can_bus=True, - embed_dims=_dim_, - encoder=dict( - type='BEVFormerEncoder', - num_layers=3, - pc_range=point_cloud_range, - num_points_in_pillar=4, - return_intermediate=False, - transformerlayers=dict( - type='BEVFormerLayer', - attn_cfgs=[ - dict( - type='TemporalSelfAttention', - embed_dims=_dim_, - num_levels=1), - dict( - type='SpatialCrossAttention', - pc_range=point_cloud_range, - deformable_attention=dict( - type='MSDeformableAttention3D', - embed_dims=_dim_, - num_points=8, - num_levels=_num_levels_), - embed_dims=_dim_, - ) - ], - feedforward_channels=_ffn_dim_, - ffn_dropout=0.1, - operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm'))), - decoder=dict( - type='DetectionTransformerDecoder', - num_layers=6, - return_intermediate=True, - transformerlayers=dict( - type='DetrTransformerDecoderLayer', - attn_cfgs=[ - dict( - type='MultiheadAttention', - embed_dims=_dim_, - num_heads=8, - dropout=0.1), - dict( - type='CustomMSDeformableAttention', - embed_dims=_dim_, - num_levels=1), - ], - - feedforward_channels=_ffn_dim_, - ffn_dropout=0.1, - operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm')))), - bbox_coder=dict( - type='NMSFreeCoder', - post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], - pc_range=point_cloud_range, - max_num=300, - voxel_size=voxel_size, - num_classes=10), - positional_encoding=dict( - type='LearnedPositionalEncoding', - num_feats=_pos_dim_, - row_num_embed=bev_h_, - col_num_embed=bev_w_, - ), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=2.0), - loss_bbox=dict(type='L1Loss', loss_weight=0.25), - loss_iou=dict(type='GIoULoss', loss_weight=0.0)), - # model training and testing settings - train_cfg=dict(pts=dict( - grid_size=[512, 512, 1], - voxel_size=voxel_size, - point_cloud_range=point_cloud_range, - out_size_factor=4, - assigner=dict( - type='HungarianAssigner3D', - cls_cost=dict(type='FocalLossCost', weight=2.0), - reg_cost=dict(type='BBox3DL1Cost', weight=0.25), - iou_cost=dict(type='IoUCost', weight=0.0), # Fake cost. This is just to make it compatible with DETR head. 
- pc_range=point_cloud_range)))) - -dataset_type = 'CustomNuScenesDataset' -data_root = 'data/nuscenes/' -file_client_args = dict(backend='disk') - - -train_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='PhotoMetricDistortionMultiViewImage'), - dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), - dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), - dict(type='ObjectNameFilter', classes=class_names), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), - dict(type='PadMultiViewImage', size_divisor=32), - dict(type='DefaultFormatBundle3D', class_names=class_names), - dict(type='CustomCollect3D', keys=['gt_bboxes_3d', 'gt_labels_3d', 'img']) -] - -test_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - - dict( - type='MultiScaleFlipAug3D', - img_scale=(1600, 900), - pts_scale_ratio=1, - flip=False, - transforms=[ - dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), - dict(type='PadMultiViewImage', size_divisor=32), - dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='CustomCollect3D', keys=['img']) - ]) -] - -data = dict( - samples_per_gpu=2, - workers_per_gpu=8, - train=dict( - type=dataset_type, - data_root=data_root, - ann_file=data_root + 'nuscenes_infos_temporal_train.pkl', - pipeline=train_pipeline, - classes=class_names, - modality=input_modality, - test_mode=False, - use_valid_flag=True, - bev_size=(bev_h_, bev_w_), - queue_length=queue_length, - # we use box_type_3d='LiDAR' in kitti and nuscenes dataset - # and box_type_3d='Depth' in sunrgbd and scannet dataset. - box_type_3d='LiDAR'), - val=dict(type=dataset_type, - data_root=data_root, - ann_file=data_root + 'nuscenes_infos_temporal_val.pkl', - pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality, samples_per_gpu=1), - test=dict(type=dataset_type, - data_root=data_root, - ann_file=data_root + 'nuscenes_infos_temporal_val.pkl', - pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality), - shuffler_sampler=dict(type='DistributedGroupSampler'), - nonshuffler_sampler=dict(type='DistributedSampler') -) - -optimizer = dict( - type='AdamW', - lr=2.8e-4, - paramwise_cfg=dict( - custom_keys={ - 'img_backbone': dict(lr_mult=0.1), - }), - weight_decay=0.01) - -optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) -# learning policy -lr_config = dict( - policy='CosineAnnealing', - warmup='linear', - warmup_iters=500, - warmup_ratio=1.0 / 3, - min_lr_ratio=1e-3) -total_epochs = 24 -evaluation = dict(interval=1, pipeline=test_pipeline) - -runner = dict(type='EpochBasedRunner_video', max_epochs=total_epochs) - -log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook'), - dict(type='TensorboardLoggerHook') - ]) - -fp16 = dict(loss_scale=512.) 
-checkpoint_config = dict(interval=1) -custom_hooks = [dict(type='TransferWeight',priority='LOWEST')] \ No newline at end of file diff --git a/projects/configs/cvtocc/bevformer_nuscenes.py b/projects/configs/cvtocc/bevformer_nuscenes.py new file mode 100644 index 0000000..ffa30b5 --- /dev/null +++ b/projects/configs/cvtocc/bevformer_nuscenes.py @@ -0,0 +1,423 @@ +_base_ = ['../datasets/custom_nus-3d.py', '../_base_/default_runtime.py'] +plugin = True +plugin_dir = 'projects/mmdet3d_plugin/' +point_cloud_range = [-40, -40, -1.0, 40, 40, 5.4] +voxel_size = [0.4, 0.4, 0.4] +num_classes = 18 +img_norm_cfg = dict(mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +class_names = [ + 'car', + 'truck', + 'construction_vehicle', + 'bus', + 'trailer', + 'barrier', + 'motorcycle', + 'bicycle', + 'pedestrian', + 'traffic_cone', +] + +CLASS_NAMES = [ + 'others', + 'barrier', + 'bicycle', + 'bus', + 'car', + 'construction_vehicle', + 'motorcycle', + 'pedestrian', + 'traffic_cone', + 'trailer', + 'truck', + 'driveable_surface', + 'other_flat', + 'sidewalk', + 'terrain', + 'manmade', + 'vegetation', + 'free', +] + +class_weight_multiclass = [ + 1.552648813025149, + 1.477680635715412, + 1.789915946148316, + 1.454376653104962, + 1.283242744137921, + 1.583160056748120, + 1.758171915228669, + 1.468604241657418, + 1.651769160217543, + 1.454675968105020, + 1.369895420004945, + 1.125140370991227, + 1.399044660772846, + 1.203105344914611, + 1.191157881795851, + 1.155987296237377, + 1.150134564832974, + 1.000000000000000, +] + +input_modality = dict( + use_lidar=False, + use_camera=True, + use_radar=False, + use_map=False, + use_external=True +) + +_dim_ = 256 +_pos_dim_ = _dim_ // 2 +_ffn_dim_ = _dim_ * 2 +_num_levels_ = 4 +bev_h_ = 200 +bev_w_ = 200 +pillar_h = 16 +channels = 16 +queue_length = 7 +use_padding = False +use_temporal = None +scales = None +use_camera_mask = True +use_lidar_mask = False +use_refine_feat_loss = False +refine_feat_loss_weight = 10 + +use_temporal_self_attention = True +if use_temporal_self_attention: + attn_cfgs = [ + dict(type='TemporalSelfAttention', embed_dims=_dim_, num_levels=1), + dict( + type='SpatialCrossAttention', + pc_range=point_cloud_range, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=_dim_, + num_points=8, + num_levels=_num_levels_, + ), + embed_dims=_dim_, + ), + ] + operation_order = ('self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm') +else: + attn_cfgs = [ + dict( + type='SpatialCrossAttention', + pc_range=point_cloud_range, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=_dim_, + num_points=8, + num_levels=_num_levels_, + ), + embed_dims=_dim_, + ) + ] + operation_order = ('cross_attn', 'norm', 'ffn', 'norm') + +model = dict( + type='CVTOcc', + use_grid_mask=True, + video_test_mode=True, + queue_length=queue_length, + save_results=False, + img_backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN2d', requires_grad=False), + norm_eval=True, + style='caffe', + dcn=dict( + type='DCNv2', deform_groups=1, fallback_on_stride=False + ), # original DCNv2 will print log when perform load_state_dict + stage_with_dcn=(False, False, True, True), + ), + img_neck=dict( + type='FPN', + in_channels=[512, 1024, 2048], + out_channels=_dim_, + start_level=0, + add_extra_convs='on_output', + num_outs=4, + relu_before_extra_convs=True, + ), + pts_bbox_head=dict( + type='CVTOccHead', + bev_h=bev_h_, + bev_w=bev_w_, + 
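+        # CVTOccHead predicts per-voxel semantic logits over the bev_h x bev_w x pillar_h
+        # grid; use_camera_mask restricts the occupancy loss to voxels flagged visible in
+        # mask_camera, following the Occ3D-nuScenes evaluation protocol.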
num_classes=num_classes, + in_channels=_dim_, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + use_camera_mask=use_camera_mask, + use_lidar_mask=use_lidar_mask, + use_free_mask=False, + use_focal_loss=False, + use_refine_feat_loss=use_refine_feat_loss, + refine_feat_loss_weight=refine_feat_loss_weight, + loss_occ=dict( + type='CrossEntropyLoss', + # class_weight=class_weight_multiclass, + use_sigmoid=False, + loss_weight=1.0, + ), + transformer=dict( + type='CVTOccTransformer', + pillar_h=pillar_h, + num_classes=num_classes, + bev_h=bev_h_, + bev_w=bev_w_, + channels=channels, + pc_range=point_cloud_range, + voxel_size=voxel_size, + norm_cfg=dict(type='BN',), + norm_cfg_3d=dict(type='BN2d',), + use_3d=False, + use_conv=False, + rotate_prev_bev=False, + use_shift=False, + use_can_bus=False, + embed_dims=_dim_, + queue_length=queue_length, + use_padding=use_padding, + use_temporal=use_temporal, + scales=scales, + encoder=dict( + type='BEVFormerEncoder', + bev_h=bev_h_, + bev_w=bev_w_, + num_layers=6, + pc_range=point_cloud_range, + num_points_in_pillar=4, + return_intermediate=False, + transformerlayers=dict( + type='BEVFormerLayer', + bev_h=bev_h_, + bev_w=bev_w_, + attn_cfgs=attn_cfgs, + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=operation_order, + ), + ), + ), + positional_encoding=dict( + type='LearnedPositionalEncoding', + num_feats=_pos_dim_, + row_num_embed=bev_h_, + col_num_embed=bev_w_, + ), + bbox_coder=dict( + type='NMSFreeCoder', + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + pc_range=point_cloud_range, + max_num=300, + voxel_size=voxel_size, + num_classes=10, + ), + loss_cls=dict( + type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type='L1Loss', loss_weight=0.25), + loss_iou=dict(type='GIoULoss', loss_weight=0.0), + ), + # model training and testing settings + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type='HungarianAssigner3D', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBox3DL1Cost', weight=0.25), + iou_cost=dict(type='IoUCost', weight=0.0), + pc_range=point_cloud_range, + ), + ) + ), +) + +data_root = './data/occ3d-nus/' +img_scales = [1.0] +dataset_type = 'NuSceneOcc' +file_client_args = dict(backend='disk') + +train_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='LoadOccGTFromFileNuScenes', data_root=data_root), + dict(type='PhotoMetricDistortionMultiViewImage'), + dict(type='RandomScaleImageMultiViewImage', scales=img_scales), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False, + ), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict( + type='CustomCollect3D', + keys=[ + 'img', + 'voxel_semantics', + 'mask_lidar', + 'mask_camera', + ], + meta_keys=( + 'filename', + 'pts_filename', + 'occ_gt_path', + 'scene_token', + 'frame_idx', + 'scene_idx', + 'sample_idx', + 'ori_shape', + 'img_shape', + 'pad_shape', + 'lidar2img', + 'ego2lidar', + 'ego2global', + 'cam_intrinsic', + 'lidar2cam', + 'cam2img', + 'can_bus', + ), + ), +] + +test_pipeline = [ + 
dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='LoadOccGTFromFileNuScenes', data_root=data_root), + dict(type='RandomScaleImageMultiViewImage', scales=img_scales), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1600, 900), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='DefaultFormatBundle3D', class_names=class_names, with_label=False + ), + dict( + type='CustomCollect3D', + keys=[ + 'img', + 'voxel_semantics', + 'mask_lidar', + 'mask_camera', + ], + meta_keys=( + 'filename', + 'pts_filename', + 'occ_gt_path', + 'scene_token', + 'frame_idx', + 'scene_idx', + 'sample_idx', + 'ori_shape', + 'img_shape', + 'pad_shape', + 'lidar2img', + 'ego2lidar', + 'ego2global', + 'cam_intrinsic', + 'lidar2cam', + 'cam2img', + 'can_bus', + ), + ), + ], + ), +] + +data = dict( + samples_per_gpu=1, + workers_per_gpu=16, + train=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'occ_infos_temporal_train.pkl', + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + use_valid_flag=True, + bev_size=(bev_h_, bev_w_), + queue_length=queue_length, + box_type_3d='LiDAR', + ), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'occ_infos_temporal_val.pkl', + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + # below are evaluation settings + point_cloud_range=point_cloud_range, + voxel_size=voxel_size, + CLASS_NAMES=CLASS_NAMES, + ), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'occ_infos_temporal_val.pkl', + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + # below are evaluation settings + point_cloud_range=point_cloud_range, + voxel_size=voxel_size, + CLASS_NAMES=CLASS_NAMES, + ), + shuffler_sampler=dict(type='DistributedGroupSampler'), + nonshuffler_sampler=dict(type='DistributedSampler'), +) + +optimizer = dict( + type='AdamW', + lr=2e-4, + paramwise_cfg=dict( + custom_keys={ + 'img_backbone': dict(lr_mult=0.1), + } + ), + weight_decay=0.01, +) + +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +lr_config = dict( + policy='CosineAnnealing', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + min_lr_ratio=1e-3, +) +total_epochs = 24 +evaluation = dict(interval=total_epochs, pipeline=test_pipeline) + +runner = dict(type='EpochBasedRunner', max_epochs=total_epochs) +load_from = 'ckpts/r101_dcn_fcos3d_pretrain.pth' +log_config = dict( + interval=50, hooks=[dict(type='TextLoggerHook'), dict(type='TensorboardLoggerHook')] +) + +checkpoint_config = dict(interval=3) +find_unused_parameters = True diff --git a/projects/configs/cvtocc/bevformer_waymo.py b/projects/configs/cvtocc/bevformer_waymo.py new file mode 100644 index 0000000..7582834 --- /dev/null +++ b/projects/configs/cvtocc/bevformer_waymo.py @@ -0,0 +1,537 @@ +_base_ = ['../_base_/default_runtime.py'] +plugin = True +plugin_dir = 'projects/mmdet3d_plugin/' +point_cloud_range = [-40, -40, -1.0, 40, 40, 5.4] +voxel_size = [0.4, 0.4, 0.4] +occ_voxel_size = None # useless +use_larger = True # means use 0.4 voxel size +num_classes = 16 +img_norm_cfg = dict(mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +class_names = [ + 'car', + 'truck', + 'construction_vehicle', + 'bus', + 'trailer', + 'barrier', + 
'motorcycle', + 'bicycle', + 'pedestrian', + 'traffic_cone', +] +CLASS_NAMES = [ + 'GO', + 'TYPE_VEHICLE', + 'TYPE_PEDESTRIAN', + 'TYPE_SIGN', + 'TYPE_BICYCLIST', + 'TYPE_TRAFFIC_LIGHT', + 'TYPE_POLE', + 'TYPE_CONSTRUCTION_CONE', + 'TYPE_BICYCLE', + 'TYPE_MOTORCYCLE', + 'TYPE_BUILDING', + 'TYPE_VEGETATION', + 'TYPE_TREE_TRUNK', + 'TYPE_ROAD', + 'TYPE_WALKABLE', + 'TYPE_FREE', +] + +PUJIANG = False +if PUJIANG: + # pujiang 0.4/0.1 + data_root = '/mnt/petrelfs/zhaohang.p/mmdetection/data/waymo/kitti_format/' + occ_data_root = '/mnt/petrelfs/zhaohang.p/dataset/waymo_occV2/' + +else: + # MARS 0.4/0.1 + data_root = '/public/MARS/datasets/waymo_v1.3.1_untar/kitti_format/' # replace with your won waymo image path + occ_data_root = '/public/MARS/datasets/waymo_occV2/' # replace with your won occ gt path + +ann_file = occ_data_root + 'waymo_infos_train.pkl' +val_ann_file = occ_data_root + 'waymo_infos_val.pkl' +pose_file = occ_data_root + 'cam_infos.pkl' +val_pose_file = occ_data_root + 'cam_infos_vali.pkl' +if use_larger: # use 0.4 voxel size + occ_gt_data_root = occ_data_root + 'voxel04/training/' + occ_val_gt_data_root = occ_data_root + 'voxel04/validation/' +else: + occ_gt_data_root = occ_data_root + 'voxel01/training/' + occ_val_gt_data_root = occ_data_root + 'voxel01/validation/' + +input_modality = dict( + use_lidar=False, + use_camera=True, + use_radar=False, + use_map=False, + use_external=True +) + +# mask +use_infov_mask = True +use_lidar_mask = False +use_camera_mask = True +use_CDist = False + +_dim_ = 256 +num_feats = [_dim_ // 3, _dim_ // 3, _dim_ - _dim_ // 3 - _dim_ // 3] +_ffn_dim_ = _dim_ * 2 +_num_levels_ = 4 +bev_h_ = 200 +bev_w_ = 200 +total_z = 16 +# for bev +pillar_h = 4 +num_points_in_pillar = 4 +# for volume +volume_flag = False +bev_z_ = 1 +if not volume_flag: + bev_z_ = 1 +# for decoder +use_3d_decoder = False +use_conv_decoder = True + +num_views = 5 +FREE_LABEL = 23 +# for data +load_interval = 1 +test_interval = 1 +total_epochs = 8 + +# for cost volume +use_refine_feat_loss = False +use_temporal = None +use_temporal_self_attention = True +use_padding = False +# important parameter +refine_feat_loss_weight = None +scales = None +# for interval +queue_length = 31 +input_sample_policy = { + "type": "large interval", + "interval": 5, + "number": 7, +} # only for training + +sampled_queue_length = 7 # only for costvolume +sample_num = [30, 25, 20, 15, 10, 5, 0] # only for test + +if use_temporal_self_attention: + attn_cfgs = [ + dict( + type='TemporalSelfAttention', embed_dims=_dim_, num_points=4, num_levels=1 + ), + dict( + type='SpatialCrossAttention', + num_cams=num_views, + pc_range=point_cloud_range, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=_dim_, + num_points=4, + num_levels=_num_levels_, + ), + embed_dims=_dim_, + ), + ] + operation_order = ('self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm') +else: + attn_cfgs = [ + dict( + type='SpatialCrossAttention', + num_cams=num_views, + pc_range=point_cloud_range, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=_dim_, + num_points=4, + num_levels=_num_levels_, + ), + embed_dims=_dim_, + ) + ] + operation_order = ('cross_attn', 'norm', 'ffn', 'norm') + +class_weight_binary = [5.314075572339673, 1] +class_weight_multiclass = [ + 21.996729830048952, + 7.504469780801267, + 10.597629961083673, + 12.18107968968811, + 15.143940258446506, + 13.035521328502758, + 9.861234292376812, + 13.64431851057796, + 15.121236434460473, + 21.996729830048952, + 6.201671013759701, 
+ 5.7420517938838325, + 9.768712859518626, + 3.4607400626606317, + 4.152268220983671, + 1.000000000000000, +] + +model = dict( + type='CVTOccWaymo', + use_grid_mask=False, + video_test_mode=True, + queue_length=queue_length, + sampled_queue_length=sampled_queue_length, + sample_num=sample_num, # only for test + save_results=False, # for visualization + use_temporal=use_temporal, + img_backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN2d', requires_grad=False), + norm_eval=True, + style='caffe', + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, False, True, True), + ), + img_neck=dict( + type='FPN', + in_channels=[512, 1024, 2048], + out_channels=_dim_, + start_level=0, + add_extra_convs='on_output', + num_outs=4, + relu_before_extra_convs=True, + ), + pts_bbox_head=dict( + type='CVTOccHeadWaymo', + volume_flag=volume_flag, + bev_z=bev_z_, + bev_h=bev_h_, + bev_w=bev_w_, + num_classes=num_classes, + in_channels=_dim_, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + voxel_size=voxel_size, + occ_voxel_size=occ_voxel_size, + use_larger=use_larger, + # loss_occ=dict( + # type='FocalLoss', + # use_sigmoid=False, + # gamma=2.0, + # alpha=0.25, + # loss_weight=10.0), + use_CDist=use_CDist, + CLASS_NAMES=CLASS_NAMES, + use_refine_feat_loss=use_refine_feat_loss, + refine_feat_loss_weight=refine_feat_loss_weight, + # loss_occ= dict( + # type='CrossEntropyLoss', + # use_sigmoid=False, + # loss_weight=1.0), + loss_occ=dict( + ceohem=dict( + type='CrossEntropyOHEMLoss', + # Online hard example mining cross-entropy loss + class_weight=class_weight_multiclass, + use_sigmoid=False, + use_mask=False, + loss_weight=1.0, + top_ratio=0.2, + top_weight=4.0, + ), + # lovasz=dict( + # type='LovaszLoss', + # class_weight=class_weight_multiclass, + # loss_type='multi_class', + # classes='present', + # per_image=False, + # reduction='none', + # loss_weight=1.0) + ), + transformer=dict( + type='CVTOccTransformerWaymo', + num_cams=num_views, + queue_length=queue_length, + sampled_queue_length=sampled_queue_length, + volume_flag=volume_flag, + pillar_h=pillar_h, + bev_z=bev_z_, + bev_h=bev_h_, + bev_w=bev_w_, + total_z=total_z, + scales=scales, + num_classes=num_classes, + use_3d_decoder=use_3d_decoder, + use_conv_decoder=use_conv_decoder, + rotate_prev_bev=False, + # use_shift=True, # use_can_bus is False, so use_shift will not be used + use_can_bus=False, + embed_dims=_dim_, + pc_range=point_cloud_range, + voxel_size=voxel_size, + occ_voxel_size=occ_voxel_size, + use_larger=use_larger, + use_temporal=use_temporal, + use_padding=use_padding, + encoder=dict( + type='BEVFormerEncoderWaymo', + num_layers=4, + volume_flag=volume_flag, + pc_range=point_cloud_range, + num_points_in_pillar=num_points_in_pillar, + return_intermediate=False, + bev_z=bev_z_, + bev_h=bev_h_, + bev_w=bev_w_, + total_z=total_z, + transformerlayers=dict( + type='BEVFormerLayerWaymo', + volume_flag=volume_flag, + attn_cfgs=attn_cfgs, + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + bev_z=bev_z_, + bev_h=bev_h_, + bev_w=bev_w_, + ffn_cfgs=dict( + type='FFN', + embed_dims=_dim_, + feedforward_channels=_dim_ * 4, + num_fcs=2, + ffn_drop=0.0, + act_cfg=dict(type='ReLU', inplace=True), + ), + operation_order=operation_order, + ), + ), + decoder=dict( + type='OccConvDecoder', + embed_dims=_dim_, + conv_num=3, + pillar_h=pillar_h, + conv_cfg=dict(type='Conv2d'), + norm_cfg=dict( + type='BN', + ), + 
act_cfg=dict(type='ReLU', inplace=True), + ), + ), + positional_encoding=dict( + type='LearnedPositionalEncoding3D', + num_feats=num_feats, + row_num_embed=bev_h_, + col_num_embed=bev_w_, + height_num_embed=9999, + ), + loss_cls=dict( + type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type='L1Loss', loss_weight=0.25), + loss_iou=dict(type='GIoULoss', loss_weight=0.0), + ), + # model training and testing settings + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, # it seems no use + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type='HungarianAssigner3D', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBox3DL1Cost', weight=0.25), + iou_cost=dict(type='IoUCost', weight=0.0), + pc_range=point_cloud_range, + ), + ) + ), +) + +dataset_type = 'CustomWaymoDataset_T' +file_client_args = dict(backend='disk') + +train_pipeline = [ + dict(type='MyLoadMultiViewImageFromFiles', to_float32=True, img_scale=(1280, 1920)), + dict( + type='LoadOccGTFromFileWaymo', + data_root=occ_gt_data_root, + use_larger=use_larger, + crop_x=False, + use_infov_mask=use_infov_mask, + use_camera_mask=use_camera_mask, + use_lidar_mask=use_lidar_mask, + FREE_LABEL=FREE_LABEL, + num_classes=num_classes, + ), + dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False, + ), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='PadMultiViewImage', size_divisor=32), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict( + type='CustomCollect3D', + keys=['img', 'voxel_semantics', 'valid_mask'], + meta_keys=[ + 'filename', + 'pts_filename', + 'sample_idx', + 'scene_token', + 'ori_shape', + 'img_shape', + 'pad_shape', + 'lidar2img', + 'sensor2ego', + 'cam_intrinsic', + 'ego2global', + ], + ), +] + +test_pipeline = [ + dict(type='MyLoadMultiViewImageFromFiles', to_float32=True, img_scale=(1280, 1920)), + dict( + type='LoadOccGTFromFileWaymo', + data_root=occ_val_gt_data_root, + use_larger=use_larger, + crop_x=False, + use_infov_mask=use_infov_mask, + use_camera_mask=use_camera_mask, + use_lidar_mask=use_lidar_mask, + FREE_LABEL=FREE_LABEL, + num_classes=num_classes, + ), + dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), + dict(type='PadMultiViewImage', size_divisor=32), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1, 1), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='DefaultFormatBundle3D', class_names=class_names, with_label=False + ), + dict( + type='CustomCollect3D', + keys=['img', 'voxel_semantics', 'valid_mask'], + meta_keys=[ + 'filename', + 'pts_filename', + 'sample_idx', + 'scene_token', + 'ori_shape', + 'img_shape', + 'pad_shape', + 'lidar2img', + 'sensor2ego', + 'cam_intrinsic', + 'ego2global', + ], + ), + ], + ), +] + +# class CustomWaymoDataset_T +data = dict( + samples_per_gpu=1, + workers_per_gpu=16, + train=dict( + type=dataset_type, + data_root=data_root, + load_interval=load_interval, + num_views=num_views, + split='training', + ann_file=ann_file, + pose_file=pose_file, + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + history_len=queue_length, + input_sample_policy=input_sample_policy, + box_type_3d='LiDAR', + ), + val=dict( + type=dataset_type, + data_root=data_root, + load_interval=test_interval, + 
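+        # The converted Waymo kitti_format stores validation frames under the 'training'
+        # split folder, so split='training' together with waymo_infos_val.pkl below is
+        # intentional.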
split='training', + ann_file=val_ann_file, + pose_file=val_pose_file, + num_views=num_views, + pipeline=test_pipeline, + test_mode=True, + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + # below are evaluation parameters + use_CDist=use_CDist, + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + CLASS_NAMES=CLASS_NAMES, + ), + test=dict( + type=dataset_type, + data_root=data_root, + load_interval=test_interval, + split='training', + num_views=num_views, + ann_file=val_ann_file, + pose_file=val_pose_file, + pipeline=test_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + history_len=queue_length, + box_type_3d='LiDAR', + # below are evaluation parameters + use_CDist=use_CDist, + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + CLASS_NAMES=CLASS_NAMES, + ), + shuffler_sampler=dict(type='DistributedGroupSampler'), + nonshuffler_sampler=dict(type='DistributedSampler'), +) + +optimizer = dict( + type='AdamW', + lr=4e-4, + paramwise_cfg=dict( + custom_keys={ + 'img_backbone': dict(lr_mult=0.1), + } + ), + weight_decay=0.01, +) + +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +lr_config = dict( + policy='CosineAnnealing', + warmup='linear', + warmup_iters=100, + warmup_ratio=1.0 / 3, + min_lr_ratio=1e-3, +) + +evaluation = dict(interval=total_epochs, pipeline=test_pipeline) +runner = dict(type='EpochBasedRunner', max_epochs=total_epochs) +load_from = 'ckpts/r101_dcn_fcos3d_pretrain.pth' + +log_config = dict( + interval=50, hooks=[dict(type='TextLoggerHook'), dict(type='TensorboardLoggerHook')] +) +checkpoint_config = dict(interval=1) +find_unused_parameters = True diff --git a/projects/configs/cvtocc/bevformer_wotsa_nuscenes.py b/projects/configs/cvtocc/bevformer_wotsa_nuscenes.py new file mode 100644 index 0000000..9080dd3 --- /dev/null +++ b/projects/configs/cvtocc/bevformer_wotsa_nuscenes.py @@ -0,0 +1,423 @@ +_base_ = ['../datasets/custom_nus-3d.py', '../_base_/default_runtime.py'] +plugin = True +plugin_dir = 'projects/mmdet3d_plugin/' +point_cloud_range = [-40, -40, -1.0, 40, 40, 5.4] +voxel_size = [0.4, 0.4, 0.4] +num_classes = 18 +img_norm_cfg = dict(mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +class_names = [ + 'car', + 'truck', + 'construction_vehicle', + 'bus', + 'trailer', + 'barrier', + 'motorcycle', + 'bicycle', + 'pedestrian', + 'traffic_cone', +] + +CLASS_NAMES = [ + 'others', + 'barrier', + 'bicycle', + 'bus', + 'car', + 'construction_vehicle', + 'motorcycle', + 'pedestrian', + 'traffic_cone', + 'trailer', + 'truck', + 'driveable_surface', + 'other_flat', + 'sidewalk', + 'terrain', + 'manmade', + 'vegetation', + 'free', +] + +class_weight_multiclass = [ + 1.552648813025149, + 1.477680635715412, + 1.789915946148316, + 1.454376653104962, + 1.283242744137921, + 1.583160056748120, + 1.758171915228669, + 1.468604241657418, + 1.651769160217543, + 1.454675968105020, + 1.369895420004945, + 1.125140370991227, + 1.399044660772846, + 1.203105344914611, + 1.191157881795851, + 1.155987296237377, + 1.150134564832974, + 1.000000000000000, +] + +input_modality = dict( + use_lidar=False, + use_camera=True, + use_radar=False, + use_map=False, + use_external=True +) + +_dim_ = 256 +_pos_dim_ = _dim_ // 2 +_ffn_dim_ = _dim_ * 2 +_num_levels_ = 4 +bev_h_ = 200 +bev_w_ = 200 +pillar_h = 16 +channels = 16 +queue_length = 1 +use_padding = False +use_temporal = None +scales = None +use_camera_mask = True +use_lidar_mask = False +use_refine_feat_loss = False 
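A quick consistency check on the grid geometry defined above: with point_cloud_range = [-40, -40, -1.0, 40, 40, 5.4] and voxel_size = [0.4, 0.4, 0.4], the occupancy volume works out to 200 x 200 x 16 voxels, i.e. exactly bev_h_ x bev_w_ x pillar_h. A minimal standalone sketch (illustrative only, not part of this config):

    # Derive the occupancy grid shape from the range/voxel settings above.
    point_cloud_range = [-40, -40, -1.0, 40, 40, 5.4]
    voxel_size = [0.4, 0.4, 0.4]
    grid_shape = [
        round((point_cloud_range[i + 3] - point_cloud_range[i]) / voxel_size[i])
        for i in range(3)
    ]
    assert grid_shape == [200, 200, 16]  # bev_h_, bev_w_, pillar_h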
+refine_feat_loss_weight = 10 + +use_temporal_self_attention = False +if use_temporal_self_attention: + attn_cfgs = [ + dict(type='TemporalSelfAttention', embed_dims=_dim_, num_levels=1), + dict( + type='SpatialCrossAttention', + pc_range=point_cloud_range, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=_dim_, + num_points=8, + num_levels=_num_levels_, + ), + embed_dims=_dim_, + ), + ] + operation_order = ('self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm') +else: + attn_cfgs = [ + dict( + type='SpatialCrossAttention', + pc_range=point_cloud_range, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=_dim_, + num_points=8, + num_levels=_num_levels_, + ), + embed_dims=_dim_, + ) + ] + operation_order = ('cross_attn', 'norm', 'ffn', 'norm') + +model = dict( + type='CVTOcc', + use_grid_mask=True, + video_test_mode=True, + queue_length=queue_length, + save_results=False, + img_backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN2d', requires_grad=False), + norm_eval=True, + style='caffe', + dcn=dict( + type='DCNv2', deform_groups=1, fallback_on_stride=False + ), # original DCNv2 will print log when perform load_state_dict + stage_with_dcn=(False, False, True, True), + ), + img_neck=dict( + type='FPN', + in_channels=[512, 1024, 2048], + out_channels=_dim_, + start_level=0, + add_extra_convs='on_output', + num_outs=4, + relu_before_extra_convs=True, + ), + pts_bbox_head=dict( + type='CVTOccHead', + bev_h=bev_h_, + bev_w=bev_w_, + num_classes=num_classes, + in_channels=_dim_, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + use_camera_mask=use_camera_mask, + use_lidar_mask=use_lidar_mask, + use_free_mask=False, + use_focal_loss=False, + use_refine_feat_loss=use_refine_feat_loss, + refine_feat_loss_weight=refine_feat_loss_weight, + loss_occ=dict( + type='CrossEntropyLoss', + # class_weight=class_weight_multiclass, + use_sigmoid=False, + loss_weight=1.0, + ), + transformer=dict( + type='CVTOccTransformer', + pillar_h=pillar_h, + num_classes=num_classes, + bev_h=bev_h_, + bev_w=bev_w_, + channels=channels, + pc_range=point_cloud_range, + voxel_size=voxel_size, + norm_cfg=dict(type='BN',), + norm_cfg_3d=dict(type='BN2d',), + use_3d=False, + use_conv=False, + rotate_prev_bev=False, + use_shift=False, + use_can_bus=False, + embed_dims=_dim_, + queue_length=queue_length, + use_padding=use_padding, + use_temporal=use_temporal, + scales=scales, + encoder=dict( + type='BEVFormerEncoder', + bev_h=bev_h_, + bev_w=bev_w_, + num_layers=6, + pc_range=point_cloud_range, + num_points_in_pillar=4, + return_intermediate=False, + transformerlayers=dict( + type='BEVFormerLayer', + bev_h=bev_h_, + bev_w=bev_w_, + attn_cfgs=attn_cfgs, + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=operation_order, + ), + ), + ), + positional_encoding=dict( + type='LearnedPositionalEncoding', + num_feats=_pos_dim_, + row_num_embed=bev_h_, + col_num_embed=bev_w_, + ), + bbox_coder=dict( + type='NMSFreeCoder', + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + pc_range=point_cloud_range, + max_num=300, + voxel_size=voxel_size, + num_classes=10, + ), + loss_cls=dict( + type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type='L1Loss', loss_weight=0.25), + loss_iou=dict(type='GIoULoss', loss_weight=0.0), + ), + # model training and testing settings + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 
1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type='HungarianAssigner3D', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBox3DL1Cost', weight=0.25), + iou_cost=dict(type='IoUCost', weight=0.0), + pc_range=point_cloud_range, + ), + ) + ), +) + +data_root = './data/occ3d-nus/' +img_scales = [1.0] +dataset_type = 'NuSceneOcc' +file_client_args = dict(backend='disk') + +train_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='LoadOccGTFromFileNuScenes', data_root=data_root), + dict(type='PhotoMetricDistortionMultiViewImage'), + dict(type='RandomScaleImageMultiViewImage', scales=img_scales), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False, + ), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict( + type='CustomCollect3D', + keys=[ + 'img', + 'voxel_semantics', + 'mask_lidar', + 'mask_camera', + ], + meta_keys=( + 'filename', + 'pts_filename', + 'occ_gt_path', + 'scene_token', + 'frame_idx', + 'scene_idx', + 'sample_idx', + 'ori_shape', + 'img_shape', + 'pad_shape', + 'lidar2img', + 'ego2lidar', + 'ego2global', + 'cam_intrinsic', + 'lidar2cam', + 'cam2img', + 'can_bus', + ), + ), +] + +test_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='LoadOccGTFromFileNuScenes', data_root=data_root), + dict(type='RandomScaleImageMultiViewImage', scales=img_scales), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1600, 900), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='DefaultFormatBundle3D', class_names=class_names, with_label=False + ), + dict( + type='CustomCollect3D', + keys=[ + 'img', + 'voxel_semantics', + 'mask_lidar', + 'mask_camera', + ], + meta_keys=( + 'filename', + 'pts_filename', + 'occ_gt_path', + 'scene_token', + 'frame_idx', + 'scene_idx', + 'sample_idx', + 'ori_shape', + 'img_shape', + 'pad_shape', + 'lidar2img', + 'ego2lidar', + 'ego2global', + 'cam_intrinsic', + 'lidar2cam', + 'cam2img', + 'can_bus', + ), + ), + ], + ), +] + +data = dict( + samples_per_gpu=1, + workers_per_gpu=16, + train=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'occ_infos_temporal_train.pkl', + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + use_valid_flag=True, + bev_size=(bev_h_, bev_w_), + queue_length=queue_length, + box_type_3d='LiDAR', + ), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'occ_infos_temporal_val.pkl', + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + # below are evaluation settings + point_cloud_range=point_cloud_range, + voxel_size=voxel_size, + CLASS_NAMES=CLASS_NAMES, + ), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'occ_infos_temporal_val.pkl', + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + # below are evaluation settings + point_cloud_range=point_cloud_range, + voxel_size=voxel_size, + CLASS_NAMES=CLASS_NAMES, + ), + 
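+    # These sampler settings are read by the plugin's custom dataloader builder:
+    # group-based shuffled sampling for training, plain distributed sampling otherwise.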
shuffler_sampler=dict(type='DistributedGroupSampler'), + nonshuffler_sampler=dict(type='DistributedSampler'), +) + +optimizer = dict( + type='AdamW', + lr=2e-4, + paramwise_cfg=dict( + custom_keys={ + 'img_backbone': dict(lr_mult=0.1), + } + ), + weight_decay=0.01, +) + +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +lr_config = dict( + policy='CosineAnnealing', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + min_lr_ratio=1e-3, +) +total_epochs = 24 +evaluation = dict(interval=total_epochs, pipeline=test_pipeline) + +runner = dict(type='EpochBasedRunner', max_epochs=total_epochs) +load_from = 'ckpts/r101_dcn_fcos3d_pretrain.pth' +log_config = dict( + interval=50, hooks=[dict(type='TextLoggerHook'), dict(type='TensorboardLoggerHook')] +) + +checkpoint_config = dict(interval=3) +find_unused_parameters = True diff --git a/projects/configs/cvtocc/bevformer_wotsa_waymo.py b/projects/configs/cvtocc/bevformer_wotsa_waymo.py new file mode 100644 index 0000000..981f220 --- /dev/null +++ b/projects/configs/cvtocc/bevformer_wotsa_waymo.py @@ -0,0 +1,537 @@ +_base_ = ['../_base_/default_runtime.py'] +plugin = True +plugin_dir = 'projects/mmdet3d_plugin/' +point_cloud_range = [-40, -40, -1.0, 40, 40, 5.4] +voxel_size = [0.4, 0.4, 0.4] +occ_voxel_size = None # useless +use_larger = True # means use 0.4 voxel size +num_classes = 16 +img_norm_cfg = dict(mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +class_names = [ + 'car', + 'truck', + 'construction_vehicle', + 'bus', + 'trailer', + 'barrier', + 'motorcycle', + 'bicycle', + 'pedestrian', + 'traffic_cone', +] +CLASS_NAMES = [ + 'GO', + 'TYPE_VEHICLE', + 'TYPE_PEDESTRIAN', + 'TYPE_SIGN', + 'TYPE_BICYCLIST', + 'TYPE_TRAFFIC_LIGHT', + 'TYPE_POLE', + 'TYPE_CONSTRUCTION_CONE', + 'TYPE_BICYCLE', + 'TYPE_MOTORCYCLE', + 'TYPE_BUILDING', + 'TYPE_VEGETATION', + 'TYPE_TREE_TRUNK', + 'TYPE_ROAD', + 'TYPE_WALKABLE', + 'TYPE_FREE', +] + +PUJIANG = False +if PUJIANG: + # pujiang 0.4/0.1 + data_root = '/mnt/petrelfs/zhaohang.p/mmdetection/data/waymo/kitti_format/' + occ_data_root = '/mnt/petrelfs/zhaohang.p/dataset/waymo_occV2/' + +else: + # MARS 0.4/0.1 + data_root = '/public/MARS/datasets/waymo_v1.3.1_untar/kitti_format/' # replace with your won waymo image path + occ_data_root = '/public/MARS/datasets/waymo_occV2/' # replace with your won occ gt path + +ann_file = occ_data_root + 'waymo_infos_train.pkl' +val_ann_file = occ_data_root + 'waymo_infos_val.pkl' +pose_file = occ_data_root + 'cam_infos.pkl' +val_pose_file = occ_data_root + 'cam_infos_vali.pkl' +if use_larger: # use 0.4 voxel size + occ_gt_data_root = occ_data_root + 'voxel04/training/' + occ_val_gt_data_root = occ_data_root + 'voxel04/validation/' +else: + occ_gt_data_root = occ_data_root + 'voxel01/training/' + occ_val_gt_data_root = occ_data_root + 'voxel01/validation/' + +input_modality = dict( + use_lidar=False, + use_camera=True, + use_radar=False, + use_map=False, + use_external=True +) + +# mask +use_infov_mask = True +use_lidar_mask = False +use_camera_mask = True +use_CDist = False + +_dim_ = 256 +num_feats = [_dim_ // 3, _dim_ // 3, _dim_ - _dim_ // 3 - _dim_ // 3] +_ffn_dim_ = _dim_ * 2 +_num_levels_ = 4 +bev_h_ = 200 +bev_w_ = 200 +total_z = 16 +# for bev +pillar_h = 4 +num_points_in_pillar = 4 +# for volume +volume_flag = False +bev_z_ = 1 +if not volume_flag: + bev_z_ = 1 +# for decoder +use_3d_decoder = False +use_conv_decoder = True + +num_views = 5 +FREE_LABEL = 23 +# for data +load_interval = 1 +test_interval = 1 +total_epochs = 8 
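For reference: the raw Waymo occupancy ground truth marks free space with label 23 (FREE_LABEL above), while the head only predicts num_classes = 16 categories whose last index is 'TYPE_FREE'. A minimal sketch of the remapping that LoadOccGTFromFileWaymo is configured for (assumed behaviour, function name hypothetical):

    import numpy as np

    def remap_free_label(voxel_semantics: np.ndarray,
                         free_label: int = 23,
                         num_classes: int = 16) -> np.ndarray:
        # Map the raw free-space label onto the last predicted class index.
        out = voxel_semantics.copy()
        out[out == free_label] = num_classes - 1
        return out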
+ +# for cost volume +use_refine_feat_loss = False +use_temporal = None +use_temporal_self_attention = False +use_padding = False +# important parameter +refine_feat_loss_weight = None +scales = None +# for interval +queue_length = 1 +input_sample_policy = { + "type": "normal" +} # only for training + +sampled_queue_length = 1 # only for costvolume +sample_num = [0] # only for test + +if use_temporal_self_attention: + attn_cfgs = [ + dict( + type='TemporalSelfAttention', embed_dims=_dim_, num_points=4, num_levels=1 + ), + dict( + type='SpatialCrossAttention', + num_cams=num_views, + pc_range=point_cloud_range, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=_dim_, + num_points=4, + num_levels=_num_levels_, + ), + embed_dims=_dim_, + ), + ] + operation_order = ('self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm') +else: + attn_cfgs = [ + dict( + type='SpatialCrossAttention', + num_cams=num_views, + pc_range=point_cloud_range, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=_dim_, + num_points=4, + num_levels=_num_levels_, + ), + embed_dims=_dim_, + ) + ] + operation_order = ('cross_attn', 'norm', 'ffn', 'norm') + +class_weight_binary = [5.314075572339673, 1] +class_weight_multiclass = [ + 21.996729830048952, + 7.504469780801267, + 10.597629961083673, + 12.18107968968811, + 15.143940258446506, + 13.035521328502758, + 9.861234292376812, + 13.64431851057796, + 15.121236434460473, + 21.996729830048952, + 6.201671013759701, + 5.7420517938838325, + 9.768712859518626, + 3.4607400626606317, + 4.152268220983671, + 1.000000000000000, +] + +model = dict( + type='CVTOccWaymo', + use_grid_mask=False, + video_test_mode=True, + queue_length=queue_length, + sampled_queue_length=sampled_queue_length, + sample_num=sample_num, # only for test + save_results=False, # for visualization + use_temporal=use_temporal, + img_backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN2d', requires_grad=False), + norm_eval=True, + style='caffe', + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, False, True, True), + ), + img_neck=dict( + type='FPN', + in_channels=[512, 1024, 2048], + out_channels=_dim_, + start_level=0, + add_extra_convs='on_output', + num_outs=4, + relu_before_extra_convs=True, + ), + pts_bbox_head=dict( + type='CVTOccHeadWaymo', + volume_flag=volume_flag, + bev_z=bev_z_, + bev_h=bev_h_, + bev_w=bev_w_, + num_classes=num_classes, + in_channels=_dim_, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + voxel_size=voxel_size, + occ_voxel_size=occ_voxel_size, + use_larger=use_larger, + # loss_occ=dict( + # type='FocalLoss', + # use_sigmoid=False, + # gamma=2.0, + # alpha=0.25, + # loss_weight=10.0), + use_CDist=use_CDist, + CLASS_NAMES=CLASS_NAMES, + use_refine_feat_loss=use_refine_feat_loss, + refine_feat_loss_weight=refine_feat_loss_weight, + # loss_occ= dict( + # type='CrossEntropyLoss', + # use_sigmoid=False, + # loss_weight=1.0), + loss_occ=dict( + ceohem=dict( + type='CrossEntropyOHEMLoss', + # Online hard example mining cross-entropy loss + class_weight=class_weight_multiclass, + use_sigmoid=False, + use_mask=False, + loss_weight=1.0, + top_ratio=0.2, + top_weight=4.0, + ), + # lovasz=dict( + # type='LovaszLoss', + # class_weight=class_weight_multiclass, + # loss_type='multi_class', + # classes='present', + # per_image=False, + # reduction='none', + # loss_weight=1.0) + ), + transformer=dict( + 
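+            # CVTOccTransformerWaymo lifts the 5 camera views onto the bev_h x bev_w BEV
+            # grid with the BEVFormerEncoderWaymo below, optionally fuses history BEV
+            # features when use_temporal is set (None here, i.e. single-frame), and feeds
+            # the result to the convolutional occupancy decoder (OccConvDecoder).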
type='CVTOccTransformerWaymo', + num_cams=num_views, + queue_length=queue_length, + sampled_queue_length=sampled_queue_length, + volume_flag=volume_flag, + pillar_h=pillar_h, + bev_z=bev_z_, + bev_h=bev_h_, + bev_w=bev_w_, + total_z=total_z, + scales=scales, + num_classes=num_classes, + use_3d_decoder=use_3d_decoder, + use_conv_decoder=use_conv_decoder, + rotate_prev_bev=False, + # use_shift=True, # use_can_bus is False, so use_shift will not be used + use_can_bus=False, + embed_dims=_dim_, + pc_range=point_cloud_range, + voxel_size=voxel_size, + occ_voxel_size=occ_voxel_size, + use_larger=use_larger, + use_temporal=use_temporal, + use_padding=use_padding, + encoder=dict( + type='BEVFormerEncoderWaymo', + num_layers=4, + volume_flag=volume_flag, + pc_range=point_cloud_range, + num_points_in_pillar=num_points_in_pillar, + return_intermediate=False, + bev_z=bev_z_, + bev_h=bev_h_, + bev_w=bev_w_, + total_z=total_z, + transformerlayers=dict( + type='BEVFormerLayerWaymo', + volume_flag=volume_flag, + attn_cfgs=attn_cfgs, + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + bev_z=bev_z_, + bev_h=bev_h_, + bev_w=bev_w_, + ffn_cfgs=dict( + type='FFN', + embed_dims=_dim_, + feedforward_channels=_dim_ * 4, + num_fcs=2, + ffn_drop=0.0, + act_cfg=dict(type='ReLU', inplace=True), + ), + operation_order=operation_order, + ), + ), + decoder=dict( + type='OccConvDecoder', + embed_dims=_dim_, + conv_num=3, + pillar_h=pillar_h, + conv_cfg=dict(type='Conv2d'), + norm_cfg=dict( + type='BN', + ), + act_cfg=dict(type='ReLU', inplace=True), + ), + ), + positional_encoding=dict( + type='LearnedPositionalEncoding3D', + num_feats=num_feats, + row_num_embed=bev_h_, + col_num_embed=bev_w_, + height_num_embed=9999, + ), + loss_cls=dict( + type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type='L1Loss', loss_weight=0.25), + loss_iou=dict(type='GIoULoss', loss_weight=0.0), + ), + # model training and testing settings + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, # it seems no use + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type='HungarianAssigner3D', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBox3DL1Cost', weight=0.25), + iou_cost=dict( + type='IoUCost', weight=0.0 + ), # Fake cost. This is just to make it compatible with DETR head. 
+ pc_range=point_cloud_range, + ), + ) + ), +) + +dataset_type = 'CustomWaymoDataset_T' +file_client_args = dict(backend='disk') + +train_pipeline = [ + dict(type='MyLoadMultiViewImageFromFiles', to_float32=True, img_scale=(1280, 1920)), + dict( + type='LoadOccGTFromFileWaymo', + data_root=occ_gt_data_root, + use_larger=use_larger, + crop_x=False, + use_infov_mask=use_infov_mask, + use_camera_mask=use_camera_mask, + use_lidar_mask=use_lidar_mask, + FREE_LABEL=FREE_LABEL, + num_classes=num_classes, + ), + dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False, + ), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='PadMultiViewImage', size_divisor=32), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict( + type='CustomCollect3D', + keys=['img', 'voxel_semantics', 'valid_mask'], + meta_keys=[ + 'filename', + 'pts_filename', + 'sample_idx', + 'scene_token', + 'ori_shape', + 'img_shape', + 'pad_shape', + 'lidar2img', + 'sensor2ego', + 'cam_intrinsic', + 'ego2global', + ], + ), +] + +test_pipeline = [ + dict(type='MyLoadMultiViewImageFromFiles', to_float32=True, img_scale=(1280, 1920)), + dict( + type='LoadOccGTFromFileWaymo', + data_root=occ_val_gt_data_root, + use_larger=use_larger, + crop_x=False, + use_infov_mask=use_infov_mask, + use_camera_mask=use_camera_mask, + use_lidar_mask=use_lidar_mask, + FREE_LABEL=FREE_LABEL, + num_classes=num_classes, + ), + dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), + dict(type='PadMultiViewImage', size_divisor=32), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1, 1), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='DefaultFormatBundle3D', class_names=class_names, with_label=False + ), + dict( + type='CustomCollect3D', + keys=['img', 'voxel_semantics', 'valid_mask'], + meta_keys=[ + 'filename', + 'pts_filename', + 'sample_idx', + 'scene_token', + 'ori_shape', + 'img_shape', + 'pad_shape', + 'lidar2img', + 'sensor2ego', + 'cam_intrinsic', + 'ego2global', + ], + ), + ], + ), +] + +# class CustomWaymoDataset_T +data = dict( + samples_per_gpu=1, + workers_per_gpu=16, + train=dict( + type=dataset_type, + data_root=data_root, + load_interval=load_interval, + num_views=num_views, + split='training', + ann_file=ann_file, + pose_file=pose_file, + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + history_len=queue_length, + input_sample_policy=input_sample_policy, + box_type_3d='LiDAR', + ), + val=dict( + type=dataset_type, + data_root=data_root, + load_interval=test_interval, + split='training', + ann_file=val_ann_file, + pose_file=val_pose_file, + num_views=num_views, + pipeline=test_pipeline, + test_mode=True, + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + # below are evaluation parameters + use_CDist=use_CDist, + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + CLASS_NAMES=CLASS_NAMES, + ), + test=dict( + type=dataset_type, + data_root=data_root, + load_interval=test_interval, + split='training', + num_views=num_views, + ann_file=val_ann_file, + pose_file=val_pose_file, + pipeline=test_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + history_len=queue_length, + box_type_3d='LiDAR', + # below are evaluation parameters + use_CDist=use_CDist, + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + 
CLASS_NAMES=CLASS_NAMES, + ), + shuffler_sampler=dict(type='DistributedGroupSampler'), + nonshuffler_sampler=dict(type='DistributedSampler'), +) + +optimizer = dict( + type='AdamW', + lr=4e-4, + paramwise_cfg=dict( + custom_keys={ + 'img_backbone': dict(lr_mult=0.1), + } + ), + weight_decay=0.01, +) + +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +lr_config = dict( + policy='CosineAnnealing', + warmup='linear', + warmup_iters=100, + warmup_ratio=1.0 / 3, + min_lr_ratio=1e-3, +) + +evaluation = dict(interval=total_epochs, pipeline=test_pipeline) +runner = dict(type='EpochBasedRunner', max_epochs=total_epochs) +load_from = 'ckpts/r101_dcn_fcos3d_pretrain.pth' + +log_config = dict( + interval=50, hooks=[dict(type='TextLoggerHook'), dict(type='TensorboardLoggerHook')] +) +checkpoint_config = dict(interval=1) +find_unused_parameters = True diff --git a/projects/configs/cvtocc/bevformer_wrapconcat_waymo.py b/projects/configs/cvtocc/bevformer_wrapconcat_waymo.py new file mode 100644 index 0000000..c4575af --- /dev/null +++ b/projects/configs/cvtocc/bevformer_wrapconcat_waymo.py @@ -0,0 +1,538 @@ +_base_ = ['../_base_/default_runtime.py'] +plugin = True +plugin_dir = 'projects/mmdet3d_plugin/' +point_cloud_range = [-40, -40, -1.0, 40, 40, 5.4] +voxel_size = [0.4, 0.4, 0.4] +occ_voxel_size = None # useless +use_larger = True # means use 0.4 voxel size +num_classes = 16 +img_norm_cfg = dict(mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +class_names = [ + 'car', + 'truck', + 'construction_vehicle', + 'bus', + 'trailer', + 'barrier', + 'motorcycle', + 'bicycle', + 'pedestrian', + 'traffic_cone', +] +CLASS_NAMES = [ + 'GO', + 'TYPE_VEHICLE', + 'TYPE_PEDESTRIAN', + 'TYPE_SIGN', + 'TYPE_BICYCLIST', + 'TYPE_TRAFFIC_LIGHT', + 'TYPE_POLE', + 'TYPE_CONSTRUCTION_CONE', + 'TYPE_BICYCLE', + 'TYPE_MOTORCYCLE', + 'TYPE_BUILDING', + 'TYPE_VEGETATION', + 'TYPE_TREE_TRUNK', + 'TYPE_ROAD', + 'TYPE_WALKABLE', + 'TYPE_FREE', +] + +PUJIANG = False +if PUJIANG: + # pujiang 0.4/0.1 + data_root = '/mnt/petrelfs/zhaohang.p/mmdetection/data/waymo/kitti_format/' + occ_data_root = '/mnt/petrelfs/zhaohang.p/dataset/waymo_occV2/' + +else: + # MARS 0.4/0.1 + data_root = '/public/MARS/datasets/waymo_v1.3.1_untar/kitti_format/' # replace with your won waymo image path + occ_data_root = '/public/MARS/datasets/waymo_occV2/' # replace with your won occ gt path + +ann_file = occ_data_root + 'waymo_infos_train.pkl' +val_ann_file = occ_data_root + 'waymo_infos_val.pkl' +pose_file = occ_data_root + 'cam_infos.pkl' +val_pose_file = occ_data_root + 'cam_infos_vali.pkl' +if use_larger: # use 0.4 voxel size + occ_gt_data_root = occ_data_root + 'voxel04/training/' + occ_val_gt_data_root = occ_data_root + 'voxel04/validation/' +else: + occ_gt_data_root = occ_data_root + 'voxel01/training/' + occ_val_gt_data_root = occ_data_root + 'voxel01/validation/' + +input_modality = dict( + use_lidar=False, + use_camera=True, + use_radar=False, + use_map=False, + use_external=True +) + +# mask +use_infov_mask = True +use_lidar_mask = False +use_camera_mask = True +use_CDist = False + +_dim_ = 256 +num_feats = [_dim_ // 3, _dim_ // 3, _dim_ - _dim_ // 3 - _dim_ // 3] +_ffn_dim_ = _dim_ * 2 +_num_levels_ = 4 +bev_h_ = 200 +bev_w_ = 200 +total_z = 16 +# for bev +pillar_h = 4 +num_points_in_pillar = 4 +# for volume +volume_flag = False +bev_z_ = 1 +if not volume_flag: + bev_z_ = 1 +# for decoder +use_3d_decoder = False +use_conv_decoder = True + +num_views = 5 +FREE_LABEL = 23 +# for data +load_interval = 1 
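Note that load_interval / test_interval control frame subsampling of the annotation infos (1 keeps every frame; larger values train or evaluate on a subset). A minimal sketch of the usual behaviour (illustrative only, helper name hypothetical):

    import pickle

    def load_annotations(ann_file: str, load_interval: int = 1):
        # Keep every `load_interval`-th frame from the pickled info list.
        with open(ann_file, 'rb') as f:
            infos = pickle.load(f)
        return infos[::load_interval]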
+test_interval = 1 +total_epochs = 8 + +# for cost volume +use_refine_feat_loss = False +use_temporal = 'concat_conv' +use_temporal_self_attention = False +use_padding = False +# important parameter +refine_feat_loss_weight = None +scales = None +# for interval +queue_length = 31 +input_sample_policy = { + "type": "large interval", + "interval": 5, + "number": 7, +} # only for training + +sampled_queue_length = 7 # only for costvolume +sample_num = [30, 25, 20, 15, 10, 5, 0] # only for test + +if use_temporal_self_attention: + attn_cfgs = [ + dict( + type='TemporalSelfAttention', embed_dims=_dim_, num_points=4, num_levels=1 + ), + dict( + type='SpatialCrossAttention', + num_cams=num_views, + pc_range=point_cloud_range, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=_dim_, + num_points=4, + num_levels=_num_levels_, + ), + embed_dims=_dim_, + ), + ] + operation_order = ('self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm') +else: + attn_cfgs = [ + dict( + type='SpatialCrossAttention', + num_cams=num_views, + pc_range=point_cloud_range, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=_dim_, + num_points=4, + num_levels=_num_levels_, + ), + embed_dims=_dim_, + ) + ] + operation_order = ('cross_attn', 'norm', 'ffn', 'norm') + +class_weight_binary = [5.314075572339673, 1] +class_weight_multiclass = [ + 21.996729830048952, + 7.504469780801267, + 10.597629961083673, + 12.18107968968811, + 15.143940258446506, + 13.035521328502758, + 9.861234292376812, + 13.64431851057796, + 15.121236434460473, + 21.996729830048952, + 6.201671013759701, + 5.7420517938838325, + 9.768712859518626, + 3.4607400626606317, + 4.152268220983671, + 1.000000000000000, +] + +model = dict( + type='CVTOccWaymo', + use_grid_mask=False, + video_test_mode=True, + queue_length=queue_length, + sampled_queue_length=sampled_queue_length, + sample_num=sample_num, # only for test + save_results=False, # for visualization + use_temporal=use_temporal, + img_backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN2d', requires_grad=False), + norm_eval=True, + style='caffe', + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, False, True, True), + ), + img_neck=dict( + type='FPN', + in_channels=[512, 1024, 2048], + out_channels=_dim_, + start_level=0, + add_extra_convs='on_output', + num_outs=4, + relu_before_extra_convs=True, + ), + pts_bbox_head=dict( + type='CVTOccHeadWaymo', + volume_flag=volume_flag, + bev_z=bev_z_, + bev_h=bev_h_, + bev_w=bev_w_, + num_classes=num_classes, + in_channels=_dim_, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + voxel_size=voxel_size, + occ_voxel_size=occ_voxel_size, + use_larger=use_larger, + # loss_occ=dict( + # type='FocalLoss', + # use_sigmoid=False, + # gamma=2.0, + # alpha=0.25, + # loss_weight=10.0), + use_CDist=use_CDist, + CLASS_NAMES=CLASS_NAMES, + use_refine_feat_loss=use_refine_feat_loss, + refine_feat_loss_weight=refine_feat_loss_weight, + # loss_occ= dict( + # type='CrossEntropyLoss', + # use_sigmoid=False, + # loss_weight=1.0), + loss_occ=dict( + ceohem=dict( + type='CrossEntropyOHEMLoss', + # Online hard example mining cross-entropy loss + class_weight=class_weight_multiclass, + use_sigmoid=False, + use_mask=False, + loss_weight=1.0, + top_ratio=0.2, + top_weight=4.0, + ), + # lovasz=dict( + # type='LovaszLoss', + # class_weight=class_weight_multiclass, + # loss_type='multi_class', + # 
classes='present', + # per_image=False, + # reduction='none', + # loss_weight=1.0) + ), + transformer=dict( + type='CVTOccTransformerWaymo', + num_cams=num_views, + queue_length=queue_length, + sampled_queue_length=sampled_queue_length, + volume_flag=volume_flag, + pillar_h=pillar_h, + bev_z=bev_z_, + bev_h=bev_h_, + bev_w=bev_w_, + total_z=total_z, + scales=scales, + num_classes=num_classes, + use_3d_decoder=use_3d_decoder, + use_conv_decoder=use_conv_decoder, + rotate_prev_bev=False, + # use_shift=True, # use_can_bus is False, so use_shift will not be used + use_can_bus=False, + embed_dims=_dim_, + pc_range=point_cloud_range, + voxel_size=voxel_size, + occ_voxel_size=occ_voxel_size, + use_larger=use_larger, + use_temporal=use_temporal, + use_padding=use_padding, + encoder=dict( + type='BEVFormerEncoderWaymo', + num_layers=4, + volume_flag=volume_flag, + pc_range=point_cloud_range, + num_points_in_pillar=num_points_in_pillar, + return_intermediate=False, + bev_z=bev_z_, + bev_h=bev_h_, + bev_w=bev_w_, + total_z=total_z, + transformerlayers=dict( + type='BEVFormerLayerWaymo', + volume_flag=volume_flag, + attn_cfgs=attn_cfgs, + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + bev_z=bev_z_, + bev_h=bev_h_, + bev_w=bev_w_, + ffn_cfgs=dict( + type='FFN', + embed_dims=_dim_, + feedforward_channels=_dim_ * 4, + num_fcs=2, + ffn_drop=0.0, + act_cfg=dict(type='ReLU', inplace=True), + ), + operation_order=operation_order, + ), + ), + decoder=dict( + type='OccConvDecoder', + embed_dims=_dim_, + conv_num=3, + pillar_h=pillar_h, + conv_cfg=dict(type='Conv2d'), + norm_cfg=dict( + type='BN', + ), + act_cfg=dict(type='ReLU', inplace=True), + ), + ), + positional_encoding=dict( + type='LearnedPositionalEncoding3D', + num_feats=num_feats, + row_num_embed=bev_h_, + col_num_embed=bev_w_, + height_num_embed=9999, + ), + loss_cls=dict( + type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type='L1Loss', loss_weight=0.25), + loss_iou=dict(type='GIoULoss', loss_weight=0.0), + ), + # model training and testing settings + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, # it seems no use + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type='HungarianAssigner3D', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBox3DL1Cost', weight=0.25), + iou_cost=dict( + type='IoUCost', weight=0.0 + ), # Fake cost. This is just to make it compatible with DETR head. 
+ pc_range=point_cloud_range, + ), + ) + ), +) + +dataset_type = 'CustomWaymoDataset_T' +file_client_args = dict(backend='disk') + +train_pipeline = [ + dict(type='MyLoadMultiViewImageFromFiles', to_float32=True, img_scale=(1280, 1920)), + dict( + type='LoadOccGTFromFileWaymo', + data_root=occ_gt_data_root, + use_larger=use_larger, + crop_x=False, + use_infov_mask=use_infov_mask, + use_camera_mask=use_camera_mask, + use_lidar_mask=use_lidar_mask, + FREE_LABEL=FREE_LABEL, + num_classes=num_classes, + ), + dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False, + ), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='PadMultiViewImage', size_divisor=32), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict( + type='CustomCollect3D', + keys=['img', 'voxel_semantics', 'valid_mask'], + meta_keys=[ + 'filename', + 'pts_filename', + 'sample_idx', + 'scene_token', + 'ori_shape', + 'img_shape', + 'pad_shape', + 'lidar2img', + 'sensor2ego', + 'cam_intrinsic', + 'ego2global', + ], + ), +] + +test_pipeline = [ + dict(type='MyLoadMultiViewImageFromFiles', to_float32=True, img_scale=(1280, 1920)), + dict( + type='LoadOccGTFromFileWaymo', + data_root=occ_val_gt_data_root, + use_larger=use_larger, + crop_x=False, + use_infov_mask=use_infov_mask, + use_camera_mask=use_camera_mask, + use_lidar_mask=use_lidar_mask, + FREE_LABEL=FREE_LABEL, + num_classes=num_classes, + ), + dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), + dict(type='PadMultiViewImage', size_divisor=32), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1, 1), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='DefaultFormatBundle3D', class_names=class_names, with_label=False + ), + dict( + type='CustomCollect3D', + keys=['img', 'voxel_semantics', 'valid_mask'], + meta_keys=[ + 'filename', + 'pts_filename', + 'sample_idx', + 'scene_token', + 'ori_shape', + 'img_shape', + 'pad_shape', + 'lidar2img', + 'sensor2ego', + 'cam_intrinsic', + 'ego2global', + ], + ), + ], + ), +] + +# class CustomWaymoDataset_T +data = dict( + samples_per_gpu=1, + workers_per_gpu=16, + train=dict( + type=dataset_type, + data_root=data_root, + load_interval=load_interval, + num_views=num_views, + split='training', + ann_file=ann_file, + pose_file=pose_file, + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + history_len=queue_length, + input_sample_policy=input_sample_policy, + box_type_3d='LiDAR', + ), + val=dict( + type=dataset_type, + data_root=data_root, + load_interval=test_interval, + split='training', + ann_file=val_ann_file, + pose_file=val_pose_file, + num_views=num_views, + pipeline=test_pipeline, + test_mode=True, + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + # below are evaluation parameters + use_CDist=use_CDist, + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + CLASS_NAMES=CLASS_NAMES, + ), + test=dict( + type=dataset_type, + data_root=data_root, + load_interval=test_interval, + split='training', + num_views=num_views, + ann_file=val_ann_file, + pose_file=val_pose_file, + pipeline=test_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + history_len=queue_length, + box_type_3d='LiDAR', + # below are evaluation parameters + use_CDist=use_CDist, + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + 
CLASS_NAMES=CLASS_NAMES, + ), + shuffler_sampler=dict(type='DistributedGroupSampler'), + nonshuffler_sampler=dict(type='DistributedSampler'), +) +optimizer = dict( + type='AdamW', + lr=4e-4, + paramwise_cfg=dict( + custom_keys={ + 'img_backbone': dict(lr_mult=0.1), + } + ), + weight_decay=0.01, +) + +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +lr_config = dict( + policy='CosineAnnealing', + warmup='linear', + warmup_iters=100, + warmup_ratio=1.0 / 3, + min_lr_ratio=1e-3, +) + +evaluation = dict(interval=total_epochs, pipeline=test_pipeline) +runner = dict(type='EpochBasedRunner', max_epochs=total_epochs) +load_from = 'ckpts/r101_dcn_fcos3d_pretrain.pth' + +log_config = dict( + interval=50, hooks=[dict(type='TextLoggerHook'), dict(type='TensorboardLoggerHook')] +) +checkpoint_config = dict(interval=1) +find_unused_parameters = True diff --git a/projects/configs/cvtocc/cvtocc_nuscenes.py b/projects/configs/cvtocc/cvtocc_nuscenes.py new file mode 100644 index 0000000..b54059a --- /dev/null +++ b/projects/configs/cvtocc/cvtocc_nuscenes.py @@ -0,0 +1,423 @@ +_base_ = ['../datasets/custom_nus-3d.py', '../_base_/default_runtime.py'] +plugin = True +plugin_dir = 'projects/mmdet3d_plugin/' +point_cloud_range = [-40, -40, -1.0, 40, 40, 5.4] +voxel_size = [0.4, 0.4, 0.4] +num_classes = 18 +img_norm_cfg = dict(mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +class_names = [ + 'car', + 'truck', + 'construction_vehicle', + 'bus', + 'trailer', + 'barrier', + 'motorcycle', + 'bicycle', + 'pedestrian', + 'traffic_cone', +] + +CLASS_NAMES = [ + 'others', + 'barrier', + 'bicycle', + 'bus', + 'car', + 'construction_vehicle', + 'motorcycle', + 'pedestrian', + 'traffic_cone', + 'trailer', + 'truck', + 'driveable_surface', + 'other_flat', + 'sidewalk', + 'terrain', + 'manmade', + 'vegetation', + 'free', +] + +class_weight_multiclass = [ + 1.552648813025149, + 1.477680635715412, + 1.789915946148316, + 1.454376653104962, + 1.283242744137921, + 1.583160056748120, + 1.758171915228669, + 1.468604241657418, + 1.651769160217543, + 1.454675968105020, + 1.369895420004945, + 1.125140370991227, + 1.399044660772846, + 1.203105344914611, + 1.191157881795851, + 1.155987296237377, + 1.150134564832974, + 1.000000000000000, +] + +input_modality = dict( + use_lidar=False, + use_camera=True, + use_radar=False, + use_map=False, + use_external=True +) + +_dim_ = 256 +_pos_dim_ = _dim_ // 2 +_ffn_dim_ = _dim_ * 2 +_num_levels_ = 4 +bev_h_ = 200 +bev_w_ = 200 +pillar_h = 16 +channels = 16 +queue_length = 7 +use_padding = False +use_temporal = 'costvolume' +scales = [0.8, 0.85, 0.9, 0.95, 1.0, 1.05, 1.1, 1.15, 1.2] +use_camera_mask = True +use_lidar_mask = False +use_refine_feat_loss = True +refine_feat_loss_weight = 10 + +use_temporal_self_attention = False +if use_temporal_self_attention: + attn_cfgs = [ + dict(type='TemporalSelfAttention', embed_dims=_dim_, num_levels=1), + dict( + type='SpatialCrossAttention', + pc_range=point_cloud_range, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=_dim_, + num_points=8, + num_levels=_num_levels_, + ), + embed_dims=_dim_, + ), + ] + operation_order = ('self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm') +else: + attn_cfgs = [ + dict( + type='SpatialCrossAttention', + pc_range=point_cloud_range, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=_dim_, + num_points=8, + num_levels=_num_levels_, + ), + embed_dims=_dim_, + ) + ] + operation_order = ('cross_attn', 'norm', 'ffn', 'norm') + +model = 
dict( + type='CVTOcc', + use_grid_mask=True, + video_test_mode=True, + queue_length=queue_length, + save_results=False, + img_backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN2d', requires_grad=False), + norm_eval=True, + style='caffe', + dcn=dict( + type='DCNv2', deform_groups=1, fallback_on_stride=False + ), # original DCNv2 will print log when perform load_state_dict + stage_with_dcn=(False, False, True, True), + ), + img_neck=dict( + type='FPN', + in_channels=[512, 1024, 2048], + out_channels=_dim_, + start_level=0, + add_extra_convs='on_output', + num_outs=4, + relu_before_extra_convs=True, + ), + pts_bbox_head=dict( + type='CVTOccHead', + bev_h=bev_h_, + bev_w=bev_w_, + num_classes=num_classes, + in_channels=_dim_, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + use_camera_mask=use_camera_mask, + use_lidar_mask=use_lidar_mask, + use_free_mask=False, + use_focal_loss=False, + use_refine_feat_loss=use_refine_feat_loss, + refine_feat_loss_weight=refine_feat_loss_weight, + loss_occ=dict( + type='CrossEntropyLoss', + # class_weight=class_weight_multiclass, + use_sigmoid=False, + loss_weight=1.0, + ), + transformer=dict( + type='CVTOccTransformer', + pillar_h=pillar_h, + num_classes=num_classes, + bev_h=bev_h_, + bev_w=bev_w_, + channels=channels, + pc_range=point_cloud_range, + voxel_size=voxel_size, + norm_cfg=dict(type='BN',), + norm_cfg_3d=dict(type='BN2d',), + use_3d=False, + use_conv=False, + rotate_prev_bev=False, + use_shift=False, + use_can_bus=False, + embed_dims=_dim_, + queue_length=queue_length, + use_padding=use_padding, + use_temporal=use_temporal, + scales=scales, + encoder=dict( + type='BEVFormerEncoder', + bev_h=bev_h_, + bev_w=bev_w_, + num_layers=6, + pc_range=point_cloud_range, + num_points_in_pillar=4, + return_intermediate=False, + transformerlayers=dict( + type='BEVFormerLayer', + bev_h=bev_h_, + bev_w=bev_w_, + attn_cfgs=attn_cfgs, + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + operation_order=operation_order, + ), + ), + ), + positional_encoding=dict( + type='LearnedPositionalEncoding', + num_feats=_pos_dim_, + row_num_embed=bev_h_, + col_num_embed=bev_w_, + ), + bbox_coder=dict( + type='NMSFreeCoder', + post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + pc_range=point_cloud_range, + max_num=300, + voxel_size=voxel_size, + num_classes=10, + ), + loss_cls=dict( + type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type='L1Loss', loss_weight=0.25), + loss_iou=dict(type='GIoULoss', loss_weight=0.0), + ), + # model training and testing settings + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type='HungarianAssigner3D', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBox3DL1Cost', weight=0.25), + iou_cost=dict(type='IoUCost', weight=0.0), + pc_range=point_cloud_range, + ), + ) + ), +) + +data_root = './data/occ3d-nus/' +img_scales = [1.0] +dataset_type = 'NuSceneOcc' +file_client_args = dict(backend='disk') + +train_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='LoadOccGTFromFileNuScenes', data_root=data_root), + dict(type='PhotoMetricDistortionMultiViewImage'), + dict(type='RandomScaleImageMultiViewImage', scales=img_scales), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + 
with_attr_label=False, + ), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict( + type='CustomCollect3D', + keys=[ + 'img', + 'voxel_semantics', + 'mask_lidar', + 'mask_camera', + ], + meta_keys=( + 'filename', + 'pts_filename', + 'occ_gt_path', + 'scene_token', + 'frame_idx', + 'scene_idx', + 'sample_idx', + 'ori_shape', + 'img_shape', + 'pad_shape', + 'lidar2img', + 'ego2lidar', + 'ego2global', + 'cam_intrinsic', + 'lidar2cam', + 'cam2img', + 'can_bus', + ), + ), +] + +test_pipeline = [ + dict(type='LoadMultiViewImageFromFiles', to_float32=True), + dict(type='LoadOccGTFromFileNuScenes', data_root=data_root), + dict(type='RandomScaleImageMultiViewImage', scales=img_scales), + dict(type='NormalizeMultiviewImage', **img_norm_cfg), + dict(type='PadMultiViewImage', size_divisor=32), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1600, 900), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + type='DefaultFormatBundle3D', class_names=class_names, with_label=False + ), + dict( + type='CustomCollect3D', + keys=[ + 'img', + 'voxel_semantics', + 'mask_lidar', + 'mask_camera', + ], + meta_keys=( + 'filename', + 'pts_filename', + 'occ_gt_path', + 'scene_token', + 'frame_idx', + 'scene_idx', + 'sample_idx', + 'ori_shape', + 'img_shape', + 'pad_shape', + 'lidar2img', + 'ego2lidar', + 'ego2global', + 'cam_intrinsic', + 'lidar2cam', + 'cam2img', + 'can_bus', + ), + ), + ], + ), +] + +data = dict( + samples_per_gpu=1, + workers_per_gpu=16, + train=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'occ_infos_temporal_train.pkl', + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + use_valid_flag=True, + bev_size=(bev_h_, bev_w_), + queue_length=queue_length, + box_type_3d='LiDAR', + ), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'occ_infos_temporal_val.pkl', + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + # below are evaluation settings + point_cloud_range=point_cloud_range, + voxel_size=voxel_size, + CLASS_NAMES=CLASS_NAMES, + ), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file=data_root + 'occ_infos_temporal_val.pkl', + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + # below are evaluation settings + point_cloud_range=point_cloud_range, + voxel_size=voxel_size, + CLASS_NAMES=CLASS_NAMES, + ), + shuffler_sampler=dict(type='DistributedGroupSampler'), + nonshuffler_sampler=dict(type='DistributedSampler'), +) + +optimizer = dict( + type='AdamW', + lr=2e-4, + paramwise_cfg=dict( + custom_keys={ + 'img_backbone': dict(lr_mult=0.1), + } + ), + weight_decay=0.01, +) + +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +lr_config = dict( + policy='CosineAnnealing', + warmup='linear', + warmup_iters=500, + warmup_ratio=1.0 / 3, + min_lr_ratio=1e-3, +) +total_epochs = 24 +evaluation = dict(interval=total_epochs, pipeline=test_pipeline) + +runner = dict(type='EpochBasedRunner', max_epochs=total_epochs) +load_from = 'ckpts/r101_dcn_fcos3d_pretrain.pth' +log_config = dict( + interval=50, hooks=[dict(type='TextLoggerHook'), dict(type='TensorboardLoggerHook')] +) + +checkpoint_config = 
dict(interval=3) +find_unused_parameters = True diff --git a/projects/configs/cvtocc/cvtocc_waymo.py b/projects/configs/cvtocc/cvtocc_waymo.py new file mode 100644 index 0000000..8d97195 --- /dev/null +++ b/projects/configs/cvtocc/cvtocc_waymo.py @@ -0,0 +1,538 @@ +_base_ = ['../_base_/default_runtime.py'] +plugin = True +plugin_dir = 'projects/mmdet3d_plugin/' +point_cloud_range = [-40, -40, -1.0, 40, 40, 5.4] +voxel_size = [0.4, 0.4, 0.4] +occ_voxel_size = None # useless +use_larger = True # means use 0.4 voxel size +num_classes = 16 +img_norm_cfg = dict(mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +class_names = [ + 'car', + 'truck', + 'construction_vehicle', + 'bus', + 'trailer', + 'barrier', + 'motorcycle', + 'bicycle', + 'pedestrian', + 'traffic_cone', +] +CLASS_NAMES = [ + 'GO', + 'TYPE_VEHICLE', + 'TYPE_PEDESTRIAN', + 'TYPE_SIGN', + 'TYPE_BICYCLIST', + 'TYPE_TRAFFIC_LIGHT', + 'TYPE_POLE', + 'TYPE_CONSTRUCTION_CONE', + 'TYPE_BICYCLE', + 'TYPE_MOTORCYCLE', + 'TYPE_BUILDING', + 'TYPE_VEGETATION', + 'TYPE_TREE_TRUNK', + 'TYPE_ROAD', + 'TYPE_WALKABLE', + 'TYPE_FREE', +] + +PUJIANG = False +if PUJIANG: + # pujiang 0.4/0.1 + data_root = '/mnt/petrelfs/zhaohang.p/mmdetection/data/waymo/kitti_format/' + occ_data_root = '/mnt/petrelfs/zhaohang.p/dataset/waymo_occV2/' + +else: + # MARS 0.4/0.1 + data_root = '/public/MARS/datasets/waymo_v1.3.1_untar/kitti_format/' # replace with your won waymo image path + occ_data_root = '/public/MARS/datasets/waymo_occV2/' # replace with your won occ gt path + +ann_file = occ_data_root + 'waymo_infos_train.pkl' +val_ann_file = occ_data_root + 'waymo_infos_val.pkl' +pose_file = occ_data_root + 'cam_infos.pkl' +val_pose_file = occ_data_root + 'cam_infos_vali.pkl' +if use_larger: # use 0.4 voxel size + occ_gt_data_root = occ_data_root + 'voxel04/training/' + occ_val_gt_data_root = occ_data_root + 'voxel04/validation/' +else: + occ_gt_data_root = occ_data_root + 'voxel01/training/' + occ_val_gt_data_root = occ_data_root + 'voxel01/validation/' + +input_modality = dict( + use_lidar=False, + use_camera=True, + use_radar=False, + use_map=False, + use_external=True +) + +# mask +use_infov_mask = True +use_lidar_mask = False +use_camera_mask = True +use_CDist = False + +_dim_ = 256 +num_feats = [_dim_ // 3, _dim_ // 3, _dim_ - _dim_ // 3 - _dim_ // 3] +_ffn_dim_ = _dim_ * 2 +_num_levels_ = 4 +bev_h_ = 200 +bev_w_ = 200 +total_z = 16 +# for bev +pillar_h = 4 +num_points_in_pillar = 4 +# for volume +volume_flag = False +bev_z_ = 1 +if not volume_flag: + bev_z_ = 1 +# for decoder +use_3d_decoder = False +use_conv_decoder = True + +num_views = 5 +FREE_LABEL = 23 +# for data +load_interval = 1 +test_interval = 1 +total_epochs = 8 + +# for cost volume +use_refine_feat_loss = True +use_temporal = 'costvolume' +use_temporal_self_attention = False +use_padding = False +# important parameter +refine_feat_loss_weight = 500 +scales = [0.8, 0.85, 0.9, 0.95, 1.0, 1.05, 1.1, 1.15, 1.2] +# for interval +queue_length = 31 +input_sample_policy = { + "type": "random interval", + "fix interval": 5, + "number": 7, +} # only for training + +sampled_queue_length = 7 # only for costvolume +sample_num = [30, 25, 20, 15, 10, 5, 0] # only for test + +if use_temporal_self_attention: + attn_cfgs = [ + dict( + type='TemporalSelfAttention', embed_dims=_dim_, num_points=4, num_levels=1 + ), + dict( + type='SpatialCrossAttention', + num_cams=num_views, + pc_range=point_cloud_range, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=_dim_, + 
num_points=4, + num_levels=_num_levels_, + ), + embed_dims=_dim_, + ), + ] + operation_order = ('self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm') +else: + attn_cfgs = [ + dict( + type='SpatialCrossAttention', + num_cams=num_views, + pc_range=point_cloud_range, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=_dim_, + num_points=4, + num_levels=_num_levels_, + ), + embed_dims=_dim_, + ) + ] + operation_order = ('cross_attn', 'norm', 'ffn', 'norm') + +class_weight_binary = [5.314075572339673, 1] +class_weight_multiclass = [ + 21.996729830048952, + 7.504469780801267, + 10.597629961083673, + 12.18107968968811, + 15.143940258446506, + 13.035521328502758, + 9.861234292376812, + 13.64431851057796, + 15.121236434460473, + 21.996729830048952, + 6.201671013759701, + 5.7420517938838325, + 9.768712859518626, + 3.4607400626606317, + 4.152268220983671, + 1.000000000000000, +] + +model = dict( + type='CVTOccWaymo', + use_grid_mask=False, + video_test_mode=True, + queue_length=queue_length, + sampled_queue_length=sampled_queue_length, + sample_num=sample_num, # only for test + save_results=False, # for visualization + use_temporal=use_temporal, + img_backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN2d', requires_grad=False), + norm_eval=True, + style='caffe', + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, False, True, True), + ), + img_neck=dict( + type='FPN', + in_channels=[512, 1024, 2048], + out_channels=_dim_, + start_level=0, + add_extra_convs='on_output', + num_outs=4, + relu_before_extra_convs=True, + ), + pts_bbox_head=dict( + type='CVTOccHeadWaymo', + volume_flag=volume_flag, + bev_z=bev_z_, + bev_h=bev_h_, + bev_w=bev_w_, + num_classes=num_classes, + in_channels=_dim_, + sync_cls_avg_factor=True, + with_box_refine=True, + as_two_stage=False, + voxel_size=voxel_size, + occ_voxel_size=occ_voxel_size, + use_larger=use_larger, + # loss_occ=dict( + # type='FocalLoss', + # use_sigmoid=False, + # gamma=2.0, + # alpha=0.25, + # loss_weight=10.0), + use_CDist=use_CDist, + CLASS_NAMES=CLASS_NAMES, + use_refine_feat_loss=use_refine_feat_loss, + refine_feat_loss_weight=refine_feat_loss_weight, + # loss_occ= dict( + # type='CrossEntropyLoss', + # use_sigmoid=False, + # loss_weight=1.0), + loss_occ=dict( + ceohem=dict( + type='CrossEntropyOHEMLoss', + # Online hard example mining cross-entropy loss + class_weight=class_weight_multiclass, + use_sigmoid=False, + use_mask=False, + loss_weight=1.0, + top_ratio=0.2, + top_weight=4.0, + ), + # lovasz=dict( + # type='LovaszLoss', + # class_weight=class_weight_multiclass, + # loss_type='multi_class', + # classes='present', + # per_image=False, + # reduction='none', + # loss_weight=1.0) + ), + transformer=dict( + type='CVTOccTransformerWaymo', + num_cams=num_views, + queue_length=queue_length, + sampled_queue_length=sampled_queue_length, + volume_flag=volume_flag, + pillar_h=pillar_h, + bev_z=bev_z_, + bev_h=bev_h_, + bev_w=bev_w_, + total_z=total_z, + scales=scales, + num_classes=num_classes, + use_3d_decoder=use_3d_decoder, + use_conv_decoder=use_conv_decoder, + rotate_prev_bev=False, + # use_shift=True, # use_can_bus is False, so use_shift will not be used + use_can_bus=False, + embed_dims=_dim_, + pc_range=point_cloud_range, + voxel_size=voxel_size, + occ_voxel_size=occ_voxel_size, + use_larger=use_larger, + use_temporal=use_temporal, + use_padding=use_padding, + encoder=dict( + 
type='BEVFormerEncoderWaymo', + num_layers=4, + volume_flag=volume_flag, + pc_range=point_cloud_range, + num_points_in_pillar=num_points_in_pillar, + return_intermediate=False, + bev_z=bev_z_, + bev_h=bev_h_, + bev_w=bev_w_, + total_z=total_z, + transformerlayers=dict( + type='BEVFormerLayerWaymo', + volume_flag=volume_flag, + attn_cfgs=attn_cfgs, + feedforward_channels=_ffn_dim_, + ffn_dropout=0.1, + bev_z=bev_z_, + bev_h=bev_h_, + bev_w=bev_w_, + ffn_cfgs=dict( + type='FFN', + embed_dims=_dim_, + feedforward_channels=_dim_ * 4, + num_fcs=2, + ffn_drop=0.0, + act_cfg=dict(type='ReLU', inplace=True), + ), + operation_order=operation_order, + ), + ), + decoder=dict( + type='OccConvDecoder', + embed_dims=_dim_, + conv_num=3, + pillar_h=pillar_h, + conv_cfg=dict(type='Conv2d'), + norm_cfg=dict( + type='BN', + ), + act_cfg=dict(type='ReLU', inplace=True), + ), + ), + positional_encoding=dict( + type='LearnedPositionalEncoding3D', + num_feats=num_feats, + row_num_embed=bev_h_, + col_num_embed=bev_w_, + height_num_embed=9999, + ), + loss_cls=dict( + type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type='L1Loss', loss_weight=0.25), + loss_iou=dict(type='GIoULoss', loss_weight=0.0), + ), + # model training and testing settings + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, # it seems no use + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type='HungarianAssigner3D', + cls_cost=dict(type='FocalLossCost', weight=2.0), + reg_cost=dict(type='BBox3DL1Cost', weight=0.25), + iou_cost=dict( + type='IoUCost', weight=0.0 + ), # Fake cost. This is just to make it compatible with DETR head. + pc_range=point_cloud_range, + ), + ) + ), +) + +dataset_type = 'CustomWaymoDataset_T' +file_client_args = dict(backend='disk') + +train_pipeline = [ + dict(type='MyLoadMultiViewImageFromFiles', to_float32=True, img_scale=(1280, 1920)), + dict( + type='LoadOccGTFromFileWaymo', + data_root=occ_gt_data_root, + use_larger=use_larger, + crop_x=False, + use_infov_mask=use_infov_mask, + use_camera_mask=use_camera_mask, + use_lidar_mask=use_lidar_mask, + FREE_LABEL=FREE_LABEL, + num_classes=num_classes, + ), + dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), + dict( + type='LoadAnnotations3D', + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False, + ), + dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), + dict(type='ObjectNameFilter', classes=class_names), + dict(type='PadMultiViewImage', size_divisor=32), + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict( + type='CustomCollect3D', + keys=['img', 'voxel_semantics', 'valid_mask'], + meta_keys=[ + 'filename', + 'pts_filename', + 'sample_idx', + 'scene_token', + 'ori_shape', + 'img_shape', + 'pad_shape', + 'lidar2img', + 'sensor2ego', + 'cam_intrinsic', + 'ego2global', + ], + ), +] + +test_pipeline = [ + dict(type='MyLoadMultiViewImageFromFiles', to_float32=True, img_scale=(1280, 1920)), + dict( + type='LoadOccGTFromFileWaymo', + data_root=occ_val_gt_data_root, + use_larger=use_larger, + crop_x=False, + use_infov_mask=use_infov_mask, + use_camera_mask=use_camera_mask, + use_lidar_mask=use_lidar_mask, + FREE_LABEL=FREE_LABEL, + num_classes=num_classes, + ), + dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), + dict(type='PadMultiViewImage', size_divisor=32), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1, 1), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict( + 
type='DefaultFormatBundle3D', class_names=class_names, with_label=False + ), + dict( + type='CustomCollect3D', + keys=['img', 'voxel_semantics', 'valid_mask'], + meta_keys=[ + 'filename', + 'pts_filename', + 'sample_idx', + 'scene_token', + 'ori_shape', + 'img_shape', + 'pad_shape', + 'lidar2img', + 'sensor2ego', + 'cam_intrinsic', + 'ego2global', + ], + ), + ], + ), +] + +# class CustomWaymoDataset_T +data = dict( + samples_per_gpu=1, + workers_per_gpu=16, + train=dict( + type=dataset_type, + data_root=data_root, + load_interval=load_interval, + num_views=num_views, + split='training', + ann_file=ann_file, + pose_file=pose_file, + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + history_len=queue_length, + input_sample_policy=input_sample_policy, + box_type_3d='LiDAR', + ), + val=dict( + type=dataset_type, + data_root=data_root, + load_interval=test_interval, + split='training', + ann_file=val_ann_file, + pose_file=val_pose_file, + num_views=num_views, + pipeline=test_pipeline, + test_mode=True, + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + # below are evaluation parameters + use_CDist=use_CDist, + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + CLASS_NAMES=CLASS_NAMES, + ), + test=dict( + type=dataset_type, + data_root=data_root, + load_interval=test_interval, + split='training', + num_views=num_views, + ann_file=val_ann_file, + pose_file=val_pose_file, + pipeline=test_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + history_len=queue_length, + box_type_3d='LiDAR', + # below are evaluation parameters + use_CDist=use_CDist, + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + CLASS_NAMES=CLASS_NAMES, + ), + shuffler_sampler=dict(type='DistributedGroupSampler'), + nonshuffler_sampler=dict(type='DistributedSampler'), +) +optimizer = dict( + type='AdamW', + lr=4e-4, + paramwise_cfg=dict( + custom_keys={ + 'img_backbone': dict(lr_mult=0.1), + } + ), + weight_decay=0.01, +) + +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +lr_config = dict( + policy='CosineAnnealing', + warmup='linear', + warmup_iters=100, + warmup_ratio=1.0 / 3, + min_lr_ratio=1e-3, +) + +evaluation = dict(interval=total_epochs, pipeline=test_pipeline) +runner = dict(type='EpochBasedRunner', max_epochs=total_epochs) +load_from = 'ckpts/r101_dcn_fcos3d_pretrain.pth' + +log_config = dict( + interval=50, hooks=[dict(type='TextLoggerHook'), dict(type='TensorboardLoggerHook')] +) +checkpoint_config = dict(interval=1) +find_unused_parameters = True diff --git a/projects/configs/cvtocc/solofusion_waymo.py b/projects/configs/cvtocc/solofusion_waymo.py new file mode 100644 index 0000000..42c282b --- /dev/null +++ b/projects/configs/cvtocc/solofusion_waymo.py @@ -0,0 +1,477 @@ +############################################################################### +# Training Details + +_base_ = ['../_base_/default_runtime.py'] +plugin = True +plugin_dir = 'projects/mmdet3d_plugin/' + +work_dir = None +resume_optimizer = False + +# By default, 3D detection datasets randomly choose another sample if there is +# no GT object in the current sample. This does not make sense when doing +# sequential sampling of frames, so we disable it. +filter_empty_gt = False + +# Intermediate Checkpointing to save GPU memory. 
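# with_cp is meant to toggle gradient checkpointing in modules that accept it:
# activations are recomputed during the backward pass instead of being stored,
# trading compute for GPU memory. A minimal sketch of the usual mmcv-style
# backbone pattern (illustrative only, not the exact backbone code):
#
#   import torch.utils.checkpoint as cp
#
#   def forward(self, x):
#       if self.with_cp and x.requires_grad:
#           return cp.checkpoint(self._inner_forward, x)
#       return self._inner_forward(x)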
+with_cp = False + +############################################################################### +# High-level Model & Training Details + +base_bev_channels = 80 + +# Long-Term Fusion Parameters +input_sample_policy = { + "type": "normal", +} + +do_history = True +history_cat_conv_out_channels = 160 +history_cat_num = 6 +history_queue_length = 30 +queue_length = history_queue_length + 1 +if do_history: + bev_encoder_in_channels = history_cat_conv_out_channels +else: + bev_encoder_in_channels = base_bev_channels + +# Short-Term Fusion Parameters +do_history_stereo_fusion = True +stereo_out_feats = 64 +history_stereo_prev_step = 5 +stereo_sampling_num = 7 + +# Loss Weights +depth_loss_weight = 3.0 +velocity_code_weight = 0.2 + +############################################################################### +# General Dataset & Augmentation Details. + +point_cloud_range = [-40, -40, -1.0, 40, 40, 5.4] +voxel_size = [0.4, 0.4, 0.4] +grid_config = { + 'xbound': [-40, 40, 0.4], + 'ybound': [-40, 40, 0.4], + 'zbound': [-10.0, 10.0, 20.0], + 'dbound': [2.0, 58.0, 0.5], +} + +num_classes = 16 +img_norm_cfg = dict(mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +class_names = [ + 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', + 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' +] +# waymo +CLASS_NAMES = [ + 'GO', + 'TYPE_VEHICLE', + 'TYPE_PEDESTRIAN', + 'TYPE_SIGN', + 'TYPE_BICYCLIST', + 'TYPE_TRAFFIC_LIGHT', + 'TYPE_POLE', + 'TYPE_CONSTRUCTION_CONE', + 'TYPE_BICYCLE', + 'TYPE_MOTORCYCLE', + 'TYPE_BUILDING', + 'TYPE_VEGETATION', + 'TYPE_TREE_TRUNK', + 'TYPE_ROAD', + 'TYPE_WALKABLE', + 'TYPE_FREE', +] + +data_config = { + 'Ncams': 5, + 'src_size': (1280, 1920), + 'scales': [0.5], + 'input_size': (640, 960), +} + +use_infov_mask = True +use_lidar_mask = False +use_camera_mask = True +FREE_LABEL = 23 +class_weight_multiclass = [ + 21.996729830048952, + 7.504469780801267, + 10.597629961083673, + 12.18107968968811, + 15.143940258446506, + 13.035521328502758, + 9.861234292376812, + 13.64431851057796, + 15.121236434460473, + 21.996729830048952, + 6.201671013759701, + 5.7420517938838325, + 9.768712859518626, + 3.4607400626606317, + 4.152268220983671, + 1.000000000000000, +] +bev_h_ = 200 +bev_w_ = 200 +bev_z_ = 1 +############################################################################### +# Set-up the model. + +model = dict( + type='SOLOFusion', + input_sample_policy=input_sample_policy, + # Long-Term Fusion + do_history=do_history, + history_cat_num=history_cat_num, + history_queue_length=history_queue_length, + history_cat_conv_out_channels=history_cat_conv_out_channels, + # Short-Term Fusion + do_history_stereo_fusion=do_history_stereo_fusion, + history_stereo_prev_step=history_stereo_prev_step, + FREE_LABEL=FREE_LABEL, + num_classes=num_classes, + img_backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN2d', requires_grad=False), + norm_eval=True, + style='caffe', + dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), + stage_with_dcn=(False, False, True, True)), + img_neck=dict( + type='SECONDFPN_solo', + in_channels=[256, 512, 1024, 2048], + upsample_strides=[0.25, 0.5, 1, 2], + out_channels=[128, 128, 128, 128]), + # A separate, smaller neck for generating stereo features. Format is + # similar to MVS works. 
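# Rough idea of how these stereo features are used (an approximation of
# SOLOFusion's short-term fusion; the exact logic lives in
# ViewTransformerSOLOFusion): features from the current frame and the frame
# history_stereo_prev_step steps back are compared over stereo_sampling_num
# candidate depths per pixel, and the resulting matching cost sharpens the
# depth distribution predicted by the view transformer below.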
+ stereo_neck=dict( + type='SECONDFPN_solo', + in_channels=[256, 512, 1024, 2048], + upsample_strides=[1, 2, 4, 8], + out_channels=[stereo_out_feats, + stereo_out_feats, + stereo_out_feats, + stereo_out_feats], + final_conv_feature_dim=stereo_out_feats), + # 2D -> BEV Image View Transformer. + img_view_transformer=dict(type='ViewTransformerSOLOFusion', + do_history_stereo_fusion=do_history_stereo_fusion, + stereo_sampling_num=stereo_sampling_num, + loss_depth_weight=depth_loss_weight, + grid_config=grid_config, + data_config=data_config, + numC_Trans=base_bev_channels, + use_bev_pool=False, + extra_depth_net=dict(type='ResNetForBEVDet_solo', + numC_input=256, + num_layer=[3,], + num_channels=[256,], + stride=[1,]) + ), + # Pre-processing of BEV features before using Long-Term Fusion + pre_process = dict(type='ResNetForBEVDet_solo',numC_input=base_bev_channels, + num_layer=[2,], num_channels=[base_bev_channels,], + stride=[1,], backbone_output_ids=[0,]), + # After using long-term fusion, process BEV for detection head. + img_bev_encoder_backbone = dict(type='ResNetForBEVDet_solo', + numC_input=bev_encoder_in_channels, + num_channels=[base_bev_channels * 2, + base_bev_channels * 4, + base_bev_channels * 8], + backbone_output_ids=[-1, 0, 1, 2]), + img_bev_encoder_neck = dict(type='SECONDFPN_solo', + in_channels=[bev_encoder_in_channels, + 160, 320, 640], + upsample_strides=[1, 2, 4, 8], + out_channels=[64, 64, 64, 64]), + # occ head + pts_bbox_head=dict( + type='SOLOOccHeadWaymo', + FREE_LABEL=FREE_LABEL, + embed_dims=256, + bev_z=bev_z_, + bev_w=bev_w_, + bev_h=bev_h_, + total_z=16, + num_classes=16, + use_infov_mask=use_infov_mask, + use_lidar_mask=use_lidar_mask, + use_camera_mask=use_camera_mask, + loss_occ=dict( + ceohem=dict( + type='CrossEntropyOHEMLoss', + class_weight=class_weight_multiclass, + use_sigmoid=False, + use_mask=False, + loss_weight=1.0, + top_ratio=0.2, + top_weight=4.0), + ) + ), + # model training and testing settings + train_cfg=dict( + pts=dict( + point_cloud_range=point_cloud_range, + grid_size=[1024, 1024, 40], + voxel_size=voxel_size, + out_size_factor=8, + dense_reg=1, + gaussian_overlap=0.1, + max_objs=500, + min_radius=2, + code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + velocity_code_weight, velocity_code_weight])), + test_cfg=dict( + pts=dict( + pc_range=point_cloud_range[:2], + post_center_limit_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], + max_per_img=500, + max_pool_nms=False, + min_radius=[4, 12, 10, 1, 0.85, 0.175], + score_threshold=0.1, + out_size_factor=8, + voxel_size=voxel_size[:2], + # nms_type='circle', + pre_max_size=1000, + post_max_size=83, + # nms_thr=0.2, + # Scale-NMS + nms_type=['rotate', 'rotate', 'rotate', 'circle', 'rotate', + 'rotate'], + nms_thr=[0.2, 0.2, 0.2, 0.2, 0.2, 0.5], + nms_rescale_factor=[1.0, [0.7, 0.7], [0.4, 0.55], 1.1, [1.0, 1.0], + [4.5, 9.0]] + ))) + +############################################################################### +# Set-up the dataset + +dataset_type = 'CustomWaymoDataset_T' +file_client_args = dict(backend='disk') + +PUJIANG = False +use_larger = True +if PUJIANG: + # pujiang 0.4/0.1 + data_root = '/mnt/petrelfs/zhaohang.p/mmdetection/data/waymo/kitti_format/' + occ_data_root = '/mnt/petrelfs/zhaohang.p/dataset/waymo_occV2/' + +else: + # MARS 0.4/0.1 + data_root = '/public/MARS/datasets/waymo_v1.3.1_untar/kitti_format/' # replace with your won waymo image path + occ_data_root = '/public/MARS/datasets/waymo_occV2/' # replace with your won occ gt path + +ann_file = occ_data_root + 
'waymo_infos_train.pkl' +val_ann_file = occ_data_root + 'waymo_infos_val.pkl' +pose_file = occ_data_root + 'cam_infos.pkl' +val_pose_file = occ_data_root + 'cam_infos_vali.pkl' +if use_larger: # use 0.4 voxel size + occ_gt_data_root = occ_data_root + 'voxel04/training/' + occ_val_gt_data_root = occ_data_root + 'voxel04/validation/' +else: + occ_gt_data_root = occ_data_root + 'voxel01/training/' + occ_val_gt_data_root = occ_data_root + 'voxel01/validation/' + +train_pipeline = [ + dict(type='MyLoadMultiViewImageFromFiles', to_float32=True, img_scale=data_config['src_size']), + dict(type='LoadOccGTFromFileWaymo', + data_root=occ_gt_data_root, + use_larger=use_larger, + crop_x=False, + use_infov_mask=use_infov_mask, + use_camera_mask=use_camera_mask, + use_lidar_mask=use_lidar_mask, + FREE_LABEL=FREE_LABEL, + num_classes=num_classes, + ), + dict(type='RandomScaleImageMultiViewImage', scales=data_config['scales']), + dict(type='PadMultiViewImage', size_divisor=32), + # dict(type='PointToMultiViewDepth', grid_config=grid_config), # For fair comparison, we remove depth supervision in SOLOFusion + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='CustomCollect3D', + keys=['img','voxel_semantics', 'valid_mask'], + meta_keys=('filename', 'pts_filename', + 'sample_idx', 'prev_idx', 'next_idx', 'scene_token', + 'pad_shape', 'ori_shape', 'img_shape', + 'start_of_sequence', + 'sequence_group_idx', + 'lidar2img','ego2lidar', 'depth2img', 'cam2img', 'cam_intrinsic','lidar2cam', + 'ego2global', 'can_bus', + 'rots', 'trans', 'intrins', 'post_trans', 'post_rots', + 'global_to_curr_lidar_rt', + # Below are useless now, but may be used if add data augmentation + 'flip', 'pcd_horizontal_flip', 'pcd_vertical_flip', + 'pcd_trans', 'pcd_scale_factor', 'pcd_rotation', + 'transformation_3d_flow', + ), + ), +] + +test_pipeline = [ + dict(type='MyLoadMultiViewImageFromFiles', to_float32=True, img_scale=data_config['src_size']), + dict(type='LoadOccGTFromFileWaymo', + data_root=occ_val_gt_data_root, + use_larger=use_larger, + crop_x=False, + use_infov_mask=use_infov_mask, + use_camera_mask=use_camera_mask, + use_lidar_mask=use_lidar_mask, + FREE_LABEL=FREE_LABEL, + num_classes=num_classes, + ), + dict(type='RandomScaleImageMultiViewImage', scales=data_config['scales']), + dict(type='PadMultiViewImage', size_divisor=32), + dict( + type='MultiScaleFlipAug3D', + img_scale=(1, 1), + pts_scale_ratio=1, + flip=False, + transforms=[ + dict(type='DefaultFormatBundle3D', class_names=class_names), + dict(type='CustomCollect3D', + keys=['img', 'voxel_semantics', 'valid_mask'], + meta_keys=('filename', 'pts_filename', + 'sample_idx', 'prev_idx', 'next_idx', 'scene_token', + 'pad_shape', 'ori_shape', 'img_shape', + 'start_of_sequence', + 'sequence_group_idx', + 'lidar2img','ego2lidar', 'depth2img', 'cam2img', 'cam_intrinsic','lidar2cam', + 'ego2global', 'can_bus', + 'rots', 'trans', 'intrins', 'post_trans', 'post_rots', + 'global_to_curr_lidar_rt', + # Below are useless now, but may be used if add data augmentation + 'flip', 'pcd_horizontal_flip', 'pcd_vertical_flip', + 'pcd_trans', 'pcd_scale_factor', 'pcd_rotation', + 'transformation_3d_flow', + ), + ) + ] + ) +] + +input_modality = dict( + use_lidar=False, + use_camera=True, + use_radar=False, + use_map=False, + use_external=False +) + +load_interval = 1 +test_interval = 1 + +use_CDist=False +data = dict( + samples_per_gpu=1, + workers_per_gpu=16, + train=dict( + type=dataset_type, + data_root=data_root, + load_interval=load_interval, + 
num_views=data_config['Ncams'], + split='training', + ann_file=ann_file, + pose_file=pose_file, + pipeline=train_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + history_len=queue_length, + input_sample_policy=input_sample_policy, + withimage=True, # For streaming input. Do not use `union2one` function in class `CustomWaymoDataset_T` + box_type_3d='LiDAR', + use_streaming=True, + # speed_mode=None, + # max_interval=None, + # min_interval=None, + # prev_only=None, + # fix_direction=None, + # img_info_prototype='bevdet', + # use_sequence_group_flag=True, + # sequences_split_num=1, + # filter_empty_gt=filter_empty_gt, + ), + val=dict(type=dataset_type, + data_root=data_root, + pipeline=test_pipeline, + load_interval=test_interval, + split='training', + ann_file=val_ann_file, + pose_file=val_pose_file, + num_views=data_config['Ncams'], + test_mode=True, + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + # below are evaluation parameters + use_CDist=use_CDist, + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + CLASS_NAMES=CLASS_NAMES, + # img_info_prototype='bevdet', + # use_sequence_group_flag=True, + # sequences_split_num=1 + ), + test=dict(type=dataset_type, + data_root=data_root, + load_interval=test_interval, + split='training', + num_views=data_config['Ncams'], + ann_file=val_ann_file, + pose_file=val_pose_file, + pipeline=test_pipeline, + classes=class_names, + modality=input_modality, + test_mode=False, + history_len=queue_length, + box_type_3d='LiDAR', + # below are evaluation parameters + use_CDist=use_CDist, + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + CLASS_NAMES=CLASS_NAMES, + # img_info_prototype='bevdet', + # use_sequence_group_flag=True, + # sequences_split_num=1 + ), + shuffler_sampler=dict(type='DistributedGroupSampler'), + nonshuffler_sampler=dict(type='DistributedSampler'), +) + +# ############################################################################### +# # Optimizer & Training + +optimizer = dict( + type='AdamW', + lr=4e-4, + paramwise_cfg=dict(custom_keys={'img_backbone': dict(lr_mult=0.1),}), + weight_decay=0.01) + +optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) +lr_config = dict( + policy='CosineAnnealing', + warmup='linear', + warmup_iters=100, + warmup_ratio=1.0 / 3, + min_lr_ratio=1e-3) + +full_waymo_dataset_sample_number = 158081 +num_gpus = 8 # need to be changed manually. 
It only affects the training schedule +batch_size = 1 +DATASET_LENGTH = full_waymo_dataset_sample_number // load_interval +num_iters_per_epoch = DATASET_LENGTH // (num_gpus * batch_size) +total_epochs = 8 +total_num_of_iters = total_epochs * num_iters_per_epoch +evaluation = dict(interval=total_num_of_iters, pipeline=test_pipeline) +runner = dict(type='IterBasedRunner', max_iters=total_num_of_iters) +load_from = 'ckpts/r101_dcn_fcos3d_pretrain.pth' + +log_config = dict(interval=50,hooks=[dict(type='TextLoggerHook'),dict(type='TensorboardLoggerHook')]) +checkpoint_config = dict(interval=num_iters_per_epoch) +find_unused_parameters=True diff --git a/projects/mmdet3d_plugin/__init__.py b/projects/mmdet3d_plugin/__init__.py index a10cdfb..3c0f98c 100644 --- a/projects/mmdet3d_plugin/__init__.py +++ b/projects/mmdet3d_plugin/__init__.py @@ -9,3 +9,5 @@ from .models.utils import * from .models.opt.adamw import AdamW2 from .bevformer import * +from .models.necks import CustomFPN +from .models.necks import LSSViewTransformer diff --git a/projects/mmdet3d_plugin/bevformer/__init__.py b/projects/mmdet3d_plugin/bevformer/__init__.py index 98d6e7e..333b1ba 100644 --- a/projects/mmdet3d_plugin/bevformer/__init__.py +++ b/projects/mmdet3d_plugin/bevformer/__init__.py @@ -4,3 +4,4 @@ from .modules import * from .runner import * from .hooks import * +from .loss import * \ No newline at end of file diff --git a/projects/mmdet3d_plugin/bevformer/apis/mmdet_train.py b/projects/mmdet3d_plugin/bevformer/apis/mmdet_train.py index e57bd22..40e1aaa 100644 --- a/projects/mmdet3d_plugin/bevformer/apis/mmdet_train.py +++ b/projects/mmdet3d_plugin/bevformer/apis/mmdet_train.py @@ -34,11 +34,8 @@ def custom_train_detector(model, eval_model=None, meta=None): logger = get_root_logger(cfg.log_level) - - # prepare data loaders dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset] - #assert len(dataset)==1s if 'imgs_per_gpu' in cfg.data: logger.warning('"imgs_per_gpu" is deprecated in MMDet V2.0. 
' 'Please use "samples_per_gpu" instead') @@ -53,17 +50,25 @@ def custom_train_detector(model, f'{cfg.data.imgs_per_gpu} in this experiments') cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu + if cfg.runner.type == 'IterBasedRunner': + use_streaming = True + warnings.warn('You are using IterBasedRunner and streaming input', UserWarning) + else: + use_streaming = False + warnings.warn(f'You are using {cfg.runner.type}', UserWarning) + data_loaders = [ build_dataloader( ds, cfg.data.samples_per_gpu, cfg.data.workers_per_gpu, - # cfg.gpus will be ignored if distributed len(cfg.gpu_ids), dist=distributed, seed=cfg.seed, - shuffler_sampler=cfg.data.shuffler_sampler, # dict(type='DistributedGroupSampler'), - nonshuffler_sampler=cfg.data.nonshuffler_sampler, # dict(type='DistributedSampler'), + shuffler_sampler=cfg.data.shuffler_sampler, + nonshuffler_sampler=cfg.data.nonshuffler_sampler, + use_streaming=use_streaming, + cfg=cfg, ) for ds in dataset ] @@ -102,9 +107,10 @@ def custom_train_detector(model, warnings.warn( 'config is now expected to have a `runner` section, ' 'please set `runner` in your config.', UserWarning) - else: - if 'total_epochs' in cfg: - assert cfg.total_epochs == cfg.runner.max_epochs + # else: + # if 'total_epochs' in cfg: + # assert cfg.total_epochs == cfg.runner.max_epochs + if eval_model is not None: runner = build_runner( cfg.runner, diff --git a/projects/mmdet3d_plugin/bevformer/apis/test.py b/projects/mmdet3d_plugin/bevformer/apis/test.py index cd507e4..663182d 100644 --- a/projects/mmdet3d_plugin/bevformer/apis/test.py +++ b/projects/mmdet3d_plugin/bevformer/apis/test.py @@ -42,6 +42,7 @@ def custom_encode_mask_results(mask_results): dtype='uint8'))[0]) # encoded with RLE return [encoded_mask_results] + def custom_multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False): """Test model with multiple gpus. This method tests model with multiple gpus and collects the results @@ -61,6 +62,7 @@ def custom_multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False): model.eval() bbox_results = [] mask_results = [] + occ_results = [] dataset = data_loader.dataset rank, world_size = get_dist_info() if rank == 0: @@ -70,7 +72,6 @@ def custom_multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False): for i, data in enumerate(data_loader): with torch.no_grad(): result = model(return_loss=False, rescale=True, **data) - # encode mask results if isinstance(result, dict): if 'bbox_results' in result.keys(): bbox_result = result['bbox_results'] @@ -80,9 +81,17 @@ def custom_multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False): mask_result = custom_encode_mask_results(result['mask_results']) mask_results.extend(mask_result) have_mask = True + if 'voxel_semantics_preds' in result.keys() and result['voxel_semantics_preds'] is not None: + batch_size = result['voxel_semantics_preds'].shape[0] + occ_results.extend([result]) + if 'count_matrix' in result.keys(): + batch_size = 1 + occ_results.extend([result]) # for new occ results else: - batch_size = len(result) - bbox_results.extend(result) + batch_size = 1 + occ_results.extend([result]) + # batch_size = len(result) + # bbox_results.extend(result) #if isinstance(result[0], tuple): # assert False, 'this code is for instance segmentation, which our code will not utilize.' 
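The modified test loop above hands its per-rank occ_results list to collect_results_cpu (see the next hunk), which follows the usual MMDetection gather pattern: every rank serialises its partial list into a shared temporary directory, ranks synchronise, and rank 0 reloads the parts and re-orders them back to dataset order. A simplified sketch of that pattern, assuming an already-initialised distributed group (the real helper also broadcasts a freshly created tmpdir between ranks):

    import os.path as osp
    import mmcv
    import torch.distributed as dist
    from mmcv.runner import get_dist_info

    def gather_to_rank0(part, size, tmpdir):
        rank, world_size = get_dist_info()
        mmcv.mkdir_or_exist(tmpdir)
        mmcv.dump(part, osp.join(tmpdir, f'part_{rank}.pkl'))  # each rank writes its share
        dist.barrier()
        if rank != 0:
            return None
        parts = [mmcv.load(osp.join(tmpdir, f'part_{i}.pkl')) for i in range(world_size)]
        ordered = []
        for res in zip(*parts):  # interleave to undo the distributed sampler's striping
            ordered.extend(list(res))
        return ordered[:size]    # drop samples duplicated by sampler padding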
@@ -101,16 +110,16 @@ def custom_multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False): else: mask_results = None else: - bbox_results = collect_results_cpu(bbox_results, len(dataset), tmpdir) - tmpdir = tmpdir+'_mask' if tmpdir is not None else None - if have_mask: - mask_results = collect_results_cpu(mask_results, len(dataset), tmpdir) - else: - mask_results = None + # bbox_results = collect_results_cpu(bbox_results, len(dataset), tmpdir) + # tmpdir = tmpdir+'_mask' if tmpdir is not None else None + # if have_mask: + # mask_results = collect_results_cpu(mask_results, len(dataset), tmpdir) + # else: + # mask_results = None + tmpdir = tmpdir + '_occ' if tmpdir is not None else None + occ_results = collect_results_cpu(occ_results, len(dataset), tmpdir) - if mask_results is None: - return bbox_results - return {'bbox_results': bbox_results, 'mask_results': mask_results} + return occ_results def collect_results_cpu(result_part, size, tmpdir=None): @@ -159,6 +168,5 @@ def collect_results_cpu(result_part, size, tmpdir=None): shutil.rmtree(tmpdir) return ordered_results - def collect_results_gpu(result_part, size): collect_results_cpu(result_part, size) \ No newline at end of file diff --git a/projects/mmdet3d_plugin/bevformer/apis/test_occ.py b/projects/mmdet3d_plugin/bevformer/apis/test_occ.py index 107b953..55c4e2b 100644 --- a/projects/mmdet3d_plugin/bevformer/apis/test_occ.py +++ b/projects/mmdet3d_plugin/bevformer/apis/test_occ.py @@ -59,21 +59,25 @@ def custom_multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False): list: The prediction results. """ model.eval() - bbox_results = [] + pred_results = [] mask_results = [] + gt_results=[] batch_size=1 dataset = data_loader.dataset rank, world_size = get_dist_info() if rank == 0: prog_bar = mmcv.ProgressBar(len(dataset)) time.sleep(2) # This line can prevent deadlock problem in some cases. - have_mask = False + for i, data in enumerate(data_loader): with torch.no_grad(): - result = model(return_loss=False, rescale=True, **data) + pred = model(return_loss=False, rescale=True, **data) + # print(type(data)) # encode mask results - - bbox_results.extend(result) + voxel_semantics,mask_lidar,mask_camera=data['voxel_semantics'],data['mask_lidar'],data['mask_camera'] + pred_results.append(pred) + mask_results.append(mask_camera) + gt_results.append(voxel_semantics) #if isinstance(result[0], tuple): # assert False, 'this code is for instance segmentation, which our code will not utilize.' 
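The voxel_semantics / mask_camera pairs collected in the loop above are what the occupancy metrics are typically computed under when use_camera_mask is enabled: only voxels visible from the cameras are scored. A small sketch of a masked per-class IoU under that assumption (illustrative only, not the project's occ_metrics implementation):

    import numpy as np

    def masked_per_class_iou(pred, gt, mask, num_classes):
        # keep only voxels inside the evaluation mask
        keep = mask.astype(bool)
        pred, gt = pred[keep], gt[keep]
        ious = []
        for c in range(num_classes):
            inter = np.logical_and(pred == c, gt == c).sum()
            union = np.logical_or(pred == c, gt == c).sum()
            ious.append(inter / union if union > 0 else float('nan'))
        return ious  # mean of the non-NaN entries gives the masked mIoU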
@@ -85,19 +89,19 @@ def custom_multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False): prog_bar.update() # collect results from all ranks - if gpu_collect: - bbox_results = collect_results_gpu(bbox_results, len(dataset)) - if have_mask: - mask_results = collect_results_gpu(mask_results, len(dataset)) - else: - mask_results = None - else: - bbox_results = collect_results_cpu(bbox_results, len(dataset), tmpdir) - tmpdir = tmpdir+'_mask' if tmpdir is not None else None - if have_mask: - mask_results = collect_results_cpu(mask_results, len(dataset), tmpdir) - else: - mask_results = None + # if gpu_collect: + # bbox_results = collect_results_gpu(bbox_results, len(dataset)) + # if have_mask: + # mask_results = collect_results_gpu(mask_results, len(dataset)) + # else: + # mask_results = None + # else: + # bbox_results = collect_results_cpu(bbox_results, len(dataset), tmpdir) + # tmpdir = tmpdir+'_mask' if tmpdir is not None else None + # if have_mask: + # mask_results = collect_results_cpu(mask_results, len(dataset), tmpdir) + # else: + # mask_results = None if mask_results is None: return bbox_results @@ -150,6 +154,5 @@ def collect_results_cpu(result_part, size, tmpdir=None): shutil.rmtree(tmpdir) return ordered_results - def collect_results_gpu(result_part, size): collect_results_cpu(result_part, size) \ No newline at end of file diff --git a/projects/mmdet3d_plugin/bevformer/dense_heads/__init__.py b/projects/mmdet3d_plugin/bevformer/dense_heads/__init__.py index 7a21939..fbe2498 100644 --- a/projects/mmdet3d_plugin/bevformer/dense_heads/__init__.py +++ b/projects/mmdet3d_plugin/bevformer/dense_heads/__init__.py @@ -1,3 +1,3 @@ -from .bevformer_head import BEVFormerHead -from .occformer_head import OccFormerHead -from .occformer_head_3d import OccFormerHead3D \ No newline at end of file +from .occformer_head import CVTOccHead +from .occformer_head_waymo import CVTOccHeadWaymo +from .solo_head_occ_waymo import SOLOOccHeadWaymo \ No newline at end of file diff --git a/projects/mmdet3d_plugin/bevformer/dense_heads/bevformer_head.py b/projects/mmdet3d_plugin/bevformer/dense_heads/bevformer_head.py deleted file mode 100644 index 91d38d1..0000000 --- a/projects/mmdet3d_plugin/bevformer/dense_heads/bevformer_head.py +++ /dev/null @@ -1,523 +0,0 @@ -# --------------------------------------------- -# Copyright (c) OpenMMLab. All rights reserved. -# --------------------------------------------- -# Modified by Zhiqi Li -# --------------------------------------------- - -import copy -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import Linear, bias_init_with_prob -from mmcv.utils import TORCH_VERSION, digit_version - -from mmdet.core import (multi_apply, multi_apply, reduce_mean) -from mmdet.models.utils.transformer import inverse_sigmoid -from mmdet.models import HEADS -from mmdet.models.dense_heads import DETRHead -from mmdet3d.core.bbox.coders import build_bbox_coder -from projects.mmdet3d_plugin.core.bbox.util import normalize_bbox -from mmcv.cnn.bricks.transformer import build_positional_encoding -from mmcv.runner import force_fp32, auto_fp16 -from projects.mmdet3d_plugin.models.utils.bricks import run_time -import numpy as np -import mmcv -import cv2 as cv -from projects.mmdet3d_plugin.models.utils.visual import save_tensor - - -@HEADS.register_module() -class BEVFormerHead(DETRHead): - """Head of Detr3D. - Args: - with_box_refine (bool): Whether to refine the reference points - in the decoder. Defaults to False. 
- as_two_stage (bool) : Whether to generate the proposal from - the outputs of encoder. - transformer (obj:`ConfigDict`): ConfigDict is used for building - the Encoder and Decoder. - bev_h, bev_w (int): spatial shape of BEV queries. - """ - - def __init__(self, - *args, - with_box_refine=False, - as_two_stage=False, - transformer=None, - bbox_coder=None, - num_cls_fcs=2, - code_weights=None, - bev_h=30, - bev_w=30, - **kwargs): - - self.bev_h = bev_h - self.bev_w = bev_w - self.fp16_enabled = False - - self.with_box_refine = with_box_refine - self.as_two_stage = as_two_stage - if self.as_two_stage: - transformer['as_two_stage'] = self.as_two_stage - if 'code_size' in kwargs: - self.code_size = kwargs['code_size'] - else: - self.code_size = 10 - if code_weights is not None: - self.code_weights = code_weights - else: - self.code_weights = [1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2] - - self.bbox_coder = build_bbox_coder(bbox_coder) - self.pc_range = self.bbox_coder.pc_range - self.real_w = self.pc_range[3] - self.pc_range[0] - self.real_h = self.pc_range[4] - self.pc_range[1] - self.num_cls_fcs = num_cls_fcs - 1 - super(BEVFormerHead, self).__init__( - *args, transformer=transformer, **kwargs) - self.code_weights = nn.Parameter(torch.tensor( - self.code_weights, requires_grad=False), requires_grad=False) - - def _init_layers(self): - """Initialize classification branch and regression branch of head.""" - cls_branch = [] - for _ in range(self.num_reg_fcs): - cls_branch.append(Linear(self.embed_dims, self.embed_dims)) - cls_branch.append(nn.LayerNorm(self.embed_dims)) - cls_branch.append(nn.ReLU(inplace=True)) - cls_branch.append(Linear(self.embed_dims, self.cls_out_channels)) - fc_cls = nn.Sequential(*cls_branch) - - reg_branch = [] - for _ in range(self.num_reg_fcs): - reg_branch.append(Linear(self.embed_dims, self.embed_dims)) - reg_branch.append(nn.ReLU()) - reg_branch.append(Linear(self.embed_dims, self.code_size)) - reg_branch = nn.Sequential(*reg_branch) - - def _get_clones(module, N): - return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) - - # last reg_branch is used to generate proposal from - # encode feature map when as_two_stage is True. - num_pred = (self.transformer.decoder.num_layers + 1) if \ - self.as_two_stage else self.transformer.decoder.num_layers - - if self.with_box_refine: - self.cls_branches = _get_clones(fc_cls, num_pred) - self.reg_branches = _get_clones(reg_branch, num_pred) - else: - self.cls_branches = nn.ModuleList( - [fc_cls for _ in range(num_pred)]) - self.reg_branches = nn.ModuleList( - [reg_branch for _ in range(num_pred)]) - - if not self.as_two_stage: - self.bev_embedding = nn.Embedding( - self.bev_h * self.bev_w, self.embed_dims) - self.query_embedding = nn.Embedding(self.num_query, - self.embed_dims * 2) - - def init_weights(self): - """Initialize weights of the DeformDETR head.""" - self.transformer.init_weights() - if self.loss_cls.use_sigmoid: - bias_init = bias_init_with_prob(0.01) - for m in self.cls_branches: - nn.init.constant_(m[-1].bias, bias_init) - - @auto_fp16(apply_to=('mlvl_feats')) - def forward(self, mlvl_feats, img_metas, prev_bev=None, only_bev=False): - """Forward function. - Args: - mlvl_feats (tuple[Tensor]): Features from the upstream - network, each is a 5D-tensor with shape - (B, N, C, H, W). - prev_bev: previous bev featues - only_bev: only compute BEV features with encoder. - Returns: - all_cls_scores (Tensor): Outputs from the classification head, \ - shape [nb_dec, bs, num_query, cls_out_channels]. 
Note \ - cls_out_channels should includes background. - all_bbox_preds (Tensor): Sigmoid outputs from the regression \ - head with normalized coordinate format (cx, cy, w, l, cz, h, theta, vx, vy). \ - Shape [nb_dec, bs, num_query, 9]. - """ - - bs, num_cam, _, _, _ = mlvl_feats[0].shape - dtype = mlvl_feats[0].dtype - object_query_embeds = self.query_embedding.weight.to(dtype) - bev_queries = self.bev_embedding.weight.to(dtype) - - bev_mask = torch.zeros((bs, self.bev_h, self.bev_w), - device=bev_queries.device).to(dtype) - bev_pos = self.positional_encoding(bev_mask).to(dtype) - - if only_bev: # only use encoder to obtain BEV features, TODO: refine the workaround - return self.transformer.get_bev_features( - mlvl_feats, - bev_queries, - self.bev_h, - self.bev_w, - grid_length=(self.real_h / self.bev_h, - self.real_w / self.bev_w), - bev_pos=bev_pos, - img_metas=img_metas, - prev_bev=prev_bev, - ) - else: - outputs = self.transformer( - mlvl_feats, - bev_queries, - object_query_embeds, - self.bev_h, - self.bev_w, - grid_length=(self.real_h / self.bev_h, - self.real_w / self.bev_w), - bev_pos=bev_pos, - reg_branches=self.reg_branches if self.with_box_refine else None, # noqa:E501 - cls_branches=self.cls_branches if self.as_two_stage else None, - img_metas=img_metas, - prev_bev=prev_bev - ) - - bev_embed, hs, init_reference, inter_references = outputs - hs = hs.permute(0, 2, 1, 3) - outputs_classes = [] - outputs_coords = [] - for lvl in range(hs.shape[0]): - if lvl == 0: - reference = init_reference - else: - reference = inter_references[lvl - 1] - reference = inverse_sigmoid(reference) - outputs_class = self.cls_branches[lvl](hs[lvl]) - tmp = self.reg_branches[lvl](hs[lvl]) - - # TODO: check the shape of reference - assert reference.shape[-1] == 3 - tmp[..., 0:2] += reference[..., 0:2] - tmp[..., 0:2] = tmp[..., 0:2].sigmoid() - tmp[..., 4:5] += reference[..., 2:3] - tmp[..., 4:5] = tmp[..., 4:5].sigmoid() - tmp[..., 0:1] = (tmp[..., 0:1] * (self.pc_range[3] - - self.pc_range[0]) + self.pc_range[0]) - tmp[..., 1:2] = (tmp[..., 1:2] * (self.pc_range[4] - - self.pc_range[1]) + self.pc_range[1]) - tmp[..., 4:5] = (tmp[..., 4:5] * (self.pc_range[5] - - self.pc_range[2]) + self.pc_range[2]) - - # TODO: check if using sigmoid - outputs_coord = tmp - outputs_classes.append(outputs_class) - outputs_coords.append(outputs_coord) - - outputs_classes = torch.stack(outputs_classes) - outputs_coords = torch.stack(outputs_coords) - - outs = { - 'bev_embed': bev_embed, - 'all_cls_scores': outputs_classes, - 'all_bbox_preds': outputs_coords, - 'enc_cls_scores': None, - 'enc_bbox_preds': None, - } - - return outs - - def _get_target_single(self, - cls_score, - bbox_pred, - gt_labels, - gt_bboxes, - gt_bboxes_ignore=None): - """"Compute regression and classification targets for one image. - Outputs from a single decoder layer of a single feature level are used. - Args: - cls_score (Tensor): Box score logits from a single decoder layer - for one image. Shape [num_query, cls_out_channels]. - bbox_pred (Tensor): Sigmoid outputs from a single decoder layer - for one image, with normalized coordinate (cx, cy, w, h) and - shape [num_query, 4]. - gt_bboxes (Tensor): Ground truth bboxes for one image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (Tensor): Ground truth class indices for one image - with shape (num_gts, ). - gt_bboxes_ignore (Tensor, optional): Bounding boxes - which can be ignored. Default None. - Returns: - tuple[Tensor]: a tuple containing the following for one image. 
- - labels (Tensor): Labels of each image. - - label_weights (Tensor]): Label weights of each image. - - bbox_targets (Tensor): BBox targets of each image. - - bbox_weights (Tensor): BBox weights of each image. - - pos_inds (Tensor): Sampled positive indices for each image. - - neg_inds (Tensor): Sampled negative indices for each image. - """ - - num_bboxes = bbox_pred.size(0) - # assigner and sampler - gt_c = gt_bboxes.shape[-1] - - assign_result = self.assigner.assign(bbox_pred, cls_score, gt_bboxes, - gt_labels, gt_bboxes_ignore) - - sampling_result = self.sampler.sample(assign_result, bbox_pred, - gt_bboxes) - pos_inds = sampling_result.pos_inds - neg_inds = sampling_result.neg_inds - - # label targets - labels = gt_bboxes.new_full((num_bboxes,), - self.num_classes, - dtype=torch.long) - labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] - label_weights = gt_bboxes.new_ones(num_bboxes) - - # bbox targets - bbox_targets = torch.zeros_like(bbox_pred)[..., :gt_c] - bbox_weights = torch.zeros_like(bbox_pred) - bbox_weights[pos_inds] = 1.0 - - # DETR - bbox_targets[pos_inds] = sampling_result.pos_gt_bboxes - return (labels, label_weights, bbox_targets, bbox_weights, - pos_inds, neg_inds) - - def get_targets(self, - cls_scores_list, - bbox_preds_list, - gt_bboxes_list, - gt_labels_list, - gt_bboxes_ignore_list=None): - """"Compute regression and classification targets for a batch image. - Outputs from a single decoder layer of a single feature level are used. - Args: - cls_scores_list (list[Tensor]): Box score logits from a single - decoder layer for each image with shape [num_query, - cls_out_channels]. - bbox_preds_list (list[Tensor]): Sigmoid outputs from a single - decoder layer for each image, with normalized coordinate - (cx, cy, w, h) and shape [num_query, 4]. - gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image - with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels_list (list[Tensor]): Ground truth class indices for each - image with shape (num_gts, ). - gt_bboxes_ignore_list (list[Tensor], optional): Bounding - boxes which can be ignored for each image. Default None. - Returns: - tuple: a tuple containing the following targets. - - labels_list (list[Tensor]): Labels for all images. - - label_weights_list (list[Tensor]): Label weights for all \ - images. - - bbox_targets_list (list[Tensor]): BBox targets for all \ - images. - - bbox_weights_list (list[Tensor]): BBox weights for all \ - images. - - num_total_pos (int): Number of positive samples in all \ - images. - - num_total_neg (int): Number of negative samples in all \ - images. - """ - assert gt_bboxes_ignore_list is None, \ - 'Only supports for gt_bboxes_ignore setting to None.' - num_imgs = len(cls_scores_list) - gt_bboxes_ignore_list = [ - gt_bboxes_ignore_list for _ in range(num_imgs) - ] - - (labels_list, label_weights_list, bbox_targets_list, - bbox_weights_list, pos_inds_list, neg_inds_list) = multi_apply( - self._get_target_single, cls_scores_list, bbox_preds_list, - gt_labels_list, gt_bboxes_list, gt_bboxes_ignore_list) - num_total_pos = sum((inds.numel() for inds in pos_inds_list)) - num_total_neg = sum((inds.numel() for inds in neg_inds_list)) - return (labels_list, label_weights_list, bbox_targets_list, - bbox_weights_list, num_total_pos, num_total_neg) - - def loss_single(self, - cls_scores, - bbox_preds, - gt_bboxes_list, - gt_labels_list, - gt_bboxes_ignore_list=None): - """"Loss function for outputs from a single decoder layer of a single - feature level. 
- Args: - cls_scores (Tensor): Box score logits from a single decoder layer - for all images. Shape [bs, num_query, cls_out_channels]. - bbox_preds (Tensor): Sigmoid outputs from a single decoder layer - for all images, with normalized coordinate (cx, cy, w, h) and - shape [bs, num_query, 4]. - gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image - with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels_list (list[Tensor]): Ground truth class indices for each - image with shape (num_gts, ). - gt_bboxes_ignore_list (list[Tensor], optional): Bounding - boxes which can be ignored for each image. Default None. - Returns: - dict[str, Tensor]: A dictionary of loss components for outputs from - a single decoder layer. - """ - num_imgs = cls_scores.size(0) - cls_scores_list = [cls_scores[i] for i in range(num_imgs)] - bbox_preds_list = [bbox_preds[i] for i in range(num_imgs)] - cls_reg_targets = self.get_targets(cls_scores_list, bbox_preds_list, - gt_bboxes_list, gt_labels_list, - gt_bboxes_ignore_list) - (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, - num_total_pos, num_total_neg) = cls_reg_targets - labels = torch.cat(labels_list, 0) - label_weights = torch.cat(label_weights_list, 0) - bbox_targets = torch.cat(bbox_targets_list, 0) - bbox_weights = torch.cat(bbox_weights_list, 0) - - # classification loss - cls_scores = cls_scores.reshape(-1, self.cls_out_channels) - # construct weighted avg_factor to match with the official DETR repo - cls_avg_factor = num_total_pos * 1.0 + \ - num_total_neg * self.bg_cls_weight - if self.sync_cls_avg_factor: - cls_avg_factor = reduce_mean( - cls_scores.new_tensor([cls_avg_factor])) - - cls_avg_factor = max(cls_avg_factor, 1) - loss_cls = self.loss_cls( - cls_scores, labels, label_weights, avg_factor=cls_avg_factor) - - # Compute the average number of gt boxes accross all gpus, for - # normalization purposes - num_total_pos = loss_cls.new_tensor([num_total_pos]) - num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item() - - # regression L1 loss - bbox_preds = bbox_preds.reshape(-1, bbox_preds.size(-1)) - normalized_bbox_targets = normalize_bbox(bbox_targets, self.pc_range) - isnotnan = torch.isfinite(normalized_bbox_targets).all(dim=-1) - bbox_weights = bbox_weights * self.code_weights - - loss_bbox = self.loss_bbox( - bbox_preds[isnotnan, :10], normalized_bbox_targets[isnotnan, - :10], bbox_weights[isnotnan, :10], - avg_factor=num_total_pos) - if digit_version(TORCH_VERSION) >= digit_version('1.8'): - loss_cls = torch.nan_to_num(loss_cls) - loss_bbox = torch.nan_to_num(loss_bbox) - return loss_cls, loss_bbox - - @force_fp32(apply_to=('preds_dicts')) - def loss(self, - gt_bboxes_list, - gt_labels_list, - preds_dicts, - gt_bboxes_ignore=None, - img_metas=None): - """"Loss function. - Args: - - gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image - with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels_list (list[Tensor]): Ground truth class indices for each - image with shape (num_gts, ). - preds_dicts: - all_cls_scores (Tensor): Classification score of all - decoder layers, has shape - [nb_dec, bs, num_query, cls_out_channels]. - all_bbox_preds (Tensor): Sigmoid regression - outputs of all decode layers. Each is a 4D-tensor with - normalized coordinate format (cx, cy, w, h) and shape - [nb_dec, bs, num_query, 4]. - enc_cls_scores (Tensor): Classification scores of - points on encode feature map , has shape - (N, h*w, num_classes). 
Only be passed when as_two_stage is - True, otherwise is None. - enc_bbox_preds (Tensor): Regression results of each points - on the encode feature map, has shape (N, h*w, 4). Only be - passed when as_two_stage is True, otherwise is None. - gt_bboxes_ignore (list[Tensor], optional): Bounding boxes - which can be ignored for each image. Default None. - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - assert gt_bboxes_ignore is None, \ - f'{self.__class__.__name__} only supports ' \ - f'for gt_bboxes_ignore setting to None.' - - all_cls_scores = preds_dicts['all_cls_scores'] - all_bbox_preds = preds_dicts['all_bbox_preds'] - enc_cls_scores = preds_dicts['enc_cls_scores'] - enc_bbox_preds = preds_dicts['enc_bbox_preds'] - - num_dec_layers = len(all_cls_scores) - device = gt_labels_list[0].device - - gt_bboxes_list = [torch.cat( - (gt_bboxes.gravity_center, gt_bboxes.tensor[:, 3:]), - dim=1).to(device) for gt_bboxes in gt_bboxes_list] - - all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)] - all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)] - all_gt_bboxes_ignore_list = [ - gt_bboxes_ignore for _ in range(num_dec_layers) - ] - - losses_cls, losses_bbox = multi_apply( - self.loss_single, all_cls_scores, all_bbox_preds, - all_gt_bboxes_list, all_gt_labels_list, - all_gt_bboxes_ignore_list) - - loss_dict = dict() - # loss of proposal generated from encode feature map. - if enc_cls_scores is not None: - binary_labels_list = [ - torch.zeros_like(gt_labels_list[i]) - for i in range(len(all_gt_labels_list)) - ] - enc_loss_cls, enc_losses_bbox = \ - self.loss_single(enc_cls_scores, enc_bbox_preds, - gt_bboxes_list, binary_labels_list, gt_bboxes_ignore) - loss_dict['enc_loss_cls'] = enc_loss_cls - loss_dict['enc_loss_bbox'] = enc_losses_bbox - - # loss from the last decoder layer - loss_dict['loss_cls'] = losses_cls[-1] - loss_dict['loss_bbox'] = losses_bbox[-1] - - # loss from other decoder layers - num_dec_layer = 0 - for loss_cls_i, loss_bbox_i in zip(losses_cls[:-1], - losses_bbox[:-1]): - loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i - loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i - num_dec_layer += 1 - return loss_dict - - @force_fp32(apply_to=('preds_dicts')) - def get_bboxes(self, preds_dicts, img_metas, rescale=False): - """Generate bboxes from bbox head predictions. - Args: - preds_dicts (tuple[list[dict]]): Prediction results. - img_metas (list[dict]): Point cloud and image's meta info. - Returns: - list[dict]: Decoded bbox, scores and labels after nms. 
- """ - - preds_dicts = self.bbox_coder.decode(preds_dicts) - - num_samples = len(preds_dicts) - ret_list = [] - for i in range(num_samples): - preds = preds_dicts[i] - bboxes = preds['bboxes'] - - bboxes[:, 2] = bboxes[:, 2] - bboxes[:, 5] * 0.5 - - code_size = bboxes.shape[-1] - bboxes = img_metas[i]['box_type_3d'](bboxes, code_size) - scores = preds['scores'] - labels = preds['labels'] - - ret_list.append([bboxes, scores, labels]) - - return ret_list diff --git a/projects/mmdet3d_plugin/bevformer/dense_heads/occformer_head.py b/projects/mmdet3d_plugin/bevformer/dense_heads/occformer_head.py index 4ebe45e..51e4eda 100644 --- a/projects/mmdet3d_plugin/bevformer/dense_heads/occformer_head.py +++ b/projects/mmdet3d_plugin/bevformer/dense_heads/occformer_head.py @@ -4,266 +4,299 @@ # Modified by Zhiqi Li # --------------------------------------------- -import copy import torch import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import Linear, bias_init_with_prob -from mmcv.utils import TORCH_VERSION, digit_version - -from mmdet.core import (multi_apply, multi_apply, reduce_mean) -from mmdet.models.utils.transformer import inverse_sigmoid -from mmdet.models import HEADS -from mmdet.models.dense_heads import DETRHead -from mmdet3d.core.bbox.coders import build_bbox_coder -from projects.mmdet3d_plugin.core.bbox.util import normalize_bbox from mmcv.cnn.bricks.transformer import build_positional_encoding from mmcv.runner import force_fp32, auto_fp16 -from projects.mmdet3d_plugin.models.utils.bricks import run_time -import numpy as np -import mmcv -import cv2 as cv -from projects.mmdet3d_plugin.models.utils.visual import save_tensor from mmcv.cnn.bricks.transformer import build_positional_encoding +from mmcv.runner import BaseModule, force_fp32 from mmdet.models.utils import build_transformer from mmdet.models.builder import build_loss -from mmcv.runner import BaseModule, force_fp32 +from mmdet.models import HEADS +from mmdet3d.core.bbox.coders import build_bbox_coder +from mmdet.models import LOSSES as mmdet_LOSSES @HEADS.register_module() -class OccFormerHead(BaseModule): - """Head of Detr3D. - Args: - with_box_refine (bool): Whether to refine the reference points - in the decoder. Defaults to False. - as_two_stage (bool) : Whether to generate the proposal from - the outputs of encoder. - transformer (obj:`ConfigDict`): ConfigDict is used for building - the Encoder and Decoder. - bev_h, bev_w (int): spatial shape of BEV queries. 
- """ - +class CVTOccHead(BaseModule): def __init__(self, - *args, with_box_refine=False, as_two_stage=False, transformer=None, bbox_coder=None, - num_cls_fcs=2, - code_weights=None, - bev_h=30, - bev_w=30, + bev_h=200, + bev_w=200, + num_classes=18, + occ_thr=0.3, loss_occ=None, - use_mask=False, + use_camera_mask=False, + use_lidar_mask=False, + use_free_mask=False, + use_focal_loss=False, positional_encoding=None, + use_refine_feat_loss=False, + refine_feat_loss_weight=None, **kwargs): - + super(CVTOccHead, self).__init__() + self.use_camera_mask = use_camera_mask + self.use_lidar_mask = use_lidar_mask + self.use_free_mask = use_free_mask + self.use_focal_loss = use_focal_loss self.bev_h = bev_h self.bev_w = bev_w + self.occ_thr = occ_thr self.fp16_enabled = False - self.num_classes=kwargs['num_classes'] - self.use_mask=use_mask - + self.num_classes = num_classes + if use_free_mask: + self.num_classes = self.num_classes - 1 self.with_box_refine = with_box_refine self.as_two_stage = as_two_stage if self.as_two_stage: transformer['as_two_stage'] = self.as_two_stage - if 'code_size' in kwargs: - self.code_size = kwargs['code_size'] - else: - self.code_size = 10 - if code_weights is not None: - self.code_weights = code_weights - else: - self.code_weights = [1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2] - - - self.bbox_coder = build_bbox_coder(bbox_coder) - self.pc_range = self.bbox_coder.pc_range - self.real_w = self.pc_range[3] - self.pc_range[0] - self.real_h = self.pc_range[4] - self.pc_range[1] - self.num_cls_fcs = num_cls_fcs - 1 - super(OccFormerHead, self).__init__() - self.loss_occ = build_loss(loss_occ) - self.positional_encoding = build_positional_encoding( - positional_encoding) + self.use_refine_feat_loss = use_refine_feat_loss + _loss=dict(type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0) # refine_feat_w not sigmoid, so here use_sigmoid=True + self.refine_feat_loss_func = mmdet_LOSSES.build(_loss) + self.refine_feat_loss_weight = refine_feat_loss_weight + + self.positional_encoding = build_positional_encoding(positional_encoding) self.transformer = build_transformer(transformer) self.embed_dims = self.transformer.embed_dims - if not self.as_two_stage: - self.bev_embedding = nn.Embedding( - self.bev_h * self.bev_w, self.embed_dims) - # def _init_layers(self): - # """Initialize classification branch and regression branch of head.""" - # cls_branch = [] - # for _ in range(self.num_reg_fcs): - # cls_branch.append(Linear(self.embed_dims, self.embed_dims)) - # cls_branch.append(nn.LayerNorm(self.embed_dims)) - # cls_branch.append(nn.ReLU(inplace=True)) - # cls_branch.append(Linear(self.embed_dims, self.cls_out_channels)) - # fc_cls = nn.Sequential(*cls_branch) - # - # reg_branch = [] - # for _ in range(self.num_reg_fcs): - # reg_branch.append(Linear(self.embed_dims, self.embed_dims)) - # reg_branch.append(nn.ReLU()) - # reg_branch.append(Linear(self.embed_dims, self.code_size)) - # reg_branch = nn.Sequential(*reg_branch) - # - # def _get_clones(module, N): - # return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) - # - # # last reg_branch is used to generate proposal from - # # encode feature map when as_two_stage is True. 
- # num_pred = (self.transformer.decoder.num_layers + 1) if \ - # self.as_two_stage else self.transformer.decoder.num_layers - # - # if self.with_box_refine: - # self.cls_branches = _get_clones(fc_cls, num_pred) - # self.reg_branches = _get_clones(reg_branch, num_pred) - # else: - # self.cls_branches = nn.ModuleList( - # [fc_cls for _ in range(num_pred)]) - # self.reg_branches = nn.ModuleList( - # [reg_branch for _ in range(num_pred)]) - # - # if not self.as_two_stage: - # self.bev_embedding = nn.Embedding( - # self.bev_h * self.bev_w, self.embed_dims) - # self.query_embedding = nn.Embedding(self.num_query, - # self.embed_dims * 2) + self.bev_embedding = nn.Embedding(self.bev_h * self.bev_w, self.embed_dims) def init_weights(self): """Initialize weights of the DeformDETR head.""" self.transformer.init_weights() - # if self.loss_cls.use_sigmoid: - # bias_init = bias_init_with_prob(0.01) - # for m in self.cls_branches: - # nn.init.constant_(m[-1].bias, bias_init) - - @auto_fp16(apply_to=('mlvl_feats')) - def forward(self, mlvl_feats, img_metas, prev_bev=None, only_bev=False, test=False): - """Forward function. + + @auto_fp16(apply_to=('multi_level_feats')) + def forward(self, + multi_level_feats, + cur_img_metas, + prev_bev_list=[], + prev_img_metas=[], + only_bev=False, + **kwargs): + """ + Forward function. Args: - mlvl_feats (tuple[Tensor]): Features from the upstream - network, each is a 5D-tensor with shape - (B, N, C, H, W). - prev_bev: previous bev featues - only_bev: only compute BEV features with encoder. + multi_level_feats (list[torch.Tensor]): Current multi level img features from the upstream network. + Each is a 5D-tensor img_feats with shape (bs, num_cams, embed_dims, h, w). + cur_img_metas (list[dict]): Meta information of each sample. The list has length of batch size. + prev_bev_list (list[torch.Tensor]): BEV features of previous frames. Each has shape (bs, bev_h*bev_w, embed_dims). + prev_img_metas (list[dict[dict]]): Meta information of each sample. + The list has length of batch size. + The dict has keys len_queue-1-prev_bev_list_len, ..., len_queue-2. + The element of each key is a dict. + So each dict has length of prev_bev_list_len. + only_bev: If this flag is true. The head only computes BEV features with encoder. Returns: - all_cls_scores (Tensor): Outputs from the classification head, \ - shape [nb_dec, bs, num_query, cls_out_channels]. Note \ - cls_out_channels should includes background. - all_bbox_preds (Tensor): Sigmoid outputs from the regression \ - head with normalized coordinate format (cx, cy, w, l, cz, h, theta, vx, vy). \ - Shape [nb_dec, bs, num_query, 9]. + If only_bev: + _bev_embed (torch.Tensor): BEV features of the current frame with shape (bs, bev_h*bev_w, embed_dims). + else: + outs (dict): with keys "bev_embed, occ, extra". + - bev_embed (torch.Tensor): BEV features of the current frame with shape (bs, bev_h*bev_w, embed_dims). + - occ (torch.Tensor): Predicted occupancy features with shape (bs, w, h, total_z, c). + - extra (dict): extra information. if 'costvolume' in self.transformer, it will have 'refine_feat_w' key. 
""" - bs, num_cam, _, _, _ = mlvl_feats[0].shape - dtype = mlvl_feats[0].dtype - object_query_embeds = None - bev_queries = self.bev_embedding.weight.to(dtype) - bev_mask = torch.zeros((bs, self.bev_h, self.bev_w), - device=bev_queries.device).to(dtype) + # Step 1: initialize BEV queries and mask + bs = multi_level_feats[0].shape[0] + dtype = multi_level_feats[0].dtype + bev_queries = self.bev_embedding.weight.to(dtype) + bev_mask = torch.zeros((bs, self.bev_h, self.bev_w), device=bev_queries.device).to(dtype) bev_pos = self.positional_encoding(bev_mask).to(dtype) - if only_bev: # only use encoder to obtain BEV features, TODO: refine the workaround - return self.transformer.get_bev_features( - mlvl_feats, - bev_queries, - self.bev_h, - self.bev_w, - grid_length=(self.real_h / self.bev_h, - self.real_w / self.bev_w), - bev_pos=bev_pos, - img_metas=img_metas, - prev_bev=prev_bev, - ) + # Step 2: get BEV features + if only_bev: + if len(prev_bev_list) == 0: + prev_bev = None + else: + prev_bev = prev_bev_list[-1] + + outputs = self.transformer.get_bev_features(multi_level_feats, + bev_queries, + bev_pos, + cur_img_metas, + prev_bev, + **kwargs) + _bev_embed = outputs['bev_embed'] + + return _bev_embed + else: - outputs = self.transformer( - mlvl_feats, - bev_queries, - object_query_embeds, - self.bev_h, - self.bev_w, - grid_length=(self.real_h / self.bev_h, - self.real_w / self.bev_w), - bev_pos=bev_pos, - reg_branches=None, # noqa:E501 - cls_branches=None, - img_metas=img_metas, - prev_bev=prev_bev - ) - bev_embed, occ_outs = outputs - # bev_embed, hs, init_reference, inter_references = outputs - # - # - # outs = { - # 'bev_embed': bev_embed, - # 'all_cls_scores': outputs_classes, - # 'all_bbox_preds': outputs_coords, - # 'enc_cls_scores': None, - # 'enc_bbox_preds': None, - # } - - # if test: - # return bev_embed, occ_outs - # else: - # return occ_outs - outs = { - 'bev_embed': bev_embed, - 'occ':occ_outs, - } - - return outs + outputs = self.transformer(multi_level_feats, + bev_queries, + bev_pos, + cur_img_metas, + prev_bev_list, + prev_img_metas, + **kwargs) + + bev_for_history, occ_outs, extra = outputs + outs = {'bev_embed': bev_for_history, 'occ':occ_outs, 'extra':extra} + + return outs @force_fp32(apply_to=('preds_dicts')) - def loss(self, - # gt_bboxes_list, - # gt_labels_list, - voxel_semantics_list, - mask_camera_list, + def loss(self, voxel_semantics, preds_dicts, - gt_bboxes_ignore=None, - img_metas=None): - - loss_dict=dict() - occ=preds_dicts['occ'] - assert voxel_semantics_list.min()>=0 and voxel_semantics_list.max()<=17 - losses = self.loss_single(voxel_semantics_list,mask_camera_list,occ) - loss_dict['loss_occ']=losses + mask_camera=None, + mask_lidar=None, + **kwargs): + ''' + Loss function. + Args: + voxel_semantics (torch.Tensor): Shape (bs, w, h, total_z) + valid_mask (torch.Tensor): 1 represent valid voxel, 0 represent invalid voxel. + Directly get from the data loader. shape (bs, w, h, total_z) + preds_dicts (dict): result from head with keys "bev_embed, occ, extra". + - occ (torch.Tensor): Predicted occupancy features with shape (bs, w, h, total_z, c). + Returns: + loss_dict (dict): Losses of different branch. + Default cvtocc model has refine_feat_loss loss and loss_occ_coheam loss. 
+ ''' + + loss_dict = dict() + occ = preds_dicts['occ'] + assert voxel_semantics.min() >= 0 and voxel_semantics.max() <= self.num_classes-1, "semantic gt out of range" + losses = self.loss_single(voxel_semantics, mask_camera, occ) + loss_dict['loss_occ'] = losses + + extra = preds_dicts['extra'] + if self.use_refine_feat_loss: + if 'refine_feat_w' in extra: # has the key means it will not be None + refine_feat_w = extra['refine_feat_w'] + loss_dict['refine_feat_loss'] = self.get_refine_feat_loss(voxel_semantics, refine_feat_w, mask_camera, mask_lidar) + else: + loss_dict['refine_feat_loss'] = occ.reshape(-1).sum() * 0 + return loss_dict + + def get_refine_feat_loss(self, voxel_semantics, refine_feat_w, mask_camera=None, mask_lidar=None): + """ + Calculate refine_feat_loss from refine_feat_w + Args: + refine_feat_w (torch.Tensor): The weight without sigmoid. shape (bev_w, bev_h, total_z, 2). + Returns: + refine_feat_loss (float): + """ + + # Step 1: reshape refine_feat_w + refine_feat_w = refine_feat_w.reshape(-1, refine_feat_w.shape[-1]) # (w*h*total_z, 2) + + # Step 2: get the ground truth for refine feat weight from the occupancy ground truth. + refine_feat_gt = (voxel_semantics != self.num_classes-1) + refine_feat_gt = refine_feat_gt.reshape(-1).long() + + # Step 3: use `mask_camera` and `mask_lidar` to filter out the invalid points + mask = torch.ones_like(voxel_semantics) + if self.use_lidar_mask: + mask = torch.logical_and(mask, mask_lidar) + if self.use_camera_mask: + mask = torch.logical_and(mask, mask_camera) + + mask = mask.reshape(-1) + refine_feat_w_masked = refine_feat_w[mask] + refine_feat_gt_masked = refine_feat_gt[mask] + + # Step 4: calculate the loss + refine_feat_loss = self.refine_feat_loss_func(refine_feat_w_masked, refine_feat_gt_masked) + refine_feat_loss = self.refine_feat_loss_weight * refine_feat_loss + + return refine_feat_loss - def loss_single(self,voxel_semantics,mask_camera,preds_dicts): - if self.use_mask: - voxel_semantics=voxel_semantics.reshape(-1) - preds_dicts=preds_dicts.reshape(-1,self.num_classes) - mask_camera=mask_camera.reshape(-1) - num_total_samples=mask_camera.sum() - loss_occ=self.loss_occ(preds_dicts,voxel_semantics,mask_camera, avg_factor=num_total_samples) + def loss_single(self, voxel_semantics, mask_camera, preds_dicts): + if self.use_camera_mask: + voxel_semantics = voxel_semantics.reshape(-1) + preds_dicts = preds_dicts.reshape(-1,self.num_classes) + mask_camera = mask_camera.reshape(-1) + num_total_samples = mask_camera.sum() + loss_occ = self.loss_occ(preds_dicts, + voxel_semantics, + mask_camera, + avg_factor=num_total_samples) + else: voxel_semantics = voxel_semantics.reshape(-1) preds_dicts = preds_dicts.reshape(-1, self.num_classes) - loss_occ = self.loss_occ(preds_dicts, voxel_semantics,) - return loss_occ + if self.use_free_mask: + free_mask = voxel_semantics < self.num_classes + voxel_semantics = voxel_semantics[free_mask] + preds_dicts = preds_dicts[free_mask] + pos_num = voxel_semantics.shape[0] + + else: + pos_num = voxel_semantics.shape[0] + + loss_occ = self.loss_occ(preds_dicts, voxel_semantics.long(), avg_factor=pos_num) + return loss_occ + @force_fp32(apply_to=('preds_dicts')) - def get_occ(self, preds_dicts, img_metas, rescale=False): - """Generate bboxes from bbox head predictions. + def get_occ(self, preds_dicts): + """ + Generate bboxes from bbox head predictions. Args: preds_dicts (tuple[list[dict]]): Prediction results. - img_metas (list[dict]): Point cloud and image's meta info. 
Returns: - list[dict]: Decoded bbox, scores and labels after nms. + occ_out (torch.Tensor): Predicted occupancy map with shape (bs, h, w, z). + """ + + occ_out = preds_dicts['occ'] + if self.use_focal_loss: + # Default value of this flag is False + occ_out = occ_out.sigmoid() + + if self.use_free_mask: + # Default value of this flag is False + bs, h, w, z, channels = occ_out.shape + occ_out = occ_out.reshape(bs, -1, self.num_classes) + occ_out = torch.cat((occ_out, torch.ones_like(occ_out)[:,:, :1] * self.occ_thr), dim=-1) + occ_out = occ_out.reshape(bs,h,w,z,-1) + else: + occ_out = occ_out.softmax(-1) + + occ_out = occ_out.argmax(-1) + + return occ_out + + def compute_count_matrix(self, gtocc, predocc): + """ + Calculate count matrix. + Args: + voxel_semantics (torch.Tensor): semantic occpuancy ground truth. + voxel_semantics_preds (torch.Tensor): predicted semantic occpuancy. + both input are masked + Returns: + count_matrix (numpy.ndarray): count_matrix[i][j] counts the number of voxel with gt type i and pred type j. shape (num_classes, num_classes) + """ + + n_cl = self.num_classes + count_matrix = torch.zeros((n_cl, n_cl), device='cuda') + correct_idx = (gtocc >= 0) & (gtocc < n_cl) + count_matrix = torch.bincount(n_cl * gtocc[correct_idx].to(torch.int) + predocc[correct_idx].to(torch.int), + weights=None, minlength=n_cl ** 2).reshape(n_cl, n_cl) + + return count_matrix + + def eval_metrics(self, voxel_semantics, voxel_semantics_preds, camera_mask): """ - # return self.transformer.get_occ( - # preds_dicts, img_metas, rescale=rescale) - # print(img_metas[0].keys()) - occ_out=preds_dicts['occ'] - occ_score=occ_out.softmax(-1) - occ_score=occ_score.argmax(-1) + Evaluation. + Args: + voxel_semantics (torch.Tensor): semantic occpuancy ground truth. + voxel_semantics_preds (torch.Tensor): predicted semantic occpuancy. + camera_mask (torch.Tensor): camera mask. + all of them have shape (bs, w, h, total_z) + Returns: + results (dict): with key "count_matrix". + - count_matrix (numpy.ndarray): count_matrix[i][j] counts the number of voxel with gt type i and pred type j. shape (num_classes, num_classes) + """ + + masked_semantics_gt = voxel_semantics[camera_mask] + masked_semantics_pred = voxel_semantics_preds[camera_mask] + count_matrix = self.compute_count_matrix(gtocc=masked_semantics_gt, predocc=masked_semantics_pred) + results = {"count_matrix": count_matrix.cpu().numpy()} - return occ_score + return results \ No newline at end of file diff --git a/projects/mmdet3d_plugin/bevformer/dense_heads/occformer_head_3d.py b/projects/mmdet3d_plugin/bevformer/dense_heads/occformer_head_3d.py deleted file mode 100644 index 11fcf7c..0000000 --- a/projects/mmdet3d_plugin/bevformer/dense_heads/occformer_head_3d.py +++ /dev/null @@ -1,269 +0,0 @@ -# --------------------------------------------- -# Copyright (c) OpenMMLab. All rights reserved. 
-# --------------------------------------------- -# Modified by Zhiqi Li -# --------------------------------------------- - -import copy -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import Linear, bias_init_with_prob -from mmcv.utils import TORCH_VERSION, digit_version - -from mmdet.core import (multi_apply, multi_apply, reduce_mean) -from mmdet.models.utils.transformer import inverse_sigmoid -from mmdet.models import HEADS -from mmdet.models.dense_heads import DETRHead -from mmdet3d.core.bbox.coders import build_bbox_coder -from projects.mmdet3d_plugin.core.bbox.util import normalize_bbox -from mmcv.cnn.bricks.transformer import build_positional_encoding -from mmcv.runner import force_fp32, auto_fp16 -from projects.mmdet3d_plugin.models.utils.bricks import run_time -import numpy as np -import mmcv -import cv2 as cv -from projects.mmdet3d_plugin.models.utils.visual import save_tensor -from mmcv.cnn.bricks.transformer import build_positional_encoding -from mmdet.models.utils import build_transformer -from mmdet.models.builder import build_loss -from mmcv.runner import BaseModule, force_fp32 - -@HEADS.register_module() -class OccFormerHead3D(BaseModule): - """Head of Detr3D. - Args: - with_box_refine (bool): Whether to refine the reference points - in the decoder. Defaults to False. - as_two_stage (bool) : Whether to generate the proposal from - the outputs of encoder. - transformer (obj:`ConfigDict`): ConfigDict is used for building - the Encoder and Decoder. - bev_h, bev_w (int): spatial shape of BEV queries. - """ - - def __init__(self, - *args, - with_box_refine=False, - as_two_stage=False, - transformer=None, - bbox_coder=None, - num_cls_fcs=2, - code_weights=None, - bev_h=30, - bev_w=30, - loss_occ=None, - use_mask=False, - positional_encoding=None, - **kwargs): - - self.bev_h = bev_h - self.bev_w = bev_w - self.fp16_enabled = False - self.num_classes=kwargs['num_classes'] - self.use_mask=use_mask - - self.with_box_refine = with_box_refine - self.as_two_stage = as_two_stage - if self.as_two_stage: - transformer['as_two_stage'] = self.as_two_stage - if 'code_size' in kwargs: - self.code_size = kwargs['code_size'] - else: - self.code_size = 10 - if code_weights is not None: - self.code_weights = code_weights - else: - self.code_weights = [1.0, 1.0, 1.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2] - - - - self.bbox_coder = build_bbox_coder(bbox_coder) - self.pc_range = self.bbox_coder.pc_range - self.real_w = self.pc_range[3] - self.pc_range[0] - self.real_h = self.pc_range[4] - self.pc_range[1] - self.num_cls_fcs = num_cls_fcs - 1 - super(OccFormerHead, self).__init__() - - self.loss_occ = build_loss(loss_occ) - self.positional_encoding = build_positional_encoding( - positional_encoding) - self.transformer = build_transformer(transformer) - self.embed_dims = self.transformer.embed_dims - - if not self.as_two_stage: - self.bev_embedding = nn.Embedding( - self.bev_h * self.bev_w, self.embed_dims) - # def _init_layers(self): - # """Initialize classification branch and regression branch of head.""" - # cls_branch = [] - # for _ in range(self.num_reg_fcs): - # cls_branch.append(Linear(self.embed_dims, self.embed_dims)) - # cls_branch.append(nn.LayerNorm(self.embed_dims)) - # cls_branch.append(nn.ReLU(inplace=True)) - # cls_branch.append(Linear(self.embed_dims, self.cls_out_channels)) - # fc_cls = nn.Sequential(*cls_branch) - # - # reg_branch = [] - # for _ in range(self.num_reg_fcs): - # reg_branch.append(Linear(self.embed_dims, self.embed_dims)) - # 
reg_branch.append(nn.ReLU()) - # reg_branch.append(Linear(self.embed_dims, self.code_size)) - # reg_branch = nn.Sequential(*reg_branch) - # - # def _get_clones(module, N): - # return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) - # - # # last reg_branch is used to generate proposal from - # # encode feature map when as_two_stage is True. - # num_pred = (self.transformer.decoder.num_layers + 1) if \ - # self.as_two_stage else self.transformer.decoder.num_layers - # - # if self.with_box_refine: - # self.cls_branches = _get_clones(fc_cls, num_pred) - # self.reg_branches = _get_clones(reg_branch, num_pred) - # else: - # self.cls_branches = nn.ModuleList( - # [fc_cls for _ in range(num_pred)]) - # self.reg_branches = nn.ModuleList( - # [reg_branch for _ in range(num_pred)]) - # - # if not self.as_two_stage: - # self.bev_embedding = nn.Embedding( - # self.bev_h * self.bev_w, self.embed_dims) - # self.query_embedding = nn.Embedding(self.num_query, - # self.embed_dims * 2) - - def init_weights(self): - """Initialize weights of the DeformDETR head.""" - self.transformer.init_weights() - # if self.loss_cls.use_sigmoid: - # bias_init = bias_init_with_prob(0.01) - # for m in self.cls_branches: - # nn.init.constant_(m[-1].bias, bias_init) - - @auto_fp16(apply_to=('mlvl_feats')) - def forward(self, mlvl_feats, img_metas, prev_bev=None, only_bev=False, test=False): - """Forward function. - Args: - mlvl_feats (tuple[Tensor]): Features from the upstream - network, each is a 5D-tensor with shape - (B, N, C, H, W). - prev_bev: previous bev featues - only_bev: only compute BEV features with encoder. - Returns: - all_cls_scores (Tensor): Outputs from the classification head, \ - shape [nb_dec, bs, num_query, cls_out_channels]. Note \ - cls_out_channels should includes background. - all_bbox_preds (Tensor): Sigmoid outputs from the regression \ - head with normalized coordinate format (cx, cy, w, l, cz, h, theta, vx, vy). \ - Shape [nb_dec, bs, num_query, 9]. 
- """ - bs, num_cam, _, _, _ = mlvl_feats[0].shape - dtype = mlvl_feats[0].dtype - object_query_embeds = None - bev_queries = self.bev_embedding.weight.to(dtype) - - bev_mask = torch.zeros((bs, self.bev_h, self.bev_w), - device=bev_queries.device).to(dtype) - bev_pos = self.positional_encoding(bev_mask).to(dtype) - - if only_bev: # only use encoder to obtain BEV features, TODO: refine the workaround - return self.transformer.get_bev_features( - mlvl_feats, - bev_queries, - self.bev_h, - self.bev_w, - grid_length=(self.real_h / self.bev_h, - self.real_w / self.bev_w), - bev_pos=bev_pos, - img_metas=img_metas, - prev_bev=prev_bev, - ) - else: - outputs = self.transformer( - mlvl_feats, - bev_queries, - object_query_embeds, - self.bev_h, - self.bev_w, - grid_length=(self.real_h / self.bev_h, - self.real_w / self.bev_w), - bev_pos=bev_pos, - reg_branches=None, # noqa:E501 - cls_branches=None, - img_metas=img_metas, - prev_bev=prev_bev - ) - bev_embed, occ_outs = outputs - # bev_embed, hs, init_reference, inter_references = outputs - # - # - # outs = { - # 'bev_embed': bev_embed, - # 'all_cls_scores': outputs_classes, - # 'all_bbox_preds': outputs_coords, - # 'enc_cls_scores': None, - # 'enc_bbox_preds': None, - # } - - # if test: - # return bev_embed, occ_outs - # else: - # return occ_outs - outs = { - 'bev_embed': bev_embed, - 'occ':occ_outs, - } - - return outs - - @force_fp32(apply_to=('preds_dicts')) - def loss(self, - # gt_bboxes_list, - # gt_labels_list, - voxel_semantics_list, - mask_camera_list, - preds_dicts, - gt_bboxes_ignore=None, - img_metas=None): - - loss_dict=dict() - occ=preds_dicts['occ'] - assert voxel_semantics_list.min()>=0 and voxel_semantics_list.max()<=17 - losses = self.loss_single(voxel_semantics_list,mask_camera_list,occ) - loss_dict['loss_occ']=losses - return loss_dict - - def loss_single(self,voxel_semantics,mask_camera,preds_dicts): - if self.use_mask: - voxel_semantics=voxel_semantics.reshape(-1) - preds_dicts=preds_dicts.reshape(-1,self.num_classes) - mask_camera=mask_camera.reshape(-1) - num_total_samples=mask_camera.sum() - loss_occ=self.loss_occ(preds_dicts,voxel_semantics,mask_camera, avg_factor=num_total_samples) - else: - voxel_semantics = voxel_semantics.reshape(-1) - preds_dicts = preds_dicts.reshape(-1, self.num_classes) - loss_occ = self.loss_occ(preds_dicts, voxel_semantics,) - return loss_occ - - @force_fp32(apply_to=('preds_dicts')) - def get_occ(self, preds_dicts, img_metas, rescale=False): - """Generate bboxes from bbox head predictions. - Args: - preds_dicts (tuple[list[dict]]): Prediction results. - img_metas (list[dict]): Point cloud and image's meta info. - Returns: - list[dict]: Decoded bbox, scores and labels after nms. - """ - # return self.transformer.get_occ( - # preds_dicts, img_metas, rescale=rescale) - # print(img_metas[0].keys()) - occ_out=preds_dicts['occ'] - occ_score=occ_out.softmax(-1) - occ_score=occ_score.argmax(-1) - - - return occ_score diff --git a/projects/mmdet3d_plugin/bevformer/dense_heads/occformer_head_waymo.py b/projects/mmdet3d_plugin/bevformer/dense_heads/occformer_head_waymo.py new file mode 100644 index 0000000..436e718 --- /dev/null +++ b/projects/mmdet3d_plugin/bevformer/dense_heads/occformer_head_waymo.py @@ -0,0 +1,452 @@ +# --------------------------------------------- +# Copyright (c) OpenMMLab. All rights reserved. 
+# --------------------------------------------- +# Modified by Zhiqi Li +# --------------------------------------------- + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from mmcv.cnn.bricks.transformer import build_positional_encoding +from mmcv.runner import force_fp32, auto_fp16 +from mmcv.cnn.bricks.transformer import build_positional_encoding +from mmcv.runner import BaseModule, force_fp32 +from mmdet.models import HEADS +from mmdet.models.utils import build_transformer +from mmdet.models.builder import build_loss +from mmdet.models.losses.cross_entropy_loss import CrossEntropyLoss +from mmdet.models import LOSSES as mmdet_LOSSES +from mmseg.models import LOSSES as LOSSES_SEG +from sklearn.neighbors import NearestNeighbors + +@HEADS.register_module() +class CVTOccHeadWaymo(BaseModule): + def __init__(self, + volume_flag=False, + with_box_refine=False, + as_two_stage=False, + voxel_size=None, + occ_voxel_size=None, + use_larger=True, + transformer=None, + bev_h=200, + bev_w=200, + bev_z=1, + num_classes=None, + loss_occ=None, + loss_binary_occ=None, + use_CDist=False, + CLASS_NAMES=None, + positional_encoding=None, + use_refine_feat_loss=False, + refine_feat_loss_weight=None, + **kwargs): + super(CVTOccHeadWaymo, self).__init__() + if not volume_flag: assert bev_z == 1 + self.volume_flag = volume_flag + self.bev_h = bev_h + self.bev_w = bev_w + self.bev_z = bev_z + self.fp16_enabled = False + self.num_classes = num_classes + self.use_CDist = use_CDist + self.CLASS_NAMES = CLASS_NAMES + self.with_box_refine = with_box_refine + self.as_two_stage = as_two_stage + if self.as_two_stage: + transformer['as_two_stage'] = self.as_two_stage + + self.voxel_size = voxel_size + self.occ_voxel_size = occ_voxel_size + self.use_larger = use_larger + self.loss_occ_fun = dict() + + for name, loss_dict in loss_occ.items(): + if LOSSES_SEG.get(loss_dict['type']) is not None: + self.loss_occ_fun['loss_occ_' + name] = LOSSES_SEG.build(loss_dict) + else: + _type = loss_dict['type'] + raise KeyError(f'{_type} not in LOSSES_SEG registry') + + if loss_binary_occ is not None: + self.loss_binary_occ_func = dict() + for name, loss_dict in loss_binary_occ.items(): + if LOSSES_SEG.get(loss_dict['type']) is not None: + self.loss_binary_occ_func['loss_occ_' + name] = LOSSES_SEG.build(loss_dict) + else: + _type = loss_dict['type'] + raise KeyError(f'{_type} not in LOSSES_SEG registry') + + self.use_refine_feat_loss = use_refine_feat_loss + _loss=dict(type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0) # refine_feat_w not sigmoid, so here use_sigmoid=True + self.refine_feat_loss_func = mmdet_LOSSES.build(_loss) + self.refine_feat_loss_weight = refine_feat_loss_weight + + self.positional_encoding = build_positional_encoding(positional_encoding) + self.transformer = build_transformer(transformer) + self.embed_dims = self.transformer.embed_dims + + if not self.as_two_stage: + # self.as_two_stage default value is False + self.bev_embedding = nn.Embedding(self.bev_z * self.bev_h * self.bev_w, self.embed_dims) + + def init_weights(self): + """Initialize weights of the DeformDETR head.""" + self.transformer.init_weights() + + @auto_fp16(apply_to=('multi_level_feats')) + def forward(self, + multi_level_feats, + img_metas, + prev_bev_list=[], + prev_img_metas=[], + only_bev=False, + **kwargs,): + """ + Forward function. + Args: + multi_level_feats (list[torch.Tensor]): Current multi level img features from the upstream network. 
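`CVTOccHeadWaymo` builds one loss function per named entry of `loss_occ`, each looked up in mmseg's `LOSSES` registry. The fragment below is illustrative only: the docstrings refer to a default `loss_occ_coheam` entry whose exact type string is not shown in this patch, so standard mmseg losses are used here as stand-ins.

```python
from mmseg.models import LOSSES as LOSSES_SEG

# Illustrative entries; the real Waymo configs define the shipped values.
loss_occ = dict(
    ce=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
    lovasz=dict(type='LovaszLoss', reduction='none', loss_weight=1.0),
)
loss_occ_fun = {f'loss_occ_{name}': LOSSES_SEG.build(cfg)
                for name, cfg in loss_occ.items()}
```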
+ Each is a 5D-tensor img_feats with shape (bs, num_cams, embed_dims, h, w). + img_metas (list[dict]): Meta information of each sample. The list has length of batch size. + prev_bev_list (list[torch.Tensor]): BEV features of previous frames. Each has shape (bs, bev_h*bev_w, embed_dims). + prev_img_metas (list[dict[dict]]): Meta information of each sample. + The list has length of batch size. + The dict has keys len_queue-1-prev_bev_list_len, ..., len_queue-2. + The element of each key is a dict. + So each dict has length of prev_bev_list_len. + only_bev: If this flag is true. The head only computes BEV features with encoder. + Returns: + If only_bev: + _bev_embed (torch.Tensor): BEV features of the current frame with shape (bs, bev_h*bev_w, embed_dims). + else: + outs (dict): with keys "bev_embed, occ, extra". + """ + + # Step 1: initialize BEV queries and mask + bs = multi_level_feats[0].shape[0] + dtype = multi_level_feats[0].dtype + bev_queries = self.bev_embedding.weight.to(dtype) + if self.volume_flag: + bev_mask = torch.zeros((bs, self.bev_z, self.bev_h, self.bev_w), + device=bev_queries.device).to(dtype) + else: + bev_mask = torch.zeros((bs, self.bev_h, self.bev_w), + device=bev_queries.device).to(dtype) + bev_pos = self.positional_encoding(bev_mask).to(dtype) + + # Step 2: get BEV features + if only_bev: + if len(prev_bev_list) == 0: + prev_bev = None + else: + prev_bev = prev_bev_list[-1] + outputs = self.transformer.get_bev_features(multi_level_feats, + bev_queries, + bev_pos, + img_metas, + prev_bev, + **kwargs,) + _bev_embed = outputs['bev_embed'] + + return _bev_embed + + else: + outputs = self.transformer(multi_level_feats, + bev_queries, + bev_pos, + img_metas, + prev_bev_list, + prev_img_metas, + **kwargs,) + + bev_for_history, occ_outs, extra = outputs + outs = {'bev_embed': bev_for_history, 'occ':occ_outs, 'extra': extra} + + return outs + + @force_fp32(apply_to=('preds_dicts')) + def loss(self, voxel_semantics, + valid_mask, + preds_dicts, + **kwargs): + """ + Loss function. + Args: + voxel_semantics (torch.Tensor): Shape (bs, w, h, total_z) + valid_mask (torch.Tensor): 1 represent valid voxel, 0 represent invalid voxel. + Directly get from the data loader. shape (bs, w, h, total_z) + preds_dicts (dict): result from head with keys "bev_embed, occ, extra". + - occ (torch.Tensor): Predicted occupancy features with shape (bs, w, h, total_z, c). + Returns: + loss_dict (dict): Losses of different branch. + Default cvtocc model has refine_feat_loss loss and loss_occ_coheam loss. 
+ """ + loss_dict = dict() + occ_outs = preds_dicts['occ'] + loss_dict = self.loss_single(voxel_semantics, valid_mask, occ_outs, binary_loss=False) + extra = preds_dicts['extra'] + # if 'outputs_list' in extra: + # assert False, "not implemented" + # pred_list = extra['outputs_list'] + # for iter_i, preds in enumerate(pred_list): + # losses = self.loss_single(voxel_semantics, mask_infov, mask_lidar, mask_camera, preds, binary_loss=True) + # for k,v in losses.items(): + # loss_dict['loss_occ_iter{}_{}'.format(iter_i, k)] = v + + if self.use_refine_feat_loss: + if 'refine_feat_w' in extra: # has the key means it will not be None + refine_feat_w = extra['refine_feat_w'] + loss_dict['refine_feat_loss'] = self.get_refine_feat_loss(voxel_semantics, refine_feat_w, valid_mask) + else: + loss_dict['refine_feat_loss'] = occ_outs.reshape(-1).sum() * 0 + + return loss_dict + + def get_refine_feat_loss(self, voxel_semantics, refine_feat_w, valid_mask): + """ + Calculate refine_feat_loss from refine_feat_w + Args: + refine_feat_w (torch.Tensor): The weight without sigmoid. shape (bev_w, bev_h, total_z, 2). + Returns: + refine_feat_loss (float): + """ + + # Step 1: reshape refine_feat_w + refine_feat_w = refine_feat_w.unsqueeze(0) + channels = refine_feat_w.shape[-1] + refine_feat_w = refine_feat_w.reshape(-1, channels) + + # Step 2: get the ground truth for refine feat weight from the occupancy ground truth. + refine_feat_gt = (voxel_semantics != self.num_classes-1) + refine_feat_gt = refine_feat_gt.reshape(-1).long() + + # Step 3: use valid_mask to filter out the invalid points + valid_mask = valid_mask.reshape(-1) + refine_feat_w_masked = refine_feat_w[valid_mask] + refine_feat_gt_masked = refine_feat_gt[valid_mask] + + # Step 4: calculate the loss + refine_feat_loss = self.refine_feat_loss_func(refine_feat_w_masked, refine_feat_gt_masked) + refine_feat_loss = self.refine_feat_loss_weight * refine_feat_loss + + return refine_feat_loss + + def get_loss(self, loss_occ_fun, cls_score, labels, valid_mask, weight=None): + """ + Calculate multiple losses using different loss functions. + Args: + loss_occ_fun (dict): A dictionary containing different loss functions. + The keys are loss function names, and the values are the corresponding loss functions. + Each loss function should accept the following arguments: + - cls_score: The predicted scores or logits from the model. + - labels: The ground-truth labels for the data. + - weight: Optional, a weighting tensor to apply to the loss. Default is None. + - avg_factor: Optional, a scalar factor to normalize the loss. Default is None. + cls_score (torch.Tensor): The predicted scores or logits from the model. + It should have a shape of (N, C, *), where N is the batch size, + C is the number of classes, and * denotes additional dimensions. + cls_score is pred[mask] (N, ) + labels (torch.Tensor): The ground-truth labels for the data. + It should have a shape of (N, *), where N is the batch size, + and * denotes additional dimensions. The values should be integers + representing class indices. + labels is voxel_semantics[mask](N, ) + weight (torch.Tensor, optional): Optional, a weighting tensor to apply to the loss. + It should have a shape that is broadcastable to the shape of `cls_score`. + Default is None, which means no weighting is applied. + avg_factor (int or None, optional): Optional, a scalar factor to normalize the loss. + If 'focal' is in the loss function names, this factor + represents the number of positive samples (mask.sum()). 
+ For other loss functions, the default is None, which means + no normalization is performed. + + Returns: + dict: A dictionary containing the computed loss values for each loss function. + The keys are the loss function names, and the values are the corresponding loss values. + """ + + loss_occ = dict() + for loss_name in sorted(list(loss_occ_fun.keys())): + if 'focal' in loss_name: + avg_factor = valid_mask.sum() + else: + avg_factor = None + if 'lovasz' in loss_name: + cls_score = cls_score.reshape(*cls_score.shape, 1, 1) + labels = labels.reshape(*labels.shape, 1, 1) + + _loss = loss_occ_fun[loss_name](cls_score, labels, weight, avg_factor=avg_factor) + loss_occ[loss_name] = _loss + + return loss_occ # dict, key is loss_name, value is loss + + def loss_single(self, voxel_semantics, valid_mask, occ_outs, binary_loss=False): + """ + Args: + voxel_semantics (torch.Tensor): + occ_outs (torch.Tensor): Predicted occupancy features with shape (bs, w, h, total_z, c) + Returns: + loss_occ (dict): A dictionary containing the computed loss values for each loss function. + The keys are the loss function names, and the values are the corresponding loss values. + Default loss_occ_coheam loss + """ + + if binary_loss: + assert occ_outs.shape[-1] == 2 + binary_gt = voxel_semantics != self.num_classes-1 + bs, W, H, D = voxel_semantics.shape + _bs, _W, _H, _D, _ = occ_outs.shape + assert W % _W == 0 and H % _H == 0 and D % _D == 0 + scale_W, scale_H, scale_D = W//_W, H//_H, D//_D + + _scale = 1 + while _scale != scale_W: + binary_gt = binary_gt.reshape(bs, -1, 2, H, D) + binary_gt = torch.logical_or(binary_gt[:, :, 0, :, :], binary_gt[:, :, 1, :, :, :]) + _scale *= 2 + _scale = 1 + while _scale != scale_H: + binary_gt = binary_gt.reshape(bs, _W, -1, 2, D) + binary_gt = torch.logical_or(binary_gt[:, :, :, 0, :], binary_gt[:, :, :, 1, :]) + _scale *= 2 + _scale = 1 + while _scale != scale_D: + binary_gt = binary_gt.reshape(bs, _W, _H, -1, 2) + binary_gt = torch.logical_or(binary_gt[:, :, :, :, 0], binary_gt[:, :, :, :, 1]) + _scale *= 2 + binary_gt = binary_gt.long() + binary_gt=binary_gt.reshape(-1) + occ_outs=occ_outs.reshape(-1, 2) + mask=torch.ones_like(binary_gt, dtype=torch.bool) + loss_occ = self.get_loss(self.loss_binary_occ_func, occ_outs[mask], binary_gt[mask], mask) + else: + voxel_semantics=voxel_semantics.reshape(-1) + occ_outs = occ_outs.reshape(-1, self.num_classes) + valid_mask = valid_mask.reshape(-1) + loss_occ = self.get_loss(self.loss_occ_fun, + cls_score=occ_outs[valid_mask], + labels=voxel_semantics[valid_mask], + valid_mask=valid_mask) + + return loss_occ + + @force_fp32(apply_to=('preds_dicts')) + def get_occ(self, preds_dicts): + """ + Generate Occupancy semantics prediction. + Args: + preds_dicts (dict): with keys "bev_embed, occ, extra" + occ (torch.Tensor): Predicted occupancy features with shape (bs, w, h, total_z, c). + Returns: + occ_label (torch.Tensor): Occupancy semantics prediction with shape (bs, w, h, total_z). 
+ """ + occ_out=preds_dicts['occ'] + occ_score=occ_out.softmax(-1) + occ_label=occ_score.argmax(-1) + + return occ_label + + def compute_CDist(self, gtocc, predocc, mask): + alpha = 1/3 # Hyperparameter + + # Squeeze dimensions + gtocc = gtocc.squeeze(0) + predocc = predocc.squeeze(0) + mask = mask.squeeze(0) + + # Use mask to change unobserved into 16 (out of range) + gtocc = torch.where(mask, gtocc, torch.ones_like(gtocc) * self.num_classes) + predocc = torch.where(mask, predocc, torch.ones_like(predocc) * self.num_classes) + + # Get all unique class labels + labels_tensor = torch.unique(torch.cat((gtocc, predocc), dim=0)) + labels_list = labels_tensor.tolist() + labels_list = [x for x in labels_list if x < (self.num_classes-1)] # skip free type + + CDist_tensor = torch.zeros((self.num_classes-1), device='cuda') + for label in labels_list: + + # Extract points for the current class + labeled_gtocc = torch.nonzero(gtocc == label).float() # (N_1, 3) + labeled_predocc = torch.nonzero(predocc == label).float() # (N_2, 3) + + if labeled_gtocc.shape[0] == 0 or labeled_predocc.shape[0] == 0: + # CDist_tensor[label] = 2 + CDist_tensor[label] = labeled_gtocc.shape[0] + labeled_predocc.shape[0] + continue + + # convert tensor to numpy + labeled_gtocc_np = labeled_gtocc.cpu().numpy() + labeled_predocc_np = labeled_predocc.cpu().numpy() + + # Use sklearn's NearestNeighbors to find nearest neighbors + reference_gt = NearestNeighbors(n_neighbors=1, algorithm='kd_tree').fit(labeled_gtocc_np) + reference_pred = NearestNeighbors(n_neighbors=1, algorithm='kd_tree').fit(labeled_predocc_np) + + dist_pred_to_gt, _ = reference_gt.kneighbors(labeled_predocc_np) + dist_gt_to_pred, _ = reference_pred.kneighbors(labeled_gtocc_np) + + dist_pred_to_gt = torch.from_numpy(dist_pred_to_gt).squeeze().to('cuda') + dist_gt_to_pred = torch.from_numpy(dist_gt_to_pred).squeeze().to('cuda') + + exp_dist1 = 1 - torch.exp(-dist_pred_to_gt * alpha) + exp_dist2 = 1 - torch.exp(-dist_gt_to_pred * alpha) + chamfer_distance = torch.sum(exp_dist1) + torch.sum(exp_dist2) + + CDist_tensor[label] = chamfer_distance.item() + + return CDist_tensor + + def compute_count_matrix(self, gtocc, predocc): + """ + Calculate count matrix. + Args: + voxel_semantics (torch.Tensor): semantic occpuancy ground truth. + voxel_semantics_preds (torch.Tensor): predicted semantic occpuancy. + both input are masked + Returns: + count_matrix (numpy.ndarray): count_matrix[i][j] counts the number of voxel with gt type i and pred type j. shape (num_classes, num_classes) + """ + + n_cl = self.num_classes + count_matrix = torch.zeros((n_cl, n_cl), device='cuda') + correct_idx = (gtocc >= 0) & (gtocc < n_cl) + count_matrix = torch.bincount(n_cl * gtocc[correct_idx].to(torch.int) + predocc[correct_idx].to(torch.int), + weights=None, minlength=n_cl ** 2).reshape(n_cl, n_cl) + + return count_matrix + + def eval_metrics(self, voxel_semantics, voxel_semantics_preds, valid_mask): + """ + Evaluation. + Args: + voxel_semantics (torch.Tensor): semantic occpuancy ground truth. + voxel_semantics_preds (torch.Tensor): predicted semantic occpuancy. + valid_mask (torch.Tensor): 1 represent valid voxel, 0 represent invalid voxel. Directly get from the data loader. + all of them have shape (bs, w, h, total_z) + Returns: + count_matrix (numpy.ndarray): count_matrix[i][j] counts the number of voxel with gt type i and pred type j. shape (num_classes, num_classes) + CDist_tensor (numpy.ndarray): CDist_tensor[i] is the chamfer distance for class i. 
(without free type)
+ """
+
+ # Step 1: compute chamfer distance (controlled by `self.use_CDist`)
+ if self.use_CDist:
+ CDist_tensor = self.compute_CDist(gtocc=voxel_semantics, predocc=voxel_semantics_preds, mask=valid_mask)
+ else:
+ CDist_tensor = torch.zeros((self.num_classes-1), device='cuda')
+
+ # Step 2: compute mIoU
+ masked_semantics_gt = voxel_semantics[valid_mask]
+ masked_semantics_pred = voxel_semantics_preds[valid_mask]
+ count_matrix = self.compute_count_matrix(gtocc=masked_semantics_gt, predocc=masked_semantics_pred)
+
+ # Step 3: per-class voxel counts (could also be derived from the count matrix)
+ # gt_count = torch.sum(count_matrix, dim=1)
+ # pred_count = torch.sum(count_matrix, dim=0)
+
+ occ_results = {"CDist_tensor": CDist_tensor.cpu().numpy(),
+ "count_matrix": count_matrix.cpu().numpy(),}
+
+ return occ_results
diff --git a/projects/mmdet3d_plugin/bevformer/dense_heads/solo_head_occ_waymo.py b/projects/mmdet3d_plugin/bevformer/dense_heads/solo_head_occ_waymo.py
new file mode 100644
index 0000000..081fdb1
--- /dev/null
+++ b/projects/mmdet3d_plugin/bevformer/dense_heads/solo_head_occ_waymo.py
@@ -0,0 +1,260 @@
+# ---------------------------------------------
+# Copyright (c) OpenMMLab. All rights reserved.
+# ---------------------------------------------
+# Modified by Zhiqi Li
+# ---------------------------------------------
+
+import torch
+import torch.nn as nn
+from mmdet.models import HEADS
+from mmcv.cnn.bricks.transformer import build_positional_encoding
+from mmcv.runner import force_fp32, auto_fp16
+from mmdet.models.builder import build_loss
+from mmcv.runner import BaseModule, force_fp32
+from mmcv.cnn import PLUGIN_LAYERS, Conv2d, Conv3d, ConvModule, caffe2_xavier_init
+from mmseg.models import LOSSES as LOSSES_SEG
+from ..modules.unet import MYASPPHead
+
+@HEADS.register_module()
+class SOLOOccHeadWaymo(BaseModule):
+ """Occupancy prediction head used with the SOLOFusion pipeline on Waymo.
+ Args:
+ FREE_LABEL (int): Label index of the free (empty) voxel class.
+ embed_dims (int): Channel dimension of the input BEV features.
+ bev_h, bev_w (int): Spatial shape of the BEV feature map.
+ total_z (int): Number of voxels along the height axis.
+ num_classes (int): Number of semantic classes (including free).
+ use_infov_mask / use_lidar_mask / use_camera_mask (bool): Whether the
+ field-of-view / lidar / camera validity masks are used.
+ loss_occ (dict): Configs of the occupancy losses, keyed by loss name.
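+
+ Example (illustrative config sketch; the loss name 'ce', the class count and the
+ FREE_LABEL value are assumptions — only the registry type must exist in mmseg's LOSSES):
+ >>> head = SOLOOccHeadWaymo(
+ ... FREE_LABEL=23, embed_dims=256, bev_h=200, bev_w=200,
+ ... total_z=16, num_classes=16,
+ ... loss_occ=dict(ce=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)))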
+ """ + + def __init__(self, + *args, + FREE_LABEL=None, + embed_dims=256, + bev_z=1, + bev_h=200, + bev_w=200, + total_z=16, + num_classes=16, + use_infov_mask=True, + use_lidar_mask=False, + use_camera_mask=True, + act_cfg=dict(type='ReLU',inplace=True), + norm_cfg=dict(type='BN',), + loss_occ=None, + **kwargs): + self.FREE_LABEL = FREE_LABEL + self.embed_dims=embed_dims + self.bev_z = bev_z + self.bev_h = bev_h + self.bev_w = bev_w + self.total_z = total_z + self.fp16_enabled = False + self.num_classes = num_classes + self.use_infov_mask = use_infov_mask + self.use_lidar_mask = use_lidar_mask + self.use_camera_mask = use_camera_mask + + super(SOLOOccHeadWaymo, self).__init__() + self.loss_occ_fun = dict() + for name, loss_dict in loss_occ.items(): + if LOSSES_SEG.get(loss_dict['type']) is not None: + self.loss_occ_fun['loss_occ_' + name] = LOSSES_SEG.build(loss_dict) + else: + _type = loss_dict['type'] + raise KeyError(f'{_type} not in LOSSES_SEG registry') + + use_bias = norm_cfg is None + self.decoder = [] + conv_cfg = dict(type='Conv2d') + conv_num = 3 + # conv module + decoder_layers = [] + for _ in range(conv_num): + decoder_layers.append( + ConvModule( + self.embed_dims, # 256 + self.embed_dims, # 256 + kernel_size=3, + stride=1, + padding=1, + bias=use_bias, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + ) # 256 -> 256 + # aspp + decoder_layers.append( + MYASPPHead( + is_volume=False, + in_channels=self.embed_dims, + in_index=3, + channels=self.embed_dims, + dilations=(1, 3, 6, 9), + dropout_ratio=0.1, + num_classes=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + align_corners=False, + # loss_decode=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + ) # 256 -> 256 + ) + # deconv to origin shape + _out_dim = self.embed_dims*4 + decoder_layers.append( + ConvModule( + self.embed_dims, + _out_dim, + kernel_size=3, + stride=1, + padding=1, + bias=use_bias, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + ) # 256 -> 256 * 4 + self.decoder = nn.Sequential(*decoder_layers) + + self.predicter = nn.Sequential( + nn.Linear(_out_dim//self.total_z, self.num_classes*2), # 256 * 4 // 16 -> 32 + nn.Softplus(), + nn.Linear(self.num_classes*2, self.num_classes), + ) + self.embed_dims = self.embed_dims + + def init_weights(self): + """Initialize weights of the DeformDETR head.""" + + @auto_fp16(apply_to=('mlvl_feats')) + def forward(self, bev_feats, **kwargs): + """ + Forward function of occupancy head of solofusion. + Args: + bev_feats (torch.Tensor): shape (bs, embed_dims=256, bev_h, bev_w) + Returns: + outs (dict): Output results. + - occ (torch.Tensor): shape (bs, bev_w, bev_h, total_z, num_classes) + """ + bs, _, _, _ = bev_feats.shape + occ_out = self.decoder(bev_feats) # (bs, embed_dim * 4, h, w) + occ_out = occ_out.permute(0, 3, 2, 1) # (bs, w, h, embed_dim * 4) + occ_out = occ_out.reshape(bs, self.bev_w, self.bev_h, self.total_z, -1) # (bs, w, h, z, channels * 4) + occ_out = occ_out.reshape(bs * self.bev_w * self.bev_h * self.total_z, -1) # (bs * w * h * z, channels * 4) + occ_out = self.predicter(occ_out) # (bs * w * h * z, channels * 4) + occ_out = occ_out.reshape(bs, self.bev_w, self.bev_h, self.total_z, self.num_classes) # (bs, w, h, z, num_classes) + outs = {'occ': occ_out} + + return outs + + @force_fp32(apply_to=('preds_dicts')) + def loss(self, + voxel_semantics, + valid_mask, + preds_dicts, + **kwargs, + ): + ''' + Loss function of occupancy head of solofusion. + Args: + voxel_semantics (torch.Tensor): 3D occupancy ground truth. 
shape (B, H, W, Z) + valid_mask (torch.Tensor): mask of valid area. shape (B, H, W, Z) + preds_dicts (dict): output of forward function. + - occ (torch.Tensor): shape (B, W, H, Z, num_classes) + Returns: + - loss_dict (dict): loss of occupancy head. + ''' + loss_dict = dict() + occ = preds_dicts['occ'] + loss_dict = self.loss_single(voxel_semantics, valid_mask, occ) + + return loss_dict + + + def get_loss(self,loss_occ_fun, cls_score, labels, weight=None): + assert labels.max() <= (self.num_classes - 1) and labels.min() >= 0, f"score out of range: {labels.max()} vs {labels.min()}" + assert cls_score.shape[0] == labels.shape[0], f"shape mismatch: {cls_score.shape} vs {labels.shape}" + + loss_occ = dict() + for loss_name in sorted(list(loss_occ_fun.keys())): + if 'lovasz' in loss_name: + cls_score = cls_score.reshape(*cls_score.shape, 1, 1) + labels = labels.reshape(*labels.shape, 1, 1) + _loss = loss_occ_fun[loss_name](cls_score, labels, weight) + loss_occ[loss_name] = _loss + + return loss_occ + + def loss_single(self, + voxel_semantics, + valid_mask, + occ_preds, + **kwargs + ): + valid_mask = valid_mask.reshape(-1) # (bs, w, h, z) -> (bs*w*h*z, ) + valid_mask = valid_mask.bool() + occ_preds = occ_preds.reshape(-1, self.num_classes) # (bs*w*h*z, num_classes) + voxel_semantics = voxel_semantics.reshape(-1) # (bs*w*h*z, ) + loss_ce = self.loss_ce = self.get_loss(self.loss_occ_fun, occ_preds[valid_mask], voxel_semantics[valid_mask]) + + return loss_ce #,loss_lovasz + + @force_fp32(apply_to=('preds_dicts')) + def get_occ(self, preds_dicts): + """ + Generate Occupancy semantics prediction. + Args: + preds_dicts (dict): with keys "bev_embed, occ, extra" + occ (torch.Tensor): Predicted occupancy features with shape (bs, w, h, total_z, c). + Returns: + occ_label (torch.Tensor): Occupancy semantics prediction with shape (bs, w, h, total_z). + """ + + occ_out = preds_dicts['occ'] + occ_out = occ_out.softmax(-1) + occ_out = occ_out.argmax(-1) + + return occ_out + + def compute_count_matrix(self, gtocc, predocc): + """ + Calculate count matrix. + Args: + voxel_semantics (torch.Tensor): semantic occpuancy ground truth. + voxel_semantics_preds (torch.Tensor): predicted semantic occpuancy. + both input are masked + Returns: + count_matrix (numpy.ndarray): count_matrix[i][j] counts the number of voxel with gt type i and pred type j. shape (num_classes, num_classes) + """ + + n_cl = self.num_classes + count_matrix = torch.zeros((n_cl, n_cl), device='cuda') + correct_idx = (gtocc >= 0) & (gtocc < n_cl) + count_matrix = torch.bincount(n_cl * gtocc[correct_idx].to(torch.int) + predocc[correct_idx].to(torch.int), + weights=None, minlength=n_cl ** 2).reshape(n_cl, n_cl) + + return count_matrix + + def eval_metrics(self, voxel_semantics, voxel_semantics_preds, valid_mask=None): + """ + Evaluation. + Args: + voxel_semantics (torch.Tensor): semantic occpuancy ground truth. + voxel_semantics_preds (torch.Tensor): predicted semantic occpuancy. + valid_mask (torch.Tensor): 1 represent valid voxel, 0 represent invalid voxel. Directly get from the data loader. + all of them have shape (bs, w, h, total_z) + Returns: + count_matrix (numpy.ndarray): count_matrix[i][j] counts the number of voxel with gt type i and pred type j. 
+ shape (num_classes, num_classes) + """ + + masked_semantics_gt = voxel_semantics[valid_mask] + masked_semantics_pred = voxel_semantics_preds[valid_mask] + count_matrix = self.compute_count_matrix(gtocc=masked_semantics_gt, predocc=masked_semantics_pred) + + # use count matrix is the same + # gt_count = torch.sum(count_matrix, dim=1) + # pred_count = torch.sum(count_matrix, dim=0) + + occ_results = {"count_matrix": count_matrix.cpu().numpy(),} + + return occ_results \ No newline at end of file diff --git a/projects/mmdet3d_plugin/bevformer/detectors/__init__.py b/projects/mmdet3d_plugin/bevformer/detectors/__init__.py index 44b2a21..4d2c8b9 100644 --- a/projects/mmdet3d_plugin/bevformer/detectors/__init__.py +++ b/projects/mmdet3d_plugin/bevformer/detectors/__init__.py @@ -1,3 +1,5 @@ -from .bevformer import BEVFormer -from .occformer import OccFormer -from .bevformer_fp16 import BEVFormer_fp16 \ No newline at end of file +from .occformer import CVTOcc +from .occformer_waymo import CVTOccWaymo +from .centerpoint_solo import CenterPoint_solo +from .bevdet_solo import BEVDet_solo +from .solofusion import SOLOFusion \ No newline at end of file diff --git a/projects/mmdet3d_plugin/bevformer/detectors/bevdet_solo.py b/projects/mmdet3d_plugin/bevformer/detectors/bevdet_solo.py new file mode 100644 index 0000000..e548b9b --- /dev/null +++ b/projects/mmdet3d_plugin/bevformer/detectors/bevdet_solo.py @@ -0,0 +1,158 @@ +# Copyright (c) Phigent Robotics. All rights reserved. + +import torch +from mmcv.runner import force_fp32 +import torch.nn.functional as F + +from mmdet.models import DETECTORS +from .centerpoint_solo import CenterPoint_solo +# from .. import builder +from mmdet3d.models import builder + + +# @DETECTORS.register_module() +class BEVDet_solo(CenterPoint_solo): + def __init__(self, img_view_transformer, img_bev_encoder_backbone, img_bev_encoder_neck, **kwargs): + super(BEVDet_solo, self).__init__(**kwargs) + self.img_view_transformer = builder.build_neck(img_view_transformer) + self.img_bev_encoder_backbone = builder.build_backbone(img_bev_encoder_backbone) + self.img_bev_encoder_neck = builder.build_neck(img_bev_encoder_neck) + + def image_encoder(self,img): + imgs = img + B, N, C, imH, imW = imgs.shape + imgs = imgs.view(B * N, C, imH, imW) + x = self.img_backbone(imgs) + if self.with_img_neck: + x = self.img_neck(x) + _, output_dim, ouput_H, output_W = x.shape + x = x.view(B, N, output_dim, ouput_H, output_W) + return x + + def bev_encoder(self, x): + x = self.img_bev_encoder_backbone(x) + x = self.img_bev_encoder_neck(x) + return x + + def extract_img_feat(self, img, img_metas): + """Extract features of images.""" + x = self.image_encoder(img[0]) + x = self.img_view_transformer([x] + img[1:]) + x = self.bev_encoder(x) + return [x] + + def extract_feat(self, points, img, img_metas): + """Extract features from images and points.""" + img_feats = self.extract_img_feat(img, img_metas) + pts_feats = None + return (img_feats, pts_feats) + + def forward_train(self, + points=None, + img_metas=None, + gt_bboxes_3d=None, + gt_labels_3d=None, + gt_labels=None, + gt_bboxes=None, + img_inputs=None, + proposals=None, + gt_bboxes_ignore=None): + """Forward training function. + + Args: + points (list[torch.Tensor], optional): Points of each sample. + Defaults to None. + img_metas (list[dict], optional): Meta information of each sample. + Defaults to None. + gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`], optional): + Ground truth 3D boxes. Defaults to None. 
+ gt_labels_3d (list[torch.Tensor], optional): Ground truth labels + of 3D boxes. Defaults to None. + gt_labels (list[torch.Tensor], optional): Ground truth labels + of 2D boxes in images. Defaults to None. + gt_bboxes (list[torch.Tensor], optional): Ground truth 2D boxes in + images. Defaults to None. + img (torch.Tensor optional): Images of each sample with shape + (N, C, H, W). Defaults to None. + proposals ([list[torch.Tensor], optional): Predicted proposals + used for training Fast RCNN. Defaults to None. + gt_bboxes_ignore (list[torch.Tensor], optional): Ground truth + 2D boxes in images to be ignored. Defaults to None. + + Returns: + dict: Losses of different branches. + """ + img_feats, pts_feats = self.extract_feat( + points, img=img_inputs, img_metas=img_metas) + assert self.with_pts_bbox + losses = dict() + losses_pts = self.forward_pts_train(img_feats, gt_bboxes_3d, + gt_labels_3d, img_metas, + gt_bboxes_ignore) + losses.update(losses_pts) + return losses + + def forward_test(self, points=None, img_metas=None, img_inputs=None, **kwargs): + """ + Args: + points (list[torch.Tensor]): the outer list indicates test-time + augmentations and inner torch.Tensor should have a shape NxC, + which contains all points in the batch. + img_metas (list[list[dict]]): the outer list indicates test-time + augs (multiscale, flip, etc.) and the inner list indicates + images in a batch + img (list[torch.Tensor], optional): the outer + list indicates test-time augmentations and inner + torch.Tensor should have a shape NxCxHxW, which contains + all images in the batch. Defaults to None. + """ + for var, name in [(img_inputs, 'img_inputs'), (img_metas, 'img_metas')]: + if not isinstance(var, list): + raise TypeError('{} must be a list, but got {}'.format( + name, type(var))) + + num_augs = len(img_inputs) + if num_augs != len(img_metas): + raise ValueError( + 'num of augmentations ({}) != num of image meta ({})'.format( + len(img_inputs), len(img_metas))) + + if not isinstance(img_inputs[0][0],list): + img_inputs = [img_inputs] if img_inputs is None else img_inputs + points = [points] if points is None else points + return self.simple_test(points[0], img_metas[0], img_inputs[0], **kwargs) + else: + return self.aug_test(None, img_metas[0], img_inputs[0], **kwargs) + + def aug_test(self, points, img_metas, img=None, rescale=False): + """Test function without augmentaiton.""" + combine_type = self.test_cfg.get('combine_type','output') + if combine_type=='output': + return self.aug_test_combine_output(points, img_metas, img, rescale) + elif combine_type=='feature': + return self.aug_test_combine_feature(points, img_metas, img, rescale) + else: + assert False + + def simple_test(self, points, img_metas, img=None, rescale=False): + """Test function without augmentaiton.""" + img_feats, _ = self.extract_feat(points, img=img, img_metas=img_metas) + bbox_list = [dict() for _ in range(len(img_metas))] + bbox_pts = self.simple_test_pts(img_feats, img_metas, rescale=rescale) + for result_dict, pts_bbox in zip(bbox_list, bbox_pts): + result_dict['pts_bbox'] = pts_bbox + return bbox_list + + + def forward_dummy(self, points=None, img_metas=None, img_inputs=None, **kwargs): + img_feats, _ = self.extract_feat(points, img=img_inputs, img_metas=img_metas) + from mmdet3d.core.bbox.structures.box_3d_mode import LiDARInstance3DBoxes + img_metas=[dict(box_type_3d=LiDARInstance3DBoxes)] + bbox_list = [dict() for _ in range(1)] + assert self.with_pts_bbox + bbox_pts = self.simple_test_pts( + img_feats, img_metas, 
rescale=False) + for result_dict, pts_bbox in zip(bbox_list, bbox_pts): + result_dict['pts_bbox'] = pts_bbox + return bbox_list + \ No newline at end of file diff --git a/projects/mmdet3d_plugin/bevformer/detectors/bevformer.py b/projects/mmdet3d_plugin/bevformer/detectors/bevformer.py deleted file mode 100644 index d860e27..0000000 --- a/projects/mmdet3d_plugin/bevformer/detectors/bevformer.py +++ /dev/null @@ -1,292 +0,0 @@ -# --------------------------------------------- -# Copyright (c) OpenMMLab. All rights reserved. -# --------------------------------------------- -# Modified by Zhiqi Li -# --------------------------------------------- - -import torch -from mmcv.runner import force_fp32, auto_fp16 -from mmdet.models import DETECTORS -from mmdet3d.core import bbox3d2result -from mmdet3d.models.detectors.mvx_two_stage import MVXTwoStageDetector -from projects.mmdet3d_plugin.models.utils.grid_mask import GridMask -import time -import copy -import numpy as np -import mmdet3d -from projects.mmdet3d_plugin.models.utils.bricks import run_time - - -@DETECTORS.register_module() -class BEVFormer(MVXTwoStageDetector): - """BEVFormer. - Args: - video_test_mode (bool): Decide whether to use temporal information during inference. - """ - - def __init__(self, - use_grid_mask=False, - pts_voxel_layer=None, - pts_voxel_encoder=None, - pts_middle_encoder=None, - pts_fusion_layer=None, - img_backbone=None, - pts_backbone=None, - img_neck=None, - pts_neck=None, - pts_bbox_head=None, - img_roi_head=None, - img_rpn_head=None, - train_cfg=None, - test_cfg=None, - pretrained=None, - video_test_mode=False - ): - - super(BEVFormer, - self).__init__(pts_voxel_layer, pts_voxel_encoder, - pts_middle_encoder, pts_fusion_layer, - img_backbone, pts_backbone, img_neck, pts_neck, - pts_bbox_head, img_roi_head, img_rpn_head, - train_cfg, test_cfg, pretrained) - self.grid_mask = GridMask( - True, True, rotate=1, offset=False, ratio=0.5, mode=1, prob=0.7) - self.use_grid_mask = use_grid_mask - self.fp16_enabled = False - - # temporal - self.video_test_mode = video_test_mode - self.prev_frame_info = { - 'prev_bev': None, - 'scene_token': None, - 'prev_pos': 0, - 'prev_angle': 0, - } - - - def extract_img_feat(self, img, img_metas, len_queue=None): - """Extract features of images.""" - B = img.size(0) - if img is not None: - - # input_shape = img.shape[-2:] - # # update real input shape of each single img - # for img_meta in img_metas: - # img_meta.update(input_shape=input_shape) - - if img.dim() == 5 and img.size(0) == 1: - img.squeeze_() - elif img.dim() == 5 and img.size(0) > 1: - B, N, C, H, W = img.size() - img = img.reshape(B * N, C, H, W) - if self.use_grid_mask: - img = self.grid_mask(img) - - img_feats = self.img_backbone(img) - if isinstance(img_feats, dict): - img_feats = list(img_feats.values()) - else: - return None - if self.with_img_neck: - img_feats = self.img_neck(img_feats) - - img_feats_reshaped = [] - for img_feat in img_feats: - BN, C, H, W = img_feat.size() - if len_queue is not None: - img_feats_reshaped.append(img_feat.view(int(B/len_queue), len_queue, int(BN / B), C, H, W)) - else: - img_feats_reshaped.append(img_feat.view(B, int(BN / B), C, H, W)) - return img_feats_reshaped - - @auto_fp16(apply_to=('img')) - def extract_feat(self, img, img_metas=None, len_queue=None): - """Extract features from images and points.""" - - img_feats = self.extract_img_feat(img, img_metas, len_queue=len_queue) - - return img_feats - - - def forward_pts_train(self, - pts_feats, - gt_bboxes_3d, - gt_labels_3d, - 
img_metas, - gt_bboxes_ignore=None, - prev_bev=None): - """Forward function' - Args: - pts_feats (list[torch.Tensor]): Features of point cloud branch - gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`]): Ground truth - boxes for each sample. - gt_labels_3d (list[torch.Tensor]): Ground truth labels for - boxes of each sampole - img_metas (list[dict]): Meta information of samples. - gt_bboxes_ignore (list[torch.Tensor], optional): Ground truth - boxes to be ignored. Defaults to None. - prev_bev (torch.Tensor, optional): BEV features of previous frame. - Returns: - dict: Losses of each branch. - """ - - outs = self.pts_bbox_head( - pts_feats, img_metas, prev_bev) - loss_inputs = [gt_bboxes_3d, gt_labels_3d, outs] - losses = self.pts_bbox_head.loss(*loss_inputs, img_metas=img_metas) - return losses - - def forward_dummy(self, img): - dummy_metas = None - return self.forward_test(img=img, img_metas=[[dummy_metas]]) - - def forward(self, return_loss=True, **kwargs): - """Calls either forward_train or forward_test depending on whether - return_loss=True. - Note this setting will change the expected inputs. When - `return_loss=True`, img and img_metas are single-nested (i.e. - torch.Tensor and list[dict]), and when `resturn_loss=False`, img and - img_metas should be double nested (i.e. list[torch.Tensor], - list[list[dict]]), with the outer list indicating test time - augmentations. - """ - if return_loss: - return self.forward_train(**kwargs) - else: - return self.forward_test(**kwargs) - - def obtain_history_bev(self, imgs_queue, img_metas_list): - """Obtain history BEV features iteratively. To save GPU memory, gradients are not calculated. - """ - self.eval() - - with torch.no_grad(): - prev_bev = None - bs, len_queue, num_cams, C, H, W = imgs_queue.shape - imgs_queue = imgs_queue.reshape(bs*len_queue, num_cams, C, H, W) - img_feats_list = self.extract_feat(img=imgs_queue, len_queue=len_queue) - for i in range(len_queue): - img_metas = [each[i] for each in img_metas_list] - if not img_metas[0]['prev_bev_exists']: - prev_bev = None - # img_feats = self.extract_feat(img=img, img_metas=img_metas) - img_feats = [each_scale[:, i] for each_scale in img_feats_list] - prev_bev = self.pts_bbox_head( - img_feats, img_metas, prev_bev, only_bev=True) - self.train() - return prev_bev - - @auto_fp16(apply_to=('img', 'points')) - def forward_train(self, - points=None, - img_metas=None, - gt_bboxes_3d=None, - gt_labels_3d=None, - gt_labels=None, - gt_bboxes=None, - img=None, - proposals=None, - gt_bboxes_ignore=None, - img_depth=None, - img_mask=None, - ): - """Forward training function. - Args: - points (list[torch.Tensor], optional): Points of each sample. - Defaults to None. - img_metas (list[dict], optional): Meta information of each sample. - Defaults to None. - gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`], optional): - Ground truth 3D boxes. Defaults to None. - gt_labels_3d (list[torch.Tensor], optional): Ground truth labels - of 3D boxes. Defaults to None. - gt_labels (list[torch.Tensor], optional): Ground truth labels - of 2D boxes in images. Defaults to None. - gt_bboxes (list[torch.Tensor], optional): Ground truth 2D boxes in - images. Defaults to None. - img (torch.Tensor optional): Images of each sample with shape - (N, C, H, W). Defaults to None. - proposals ([list[torch.Tensor], optional): Predicted proposals - used for training Fast RCNN. Defaults to None. - gt_bboxes_ignore (list[torch.Tensor], optional): Ground truth - 2D boxes in images to be ignored. Defaults to None. 
- Returns: - dict: Losses of different branches. - """ - - len_queue = img.size(1) - prev_img = img[:, :-1, ...] - img = img[:, -1, ...] - - prev_img_metas = copy.deepcopy(img_metas) - prev_bev = self.obtain_history_bev(prev_img, prev_img_metas) - - img_metas = [each[len_queue-1] for each in img_metas] - if not img_metas[0]['prev_bev_exists']: - prev_bev = None - img_feats = self.extract_feat(img=img, img_metas=img_metas) - losses = dict() - losses_pts = self.forward_pts_train(img_feats, gt_bboxes_3d, - gt_labels_3d, img_metas, - gt_bboxes_ignore, prev_bev) - - losses.update(losses_pts) - return losses - - def forward_test(self, img_metas, img=None, **kwargs): - for var, name in [(img_metas, 'img_metas')]: - if not isinstance(var, list): - raise TypeError('{} must be a list, but got {}'.format( - name, type(var))) - img = [img] if img is None else img - - if img_metas[0][0]['scene_token'] != self.prev_frame_info['scene_token']: - # the first sample of each scene is truncated - self.prev_frame_info['prev_bev'] = None - # update idx - self.prev_frame_info['scene_token'] = img_metas[0][0]['scene_token'] - - # do not use temporal information - if not self.video_test_mode: - self.prev_frame_info['prev_bev'] = None - - # Get the delta of ego position and angle between two timestamps. - tmp_pos = copy.deepcopy(img_metas[0][0]['can_bus'][:3]) - tmp_angle = copy.deepcopy(img_metas[0][0]['can_bus'][-1]) - if self.prev_frame_info['prev_bev'] is not None: - img_metas[0][0]['can_bus'][:3] -= self.prev_frame_info['prev_pos'] - img_metas[0][0]['can_bus'][-1] -= self.prev_frame_info['prev_angle'] - else: - img_metas[0][0]['can_bus'][-1] = 0 - img_metas[0][0]['can_bus'][:3] = 0 - - new_prev_bev, bbox_results = self.simple_test( - img_metas[0], img[0], prev_bev=self.prev_frame_info['prev_bev'], **kwargs) - # During inference, we save the BEV features and ego motion of each timestamp. - self.prev_frame_info['prev_pos'] = tmp_pos - self.prev_frame_info['prev_angle'] = tmp_angle - self.prev_frame_info['prev_bev'] = new_prev_bev - return bbox_results - - def simple_test_pts(self, x, img_metas, prev_bev=None, rescale=False): - """Test function""" - outs = self.pts_bbox_head(x, img_metas, prev_bev=prev_bev) - - bbox_list = self.pts_bbox_head.get_bboxes( - outs, img_metas, rescale=rescale) - bbox_results = [ - bbox3d2result(bboxes, scores, labels) - for bboxes, scores, labels in bbox_list - ] - return outs['bev_embed'], bbox_results - - def simple_test(self, img_metas, img=None, prev_bev=None, rescale=False): - """Test function without augmentaiton.""" - img_feats = self.extract_feat(img=img, img_metas=img_metas) - - bbox_list = [dict() for i in range(len(img_metas))] - new_prev_bev, bbox_pts = self.simple_test_pts( - img_feats, img_metas, prev_bev, rescale=rescale) - for result_dict, pts_bbox in zip(bbox_list, bbox_pts): - result_dict['pts_bbox'] = pts_bbox - return new_prev_bev, bbox_list diff --git a/projects/mmdet3d_plugin/bevformer/detectors/bevformer_fp16.py b/projects/mmdet3d_plugin/bevformer/detectors/bevformer_fp16.py deleted file mode 100644 index 5325e3c..0000000 --- a/projects/mmdet3d_plugin/bevformer/detectors/bevformer_fp16.py +++ /dev/null @@ -1,89 +0,0 @@ -# --------------------------------------------- -# Copyright (c) OpenMMLab. All rights reserved. 
-# --------------------------------------------- -# Modified by Zhiqi Li -# --------------------------------------------- - -from tkinter.messagebox import NO -import torch -from mmcv.runner import force_fp32, auto_fp16 -from mmdet.models import DETECTORS -from mmdet3d.core import bbox3d2result -from mmdet3d.models.detectors.mvx_two_stage import MVXTwoStageDetector -from projects.mmdet3d_plugin.models.utils.grid_mask import GridMask -from projects.mmdet3d_plugin.bevformer.detectors.bevformer import BEVFormer -import time -import copy -import numpy as np -import mmdet3d -from projects.mmdet3d_plugin.models.utils.bricks import run_time - - -@DETECTORS.register_module() -class BEVFormer_fp16(BEVFormer): - """ - The default version BEVFormer currently can not support FP16. - We provide this version to resolve this issue. - """ - - @auto_fp16(apply_to=('img', 'prev_bev', 'points')) - def forward_train(self, - points=None, - img_metas=None, - gt_bboxes_3d=None, - gt_labels_3d=None, - gt_labels=None, - gt_bboxes=None, - img=None, - proposals=None, - gt_bboxes_ignore=None, - img_depth=None, - img_mask=None, - prev_bev=None, - ): - """Forward training function. - Args: - points (list[torch.Tensor], optional): Points of each sample. - Defaults to None. - img_metas (list[dict], optional): Meta information of each sample. - Defaults to None. - gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`], optional): - Ground truth 3D boxes. Defaults to None. - gt_labels_3d (list[torch.Tensor], optional): Ground truth labels - of 3D boxes. Defaults to None. - gt_labels (list[torch.Tensor], optional): Ground truth labels - of 2D boxes in images. Defaults to None. - gt_bboxes (list[torch.Tensor], optional): Ground truth 2D boxes in - images. Defaults to None. - img (torch.Tensor optional): Images of each sample with shape - (N, C, H, W). Defaults to None. - proposals ([list[torch.Tensor], optional): Predicted proposals - used for training Fast RCNN. Defaults to None. - gt_bboxes_ignore (list[torch.Tensor], optional): Ground truth - 2D boxes in images to be ignored. Defaults to None. - Returns: - dict: Losses of different branches. - """ - - img_feats = self.extract_feat(img=img, img_metas=img_metas) - - losses = dict() - losses_pts = self.forward_pts_train(img_feats, gt_bboxes_3d, - gt_labels_3d, img_metas, - gt_bboxes_ignore, prev_bev=prev_bev) - losses.update(losses_pts) - return losses - - - def val_step(self, data, optimizer): - """ - In BEVFormer_fp16, we use this `val_step` function to inference the `prev_pev`. - This is not the standard function of `val_step`. - """ - - img = data['img'] - img_metas = data['img_metas'] - img_feats = self.extract_feat(img=img, img_metas=img_metas) - prev_bev = data.get('prev_bev', None) - prev_bev = self.pts_bbox_head(img_feats, img_metas, prev_bev=prev_bev, only_bev=True) - return prev_bev \ No newline at end of file diff --git a/projects/mmdet3d_plugin/bevformer/detectors/centerpoint_solo.py b/projects/mmdet3d_plugin/bevformer/detectors/centerpoint_solo.py new file mode 100644 index 0000000..4ef56df --- /dev/null +++ b/projects/mmdet3d_plugin/bevformer/detectors/centerpoint_solo.py @@ -0,0 +1,196 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
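+#
+# Note: this is a trimmed, unregistered copy of mmdet3d's CenterPoint detector
+# (the DETECTORS decorator below is intentionally commented out). It is kept only
+# as a base class so that BEVDet_solo, and presumably the SOLOFusion detector built
+# on top of it, can reuse the point-cloud test-time-augmentation utilities here.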
+import torch + +from mmdet3d.core import bbox3d2result, merge_aug_bboxes_3d +from mmdet.models import DETECTORS +# from .mvx_two_stage import MVXTwoStageDetector +from mmdet3d.models.detectors.mvx_two_stage import MVXTwoStageDetector + + +# @DETECTORS.register_module() +class CenterPoint_solo(MVXTwoStageDetector): + """Base class of Multi-modality VoxelNet.""" + + def __init__(self, + pts_voxel_layer=None, + pts_voxel_encoder=None, + pts_middle_encoder=None, + pts_fusion_layer=None, + img_backbone=None, + pts_backbone=None, + img_neck=None, + pts_neck=None, + pts_bbox_head=None, + img_roi_head=None, + img_rpn_head=None, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + super(CenterPoint_solo, + self).__init__(pts_voxel_layer, pts_voxel_encoder, + pts_middle_encoder, pts_fusion_layer, + img_backbone, pts_backbone, img_neck, pts_neck, + pts_bbox_head, img_roi_head, img_rpn_head, + train_cfg, test_cfg, pretrained, init_cfg) + + def extract_pts_feat(self, pts, img_feats, img_metas): + """Extract features of points.""" + if not self.with_pts_bbox: + return None + voxels, num_points, coors = self.voxelize(pts) + + voxel_features = self.pts_voxel_encoder(voxels, num_points, coors) + batch_size = coors[-1, 0] + 1 + x = self.pts_middle_encoder(voxel_features, coors, batch_size) + x = self.pts_backbone(x) + if self.with_pts_neck: + x = self.pts_neck(x) + return x + + def forward_pts_train(self, + pts_feats, + gt_bboxes_3d, + gt_labels_3d, + img_metas, + gt_bboxes_ignore=None): + """Forward function for point cloud branch. + + Args: + pts_feats (list[torch.Tensor]): Features of point cloud branch + gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`]): Ground truth + boxes for each sample. + gt_labels_3d (list[torch.Tensor]): Ground truth labels for + boxes of each sampole + img_metas (list[dict]): Meta information of samples. + gt_bboxes_ignore (list[torch.Tensor], optional): Ground truth + boxes to be ignored. Defaults to None. + + Returns: + dict: Losses of each branch. + """ + outs = self.pts_bbox_head(pts_feats) + loss_inputs = [gt_bboxes_3d, gt_labels_3d, outs] + losses = self.pts_bbox_head.loss(*loss_inputs) + return losses + + def simple_test_pts(self, x, img_metas, rescale=False): + """Test function of point cloud branch.""" + outs = self.pts_bbox_head(x) + bbox_list = self.pts_bbox_head.get_bboxes( + outs, img_metas, rescale=rescale) + bbox_results = [ + bbox3d2result(bboxes, scores, labels) + for bboxes, scores, labels in bbox_list + ] + return bbox_results + + def aug_test_pts(self, feats, img_metas, rescale=False): + """Test function of point cloud branch with augmentaiton. + + The function implementation process is as follows: + + - step 1: map features back for double-flip augmentation. + - step 2: merge all features and generate boxes. + - step 3: map boxes back for scale augmentation. + - step 4: merge results. + + Args: + feats (list[torch.Tensor]): Feature of point cloud. + img_metas (list[dict]): Meta information of samples. + rescale (bool): Whether to rescale bboxes. Default: False. + + Returns: + dict: Returned bboxes consists of the following keys: + + - boxes_3d (:obj:`LiDARInstance3DBoxes`): Predicted bboxes. + - scores_3d (torch.Tensor): Scores of predicted boxes. + - labels_3d (torch.Tensor): Labels of predicted boxes. 
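+
+ Illustrative sketch of step 1 for one horizontally flipped sample (layout
+ assumptions: outputs are (bs, C, H, W) and channel 1 of 'reg' is the axis
+ affected by the flip, matching the code below):
+ >>> out['reg'] = torch.flip(out['reg'], dims=[2]) # undo the grid flip
+ >>> out['reg'][:, 1, ...] = 1 - out['reg'][:, 1, ...] # mirror the sub-pixel offset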
+ """ + # only support aug_test for one sample + outs_list = [] + for x, img_meta in zip(feats, img_metas): + outs = self.pts_bbox_head(x) + # merge augmented outputs before decoding bboxes + for task_id, out in enumerate(outs): + for key in out[0].keys(): + if img_meta[0]['pcd_horizontal_flip']: + outs[task_id][0][key] = torch.flip( + outs[task_id][0][key], dims=[2]) + if key == 'reg': + outs[task_id][0][key][:, 1, ...] = 1 - outs[ + task_id][0][key][:, 1, ...] + elif key == 'rot': + outs[task_id][0][ + key][:, 1, + ...] = -outs[task_id][0][key][:, 1, ...] + elif key == 'vel': + outs[task_id][0][ + key][:, 1, + ...] = -outs[task_id][0][key][:, 1, ...] + if img_meta[0]['pcd_vertical_flip']: + outs[task_id][0][key] = torch.flip( + outs[task_id][0][key], dims=[3]) + if key == 'reg': + outs[task_id][0][key][:, 0, ...] = 1 - outs[ + task_id][0][key][:, 0, ...] + elif key == 'rot': + outs[task_id][0][ + key][:, 0, + ...] = -outs[task_id][0][key][:, 0, ...] + elif key == 'vel': + outs[task_id][0][ + key][:, 0, + ...] = -outs[task_id][0][key][:, 0, ...] + + outs_list.append(outs) + + preds_dicts = dict() + scale_img_metas = [] + + # concat outputs sharing the same pcd_scale_factor + for i, (img_meta, outs) in enumerate(zip(img_metas, outs_list)): + pcd_scale_factor = img_meta[0]['pcd_scale_factor'] + if pcd_scale_factor not in preds_dicts.keys(): + preds_dicts[pcd_scale_factor] = outs + scale_img_metas.append(img_meta) + else: + for task_id, out in enumerate(outs): + for key in out[0].keys(): + preds_dicts[pcd_scale_factor][task_id][0][key] += out[ + 0][key] + + aug_bboxes = [] + + for pcd_scale_factor, preds_dict in preds_dicts.items(): + for task_id, pred_dict in enumerate(preds_dict): + # merge outputs with different flips before decoding bboxes + for key in pred_dict[0].keys(): + preds_dict[task_id][0][key] /= len(outs_list) / len( + preds_dicts.keys()) + bbox_list = self.pts_bbox_head.get_bboxes( + preds_dict, img_metas[0], rescale=rescale) + bbox_list = [ + dict(boxes_3d=bboxes, scores_3d=scores, labels_3d=labels) + for bboxes, scores, labels in bbox_list + ] + aug_bboxes.append(bbox_list[0]) + + if len(preds_dicts.keys()) > 1: + # merge outputs with different scales after decoding bboxes + merged_bboxes = merge_aug_bboxes_3d(aug_bboxes, scale_img_metas, + self.pts_bbox_head.test_cfg) + return merged_bboxes + else: + for key in bbox_list[0].keys(): + bbox_list[0][key] = bbox_list[0][key].to('cpu') + return bbox_list[0] + + def aug_test(self, points, img_metas, imgs=None, rescale=False): + """Test function with augmentaiton.""" + img_feats, pts_feats = self.extract_feats(points, img_metas, imgs) + bbox_list = dict() + if pts_feats and self.with_pts_bbox: + pts_bbox = self.aug_test_pts(pts_feats, img_metas, rescale) + bbox_list.update(pts_bbox=pts_bbox) + return [bbox_list] diff --git a/projects/mmdet3d_plugin/bevformer/detectors/occformer.py b/projects/mmdet3d_plugin/bevformer/detectors/occformer.py index 205ffcc..ae0d425 100644 --- a/projects/mmdet3d_plugin/bevformer/detectors/occformer.py +++ b/projects/mmdet3d_plugin/bevformer/detectors/occformer.py @@ -4,26 +4,17 @@ # Modified by Zhiqi Li # --------------------------------------------- +import copy +import numpy as np import torch from mmcv.runner import force_fp32, auto_fp16 from mmdet.models import DETECTORS from mmdet3d.core import bbox3d2result from mmdet3d.models.detectors.mvx_two_stage import MVXTwoStageDetector from projects.mmdet3d_plugin.models.utils.grid_mask import GridMask -import time -import copy -import numpy as np -import 
mmdet3d -from projects.mmdet3d_plugin.models.utils.bricks import run_time - @DETECTORS.register_module() -class OccFormer(MVXTwoStageDetector): - """BEVFormer. - Args: - video_test_mode (bool): Decide whether to use temporal information during inference. - """ - +class CVTOcc(MVXTwoStageDetector): def __init__(self, use_grid_mask=False, pts_voxel_layer=None, @@ -40,102 +31,122 @@ def __init__(self, train_cfg=None, test_cfg=None, pretrained=None, - video_test_mode=False - ): - - super(OccFormer, - self).__init__(pts_voxel_layer, pts_voxel_encoder, - pts_middle_encoder, pts_fusion_layer, - img_backbone, pts_backbone, img_neck, pts_neck, - pts_bbox_head, img_roi_head, img_rpn_head, - train_cfg, test_cfg, pretrained) - self.grid_mask = GridMask( - True, True, rotate=1, offset=False, ratio=0.5, mode=1, prob=0.7) + video_test_mode=False, + queue_length=None, + save_results=False, + **kwargs): + super(CVTOcc, + self).__init__(pts_voxel_layer, + pts_voxel_encoder, + pts_middle_encoder, + pts_fusion_layer, + img_backbone, + pts_backbone, + img_neck, pts_neck, + pts_bbox_head, + img_roi_head, + img_rpn_head, + train_cfg, + test_cfg, + pretrained) + self.grid_mask = GridMask(True, True, rotate=1, offset=False, ratio=0.5, mode=1, prob=0.7) self.use_grid_mask = use_grid_mask self.fp16_enabled = False + self.save_results = save_results # temporal + self.queue_length = queue_length self.video_test_mode = video_test_mode self.prev_frame_info = { - 'prev_bev': None, + 'prev_bev_list': [], + 'prev_img_metas_list': [], 'scene_token': None, 'prev_pos': 0, 'prev_angle': 0, } + def extract_img_feat(self, img, len_queue=None): + """Extract features of images. + Args: + img (torch.Tensor): Image tensor with shape (bs, n_views, C, H, W). + But for previous img, its shape will be (bs*len_queue, n_views, C, H, W). + len_queue (int): The length of the queue. It is less or equal to self.queue_length. + It is used when extracting features of previous images. + Returns: + list[torch.Tensor]: Image features. Each with shape (bs, n_views, C, H, W). + But different scales (from FPN) will have different shapes. + For previous img, its shape will be (bs, len_queue, n_views, C, H, W). 
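+
+ Shape walk-through (illustrative; the image size, FPN channels and stride are
+ assumptions taken from a typical config, not fixed by this method):
+ >>> img = torch.randn(2 * 3, 6, 3, 928, 1600) # bs=2, len_queue=3, 6 camera views
+ >>> feats = self.extract_img_feat(img, len_queue=3)
+ >>> feats[0].shape # e.g. torch.Size([2, 3, 6, 256, 116, 200]) for the finest level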
+ """ + + bs_length, num_views, C, H, W = img.size() + bs_length_num_views = bs_length * num_views + img = img.reshape(bs_length_num_views, C, H, W) + + if self.use_grid_mask: + img = self.grid_mask(img) + img_feats = self.img_backbone(img) + if isinstance(img_feats, dict): + img_feats = list(img_feats.values()) - def extract_img_feat(self, img, img_metas, len_queue=None): - """Extract features of images.""" - B = img.size(0) - if img is not None: - - # input_shape = img.shape[-2:] - # # update real input shape of each single img - # for img_meta in img_metas: - # img_meta.update(input_shape=input_shape) - - if img.dim() == 5 and img.size(0) == 1: - img.squeeze_() - elif img.dim() == 5 and img.size(0) > 1: - B, N, C, H, W = img.size() - img = img.reshape(B * N, C, H, W) - if self.use_grid_mask: - img = self.grid_mask(img) - - img_feats = self.img_backbone(img) - if isinstance(img_feats, dict): - img_feats = list(img_feats.values()) - else: - return None if self.with_img_neck: img_feats = self.img_neck(img_feats) img_feats_reshaped = [] for img_feat in img_feats: - BN, C, H, W = img_feat.size() - if len_queue is not None: - img_feats_reshaped.append(img_feat.view(int(B/len_queue), len_queue, int(BN / B), C, H, W)) - else: - img_feats_reshaped.append(img_feat.view(B, int(BN / B), C, H, W)) + bs_length_num_views, C, H, W = img_feat.size() + if len_queue is not None: # for prev imgs + bs = int(bs_length / len_queue) + img_feats_reshaped.append(img_feat.view(bs, len_queue, num_views, C, H, W)) + else: # for current imgs + img_feats_reshaped.append(img_feat.view(bs_length, num_views, C, H, W)) + return img_feats_reshaped @auto_fp16(apply_to=('img')) - def extract_feat(self, img, img_metas=None, len_queue=None): + def extract_feat(self, img, len_queue=None): """Extract features from images and points.""" - - img_feats = self.extract_img_feat(img, img_metas, len_queue=len_queue) - + img_feats = self.extract_img_feat(img, len_queue=len_queue) return img_feats - def forward_pts_train(self, - pts_feats, - gt_bboxes_3d, - gt_labels_3d, + def forward_pts_train(self, multi_level_feats, voxel_semantics, mask_camera, - img_metas, - gt_bboxes_ignore=None, - prev_bev=None): - """Forward function' + mask_lidar, + cur_img_metas, + prev_bev_list=[], + prev_img_metas=[], + **kwargs): + """ + Forward function Args: - pts_feats (list[torch.Tensor]): Features of point cloud branch - gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`]): Ground truth - boxes for each sample. - gt_labels_3d (list[torch.Tensor]): Ground truth labels for - boxes of each sampole - img_metas (list[dict]): Meta information of samples. - gt_bboxes_ignore (list[torch.Tensor], optional): Ground truth - boxes to be ignored. Defaults to None. - prev_bev (torch.Tensor, optional): BEV features of previous frame. + multi_level_feats (list[torch.Tensor]): Multi level img_feats. + voxel_semantics (torch.Tensor): Occupancy ground truth. + mask_camera (torch.Tensor): Camera mask. + mask_lidar (torch.Tensor): Lidar mask. + cur_img_metas (list[dict]): Meta information of samples. It has length of batch_size. + prev_bev_list (list[torch.Tensor]): BEV features of previous frames. + Each has shape (bs, bev_h*bev_w=40000, embed_dims=256). + prev_img_metas (list[dict[dict]]): Meta information of previous samples. Returns: - dict: Losses of each branch. + losses (dict): Losses of each branch. 
""" - outs = self.pts_bbox_head( - pts_feats, img_metas, prev_bev) - loss_inputs = [voxel_semantics, mask_camera, outs] - losses = self.pts_bbox_head.loss(*loss_inputs, img_metas=img_metas) + # use the occupancy head to get the occupancy output + outs = self.pts_bbox_head(multi_level_feats, + cur_img_metas, + prev_bev_list, + prev_img_metas, + only_bev=False, + **kwargs) + + # calculate the loss + losses = self.pts_bbox_head.loss(voxel_semantics, + outs, + mask_camera, + mask_lidar, + **kwargs) + return losses def forward_dummy(self, img): @@ -143,172 +154,264 @@ def forward_dummy(self, img): return self.forward_test(img=img, img_metas=[[dummy_metas]]) def forward(self, return_loss=True, **kwargs): - """Calls either forward_train or forward_test depending on whether - return_loss=True. + """ + Calls either forward_train or forward_test depending on whether return_loss=True. Note this setting will change the expected inputs. When - `return_loss=True`, img and img_metas are single-nested (i.e. - torch.Tensor and list[dict]), and when `resturn_loss=False`, img and - img_metas should be double nested (i.e. list[torch.Tensor], - list[list[dict]]), with the outer list indicating test time - augmentations. + `return_loss=True`, img and img_metas are single-nested + (i.e. torch.Tensor and list[dict]), + and when `resturn_loss=False`, img and img_metas should be + double nested (i.e. list[torch.Tensor], list[list[dict]]), + with the outer list indicating test time augmentations. """ if return_loss: return self.forward_train(**kwargs) else: return self.forward_test(**kwargs) - - def obtain_history_bev(self, imgs_queue, img_metas_list): - """Obtain history BEV features iteratively. To save GPU memory, gradients are not calculated. + + def obtain_history_bev(self, + prev_img_feats_list=[], + prev_img_metas=[], + prev_len_queue=0): + """ + Obtain history BEV features iteratively. + To save GPU memory, gradients are not calculated. + Args: + prev_img_feats_list (list[torch.Tensor]): The list has length eqauls to the scales. + Each tensor has shape: bs, prev_len_queue, n_views, C, H, W. + prev_img_metas (list[dict[dict]]): Meta information of each sample. + The list has length of batch size. + The dict has keys 0, 1, 2, ..., len-2. The element of each key is a dict. + prev_len_queue (int): The length of the queue - 1. + Returns: + prev_bev_list (list[torch.Tensor]): Each has shape ([bs, bev_h*bev_w=40000, embed_dims=256]). """ - self.eval() + self.eval() + prev_bev_list = [] with torch.no_grad(): - prev_bev = None - bs, len_queue, num_cams, C, H, W = imgs_queue.shape - imgs_queue = imgs_queue.reshape(bs*len_queue, num_cams, C, H, W) - img_feats_list = self.extract_feat(img=imgs_queue, len_queue=len_queue) - for i in range(len_queue): - img_metas = [each[i] for each in img_metas_list] - if not img_metas[0]['prev_bev_exists']: - prev_bev = None - # img_feats = self.extract_feat(img=img, img_metas=img_metas) - img_feats = [each_scale[:, i] for each_scale in img_feats_list] - prev_bev = self.pts_bbox_head( - img_feats, img_metas, prev_bev, only_bev=True) + for i in range(prev_len_queue): + img_feats = [each_scale[:, i, ...] 
for each_scale in prev_img_feats_list] + img_metas = [each_batch[i] for each_batch in prev_img_metas] # list[dict] of length equals to batch_size + if img_metas[0]['prev_bev_exists'] is not True: # HERE assume batch size = 1 + prev_bev_list = [] + + prev_bev = self.pts_bbox_head(multi_level_feats=img_feats, + cur_img_metas=img_metas, + prev_bev_list=prev_bev_list, + prev_img_metas=None, # useless + only_bev=True) + prev_bev_list.append(prev_bev) + self.train() - return prev_bev + + return prev_bev_list @auto_fp16(apply_to=('img', 'points')) - def forward_train(self, - points=None, - img_metas=None, - gt_bboxes_3d=None, - gt_labels_3d=None, + def forward_train(self, img=None, voxel_semantics=None, mask_lidar=None, mask_camera=None, - gt_labels=None, - gt_bboxes=None, - img=None, - proposals=None, - gt_bboxes_ignore=None, - img_depth=None, - img_mask=None, - ): - """Forward training function. + img_metas=None, + **kwargs): + """ + Forward training function. Args: - points (list[torch.Tensor], optional): Points of each sample. - Defaults to None. - img_metas (list[dict], optional): Meta information of each sample. - Defaults to None. - gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`], optional): - Ground truth 3D boxes. Defaults to None. - gt_labels_3d (list[torch.Tensor], optional): Ground truth labels - of 3D boxes. Defaults to None. - gt_labels (list[torch.Tensor], optional): Ground truth labels - of 2D boxes in images. Defaults to None. - gt_bboxes (list[torch.Tensor], optional): Ground truth 2D boxes in - images. Defaults to None. - img (torch.Tensor optional): Images of each sample with shape - (N, C, H, W). Defaults to None. - proposals ([list[torch.Tensor], optional): Predicted proposals - used for training Fast RCNN. Defaults to None. - gt_bboxes_ignore (list[torch.Tensor], optional): Ground truth - 2D boxes in images to be ignored. Defaults to None. + img_metas (list[dict[dict]]): Meta information of each sample. + The list has length of batch size. + The dict has keys 0, 1, 2, ..., len-1. The element of each key is a dict. + img (torch.Tensor): Images of each sample with shape (bs, len, n_views, C, H, W). + voxel_semantics (torch.Tensor): Occupancy ground truth + with shape (bs, bev_h, bev_w, total_z). + mask_camera (torch.Tensor): Camera mask with shape (bs, bev_w, bev_h, total_z). + mask_lidar (torch.Tensor): Lidar mask with shape (bs, bev_w, bev_h, total_z). Returns: - dict: Losses of different branches. + losses (dict): Losses of different branches. """ - - len_queue = img.size(1) - prev_img = img[:, :-1, ...] - img = img[:, -1, ...] - prev_img_metas = copy.deepcopy(img_metas) - prev_bev = self.obtain_history_bev(prev_img, prev_img_metas) + # Step 1: prepare cur_img_feats and cur_img_metas + batch_size, len_queue, _, _, _, _ = img.shape + cur_img = img[:, -1, ...] + cur_img_feats = self.extract_feat(img=cur_img) # list[tensor], each tensor is of shape (B, N, C, H, W). H and W are different across scales. + img_metas_deepcopy = copy.deepcopy(img_metas) + cur_img_metas = [each_batch[len_queue-1] for each_batch in img_metas] # list[dict] of length equals to batch_size + + # Step 2: prepare prev_bev_list, prev_img_metas + if cur_img_metas[0]['prev_bev_exists']: + prev_img = img[:, :-1, ...] 
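+ # The queue is assumed to be time-ordered: img[:, -1] (taken above as cur_img) is
+ # the current sample, and img[:, :-1] holds the len_queue-1 history frames. The
+ # history frames are flattened into the batch dimension below so the image backbone
+ # can process them in a single no-grad pass before the BEV features are rebuilt
+ # frame by frame in obtain_history_bev.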
+ bs, prev_len_queue, num_cams, C, H, W = prev_img.shape + prev_img = prev_img.reshape(bs * prev_len_queue, num_cams, C, H, W) + with torch.no_grad(): + prev_img_feats = self.extract_feat(img=prev_img, len_queue=prev_len_queue) + + prev_img_metas = [] + for each_batch in img_metas_deepcopy: + each_batch.pop(len_queue - 1) + prev_img_metas.append(each_batch) # list[dict[dict]] + + prev_bev_list = self.obtain_history_bev(prev_img_feats, prev_img_metas, prev_len_queue) + + # Step 3: adjust the length of these two to be consistent + prev_bev_list_len = len(prev_bev_list) + for each_batch in prev_img_metas: + if len(each_batch) > prev_bev_list_len: + for i in range(0, len(each_batch) - prev_bev_list_len): # len(each_batch) = len_queue - 1 + each_batch.pop(i) + else: + prev_bev_list = [] + prev_img_metas = [{} for _ in range(batch_size)] - img_metas = [each[len_queue-1] for each in img_metas] - if not img_metas[0]['prev_bev_exists']: - prev_bev = None - img_feats = self.extract_feat(img=img, img_metas=img_metas) + # Step 4: forward in head to get losses losses = dict() - losses_pts = self.forward_pts_train(img_feats, gt_bboxes_3d, - gt_labels_3d,voxel_semantics,mask_camera, img_metas, - gt_bboxes_ignore, prev_bev) + losses_pts = self.forward_pts_train(multi_level_feats=cur_img_feats, + voxel_semantics=voxel_semantics, + mask_camera=mask_camera, + mask_lidar=mask_lidar, + cur_img_metas=cur_img_metas, + prev_bev_list=prev_bev_list, + prev_img_metas=prev_img_metas, + **kwargs) losses.update(losses_pts) return losses - def forward_test(self, img_metas, - img=None, - voxel_semantics=None, - mask_lidar=None, - mask_camera=None, - **kwargs): + def forward_test(self, + img_metas, + img=None, + voxel_semantics=None, + mask_camera=None, + mask_lidar=None, + **kwargs): + ''' + Forward inference function. + Args: + (all arg are be wrapped one more list. after we take it out, the type of each parameter are below) + img (torch.Tensor): Images of each sample with shape (bs, n_views, C, H, W). + img_metas (list[dict]): len is bs. + voxel_semantics (torch.Tensor): Occupancy ground truth with shape (bs, bev_h, bev_w, total_z). + mask_camera (torch.Tensor): Camera mask with shape (bs, bev_w, bev_h, total_z). + mask_lidar (torch.Tensor): Lidar mask with shape (bs, bev_w, bev_h, total_z). + Returns: + If self.save_result is true. We will save the occ_result for visualization. + voxel_semantics (numpy.ndarray): + voxel_semantics_preds (numpy.ndarray): Occupancy semantics prediction. The same shape. + valid_mask (numpy.ndarray): unified boolean mask for visible voxel. The same shape. + sample_idx (int): The index of the sample. 
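+
+ Illustrative sketch of the unwrapping done in Step 1 below (sizes are assumptions):
+ >>> img, img_metas = img[0], img_metas[0] # drop the outer test-time-aug wrapper list
+ >>> img.shape # e.g. torch.Size([1, 6, 3, 928, 1600])
+ >>> len(img_metas) # == batch size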
+ ''' + for var, name in [(img_metas, 'img_metas')]: if not isinstance(var, list): - raise TypeError('{} must be a list, but got {}'.format( - name, type(var))) - img = [img] if img is None else img - - if img_metas[0][0]['scene_token'] != self.prev_frame_info['scene_token']: - # the first sample of each scene is truncated - self.prev_frame_info['prev_bev'] = None - # update idx - self.prev_frame_info['scene_token'] = img_metas[0][0]['scene_token'] + raise TypeError('{} must be a list, but got {}'.format(name, type(var))) + + # Step 1: prepare the input + # all arg are be wrapped one more list, so we need to take the first element + if img is not None: img = img[0] + if voxel_semantics is not None: voxel_semantics = voxel_semantics[0] + if mask_camera is not None: mask_camera = mask_camera[0] + if mask_lidar is not None: mask_lidar = mask_lidar[0] + if img_metas is not None: img_metas = img_metas[0] + + # If the input frame is in a new scene, the prev_frame_info need to be reset. + if img_metas[0]['scene_token'] != self.prev_frame_info['scene_token']: + self.prev_frame_info['prev_bev_list'] = [] + self.prev_frame_info['prev_img_metas_list'] = [] + # update idx + self.prev_frame_info['scene_token'] = img_metas[0]['scene_token'] - # do not use temporal information if not self.video_test_mode: - self.prev_frame_info['prev_bev'] = None - - # Get the delta of ego position and angle between two timestamps. - tmp_pos = copy.deepcopy(img_metas[0][0]['can_bus'][:3]) - tmp_angle = copy.deepcopy(img_metas[0][0]['can_bus'][-1]) - if self.prev_frame_info['prev_bev'] is not None: - img_metas[0][0]['can_bus'][:3] -= self.prev_frame_info['prev_pos'] - img_metas[0][0]['can_bus'][-1] -= self.prev_frame_info['prev_angle'] + # defalut value of self.video_test_mode is True + self.prev_frame_info['prev_bev_list'] = [] + self.prev_frame_info['prev_img_metas_list'] = [] + + # Step 2: Get the delta of ego position and angle between two timestamps. + tmp_pos = copy.deepcopy(img_metas[0]['can_bus'][:3]) + tmp_angle = copy.deepcopy(img_metas[0]['can_bus'][-1]) + if len(self.prev_frame_info['prev_bev_list']) > 0: + img_metas[0]['can_bus'][:3] -= self.prev_frame_info['prev_pos'] + img_metas[0]['can_bus'][-1] -= self.prev_frame_info['prev_angle'] + else: + img_metas[0]['can_bus'][-1] = 0 + img_metas[0]['can_bus'][:3] = 0 + + # Step 3: prepare prev_bev_list, prev_img_metas_list + if len(self.prev_frame_info['prev_bev_list']) > 0: + prev_bev_list = self.prev_frame_info['prev_bev_list'] + prev_img_metas_list = self.prev_frame_info['prev_img_metas_list'] else: - img_metas[0][0]['can_bus'][-1] = 0 - img_metas[0][0]['can_bus'][:3] = 0 - - new_prev_bev, occ_results = self.simple_test( - img_metas[0], img[0], prev_bev=self.prev_frame_info['prev_bev'], **kwargs) - # During inference, we save the BEV features and ego motion of each timestamp. 
- DEBUG=True - - if DEBUG: - print('output',type(occ_results),type(voxel_semantics[0]),type(img_metas[0])) - print() - use_mask=False - if use_mask: - save_root = '/home/txy/occ_nus_out/use_mask/' - else: - save_root = '/home/txy/occ_nus_out/' - sample_idx=img_metas[0][0]['sample_idx'] - np.savez_compressed(save_root+sample_idx,output=occ_results.to(torch.uint8).cpu().numpy(), - gt=voxel_semantics[0].to(torch.uint8).cpu().numpy()) - print('saved',save_root+sample_idx) + prev_bev = torch.zeros([1, 40000, 256], device=img.device, dtype=img.dtype) + prev_bev_list = [prev_bev] + prev_img_metas_list = [img_metas[0].copy()] + + # convert the list to dict TODO + prev_img_metas_list_len = len(prev_img_metas_list) + prev_img_metas_dict = {} + for i in range(prev_img_metas_list_len): + prev_img_metas_dict[self.queue_length - 1 - prev_img_metas_list_len + i] = prev_img_metas_list[i] + # from 0 to self.queue_length - 2 + + # Step 4: forward in head to get occ_results + outs, occ_results = self.simple_test(img_metas, + img, + prev_bev_list=prev_bev_list, + prev_img_metas=[prev_img_metas_dict], + **kwargs) + + # Step 5: During inference, we save the BEV features and ego motion of each timestamp. self.prev_frame_info['prev_pos'] = tmp_pos self.prev_frame_info['prev_angle'] = tmp_angle - self.prev_frame_info['prev_bev'] = new_prev_bev - return occ_results - - def simple_test_pts(self, x, img_metas, prev_bev=None, rescale=False): - """Test function""" - outs = self.pts_bbox_head(x, img_metas, prev_bev=prev_bev,test=True) - - occ = self.pts_bbox_head.get_occ( - outs, img_metas, rescale=rescale) - - return outs['bev_embed'], occ - - - def simple_test(self, img_metas, img=None, prev_bev=None, rescale=False): - """Test function without augmentaiton.""" - img_feats = self.extract_feat(img=img, img_metas=img_metas) + new_prev_bev = outs['bev_embed'] + + if self.queue_length > 1: + if len (prev_bev_list) > (self.queue_length - 2): + del prev_bev_list[0] + prev_bev_list.append(new_prev_bev) + self.prev_frame_info['prev_bev_list'] = prev_bev_list + + if len(prev_img_metas_list) > (self.queue_length - 2): + del prev_img_metas_list[0] + prev_img_metas_list.append(img_metas[0]) + self.prev_frame_info['prev_img_metas_list'] = prev_img_metas_list + + if self.save_results: + results = { + 'occ_pred': occ_results.cpu().numpy().astype(np.uint8), + 'occ_gt': voxel_semantics, + 'mask_camera': mask_camera, + } + else: + results = self.pts_bbox_head.eval_metrics(occ_results, voxel_semantics, mask_camera) + scene_idx = img_metas[0]['scene_idx'] + frame_idx = img_metas[0]['frame_idx'] + results['scene_id'] = scene_idx + results['frame_id'] = frame_idx - # bbox_list = [dict() for i in range(len(img_metas))] - new_prev_bev, occ = self.simple_test_pts( - img_feats, img_metas, prev_bev, rescale=rescale) - # for result_dict, pts_bbox in zip(bbox_list, bbox_pts): - # result_dict['pts_bbox'] = pts_bbox - return new_prev_bev, occ + + return results + + def simple_test(self, + img_metas, + img=None, + prev_bev_list=[], + prev_img_metas=[], + **kwargs): + """ + Test function without augmentaiton. + Args: + img_metas (list[dict]): Meta information of each sample. + img (torch.Tensor): Images of each sample with shape (bs, n_views, C, H, W). + prev_bev_list (list[torch.Tensor]): BEV features of previous frames. + Each has shape (bs, bev_h*bev_w, embed_dims). + prev_img_metas (list[dict[dict]]): Meta information of previous samples. 
+ Returns: + new_prev_bev (torch.Tensor): BEV features of the current frame with shape (bs, bev_h*bev_w, embed_dims). + occ (torch.Tensor): Predicted occupancy with shape (bs, bev_h, bev_w, total_z). + """ + multi_level_feats = self.extract_feat(img=img) + outs = self.pts_bbox_head(multi_level_feats, + img_metas, + prev_bev_list, + prev_img_metas, + only_bev=False, + **kwargs) + occ = self.pts_bbox_head.get_occ(outs) + + return outs, occ diff --git a/projects/mmdet3d_plugin/bevformer/detectors/occformer_waymo.py b/projects/mmdet3d_plugin/bevformer/detectors/occformer_waymo.py new file mode 100644 index 0000000..55e6721 --- /dev/null +++ b/projects/mmdet3d_plugin/bevformer/detectors/occformer_waymo.py @@ -0,0 +1,428 @@ +# --------------------------------------------- +# Copyright (c) OpenMMLab. All rights reserved. +# --------------------------------------------- +# Modified by Zhiqi Li +# --------------------------------------------- + +import copy +import torch +from mmcv.runner import auto_fp16, force_fp32 +from mmdet.models import DETECTORS +from mmdet3d.models.detectors.mvx_two_stage import MVXTwoStageDetector +from projects.mmdet3d_plugin.models.utils.grid_mask import GridMask + +@DETECTORS.register_module() +class CVTOccWaymo(MVXTwoStageDetector): + def __init__(self, + use_grid_mask=False, + pts_voxel_layer=None, + pts_voxel_encoder=None, + pts_middle_encoder=None, + pts_fusion_layer=None, + img_backbone=None, + clip_backbone=None, + pts_backbone=None, + img_neck=None, + pts_neck=None, + pts_bbox_head=None, + img_roi_head=None, + img_rpn_head=None, + train_cfg=None, + test_cfg=None, + pretrained=None, + video_test_mode=False, + queue_length=1, + sampled_queue_length=1, + sample_num=None, + save_results=False, + use_temporal=None, + sample_policy_test=None, + **kwargs): + super(CVTOccWaymo, self).__init__(pts_voxel_layer, + pts_voxel_encoder, + pts_middle_encoder, + pts_fusion_layer, + img_backbone, + pts_backbone, + img_neck, + pts_neck, + pts_bbox_head, + img_roi_head, + img_rpn_head, + train_cfg, + test_cfg, + pretrained) + + self.grid_mask = GridMask(True, True, rotate=1, offset=False, ratio=0.5, mode=1, prob=0.7) + self.use_grid_mask = use_grid_mask + self.fp16_enabled = False + self.queue_length = queue_length + self.sample_num = sample_num # used in test + self.sampled_queue_length = sampled_queue_length + assert self.sampled_queue_length == len(self.sample_num), "sampled_queue_length should equal to len(sample_num)" + self.video_test_mode = video_test_mode + self.save_results = save_results + self.use_temporal = use_temporal + self.prev_frame_info = { + 'prev_bev_list': [], + 'scene_token': None, + 'prev_pos': 0, + 'prev_angle': 0, + 'prev_img_metas_list': [], + } + + def extract_img_feat(self, img, len_queue=None): + """Extract features of images. + Args: + img (torch.Tensor): Image tensor with shape (bs, n_views, C, H, W). + But for previous img, its shape will be (bs*len_queue, n_views, C, H, W). + len_queue (int): The length of the queue. It is less or equal to self.queue_length. + It is used when extracting features of previous images. + Returns: + list[torch.Tensor]: Image features. Each with shape (bs, n_views, C, H, W). + But different scales (from FPN) will have different shapes. + For previous img, its shape will be (bs, len_queue, n_views, C, H, W). 
+ """ + + bs_length, num_views, C, H, W = img.size() + bs_length_num_views = bs_length * num_views + img = img.reshape(bs_length_num_views, C, H, W) + + if self.use_grid_mask: + img = self.grid_mask(img) + img_feats = self.img_backbone(img) + if isinstance(img_feats, dict): + img_feats = list(img_feats.values()) + + if self.with_img_neck: + img_feats = self.img_neck(img_feats) + + img_feats_reshaped = [] + for img_feat in img_feats: + bs_length_num_views, C, H, W = img_feat.size() + if len_queue is not None: # for prev imgs + bs = int(bs_length / len_queue) + img_feats_reshaped.append(img_feat.view(bs, len_queue, num_views, C, H, W)) + else: # for current imgs + img_feats_reshaped.append(img_feat.view(bs_length, num_views, C, H, W)) + + return img_feats_reshaped + + @auto_fp16(apply_to=('img')) + def extract_feat(self, img, len_queue=None): + """Extract features from images and points.""" + img_feats = self.extract_img_feat(img, len_queue=len_queue) + return img_feats + + def forward_pts_train(self, multi_level_feats, + voxel_semantics, + valid_mask, + cur_img_metas, + prev_bev_list=[], + prev_img_metas=[], + **kwargs): + """Forward training function. + Args: + multi_level_feats (list[torch.Tensor]): Multi level img_feats. + voxel_semantics (torch.Tensor): Occupancy ground truth. + cur_img_metas (list[dict]): Meta information of samples. It has length of batch_size. + prev_bev_list (list[torch.Tensor]): BEV features of previous frames. + Each has shape (bs, bev_h*bev_w=40000, embed_dims=256). + prev_img_metas (list[dict[dict]]): Meta information of previous samples. + Returns: + losses (dict): Losses of each branch. + """ + + outs = self.pts_bbox_head(multi_level_feats, + cur_img_metas, + prev_bev_list, + prev_img_metas, + only_bev=False, + **kwargs) + losses = self.pts_bbox_head.loss(voxel_semantics, valid_mask, preds_dicts=outs) + + return losses + + def forward_dummy(self, img): + dummy_metas = None + return self.forward_test(img=img, img_metas=[[dummy_metas]]) + + def forward(self, return_loss=True, **kwargs): + """ + Calls either forward_train or forward_test depending on whether return_loss=True. + Note this setting will change the expected inputs. When + `return_loss=True`, img and img_metas are single-nested + (i.e. torch.Tensor and list[dict]), + and when `resturn_loss=False`, img and img_metas should be + double nested (i.e. list[torch.Tensor], list[list[dict]]), + with the outer list indicating test time augmentations. + """ + + if return_loss: + return self.forward_train(**kwargs) + else: + return self.forward_test(**kwargs) + + def obtain_history_bev(self, + prev_img_feats_list=[], + prev_img_metas=[], + prev_len_queue=0): + """ + Obtain history BEV features iteratively. + To save GPU memory, gradients are not calculated. + Args: + prev_img_feats_list (list[torch.Tensor]): The list has length eqauls to the scales. + Each tensor has shape: bs, prev_len_queue, n_views, C, H, W. + prev_img_metas (list[dict[dict]]): Meta information of each sample. + The list has length of batch size. + The dict has keys 0, 1, 2, ..., len-2. The element of each key is a dict. + prev_len_queue (int): The length of the queue - 1. + Returns: + prev_bev_list (list[torch.Tensor]): Each has shape ([bs, bev_h*bev_w=40000, embed_dims=256]). + """ + + self.eval() + prev_bev_list = [] + with torch.no_grad(): + for i in range(prev_len_queue): + img_feats = [each_scale[:, i, ...] 
for each_scale in prev_img_feats_list] + img_metas = [each_batch[i] for each_batch in prev_img_metas] # list[dict] of length equals to batch_size + if img_metas[0]['prev_bev_exists'] is not True: # HERE assume batch size = 1 + prev_bev_list = [] + + prev_bev = self.pts_bbox_head(img_feats, + img_metas, + prev_bev_list, + prev_img_metas=None, # useless + only_bev=True) + prev_bev_list.append(prev_bev) + + self.train() + + return prev_bev_list + + @auto_fp16(apply_to=('img', 'points')) + def forward_train(self, img=None, + voxel_semantics=None, + valid_mask=None, + img_metas=None, + **kwargs): + """ + Forward training function. + Args: + img_metas (list[dict[dict]]): Meta information of each sample. + The list has length of batch size. + The dict has keys 0, 1, 2, ..., len-1. The element of each key is a dict. + img (torch.Tensor): Images of each sample with shape (bs, len, n_views, C, H, W). + voxel_semantics (torch.Tensor): Occupancy ground truth + with shape (bs, bev_h, bev_w, total_z). + valid_mask (torch.Tensor): unified boolean mask for visible voxel + with shape (bs, bev_w, bev_h, total_z). + Returns: + losses (dict): Losses of different branches. + """ + + # Step 1: prepare cur_img_feats and cur_img_metas + batch_size, len_queue, _, _, _, _ = img.shape + cur_img = img[:, -1, ...] + cur_img_feats = self.extract_feat(img=cur_img) # list[tensor], each tensor is of shape (B, N, C, H, W). H and W are different across scales. + img_metas_deepcopy = copy.deepcopy(img_metas) + cur_img_metas = [each_batch[len_queue-1] for each_batch in img_metas] # list[dict] of length equals to batch_size + + # Step 2: prepare prev_bev_list, prev_img_metas + if cur_img_metas[0]['prev_bev_exists']: + prev_img = img[:, :-1, ...] + bs, prev_len_queue, num_cams, C, H, W = prev_img.shape + prev_img = prev_img.reshape(bs*prev_len_queue, num_cams, C, H, W) + with torch.no_grad(): + prev_img_feats = self.extract_feat(img=prev_img, len_queue=prev_len_queue) + + prev_img_metas = [] + for each_batch in img_metas_deepcopy: + each_batch.pop(len_queue-1) + prev_img_metas.append(each_batch) # list[dict[dict]] + + prev_bev_list = self.obtain_history_bev(prev_img_feats, prev_img_metas, prev_len_queue) + + # Step 3: adjust the length of these two to be consistent + prev_bev_list_len = len(prev_bev_list) + for each_batch in prev_img_metas: + if len(each_batch) > prev_bev_list_len: + for i in range(0, len(each_batch) - prev_bev_list_len): # len(each_batch) = len_queue - 1 + each_batch.pop(i) + else: + prev_bev_list = [] + prev_img_metas = [{} for _ in range(batch_size)] + # Step 4: forward in head to get losses + losses = dict() + losses_pts = self.forward_pts_train(cur_img_feats, + voxel_semantics, + valid_mask, + cur_img_metas, + prev_bev_list, + prev_img_metas, + **kwargs) + losses.update(losses_pts) + + return losses + + def forward_test(self, img=None, + img_metas=None, + voxel_semantics=None, + valid_mask=None, + **kwargs): + ''' + Forward inference function. + Args: + (all arg are be wrapped one more list. after we take it out, the type of each parameter are below) + img (torch.Tensor): Images of each sample with shape (bs, n_views, C, H, W). + img_metas (list[dict]): len is bs. + voxel_semantics (torch.Tensor): Occupancy ground truth with shape (bs, bev_h, bev_w, total_z). + valid_mask (torch.Tensor): unified boolean mask for visible voxel with shape (bs, bev_w, bev_h, total_z). + Returns: + If self.save_result is true. We will save the occ_result for visualization. 
+                voxel_semantics (numpy.ndarray): Occupancy ground truth.
+                voxel_semantics_preds (numpy.ndarray): Occupancy semantics prediction. The same shape.
+                valid_mask (numpy.ndarray): unified boolean mask for visible voxel. The same shape.
+                sample_idx (int): The index of the sample.
+        '''
+
+        # Step 1: prepare the input
+        # all args are wrapped in one more list, so we need to take the first element
+        if img is not None: img = img[0]
+        if voxel_semantics is not None: voxel_semantics = voxel_semantics[0]
+        if valid_mask is not None: valid_mask = valid_mask[0]
+        if img_metas is not None: img_metas = img_metas[0] # list[dict] of length 1
+
+        # If the input frame is in a new scene, the prev_frame_info needs to be reset.
+        scene_token = img_metas[0]['sample_idx'] // 1000
+        if scene_token != self.prev_frame_info['scene_token']:
+            self.prev_frame_info['prev_bev_list'] = []
+            self.prev_frame_info['prev_img_metas_list'] = []
+            # update idx
+            self.prev_frame_info['scene_token'] = scene_token
+
+        if not self.video_test_mode:
+            # default value of self.video_test_mode is True
+            self.prev_frame_info['prev_bev_list'] = []
+            self.prev_frame_info['prev_img_metas_list'] = []
+
+        # Step 2: prepare prev_bev_list, prev_img_metas_list
+        prev_bev_list = self.prev_frame_info['prev_bev_list']
+        len_queue = len(prev_bev_list)
+        prev_img_metas_list = self.prev_frame_info['prev_img_metas_list']
+        assert len(prev_bev_list) == len(prev_img_metas_list), "len(prev_bev_list) should equal len(prev_img_metas_list)"
+
+        # Step 3: Get the delta of ego position and angle between two timestamps.
+        # tmp_pos = copy.deepcopy(img_metas[0]['can_bus'][:3])
+        # tmp_angle = copy.deepcopy(img_metas[0]['can_bus'][-1])
+
+        # if len_queue > 0:
+        #     img_metas[0]['can_bus'][:3] -= self.prev_frame_info['prev_pos']
+        #     img_metas[0]['can_bus'][-1] -= self.prev_frame_info['prev_angle']
+        # else:
+        #     img_metas[0]['can_bus'][-1] = 0
+        #     img_metas[0]['can_bus'][:3] = 0
+
+        # Step 4: sample the previous BEV features and img_metas
+        index_list = []
+        for i in self.sample_num[:-1]:
+            if len_queue - i < 0: continue
+            # If the index is out of range, then it will be skipped.
+            # Therefore the total length of index_list will sometimes be shorter.
+            index_list.append(len_queue - i)
+
+        # prepare sampled_prev_bev_list, sampled_prev_img_metas_list
+        sampled_prev_bev_list = []
+        sampled_prev_img_metas_list = []
+        if len_queue > 0:
+            for index in index_list:
+                sampled_prev_bev_list.append(prev_bev_list[index])
+                sampled_prev_img_metas_list.append(prev_img_metas_list[index])
+
+        if len(sampled_prev_img_metas_list) == 0 and self.use_temporal is not None:
+            sampled_prev_img_metas_list.append(img_metas[0].copy())
+            sampled_prev_bev_list.append(torch.zeros([1, 40000, 256], device='cuda', dtype=torch.float32))
+
+        # change sampled_prev_img_metas_list into a list[dict[dict]]
+        sampled_prev_img_metas_DICT = {}
+        sampled_prev_img_metas_list_len = len(sampled_prev_img_metas_list)
+        for i in range(sampled_prev_img_metas_list_len):
+            sampled_prev_img_metas_DICT[self.sampled_queue_length - 1 - sampled_prev_img_metas_list_len + i] = sampled_prev_img_metas_list[i]
+            # if sampled_queue_length - 1 = sampled_prev_img_metas_list_len, then the keys will be 0, 1, 2, ..., sampled_queue_length - 2
+            # if sampled_queue_length - 1 > sampled_prev_img_metas_list_len, then the keys will be sampled_queue_length - 1 - sampled_prev_img_metas_list_len, ..., sampled_queue_length - 2
+            # if `sampled_prev_img_metas_list` is empty, then `sampled_prev_img_metas_DICT` will be an empty dict.
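+            # Illustrative example (hypothetical numbers, not from any config): with sampled_queue_length = 4
+            # and two sampled history frames, the keys are 4 - 1 - 2 + 0 = 1 and 4 - 1 - 2 + 1 = 2,
+            # i.e. the history metas always fill the highest keys below sampled_queue_length - 1.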
+ + # Step 5: forward + outs, voxel_semantics_preds = self.simple_test(img, img_metas, + sampled_prev_bev_list, + prev_img_metas=[sampled_prev_img_metas_DICT], + **kwargs) + + new_prev_bev = outs['bev_embed'] + extra = outs['extra'] + + # Step 6: update the self.prev_frame_info + # During inference, we save the BEV features and ego motion of each timestamp. + # self.prev_frame_info['prev_pos'] = tmp_pos + # self.prev_frame_info['prev_angle'] = tmp_angle + if self.queue_length > 1: + if len (prev_bev_list) > (self.queue_length - 2): + del prev_bev_list[0] + prev_bev_list.append(new_prev_bev) + self.prev_frame_info['prev_bev_list'] = prev_bev_list + + if len(prev_img_metas_list) > (self.queue_length - 2): + del prev_img_metas_list[0] + prev_img_metas_list.append(img_metas[0]) + self.prev_frame_info['prev_img_metas_list'] = prev_img_metas_list + + # Step 7: save the results (controlled by `self.save_results`) + # If you want to do visualization, you can set `self.save_results` to True. + # If you want to do evaluation, you can set `self.save_results` to False. + if self.save_results: + occ_results = { + "voxel_semantics": voxel_semantics.to(torch.uint8).cpu().numpy(), + "voxel_semantics_preds": voxel_semantics_preds.to(torch.uint8).cpu().numpy(), + "mask": valid_mask.to(torch.uint8).cpu().numpy(), + "sample_idx": img_metas[0]['sample_idx'], + } + + else: + occ_results = self.pts_bbox_head.eval_metrics(voxel_semantics, voxel_semantics_preds, valid_mask) + sample_idx = img_metas[0]['sample_idx'] + scene_id = sample_idx % 1000000 // 1000 + occ_results['scene_id'] = scene_id + frame_id = sample_idx % 1000 + occ_results['frame_id'] = frame_id + + return occ_results + + def simple_test(self, img, + img_metas, + prev_bev_list=[], + prev_img_metas=[], + **kwargs): + """ + Test function without augmentaiton. + Args: + img (torch.Tensor): Images of each sample with shape (bs, n_views, C, H, W). + img_metas (list[dict]): Meta information of each sample. + prev_bev_list (list[torch.Tensor]): BEV features of previous frames. + Each has shape (bs, bev_h*bev_w=40000, embed_dims=256). + prev_img_metas (list[dict[dict]]): Meta information of previous samples. + Returns: + outs (dict): with keys "bev_embed, occ, extra" + occ (torch.Tensor): Occupancy semantics prediction. 
+ """ + + multi_level_feats = self.extract_feat(img=img) + outs = self.pts_bbox_head(multi_level_feats, + img_metas, + prev_bev_list, + prev_img_metas, + only_bev=False, + **kwargs) + occ = self.pts_bbox_head.get_occ(outs) + + return outs, occ \ No newline at end of file diff --git a/projects/mmdet3d_plugin/bevformer/detectors/solofusion.py b/projects/mmdet3d_plugin/bevformer/detectors/solofusion.py new file mode 100644 index 0000000..9087b4f --- /dev/null +++ b/projects/mmdet3d_plugin/bevformer/detectors/solofusion.py @@ -0,0 +1,863 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np +from mmcv.runner import force_fp32, auto_fp16 +# from mmcv.ops.nms import batched_nms +from mmdet.models import DETECTORS +from mmdet3d.models import builder +from mmdet3d.models.detectors.mvx_two_stage import MVXTwoStageDetector +from san import tools as san_tools +from detectron2.engine import DefaultTrainer +from detectron2.checkpoint import DetectionCheckpointer +from .bevdet_solo import BEVDet_solo + +def generate_forward_transformation_matrix(img_meta_dict): + # res = torch.eye(3) + + # if 'transformation_3d_flow' in img_meta_dict: + # for transform_type in img_meta_dict['transformation_3d_flow']: + # if transform_type == "R": + # if "pcd_rotation" in img_meta_dict: + # res = img_meta_dict['pcd_rotation'].T @ res # .T since L158 of lidar_box3d has points @ rot + # elif transform_type == "S": + # if "pcd_scale_factor" in img_meta_dict: + # res = res * img_meta_dict['pcd_scale_factor'] + # elif transform_type == "T": + # if "pcd_trans" in img_meta_dict: + # assert torch.tensor(img_meta_dict['pcd_trans']).abs().sum() == 0, \ + # "I'm not supporting translation rn; need to convert to hom coords which is annoying" + # elif transform_type == "HF": # Horizontal is Y apparently + # if "pcd_horizontal_flip" in img_meta_dict: + # tmp = torch.eye(3) + # tmp[1, 1] = -1 + # res = tmp @ res + # elif transform_type == "VF": + # if "pcd_vertical_flip" in img_meta_dict: + # tmp = torch.eye(3) + # tmp[0, 0] = -1 + # res = tmp @ res + # else: + # raise Exception(str(img_meta_dict)) + + # For now, there is no data augmentation + hom_res = torch.eye(4) + return hom_res + +@DETECTORS.register_module() +class SOLOFusion(BEVDet_solo): + def __init__(self, + pre_process=None, + pre_process_neck=None, + input_sample_policy=None, + do_history=True, + interpolation_mode='bilinear', + history_cat_num=1, # Number of history frames to cat + history_queue_length=1, + history_cat_conv_out_channels=None, + FREE_LABEL=23, + num_classes=16, + do_history_stereo_fusion=False, + stereo_neck=None, + history_stereo_prev_step=1, + **kwargs): + super(SOLOFusion, self).__init__(**kwargs) + + self.FREE_LABEL=FREE_LABEL + self.num_classes=num_classes + self.input_sample_policy=input_sample_policy + #### Prior to history fusion, do some per-sample pre-processing. 
+ self.single_bev_num_channels = self.img_view_transformer.numC_Trans + + # Lightweight MLP + self.embed = nn.Sequential( + nn.Conv2d(self.single_bev_num_channels, self.single_bev_num_channels, kernel_size=1, padding=0, stride=1), + nn.BatchNorm2d(self.single_bev_num_channels), + nn.ReLU(inplace=True), + nn.Conv2d(self.single_bev_num_channels, self.single_bev_num_channels, kernel_size=1, padding=0, stride=1), + nn.BatchNorm2d(self.single_bev_num_channels), + nn.ReLU(inplace=True)) + + # Preprocessing like BEVDet4D + self.pre_process = pre_process is not None + if self.pre_process: + self.pre_process_net = builder.build_backbone(pre_process) + + #### Deal with history + self.do_history = do_history + # if self.do_history: + self.interpolation_mode = interpolation_mode + self.history_queue_length = history_queue_length # 30 + self.queue_length = self.history_queue_length + 1 + self.history_cat_num = history_cat_num + self.sample_interval = self.history_queue_length // self.history_cat_num # 30 / 6 = 5 + self.sample_index = [(i+1) * self.sample_interval - 1 for i in range(self.history_cat_num)] + # [4, 9, 14, 19, 24, 29] + + self.history_cam_sweep_freq = 0.5 # seconds between each frame + history_cat_conv_out_channels = (history_cat_conv_out_channels + if history_cat_conv_out_channels is not None + else self.single_bev_num_channels) + # Embed each sample with its relative temporal offset with current timestep + self.history_keyframe_time_conv = nn.Sequential( + nn.Conv2d(self.single_bev_num_channels + 1, + self.single_bev_num_channels, + kernel_size=1, + padding=0, + stride=1), + nn.BatchNorm2d(self.single_bev_num_channels), + nn.ReLU(inplace=True)) + + # Then concatenate and send them through an MLP. + self.history_keyframe_cat_conv = nn.Sequential( + nn.Conv2d(self.single_bev_num_channels * (self.history_cat_num + 1), + history_cat_conv_out_channels, + kernel_size=1, + padding=0, + stride=1), + nn.BatchNorm2d(history_cat_conv_out_channels), + nn.ReLU(inplace=True)) + + self.history_sweep_time = None + + self.history_bev = None + self.history_seq_ids = None + self.history_forward_augs = None + self.history_global_to_lidar = None + + self.prev_frame_info = { + 'prev_bev_list': [], + 'scene_token': None, + 'prev_pos': 0, + 'prev_angle': 0, + 'prev_img_metas_list': [], + 'prev_stereo_feats_list': [], + 'prev_sweep_time_list': [],} + + #### Stereo depth fusion + self.do_history_stereo_fusion = do_history_stereo_fusion + if self.do_history_stereo_fusion: + self.stereo_neck = stereo_neck + if self.stereo_neck is not None: + self.stereo_neck = builder.build_neck(self.stereo_neck) + self.history_stereo_prev_step = history_stereo_prev_step + + self.prev_stereo_img_feats = None # B x N x C x H x W + self.prev_stereo_global_to_img = None # B x N x 4 x 4 + self.prev_stereo_img_forward_augs = None + + self.fp16_enabled = False + + @auto_fp16() + def image_encoder(self, img): + ''' + Encoder for image features. + Args: + img (torch.Tensor): Image tensor. shape (B, N, C=3, H, W) + Returns: + neck_feats (torch.Tensor): Image features. shape (B, N, output_dim, ouput_H, output_W) + stereo_feats (torch.Tensor): Stereo features. 
shape (B, N, C, H, W) + ''' + + # Step 1: use image backbone to extract image features + B, N, C, imH, imW = img.shape + img = img.view(B * N, C, imH, imW) + backbone_feats = self.img_backbone(img) + + # Step 2: use image neck + neck_feats = self.img_neck(backbone_feats) + if isinstance(neck_feats, list): + assert len(neck_feats) == 1 # SECONDFPN returns a length-one list + neck_feats = neck_feats[0] + + _, output_dim, ouput_H, output_W = neck_feats.shape + neck_feats = neck_feats.view(B, N, output_dim, ouput_H, output_W) + + # Step 3: use stereo_necks to extract stereo features + if self.do_history_stereo_fusion: + backbone_feats_detached = [tmp.detach() for tmp in backbone_feats] + stereo_feats = self.stereo_neck(backbone_feats_detached) + if isinstance(stereo_feats, list): + assert len(stereo_feats) == 1 # SECONDFPN returns a trivial list + stereo_feats = stereo_feats[0] + stereo_feats = F.normalize(stereo_feats, dim=1, eps=self.img_view_transformer.stereo_eps) + return neck_feats, stereo_feats.view(B, N, *stereo_feats.shape[1:]) + + else: + return neck_feats, None + + @force_fp32() + def get_depth_loss(self, depth_gt, depth): + """ + This was updated to be more similar to BEVDepth's original depth loss function. + """ + B, N, H, W = depth_gt.shape + fg_mask = (depth_gt != 0).view(-1) + depth_gt = (depth_gt - self.img_view_transformer.grid_config['dbound'][0])\ + /self.img_view_transformer.grid_config['dbound'][2] + depth_gt = torch.clip(torch.floor(depth_gt), 0, + self.img_view_transformer.D).to(torch.long) + assert depth_gt.max() < self.img_view_transformer.D + + depth_gt_logit = F.one_hot(depth_gt.reshape(-1), + num_classes=self.img_view_transformer.D) + depth_gt_logit = depth_gt_logit.reshape(B, N, H, W, + self.img_view_transformer.D).permute( + 0, 1, 4, 2, 3).to(torch.float32) # B x N x D x H x W + depth = depth.view(B, N, self.img_view_transformer.D, H, W).softmax(dim=2) + + depth_gt_logit = depth_gt_logit.permute(0, 1, 3, 4, 2).view(-1, self.img_view_transformer.D) + depth = depth.permute(0, 1, 3, 4, 2).contiguous().view(-1, self.img_view_transformer.D) + + loss_depth = (F.binary_cross_entropy( + depth[fg_mask], + depth_gt_logit[fg_mask], + reduction='none', + ).sum() / max(1.0, fg_mask.sum())) + loss_depth = self.img_view_transformer.loss_depth_weight * loss_depth + return loss_depth + + @force_fp32(apply_to=('rots', 'trans', 'intrins', 'post_rots', 'post_trans')) + def process_stereo_before_fusion(self, stereo_feats, img_metas, rots, trans, intrins, post_rots, post_trans): + ''' + Process stereo features before fusion. + Args: + stereo_feats (torch.Tensor): Stereo features. shape (B, N, C, H, W) + img_metas (List[dict]): Meta information of each sample. + rots (torch.Tensor): Rotation matrix. shape (B, N, 3, 3) + trans (torch.Tensor): Translation matrix. shape (B, N, 3) + intrins (torch.Tensor): Intrinsic matrix. shape (B, N, 3, 3) + post_rots (torch.Tensor): Post rotation matrix. shape (B, N, 3, 3) + post_trans (torch.Tensor): Post translation matrix. shape (B, N, 3) + Returns: + self.prev_stereo_img_feats (torch.Tensor): Previous stereo image features. shape (B, self.history_stereo_prev_step, N, C, H, W) + self.prev_stereo_global_to_img (torch.Tensor): Previous stereo global to image transformation matrix. shape (B, self.history_stereo_prev_step, N, 4, 4) + self.prev_stereo_img_forward_augs (torch.Tensor): Previous stereo image forward augmentation matrix. shape (B, self.history_stereo_prev_step, N, 4, 4) + global_to_img (torch.Tensor): Global to image transformation matrix. 
shape (B, N, 4, 4) + img_forward_augs(cam_to_cam_aug) (torch.Tensor): Image forward augmentation matrix. shape (B, N, 4, 4) + curr_unaug_cam_to_prev_unaug_cam (torch.Tensor): Current unaugmented camera to previous unaugmented camera transformation matrix. shape (B, N, N, 4, 4) + ''' + + # Step 1: get `start_of_sequence`, `global_to_curr_lidar_rt`, `lidar_forward_augs` + B, N, C, H, W = stereo_feats.shape + device = stereo_feats.device + start_of_sequence_list = [] + for img_meta in img_metas: + start_of_sequence_list.append(img_meta['start_of_sequence']) + start_of_sequence = torch.BoolTensor(start_of_sequence_list).to(device) + + global_to_curr_lidar_rt_list = [] + for img_meta in img_metas: + global_to_curr_lidar_rt_list.append(torch.tensor(img_meta['global_to_curr_lidar_rt'], device=device, dtype=torch.float32)) + global_to_curr_lidar_rt = torch.stack(global_to_curr_lidar_rt_list, dim=0) # B x 4 x 4 + + lidar_forward_augs_list = [] + for img_meta in img_metas: + lidar_forward_augs_list.append(generate_forward_transformation_matrix(img_meta)) + lidar_forward_augs = torch.stack(lidar_forward_augs_list, dim=0).to(rots.device) + + # Step 2: get `img_forward_augs`, `intrins4x4`, `cam_to_lidar_aug` + cam_to_cam_aug = rots.new_zeros((B, N, 4, 4)) + cam_to_cam_aug[:, :, 3, 3] = 1 + cam_to_cam_aug[:, :, :3, :3] = post_rots + cam_to_cam_aug[:, :, :3, 3] = post_trans + img_forward_augs = cam_to_cam_aug + + intrins4x4 = rots.new_zeros((B, N, 4, 4)) + intrins4x4[:, :, 3, 3] = 1 + intrins4x4[:, :, :3, :3] = intrins + + cam_to_lidar_aug = rots.new_zeros((B, N, 4, 4)) + cam_to_lidar_aug[:, :, 3, 3] = 1 + cam_to_lidar_aug[:, :, :3, :3] = rots + cam_to_lidar_aug[:, :, :3, 3] = trans # indeed `sensor2ego` in waymo + + # Step 3: get `global_to_img` + # Global -> Lidar unaug -> lidar aug -> cam space unaug -> cam xyd unaug + global_to_img = (intrins4x4 @ torch.inverse(cam_to_lidar_aug) + @ lidar_forward_augs.unsqueeze(1) @ global_to_curr_lidar_rt.unsqueeze(1)) # B x N x 4 x 4 + + # Step 3: padding the first frame + # Then, let's check if stereo saved values are none or we're the first in the sequence. 
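+        # (On the very first call the stereo buffers are still None, so they are initialised by
+        #  repeating the current stereo features/poses history_stereo_prev_step times; afterwards
+        #  only the entries flagged by start_of_sequence are overwritten.)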
+ if self.prev_stereo_img_feats is None: + # Detach and clone the current stereo features to create a history buffer + prev_stereo_img_feats = stereo_feats.detach().clone() + # Reshape and repeat the stereo features to create a history buffer + prev_stereo_img_feats = prev_stereo_img_feats.unsqueeze(1).repeat(1, self.history_stereo_prev_step, 1, 1, 1, 1) + # B x history_stereo_prev_step x N x C x H x W + self.prev_stereo_img_feats = prev_stereo_img_feats + + # For `global_to_img` and `img_forward_augs`, they are the same + prev_stereo_global_to_img = global_to_img.clone() + prev_stereo_global_to_img = prev_stereo_global_to_img.unsqueeze(1).repeat(1, self.history_stereo_prev_step, 1, 1, 1) + # B x history_stereo_prev_step x N x 4 x 4 + self.prev_stereo_global_to_img = prev_stereo_global_to_img + + prev_stereo_img_forward_augs = img_forward_augs.clone() + prev_stereo_img_forward_augs = prev_stereo_img_forward_augs.unsqueeze(1).repeat(1, self.history_stereo_prev_step, 1, 1, 1) + # B x history_stereo_prev_step x N x 4 x 4 + self.prev_stereo_img_forward_augs = prev_stereo_img_forward_augs + + # self.prev_stereo_frame_idx = stereo_feats.new_zeros((B))[:, None].repeat( + # 1, self.history_stereo_prev_step + # ) # B x history_stereo_prev_step + else: + # self.prev_stereo_img_feats shape # B x history_stereo_prev_step x N x C x H x W + # stereo_feats # B x N x C x H x W + self.prev_stereo_img_feats[start_of_sequence] = stereo_feats[start_of_sequence].unsqueeze(1).detach().clone() + self.prev_stereo_global_to_img[start_of_sequence] = global_to_img[start_of_sequence].unsqueeze(1).clone() + self.prev_stereo_img_forward_augs[start_of_sequence] = img_forward_augs[start_of_sequence].unsqueeze(1).clone() + + # These are both B x N x 4 x 4. Want the result to be B x prev_N x curr_N x 4 x 4 + curr_unaug_cam_to_prev_unaug_cam = self.prev_stereo_global_to_img[:, 0][:, :, None, :, :] @ torch.inverse(global_to_img)[:, None, :, :, :] + + return (self.prev_stereo_img_feats[:, self.history_stereo_prev_step - 1], + self.prev_stereo_global_to_img[:, self.history_stereo_prev_step - 1], + self.prev_stereo_img_forward_augs[:, self.history_stereo_prev_step - 1], + global_to_img, + img_forward_augs, + curr_unaug_cam_to_prev_unaug_cam) + + def process_stereo_for_next_timestep(self, stereo_feats, global_to_img, img_forward_augs): + self.prev_stereo_img_feats[:, 1:] = self.prev_stereo_img_feats[:, :-1].clone() + self.prev_stereo_img_feats[:, 0] = stereo_feats.detach().clone() + self.prev_stereo_global_to_img[:, 1:] = self.prev_stereo_global_to_img[:, :-1].clone() + self.prev_stereo_global_to_img[:, 0] = global_to_img.clone() + self.prev_stereo_img_forward_augs[:, 1:] = self.prev_stereo_img_forward_augs[:, :-1].clone() + self.prev_stereo_img_forward_augs[:, 0] = img_forward_augs.detach().clone() + + @force_fp32() + def fuse_history(self, curr_bev, img_metas): + ''' + Fuse long term history into current BEV. + Args: + curr_bev (torch.Tensor): Current BEV. shape (B, base_bev_channels, H, W) + img_metas (List[dict]): Meta information of each sample. + Returns: + feats_to_return (torch.Tensor): Fused BEV features. 
shape (B, history_cat_conv_out_channels, H, W) + ''' + + # Step 1: get `seq_ids`, `start_of_sequence`, `forward_augs`, `global_to_curr_lidar_rt` + seq_ids = torch.LongTensor([img_meta['sequence_group_idx'] for img_meta in img_metas]) + seq_ids = seq_ids.to(curr_bev.device) + + start_of_sequence = torch.BoolTensor([img_meta['start_of_sequence'] for img_meta in img_metas]) + start_of_sequence = start_of_sequence.to(curr_bev.device) + + forward_augs_list = [] + for img_meta in img_metas: + forward_augs_list.append(generate_forward_transformation_matrix(img_meta)) + forward_augs = torch.stack(forward_augs_list, dim=0).to(curr_bev.device) + + global_to_curr_lidar_rt_list = [] + for img_meta in img_metas: + global_to_curr_lidar_rt_list.append(torch.tensor(img_meta['global_to_curr_lidar_rt'], device=curr_bev.device, dtype=torch.float32)) + global_to_curr_lidar_rt = torch.stack(global_to_curr_lidar_rt_list, dim=0) # B x 4 x 4 + + # Step 2: initialize history + if self.history_bev is None: + self.history_seq_ids = seq_ids.clone() + self.history_forward_augs = forward_augs.clone() + self.history_global_to_lidar = global_to_curr_lidar_rt.unsqueeze(1).repeat(1, self.history_queue_length, 1, 1) # B x T x 4 x 4 + # Repeat the first frame feature to be history + self.history_bev = curr_bev.unsqueeze(1).repeat(1, self.history_queue_length, 1, 1, 1) # B x T x 80 x H x W + # All 0s, representing current timestep. + self.history_sweep_time = curr_bev.new_zeros(curr_bev.shape[0], self.history_queue_length) # B x T + + # Step 3: detach history + self.history_bev = self.history_bev.detach() + self.history_forward_augs = self.history_forward_augs.detach() + self.history_sweep_time = self.history_sweep_time.detach() + self.history_global_to_lidar = self.history_global_to_lidar.detach() + + assert self.history_bev.dtype == torch.float32 + + # Step 4: Deal with the new sequences + # First, sanity check. For every non-start of sequence, history id and seq id should be same. + assert (self.history_seq_ids != seq_ids)[~start_of_sequence].sum() == 0, \ + "{}, {}, {}".format(self.history_seq_ids, seq_ids, start_of_sequence) + + # Step 5: Replace all the new sequences' positions in history with the curr_bev information + self.history_bev[start_of_sequence] = curr_bev[start_of_sequence].unsqueeze(1) # B x T x 80 x H x W + self.history_seq_ids[start_of_sequence] = seq_ids[start_of_sequence] + self.history_forward_augs[start_of_sequence] = forward_augs[start_of_sequence] + self.history_global_to_lidar[start_of_sequence] = global_to_curr_lidar_rt[start_of_sequence].unsqueeze(1) + + # Step 6: new timestep, everything in history gets pushed back one. 
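+        # (e.g. a history slot written three frames ago now carries sweep_time 3, which is later
+        #  scaled by history_cam_sweep_freq = 0.5 s into a 1.5 s temporal-offset channel;
+        #  numbers are illustrative.)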
+ self.history_sweep_time = self.history_sweep_time + 1 + self.history_sweep_time[start_of_sequence] = 0 # zero the new sequence timestep starts + + # Step 7: Get grid idxs & grid2bev + B, c, h, w = curr_bev.shape + assert c == self.single_bev_num_channels, "channel dim is wrong in curr_bev" + dtype = curr_bev.dtype + device = curr_bev.device + xs = torch.linspace(0, w - 1, w, dtype=dtype, device=device).view(1, w).expand(h, w) + ys = torch.linspace(0, h - 1, h, dtype=dtype, device=device).view(h, 1).expand(h, w) + grid = torch.stack((xs, ys, torch.ones_like(xs), torch.ones_like(xs)), -1) # H x W x 4 + grid = grid.unsqueeze(0).repeat(B, 1, 1, 1) # B x H x W x 4 + grid = grid.unsqueeze(-1) # B x H x W x 4 x 1 + + # This converts BEV indices to meters + # IMPORTANT: the feat2bev[0, 3] is changed from feat2bev[0, 2] because previous was 2D rotation + # which has 2-th index as the hom index. Now, with 3D hom, 3-th is hom + feat2bev = torch.zeros((4,4),dtype=grid.dtype).to(grid) + feat2bev[0, 0] = self.img_view_transformer.dx[0] + feat2bev[1, 1] = self.img_view_transformer.dx[1] + feat2bev[0, 3] = self.img_view_transformer.bx[0] - self.img_view_transformer.dx[0] / 2. + feat2bev[1, 3] = self.img_view_transformer.bx[1] - self.img_view_transformer.dx[1] / 2. + feat2bev[2, 2] = 1 + feat2bev[3, 3] = 1 + feat2bev = feat2bev.view(1,4,4) + + # Step 8: Get flow for grid sampling. + # The flow is as follows. Starting from grid locations in curr bev, transform to BEV XY11, + # backward of current augmentations, curr lidar to prev lidar, forward of previous augmentations, + # transform to previous grid locations. + + ## global to prev lidar + sample_history_global_to_lidar = [self.history_global_to_lidar[:, sample_i, :, :] for sample_i in self.sample_index] + sample_history_global_to_lidar = torch.stack(sample_history_global_to_lidar, dim=1).to(curr_bev).detach() # B x cat_num x 4 x 4 + + # global to curr lidar + repeat_global_to_curr_lidar_rt = global_to_curr_lidar_rt.unsqueeze(1).repeat(1, self.history_cat_num, 1, 1) # B x cat_num x 4 x 4 + + # bev to grid(lidar) + repeat_feat2bev = feat2bev.unsqueeze(1).repeat(B, self.history_cat_num, 1, 1) # B x cat_num x 4 x 4 + + # transformation between unaug lidar and aug lidar + repeat_forward_augs = forward_augs.unsqueeze(1).repeat(1, self.history_cat_num, 1, 1) # B x cat_num x 4 x 4 + repeat_history_forward_augs = self.history_forward_augs.unsqueeze(1).repeat(1, self.history_cat_num, 1, 1) # B x cat_num x 4 x 4 + + # curr bev -> curr grid(lidar) -> global -> prev grid(lidar) -> prev bev + curr_feat2prev_feat = (torch.inverse(repeat_feat2bev) @ repeat_history_forward_augs @ sample_history_global_to_lidar + @ torch.inverse(repeat_global_to_curr_lidar_rt) @ torch.inverse(repeat_forward_augs) @ repeat_feat2bev) + # feat curbev global historybev history feat B x cat_num x 4 x 4 + + # repeat for h*w and batch size + repeat_curr_feat2prev_feat = curr_feat2prev_feat.view(B, self.history_cat_num, 1, 1, 4, 4).repeat(1, 1, h, w, 1, 1) # B x cat_num x h x w x 4 x 4 + repeat_curr_grid = grid.unsqueeze(1).repeat(1, self.history_cat_num, 1, 1, 1, 1) # B x cat_num x h x w x 4 x 1 + + # apply transformation matrix, grid -> grid + prev_grid = repeat_curr_feat2prev_feat @ repeat_curr_grid # B x cat_num x h x w x 4 x 1 + prev_grid = prev_grid.squeeze(-1) # B x cat_num x h x w x 4 + + # Step 9: use the wrapped grid to gridsample the prev bev + grid = prev_grid.reshape(B * self.history_cat_num, h, w, 4) # (B*cat_num) x h x w x 4 + normalize_factor = torch.tensor([w - 1.0, h - 1.0], 
                                        dtype=dtype, device=device)
+        grid = grid[:,:,:,:2] / normalize_factor.view(1, 1, 1, 2) * 2.0 - 1.0 # (B*cat_num) x h x w x 2
+        sample_history_bev = [self.history_bev[:, sample_i, :, :, :] for sample_i in self.sample_index]
+        sample_history_bev = torch.stack(sample_history_bev, dim=1).to(curr_bev).detach() # B x cat_num x 80 x h x w
+        sample_history_bev = sample_history_bev.reshape(B * self.history_cat_num, self.single_bev_num_channels, h, w) # B*cat_num x 80 x h x w
+        sample_history_bev = F.grid_sample(sample_history_bev, grid.to(dtype), align_corners=True, mode=self.interpolation_mode) # B*cat_num x 80 x h x w
+        sample_history_bev = sample_history_bev.reshape(B, self.history_cat_num, self.single_bev_num_channels, h, w) # B x cat_num x 80 x h x w
+        feats_to_return = torch.cat([curr_bev.unsqueeze(1), sample_history_bev], dim=1) # B x (1 + cat_num) x 80 x H x W
+
+        # Step 10: Update history
+        # Reshape and concatenate features and timestep
+        sample_sweep_time = [self.history_sweep_time[:, sample_i] for sample_i in self.sample_index]
+        sample_sweep_time = torch.stack(sample_sweep_time, dim=1).detach() # B x cat_num
+        sample_sweep_time_add_curr = torch.cat([self.history_sweep_time.new_zeros(self.history_sweep_time.shape[0], 1), sample_sweep_time], dim=1) # B x (cat_num+1)
+
+        feats_to_return = torch.cat(
+            [feats_to_return, sample_sweep_time_add_curr[:, :, None, None, None].repeat(
+                1, 1, 1, feats_to_return.shape[3], feats_to_return.shape[4]) * self.history_cam_sweep_freq
+            ], dim=2) # B x (1 + cat_num) x (80 + 1) x H x W
+
+        # Step 11: Time conv
+        feats_to_return = self.history_keyframe_time_conv(
+            feats_to_return.reshape(-1, *feats_to_return.shape[2:])).reshape(
+                feats_to_return.shape[0], feats_to_return.shape[1], self.single_bev_num_channels, *feats_to_return.shape[3:]) # B x (1 + cat_num) x 80 x H x W
+
+        # Step 12: Cat keyframes & conv
+        feats_to_return = self.history_keyframe_cat_conv(
+            feats_to_return.reshape(
+                feats_to_return.shape[0], -1, feats_to_return.shape[3], feats_to_return.shape[4])) # B x [(1 + cat_num)*80] x H x W -> B x 160 x H x W
+
+        # Step 13: Update history by moving everything down one group of single_bev_num_channels channels
+        # and adding in curr_bev.
+        # Clone is necessary since we're doing in-place operations on self.history_bev
+        self.history_bev[:, :-1] = torch.roll(self.history_bev[:, :-1], shifts=1, dims=1)
+        self.history_bev[:, 0] = curr_bev.detach().clone() # B x 80 x h x w -> B x T x 80 x H x W
+
+        self.history_forward_augs = forward_augs.clone() # B x 4 x 4
+
+        self.history_sweep_time = torch.roll(self.history_sweep_time, shifts=1, dims=1)
+        self.history_sweep_time[:, 0] = 0
+
+        self.history_global_to_lidar = torch.roll(self.history_global_to_lidar, shifts=1, dims=1)
+        self.history_global_to_lidar[:, 0] = global_to_curr_lidar_rt.detach().clone()
+
+        return feats_to_return
+
+    def extract_img_feat(self, img_inputs, img_metas):
+        '''
+        Extract image features from the input image and some image metas.
+        Args:
+            img_inputs (List[torch.Tensor]): With input image and some meta information.
+                - img (torch.Tensor): Image tensor. shape (B, N, C=3, H, W)
+                - rots (torch.Tensor): Rotation matrix. shape (B, N, 3, 3)
+                - trans (torch.Tensor): Translation matrix. shape (B, N, 3)
+                - intrins (torch.Tensor): Intrinsic matrix. shape (B, N, 3, 3)
+                - post_rots (torch.Tensor): Post rotation matrix. shape (B, N, 3, 3)
+                - post_trans (torch.Tensor): Post translation matrix. shape (B, N, 3)
+            img_metas (List[dict]): Meta information of each sample. length = batch_size.
+ Returns: + bev_encoder_out_list: + - bev_feat (torch.Tensor): BEV features. shape (B, C=256, H=200, W=200) + depth_digit (torch.Tensor): Depth digit. shape (B, N, D, H, W) + ''' + + # Step 1: Extract image features and stereo features by `image_encoder` + img = img_inputs[0] + rots, trans, intrins, post_rots, post_trans = img_inputs[1:6] + curr_img_encoder_feats, curr_stereo_feats = self.image_encoder(img) + + # Step 2: Stereo + if not self.do_history_stereo_fusion: + bev_feat, depth_digit = self.img_view_transformer(curr_img_encoder_feats, rots, trans, intrins, post_rots, post_trans) + else: + # pre_process + prev_stereo_feats, prev_global2img, prev_img_forward_aug, curr_global2img, curr_img_forward_aug, curr_unaug_cam_to_prev_unaug_cam = \ + self.process_stereo_before_fusion(stereo_feats=curr_stereo_feats, + img_metas=img_metas, + rots=rots, + trans=trans, + intrins=intrins, + post_rots=post_rots, + post_trans=post_trans) + + # short term history fusion + bev_feat, depth_digit = self.img_view_transformer( + curr_img_encoder_feats, + rots, trans, intrins, post_rots, post_trans, + curr_stereo_feats, + prev_stereo_feats, + prev_global2img, + prev_img_forward_aug, + curr_global2img, + curr_img_forward_aug, + curr_unaug_cam_to_prev_unaug_cam + ) + + # post_process for next timestep + self.process_stereo_for_next_timestep(stereo_feats=curr_stereo_feats, + global_to_img=curr_global2img, + img_forward_augs=curr_img_forward_aug) + + # Step 3: Pre-process BEV features + bev_feat = self.pre_process_net(bev_feat)[0] # singleton list + + # Step 4: add bev embedding + bev_feat = self.embed(bev_feat) # B x base_bev_channels x H x W + + # Step 5: Fuse History long term + if self.do_history: + bev_feat = self.fuse_history(bev_feat, img_metas) + + # Step 6: BEV encoder + bev_encoder_out_list = self.bev_encoder(bev_feat) # see definition of `bev_encoder` in class `BEVDet_solo` + + return bev_encoder_out_list, depth_digit + + def get_transform_matrix(self, img_metas_list): + ''' + Args: + img_metas (List[dict]): Meta information of each sample. length = batch_size. + With keys 'rots', 'trans', 'intrins', 'post_rots', 'post_trans' etc. + Returns: + outs (list): list of torch.Tensor. Inside are: + - rots (torch.Tensor): rotation part of `sensor2ego`. shape (B, N, 3, 3) + - trans (torch.Tensor): translation part of `sensor2ego`. shape (B, N, 3) + - intrins (torch.Tensor): intrinsic of camera. shape (B, N, 3, 3) + - post_rots (torch.Tensor): Because of data augmentation, the `rots` pose matrix will need to be recalibrated. + `post_rots` is the recalibrated part. If no data augmentation, it is np.eye(3). + shape (B, N, 3, 3) + - post_trans (torch.Tensor): Because of data augmentation, the `trans` pose matrix will need to be recalibrated. + `post_trans` is the recalibrated part. If no data augmentation, it is np.zeros(3). 
+ shape (B, N, 3) + ''' + rots_list = [] + trans_list = [] + intrins_list = [] + post_rots_list = [] + post_trans_list = [] + for img_metas in img_metas_list: + rots_list.append(torch.tensor(img_metas['rots'], device='cuda', dtype=torch.float32)) + trans_list.append(torch.tensor(img_metas['trans'], device='cuda', dtype=torch.float32)) + intrins_list.append(torch.tensor(img_metas['intrins'], device='cuda', dtype=torch.float32)) + post_rots_list.append(torch.tensor(img_metas['post_rots'], device='cuda', dtype=torch.float32)) + post_trans_list.append(torch.tensor(img_metas['post_trans'], device='cuda', dtype=torch.float32)) + rots = torch.stack(rots_list, dim=0) # B x N x 3 x 3 + trans = torch.stack(trans_list, dim=0) + intrins = torch.stack(intrins_list, dim=0) + post_rots = torch.stack(post_rots_list, dim=0) + post_trans = torch.stack(post_trans_list, dim=0) + outs = [rots, trans, intrins, post_rots, post_trans] + return outs + + def forward_train(self, img=None, + img_metas=None, + voxel_semantics=None, + valid_mask=None, + **kwargs): + ''' + Args: + img (torch.Tensor): multi view image input of current frame. shape (B, N, C, H, W) + voxel_semantics (torch.Tensor): 3D occupancy ground truth. shape (B, H, W, Z) + valid_mask (torch.Tensor): unified boolean mask for visible voxel. shape (B, H, W, Z) + img_metas (List[dict]): Meta information of each sample. length = batch_size. + Returns: + losses (dict): dict of loss. + ''' + losses = dict() + # Step 1: extract some meta information from `img_metas` + transform_tensor_list = self.get_transform_matrix(img_metas) + + # Step 2: extract image feature + img_inputs = [img] + transform_tensor_list + bev_encoder_out_list, depth = self.extract_img_feat(img_inputs, img_metas) + + # # If we're training depth... + # Step 3: get depth loss + # depth_gt = img_inputs[-1] + # loss_depth = self.get_depth_loss(depth_gt, depth) + # losses['loss_depth'] = loss_depth + + # Step 4: get occ loss, this can refer to bevdet_occ code + occ_outs = self.pts_bbox_head(bev_encoder_out_list[0], **kwargs) + loss_inputs = [voxel_semantics, valid_mask, occ_outs] + losses_pts = self.pts_bbox_head.loss(*loss_inputs, **kwargs)# the loss will be changed again + losses.update(losses_pts) + + return losses + + def fuse_history_test(self, all_bev_feats_list, all_img_metas_list, sweep_time_list): + # padding + if len(all_bev_feats_list) != (self.history_cat_num + 1): + assert False + all_bev_feats_list = [all_bev_feats_list[0].clone() for _ in range((self.history_cat_num + 1) - len(all_bev_feats_list))] + all_bev_feats_list + all_img_metas_list = [all_img_metas_list[0].copy() for _ in range((self.history_cat_num + 1) - len(all_img_metas_list))] + all_img_metas_list + sweep_time_list = [sweep_time_list[0] for _ in range((self.history_cat_num + 1) - len(sweep_time_list))] + sweep_time_list + assert len(all_bev_feats_list) == (self.history_cat_num + 1), "warning! padding is wrong!" 
+ + # get the current grid + curr_bev = all_bev_feats_list[0] + n, c, h, w = curr_bev.shape + xs = torch.linspace(0, w - 1, w, dtype=curr_bev.dtype, device=curr_bev.device).view(1, w).expand(h, w) + ys = torch.linspace(0, h - 1, h, dtype=curr_bev.dtype, device=curr_bev.device).view(h, 1).expand(h, w) + grid = torch.stack((xs, ys, torch.ones_like(xs), torch.ones_like(xs)), -1).view(1, h, w, 4).expand(n, h, w, 4).view(n,h,w,4,1) + # bs, h, w, 4, 1 + + # get feat2bev + feat2bev = torch.zeros((4,4),dtype=grid.dtype).to(grid) + feat2bev[0, 0] = self.img_view_transformer.dx[0] + feat2bev[1, 1] = self.img_view_transformer.dx[1] + feat2bev[0, 3] = self.img_view_transformer.bx[0] - self.img_view_transformer.dx[0] / 2. + feat2bev[1, 3] = self.img_view_transformer.bx[1] - self.img_view_transformer.dx[1] / 2. + feat2bev[2, 2] = 1 + feat2bev[3, 3] = 1 + feat2bev = feat2bev.view(1, 4, 4) # (1, 4, 4) + feat2bev = feat2bev.repeat(self.history_cat_num, 1, 1).double() # (cat_num, 4, 4) + + # get ego2global + ego2global_list = [] + for img_meta in all_img_metas_list: + ego2global_tr = torch.tensor(img_meta['ego2global'], device='cuda', dtype=torch.float64) # (4, 4) + ego2global_list.append(ego2global_tr) + + cur_ego2global = ego2global_list[0] # (4, 4) + repeat_cur_ego2global = cur_ego2global.unsqueeze(0).repeat(self.history_cat_num, 1, 1) # (cat_num, 4, 4) + cat_prev_ego2global = torch.stack(ego2global_list[1:], dim=0) # (cat_num, 4, 4) + rt_flow = torch.inverse(feat2bev) @ torch.inverse(cat_prev_ego2global) @ repeat_cur_ego2global @ feat2bev # (cat_num, 4, 4) + + # reshape grid and wrap + grid = grid.repeat(self.history_cat_num, 1, 1, 1, 1).double() # (cat_num, h, w, 4, 1) + repeat_rt_flow = rt_flow.unsqueeze(1).unsqueeze(2).repeat(1, h, w, 1, 1) # (cat_num, h, w, 4, 4) + grid = repeat_rt_flow @ grid # (cat_num, h, w, 4, 1) + + # normalize + normalize_factor = torch.tensor([w - 1.0, h - 1.0], dtype=curr_bev.dtype, device=curr_bev.device) + grid = grid[:,:,:,:2,0] / normalize_factor.view(1, 1, 1, 2) * 2.0 - 1.0 # (cat_num, h, w, 2) + + # grid sample + prev_bev = torch.cat(all_bev_feats_list[1:], dim=0) # (cat_num, channels, H, W) len=7 + sampled_history_bev = F.grid_sample(prev_bev, grid.to(curr_bev.dtype), align_corners=True, mode=self.interpolation_mode) # cat_num, c, h, w + + ## cat history and reshape + feats_cat = torch.cat([curr_bev, sampled_history_bev], dim=0) # (1 + cat_num, 80, H, W) + feats_to_return = feats_cat.reshape( + 1, self.history_cat_num + 1, self.single_bev_num_channels, feats_cat.shape[2], feats_cat.shape[3]) # B x (1 + cat_num) x 80 x H x W + + # concatenate features and timestep + sweep_time = torch.tensor([sweep_time_list], device='cuda', dtype=curr_bev.dtype) # (1, 1+catnum) + feats_to_return = torch.cat( + [feats_to_return, sweep_time[:, :, None, None, None].repeat( + 1, 1, 1, feats_to_return.shape[3], feats_to_return.shape[4]) * self.history_cam_sweep_freq + ], dim=2) # B x (1 + T) x (80 + 1) x H x W + + # Time conv + feats_to_return = self.history_keyframe_time_conv( + feats_to_return.reshape(-1, *feats_to_return.shape[2:])).reshape( + feats_to_return.shape[0], feats_to_return.shape[1], -1, *feats_to_return.shape[3:]) # B x (1 + T) x 80 x H x W + + # Cat keyframes & conv + feats_to_return = self.history_keyframe_cat_conv( + feats_to_return.reshape( + feats_to_return.shape[0], -1, feats_to_return.shape[3], feats_to_return.shape[4])) # B x C x H x W + + return feats_to_return + + + def simple_test(self, img_metas, img=None, **kwargs): + ''' + Test Function without augmentaiton. 
+ Args: + img (torch.Tensor): multi view image input of current frame. shape (B, N, C, H, W) + img_metas (List[dict]): Meta information of each sample. length = batch_size. + Returns: + occ_outs (torch.Tensor): Occupancy prediction. shape (B, H, W, Z) + ''' + + # Step 1: prepare the img_inputs + bs, num_views, C, H, W = img.shape + curr_img_encoder_feats, curr_stereo_feats = self.image_encoder(img) + cur_rots = torch.tensor(img_metas[0]['rots'], device='cuda', dtype=torch.float32).unsqueeze(0) # 1 x N x 3 x 3 + cur_trans = torch.tensor(img_metas[0]['trans'], device='cuda', dtype=torch.float32).unsqueeze(0) # 1 x N x 3 x 3 + cur_intrins = torch.tensor(img_metas[0]['intrins'], device='cuda', dtype=torch.float32).unsqueeze(0) # 1 x N x 3 x 3 + cur_post_rots = torch.tensor(img_metas[0]['post_rots'], device='cuda', dtype=torch.float32).unsqueeze(0) # 1 x N x 3 x 3 + cur_post_trans = torch.tensor(img_metas[0]['post_trans'], device='cuda', dtype=torch.float32).unsqueeze(0) # 1 x N x 3 x 3 + + # Step 2: prepare `prev_stereo_feats` and `prev_img_metas_list` + scene_token = img_metas[0]['sample_idx'] // 1000 + if scene_token != self.prev_frame_info['scene_token']: + self.prev_frame_info['prev_img_metas_list'] = [img_metas[0].copy() for _ in range(self.history_queue_length)] # length = 30 + self.prev_frame_info['prev_stereo_feats_list'] = [curr_stereo_feats.clone() for _ in range(self.history_stereo_prev_step)] # length = 5 + prev_stereo_feats_list = self.prev_frame_info['prev_stereo_feats_list'] + history_last_stereo_feats = prev_stereo_feats_list[self.history_stereo_prev_step - 1] + prev_img_metas_list = self.prev_frame_info['prev_img_metas_list'] + + # Step 3: do short term fusion + if self.do_history_stereo_fusion: + curr_ego2global_np = np.asarray(img_metas[0]['ego2global']) + curr_ego2global = torch.tensor(curr_ego2global_np, device='cuda', dtype=torch.float64) # (4, 4) + curr_lidar2img_np = np.asarray(img_metas[0]['lidar2img']) + curr_lidar2img = torch.tensor(curr_lidar2img_np, device='cuda', dtype=torch.float64) + curr_global2img = curr_lidar2img @ torch.inverse(curr_ego2global) + curr_global2img = curr_global2img.unsqueeze(0) # 1 x N x 4 x 4 + + prev_ego2global_np = np.asarray(prev_img_metas_list[self.history_stereo_prev_step - 1]['ego2global']) + prev_ego2global = torch.tensor(prev_ego2global_np, device='cuda', dtype=torch.float64) # (4, 4) + prev_lidar2img_np = np.asarray(prev_img_metas_list[self.history_stereo_prev_step - 1]['lidar2img']) + prev_lidar2img = torch.tensor(prev_lidar2img_np, device='cuda', dtype=torch.float64) # (5, 4, 4) + prev_global2img = prev_lidar2img @ torch.inverse(prev_ego2global) # (5, 4, 4) + prev_global2img = prev_global2img.unsqueeze(0) # 1 x N x 4 x 4 + + curr_unaug_cam_to_prev_unaug_cam = prev_global2img[:, :, None, :, :] @ torch.inverse(curr_global2img)[:, None, :, :, :] # 1 x N x N x 4 x 4 + + prev_img_forward_aug = torch.eye(4).to('cuda').repeat(bs, num_views, 1, 1).double() # B x N x 4 x 4 + curr_img_forward_aug = torch.eye(4).to('cuda').repeat(bs, num_views, 1, 1).double() # B x N x 4 x 4 + + raw_bev_feat, _ = self.img_view_transformer(curr_img_encoder_feats, cur_rots, cur_trans, + cur_intrins, cur_post_rots, cur_post_trans, # each B x N x 3 (x 3) + curr_stereo_feats, history_last_stereo_feats, + prev_global2img, prev_img_forward_aug, + curr_global2img, curr_img_forward_aug, + curr_unaug_cam_to_prev_unaug_cam) + + else: + # no short term fusion + raw_bev_feat, _ = self.img_view_transformer(curr_img_encoder_feats, cur_rots, cur_trans, cur_intrins, 
cur_post_rots, cur_post_trans) + + bev_feat = self.pre_process_net(raw_bev_feat)[0] + bev_feat = self.embed(bev_feat) + new_bev_for_history = bev_feat.clone() + + # Step 4: long term fusion + if self.do_history: + # prepare the history + if scene_token != self.prev_frame_info['scene_token']: + self.prev_frame_info['prev_bev_list'] = [bev_feat.clone() for _ in range(self.history_queue_length)] # length = 30 + self.prev_frame_info['prev_sweep_time_list'] = [0 for _ in range(self.history_queue_length)] # length = 30 + prev_sweep_time_list = self.prev_frame_info['prev_sweep_time_list'] + else: + # new timestep, everything in history gets pushed back one. + prev_sweep_time_list = [i+1 for i in self.prev_frame_info['prev_sweep_time_list']] + prev_bev_list = self.prev_frame_info['prev_bev_list'] + + # sample + sampled_prev_bev_list = [] + sampled_prev_img_metas_list = [] + sampled_sweep_time_list = [] + for index in self.sample_index: + sampled_prev_bev_list.append(prev_bev_list[index]) + sampled_prev_img_metas_list.append(prev_img_metas_list[index]) + sampled_sweep_time_list.append(prev_sweep_time_list[index]) + + all_bev_feats_list = [bev_feat] + list(sampled_prev_bev_list) + all_img_metas_list = img_metas + list(sampled_prev_img_metas_list) + sweep_time_list = [0] + sampled_sweep_time_list + + # fuse + bev_feat = self.fuse_history_test(all_bev_feats_list, all_img_metas_list, sweep_time_list) + + # update the `prev_sweep_time_list` and `prev_bev_list` + del prev_sweep_time_list[-1] + prev_sweep_time_list = [0] + prev_sweep_time_list + self.prev_frame_info['prev_sweep_time_list'] = prev_sweep_time_list + + del prev_bev_list[-1] + prev_bev_list.insert(0, new_bev_for_history) + self.prev_frame_info['prev_bev_list'] = prev_bev_list + + # update + self.prev_frame_info['scene_token'] = scene_token + + del prev_img_metas_list[-1] + prev_img_metas_list.insert(0, img_metas[0]) + self.prev_frame_info['prev_img_metas_list'] = prev_img_metas_list + + del prev_stereo_feats_list[-1] + prev_stereo_feats_list.insert(0, curr_stereo_feats) + self.prev_frame_info['prev_stereo_feats_list'] = prev_stereo_feats_list + + bev_encoder_out_list = self.bev_encoder(bev_feat) + + occ_outs = self.pts_bbox_head(bev_encoder_out_list[0]) # bs, h, w, z, c + occ_outs = self.pts_bbox_head.get_occ(occ_outs) # bs, h, w, z + + return occ_outs + + def forward_test(self, img_metas, + img=None, + voxel_semantics=None, + valid_mask=None, + **kwargs): + ''' + Test function for the model. + Args: + (all arg are be wrapped one more list. after we take it out, the type of each parameter are below) + img_metas (List[dict]): Meta information of each sample. length = batch_size = 1 + img (torch.Tensor): multi view image input of current frame. shape (B, N, C, H, W) + voxel_semantics (torch.Tensor): 3D occupancy ground truth. shape (B, H, W, Z) + valid_mask (torch.Tensor): unified boolean mask for visible voxel. shape (B, H, W, Z) + Returns: + occ_results (dict): dict of evaluation results. 
+ ''' + + # Step 1: prepare the input + if voxel_semantics is not None: voxel_semantics = voxel_semantics[0] + if valid_mask is not None: valid_mask = valid_mask[0] + if img is not None: img = img[0] + if img_metas is not None: img_metas = img_metas[0] + + voxel_semantics_pred = self.simple_test(img_metas=img_metas, img=img, **kwargs) + + occ_results = self.pts_bbox_head.eval_metrics(voxel_semantics, voxel_semantics_pred, valid_mask=valid_mask) + sample_idx = img_metas[0]['sample_idx'] + scene_id = sample_idx % 1000000 // 1000 + occ_results['scene_id'] = scene_id + frame_id = sample_idx % 1000 + occ_results['frame_id'] = frame_id + + return occ_results diff --git a/projects/mmdet3d_plugin/bevformer/loss/__init__.py b/projects/mmdet3d_plugin/bevformer/loss/__init__.py new file mode 100644 index 0000000..af96b1c --- /dev/null +++ b/projects/mmdet3d_plugin/bevformer/loss/__init__.py @@ -0,0 +1 @@ +from . import ohem \ No newline at end of file diff --git a/projects/mmdet3d_plugin/bevformer/loss/lovasz_losses.py b/projects/mmdet3d_plugin/bevformer/loss/lovasz_losses.py new file mode 100644 index 0000000..210d279 --- /dev/null +++ b/projects/mmdet3d_plugin/bevformer/loss/lovasz_losses.py @@ -0,0 +1,319 @@ + +""" +Lovasz-Softmax and Jaccard hinge loss in PyTorch +Maxim Berman 2018 ESAT-PSI KU Leuven (MIT License) +""" + +from __future__ import print_function, division + +import torch +from torch.autograd import Variable +import torch.nn.functional as F +import numpy as np +try: + from itertools import ifilterfalse +except ImportError: # py3k + from itertools import filterfalse as ifilterfalse + +def lovasz_grad(gt_sorted): + """ + Computes gradient of the Lovasz extension w.r.t sorted errors + See Alg. 1 in paper + """ + p = len(gt_sorted) + gts = gt_sorted.sum() + intersection = gts - gt_sorted.float().cumsum(0) + union = gts + (1 - gt_sorted).float().cumsum(0) + jaccard = 1. 
- intersection / union + if p > 1: # cover 1-pixel case + jaccard[1:p] = jaccard[1:p] - jaccard[0:-1] + return jaccard + + +def iou_binary(preds, labels, EMPTY=1., ignore=None, per_image=True): + """ + IoU for foreground class + binary: 1 foreground, 0 background + """ + if not per_image: + preds, labels = (preds,), (labels,) + ious = [] + for pred, label in zip(preds, labels): + intersection = ((label == 1) & (pred == 1)).sum() + union = ((label == 1) | ((pred == 1) & (label != ignore))).sum() + if not union: + iou = EMPTY + else: + iou = float(intersection) / float(union) + ious.append(iou) + iou = mean(ious) # mean accross images if per_image + return 100 * iou + + +def iou(preds, labels, C, EMPTY=1., ignore=None, per_image=False): + """ + Array of IoU for each (non ignored) class + """ + if not per_image: + preds, labels = (preds,), (labels,) + ious = [] + for pred, label in zip(preds, labels): + iou = [] + for i in range(C): + if i != ignore: # The ignored label is sometimes among predicted classes (ENet - CityScapes) + intersection = ((label == i) & (pred == i)).sum() + union = ((label == i) | ((pred == i) & (label != ignore))).sum() + if not union: + iou.append(EMPTY) + else: + iou.append(float(intersection) / float(union)) + ious.append(iou) + ious = [mean(iou) for iou in zip(*ious)] # mean accross images if per_image + return 100 * np.array(ious) + + +# --------------------------- BINARY LOSSES --------------------------- + + +def lovasz_hinge(logits, labels, per_image=True, ignore=None): + """ + Binary Lovasz hinge loss + logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty) + labels: [B, H, W] Tensor, binary ground truth masks (0 or 1) + per_image: compute the loss per image instead of per batch + ignore: void class id + """ + if per_image: + loss = mean(lovasz_hinge_flat(*flatten_binary_scores(log.unsqueeze(0), lab.unsqueeze(0), ignore)) + for log, lab in zip(logits, labels)) + else: + loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore)) + return loss + + +def lovasz_hinge_flat(logits, labels): + """ + Binary Lovasz hinge loss + logits: [P] Variable, logits at each prediction (between -\infty and +\infty) + labels: [P] Tensor, binary ground truth labels (0 or 1) + ignore: label to ignore + """ + if len(labels) == 0: + # only void pixels, the gradients should be 0 + return logits.sum() * 0. + signs = 2. * labels.float() - 1. + errors = (1. 
- logits * Variable(signs)) + errors_sorted, perm = torch.sort(errors, dim=0, descending=True) + perm = perm.data + gt_sorted = labels[perm] + grad = lovasz_grad(gt_sorted) + loss = torch.dot(F.relu(errors_sorted), Variable(grad)) + return loss + + +def flatten_binary_scores(scores, labels, ignore=None): + """ + Flattens predictions in the batch (binary case) + Remove labels equal to 'ignore' + """ + scores = scores.view(-1) + labels = labels.view(-1) + if ignore is None: + return scores, labels + valid = (labels != ignore) + vscores = scores[valid] + vlabels = labels[valid] + return vscores, vlabels + + +class StableBCELoss(torch.nn.modules.Module): + def __init__(self): + super(StableBCELoss, self).__init__() + def forward(self, input, target): + neg_abs = - input.abs() + loss = input.clamp(min=0) - input * target + (1 + neg_abs.exp()).log() + return loss.mean() + + +def binary_xloss(logits, labels, ignore=None): + """ + Binary Cross entropy loss + logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty) + labels: [B, H, W] Tensor, binary ground truth masks (0 or 1) + ignore: void class id + """ + logits, labels = flatten_binary_scores(logits, labels, ignore) + loss = StableBCELoss()(logits, Variable(labels.float())) + return loss + + +# --------------------------- MULTICLASS LOSSES --------------------------- + + +def lovasz_softmax(probas, labels, classes='present', per_image=False, ignore=None): + """ + Multi-class Lovasz-Softmax loss + probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1). + Interpreted as binary (sigmoid) output with outputs of size [B, H, W]. + labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1) + classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average. + per_image: compute the loss per image instead of per batch + ignore: void class labels + """ + if per_image: + loss = mean(lovasz_softmax_flat(*flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore), classes=classes) + for prob, lab in zip(probas, labels)) + else: + loss = lovasz_softmax_flat(*flatten_probas(probas, labels, ignore), classes=classes) + return loss + + +def lovasz_softmax_flat(probas, labels, classes='present'): + """ + Multi-class Lovasz-Softmax loss + probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1) + labels: [P] Tensor, ground truth labels (between 0 and C - 1) + classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average. + """ + if probas.numel() == 0: + # only void pixels, the gradients should be 0 + return probas * 0. 
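+ # For each selected class below: build a binary foreground indicator `fg`,
+ # take the absolute errors |fg - class_pred|, sort them in decreasing order,
+ # and weight the sorted errors by the Lovasz gradient of the sorted ground
+ # truth; the dot product is the Lovasz extension of the per-class Jaccard loss.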
+ C = probas.size(1) + losses = [] + class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes + for c in class_to_sum: + fg = (labels == c).float() # foreground for class c + if (classes == 'present' and fg.sum() == 0): + continue + if C == 1: + if len(classes) > 1: + raise ValueError('Sigmoid output possible only with 1 class') + class_pred = probas[:, 0] + else: + class_pred = probas[:, c] + errors = (Variable(fg) - class_pred).abs() + errors_sorted, perm = torch.sort(errors, 0, descending=True) + perm = perm.data + fg_sorted = fg[perm] + losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted)))) + return mean(losses) + + +def flatten_probas(probas, labels, ignore=None): + """ + Flattens predictions in the batch + """ + # if probas.dim() == 3: + # # assumes output of a sigmoid layer + # B, H, W = probas.size() + # probas = probas.view(B, 1, H, W) + # elif probas.dim() == 5: + # #3D segmentation + # B, C, L, H, W = probas.size() + # probas = probas.contiguous().view(B, C, L, H*W) + # B, C, H, W = probas.size() + # probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C + labels = labels.view(-1) + if ignore is None: + return probas, labels + valid = (labels != ignore) + vprobas = probas[valid.nonzero().squeeze()] + # print(labels) + # print(valid) + vlabels = labels[valid] + return vprobas, vlabels + +def xloss(logits, labels, ignore=None): + """ + Cross entropy loss + """ + return F.cross_entropy(logits, Variable(labels), ignore_index=255) + +def jaccard_loss(probas, labels,ignore=None, smooth = 100, bk_class = None): + """ + Something wrong with this loss + Multi-class Lovasz-Softmax loss + probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1). + Interpreted as binary (sigmoid) output with outputs of size [B, H, W]. + labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1) + classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average. + per_image: compute the loss per image instead of per batch + ignore: void class labels + """ + vprobas, vlabels = flatten_probas(probas, labels, ignore) + + + true_1_hot = torch.eye(vprobas.shape[1])[vlabels] + + if bk_class: + one_hot_assignment = torch.ones_like(vlabels) + one_hot_assignment[vlabels == bk_class] = 0 + one_hot_assignment = one_hot_assignment.float().unsqueeze(1) + true_1_hot = true_1_hot*one_hot_assignment + + true_1_hot = true_1_hot.to(vprobas.device) + intersection = torch.sum(vprobas * true_1_hot) + cardinality = torch.sum(vprobas + true_1_hot) + loss = (intersection + smooth / (cardinality - intersection + smooth)).mean() + return (1-loss)*smooth + +def hinge_jaccard_loss(probas, labels,ignore=None, classes = 'present', hinge = 0.1, smooth =100): + """ + Multi-class Hinge Jaccard loss + probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1). + Interpreted as binary (sigmoid) output with outputs of size [B, H, W]. + labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1) + classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average. 
+ ignore: void class labels + """ + vprobas, vlabels = flatten_probas(probas, labels, ignore) + C = vprobas.size(1) + losses = [] + class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes + for c in class_to_sum: + if c in vlabels: + c_sample_ind = vlabels == c + cprobas = vprobas[c_sample_ind,:] + non_c_ind =np.array([a for a in class_to_sum if a != c]) + class_pred = cprobas[:,c] + max_non_class_pred = torch.max(cprobas[:,non_c_ind],dim = 1)[0] + TP = torch.sum(torch.clamp(class_pred - max_non_class_pred, max = hinge)+1.) + smooth + FN = torch.sum(torch.clamp(max_non_class_pred - class_pred, min = -hinge)+hinge) + + if (~c_sample_ind).sum() == 0: + FP = 0 + else: + nonc_probas = vprobas[~c_sample_ind,:] + class_pred = nonc_probas[:,c] + max_non_class_pred = torch.max(nonc_probas[:,non_c_ind],dim = 1)[0] + FP = torch.sum(torch.clamp(class_pred - max_non_class_pred, max = hinge)+1.) + + losses.append(1 - TP/(TP+FP+FN)) + + if len(losses) == 0: return 0 + return mean(losses) + +# --------------------------- HELPER FUNCTIONS --------------------------- +def isnan(x): + return x != x + + +def mean(l, ignore_nan=False, empty=0): + """ + nanmean compatible with generators. + """ + l = iter(l) + if ignore_nan: + l = ifilterfalse(isnan, l) + try: + n = 1 + acc = next(l) + except StopIteration: + if empty == 'raise': + raise ValueError('Empty mean') + return empty + for n, v in enumerate(l, 2): + acc += v + if n == 1: + return acc + return acc / n diff --git a/projects/mmdet3d_plugin/bevformer/loss/ohem.py b/projects/mmdet3d_plugin/bevformer/loss/ohem.py new file mode 100644 index 0000000..ef9911f --- /dev/null +++ b/projects/mmdet3d_plugin/bevformer/loss/ohem.py @@ -0,0 +1,119 @@ +import torch +import torch.nn as nn +from mmdet.models.losses.focal_loss import FocalLoss +from mmdet.models.losses.cross_entropy_loss import CrossEntropyLoss +from mmdet.models.losses.utils import reduce_loss +from mmseg.models import LOSSES + + +@LOSSES.register_module() +class CrossEntropyOHEMLoss(nn.Module): + """ + Cross Entropy Loss with additional OHEM. + + Args: + top_ratio (float): top ratio to be mined. Default: 0.3. + top_weight (float): scaling weight given to top hard examples mined. Default: 1.0. + weight_per_cls (list, tuple, Tensor, np.ndarray): a manual rescaling weight given to each class. + ignore_index (int, optional): Specifies a target value that is ignored and does not contribute to the input gradient. + ce_loss_weight (float): weight to reweigh CE loss output. Default: 1.0. + reduction (str): mean, sum and none are supported. 
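+ Example (illustrative only; tensor shapes and the class count are hypothetical):
+ >>> criterion = CrossEntropyOHEMLoss(top_ratio=0.3, top_weight=1.0)
+ >>> logits = torch.randn(2, 18, 200, 200) # (N, num_classes, H, W)
+ >>> target = torch.randint(0, 18, (2, 200, 200)) # per-pixel class indices
+ >>> loss = criterion(logits, target) # scalar: mean CE plus the mined top-30% term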
+ """ + def __init__(self, + use_sigmoid=False, + use_mask=False, + class_weight=None, + ignore_index=None, + loss_weight=1.0, + top_ratio=0.3, + use_ohem=True, + top_weight=1.0, + reduction='mean'): + super(CrossEntropyOHEMLoss, self).__init__() + self.top_ratio = top_ratio + self.top_weight = top_weight + assert reduction in ('mean', 'sum', 'none') + self.reduction = reduction + self.use_ohem = use_ohem + + self.ce_loss = CrossEntropyLoss(use_sigmoid, use_mask, + reduction='none', class_weight=class_weight, + ignore_index=ignore_index, loss_weight=loss_weight) + + def forward(self, + input, + target, + weight=None, + avg_factor=None): + loss = self.ce_loss(input, target, weight=weight, avg_factor=avg_factor) + size = loss.size() + loss: torch.Tensor = loss.reshape(-1) + if not self.use_ohem: return loss + + k = max(int(self.top_ratio * loss.shape[0]), 1) + loss_topk, topk_idx = torch.topk(loss, k, largest=True, sorted=False) + + if self.reduction != 'none': + loss = reduce_loss(loss, self.reduction) + loss_topk = reduce_loss(loss_topk, self.reduction) + return loss + self.top_weight * loss_topk + else: + loss[topk_idx] += self.top_weight * loss_topk + return loss.reshape(size) + + +@LOSSES.register_module() +class FocalOHEMLoss(nn.Module): + """ + Cross Entropy Loss with additional OHEM. + + Args: + use_sigmoid (bool): Only support sigmoid in FocalLoss. Must be True. + gamma (float): gamma of FocalLoss. Default: 2.0. + alpha (float): alpha of FocalLoss. Default: 0.5. + focal_loss_weight (float): rescaling weight given to FocalLoss. Default: 1.0. + activated (bool, optional): Whether the input is activated. + If True, it means the input has been activated and can be + treated as probabilities. Else, it should be treated as logits. + Defaults to False. + top_ratio (float): top ratio to be mined. Default: 0.3. + top_weight (float): scaling weight given to top hard examples mined. Default: 1.0. + reduction (str): mean, sum and none are supported. 
+ """ + def __init__(self, + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0, + activated=False, + top_ratio=0.3, + top_weight=1.0, + reduction='mean'): + super(FocalOHEMLoss, self).__init__() + self.top_ratio = top_ratio + self.top_weight = top_weight + assert reduction in ('mean', 'sum', 'none') + self.reduction = reduction + + self.focal_loss = FocalLoss(use_sigmoid, gamma, alpha, + reduction='none', loss_weight=loss_weight, activated=activated) + + def forward(self, + input, + target, + weight=None, + avg_factor=None): + loss = self.focal_loss(input, target, weight=weight, avg_factor=avg_factor) + size = loss.size() + loss: torch.Tensor = loss.reshape(-1) + + k = max(int(self.top_ratio * loss.shape[0]), 1) + loss_topk, topk_idx = torch.topk(loss, k, largest=True, sorted=False) + + if self.reduction != 'none': + loss = reduce_loss(loss, self.reduction) + loss_topk = reduce_loss(loss_topk, self.reduction) + return loss + self.top_weight * loss_topk + else: + loss[topk_idx] += self.top_weight * loss_topk + return loss.reshape(size) \ No newline at end of file diff --git a/projects/mmdet3d_plugin/bevformer/modules/__init__.py b/projects/mmdet3d_plugin/bevformer/modules/__init__.py index daff9f0..c4fc124 100644 --- a/projects/mmdet3d_plugin/bevformer/modules/__init__.py +++ b/projects/mmdet3d_plugin/bevformer/modules/__init__.py @@ -3,7 +3,22 @@ from .temporal_self_attention import TemporalSelfAttention from .encoder import BEVFormerEncoder, BEVFormerLayer from .decoder import DetectionTransformerDecoder -from .occ_transformer import OccTransformer -from .encoder_test import BEVFormerEncoderTest -from .encoder_3d import BEVFormerEncoder3D - +from .occ_transformer import CVTOccTransformer +from .encoder_3d import BEVFormerEncoder3D,OccFormerLayer3D +from .temporal_self_attention_3d import TemporalSelfAttention3D +from .spatial_cross_attention_3d import MSDeformableAttention4D +from .encoder_3d_conv import BEVFormerEncoder3DConv +from .encoder_waymo import BEVFormerEncoderWaymo, BEVFormerLayerWaymo +from .occ_transformer_waymo import CVTOccTransformerWaymo +from .hybrid_transformer import HybridTransformer +from .voxel_encoder import VoxelFormerEncoder,VoxelFormerLayer +from .vol_encoder import VolFormerEncoder,VolFormerLayer +from .pyramid_transformer import PyramidTransformer +from .resnet import CustomResNet +from .residual_block_3d import ResidualBlock +from .occ_conv_decoder import OccConvDecoder +from .occ_conv_decoder_3d import OccConvDecoder3D +from .cost_volume_module import CostVolumeModule +from .concat_conv_module import ConcatConvModule +from .view_transformer import ViewTransformerLiftSplatShoot_solo, SELikeModule +from .view_transformer_solo import ViewTransformerSOLOFusion \ No newline at end of file diff --git a/projects/mmdet3d_plugin/bevformer/modules/concat_conv_module.py b/projects/mmdet3d_plugin/bevformer/modules/concat_conv_module.py new file mode 100644 index 0000000..6375909 --- /dev/null +++ b/projects/mmdet3d_plugin/bevformer/modules/concat_conv_module.py @@ -0,0 +1,219 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule + +class TemporalNet(nn.Module): + def __init__(self, + in_channels, + out_channels, + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d'), + act_cfg=dict(type='ReLU',inplace=True)): + super(TemporalNet, self).__init__() + self.conv1 = ConvModule( + in_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0, 
+ conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + ) + self.conv2 = ConvModule( + out_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + ) + self.conv3 = ConvModule( + out_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None, + ) + self.downsample = ConvModule( + in_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None, + ) + + def forward(self, x): + out = self.conv1(x) + out = self.conv2(out) + out = self.conv3(out) + out += self.downsample(x) + out = F.relu(out) + return out + +class ConcatConvModule(BaseModule): + def __init__(self, + bev_h=200, + bev_w=200, + total_z=16, # not bev_z + channels=16, + pc_range=[-40, -40, -1.0, 40, 40, 5.4], + voxel_size=[0.4, 0.4, 0.4], + sampled_queue_length=7, + ): + super(ConcatConvModule, self).__init__() + self.bev_h = bev_h + self.bev_w = bev_w + self.total_z = total_z + self.channels = channels + self.pc_range = pc_range + self.voxel_size = voxel_size + self.sampled_queue_length = sampled_queue_length + self.concatChannels=self.channels * self.sampled_queue_length + self.resnet = TemporalNet(in_channels=self.concatChannels, + out_channels=self.channels, + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d', ), + act_cfg=dict(type='ReLU',inplace=True), + ) + bias_x = (pc_range[3] + pc_range[0]) / 2 # 0 + bias_y = (pc_range[4] + pc_range[1]) / 2 # 0 + bias_z = (pc_range[5] + pc_range[2]) / 2 # 2.2 + half_x = (pc_range[3] - pc_range[0]) / 2 # 40 + half_y = (pc_range[4] - pc_range[1]) / 2 # 40 + half_z = (pc_range[5] - pc_range[2]) / 2 # 3.2 + self.bias = torch.tensor([bias_x, bias_y, bias_z], device='cuda') # [0, 0, 2.2] + self.norm_factors = torch.tensor([half_x, half_y, half_z], device='cuda') # [40, 40, 3.2] + + self.x_idx_begins = self.pc_range[0] + self.voxel_size[0]/2 + self.y_idx_begins = self.pc_range[1] + self.voxel_size[1]/2 + self.z_idx_begins = self.pc_range[2] + self.voxel_size[2]/2 + + self.x_idx_ends = self.pc_range[3] - self.voxel_size[0]/2 + self.y_idx_ends = self.pc_range[4] - self.voxel_size[1]/2 + self.z_idx_ends = self.pc_range[5] - self.voxel_size[2]/2 + + def get_bev_ref(self, W, H, Z): + """ + Get reference point for reshaped BEV volume. + Args: + W (int): width of the BEV volume. + H (int): height of the BEV volume. + Z (int): depth of the BEV volume. 
+ Returns:
+ ref_3d (torch.Tensor): shape (w, h, z, 4, 1)
+ """
+
+ x_idx = torch.linspace(self.x_idx_begins, self.x_idx_ends, W, device='cuda', dtype=torch.float64)
+ y_idx = torch.linspace(self.y_idx_begins, self.y_idx_ends, H, device='cuda', dtype=torch.float64)
+ z_idx = torch.linspace(self.z_idx_begins, self.z_idx_ends, Z, device='cuda', dtype=torch.float64)
+
+ grid_x_idx, grid_y_idx, grid_z_idx = torch.meshgrid(x_idx, y_idx, z_idx, indexing='ij')
+ grid_ones = torch.ones_like(grid_x_idx)
+ ref_3d = torch.stack((grid_x_idx, grid_y_idx, grid_z_idx, grid_ones), -1)
+ ref_3d = ref_3d.unsqueeze(4)
+ return ref_3d
+
+ def gather_feature(self, features, locations, device='cuda', dtype=torch.float32):
+ """
+ Args:
+ features (torch.Tensor): (len-1, c, z, h, w)
+ locations (torch.Tensor): (len-1, w, h, z, 3)
+ returns:
+ grid_sampled_features (torch.Tensor): (len-1, c, z, h, w) no mask
+ """
+ features = features.to(dtype)
+ locations = locations.to(dtype)
+
+ # norm the location and reshape locations to (len-1, z, h, w, 3)
+ locations = (locations - self.bias) / self.norm_factors # norm the location to [-1, 1]
+ locations = locations.permute(0, 3, 2, 1, 4) # (len-1, z, h, w, 3)
+
+ grid_sampled_features = F.grid_sample(features, locations, align_corners=False) # (len-1, c, z, h, w)
+ # defaults to bilinear interpolation without align_corners
+
+ return grid_sampled_features # (len-1, c, z, h, w)
+
+ def forward(self, bev_list, img_metas_list):
+ """
+ Args:
+ bev_list (list[torch.Tensor]): each has shape (bs, h*w, embed_dims).
+ img_metas_list (list[dict]): Include current img meta info dict.
+ return:
+ updated_bev (torch.Tensor): fused BEV feature with shape (bs, h*w, embed_dims)
+ """
+
+ # Step 1: prepare the input parameters
+ bs, bev_HW, embed_dims = bev_list[0].shape
+ bev_h = self.bev_h
+ bev_w = self.bev_w
+ total_z = self.total_z
+ channels = self.channels
+ pc_range = self.pc_range
+
+ # get ego2global from img_metas
+ ego2global_list = []
+ for i in range(len(bev_list)):
+ img_metas = img_metas_list[i]
+ ego2global = img_metas['ego2global'] # (4, 4) numpy array
+ ego2global_list.append(torch.tensor(ego2global, device='cuda', dtype=torch.float64))
+ cat_prev_ego2global = torch.stack(ego2global_list[:-1], dim=0) # (len-1, 4, 4)
+ assert bs == 1, "Only support batch size 1"
+ cat_prev_ego2global = cat_prev_ego2global.unsqueeze(0).repeat(bs, 1, 1, 1) # (bs, len-1, 4, 4)
+ cur_ego2global = ego2global_list[-1] # (4, 4)
+ cur_ego2global = cur_ego2global.unsqueeze(0).repeat(bs, 1, 1) # (bs, 4, 4)
+
+ # Reshape the input
+ cat_bev = torch.cat(bev_list, dim=0)
+ cat_bev = cat_bev.permute(0, 2, 1).reshape(bs*len(bev_list), channels, total_z, bev_h, bev_w)
+
+ # Step 2: prepare reference point for BEV volume
+ # sample reference point for current ego coordinate
+ bev_ref = self.get_bev_ref(H=bev_h, W=bev_w, Z=total_z)
+ bev_ref = bev_ref.unsqueeze(3).repeat(1, 1, 1, len(bev_list)-1, 1, 1) # (w, h, z, len-1, 4, 1)
+
+ # Step 3: use `ego2global` to warp the reference point to the previous BEV volume
+ prev_bev_ref = torch.inverse(cat_prev_ego2global) @ cur_ego2global @ bev_ref
+ prev_bev_ref = prev_bev_ref.squeeze(-1)
+ prev_bev_ref = prev_bev_ref[..., :3]
+ prev_bev_ref = prev_bev_ref.permute(3, 0, 1, 2, 4) # (len-1, w, h, z, 3)
+
+ # Step 4: get mask (keep in-range points; `&` is required so that out-of-range points are zeroed below)
+ mask = (
+ (prev_bev_ref[..., 0] > pc_range[0]) & (prev_bev_ref[..., 0] < pc_range[3]) &
+ (prev_bev_ref[..., 1] > pc_range[1]) & (prev_bev_ref[..., 1] < pc_range[4]) &
+ (prev_bev_ref[..., 2] > pc_range[2]) & (prev_bev_ref[..., 2] <
pc_range[5]) + ) # out-of-range points will be 0. with shape (len-1, w, h, z) + + # Step 5: gather_feature + prev_bev = self.gather_feature(cat_bev[:-1], prev_bev_ref) # (len-1, c, z, h, w) + prev_bev = prev_bev.permute(0, 4, 3, 2, 1) # (len-1, w, h, z, c) + prev_bev[~mask] = 0 + prev_bev = prev_bev.permute(0, 4, 3, 2, 1) # (len-1, c, z, h, w) + prev_bev = torch.flatten(prev_bev, start_dim=0, end_dim=1) # (len-1 * c, z, h, w) + + # Step 6: padding + cur_bev = cat_bev[-1] # c, z, h, w + cat_bev = torch.cat((prev_bev, cur_bev), dim=0) # (len * c, z, h, w) + if len(bev_list) < self.sampled_queue_length: + pad_len = self.sampled_queue_length - len(bev_list) + pad_bev = torch.zeros((pad_len*channels, total_z, bev_h, bev_w), device='cuda', dtype=cat_bev.dtype) + cat_bev = torch.cat((pad_bev, cat_bev), dim=0) # (queue_length * c, z, h, w) + + # Step 7: use resnet to fuse the temporal information into a updated BEV feature + cat_bev = cat_bev.unsqueeze(0) # (bs, c * queue_length, z, h, w) + update_bev = self.resnet(cat_bev) # (bs, c, z, h, w) + update_bev = update_bev.reshape(bs, embed_dims, bev_HW).permute(0, 2, 1) # (bs, h*w, embed_dims) + + return update_bev \ No newline at end of file diff --git a/projects/mmdet3d_plugin/bevformer/modules/cost_volume_module.py b/projects/mmdet3d_plugin/bevformer/modules/cost_volume_module.py new file mode 100644 index 0000000..01976af --- /dev/null +++ b/projects/mmdet3d_plugin/bevformer/modules/cost_volume_module.py @@ -0,0 +1,287 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule +from .residual_block_3d import ResidualBlock + +class TemporalNet(nn.Module): + def __init__(self, in_channels=240, + out_channels=[16, 128, 64, 32], + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d'), + act_cfg=dict(type='ReLU',inplace=True), + ): + super(TemporalNet, self).__init__() + self.conv_head = ConvModule(in_channels, + out_channels[0], + kernel_size=3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.layer1 = self.make_layer(out_channels[0], out_channels[1], num_blocks=2, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.layer2 = self.make_layer(out_channels[1], out_channels[2], num_blocks=2, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.layer3 = self.make_layer(out_channels[2], out_channels[3], num_blocks=2, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.conv_back = ConvModule(out_channels[3], + 2, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + def make_layer(self, in_channels, + out_channels, + num_blocks=2, + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d'), + act_cfg=dict(type='ReLU',inplace=True)): + layers = [] + for _ in range(num_blocks): + layers.append(ResidualBlock(in_channels, + out_channels, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + in_channels = out_channels # after one round, the inchannel will become outchannel + return nn.Sequential(*layers) + + def forward(self, bev_3d): + bev_3d = self.conv_head(bev_3d) + bev_3d = self.layer1(bev_3d) + bev_3d = self.layer2(bev_3d) + bev_3d = self.layer3(bev_3d) + bev_3d = self.conv_back(bev_3d) + return bev_3d + +class CostVolumeModule(BaseModule): + def __init__(self, + bev_h=200, + bev_w=200, + total_z=16, # not bev_z + channels=16, + pc_range=[-40, -40, -1.0, 40, 40, 5.4], + voxel_size=[0.4, 0.4, 
0.4], + sampled_queue_length=7, + scales=[0.8, 0.9, 1.0, 1.1, 1.2], + ): + super(CostVolumeModule, self).__init__() + self.bev_h = bev_h + self.bev_w = bev_w + self.total_z = total_z + self.channels = channels + self.pc_range = pc_range + self.voxel_size = voxel_size + self.sampled_queue_length = sampled_queue_length + self.scales = scales + self.scales_len = len(scales) + self.scalesChannels=self.channels * self.sampled_queue_length * self.scales_len + self.out_channels=[16, 128, 64, 32] + + self.resnet = TemporalNet(in_channels=self.scalesChannels, + out_channels=self.out_channels, + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d', ), + act_cfg=dict(type='ReLU',inplace=True), + ) + bias_x = (pc_range[3] + pc_range[0]) / 2 # 0 + bias_y = (pc_range[4] + pc_range[1]) / 2 # 0 + bias_z = (pc_range[5] + pc_range[2]) / 2 # 2.2 + half_x = (pc_range[3] - pc_range[0]) / 2 # 40 + half_y = (pc_range[4] - pc_range[1]) / 2 # 40 + half_z = (pc_range[5] - pc_range[2]) / 2 # 3.2 + self.bias = torch.tensor([bias_x, bias_y, bias_z], device='cuda') # [0, 0, 2.2] + self.norm_factors = torch.tensor([half_x, half_y, half_z], device='cuda') # [40, 40, 3.2] + + self.x_idx_begins = self.pc_range[0] + self.voxel_size[0]/2 + self.y_idx_begins = self.pc_range[1] + self.voxel_size[1]/2 + self.z_idx_begins = self.pc_range[2] + self.voxel_size[2]/2 + + self.x_idx_ends = self.pc_range[3] - self.voxel_size[0]/2 + self.y_idx_ends = self.pc_range[4] - self.voxel_size[1]/2 + self.z_idx_ends = self.pc_range[5] - self.voxel_size[2]/2 + + + def get_bev_ref(self, W, H, Z): + """ + Get reference point for reshaped BEV volume. + Args: + W (int): width of the BEV volume. + H (int): height of the BEV volume. + Z (int): depth of the BEV volume. + Returns: + ref_3d (torch.Tensor): shape (w, h, z, 4, 1) + """ + + x_idx = torch.linspace(self.x_idx_begins, self.x_idx_ends, W, device='cuda', dtype=torch.float64) + y_idx = torch.linspace(self.y_idx_begins, self.y_idx_ends, H, device='cuda', dtype=torch.float64) + z_idx = torch.linspace(self.z_idx_begins, self.z_idx_ends, Z, device='cuda', dtype=torch.float64) + + grid_x_idx, grid_y_idx, grid_z_idx = torch.meshgrid(x_idx, y_idx, z_idx, indexing='ij') # all with shape (w, h, z) + grid_ones = torch.ones_like(grid_x_idx) + ref_3d = torch.stack((grid_x_idx, grid_y_idx, grid_z_idx, grid_ones), -1) # (x, h, z, 4) + ref_3d = ref_3d.unsqueeze(4) + + return ref_3d + + def gather_feature_scales(self, features, locations, dtype=torch.float32): + """ + Use grid sample to gather feature from previous BEV volume. + Args: + features (torch.Tensor): shape (bs, len, w, h, z, c). + locations (torch.Tensor): shape (bs, len*len(scales), w, h, z, 3). + Returns: + grid_sampled_features (torch.Tensor): shape (bs, len, len(scales), w, h, z, c). 
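+ Note: `locations` hold metric (x, y, z) ego-frame coordinates; they are normalized
+ to [-1, 1] via `self.bias` and `self.norm_factors` before `F.grid_sample`, whose
+ last grid dimension is read as (x, y, z) and therefore indexes the (w, h, z) axes
+ of the feature volume.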
+ """ + + features = features.to(dtype) + locations = locations.to(dtype) + + # norm the location and reshape locations shape + locations = (locations - self.bias) / self.norm_factors # [-1, 1] + locations = locations.permute(0, 1, 4, 3, 2, 5) # (bs, len*len(scales), z, h, w, 3) + locations = locations.reshape(-1, *locations.shape[2:]) # (bs*len*len(scales), z, h, w, 3) + + # reshape features shape + queue_len = features.shape[1] + features = features.permute(0, 1, 5, 4, 3, 2) # (bs, len, c, z, h, w) + features = features.unsqueeze(2) # (bs, len, 1, c, z, h, w) + features = features.repeat(1, 1, self.scales_len, 1, 1, 1, 1) # (bs, len, len(scales), c, z, h, w) + features = features.reshape(-1, *features.shape[3:]) # (bs*len*len(scales), c, z, h, w) + + grid_sampled_features = F.grid_sample(features, locations, align_corners=False) # (bs*len*len(scales), c, z, h, w) + # default to be bilinear interpolation and no align corners + + grid_sampled_features = grid_sampled_features.reshape(-1, queue_len, self.scales_len, *grid_sampled_features.shape[1:]) # (bs, len, len(scales), c, z, h, w) + grid_sampled_features = grid_sampled_features.permute(0, 1, 2, 6, 5, 4, 3) # (bs, len, len(scales), w, h, z, c) + + return grid_sampled_features + + def forward(self, bev_list, img_metas_list): + """ + Forward pass of the `CostVolume` temporal fusion method. + Args: + bev_list (list[torch.Tensor]): each has shape (bs, h*w, embed_dims). + img_metas_list (list[dict]): include current img meta info dict. + return: + updated_bev (torch.Tensor): fused BEV feature with shape (bs, h*w, embed_dims) + refine_feat_w (torch.Tensor): shape (w, h, z, c). used to calculate refine_feat loss + """ + + # Step 1: prepare the input parameters + bs, bev_HW, embed_dims = bev_list[0].shape + bev_h = self.bev_h + bev_w = self.bev_w + total_z = self.total_z + channels = self.channels + pc_range = self.pc_range + + # get ego2global from img_metas + ego2global_list = [] + for i in range(len(bev_list)): + img_metas = img_metas_list[i] + ego2global = img_metas['ego2global'] # (4, 4) numpy array + ego2global_list.append(torch.tensor(ego2global, device='cuda', dtype=torch.float64)) + cat_prev_ego2global = torch.stack(ego2global_list[:-1], dim=0) # (len-1, 4, 4) + assert bs == 1, "Only support batch size 1" + cat_prev_ego2global = cat_prev_ego2global.unsqueeze(0).repeat(bs, 1, 1, 1) # (bs, len-1, 4, 4) + cur_ego2global = ego2global_list[-1] # (4, 4) + cur_ego2global = cur_ego2global.unsqueeze(0).repeat(bs, 1, 1) # (bs, 4, 4) + + # Reshape the input + len_time = len(bev_list) + cat_bev = torch.stack(bev_list, dim=1) # (bs, len_time, h*w, embed_dims) + cat_bev = cat_bev.permute(0, 1, 3, 2) # (bs, len_time, embed_dims, h*w) + cat_bev = cat_bev.reshape(bs, len_time, channels, total_z, bev_h, bev_w) + cat_bev = cat_bev.permute(0, 1, 5, 4, 3, 2) # (bs, len_time, w, h, z, c) + + # Step 2: prepare the reference point for BEV volume + # sample reference point for current ego coordinate + bev_ref = self.get_bev_ref(H=bev_h, W=bev_w, Z=total_z) # (w, h, z, 4, 1) + cur_center_bev_ref = bev_ref.squeeze(-1) # (w, h, z, 4) + cur_center_bev_ref = cur_center_bev_ref[..., :3] # (w, h, z, 3) + + # sample cur point to a line according to a series of scales(strides) + scales = self.scales + scales = torch.tensor(scales, device='cuda', dtype=torch.float32) # (scales_len, ) + scales = scales.unsqueeze(1).repeat(1, 3) # shape (scales_len, 3) + # Because we need to scale the x, y, and z components by the same multiple, we set this component to 3 here + + 
repeat_cur_center_bev_ref = cur_center_bev_ref.unsqueeze(3).repeat(1, 1, 1, self.scales_len, 1) # (w, h, z, scales_len, 3) + cur_line_bev_ref = repeat_cur_center_bev_ref * scales # (w, h, z, scales_len, 3) + + # reshape cur_line prepared for wrapping + repeat_cur_line_bev_ref = cur_line_bev_ref.unsqueeze(3).repeat(1, 1, 1, len_time-1, 1, 1) # (w, h, z, len_time-1, scales_len, 3) + repeat_cur_line_bev_ref = repeat_cur_line_bev_ref.permute(4, 0, 1, 2, 3, 5) # (scales_len, w, h, z, len-1, 3) + ones_tensor = torch.ones_like(repeat_cur_line_bev_ref[..., 0:1]) # (scales_len, w, h, z, len-1, 1) + repeat_cur_line_bev_ref = torch.cat((repeat_cur_line_bev_ref, ones_tensor), dim=-1) # (scales_len, w, h, z, len-1, 4) + repeat_cur_line_bev_ref = repeat_cur_line_bev_ref.unsqueeze(6) # (scales_len, w, h, z, len-1, 4, 1) + + # Step 3: use `ego2global` to wrap the reference point to the previous BEV volume + # process the data add batch size + repeat_cur_line_bev_ref = repeat_cur_line_bev_ref.unsqueeze(4) # (scales_len, w, h, z, 1, len-1, 4, 1) + repeat_cur_line_bev_ref = repeat_cur_line_bev_ref.repeat(1, 1, 1, 1, bs, 1, 1, 1) # (scales_len, w, h, z, bs, len-1, 4, 1) + cur_ego2global = cur_ego2global.unsqueeze(1).repeat(1, len_time-1, 1, 1) # (bs, len-1, 4, 4) + + # use ego2global to transform the reference point to prev_bev coordinate, estimate egomotion + prev_line_bev_ref = torch.inverse(cat_prev_ego2global) @ cur_ego2global @ repeat_cur_line_bev_ref # (scales_len, w, h, z, bs, len-1, 4, 1) + repeat_cur_line_bev_ref = repeat_cur_line_bev_ref.permute(4, 5, 0, 1, 2, 3, 6, 7) # (bs, len_time-1, scales_len, w, h, z, 4, 1) + cur_line_bev_ref = repeat_cur_line_bev_ref[:, 0:1, ...] # (bs, 1, scales_len, w, h, z, 4, 1) + prev_line_bev_ref = prev_line_bev_ref.permute(4, 5, 0, 1, 2, 3, 6, 7) # (bs, len_time-1, scales_len, w, h, z, 4, 1) + line_bev_ref = torch.cat((prev_line_bev_ref, cur_line_bev_ref), dim=1) # (bs, len_time, scales_len, w, h, z, 4, 1) + + line_bev_ref = line_bev_ref.squeeze(-1) # (bs, len, scales_len, w, h, z, 4) + line_bev_ref = line_bev_ref[..., :3] # (bs, len, scales_len, w, h, z, 3) + + # Step 4: get mask + mask_for_all = ( + (line_bev_ref[..., 0] > pc_range[0]) & (line_bev_ref[..., 0] < pc_range[3]) & + (line_bev_ref[..., 1] > pc_range[1]) & (line_bev_ref[..., 1] < pc_range[4]) & + (line_bev_ref[..., 2] > pc_range[2]) & (line_bev_ref[..., 2] < pc_range[5]) + ) # out-of-range points will be 0. with shape (len, scales_len, w, h, z) + + # Step 5: gather feature for all reference + line_bev_ref = line_bev_ref.reshape(bs, len_time*self.scales_len, bev_w, bev_h, total_z, 3) # (bs, len*scales_len, w, h, z, 3) + line_bev = self.gather_feature_scales(cat_bev, line_bev_ref, dtype=cat_bev.dtype) # (bs, len, scales_len, w, h, z, c) + line_bev[~mask_for_all] = 0 + + extra = {} + # Step 6: padding the line_bev by zero tensor. 
(because convolution need same length) + true_queue_length = self.sampled_queue_length + if len_time < true_queue_length: + zero_bev = torch.zeros_like(line_bev[:, 0:1]) # (bs, 1, scales_len, w, h, z, c) + zero_bev = zero_bev.repeat(1, true_queue_length-len_time, 1, 1, 1, 1, 1) # (bs, pad_len, scales_len, w, h, z, c) + line_bev = torch.cat((zero_bev, line_bev), dim=1) # (bs, true_q_len, scales_len, w, h, z, c) + + # Step 7: use resnet to fuse the temporal information into a weight + line_bev = line_bev.reshape(bs, -1, *line_bev.shape[3:]) # (bs, true_q_len*scales_len, w, h, z, c) + line_bev = line_bev.permute(0, 1, 5, 4, 3, 2) # (bs, true_q_len*scales_len, c, z, h, w) + line_bev = line_bev.reshape(bs, -1, *line_bev.shape[3:]) # (bs, true_q_len*scales_len*c, z, h, w) + + refine_feat_w = self.resnet(line_bev) # (bs, 2, z, h, w) # 2 channels setting is used for focal loss + refine_feat_w = refine_feat_w.permute(0, 4, 3, 2, 1) # (bs, w, h, z, 2) + extra['refine_feat_w'] = refine_feat_w + + # Step 8: update the BEV volume + update_bev = cat_bev[:, -1] * refine_feat_w[..., 1:].sigmoid() # (bs, w, h, z, c) * (bs, w, h, z, 1) = (bs, w, h, z, c) + update_bev = update_bev.permute(0, 4, 3, 2, 1) # (bs, c, z, h, w) + update_bev = update_bev.reshape(-1, embed_dims, bev_HW).permute(0, 2, 1) # (1, h*w, embed_dims) + + return update_bev, extra \ No newline at end of file diff --git a/projects/mmdet3d_plugin/bevformer/modules/custom_base_transformer_layer.py b/projects/mmdet3d_plugin/bevformer/modules/custom_base_transformer_layer.py index a5d994c..186dc7d 100644 --- a/projects/mmdet3d_plugin/bevformer/modules/custom_base_transformer_layer.py +++ b/projects/mmdet3d_plugin/bevformer/modules/custom_base_transformer_layer.py @@ -73,7 +73,7 @@ def __init__(self, attn_cfgs=None, ffn_cfgs=dict( type='FFN', - embed_dims=256, + # embed_dims=256, feedforward_channels=1024, num_fcs=2, ffn_drop=0., @@ -91,11 +91,11 @@ def __init__(self, ffn_num_fcs='num_fcs') for ori_name, new_name in deprecated_args.items(): if ori_name in kwargs: - warnings.warn( - f'The arguments `{ori_name}` in BaseTransformerLayer ' - f'has been deprecated, now you should set `{new_name}` ' - f'and other FFN related arguments ' - f'to a dict named `ffn_cfgs`. ') + # warnings.warn( + # f'The arguments `{ori_name}` in BaseTransformerLayer ' + # f'has been deprecated, now you should set `{new_name}` ' + # f'and other FFN related arguments ' + # f'to a dict named `ffn_cfgs`. ') ffn_cfgs[new_name] = kwargs[ori_name] super(MyCustomBaseTransformerLayer, self).__init__(init_cfg) @@ -150,9 +150,12 @@ def __init__(self, assert len(ffn_cfgs) == num_ffns for ffn_index in range(num_ffns): if 'embed_dims' not in ffn_cfgs[ffn_index]: - ffn_cfgs['embed_dims'] = self.embed_dims + ffn_cfgs[ffn_index]['embed_dims'] = self.embed_dims else: - assert ffn_cfgs[ffn_index]['embed_dims'] == self.embed_dims + # print() + # print('ffn_cfgs ',ffn_cfgs[ffn_index]['embed_dims'] ,self.embed_dims) + assert ffn_cfgs[ffn_index]['embed_dims'] == self.embed_dims, \ + 'ffn:{} self.embed_dims: {}'.format(ffn_cfgs[ffn_index]['embed_dims'],self.embed_dims) self.ffns.append( build_feedforward_network(ffn_cfgs[ffn_index])) diff --git a/projects/mmdet3d_plugin/bevformer/modules/decode_head.py b/projects/mmdet3d_plugin/bevformer/modules/decode_head.py new file mode 100644 index 0000000..303e177 --- /dev/null +++ b/projects/mmdet3d_plugin/bevformer/modules/decode_head.py @@ -0,0 +1,307 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
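+# NOTE: adapted from mmseg's BaseDecodeHead. In this copy the default conv_seg
+# construction is commented out (subclasses provide their own prediction layer)
+# and a `downsample_label_ratio` option is added to shrink seg_label before the
+# loss computation.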
+import warnings +from abc import ABCMeta, abstractmethod + +import torch +import torch.nn as nn +from mmcv.runner import BaseModule, auto_fp16, force_fp32 + +from mmseg.core import build_pixel_sampler +from mmseg.ops import resize +from mmseg.models.builder import build_loss +from mmseg.models.losses import accuracy + + +class BaseDecodeHead(BaseModule, metaclass=ABCMeta): + """Base class for BaseDecodeHead. + + Args: + in_channels (int|Sequence[int]): Input channels. + channels (int): Channels after modules, before conv_seg. + num_classes (int): Number of classes. + out_channels (int): Output channels of conv_seg. + threshold (float): Threshold for binary segmentation in the case of + `out_channels==1`. Default: None. + dropout_ratio (float): Ratio of dropout layer. Default: 0.1. + conv_cfg (dict|None): Config of conv layers. Default: None. + norm_cfg (dict|None): Config of norm layers. Default: None. + act_cfg (dict): Config of activation layers. + Default: dict(type='ReLU') + in_index (int|Sequence[int]): Input feature index. Default: -1 + input_transform (str|None): Transformation type of input features. + Options: 'resize_concat', 'multiple_select', None. + 'resize_concat': Multiple feature maps will be resize to the + same size as first one and than concat together. + Usually used in FCN head of HRNet. + 'multiple_select': Multiple feature maps will be bundle into + a list and passed into decode head. + None: Only one select feature map is allowed. + Default: None. + loss_decode (dict | Sequence[dict]): Config of decode loss. + The `loss_name` is property of corresponding loss function which + could be shown in training log. If you want this loss + item to be included into the backward graph, `loss_` must be the + prefix of the name. Defaults to 'loss_ce'. + e.g. dict(type='CrossEntropyLoss'), + [dict(type='CrossEntropyLoss', loss_name='loss_ce'), + dict(type='DiceLoss', loss_name='loss_dice')] + Default: dict(type='CrossEntropyLoss'). + ignore_index (int | None): The label index to be ignored. When using + masked BCE loss, ignore_index should be set to None. Default: 255. + sampler (dict|None): The config of segmentation map sampler. + Default: None. + align_corners (bool): align_corners argument of F.interpolate. + Default: False. + downsample_label_ratio (int): The ratio to downsample seg_label + in losses. downsample_label_ratio > 1 will reduce memory usage. + Disabled if downsample_label_ratio = 0. + Default: 0. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ """ + + def __init__(self, + in_channels, + channels, + *, + num_classes, + out_channels=None, + threshold=None, + dropout_ratio=0.1, + conv_cfg=None, + norm_cfg=None, + act_cfg=dict(type='ReLU'), + in_index=-1, + input_transform=None, + loss_decode=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + ignore_index=255, + sampler=None, + align_corners=False, + downsample_label_ratio=0, + init_cfg=dict( + type='Normal', std=0.01, override=dict(name='conv_seg'))): + super(BaseDecodeHead, self).__init__(init_cfg) + self._init_inputs(in_channels, in_index, input_transform) + self.channels = channels + self.dropout_ratio = dropout_ratio + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.in_index = in_index + + self.ignore_index = ignore_index + self.align_corners = align_corners + self.downsample_label_ratio = downsample_label_ratio + if not isinstance(self.downsample_label_ratio, int) or \ + self.downsample_label_ratio < 0: + warnings.warn('downsample_label_ratio should ' + 'be set as an integer equal or larger than 0.') + + if out_channels is None: + if num_classes == 2: + warnings.warn('For binary segmentation, we suggest using' + '`out_channels = 1` to define the output' + 'channels of segmentor, and use `threshold`' + 'to convert seg_logist into a prediction' + 'applying a threshold') + out_channels = num_classes + + if out_channels != num_classes and out_channels != 1: + raise ValueError( + 'out_channels should be equal to num_classes,' + 'except binary segmentation set out_channels == 1 and' + f'num_classes == 2, but got out_channels={out_channels}' + f'and num_classes={num_classes}') + + if out_channels == 1 and threshold is None: + threshold = 0.3 + # warnings.warn('threshold is not defined for binary, and defaults to 0.3') + self.num_classes = num_classes + self.out_channels = out_channels + self.threshold = threshold + + if isinstance(loss_decode, dict): + self.loss_decode = build_loss(loss_decode) + elif isinstance(loss_decode, (list, tuple)): + self.loss_decode = nn.ModuleList() + for loss in loss_decode: + self.loss_decode.append(build_loss(loss)) + else: + raise TypeError(f'loss_decode must be a dict or sequence of dict,\ + but got {type(loss_decode)}') + + if sampler is not None: + self.sampler = build_pixel_sampler(sampler, context=self) + else: + self.sampler = None + + # self.conv_seg = nn.Conv2d(channels, self.out_channels, kernel_size=1) + if dropout_ratio > 0: + self.dropout = nn.Dropout2d(dropout_ratio) + else: + self.dropout = None + self.fp16_enabled = False + + def extra_repr(self): + """Extra repr.""" + s = f'input_transform={self.input_transform}, ' \ + f'ignore_index={self.ignore_index}, ' \ + f'align_corners={self.align_corners}' + return s + + def _init_inputs(self, in_channels, in_index, input_transform): + """Check and initialize input transforms. + + The in_channels, in_index and input_transform must match. + Specifically, when input_transform is None, only single feature map + will be selected. So in_channels and in_index must be of type int. + When input_transform + + Args: + in_channels (int|Sequence[int]): Input channels. + in_index (int|Sequence[int]): Input feature index. + input_transform (str|None): Transformation type of input features. + Options: 'resize_concat', 'multiple_select', None. + 'resize_concat': Multiple feature maps will be resize to the + same size as first one and than concat together. + Usually used in FCN head of HRNet. 
+ 'multiple_select': Multiple feature maps will be bundle into + a list and passed into decode head. + None: Only one select feature map is allowed. + """ + + if input_transform is not None: + assert input_transform in ['resize_concat', 'multiple_select'] + self.input_transform = input_transform + self.in_index = in_index + if input_transform is not None: + assert isinstance(in_channels, (list, tuple)) + assert isinstance(in_index, (list, tuple)) + assert len(in_channels) == len(in_index) + if input_transform == 'resize_concat': + self.in_channels = sum(in_channels) + else: + self.in_channels = in_channels + else: + assert isinstance(in_channels, int) + assert isinstance(in_index, int) + self.in_channels = in_channels + + def _transform_inputs(self, inputs): + """Transform inputs for decoder. + + Args: + inputs (list[Tensor]): List of multi-level img features. + + Returns: + Tensor: The transformed inputs + """ + + if self.input_transform == 'resize_concat': + inputs = [inputs[i] for i in self.in_index] + upsampled_inputs = [ + resize( + input=x, + size=inputs[0].shape[2:], + mode='bilinear', + align_corners=self.align_corners) for x in inputs + ] + inputs = torch.cat(upsampled_inputs, dim=1) + elif self.input_transform == 'multiple_select': + inputs = [inputs[i] for i in self.in_index] + else: + inputs = inputs[self.in_index] + + return inputs + + @auto_fp16() + @abstractmethod + def forward(self, inputs): + """Placeholder of forward function.""" + pass + + def forward_train(self, inputs, img_metas, gt_semantic_seg, train_cfg): + """Forward function for training. + Args: + inputs (list[Tensor]): List of multi-level img features. + img_metas (list[dict]): List of image info dict + gt_semantic_seg (Tensor): Semantic segmentation masks + used if the architecture supports semantic segmentation task. + train_cfg (dict): The training config. + + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + seg_logits = self(inputs) + losses = self.losses(seg_logits, gt_semantic_seg) + return losses + + def forward_test(self, inputs, img_metas, test_cfg): + """Forward function for testing. + + Args: + inputs (list[Tensor]): List of multi-level img features. + img_metas (list[dict]): List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmseg/datasets/pipelines/formatting.py:Collect`. + test_cfg (dict): The testing config. + + Returns: + Tensor: Output segmentation map. 
+ """ + return self.forward(inputs) + + def cls_seg(self, feat): + """Classify each pixel.""" + if self.dropout is not None: + feat = self.dropout(feat) + output = self.conv_seg(feat) + return output + + @force_fp32(apply_to=('seg_logit', )) + def losses(self, seg_logit, seg_label): + """Compute segmentation loss.""" + loss = dict() + if self.downsample_label_ratio > 0: + seg_label = seg_label.float() + target_size = (seg_label.shape[2] // self.downsample_label_ratio, + seg_label.shape[3] // self.downsample_label_ratio) + seg_label = resize( + input=seg_label, size=target_size, mode='nearest') + seg_label = seg_label.long() + seg_logit = resize( + input=seg_logit, + size=seg_label.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + if self.sampler is not None: + seg_weight = self.sampler.sample(seg_logit, seg_label) + else: + seg_weight = None + seg_label = seg_label.squeeze(1) + + if not isinstance(self.loss_decode, nn.ModuleList): + losses_decode = [self.loss_decode] + else: + losses_decode = self.loss_decode + for loss_decode in losses_decode: + if loss_decode.loss_name not in loss: + loss[loss_decode.loss_name] = loss_decode( + seg_logit, + seg_label, + weight=seg_weight, + ignore_index=self.ignore_index) + else: + loss[loss_decode.loss_name] += loss_decode( + seg_logit, + seg_label, + weight=seg_weight, + ignore_index=self.ignore_index) + + loss['acc_seg'] = accuracy( + seg_logit, seg_label, ignore_index=self.ignore_index) + return loss diff --git a/projects/mmdet3d_plugin/bevformer/modules/dts_transformer.py b/projects/mmdet3d_plugin/bevformer/modules/dts_transformer.py new file mode 100644 index 0000000..e696bcd --- /dev/null +++ b/projects/mmdet3d_plugin/bevformer/modules/dts_transformer.py @@ -0,0 +1,541 @@ +# --------------------------------------------- +# Copyright (c) OpenMMLab. All rights reserved. 
+# --------------------------------------------- +# Modified by Zhiqi Li +# --------------------------------------------- + +import numpy as np +import torch +import torch.nn as nn +from mmcv.cnn import xavier_init +from mmcv.cnn.bricks.transformer import build_transformer_layer_sequence, build_positional_encoding +from mmcv.runner.base_module import BaseModule + +from mmdet.models.utils.builder import TRANSFORMER +from torch.nn.init import normal_ +from projects.mmdet3d_plugin.models.utils.visual import save_tensor +from mmcv.runner.base_module import BaseModule +from torchvision.transforms.functional import rotate +from .temporal_self_attention import TemporalSelfAttention +from .spatial_cross_attention import MSDeformableAttention3D +from .decoder import CustomMSDeformableAttention +from projects.mmdet3d_plugin.models.utils.bricks import run_time +from mmcv.runner import force_fp32, auto_fp16 +from mmcv.cnn import PLUGIN_LAYERS, Conv2d,Conv3d, ConvModule, caffe2_xavier_init +from functools import partial +import spconv.pytorch as spconv +from mmdet.models.builder import build_loss + +def post_act_block(in_channels, out_channels, kernel_size, indice_key=None, stride=1, padding=0, + conv_type='subm', norm_fn=None): + + if conv_type == 'subm': + conv = spconv.SubMConv3d(in_channels, out_channels, kernel_size, bias=False, indice_key=indice_key) + elif conv_type == 'spconv': + conv = spconv.SparseConv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, + bias=False, indice_key=indice_key) + elif conv_type == 'inverseconv': + conv = spconv.SparseInverseConv3d(in_channels, out_channels, kernel_size, indice_key=indice_key, bias=False) + else: + raise NotImplementedError + + m = spconv.SparseSequential( + conv, + norm_fn(out_channels), + nn.ReLU(), + ) + + return m + + + +@TRANSFORMER.register_module() +class HybridTransformer(BaseModule): + """Implements the Detr3D transformer. + Args: + as_two_stage (bool): Generate query from encoder features. + Default: False. + num_feature_levels (int): Number of feature maps from FPN: + Default: 4. + two_stage_num_proposals (int): Number of proposals when set + `as_two_stage` as True. Default: 300. 
+ """ + + def __init__(self, + num_feature_levels=4, + num_cams=6, + two_stage_num_proposals=300, + encoder=None, + decoder=None, + act_cfg=None, + norm_cfg_3d=dict(type='SyncBN', requires_grad=True), + position=None, # positional embedding of query point + encoder_embed_dims=[256, 256, 128, 64], + feature_map_z=[1, 4, 8, 16], + dilations=[2,2,2,2], + paddings=[2,2,2,2], + num_convs=[3,2,2,2], + embed_dims=256, + more_conv=False, + use_conv=False, + rotate_prev_bev=True, + use_shift=True, + use_can_bus=True, + can_bus_norm=True, + use_cams_embeds=True, + rotate_center=[100, 100], + decoder_on_bev=True, + loss_bin_occ=None, + bev_z=16, + **kwargs): + super(HybridTransformer, self).__init__(**kwargs) + self.more_conv=more_conv + self.num_convs=num_convs + self.use_conv=use_conv + self.encoders = [] + self.positional_encodings = [] + self.encoder_block_num = len(encoder) + self.encoder_keys = [] + self.feature_map_z = feature_map_z + self.encoder_embed_dims = encoder_embed_dims + self.dilations = dilations + self.paddings=paddings + self.norm_cfg_3d=norm_cfg_3d + self.act_cfg=act_cfg + for encoder_key in encoder: + self.encoder_keys.append(encoder_key) + self.encoders.append(build_transformer_layer_sequence(encoder[encoder_key])) + self.positional_encodings.append(build_positional_encoding(position[encoder_key])) + + # register model + for i, layer in enumerate(self.encoders): + self.add_module('encoder_{}'.format(i), layer) + for i, layer in enumerate(self.positional_encodings): + self.add_module('pos_{}'.format(i), layer) + + self.embed_dims = embed_dims + self.num_feature_levels = num_feature_levels + self.num_cams = num_cams + self.fp16_enabled = False + self.embed_dim_ratios=[ embed_dims//dim for dim in encoder_embed_dims] + + self.rotate_prev_bev = rotate_prev_bev + self.use_shift = use_shift + self.use_can_bus = use_can_bus + self.can_bus_norm = can_bus_norm + self.use_cams_embeds = use_cams_embeds + self.decoder_on_bev = decoder_on_bev + self.bev_z = bev_z + self.two_stage_num_proposals = two_stage_num_proposals + self.loss_bin_occ=build_loss(loss_bin_occ) + + self.init_layers() + self.rotate_center = rotate_center + + def init_layers(self): + """Initialize layers of the Detr3DTransformer.""" + self.level_embeds = nn.Parameter(torch.Tensor( + self.num_feature_levels, self.embed_dims)) + self.cams_embeds = nn.Parameter( + torch.Tensor(self.num_cams, self.embed_dims)) + # self.reference_points = nn.Linear(self.embed_dims, 3) + self.can_bus_mlp = nn.Sequential( + nn.Linear(18, self.embed_dims // 2), + nn.ReLU(inplace=True), + nn.Linear(self.embed_dims // 2, self.embed_dims), + nn.ReLU(inplace=True), + ) + if self.can_bus_norm: + self.can_bus_mlp.add_module('norm', nn.LayerNorm(self.embed_dims)) + + norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01) + # mid-stage bev->voxe->voxel-> voxel + block=post_act_block + for i in range(self.encoder_block_num-1): + conv = [] + if i==0: + for j in range(self.num_convs[i]): + conv.append( + ConvModule( + self.encoder_embed_dims[i], + self.encoder_embed_dims[i], + kernel_size=3, + stride=1, + padding=self.paddings[i], + dilation=self.dilations[i], + # bias=use_bias_3d, + conv_cfg=dict(type='Conv2d'), + norm_cfg=self.norm_cfg_3d, + act_cfg=self.act_cfg),) + conv = nn.Sequential(*conv) + self.convs.append(conv) + self.add_module('dense_convs_{}'.format(i), conv) + else: + for j in range(self.num_convs[i]): + if j== 0: + conv.append( + block(self.encoder_embed_dims[i], + self.encoder_embed_dims[i], + 3, + norm_fn=norm_fn, + stride=1, + padding=1, 
+ indice_key='spconv_{}'.format(i), + conv_type='spconv') + ) + else: + conv.append( + block(self.encoder_embed_dims[i], + self.encoder_embed_dims[i], + 3, + norm_fn=norm_fn, + stride=1, + padding=1, + indice_key='subm_{}'.format(i), + ) + ) + conv=spconv.SparseSequential(conv) + self.convs.append(conv) + self.add_module('sparse_convs_{}'.format(i), conv) + + self.occ_predictors=[] + for i in range(self.encoder_block_num - 1): + occ_predictor=nn.Conv2d(self.encoder_embed_dims[i],1,kernel_size=1) + self.occ_predictors.append( + occ_predictor + ) + self.add_module('occ_predictor_{}'.format(i), occ_predictor) + + self.image_feature_map_1_2 = nn.Sequential( + nn.Linear(self.embed_dims, self.embed_dims//2), + nn.ReLU(inplace=True), + ) + self.image_feature_map_1_4 = nn.Sequential( + nn.Linear(self.embed_dims, self.embed_dims//4), + nn.ReLU(inplace=True), + ) + if 8 in self.embed_dim_ratios: + self.image_feature_map_1_8 = nn.Sequential( + nn.Linear(self.embed_dims, self.embed_dims//8), + nn.ReLU(inplace=True), + ) + else: + self.image_feature_map_1_8 = None + + if 16 in self.embed_dim_ratios: + self.image_feature_map_1_16 = nn.Sequential( + nn.Linear(self.embed_dims, self.embed_dims//16), + nn.ReLU(inplace=True), + ) + else: + self.image_feature_map_1_16 = None + + + def init_weights(self): + """Initialize the transformer weights.""" + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + for m in self.modules(): + if isinstance(m, MSDeformableAttention3D) or isinstance(m, TemporalSelfAttention) \ + or isinstance(m, CustomMSDeformableAttention): + try: + m.init_weight() + except AttributeError: + m.init_weights() + normal_(self.level_embeds) + normal_(self.cams_embeds) + # xavier_init(self.reference_points, distribution='uniform', bias=0.) + xavier_init(self.can_bus_mlp, distribution='uniform', bias=0.) + + xavier_init(self.image_feature_map_1_2, distribution='uniform', bias=0.) + xavier_init(self.image_feature_map_1_4, distribution='uniform', bias=0.) + if self.image_feature_map_1_8 is not None: + xavier_init(self.image_feature_map_1_8, distribution='uniform', bias=0.) + if self.image_feature_map_1_16 is not None: + xavier_init(self.image_feature_map_1_16, distribution='uniform', bias=0.) + + @auto_fp16(apply_to=('mlvl_feats', 'bev_queries', 'prev_bev', 'bev_pos')) + def get_voxel_features( + self, + mlvl_feats, + bev_queries, + bev_z, + bev_h, + bev_w, + grid_length=[0.512, 0.512], + bev_pos=None, + prev_bev=None, + **kwargs): + """ + obtain bev features. 
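+ Queries are augmented with can-bus signals, a BEVFormer-style ego-motion shift
+ is computed, and the flattened image features are projected to each block's
+ channel width (1/2, 1/4, ...) before the stacked encoders are run block by block.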
+ """ + + bs = mlvl_feats[0].size(0) + bev_queries = bev_queries.unsqueeze(1).repeat(1, bs, 1) # (num_query, bs, embed_dims) + bev_pos = None + # bev_pos = bev_pos.flatten(2).permute(2, 0, 1) # (num_query, bs, embed_dims) + + # obtain rotation angle and shift with ego motion + delta_x = np.array([each['can_bus'][0] + for each in kwargs['img_metas']]) + delta_y = np.array([each['can_bus'][1] + for each in kwargs['img_metas']]) + ego_angle = np.array( + [each['can_bus'][-2] / np.pi * 180 for each in kwargs['img_metas']]) + grid_length_y = grid_length[0] + grid_length_x = grid_length[1] + translation_length = np.sqrt(delta_x ** 2 + delta_y ** 2) + translation_angle = np.arctan2(delta_y, delta_x) / np.pi * 180 + bev_angle = ego_angle - translation_angle + shift_y = translation_length * \ + np.cos(bev_angle / 180 * np.pi) / grid_length_y / bev_h + shift_x = translation_length * \ + np.sin(bev_angle / 180 * np.pi) / grid_length_x / bev_w + shift_y = shift_y * self.use_shift + shift_x = shift_x * self.use_shift + shift = bev_queries.new_tensor( + [shift_x, shift_y]).permute(1, 0) # (2, bs) -> (bs, 2) + + # add can bus signals + can_bus = bev_queries.new_tensor( + [each['can_bus'] for each in kwargs['img_metas']]) # [:, :] + can_bus = self.can_bus_mlp(can_bus)[None, :, :] + bev_queries = bev_queries + can_bus * self.use_can_bus # (query_num, bs, embed_dims) + + feat_flatten = [] + spatial_shapes = [] + for lvl, feat in enumerate(mlvl_feats): + bs, num_cam, c, h, w = feat.shape + spatial_shape = (h, w) + feat = feat.flatten(3).permute(1, 0, 3, 2) + if self.use_cams_embeds: + feat = feat + self.cams_embeds[:, None, None, :].to(feat.dtype) + feat = feat + self.level_embeds[None, + None, lvl:lvl + 1, :].to(feat.dtype) + spatial_shapes.append(spatial_shape) + feat_flatten.append(feat) + + feat_flatten = torch.cat(feat_flatten, 2) + spatial_shapes = torch.as_tensor( + spatial_shapes, dtype=torch.long, device=bev_queries.device) + level_start_index = torch.cat((spatial_shapes.new_zeros( + (1,)), spatial_shapes.prod(1).cumsum(0)[:-1])) + + feat_flatten_original = feat_flatten.permute( + 0, 2, 1, 3) # (num_cam, H*W, bs, embed_dims) + feat_flatten_map1_2 = self.image_feature_map_1_2(feat_flatten_original) + feat_flatten_map1_4 = self.image_feature_map_1_4(feat_flatten_original) + if self.image_feature_map_1_8 is not None: + feat_flatten_map1_8 = self.image_feature_map_1_8(feat_flatten_original) + else: + feat_flatten_map1_8 = None + if self.image_feature_map_1_16 is not None: + feat_flatten_map1_16 = self.image_feature_map_1_16(feat_flatten_original) + else: + feat_flatten_map1_16 = None + + block_features = [] + bin_occ_loss=dict() + for block_index in range(self.encoder_block_num): + # encoder: BEV -> Voxeli -> Voxelj -> Voxelk + # print('bev_query.shape:', block_index, bev_queries.shape) + block_bev_z = self.feature_map_z[block_index] + block_embed_dims = self.encoder_embed_dims[block_index] + if block_bev_z == 1: + bev_mask = torch.zeros((bs, bev_h, bev_w), + device=bev_queries.device).to(bev_queries.dtype) + else: + bev_mask = torch.zeros((bs, block_bev_z, bev_h, bev_w), + device=bev_queries.device).to(bev_queries.dtype) + pos = self.positional_encodings[block_index](bev_mask).to(bev_queries.dtype) # (bs, embed_dims, h, w) + pos = pos.flatten(2).permute(2, 0, 1) # (query_num, bs, embed_dims) + + if block_embed_dims == self.embed_dims: + feat_flatten = feat_flatten_original + elif block_embed_dims*2 == self.embed_dims: + feat_flatten = feat_flatten_map1_2 + elif block_embed_dims*4 == self.embed_dims: + 
feat_flatten = feat_flatten_map1_4 + elif block_embed_dims*8 == self.embed_dims: + feat_flatten = feat_flatten_map1_8 + elif block_embed_dims*16 == self.embed_dims: + feat_flatten = feat_flatten_map1_16 + + # if prev_bev is not None: # (bs, num_query, embed_dims) + # stage_prev_bev = prev_bev[block_index] + # if block_bev_z == 1: # 2D BEV + # if stage_prev_bev.shape[1] == bev_h * bev_w: + # stage_prev_bev = stage_prev_bev.permute(1, 0, 2) # (num_query, bs, embed_dims) + # if self.rotate_prev_bev: + # for i in range(bs): + # # num_prev_bev = prev_bev.size(1) + # rotation_angle = kwargs['img_metas'][i]['can_bus'][-1] + # tmp_prev_bev = stage_prev_bev[:, i].reshape( + # bev_h, bev_w, -1).permute(2, 0, 1) # (embed_dims, bev_h, bev_w) + # tmp_prev_bev = rotate(tmp_prev_bev, rotation_angle, + # center=self.rotate_center) # TODO: for 3D voxel + # tmp_prev_bev = tmp_prev_bev.permute(1, 2, 0).reshape( + # bev_h * bev_w, 1, -1) + # stage_prev_bev[:, i] = tmp_prev_bev[:, 0] + # + # else: # 3D Voxel + # if stage_prev_bev.shape[1] == block_bev_z* bev_h * bev_w: + # stage_prev_bev = stage_prev_bev.permute(1, 0, 2) # (num_query, bs, embed_dims) + # if self.rotate_prev_bev: # revise for 3D feature map + # for i in range(bs): + # rotation_angle = kwargs['img_metas'][i]['can_bus'][-1] + # tmp_prev_bev = stage_prev_bev[:, i].reshape(block_bev_z, bev_h, bev_w, -1).permute(3, 0, 1, 2) # (embed_dims, bev_z, bev_h, bev_w) + # tmp_prev_bev = rotate(tmp_prev_bev, rotation_angle, center=self.rotate_center) + # tmp_prev_bev = tmp_prev_bev.permute(1, 2, 3, 0).reshape(block_bev_z * bev_h * bev_w, 1, -1) + # stage_prev_bev[:, i] = tmp_prev_bev[:, 0] + # else: + # stage_prev_bev = None + + # print() + # print('bev_queries',bev_queries.shape) + # print() + stage_prev_bev=None + output = self.encoders[block_index]( + bev_queries, + feat_flatten, + feat_flatten, + bev_z=block_bev_z, + bev_h=bev_h, + bev_w=bev_w, + bev_pos=pos, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + prev_bev=stage_prev_bev, + shift=shift, + **kwargs + ) + if block_index==0: + channels = self.encoder_embed_dims[block_index] + output = output.view(bev_h, bev_w, bs, channels) + output = output.permut(2,3,0,1) + output = self.convs[block_index] + output = output.flatten(2).permute(0,2,1) # to shape(bs,seq_len,C) + occ_pred=self.occ_predictor(output) + occ_gt=self.get_occ_gt() + occ_loss=self.loss_bin_occ(occ_pred,occ_gt) + bin_occ_loss['bin_occ_loss_{}'.format(block_index)]=occ_loss + occ_gt=occ_gt.reshape(-1) + output=output.reshape(-1,channels)[occ_gt] # + else: + # output shape(bs,seq_len,C) + output = output.view(bev_h, bev_w, bs, self.encoder_embed_dims[block_index]) + block_features.append(output) + if self.use_conv: + if block_index < self.encoder_block_num - 1: # bev-> voxel or voxel_i -> voxel_j + bev_queries = output.view(block_bev_z, bev_h, bev_w, bs, self.encoder_embed_dims[block_index]) + bev_queries = bev_queries.permute(3,4,0,1,2) + # bev_queries = bev_queries.flatten(3) # (bev_h, bev_w, bs, embed_dims1*z1) + bev_queries = self.convs[block_index](bev_queries) + bev_queries = bev_queries.view(bs,self.encoder_embed_dims[block_index + 1], + self.feature_map_z[block_index + 1],bev_h, bev_w, + ) + bev_queries = bev_queries.permute(2,3,4,0,1) + bev_queries = bev_queries.reshape(-1, bs, self.encoder_embed_dims[block_index + 1]) # (num_query, bs, embed_dims) + else: + if block_index < self.encoder_block_num-1: # bev-> voxel or voxel_i -> voxel_j + bev_queries = output.view(block_bev_z, bev_h, bev_w, bs, 
self.encoder_embed_dims[block_index]) + bev_queries = bev_queries.permute(1, 2, 3, 0, 4) + bev_queries = bev_queries.flatten(3) # (bev_h, bev_w, bs, embed_dims1*z1) + bev_queries = self.bev_voxel_transfers[block_index](bev_queries) # (bev_h, bev_w, bs, embed_dims2*z2) + bev_queries = bev_queries.view(bev_h, bev_w, bs, self.feature_map_z[block_index+1], self.encoder_embed_dims[block_index+1]) + bev_queries = bev_queries.permute(3, 0, 1, 2, 4) + bev_queries = bev_queries.reshape(-1, bs, self.encoder_embed_dims[block_index+1]) # (num_query, bs, embed_dims) + + return block_features # is a list + + @auto_fp16(apply_to=('mlvl_feats', 'bev_queries', 'object_query_embed', 'prev_bev', 'bev_pos')) + def forward(self, + mlvl_feats, + bev_queries, + object_query_embed, + bev_z, + bev_h, + bev_w, + grid_length=[0.512, 0.512], + bev_pos=None, + reg_branches=None, + cls_branches=None, + prev_bev=None, + **kwargs): + """Forward function for `Detr3DTransformer`. + Args: + mlvl_feats (list(Tensor)): Input queries from + different level. Each element has shape + [bs, num_cams, embed_dims, h, w]. + bev_queries (Tensor): (bev_h*bev_w, c) + bev_pos (Tensor): (bs, embed_dims, bev_h, bev_w) + object_query_embed (Tensor): The query embedding for decoder, + with shape [num_query, c]. + reg_branches (obj:`nn.ModuleList`): Regression heads for + feature maps from each decoder layer. Only would + be passed when `with_box_refine` is True. Default to None. + Returns: + tuple[Tensor]: results of decoder containing the following tensor. + - bev_embed: BEV features + - inter_states: Outputs from decoder. If + return_intermediate_dec is True output has shape \ + (num_dec_layers, bs, num_query, embed_dims), else has \ + shape (1, bs, num_query, embed_dims). + - init_reference_out: The initial value of reference \ + points, has shape (bs, num_queries, 4). + - inter_references_out: The internal value of reference \ + points in decoder, has shape \ + (num_dec_layers, bs,num_query, embed_dims) + - enc_outputs_class: The classification score of \ + proposals generated from \ + encoder's feature maps, has shape \ + (batch, h*w, num_classes). \ + Only would be returned when `as_two_stage` is True, \ + otherwise None. + - enc_outputs_coord_unact: The regression results \ + generated from encoder's feature maps., has shape \ + (batch, h*w, 4). Only would \ + be returned when `as_two_stage` is True, \ + otherwise None. 
+ """ + + block_features = self.get_voxel_features( + mlvl_feats, + bev_queries, + bev_z, + bev_h, + bev_w, + grid_length=grid_length, + bev_pos=bev_pos, + prev_bev=prev_bev, + **kwargs) # voxel_embed shape: (bs, num_query, embed_dims) + + return block_features + + def get_dense_voxel_coors(self,bev_): + ref_z, ref_y, ref_x = torch.meshgrid( + torch.linspace(0.2, + n_voxel - 0.2, + n_voxel, + dtype=dtype, + device=device), + torch.linspace(0.2, + H - 0.2, + H, + dtype=dtype, + device=device), + torch.linspace(0.2, + W - 0.2, + W, + dtype=dtype, + device=device) + ) # shape: (bev_z, bev_h, bev_w) + ref_z = ref_z.reshape(-1)[None] / n_voxel + ref_y = ref_y.reshape(-1)[None] / H + ref_x = ref_x.reshape(-1)[None] / W + ref_2d = torch.stack((ref_x, ref_y, ref_z), -1) + + + + diff --git a/projects/mmdet3d_plugin/bevformer/modules/encoder.py b/projects/mmdet3d_plugin/bevformer/modules/encoder.py index fddfaef..6c21930 100644 --- a/projects/mmdet3d_plugin/bevformer/modules/encoder.py +++ b/projects/mmdet3d_plugin/bevformer/modules/encoder.py @@ -24,7 +24,6 @@ ext_module = ext_loader.load_ext( '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward']) - @TRANSFORMER_LAYER_SEQUENCE.register_module() class BEVFormerEncoder(TransformerLayerSequence): @@ -37,28 +36,40 @@ class BEVFormerEncoder(TransformerLayerSequence): `LN`. """ - def __init__(self, *args, pc_range=None, num_points_in_pillar=4, return_intermediate=False, dataset_type='nuscenes', + def __init__(self, *args, + bev_h=200, + bev_w=200, + pc_range=None, + num_points_in_pillar=4, + return_intermediate=False, **kwargs): super(BEVFormerEncoder, self).__init__(*args, **kwargs) + self.bev_h = bev_h + self.bev_w = bev_w self.return_intermediate = return_intermediate - self.num_points_in_pillar = num_points_in_pillar self.pc_range = pc_range self.fp16_enabled = False @staticmethod - def get_reference_points(H, W, Z=8, num_points_in_pillar=4, dim='3d', bs=1, device='cuda', dtype=torch.float): - """Get the reference points used in SCA and TSA. + def get_reference_points(H, W, Z, + num_points_in_pillar, + dim, + bs=1, + device='cuda', + dtype=torch.float): + """ + Get the reference points used in SCA and TSA. Args: H, W: spatial shape of bev. - Z: hight of pillar. - D: sample D points uniformly from each pillar. - device (obj:`device`): The device where - reference_points should be. - Returns: - Tensor: reference points used in decoder, has \ - shape (bs, num_keys, num_levels, 2). + Z: hight of pillar. only used if dim == '3d'. + num_points_in_pillar: number of points in a pillar. only used if dim == '3d'. + Returns: controlled by flag `dim` + ref_3d (Tensor): If dim==`3d`, this function return 3d reference points used in `SCA`. + It has shape (bs, num_points_in_pillar, h*w, 3). + ref_2d (Tensor): If dim==`2d`, this function return 2d reference points used in `TSA`. + It has shape (bs, h*w, 1, 2). """ # reference points in 3D space, used in spatial cross-attention (SCA) @@ -71,61 +82,69 @@ def get_reference_points(H, W, Z=8, num_points_in_pillar=4, dim='3d', bs=1, devi device=device).view(1, H, 1).expand(num_points_in_pillar, H, W) / H ref_3d = torch.stack((xs, ys, zs), -1) ref_3d = ref_3d.permute(0, 3, 1, 2).flatten(2).permute(0, 2, 1) - ref_3d = ref_3d[None].repeat(bs, 1, 1, 1) #shape: (bs,num_points_in_pillar,h*w,3) + ref_3d = ref_3d.unsqueeze(0).repeat(bs, 1, 1, 1) # shape: (bs,num_points_in_pillar,h*w,3) return ref_3d # reference points on 2D bev plane, used in temporal self-attention (TSA). 
elif dim == '2d': ref_y, ref_x = torch.meshgrid( - torch.linspace( - 0.5, H - 0.5, H, dtype=dtype, device=device), - torch.linspace( - 0.5, W - 0.5, W, dtype=dtype, device=device) + torch.linspace(0.5, H - 0.5, H, dtype=dtype, device=device), + torch.linspace(0.5, W - 0.5, W, dtype=dtype, device=device), + indexing='ij' ) - ref_y = ref_y.reshape(-1)[None] / H - ref_x = ref_x.reshape(-1)[None] / W + ref_y = ref_y.reshape(-1).unsqueeze(0) / H + ref_x = ref_x.reshape(-1).unsqueeze(0) / W ref_2d = torch.stack((ref_x, ref_y), -1) ref_2d = ref_2d.repeat(bs, 1, 1).unsqueeze(2) return ref_2d - # This function must use fp32!!! @force_fp32(apply_to=('reference_points', 'img_metas')) def point_sampling(self, reference_points, pc_range, img_metas): - ego2lidar=img_metas[0]['ego2lidar'] - lidar2img = [] + """ + This method performs point sampling by converting reference points from a 3D coordinate system to a 2D BEV (Bird's Eye View) coordinate system. + It takes 3d reference points(ref_3d), point_cloud_range(pc_range), and img_metas as inputs, + and it returns sampled reference points in the BEV coordinate system (reference_points_cam) and a binary mask indicating valid BEV points (bev_mask). + Args: + reference_points (Tensor): 3d reference points with shape (bs, num_points_in_pillar, h*w, 3). + pc_range (List): [x1, y1, z1, x2, y2, z2], the range of point cloud. + img_metas (list[dict]): current img meta info. The list has length of batch size. + Returns: + reference_points_cam (Tensor): projected reference points in the camera coordinate system with shape (num_cam, bs, h*w, num_points_in_pillar, 2). + bev_mask (Tensor): binary mask indicating valid points in `reference_points_cam` with shape (num_cam, bs, h*w, num_points_in_pillar). + """ - for img_meta in img_metas: - lidar2img.append(img_meta['lidar2img']) - lidar2img = np.asarray(lidar2img) - lidar2img = reference_points.new_tensor(lidar2img) # (B, N, 4, 4) + # Step 1: prepare `ego2lidar` and `lidar2img` transformation matrix + ego2lidar = img_metas[0]['ego2lidar'] ego2lidar = reference_points.new_tensor(ego2lidar) - # ego2lidar = ego2lidar.unsqueeze(dim=0).repeat(num_imgs,1,1).unsqueeze(0) + lidar2img = [img_meta['lidar2img'] for img_meta in img_metas] + lidar2img = reference_points.new_tensor(np.asarray(lidar2img)) # (bs, num_cam, 4, 4) + # Step 2: denormalize the reference points(convert it into the ego system coordinate) reference_points = reference_points.clone() - - reference_points[..., 0:1] = reference_points[..., 0:1] * \ - (pc_range[3] - pc_range[0]) + pc_range[0] - reference_points[..., 1:2] = reference_points[..., 1:2] * \ - (pc_range[4] - pc_range[1]) + pc_range[1] - reference_points[..., 2:3] = reference_points[..., 2:3] * \ - (pc_range[5] - pc_range[2]) + pc_range[2] - - reference_points = torch.cat( - (reference_points, torch.ones_like(reference_points[..., :1])), -1) - - reference_points = reference_points.permute(1, 0, 2, 3) #shape: (num_points_in_pillar,bs,h*w,4) - D, B, num_query = reference_points.size()[:3] # D=num_points_in_pillar , num_query=h*w + reference_points[..., 0:1] = reference_points[..., 0:1] * (pc_range[3] - pc_range[0]) + pc_range[0] + reference_points[..., 1:2] = reference_points[..., 1:2] * (pc_range[4] - pc_range[1]) + pc_range[1] + reference_points[..., 2:3] = reference_points[..., 2:3] * (pc_range[5] - pc_range[2]) + pc_range[2] + reference_points = torch.cat((reference_points, torch.ones_like(reference_points[..., :1])), -1) + reference_points = reference_points.permute(1, 0, 2, 3) # shape: 
(num_points_in_pillar, bs, h*w, 4) + + # Step 3: reshape transform matrix and reference points + num_points_in_pillar, bs, num_query_HW, _ = reference_points.size() num_cam = lidar2img.size(1) - - reference_points = reference_points.view( - D, B, 1, num_query, 4).repeat(1, 1, num_cam, 1, 1).unsqueeze(-1) #shape: (num_points_in_pillar,bs,num_cam,h*w,4) - - lidar2img = lidar2img.view( - 1, B, num_cam, 1, 4, 4).repeat(D, 1, 1, num_query, 1, 1) - ego2lidar=ego2lidar.view(1,1,1,1,4,4).repeat(D,1,num_cam,num_query,1,1) - reference_points_cam = torch.matmul(torch.matmul(lidar2img.to(torch.float32),ego2lidar.to(torch.float32)),reference_points.to(torch.float32)).squeeze(-1) + reference_points = reference_points.unsqueeze(2).repeat(1, 1, num_cam, 1, 1) # (num_points_in_pillar, bs, num_cam, h*w, 4) + reference_points = reference_points.unsqueeze(-1) # (num_points_in_pillar, bs, num_cam, h*w, 4, 1) + lidar2img = lidar2img.view(1, bs, num_cam, 1, 4, 4).repeat(num_points_in_pillar, 1, 1, num_query_HW, 1, 1) + ego2lidar = ego2lidar.view(1,1,1,1,4,4).repeat(num_points_in_pillar, bs, num_cam, num_query_HW, 1, 1) + + # Step 4: project the reference points to the image plane + lidar2img = lidar2img.to(torch.float32) + ego2lidar = ego2lidar.to(torch.float32) + ego2img = torch.matmul(lidar2img, ego2lidar) # (num_points_in_pillar, bs, num_cam, num_query_HW, 4, 4) + reference_points = reference_points.to(torch.float32) + reference_points_img = torch.matmul(ego2img, reference_points) + reference_points_cam = reference_points_img.squeeze(-1) + + # Step 5: normalize the camera reference points eps = 1e-5 - bev_mask = (reference_points_cam[..., 2:3] > eps) reference_points_cam = reference_points_cam[..., 0:2] / torch.maximum( reference_points_cam[..., 2:3], torch.ones_like(reference_points_cam[..., 2:3]) * eps) @@ -133,20 +152,18 @@ def point_sampling(self, reference_points, pc_range, img_metas): reference_points_cam[..., 0] /= img_metas[0]['img_shape'][0][1] reference_points_cam[..., 1] /= img_metas[0]['img_shape'][0][0] + # Step 6: use bev_mask to filter out the points that are outside the image boundary bev_mask = (bev_mask & (reference_points_cam[..., 1:2] > 0.0) & (reference_points_cam[..., 1:2] < 1.0) & (reference_points_cam[..., 0:1] < 1.0) & (reference_points_cam[..., 0:1] > 0.0)) + if digit_version(TORCH_VERSION) >= digit_version('1.8'): bev_mask = torch.nan_to_num(bev_mask) else: - bev_mask = bev_mask.new_tensor( - np.nan_to_num(bev_mask.cpu().numpy())) - - reference_points_cam = reference_points_cam.permute(2, 1, 3, 0, 4) #shape: (num_cam,bs,h*w,num_points_in_pillar,2) - - - bev_mask = bev_mask.permute(2, 1, 3, 0, 4).squeeze(-1) + bev_mask = bev_mask.new_tensor(np.nan_to_num(bev_mask.cpu().numpy())) + reference_points_cam = reference_points_cam.permute(2, 1, 3, 0, 4) # (num_cam, bs, h*w, num_points_in_pillar, 2) + bev_mask = bev_mask.permute(2, 1, 3, 0, 4).squeeze(-1) # (num_cam, bs, h*w, num_points_in_pillar) return reference_points_cam, bev_mask @@ -155,87 +172,97 @@ def forward(self, bev_query, key, value, - *args, - bev_h=None, - bev_w=None, bev_pos=None, spatial_shapes=None, level_start_index=None, - valid_ratios=None, prev_bev=None, - shift=0., + img_metas=None, + shift=None, **kwargs): - """Forward function for `TransformerDecoder`. + """ + Encoder of bevformer, which contains multiple layers. It can construct BEV features from flattened multi level image features. Args: - bev_query (Tensor): Input BEV query with shape - `(num_query, bs, embed_dims)`. 
- key & value (Tensor): Input multi-cameta features with shape - (num_cam, num_value, bs, embed_dims) - reference_points (Tensor): The reference - points of offset. has shape - (bs, num_query, 4) when as_two_stage, - otherwise has shape ((bs, num_query, 2). - valid_ratios (Tensor): The radios of valid - points on the feature map, has shape - (bs, num_levels, 2) + bev_query (Tensor): Input BEV query with shape (num_query, bs, embed_dims). + key & value (Tensor): Input multi-camera features with shape (num_cam, num_value, bs, embed_dims). + bev_pos (torch.Tensor): bev position embedding with shape (bs, embed_dims, 1, h, w). + spatial_shapes (Tensor): spatial shapes of multi-level features. + level_start_index (Tensor): index of mlvl_feat in all level features + prev_bev (Tensor): shape (bev_h*bev_w, bs, embed_dims) if use temporal self attention. + img_metas (list[dict]): current img meta info. The list has length of batch size. + shift (Tensor): If `use_can_bus`, the `shift` tensor get from `can_bus` in img_metas. + If not, `shift` tensor is bev_queries.new_zeros((1, 2)). Returns: - Tensor: Results with shape [1, num_query, bs, embed_dims] when - return_intermediate is `False`, otherwise it has shape - [num_layers, num_query, bs, embed_dims]. + output (Tensor): forwarded results with shape (bs, num_query, embed_dims). + """ + # Step 1: prepare the reference points. 3d reference points for spatial cross-attention (SCA) and 2d reference points for temporal self-attention (TSA). + bev_h, bev_w = self.bev_h, self.bev_w + _dim = 2 output = bev_query intermediate = [] - - ref_3d = self.get_reference_points( - bev_h, bev_w, self.pc_range[5]-self.pc_range[2], self.num_points_in_pillar, dim='3d', bs=bev_query.size(1), device=bev_query.device, dtype=bev_query.dtype) - ref_2d = self.get_reference_points( - bev_h, bev_w, dim='2d', bs=bev_query.size(1), device=bev_query.device, dtype=bev_query.dtype) - - reference_points_cam, bev_mask = self.point_sampling( - ref_3d, self.pc_range, kwargs['img_metas']) - + pc_range_z = self.pc_range[5] - self.pc_range[2] + + ref_3d = self.get_reference_points(H=bev_h, W=bev_w, Z=pc_range_z, + num_points_in_pillar=self.num_points_in_pillar, + dim='3d', + bs=bev_query.size(1), + device=bev_query.device, + dtype=bev_query.dtype) + # ref_3d: (bs, num_points_in_pillar, h*w, 3) + + ref_2d = self.get_reference_points(H=bev_h, W=bev_w, Z=None, + num_points_in_pillar=None, + dim='2d', + bs=bev_query.size(1), + device=bev_query.device, + dtype=bev_query.dtype) + # ref_2d: (bs, h*w, 1, 2) + + # Step 2: project the 3d reference points to the camera coordinate system and get the binary mask. + reference_points_cam, bev_mask = self.point_sampling(ref_3d, self.pc_range, img_metas=img_metas) + # reference_points_cam: (num_cam, bs, h*w, num_points_in_pillar, 2) + # bev_mask: (num_cam, bs, h*w, num_points_in_pillar) + + # Step 3: prepare the shift reference points for prev BEV features. # bug: this code should be 'shift_ref_2d = ref_2d.clone()', we keep this bug for reproducing our results in paper. 
shift_ref_2d = ref_2d # .clone() shift_ref_2d += shift[:, None, None, :] - # (num_query, bs, embed_dims) -> (bs, num_query, embed_dims) + # Step 4: reshape the bev_query and bev_pos bev_query = bev_query.permute(1, 0, 2) - bev_pos = bev_pos.permute(1, 0, 2) - bs, len_bev, num_bev_level, _ = ref_2d.shape + if bev_pos is not None: bev_pos = bev_pos.permute(1, 0, 2) + + # Step 5: prepare prev_bev and hybird_ref_2d + bs, len_bev, num_bev_level, _ = ref_2d.shape # (bs, h*w, 1, 2) if prev_bev is not None: prev_bev = prev_bev.permute(1, 0, 2) - prev_bev = torch.stack( - [prev_bev, bev_query], 1).reshape(bs*2, len_bev, -1) - hybird_ref_2d = torch.stack([shift_ref_2d, ref_2d], 1).reshape( - bs*2, len_bev, num_bev_level, 2) + prev_bev = torch.stack([prev_bev, bev_query], 1).reshape(bs*2, len_bev, -1) + hybird_ref_2d = torch.stack([shift_ref_2d, ref_2d], 1).reshape(bs*2, len_bev, num_bev_level, _dim) else: - hybird_ref_2d = torch.stack([ref_2d, ref_2d], 1).reshape( - bs*2, len_bev, num_bev_level, 2) - - for lid, layer in enumerate(self.layers): - output = layer( - bev_query, - key, - value, - *args, - bev_pos=bev_pos, - ref_2d=hybird_ref_2d, - ref_3d=ref_3d, - bev_h=bev_h, - bev_w=bev_w, - spatial_shapes=spatial_shapes, - level_start_index=level_start_index, - reference_points_cam=reference_points_cam, - bev_mask=bev_mask, - prev_bev=prev_bev, - **kwargs) - + hybird_ref_2d = torch.stack([ref_2d, ref_2d], 1).reshape(bs*2, len_bev, num_bev_level, _dim) + + # Step 6: run the encoder layers + for layer_idx, layer in enumerate(self.layers): + output = layer(query=bev_query, + key=key, + value=value, + bev_pos=bev_pos, + ref_2d=hybird_ref_2d, + ref_3d=ref_3d, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + reference_points_cam=reference_points_cam, + bev_mask=bev_mask, + prev_bev=prev_bev, + **kwargs) + + # Step 7: update the input `bev_query` of the next layer according to the output of the current layer bev_query = output - if self.return_intermediate: + if self.return_intermediate: # Default value is False intermediate.append(output) - if self.return_intermediate: + if self.return_intermediate: # Default value is False return torch.stack(intermediate) return output @@ -255,17 +282,19 @@ class BEVFormerLayer(MyCustomBaseTransformerLayer): in ffn. Default 0.0. operation_order (tuple[str]): The execution order of operation in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm'). - Default:None + Default:None act_cfg (dict): The activation config for FFNs. Default: `LN` norm_cfg (dict): Config dict for normalization layer. Default: `LN`. ffn_num_fcs (int): The number of fully-connected layers in FFNs. - Default:2. + Default:2. """ def __init__(self, attn_cfgs, feedforward_channels, + bev_h=200, + bev_w=200, ffn_dropout=0.0, operation_order=None, act_cfg=dict(type='ReLU', inplace=True), @@ -282,12 +311,10 @@ def __init__(self, ffn_num_fcs=ffn_num_fcs, **kwargs) self.fp16_enabled = False - assert len(operation_order) == 6 - assert set(operation_order) == set( - ['self_attn', 'norm', 'cross_attn', 'ffn']) + self.bev_h = bev_h + self.bev_w = bev_w - def forward(self, - query, + def forward(self, query, key=None, value=None, bev_pos=None, @@ -298,111 +325,104 @@ def forward(self, key_padding_mask=None, ref_2d=None, ref_3d=None, - bev_h=None, - bev_w=None, reference_points_cam=None, mask=None, spatial_shapes=None, level_start_index=None, + bev_mask=None, prev_bev=None, **kwargs): - """Forward function for `TransformerDecoderLayer`. 
- - **kwargs contains some specific arguments of attentions. - + """ + Forward function for `TransformerDecoderLayer`. Args: - query (Tensor): The input query with shape - [num_queries, bs, embed_dims] if - self.batch_first is False, else - [bs, num_queries embed_dims]. - key (Tensor): The key tensor with shape [num_keys, bs, - embed_dims] if self.batch_first is False, else - [bs, num_keys, embed_dims] . + query (Tensor): The input BEV query with shape (bs, num_queries, embed_dims). + key (Tensor): The key tensor is flattened multi level image feature with shape (num_cam, num_value, bs, embed_dims). value (Tensor): The value tensor with same shape as `key`. - query_pos (Tensor): The positional encoding for `query`. - Default: None. - key_pos (Tensor): The positional encoding for `key`. - Default: None. - attn_masks (List[Tensor] | None): 2D Tensor used in - calculation of corresponding attention. The length of - it should equal to the number of `attention` in - `operation_order`. Default: None. - query_key_padding_mask (Tensor): ByteTensor for `query`, with - shape [bs, num_queries]. Only used in `self_attn` layer. - Defaults to None. - key_padding_mask (Tensor): ByteTensor for `query`, with - shape [bs, num_keys]. Default: None. - + bev_pos (Tensor): bev position embedding with shape (bs, embed_dims, 1, h, w). + ref_2d (Tensor): hybird 2D reference points used in TSA. + If `prev_bev` is None, it has shape (bs, h*w, 1, 2). + else, it has shape (bs*2, h*w, 1, 2). + ref_3d (Tensor): 3D reference points used in SCA with shape (bs, num_points_in_pillar, h*w, 3). + reference_points_cam (Tensor): projected reference points in the camera coordinate system with shape (num_cam, bs, h*w, num_points_in_pillar, 2). + spatial_shapes (Tensor): spatial shapes of multi-level features. + level_start_index (Tensor): index of mlvl_feat in all level features + bev_mask (Tensor): binary mask indicating valid points in `reference_points_cam` with shape (num_cam, bs, h*w, num_points_in_pillar). + prev_bev (Tensor): shape (bs*2, bev_h*bev_w, embed_dims) if use temporal self attention. + Others are None. Returns: - Tensor: forwarded results with shape [num_queries, bs, embed_dims]. + query (Tensor): forwarded query results with shape [num_queries, bs, embed_dims]. 
""" + # Step 1: prepare the index of the current layer norm_index = 0 attn_index = 0 ffn_index = 0 identity = query + + # Step 2: prepare the attention masks if attn_masks is None: attn_masks = [None for _ in range(self.num_attn)] elif isinstance(attn_masks, torch.Tensor): - attn_masks = [ - copy.deepcopy(attn_masks) for _ in range(self.num_attn) - ] + attn_masks = [copy.deepcopy(attn_masks) for _ in range(self.num_attn)] warnings.warn(f'Use same attn_mask in all attentions in ' f'{self.__class__.__name__} ') else: assert len(attn_masks) == self.num_attn, f'The length of ' \ f'attn_masks {len(attn_masks)} must be equal ' \ f'to the number of attention in ' \ - f'operation_order {self.num_attn}' + f'operation_order {self.num_attn}' + # Step 3: run the encoder layers for layer in self.operation_order: - # temporal self attention + # Step 4: run the self-attention layer if layer == 'self_attn': - - query = self.attentions[attn_index]( - query, - prev_bev, - prev_bev, - identity if self.pre_norm else None, - query_pos=bev_pos, - key_pos=bev_pos, - attn_mask=attn_masks[attn_index], - key_padding_mask=query_key_padding_mask, - reference_points=ref_2d, - spatial_shapes=torch.tensor( - [[bev_h, bev_w]], device=query.device), - level_start_index=torch.tensor([0], device=query.device), - **kwargs) + spatial_shapes_tsa = torch.tensor([[self.bev_h, self.bev_w]], device=query.device) + level_start_index_tsa = torch.tensor([0], device=query.device) + + query = self.attentions[attn_index](query=query, + key=prev_bev, + value=prev_bev, + residual=identity if self.pre_norm else None, + query_pos=bev_pos, + key_pos=bev_pos, + attn_mask=attn_masks[attn_index], + key_padding_mask=query_key_padding_mask, + reference_points=ref_2d, + spatial_shapes=spatial_shapes_tsa, + level_start_index=level_start_index_tsa, + **kwargs) + attn_index += 1 - identity = query + identity = query # identity will not go through the normalization layer. + # There is always a normlization layer after the self-attention layer, cross-attention layer and ffn layer. 
elif layer == 'norm': query = self.norms[norm_index](query) norm_index += 1 - # spaital cross attention + # Step 5: run the cross-attention layer elif layer == 'cross_attn': - query = self.attentions[attn_index]( - query, - key, - value, - identity if self.pre_norm else None, - query_pos=query_pos, - key_pos=key_pos, - reference_points=ref_3d, - reference_points_cam=reference_points_cam, - mask=mask, - attn_mask=attn_masks[attn_index], - key_padding_mask=key_padding_mask, - spatial_shapes=spatial_shapes, - level_start_index=level_start_index, - **kwargs) + query = self.attentions[attn_index](query, + key, + value, + residual=identity if self.pre_norm else None, + query_pos=query_pos, + key_pos=key_pos, + reference_points=ref_3d, + reference_points_cam=reference_points_cam, + bev_mask=bev_mask, + mask=mask, + attn_mask=attn_masks[attn_index], + key_padding_mask=key_padding_mask, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + **kwargs) + attn_index += 1 identity = query elif layer == 'ffn': - query = self.ffns[ffn_index]( - query, identity if self.pre_norm else None) + query = self.ffns[ffn_index](query, identity if self.pre_norm else None) ffn_index += 1 return query diff --git a/projects/mmdet3d_plugin/bevformer/modules/encoder_3d.py b/projects/mmdet3d_plugin/bevformer/modules/encoder_3d.py index 5f659f0..4cfec68 100644 --- a/projects/mmdet3d_plugin/bevformer/modules/encoder_3d.py +++ b/projects/mmdet3d_plugin/bevformer/modules/encoder_3d.py @@ -43,8 +43,7 @@ def __init__(self, *args, pc_range=None, num_points_in_voxel=2,num_voxel=8, retu super(BEVFormerEncoder3D, self).__init__(*args, **kwargs) self.return_intermediate = return_intermediate - self.num_points_in_pillar = num_points_in_voxel - self.n_p_in_voxel=num_voxel + self.n_p_in_voxel=num_points_in_voxel self.n_voxel=num_voxel self.pc_range = pc_range self.fp16_enabled = False @@ -62,11 +61,11 @@ def get_reference_points(H, W, Z=8, n_p_in_voxel=2,n_voxel=8, dim='3d', bs=1, de Tensor: reference points used in decoder, has \ shape (bs, num_keys, num_levels, 2). """ - n_p_in_pillar=n_voxel*n_voxel + n_p_in_pillar=n_voxel*n_p_in_voxel # reference points in 3D space, used in spatial cross-attention (SCA) if dim == '3d': - zs = torch.linspace(0.2, Z - 0.2, n_p_in_pillar, dtype=dtype, - device=device).view(n_voxel,n_p_in_pillar, 1, 1).permute(1,0,2,3).expand(n_p_in_voxel,n_voxel, H, W) / Z + zs = torch.linspace(0.2, n_voxel - 0.2, n_p_in_pillar, dtype=dtype, + device=device).view(n_voxel,n_p_in_voxel, 1, 1).permute(1,0,2,3).expand(n_p_in_voxel,n_voxel, H, W) / n_voxel xs = torch.linspace(0.2, W - 0.2, W, dtype=dtype, device=device).view(1,1, 1, W).expand(n_p_in_voxel,n_voxel, H, W) / W @@ -79,16 +78,28 @@ def get_reference_points(H, W, Z=8, n_p_in_voxel=2,n_voxel=8, dim='3d', bs=1, de # reference points on 2D bev plane, used in temporal self-attention (TSA). 
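+        # Note: in this 3D encoder the queries live on a (n_voxel, H, W) grid, so the '2d'
+        # branch below returns (x, y, z) reference points of shape (bs, n_voxel*H*W, 1, 3)
+        # for temporal self-attention rather than plain BEV (x, y) points.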
elif dim == '2d': - ref_y, ref_x = torch.meshgrid( - torch.linspace( - 0.5, H - 0.5, H, dtype=dtype, device=device), - torch.linspace( - 0.5, W - 0.5, W, dtype=dtype, device=device) - ) + ref_z, ref_y, ref_x = torch.meshgrid( + torch.linspace(0.2, + n_voxel - 0.2, + n_voxel, + dtype=dtype, + device=device), + torch.linspace(0.2, + H - 0.2, + H, + dtype=dtype, + device=device), + torch.linspace(0.2, + W - 0.2, + W, + dtype=dtype, + device=device) + ) # shape: (bev_z, bev_h, bev_w) + ref_z = ref_z.reshape(-1)[None] / n_voxel ref_y = ref_y.reshape(-1)[None] / H ref_x = ref_x.reshape(-1)[None] / W - ref_2d = torch.stack((ref_x, ref_y), -1) - ref_2d = ref_2d.repeat(bs, 1, 1).unsqueeze(2) + ref_2d = torch.stack((ref_x, ref_y, ref_z), -1) + ref_2d = ref_2d.repeat(bs, 1, 1).unsqueeze(2) # (bs, num_query, 1, 3) return ref_2d # This function must use fp32!!! @@ -159,6 +170,7 @@ def forward(self, key, value, *args, + bev_z=None, bev_h=None, bev_w=None, bev_pos=None, @@ -198,9 +210,21 @@ def forward(self, reference_points_cam, bev_mask = self.point_sampling( ref_3d, self.pc_range, kwargs['img_metas']) + # # DEBUG_TMP + # import pickle as pkl + # with open('work_dirs/ref_nuscene.pkl', 'wb') as f: + # pkl.dump( + # {"ref_3d":ref_3d, "ref_2d": ref_2d, "reference_points_cam": reference_points_cam, "bev_mask": bev_mask}, + # f + # ) + # breakpoint() + + # bug: this code should be 'shift_ref_2d = ref_2d.clone()', we keep this bug for reproducing our results in paper. shift_ref_2d = ref_2d # .clone() - shift_ref_2d += shift[:, None, None, :] + shift3d = shift.new_zeros(1, 3) + shift3d[:, :2] = shift + shift_ref_2d += shift3d[:, None, None, :] # (num_query, bs, embed_dims) -> (bs, num_query, embed_dims) bev_query = bev_query.permute(1, 0, 2) @@ -211,11 +235,10 @@ def forward(self, prev_bev = torch.stack( [prev_bev, bev_query], 1).reshape(bs*2, len_bev, -1) hybird_ref_2d = torch.stack([shift_ref_2d, ref_2d], 1).reshape( - bs*2, len_bev, num_bev_level, 2) + bs*2, len_bev, num_bev_level, 3) else: hybird_ref_2d = torch.stack([ref_2d, ref_2d], 1).reshape( - bs*2, len_bev, num_bev_level, 2) - + bs*2, len_bev, num_bev_level, 3) for lid, layer in enumerate(self.layers): output = layer( bev_query, @@ -225,6 +248,7 @@ def forward(self, bev_pos=bev_pos, ref_2d=hybird_ref_2d, ref_3d=ref_3d, + bev_z=bev_z, bev_h=bev_h, bev_w=bev_w, spatial_shapes=spatial_shapes, @@ -243,169 +267,169 @@ def forward(self, return output -# -# @TRANSFORMER_LAYER.register_module() -# class BEVFormerLayer(MyCustomBaseTransformerLayer): -# """Implements decoder layer in DETR transformer. -# Args: -# attn_cfgs (list[`mmcv.ConfigDict`] | list[dict] | dict )): -# Configs for self_attention or cross_attention, the order -# should be consistent with it in `operation_order`. If it is -# a dict, it would be expand to the number of attention in -# `operation_order`. -# feedforward_channels (int): The hidden dimension for FFNs. -# ffn_dropout (float): Probability of an element to be zeroed -# in ffn. Default 0.0. -# operation_order (tuple[str]): The execution order of operation -# in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm'). -# Default:None -# act_cfg (dict): The activation config for FFNs. Default: `LN` -# norm_cfg (dict): Config dict for normalization layer. -# Default: `LN`. -# ffn_num_fcs (int): The number of fully-connected layers in FFNs. -# Default:2. 
-# """ -# -# def __init__(self, -# attn_cfgs, -# feedforward_channels, -# ffn_dropout=0.0, -# operation_order=None, -# act_cfg=dict(type='ReLU', inplace=True), -# norm_cfg=dict(type='LN'), -# ffn_num_fcs=2, -# **kwargs): -# super(BEVFormerLayer, self).__init__( -# attn_cfgs=attn_cfgs, -# feedforward_channels=feedforward_channels, -# ffn_dropout=ffn_dropout, -# operation_order=operation_order, -# act_cfg=act_cfg, -# norm_cfg=norm_cfg, -# ffn_num_fcs=ffn_num_fcs, -# **kwargs) -# self.fp16_enabled = False -# assert len(operation_order) == 6 -# assert set(operation_order) == set( -# ['self_attn', 'norm', 'cross_attn', 'ffn']) -# -# def forward(self, -# query, -# key=None, -# value=None, -# bev_pos=None, -# query_pos=None, -# key_pos=None, -# attn_masks=None, -# query_key_padding_mask=None, -# key_padding_mask=None, -# ref_2d=None, -# ref_3d=None, -# bev_h=None, -# bev_w=None, -# reference_points_cam=None, -# mask=None, -# spatial_shapes=None, -# level_start_index=None, -# prev_bev=None, -# **kwargs): -# """Forward function for `TransformerDecoderLayer`. -# -# **kwargs contains some specific arguments of attentions. -# -# Args: -# query (Tensor): The input query with shape -# [num_queries, bs, embed_dims] if -# self.batch_first is False, else -# [bs, num_queries embed_dims]. -# key (Tensor): The key tensor with shape [num_keys, bs, -# embed_dims] if self.batch_first is False, else -# [bs, num_keys, embed_dims] . -# value (Tensor): The value tensor with same shape as `key`. -# query_pos (Tensor): The positional encoding for `query`. -# Default: None. -# key_pos (Tensor): The positional encoding for `key`. -# Default: None. -# attn_masks (List[Tensor] | None): 2D Tensor used in -# calculation of corresponding attention. The length of -# it should equal to the number of `attention` in -# `operation_order`. Default: None. -# query_key_padding_mask (Tensor): ByteTensor for `query`, with -# shape [bs, num_queries]. Only used in `self_attn` layer. -# Defaults to None. -# key_padding_mask (Tensor): ByteTensor for `query`, with -# shape [bs, num_keys]. Default: None. -# -# Returns: -# Tensor: forwarded results with shape [num_queries, bs, embed_dims]. 
-# """ -# -# norm_index = 0 -# attn_index = 0 -# ffn_index = 0 -# identity = query -# if attn_masks is None: -# attn_masks = [None for _ in range(self.num_attn)] -# elif isinstance(attn_masks, torch.Tensor): -# attn_masks = [ -# copy.deepcopy(attn_masks) for _ in range(self.num_attn) -# ] -# warnings.warn(f'Use same attn_mask in all attentions in ' -# f'{self.__class__.__name__} ') -# else: -# assert len(attn_masks) == self.num_attn, f'The length of ' \ -# f'attn_masks {len(attn_masks)} must be equal ' \ -# f'to the number of attention in ' \ -# f'operation_order {self.num_attn}' -# -# for layer in self.operation_order: -# # temporal self attention -# if layer == 'self_attn': -# -# query = self.attentions[attn_index]( -# query, -# prev_bev, -# prev_bev, -# identity if self.pre_norm else None, -# query_pos=bev_pos, -# key_pos=bev_pos, -# attn_mask=attn_masks[attn_index], -# key_padding_mask=query_key_padding_mask, -# reference_points=ref_2d, -# spatial_shapes=torch.tensor( -# [[bev_h, bev_w]], device=query.device), -# level_start_index=torch.tensor([0], device=query.device), -# **kwargs) -# attn_index += 1 -# identity = query -# -# elif layer == 'norm': -# query = self.norms[norm_index](query) -# norm_index += 1 -# -# # spaital cross attention -# elif layer == 'cross_attn': -# query = self.attentions[attn_index]( -# query, -# key, -# value, -# identity if self.pre_norm else None, -# query_pos=query_pos, -# key_pos=key_pos, -# reference_points=ref_3d, -# reference_points_cam=reference_points_cam, -# mask=mask, -# attn_mask=attn_masks[attn_index], -# key_padding_mask=key_padding_mask, -# spatial_shapes=spatial_shapes, -# level_start_index=level_start_index, -# **kwargs) -# attn_index += 1 -# identity = query -# -# elif layer == 'ffn': -# query = self.ffns[ffn_index]( -# query, identity if self.pre_norm else None) -# ffn_index += 1 -# -# return query +@TRANSFORMER_LAYER.register_module() +class OccFormerLayer3D(MyCustomBaseTransformerLayer): + """Implements decoder layer in DETR transformer. + Args: + attn_cfgs (list[`mmcv.ConfigDict`] | list[dict] | dict )): + Configs for self_attention or cross_attention, the order + should be consistent with it in `operation_order`. If it is + a dict, it would be expand to the number of attention in + `operation_order`. + feedforward_channels (int): The hidden dimension for FFNs. + ffn_dropout (float): Probability of an element to be zeroed + in ffn. Default 0.0. + operation_order (tuple[str]): The execution order of operation + in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm'). + Default:None + act_cfg (dict): The activation config for FFNs. Default: `LN` + norm_cfg (dict): Config dict for normalization layer. + Default: `LN`. + ffn_num_fcs (int): The number of fully-connected layers in FFNs. + Default:2. 
+ """ + + def __init__(self, + attn_cfgs, + feedforward_channels, + ffn_dropout=0.0, + operation_order=None, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN'), + ffn_num_fcs=2, + **kwargs): + super(OccFormerLayer3D, self).__init__( + attn_cfgs=attn_cfgs, + feedforward_channels=feedforward_channels, + ffn_dropout=ffn_dropout, + operation_order=operation_order, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + ffn_num_fcs=ffn_num_fcs, + **kwargs) + self.fp16_enabled = False + assert len(operation_order) == 6 + assert set(operation_order) == set( + ['self_attn', 'norm', 'cross_attn', 'ffn']) + + def forward(self, + query, + key=None, + value=None, + bev_pos=None, + query_pos=None, + key_pos=None, + attn_masks=None, + query_key_padding_mask=None, + key_padding_mask=None, + ref_2d=None, + ref_3d=None, + bev_z=None, + bev_h=None, + bev_w=None, + reference_points_cam=None, + mask=None, + spatial_shapes=None, + level_start_index=None, + prev_bev=None, + **kwargs): + """Forward function for `TransformerDecoderLayer`. + + **kwargs contains some specific arguments of attentions. + + Args: + query (Tensor): The input query with shape + [num_queries, bs, embed_dims] if + self.batch_first is False, else + [bs, num_queries embed_dims]. + key (Tensor): The key tensor with shape [num_keys, bs, + embed_dims] if self.batch_first is False, else + [bs, num_keys, embed_dims] . + value (Tensor): The value tensor with same shape as `key`. + query_pos (Tensor): The positional encoding for `query`. + Default: None. + key_pos (Tensor): The positional encoding for `key`. + Default: None. + attn_masks (List[Tensor] | None): 2D Tensor used in + calculation of corresponding attention. The length of + it should equal to the number of `attention` in + `operation_order`. Default: None. + query_key_padding_mask (Tensor): ByteTensor for `query`, with + shape [bs, num_queries]. Only used in `self_attn` layer. + Defaults to None. + key_padding_mask (Tensor): ByteTensor for `query`, with + shape [bs, num_keys]. Default: None. + + Returns: + Tensor: forwarded results with shape [num_queries, bs, embed_dims]. 
+ """ + + norm_index = 0 + attn_index = 0 + ffn_index = 0 + identity = query + if attn_masks is None: + attn_masks = [None for _ in range(self.num_attn)] + elif isinstance(attn_masks, torch.Tensor): + attn_masks = [ + copy.deepcopy(attn_masks) for _ in range(self.num_attn) + ] + warnings.warn(f'Use same attn_mask in all attentions in ' + f'{self.__class__.__name__} ') + else: + assert len(attn_masks) == self.num_attn, f'The length of ' \ + f'attn_masks {len(attn_masks)} must be equal ' \ + f'to the number of attention in ' \ + f'operation_order {self.num_attn}' + + for layer in self.operation_order: + # temporal self attention + if layer == 'self_attn': + + query = self.attentions[attn_index]( + query, + prev_bev, + prev_bev, + identity if self.pre_norm else None, + query_pos=bev_pos, + key_pos=bev_pos, + attn_mask=attn_masks[attn_index], + key_padding_mask=query_key_padding_mask, + reference_points=ref_2d, + spatial_shapes=torch.tensor( + [[bev_z,bev_h, bev_w]], device=query.device), + level_start_index=torch.tensor([0], device=query.device), + **kwargs) + attn_index += 1 + identity = query + + elif layer == 'norm': + query = self.norms[norm_index](query) + norm_index += 1 + + # spaital cross attention + elif layer == 'cross_attn': + query = self.attentions[attn_index]( + query, + key, + value, + identity if self.pre_norm else None, + query_pos=query_pos, + key_pos=key_pos, + reference_points=ref_3d, + reference_points_cam=reference_points_cam, + mask=mask, + attn_mask=attn_masks[attn_index], + key_padding_mask=key_padding_mask, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + **kwargs) + attn_index += 1 + identity = query + + elif layer == 'ffn': + query = self.ffns[ffn_index]( + query, identity if self.pre_norm else None) + ffn_index += 1 + + return query diff --git a/projects/mmdet3d_plugin/bevformer/modules/encoder_test.py b/projects/mmdet3d_plugin/bevformer/modules/encoder_3d_conv.py similarity index 65% rename from projects/mmdet3d_plugin/bevformer/modules/encoder_test.py rename to projects/mmdet3d_plugin/bevformer/modules/encoder_3d_conv.py index e1ba331..120d938 100644 --- a/projects/mmdet3d_plugin/bevformer/modules/encoder_test.py +++ b/projects/mmdet3d_plugin/bevformer/modules/encoder_3d_conv.py @@ -21,12 +21,14 @@ import mmcv from mmcv.utils import TORCH_VERSION, digit_version from mmcv.utils import ext_loader +import torch.nn as nn +from mmcv.cnn import PLUGIN_LAYERS, Conv2d,Conv3d, ConvModule, caffe2_xavier_init ext_module = ext_loader.load_ext( '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward']) @TRANSFORMER_LAYER_SEQUENCE.register_module() -class BEVFormerEncoderTest(TransformerLayerSequence): +class BEVFormerEncoder3DConv(TransformerLayerSequence): """ Attention with both self and cross @@ -37,75 +39,99 @@ class BEVFormerEncoderTest(TransformerLayerSequence): `LN`. 
""" - def __init__(self, *args, pc_range=None, num_points_in_pillar=4, return_intermediate=False, dataset_type='nuscenes', + def __init__(self, *args,embed_dims=256, pc_range=None, num_points_in_voxel=2,num_voxel=8,bev_h=100,bev_w=100,return_intermediate=False, dataset_type='nuscenes', + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg_3d=dict(type='BN3d', ), **kwargs): - super(BEVFormerEncoderTest, self).__init__(*args, **kwargs) + super(BEVFormerEncoder3DConv, self).__init__(*args, **kwargs) self.return_intermediate = return_intermediate - - self.num_points_in_pillar = num_points_in_pillar + self.embed_dims=embed_dims + layer_convs=[] + use_bias_3d = norm_cfg_3d is None + for i in range(self.num_layers): + layer_convs.append( + ConvModule( + self.embed_dims, + self.embed_dims, + kernel_size=3, + stride=1, + padding=1, + bias=use_bias_3d, + conv_cfg=dict(type='Conv3d'), + norm_cfg=norm_cfg_3d, + act_cfg=act_cfg), + ) + self.layer_convs=nn.Sequential(*layer_convs) + self.n_p_in_voxel=num_points_in_voxel + self.n_voxel=num_voxel self.pc_range = pc_range self.fp16_enabled = False @staticmethod - def get_reference_points(H, W, Z=8, num_points_in_pillar=4, dim='3d', bs=1, device='cuda', dtype=torch.float): + def get_reference_points(H, W, Z=8, n_p_in_voxel=2,n_voxel=8, dim='3d', bs=1, device='cuda', dtype=torch.float): """Get the reference points used in SCA and TSA. Args: H, W: spatial shape of bev. Z: hight of pillar. D: sample D points uniformly from each pillar. + device (obj:`device`): The device where reference_points should be. Returns: Tensor: reference points used in decoder, has \ shape (bs, num_keys, num_levels, 2). """ - + n_p_in_pillar=n_voxel*n_p_in_voxel # reference points in 3D space, used in spatial cross-attention (SCA) + if dim == '3d': - zs = torch.linspace(0.5, Z - 0.5, num_points_in_pillar, dtype=dtype, - device=device).view(-1, 1, 1).expand(num_points_in_pillar, H, W) / Z - xs = torch.linspace(0.5, W - 0.5, W, dtype=dtype, - device=device).view(1, 1, W).expand(num_points_in_pillar, H, W) / W - ys = torch.linspace(0.5, H - 0.5, H, dtype=dtype, - device=device).view(1, H, 1).expand(num_points_in_pillar, H, W) / H - ref_3d = torch.stack((xs, ys, zs), -1) - ref_3d = ref_3d.permute(0, 3, 1, 2).flatten(2).permute(0, 2, 1) - ref_3d = ref_3d[None].repeat(bs, 1, 1, 1) + zs = torch.linspace(0.2, n_voxel - 0.2, n_p_in_pillar, dtype=dtype, + device=device).view(n_voxel,n_p_in_voxel, 1, 1).permute(1,0,2,3).expand(n_p_in_voxel,n_voxel, H, W) / n_voxel + + xs = torch.linspace(0.2, W - 0.2, W, dtype=dtype, + device=device).view(1,1, 1, W).expand(n_p_in_voxel,n_voxel, H, W) / W + ys = torch.linspace(0.2, H - 0.2, H, dtype=dtype, + device=device).view(1,1, H, 1).expand(n_p_in_voxel,n_voxel, H, W) / H + ref_3d = torch.stack((xs, ys, zs), -1) #shape: (num_points_in_pillar,h,w,3) + ref_3d = ref_3d.permute(0, 4, 1, 2, 3).flatten(2).permute(0, 2, 1) + ref_3d = ref_3d[None].repeat(bs, 1, 1, 1) #shape: (bs,num_points_in_pillar,h*w,3) return ref_3d # reference points on 2D bev plane, used in temporal self-attention (TSA). 
elif dim == '2d': - ref_y, ref_x = torch.meshgrid( - torch.linspace( - 0.5, H - 0.5, H, dtype=dtype, device=device), - torch.linspace( - 0.5, W - 0.5, W, dtype=dtype, device=device) - ) + ref_z, ref_y, ref_x = torch.meshgrid( + torch.linspace(0.2, + n_voxel - 0.2, + n_voxel, + dtype=dtype, + device=device), + torch.linspace(0.2, + H - 0.2, + H, + dtype=dtype, + device=device), + torch.linspace(0.2, + W - 0.2, + W, + dtype=dtype, + device=device) + ) # shape: (bev_z, bev_h, bev_w) + ref_z = ref_z.reshape(-1)[None] / n_voxel ref_y = ref_y.reshape(-1)[None] / H ref_x = ref_x.reshape(-1)[None] / W - ref_2d = torch.stack((ref_x, ref_y), -1) - ref_2d = ref_2d.repeat(bs, 1, 1).unsqueeze(2) + ref_2d = torch.stack((ref_x, ref_y, ref_z), -1) + ref_2d = ref_2d.repeat(bs, 1, 1).unsqueeze(2) # (bs, num_query, 1, 3) return ref_2d # This function must use fp32!!! @force_fp32(apply_to=('reference_points', 'img_metas')) def point_sampling(self, reference_points, pc_range, img_metas): ego2lidar=img_metas[0]['ego2lidar'] - sample_token=img_metas[0]['sample_idx'] - from copy import deepcopy - ego2lidar_=deepcopy(ego2lidar) - lidar2img = [] for img_meta in img_metas: lidar2img.append(img_meta['lidar2img']) - lidar2img_ = deepcopy(lidar2img[0]) - test_martrix=dict(lidar2img=lidar2img_,ego2lidar=ego2lidar_) - import pickle as pkl - with open('/home/txy/martix_test/{}.pkl'.format(sample_token),'wb') as f: - pkl.dump(test_martrix,f) - print(sample_token) lidar2img = np.asarray(lidar2img) lidar2img = reference_points.new_tensor(lidar2img) # (B, N, 4, 4) ego2lidar = reference_points.new_tensor(ego2lidar) @@ -123,12 +149,12 @@ def point_sampling(self, reference_points, pc_range, img_metas): reference_points = torch.cat( (reference_points, torch.ones_like(reference_points[..., :1])), -1) - reference_points = reference_points.permute(1, 0, 2, 3) - D, B, num_query = reference_points.size()[:3] + reference_points = reference_points.permute(1, 0, 2, 3) #shape: (num_points_in_pillar,bs,h*w,4) + D, B, num_query = reference_points.size()[:3] # D=num_points_in_pillar , num_query=h*w num_cam = lidar2img.size(1) reference_points = reference_points.view( - D, B, 1, num_query, 4).repeat(1, 1, num_cam, 1, 1).unsqueeze(-1) + D, B, 1, num_query, 4).repeat(1, 1, num_cam, 1, 1).unsqueeze(-1) #shape: (num_points_in_pillar,bs,num_cam,h*w,4) lidar2img = lidar2img.view( 1, B, num_cam, 1, 4, 4).repeat(D, 1, 1, num_query, 1, 1) @@ -153,7 +179,9 @@ def point_sampling(self, reference_points, pc_range, img_metas): bev_mask = bev_mask.new_tensor( np.nan_to_num(bev_mask.cpu().numpy())) - reference_points_cam = reference_points_cam.permute(2, 1, 3, 0, 4) + reference_points_cam = reference_points_cam.permute(2, 1, 3, 0, 4) #shape: (num_cam,bs,h*w,num_points_in_pillar,2) + + bev_mask = bev_mask.permute(2, 1, 3, 0, 4).squeeze(-1) return reference_points_cam, bev_mask @@ -164,6 +192,7 @@ def forward(self, key, value, *args, + bev_z=None, bev_h=None, bev_w=None, bev_pos=None, @@ -196,16 +225,20 @@ def forward(self, intermediate = [] ref_3d = self.get_reference_points( - bev_h, bev_w, self.pc_range[5]-self.pc_range[2], self.num_points_in_pillar, dim='3d', bs=bev_query.size(1), device=bev_query.device, dtype=bev_query.dtype) + bev_h, bev_w, self.pc_range[5]-self.pc_range[2],self.n_p_in_voxel,self.n_voxel, dim='3d', bs=bev_query.size(1), device=bev_query.device, dtype=bev_query.dtype) ref_2d = self.get_reference_points( bev_h, bev_w, dim='2d', bs=bev_query.size(1), device=bev_query.device, dtype=bev_query.dtype) + # print('ref_3d',ref_3d.shape) + 
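+        # Here ref_3d has shape (bs, n_p_in_voxel, n_voxel*bev_h*bev_w, 3), i.e. one query per
+        # voxel; point_sampling projects every point into each camera and returns
+        # reference_points_cam of shape (num_cam, bs, n_voxel*bev_h*bev_w, n_p_in_voxel, 2)
+        # together with the validity mask bev_mask.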
reference_points_cam, bev_mask = self.point_sampling( ref_3d, self.pc_range, kwargs['img_metas']) # bug: this code should be 'shift_ref_2d = ref_2d.clone()', we keep this bug for reproducing our results in paper. shift_ref_2d = ref_2d # .clone() - shift_ref_2d += shift[:, None, None, :] + shift3d = shift.new_zeros(1, 3) + shift3d[:, :2] = shift + shift_ref_2d += shift3d[:, None, None, :] # (num_query, bs, embed_dims) -> (bs, num_query, embed_dims) bev_query = bev_query.permute(1, 0, 2) @@ -216,11 +249,10 @@ def forward(self, prev_bev = torch.stack( [prev_bev, bev_query], 1).reshape(bs*2, len_bev, -1) hybird_ref_2d = torch.stack([shift_ref_2d, ref_2d], 1).reshape( - bs*2, len_bev, num_bev_level, 2) + bs*2, len_bev, num_bev_level, 3) else: hybird_ref_2d = torch.stack([ref_2d, ref_2d], 1).reshape( - bs*2, len_bev, num_bev_level, 2) - + bs*2, len_bev, num_bev_level, 3) for lid, layer in enumerate(self.layers): output = layer( bev_query, @@ -230,6 +262,7 @@ def forward(self, bev_pos=bev_pos, ref_2d=hybird_ref_2d, ref_3d=ref_3d, + bev_z=bev_z, bev_h=bev_h, bev_w=bev_w, spatial_shapes=spatial_shapes, @@ -239,6 +272,11 @@ def forward(self, prev_bev=prev_bev, **kwargs) + bs, _, c = output.shape + output = output.permute(0, 2, 1).reshape(bs, c, bev_z, bev_h, bev_w) + output = self.layer_convs[lid](output) + output = output.flatten(2).permute(0, 2, 1) + bev_query = output if self.return_intermediate: intermediate.append(output) diff --git a/projects/mmdet3d_plugin/bevformer/modules/encoder_waymo.py b/projects/mmdet3d_plugin/bevformer/modules/encoder_waymo.py new file mode 100644 index 0000000..89baf89 --- /dev/null +++ b/projects/mmdet3d_plugin/bevformer/modules/encoder_waymo.py @@ -0,0 +1,556 @@ + +# --------------------------------------------- +# Copyright (c) OpenMMLab. All rights reserved. +# --------------------------------------------- +# Modified by Zhiqi Li +# --------------------------------------------- + +from projects.mmdet3d_plugin.models.utils.bricks import run_time +from projects.mmdet3d_plugin.models.utils.visual import save_tensor +from .custom_base_transformer_layer import MyCustomBaseTransformerLayer +import copy +import warnings +from mmcv.cnn.bricks.registry import (ATTENTION, + TRANSFORMER_LAYER, + TRANSFORMER_LAYER_SEQUENCE) +from mmcv.cnn.bricks.transformer import TransformerLayerSequence +from mmcv.runner import force_fp32, auto_fp16 +import numpy as np +import torch +import cv2 as cv +import mmcv +from mmcv.utils import TORCH_VERSION, digit_version +from mmcv.utils import ext_loader +ext_module = ext_loader.load_ext( + '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward']) + + +@TRANSFORMER_LAYER_SEQUENCE.register_module() +class BEVFormerEncoderWaymo(TransformerLayerSequence): + + """ + Attention with both self and cross + Implements the decoder in DETR transformer. + Args: + return_intermediate (bool): Whether to return intermediate outputs. + coder_norm_cfg (dict): Config of last normalization layer. Default: `LN`. 
+ """ + + def __init__(self, *args, + volume_flag=False, + bev_z=1, + bev_h=200, + bev_w=200, + total_z=16, + pc_range=None, + num_points_in_voxel=None, + num_voxel=None, + num_points_in_pillar=None, + return_intermediate=False, + dataset_type='waymo', + **kwargs): + + super(BEVFormerEncoderWaymo, self).__init__(*args, **kwargs) + self.return_intermediate = return_intermediate + if volume_flag: + assert num_voxel != -1 and num_points_in_voxel != -1 + else: + assert num_points_in_pillar != -1 + self.num_points_in_voxel=num_points_in_voxel + self.num_voxel=num_voxel + self.num_points_in_pillar = num_points_in_pillar + self.bev_z = bev_z + self.bev_h = bev_h + self.bev_w = bev_w + self.totol_z = total_z + self.pc_range = pc_range + self.fp16_enabled = False + self.volume_flag = volume_flag + self.dataset_type = dataset_type + + @staticmethod + def get_reference_points(volume_flag, + H, W, Z, + num_points_in_voxel, + num_voxel, + num_points_in_pillar, + dim, + bs=1, + device='cuda', + dtype=torch.float): + """ + Get the reference points used in SCA and TSA. + Args: + H, W: spatial shape of bev. + Z: hight of pillar. only used if dim == '3d'. + num_points_in_voxel: number of points in a voxel. + num_voxel: number of voxels in a pillar. + num_points_in_pillar: number of points in a pillar. + If volume_flag is True, num_points_in_pillar is equal to num_points_in_voxel * num_voxel. + If volume_flag is False, only num_points_in_pillar is used. + Returns: + ref_3d (Tensor): If dim==`3d`, this function return 3d reference points used in `SCA`. + It has shape (bs, num_points_in_pillar, h*w, 3). + ref_2d (Tensor): If dim==`2d`, this function return 2d reference points used in `TSA`. + It has shape (bs, h*w, 1, 2). + """ + + if volume_flag: # Default to be False + n_p_in_pillar = num_voxel * num_points_in_voxel + if dim == '3d': + zs = torch.linspace(0.2, num_voxel - 0.2, n_p_in_pillar, dtype=dtype, + device=device).view(num_voxel,num_points_in_voxel, 1, 1).permute(1,0,2,3).expand(num_points_in_voxel,n_voxel, H, W) / n_voxel + xs = torch.linspace(0.2, W - 0.2, W, dtype=dtype, + device=device).view(1,1, 1, W).expand(num_points_in_voxel,num_voxel, H, W) / W + ys = torch.linspace(0.2, H - 0.2, H, dtype=dtype, + device=device).view(1,1, H, 1).expand(num_points_in_voxel,num_voxel, H, W) / H + ref_3d = torch.stack((xs, ys, zs), -1) + ref_3d = ref_3d.permute(0, 4, 1, 2, 3).flatten(2).permute(0, 2, 1) + ref_3d = ref_3d.unsqueeze(0).repeat(bs, 1, 1, 1) + return ref_3d + + # reference points on 2D bev plane, used in temporal self-attention (TSA). + elif dim == '2d': + ref_z, ref_y, ref_x = torch.meshgrid( + torch.linspace(0.2, + num_voxel - 0.2, + num_voxel, + dtype=dtype, + device=device), + torch.linspace(0.2, + H - 0.2, + H, + dtype=dtype, + device=device), + torch.linspace(0.2, + W - 0.2, + W, + dtype=dtype, + device=device) + ) # shape: (bev_z, bev_h, bev_w) + ref_z = ref_z.reshape(-1).unsqueeze(0) / num_voxel + ref_y = ref_y.reshape(-1).unsqueeze(0) / H + ref_x = ref_x.reshape(-1).unsqueeze(0) / W + ref_2d = torch.stack((ref_x, ref_y, ref_z), -1) + ref_2d = ref_2d.repeat(bs, 1, 1).unsqueeze(2) + return ref_2d + else: + # reference points in 3D space, used in spatial cross-attention (SCA) + if dim == '3d': + xs = torch.linspace(0.5, W - 0.5, W, dtype=dtype, device=device) + ys = torch.linspace(0.5, H - 0.5, H, dtype=dtype, device=device) + zs = torch.linspace(0.5, Z - 0.5, num_points_in_pillar, dtype=dtype, device=device) + ''' + Here zs, xs and ys are all normalized to [0, 1]. 
But I still confused why `W` and `H` are bev_h and bev_w, but `Z` is point cloud range. + By reading the code in `point_sampling` function, here the reference points are in the ego(lidar) space coordinate system. + This is equivalent to the following code: + xs = torch.linspace(pc_range[0] + voxel_size/2, pc_range[3] + voxel_size/2, W, dtype=dtype, device=device) - pc_range[0] + xs = xs / (pc_range[3] - pc_range[0]) + ys = torch.linspace(pc_range[1] + voxel_size/2, pc_range[4] + voxel_size/2, H, dtype=dtype, device=device) - pc_range[1] + ys = ys / (pc_range[4] - pc_range[1]) + + So I think z should also be like follows: + zs = torch.linspace(pc_range[2] + voxel_size/2, pc_range[5] + voxel_size/2, 4, dtype=dtype, device=device) - pc_range[2] + zs = zs / (pc_range[5] - pc_range[2]) + so, here the `Z` should be `self.total_z` but not `self.pc_range[5] - self.pc_range[2]` + But the `BEVFormer` github repo also use `self.pc_range[5] - self.pc_range[2]` as `Z`. Why? + I do not modify this code. But I think this is a bug. + ''' + + xs = xs.view(1, 1, W).expand(num_points_in_pillar, H, W) / W + ys = ys.view(1, H, 1).expand(num_points_in_pillar, H, W) / H + zs = zs.view(num_points_in_pillar, 1, 1).expand(num_points_in_pillar, H, W) / Z + + ref_3d = torch.stack((xs, ys, zs), -1) # (num_points_in_pillar, H, W, 3) + ref_3d = ref_3d.reshape(num_points_in_pillar, H * W, 3) + ref_3d = ref_3d.unsqueeze(0).repeat(bs, 1, 1, 1) # (bs, num_points_in_pillar, h*w, 3) + + return ref_3d + + # reference points on 2D bev plane, used in temporal self-attention (TSA). + elif dim == '2d': + ref_y, ref_x = torch.meshgrid( + torch.linspace(0.5, H - 0.5, H, dtype=dtype, device=device), + torch.linspace(0.5, W - 0.5, W, dtype=dtype, device=device), + indexing='ij', + ) + ref_y = ref_y.reshape(-1).unsqueeze(0) / H + ref_x = ref_x.reshape(-1).unsqueeze(0) / W + ref_2d = torch.stack((ref_x, ref_y), -1) + ref_2d = ref_2d.repeat(bs, 1, 1).unsqueeze(2) # (bs, h*w, 1, 2) + + return ref_2d + + @force_fp32(apply_to=('reference_points', 'img_metas')) + def point_sampling(self, reference_points, pc_range, img_metas, dataset_type='waymo'): + """ + This method performs point sampling by converting reference points from a 3D coordinate system to a 2D BEV (Bird's Eye View) coordinate system. + It takes 3d reference points(ref_3d), point_cloud_range(pc_range), and img_metas as inputs, + and it returns sampled reference points in the BEV coordinate system (reference_points_cam) and a binary mask indicating valid BEV points (bev_mask). + Args: + reference_points (Tensor): 3d reference points with shape (bs, num_points_in_pillar, h*w, 3). + pc_range (List): [x1, y1, z1, x2, y2, z2], the range of point cloud. + img_metas (list[dict]): current img meta info. The list has length of batch size. + dataset_type (str): The dataset type. Default: 'waymo'. + Returns: + reference_points_cam (Tensor): projected reference points in the camera coordinate system with shape (num_cam, bs, h*w, num_points_in_pillar, 2). + bev_mask (Tensor): binary mask indicating valid points in `reference_points_cam` with shape (num_cam, bs, h*w, num_points_in_pillar). 
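        Example (illustrative sketch only; the pc_range value and the identity lidar2img
        matrix below are assumptions for illustration, not values taken from this repo's configs):

            import torch

            pc_range = [-40.0, -40.0, -1.0, 40.0, 40.0, 5.4]
            ref = torch.rand(1, 4, 200 * 200, 3)                       # (bs, num_points_in_pillar, h*w, 3), in [0, 1]
            xyz1 = torch.cat([ref, torch.ones_like(ref[..., :1])], dim=-1)
            for i in range(3):                                         # denormalize x, y, z back to metres
                xyz1[..., i] = xyz1[..., i] * (pc_range[i + 3] - pc_range[i]) + pc_range[i]
            lidar2img = torch.eye(4)                                   # stand-in for img_metas[0]['lidar2img'][cam]
            pts_cam = xyz1 @ lidar2img.T                               # homogeneous projection into the image plane
            bev_mask = pts_cam[..., 2:3] > 1e-5                        # keep only points in front of the camera
            uv = pts_cam[..., :2] / pts_cam[..., 2:3].clamp(min=1e-5)  # later normalized by the image shape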
+ """ + + # Step 1: prepare transformation matrix + lidar2img = [img_meta['lidar2img'] for img_meta in img_metas] + lidar2img = reference_points.new_tensor(np.asarray(lidar2img)) # (bs, num_cam, 4, 4) + + # Step 2: denormalize the reference points(convert it into the ego system coordinate) + reference_points = reference_points.clone() + reference_points = torch.cat((reference_points, torch.ones_like(reference_points[..., :1])), -1) + + reference_points[..., 0:1] = reference_points[..., 0:1] * (pc_range[3] - pc_range[0]) + pc_range[0] + reference_points[..., 1:2] = reference_points[..., 1:2] * (pc_range[4] - pc_range[1]) + pc_range[1] + reference_points[..., 2:3] = reference_points[..., 2:3] * (pc_range[5] - pc_range[2]) + pc_range[2] + reference_points = reference_points.permute(1, 0, 2, 3) # shape: (num_points_in_pillar, bs, h*w, 4) + + # Step 3: reshape transform matrix and reference points + num_points_in_pillar, bs, num_query_HW, _ = reference_points.size() + num_cam = lidar2img.size(1) + reference_points = reference_points.unsqueeze(2).repeat(1, 1, num_cam, 1, 1) # (num_points_in_pillar, bs, num_cam, h*w, 4) + reference_points = reference_points.unsqueeze(-1) # (num_points_in_pillar, bs, num_cam, h*w, 4, 1) + lidar2img = lidar2img.view(1, bs, num_cam, 1, 4, 4).repeat(num_points_in_pillar, 1, 1, num_query_HW, 1, 1) # (num_points_in_pillar, bs, num_cam, h*w, 4, 4) + + # Step 4: project the reference points to the image plane + assert dataset_type == 'waymo', 'Only support waymo dataset' + lidar2img = lidar2img.to(torch.float32) + reference_points = reference_points.to(torch.float32) + reference_points_cam = torch.matmul(lidar2img, reference_points).squeeze(-1) # (num_points_in_pillar, bs, num_cam, h*w, 4) + + # Step 5: normalize the camera reference points + eps = 1e-5 + bev_mask = (reference_points_cam[..., 2:3] > eps) # use bev_mask to flitter out the points that are behind the camera. It has shape (num_points_in_pillar, bs, num_cam, h*w) + reference_points_cam = reference_points_cam[..., 0:2] / torch.maximum( + reference_points_cam[..., 2:3], torch.ones_like(reference_points_cam[..., 2:3]) * eps) # normalize x and y axis + + reference_points_cam[..., 0] /= img_metas[0]['img_shape'][0][1] + reference_points_cam[..., 1] /= img_metas[0]['img_shape'][0][0] + + # Step 6: use bev_mask to filter out the points that are outside the image boundary + bev_mask = (bev_mask + & (reference_points_cam[..., 1:2] > 0.0) + & (reference_points_cam[..., 1:2] < 1.0) + & (reference_points_cam[..., 0:1] < 1.0) + & (reference_points_cam[..., 0:1] > 0.0) + ) + + if digit_version(TORCH_VERSION) >= digit_version('1.8'): + bev_mask = torch.nan_to_num(bev_mask) + else: + bev_mask = bev_mask.new_tensor(np.nan_to_num(bev_mask.cpu().numpy())) + reference_points_cam = reference_points_cam.permute(2, 1, 3, 0, 4) # (num_cam, bs, h*w, num_points_in_pillar, 2) + bev_mask = bev_mask.permute(2, 1, 3, 0, 4).squeeze(-1) # (num_cam, bs, h*w, num_points_in_pillar) + + return reference_points_cam, bev_mask + + @auto_fp16() + def forward(self, + bev_query=None, + key=None, + value=None, + bev_pos=None, + spatial_shapes=None, + level_start_index=None, + prev_bev=None, + img_metas=None, + shift=None, + topk_mask=None, + **kwargs): + """ + Encoder of bevformer, which contains multiple layers. It can construct BEV features from flattened multi level image features. + Args: + bev_query (Tensor): Input BEV query with shape (num_query, bs, embed_dims). 
+ key & value (Tensor): Input multi-camera features with shape (num_cam, num_value, bs, embed_dims). + bev_pos (torch.Tensor): bev position embedding with shape (bs, embed_dims, 1, h, w). + spatial_shapes (Tensor): spatial shapes of multi-level features. + level_start_index (Tensor): index of mlvl_feat in all level features + prev_bev (Tensor): shape (bev_h*bev_w, bs, embed_dims) if use temporal self attention. + img_metas (list[dict]): current img meta info. The list has length of batch size. + shift (Tensor): If `use_can_bus`, the `shift` tensor get from `can_bus` in img_metas. + If not, `shift` tensor is bev_queries.new_zeros((1, 2)). + Returns: + output (Tensor): forwarded results with shape (bs, num_query, embed_dims). + """ + + # Step 1: prepare the reference points. 3d reference points for spatial cross-attention (SCA) and 2d reference points for temporal self-attention (TSA). + bev_h, bev_w = self.bev_h, self.bev_w + if self.volume_flag: _dim = 3 + else: _dim = 2 + output = bev_query + intermediate = [] + pc_range_z = self.pc_range[5] - self.pc_range[2] + ref_3d = self.get_reference_points(volume_flag=self.volume_flag, + H=bev_h, W=bev_w, Z=pc_range_z, + num_points_in_voxel=self.num_points_in_voxel, + num_voxel=self.num_voxel, + num_points_in_pillar=self.num_points_in_pillar, + dim='3d', + bs=bev_query.size(1), + device=bev_query.device, + dtype=bev_query.dtype) + # ref_3d: (bs, num_points_in_pillar, h*w, 3) + ref_2d = self.get_reference_points(volume_flag=self.volume_flag, + H=bev_h, W=bev_w, Z=pc_range_z, + num_points_in_voxel=self.num_points_in_voxel, + num_voxel=self.num_voxel, + num_points_in_pillar=self.num_points_in_pillar, + dim='2d', + bs=bev_query.size(1), + device=bev_query.device, + dtype=bev_query.dtype) + # ref_2d: (bs, h*w, 1, 2) + + # Step 2: project the 3d reference points to the camera coordinate system and get the binary mask. + reference_points_cam, bev_mask = self.point_sampling(ref_3d, self.pc_range, img_metas, self.dataset_type) + # reference_points_cam: (num_cam, bs, h*w, num_points_in_pillar, 2) + # bev_mask: (num_cam, bs, h*w, num_points_in_pillar) + + if topk_mask is not None: # by default it is None + bs, DHW = topk_mask.shape + num_cam = bev_mask.shape[0] + topk_mask = topk_mask.reshape(1, bs, DHW, 1).repeat(num_cam, 1, 1, self.num_points_in_voxel) + bev_mask_update = torch.logical_and(bev_mask, topk_mask) + bev_mask = bev_mask_update + + # Step 3: prepare the shift reference points for prev BEV features. + # bug: this code should be 'shift_ref_2d = ref_2d.clone()', we keep this bug for reproducing our results in paper. 
-- `BEVFormer` code source + if self.volume_flag: + shift_ref_2d = ref_2d # .clone() + shift3d = shift.new_zeros(1, 3) + shift3d[:, :2] = shift + shift_ref_2d += shift3d[:, None, None, :] + else: + shift_ref_2d = ref_2d + shift_ref_2d += shift[:, None, None, :] + + # Step 4: reshape the bev_query and bev_pos + bev_query = bev_query.permute(1, 0, 2) + if bev_pos is not None: bev_pos = bev_pos.permute(1, 0, 2) + + # Step 5: prepare prev_bev and hybird_ref_2d + bs, len_bev, num_bev_level, _ = ref_2d.shape # (bs, h*w, 1, 2) + if prev_bev is not None: + prev_bev = prev_bev.permute(1, 0, 2) + prev_bev = torch.stack([prev_bev, bev_query], 1).reshape(bs*2, len_bev, -1) + hybird_ref_2d = torch.stack([shift_ref_2d, ref_2d], 1).reshape(bs*2, len_bev, num_bev_level, _dim) + else: + hybird_ref_2d = torch.stack([ref_2d, ref_2d], 1).reshape(bs*2, len_bev, num_bev_level, _dim) + + # Step 6: run the encoder layers + for layer_idx, layer in enumerate(self.layers): + output = layer(query=bev_query, + key=key, + value=value, + bev_pos=bev_pos, + ref_2d=hybird_ref_2d, + ref_3d=ref_3d, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + reference_points_cam=reference_points_cam, + bev_mask=bev_mask, + prev_bev=prev_bev, + **kwargs) + + # Step 7: update the input `bev_query` of the next layer according to the output of the current layer + bev_query = output + if self.return_intermediate: # Default value is False + intermediate.append(output) + + if self.return_intermediate: # Default value is False + return torch.stack(intermediate) + else: + return output + +@TRANSFORMER_LAYER_SEQUENCE.register_module() +class BEVFormerEncoderTopKWaymo(BEVFormerEncoderWaymo): + + """ + Attention with both self and cross + Implements the decoder in DETR transformer. + Args: + return_intermediate (bool): Whether to return intermediate outputs. + coder_norm_cfg (dict): Config of last normalization layer. Default + `LN`. + """ + + def __init__(self, *args, DHW=[16, 200, 200], topk_ratio=0.05, **kwargs): + super(BEVFormerEncoderTopKWaymo, self).__init__(*args, **kwargs) + self.topk_ratio = topk_ratio + self.DHW = DHW + + +@TRANSFORMER_LAYER.register_module() +class BEVFormerLayerWaymo(MyCustomBaseTransformerLayer): + """Implements decoder layer in DETR transformer. + Args: + attn_cfgs (list[`mmcv.ConfigDict`] | list[dict] | dict )): + Configs for self_attention or cross_attention, the order + should be consistent with it in `operation_order`. If it is + a dict, it would be expand to the number of attention in + `operation_order`. + feedforward_channels (int): The hidden dimension for FFNs. + ffn_dropout (float): Probability of an element to be zeroed + in ffn. Default 0.0. + operation_order (tuple[str]): The execution order of operation + in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm'). + Default: None + act_cfg (dict): The activation config for FFNs. Default: `LN` + norm_cfg (dict): Config dict for normalization layer. + Default: `LN`. + ffn_num_fcs (int): The number of fully-connected layers in FFNs. + Default: 2. 
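        Example (sketch of a plausible layer config; the attention type names follow the modules
        used elsewhere in this patch, but the exact keys and values are assumptions and may differ
        from the configs under projects/configs/cvtocc/):

            layer_cfg = dict(
                type='BEVFormerLayerWaymo',
                volume_flag=False,
                attn_cfgs=[
                    dict(type='TemporalSelfAttention', embed_dims=256, num_levels=1),
                    dict(type='SpatialCrossAttention',
                         embed_dims=256,
                         deformable_attention=dict(type='MSDeformableAttention3D',
                                                   embed_dims=256, num_points=8, num_levels=4)),
                ],
                feedforward_channels=512,
                ffn_dropout=0.1,
                operation_order=('self_attn', 'norm', 'cross_attn', 'norm', 'ffn', 'norm'))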
+ """ + + def __init__(self, + attn_cfgs, + feedforward_channels, + volume_flag=True, + ffn_dropout=0.0, + operation_order=None, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN'), + ffn_num_fcs=2, + bev_z=1, + bev_h=200, + bev_w=200, + **kwargs): + super(BEVFormerLayerWaymo, self).__init__( + attn_cfgs=attn_cfgs, + feedforward_channels=feedforward_channels, + ffn_dropout=ffn_dropout, + operation_order=operation_order, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + ffn_num_fcs=ffn_num_fcs, + **kwargs) + self.volume_flag = volume_flag + self.fp16_enabled = False + self.bev_z = bev_z + self.bev_h = bev_h + self.bev_w = bev_w + # self.pre_norm = operation_order[0] == 'norm' + # So the `pre_norm` is always False in this class. + # So `residual` is always None. + + def forward(self, + query, + key, + value, + bev_pos, + ref_2d, + ref_3d, + reference_points_cam, + spatial_shapes, + level_start_index, + bev_mask, + prev_bev, + query_pos=None, + key_pos=None, + attn_masks=None, + query_key_padding_mask=None, + key_padding_mask=None, + mask=None, + **kwargs): + """ + Forward function for `TransformerDecoderLayer`. + Args: + query (Tensor): The input BEV query with shape (bs, num_queries, embed_dims). + key (Tensor): The key tensor is flattened multi level image feature with shape (num_cam, num_value, bs, embed_dims). + value (Tensor): The value tensor with same shape as `key`. + bev_pos (Tensor): bev position embedding with shape (bs, embed_dims, 1, h, w). + ref_2d (Tensor): hybird 2D reference points used in TSA. + If `prev_bev` is None, it has shape (bs, h*w, 1, 2). + else, it has shape (bs*2, h*w, 1, 2). + ref_3d (Tensor): 3D reference points used in SCA with shape (bs, num_points_in_pillar, h*w, 3). + reference_points_cam (Tensor): projected reference points in the camera coordinate system with shape (num_cam, bs, h*w, num_points_in_pillar, 2). + spatial_shapes (Tensor): spatial shapes of multi-level features. + level_start_index (Tensor): index of mlvl_feat in all level features + bev_mask (Tensor): binary mask indicating valid points in `reference_points_cam` with shape (num_cam, bs, h*w, num_points_in_pillar). + prev_bev (Tensor): shape (bs*2, bev_h*bev_w, embed_dims) if use temporal self attention. + Others are None. + Returns: + query (Tensor): forwarded query results with shape [num_queries, bs, embed_dims]. 
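        Example (toy-sized sketch of how the bs*2 inputs described above are built; all sizes are
        made up for illustration):

            import torch

            bs, bev_h, bev_w, c = 1, 50, 50, 64
            len_bev = bev_h * bev_w
            bev_query = torch.randn(bs, len_bev, c)          # current BEV queries
            prev_bev = torch.randn(bs, len_bev, c)           # aligned BEV of the previous frame
            ref_2d = torch.rand(bs, len_bev, 1, 2)
            shift_ref_2d = ref_2d + 0.01                     # stands in for the ego-motion-shifted copy
            # index 0 of the doubled batch carries the previous frame, index 1 the current one
            prev_stack = torch.stack([prev_bev, bev_query], 1).reshape(bs * 2, len_bev, c)
            hybird_ref_2d = torch.stack([shift_ref_2d, ref_2d], 1).reshape(bs * 2, len_bev, 1, 2)
            # prev_stack: (2, 2500, 64), hybird_ref_2d: (2, 2500, 1, 2)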
+ """ + + # Step 1: prepare the index of the current layer + norm_index = 0 + attn_index = 0 + ffn_index = 0 + identity = query + + # Step 2: prepare the attention masks + if attn_masks is None: + attn_masks = [None for _ in range(self.num_attn)] + elif isinstance(attn_masks, torch.Tensor): + attn_masks = [copy.deepcopy(attn_masks) for _ in range(self.num_attn)] + warnings.warn(f'Use same attn_mask in all attentions in ' + f'{self.__class__.__name__} ') + else: + assert len(attn_masks) == self.num_attn, f'The length of ' \ + f'attn_masks {len(attn_masks)} must be equal ' \ + f'to the number of attention in ' \ + f'operation_order {self.num_attn}' + + # Step 3: run the encoder layers + for layer in self.operation_order: + # Step 4: run the self-attention layer + if layer == 'self_attn': + if self.volume_flag: + spatial_shapes_tsa = torch.tensor([[self.bev_z, self.bev_h, self.bev_w]], device=query.device) + else: + spatial_shapes_tsa = torch.tensor([[self.bev_h, self.bev_w]], device=query.device) + level_start_index_tsa = torch.tensor([0], device=query.device) + + query = self.attentions[attn_index](query=query, + key=prev_bev, + value=prev_bev, + residual=identity if self.pre_norm else None, + query_pos=bev_pos, + key_pos=bev_pos, + attn_mask=attn_masks[attn_index], + key_padding_mask=query_key_padding_mask, + reference_points=ref_2d, + spatial_shapes=spatial_shapes_tsa, + level_start_index=level_start_index_tsa, + **kwargs) + + attn_index += 1 + identity = query # identity will not go through the normalization layer. + + # There is always a normlization layer after the self-attention layer, cross-attention layer and ffn layer. + elif layer == 'norm': + query = self.norms[norm_index](query) + norm_index += 1 + + # Step 5: run the cross-attention layer + elif layer == 'cross_attn': + query = self.attentions[attn_index](query, + key, + value, + residual=identity if self.pre_norm else None, + query_pos=query_pos, + key_pos=key_pos, + reference_points=ref_3d, + reference_points_cam=reference_points_cam, + bev_mask=bev_mask, + mask=mask, + attn_mask=attn_masks[attn_index], + key_padding_mask=key_padding_mask, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + **kwargs) + + attn_index += 1 + identity = query + + elif layer == 'ffn': + query = self.ffns[ffn_index](query, identity if self.pre_norm else None) + ffn_index += 1 + + return query diff --git a/projects/mmdet3d_plugin/bevformer/modules/hybrid_transformer.py b/projects/mmdet3d_plugin/bevformer/modules/hybrid_transformer.py new file mode 100644 index 0000000..de65177 --- /dev/null +++ b/projects/mmdet3d_plugin/bevformer/modules/hybrid_transformer.py @@ -0,0 +1,519 @@ +# --------------------------------------------- +# Copyright (c) OpenMMLab. All rights reserved. 
+# --------------------------------------------- +# Modified by Zhiqi Li +# --------------------------------------------- + +import numpy as np +import torch +import torch.nn as nn +from mmcv.cnn import xavier_init +from mmcv.cnn.bricks.transformer import build_transformer_layer_sequence, build_positional_encoding +from mmcv.runner.base_module import BaseModule + +from mmdet.models.utils.builder import TRANSFORMER +from torch.nn.init import normal_ +from projects.mmdet3d_plugin.models.utils.visual import save_tensor +from mmcv.runner.base_module import BaseModule +from torchvision.transforms.functional import rotate +from .temporal_self_attention import TemporalSelfAttention +from .spatial_cross_attention import MSDeformableAttention3D +from .decoder import CustomMSDeformableAttention +from projects.mmdet3d_plugin.models.utils.bricks import run_time +from mmcv.runner import force_fp32, auto_fp16 +from mmcv.cnn import PLUGIN_LAYERS, Conv2d,Conv3d, ConvModule, caffe2_xavier_init + + + +@TRANSFORMER.register_module() +class HybridTransformer(BaseModule): + """Implements the Detr3D transformer. + Args: + as_two_stage (bool): Generate query from encoder features. + Default: False. + num_feature_levels (int): Number of feature maps from FPN: + Default: 4. + two_stage_num_proposals (int): Number of proposals when set + `as_two_stage` as True. Default: 300. + """ + + def __init__(self, + num_feature_levels=4, + num_cams=6, + two_stage_num_proposals=300, + encoder=None, + decoder=None, + act_cfg=None, + norm_cfg_3d=dict(type='SyncBN', requires_grad=True), + position=None, # positional embedding of query point + encoder_embed_dims=[256, 256, 128, 64], + feature_map_z=[1, 4, 8, 16], + dilations=[2,2,2,2], + paddings=[2,2,2,2], + + embed_dims=256, + more_conv=False, + use_conv=False, + rotate_prev_bev=True, + use_shift=True, + use_can_bus=True, + can_bus_norm=True, + use_cams_embeds=True, + rotate_center=[100, 100], + decoder_on_bev=True, + bev_z=16, + **kwargs): + super(HybridTransformer, self).__init__(**kwargs) + self.more_conv=more_conv + self.use_conv=use_conv + self.encoders = [] + self.positional_encodings = [] + self.encoder_block_num = len(encoder) + self.encoder_keys = [] + self.feature_map_z = feature_map_z + self.encoder_embed_dims = encoder_embed_dims + self.dilations = dilations + self.paddings=paddings + self.norm_cfg_3d=norm_cfg_3d + self.act_cfg=act_cfg + for encoder_key in encoder: + self.encoder_keys.append(encoder_key) + self.encoders.append(build_transformer_layer_sequence(encoder[encoder_key])) + self.positional_encodings.append(build_positional_encoding(position[encoder_key])) + + # register model + for i, layer in enumerate(self.encoders): + self.add_module('encoder_{}'.format(i), layer) + for i, layer in enumerate(self.positional_encodings): + self.add_module('pos_{}'.format(i), layer) + + self.embed_dims = embed_dims + self.num_feature_levels = num_feature_levels + self.num_cams = num_cams + self.fp16_enabled = False + self.embed_dim_ratios=[ embed_dims//dim for dim in encoder_embed_dims] + + self.rotate_prev_bev = rotate_prev_bev + self.use_shift = use_shift + self.use_can_bus = use_can_bus + self.can_bus_norm = can_bus_norm + self.use_cams_embeds = use_cams_embeds + self.decoder_on_bev = decoder_on_bev + self.bev_z = bev_z + self.two_stage_num_proposals = two_stage_num_proposals + + self.init_layers() + self.rotate_center = rotate_center + + def init_layers(self): + """Initialize layers of the Detr3DTransformer.""" + self.level_embeds = nn.Parameter(torch.Tensor( + 
self.num_feature_levels, self.embed_dims)) + self.cams_embeds = nn.Parameter( + torch.Tensor(self.num_cams, self.embed_dims)) + # self.reference_points = nn.Linear(self.embed_dims, 3) + self.can_bus_mlp = nn.Sequential( + nn.Linear(18, self.embed_dims // 2), + nn.ReLU(inplace=True), + nn.Linear(self.embed_dims // 2, self.embed_dims), + nn.ReLU(inplace=True), + ) + if self.can_bus_norm: + self.can_bus_mlp.add_module('norm', nn.LayerNorm(self.embed_dims)) + + if self.decoder_on_bev: + voxel2bev = [] + last_feature = self.feature_map_z[-1]*self.encoder_embed_dims[-1] + # mid_num = last_feature//2 # exp7, exp8 + # voxel2bev.append(nn.Linear(last_feature, mid_num)) + # voxel2bev.append(nn.ReLU(inplace=True)) + # voxel2bev.append(nn.Linear(mid_num, self.embed_dims)) + # voxel2bev.append(nn.ReLU(inplace=True)) + # voxel2bev.append(nn.LayerNorm(self.embed_dims)) + # self.voxel2bev = nn.Sequential(*voxel2bev) + + mid_num = last_feature + voxel2bev.append(nn.Linear(last_feature, mid_num)) + voxel2bev.append(nn.ReLU(inplace=True)) + voxel2bev.append(nn.Linear(mid_num, self.embed_dims)) + voxel2bev.append(nn.ReLU(inplace=True)) + voxel2bev.append(nn.LayerNorm(self.embed_dims)) + self.voxel2bev = nn.Sequential(*voxel2bev) + + # mid-stage bev->voxe->voxel-> voxel + if self.use_conv: + self.convs=[] + for i in range(self.encoder_block_num-1): + if self.more_conv: + conv = nn.Sequential( + ConvModule( + self.encoder_embed_dims[i], + self.encoder_embed_dims[i], + kernel_size=3, + stride=1, + padding=self.paddings[i], + dilation=self.dilations[i], + # bias=use_bias_3d, + conv_cfg=dict(type='Conv3d'), + norm_cfg=self.norm_cfg_3d, + act_cfg=self.act_cfg), + ConvModule( + self.encoder_embed_dims[i], + self.encoder_embed_dims[i + 1], + kernel_size=[2, 3, 3], + stride=[2, 1, 1], + padding=[0, 1, 1], + # bias=use_bias_3d, + conv_cfg=dict(type='ConvTranspose3d'), + norm_cfg=self.norm_cfg_3d, + act_cfg=self.act_cfg), + ConvModule( + self.encoder_embed_dims[i + 1], + self.encoder_embed_dims[i + 1], + kernel_size=3, + stride=1, + padding=self.paddings[i+1], + dilation=self.dilations[i+1], + # bias=use_bias_3d, + conv_cfg=dict(type='Conv3d'), + norm_cfg=self.norm_cfg_3d, + act_cfg=self.act_cfg), + ) + else: + conv=nn.Sequential( + ConvModule( + self.encoder_embed_dims[i], + self.encoder_embed_dims[i+1], + kernel_size=[2, 3, 3], + stride=[2, 1, 1], + padding=[0, 1, 1], + # bias=use_bias_3d, + conv_cfg=dict(type='ConvTranspose3d'), + norm_cfg=self.norm_cfg_3d, + act_cfg=self.act_cfg), + ConvModule( + self.encoder_embed_dims[i+1], + self.encoder_embed_dims[i+1], + kernel_size=3, + stride=1, + padding=self.paddings[i], + dilation=self.dilations[i], + # bias=use_bias_3d, + conv_cfg=dict(type='Conv3d'), + norm_cfg=self.norm_cfg_3d, + act_cfg=self.act_cfg), + ) + + self.convs.append(conv) + self.add_module('pyramid_convs_{}'.format(i), conv) + + else: + self.bev_voxel_transfers = [] + for i in range(self.encoder_block_num-1): + fc1 = self.encoder_embed_dims[i]*self.feature_map_z[i] + fc2 = self.encoder_embed_dims[i+1]*self.feature_map_z[i+1] + block = nn.Sequential( + nn.Linear(fc1, fc2), + nn.ReLU(inplace=True), + nn.LayerNorm(fc2), + ) + self.bev_voxel_transfers.append(block) + self.add_module('bev_voxel_transfers_{}'.format(i), block) + + self.image_feature_map_1_2 = nn.Sequential( + nn.Linear(self.embed_dims, self.embed_dims//2), + nn.ReLU(inplace=True), + ) + self.image_feature_map_1_4 = nn.Sequential( + nn.Linear(self.embed_dims, self.embed_dims//4), + nn.ReLU(inplace=True), + ) + if 8 in self.embed_dim_ratios: + 
self.image_feature_map_1_8 = nn.Sequential( + nn.Linear(self.embed_dims, self.embed_dims//8), + nn.ReLU(inplace=True), + ) + else: + self.image_feature_map_1_8 = None + + if 16 in self.embed_dim_ratios: + self.image_feature_map_1_16 = nn.Sequential( + nn.Linear(self.embed_dims, self.embed_dims//16), + nn.ReLU(inplace=True), + ) + else: + self.image_feature_map_1_16 = None + + + def init_weights(self): + """Initialize the transformer weights.""" + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + for m in self.modules(): + if isinstance(m, MSDeformableAttention3D) or isinstance(m, TemporalSelfAttention) \ + or isinstance(m, CustomMSDeformableAttention): + try: + m.init_weight() + except AttributeError: + m.init_weights() + normal_(self.level_embeds) + normal_(self.cams_embeds) + # xavier_init(self.reference_points, distribution='uniform', bias=0.) + xavier_init(self.can_bus_mlp, distribution='uniform', bias=0.) + if self.decoder_on_bev: + xavier_init(self.voxel2bev, distribution='uniform', bias=0.) + if not self.use_conv: + for block in self.bev_voxel_transfers: + xavier_init(block, distribution='uniform', bias=0.) + xavier_init(self.image_feature_map_1_2, distribution='uniform', bias=0.) + xavier_init(self.image_feature_map_1_4, distribution='uniform', bias=0.) + if self.image_feature_map_1_8 is not None: + xavier_init(self.image_feature_map_1_8, distribution='uniform', bias=0.) + if self.image_feature_map_1_16 is not None: + xavier_init(self.image_feature_map_1_16, distribution='uniform', bias=0.) + + @auto_fp16(apply_to=('mlvl_feats', 'bev_queries', 'prev_bev', 'bev_pos')) + def get_voxel_features( + self, + mlvl_feats, + bev_queries, + bev_z, + bev_h, + bev_w, + grid_length=[0.512, 0.512], + bev_pos=None, + prev_bev=None, + **kwargs): + """ + obtain bev features. 
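        Between two encoder blocks the flat output of block i is lifted to the taller voxel grid of
        block i+1. With use_conv=False this is done by the `bev_voxel_transfers` Linear blocks built
        in init_layers; a toy-sized sketch of that reshape/Linear/reshape round trip (sizes assumed
        from the defaults above, not from any shipped config):

            import torch
            import torch.nn as nn

            bev_h = bev_w = 50
            feature_map_z, dims = [1, 4], [256, 256]                          # z and C of block i and block i+1
            fc1, fc2 = feature_map_z[0] * dims[0], feature_map_z[1] * dims[1]
            transfer = nn.Sequential(nn.Linear(fc1, fc2), nn.ReLU(inplace=True), nn.LayerNorm(fc2))

            out = torch.randn(feature_map_z[0] * bev_h * bev_w, 1, dims[0])   # (num_query, bs, C_i)
            x = out.view(feature_map_z[0], bev_h, bev_w, 1, dims[0])
            x = x.permute(1, 2, 3, 0, 4).flatten(3)                           # (H, W, bs, z_i * C_i)
            x = transfer(x)                                                   # (H, W, bs, z_j * C_j)
            x = x.view(bev_h, bev_w, 1, feature_map_z[1], dims[1]).permute(3, 0, 1, 2, 4)
            next_queries = x.reshape(-1, 1, dims[1])                          # (z_j * H * W, bs, C_j)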
+ """ + + bs = mlvl_feats[0].size(0) + bev_queries = bev_queries.unsqueeze(1).repeat(1, bs, 1) # (num_query, bs, embed_dims) + bev_pos = None + # bev_pos = bev_pos.flatten(2).permute(2, 0, 1) # (num_query, bs, embed_dims) + + # obtain rotation angle and shift with ego motion + delta_x = np.array([each['can_bus'][0] + for each in kwargs['img_metas']]) + delta_y = np.array([each['can_bus'][1] + for each in kwargs['img_metas']]) + ego_angle = np.array( + [each['can_bus'][-2] / np.pi * 180 for each in kwargs['img_metas']]) + grid_length_y = grid_length[0] + grid_length_x = grid_length[1] + translation_length = np.sqrt(delta_x ** 2 + delta_y ** 2) + translation_angle = np.arctan2(delta_y, delta_x) / np.pi * 180 + bev_angle = ego_angle - translation_angle + shift_y = translation_length * \ + np.cos(bev_angle / 180 * np.pi) / grid_length_y / bev_h + shift_x = translation_length * \ + np.sin(bev_angle / 180 * np.pi) / grid_length_x / bev_w + shift_y = shift_y * self.use_shift + shift_x = shift_x * self.use_shift + shift = bev_queries.new_tensor( + [shift_x, shift_y]).permute(1, 0) # (2, bs) -> (bs, 2) + + # add can bus signals + can_bus = bev_queries.new_tensor( + [each['can_bus'] for each in kwargs['img_metas']]) # [:, :] + can_bus = self.can_bus_mlp(can_bus)[None, :, :] + bev_queries = bev_queries + can_bus * self.use_can_bus # (query_num, bs, embed_dims) + + feat_flatten = [] + spatial_shapes = [] + for lvl, feat in enumerate(mlvl_feats): + bs, num_cam, c, h, w = feat.shape + spatial_shape = (h, w) + feat = feat.flatten(3).permute(1, 0, 3, 2) + if self.use_cams_embeds: + feat = feat + self.cams_embeds[:, None, None, :].to(feat.dtype) + feat = feat + self.level_embeds[None, + None, lvl:lvl + 1, :].to(feat.dtype) + spatial_shapes.append(spatial_shape) + feat_flatten.append(feat) + + feat_flatten = torch.cat(feat_flatten, 2) + spatial_shapes = torch.as_tensor( + spatial_shapes, dtype=torch.long, device=bev_queries.device) + level_start_index = torch.cat((spatial_shapes.new_zeros( + (1,)), spatial_shapes.prod(1).cumsum(0)[:-1])) + + feat_flatten_original = feat_flatten.permute( + 0, 2, 1, 3) # (num_cam, H*W, bs, embed_dims) + feat_flatten_map1_2 = self.image_feature_map_1_2(feat_flatten_original) + feat_flatten_map1_4 = self.image_feature_map_1_4(feat_flatten_original) + if self.image_feature_map_1_8 is not None: + feat_flatten_map1_8 = self.image_feature_map_1_8(feat_flatten_original) + else: + feat_flatten_map1_8 = None + if self.image_feature_map_1_16 is not None: + feat_flatten_map1_16 = self.image_feature_map_1_16(feat_flatten_original) + else: + feat_flatten_map1_16 = None + + block_features = [] + for block_index in range(self.encoder_block_num): + # encoder: BEV -> Voxeli -> Voxelj -> Voxelk + # print('bev_query.shape:', block_index, bev_queries.shape) + block_bev_z = self.feature_map_z[block_index] + block_embed_dims = self.encoder_embed_dims[block_index] + if block_bev_z == 1: + bev_mask = torch.zeros((bs, bev_h, bev_w), + device=bev_queries.device).to(bev_queries.dtype) + else: + bev_mask = torch.zeros((bs, block_bev_z, bev_h, bev_w), + device=bev_queries.device).to(bev_queries.dtype) + pos = self.positional_encodings[block_index](bev_mask).to(bev_queries.dtype) # (bs, embed_dims, h, w) + pos = pos.flatten(2).permute(2, 0, 1) # (query_num, bs, embed_dims) + + if block_embed_dims == self.embed_dims: + feat_flatten = feat_flatten_original + elif block_embed_dims*2 == self.embed_dims: + feat_flatten = feat_flatten_map1_2 + elif block_embed_dims*4 == self.embed_dims: + feat_flatten = 
feat_flatten_map1_4 + elif block_embed_dims*8 == self.embed_dims: + feat_flatten = feat_flatten_map1_8 + elif block_embed_dims*16 == self.embed_dims: + feat_flatten = feat_flatten_map1_16 + + if prev_bev is not None: # (bs, num_query, embed_dims) + stage_prev_bev = prev_bev[block_index] + if block_bev_z == 1: # 2D BEV + if stage_prev_bev.shape[1] == bev_h * bev_w: + stage_prev_bev = stage_prev_bev.permute(1, 0, 2) # (num_query, bs, embed_dims) + if self.rotate_prev_bev: + for i in range(bs): + # num_prev_bev = prev_bev.size(1) + rotation_angle = kwargs['img_metas'][i]['can_bus'][-1] + tmp_prev_bev = stage_prev_bev[:, i].reshape( + bev_h, bev_w, -1).permute(2, 0, 1) # (embed_dims, bev_h, bev_w) + tmp_prev_bev = rotate(tmp_prev_bev, rotation_angle, + center=self.rotate_center) # TODO: for 3D voxel + tmp_prev_bev = tmp_prev_bev.permute(1, 2, 0).reshape( + bev_h * bev_w, 1, -1) + stage_prev_bev[:, i] = tmp_prev_bev[:, 0] + + else: # 3D Voxel + if stage_prev_bev.shape[1] == block_bev_z* bev_h * bev_w: + stage_prev_bev = stage_prev_bev.permute(1, 0, 2) # (num_query, bs, embed_dims) + if self.rotate_prev_bev: # revise for 3D feature map + for i in range(bs): + rotation_angle = kwargs['img_metas'][i]['can_bus'][-1] + tmp_prev_bev = stage_prev_bev[:, i].reshape(block_bev_z, bev_h, bev_w, -1).permute(3, 0, 1, 2) # (embed_dims, bev_z, bev_h, bev_w) + tmp_prev_bev = rotate(tmp_prev_bev, rotation_angle, center=self.rotate_center) + tmp_prev_bev = tmp_prev_bev.permute(1, 2, 3, 0).reshape(block_bev_z * bev_h * bev_w, 1, -1) + stage_prev_bev[:, i] = tmp_prev_bev[:, 0] + else: + stage_prev_bev = None + + # print() + # print('bev_queries',bev_queries.shape) + # print() + + output = self.encoders[block_index]( + bev_queries, + feat_flatten, + feat_flatten, + bev_z=block_bev_z, + bev_h=bev_h, + bev_w=bev_w, + bev_pos=pos, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + prev_bev=stage_prev_bev, + shift=shift, + **kwargs + ) + + block_features.append(output) + if self.use_conv: + if block_index < self.encoder_block_num - 1: # bev-> voxel or voxel_i -> voxel_j + bev_queries = output.view(block_bev_z, bev_h, bev_w, bs, self.encoder_embed_dims[block_index]) + bev_queries = bev_queries.permute(3,4,0,1,2) + # bev_queries = bev_queries.flatten(3) # (bev_h, bev_w, bs, embed_dims1*z1) + bev_queries = self.convs[block_index](bev_queries) + bev_queries = bev_queries.view(bs,self.encoder_embed_dims[block_index + 1], + self.feature_map_z[block_index + 1],bev_h, bev_w, + ) + bev_queries = bev_queries.permute(2,3,4,0,1) + bev_queries = bev_queries.reshape(-1, bs, self.encoder_embed_dims[block_index + 1]) # (num_query, bs, embed_dims) + else: + if block_index < self.encoder_block_num-1: # bev-> voxel or voxel_i -> voxel_j + bev_queries = output.view(block_bev_z, bev_h, bev_w, bs, self.encoder_embed_dims[block_index]) + bev_queries = bev_queries.permute(1, 2, 3, 0, 4) + bev_queries = bev_queries.flatten(3) # (bev_h, bev_w, bs, embed_dims1*z1) + bev_queries = self.bev_voxel_transfers[block_index](bev_queries) # (bev_h, bev_w, bs, embed_dims2*z2) + bev_queries = bev_queries.view(bev_h, bev_w, bs, self.feature_map_z[block_index+1], self.encoder_embed_dims[block_index+1]) + bev_queries = bev_queries.permute(3, 0, 1, 2, 4) + bev_queries = bev_queries.reshape(-1, bs, self.encoder_embed_dims[block_index+1]) # (num_query, bs, embed_dims) + + return block_features # is a list + + @auto_fp16(apply_to=('mlvl_feats', 'bev_queries', 'object_query_embed', 'prev_bev', 'bev_pos')) + def forward(self, + mlvl_feats, + 
bev_queries, + object_query_embed, + bev_z, + bev_h, + bev_w, + grid_length=[0.512, 0.512], + bev_pos=None, + reg_branches=None, + cls_branches=None, + prev_bev=None, + **kwargs): + """Forward function for `Detr3DTransformer`. + Args: + mlvl_feats (list(Tensor)): Input queries from + different level. Each element has shape + [bs, num_cams, embed_dims, h, w]. + bev_queries (Tensor): (bev_h*bev_w, c) + bev_pos (Tensor): (bs, embed_dims, bev_h, bev_w) + object_query_embed (Tensor): The query embedding for decoder, + with shape [num_query, c]. + reg_branches (obj:`nn.ModuleList`): Regression heads for + feature maps from each decoder layer. Only would + be passed when `with_box_refine` is True. Default to None. + Returns: + tuple[Tensor]: results of decoder containing the following tensor. + - bev_embed: BEV features + - inter_states: Outputs from decoder. If + return_intermediate_dec is True output has shape \ + (num_dec_layers, bs, num_query, embed_dims), else has \ + shape (1, bs, num_query, embed_dims). + - init_reference_out: The initial value of reference \ + points, has shape (bs, num_queries, 4). + - inter_references_out: The internal value of reference \ + points in decoder, has shape \ + (num_dec_layers, bs,num_query, embed_dims) + - enc_outputs_class: The classification score of \ + proposals generated from \ + encoder's feature maps, has shape \ + (batch, h*w, num_classes). \ + Only would be returned when `as_two_stage` is True, \ + otherwise None. + - enc_outputs_coord_unact: The regression results \ + generated from encoder's feature maps., has shape \ + (batch, h*w, 4). Only would \ + be returned when `as_two_stage` is True, \ + otherwise None. + """ + + block_features = self.get_voxel_features( + mlvl_feats, + bev_queries, + bev_z, + bev_h, + bev_w, + grid_length=grid_length, + bev_pos=bev_pos, + prev_bev=prev_bev, + **kwargs) # voxel_embed shape: (bs, num_query, embed_dims) + + return block_features + + diff --git a/projects/mmdet3d_plugin/bevformer/modules/occ_conv_decoder.py b/projects/mmdet3d_plugin/bevformer/modules/occ_conv_decoder.py new file mode 100644 index 0000000..17b8e3e --- /dev/null +++ b/projects/mmdet3d_plugin/bevformer/modules/occ_conv_decoder.py @@ -0,0 +1,80 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from .unet import MYASPPHead +from mmcv.runner.base_module import BaseModule +from mmcv.cnn.bricks.registry import TRANSFORMER_LAYER_SEQUENCE + +@TRANSFORMER_LAYER_SEQUENCE.register_module() +class OccConvDecoder(BaseModule): + def __init__(self, + embed_dims=256, + conv_num=3, + pillar_h=16, + conv_cfg=dict(type='Conv2d'), + norm_cfg=dict(type='BN',), + act_cfg=dict(type='ReLU',inplace=True),): + super(OccConvDecoder, self).__init__() + self.embed_dims = embed_dims + self.conv_num = conv_num + self.pillar_h = pillar_h + self.use_bias = norm_cfg is None + + # Define convolutional layers + self.conv_layers = nn.ModuleList() + for _ in range(conv_num): + conv_layer = ConvModule( + self.embed_dims, + self.embed_dims, + kernel_size=3, + stride=1, + padding=1, + bias=self.use_bias, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg + ) + self.conv_layers.append(conv_layer) + + # ASPP module + self.aspp_head = MYASPPHead( + is_volume=False, + in_channels=self.embed_dims, + in_index=3, + channels=self.embed_dims, + dilations=(1, 3, 6, 9), + dropout_ratio=0.1, + num_classes=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + align_corners=False, + # 
loss_decode=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + ) + + # Deconvolution to original shape + _out_dim = self.embed_dims * self.pillar_h + self.deconv = ConvModule( + self.embed_dims, + _out_dim, + kernel_size=3, + stride=1, + padding=1, + bias=self.use_bias, + norm_cfg=norm_cfg, + act_cfg=act_cfg + ) + + def forward(self, x): + # Forward pass through convolutional layers + for conv_layer in self.conv_layers: + x = conv_layer(x) # 256 -> 256 + + # Forward pass through ASPP module + x = self.aspp_head(x) # 256 -> 256 + + # Forward pass through deconvolutional layer + x = self.deconv(x) # 256 -> 256 * pillar_h + + return x diff --git a/projects/mmdet3d_plugin/bevformer/modules/occ_conv_decoder_3d.py b/projects/mmdet3d_plugin/bevformer/modules/occ_conv_decoder_3d.py new file mode 100644 index 0000000..18f06f8 --- /dev/null +++ b/projects/mmdet3d_plugin/bevformer/modules/occ_conv_decoder_3d.py @@ -0,0 +1,63 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from .unet import MYASPPHead +from mmcv.runner.base_module import BaseModule +from mmcv.cnn.bricks.registry import TRANSFORMER_LAYER_SEQUENCE + +@TRANSFORMER_LAYER_SEQUENCE.register_module() +class OccConvDecoder3D(BaseModule): + def __init__(self, + embed_dims, + conv_cfg=dict(type='Conv3d'), + norm_cfg_3d=None, + act_cfg_3d=None): + super(OccConvDecoder3D, self).__init__() + self.embed_dims = embed_dims + use_bias_3d = norm_cfg_3d is None + self.middle_dims = 32 # decrease memory cost + + self.conv1 = ConvModule( + self.embed_dims, + self.middle_dims, + kernel_size=1, + stride=1, + padding=0, + bias=use_bias_3d, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg_3d, + act_cfg=act_cfg_3d + ) + + self.aspp = MYASPPHead( + in_channels=self.middle_dims, + in_index=3, + channels=self.middle_dims, + dilations=(1, 3, 6, 9), + dropout_ratio=0.1, + num_classes=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg_3d, + align_corners=False, + # loss_decode=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + ) + + self.conv2 = ConvModule( + self.middle_dims, + embed_dims, + kernel_size=1, + stride=1, + padding=0, + bias=use_bias_3d, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg_3d, + act_cfg=act_cfg_3d + ) + + def forward(self, x): + x = self.conv1(x) + x = self.aspp(x) + x = self.conv2(x) + return x diff --git a/projects/mmdet3d_plugin/bevformer/modules/occ_transformer.py b/projects/mmdet3d_plugin/bevformer/modules/occ_transformer.py index 1f28371..2ba3e65 100644 --- a/projects/mmdet3d_plugin/bevformer/modules/occ_transformer.py +++ b/projects/mmdet3d_plugin/bevformer/modules/occ_transformer.py @@ -7,24 +7,24 @@ import numpy as np import torch import torch.nn as nn +from torch.nn.init import normal_ +from torchvision.transforms.functional import rotate + from mmcv.cnn import xavier_init from mmcv.cnn.bricks.transformer import build_transformer_layer_sequence from mmcv.runner.base_module import BaseModule - -from mmdet.models.utils.builder import TRANSFORMER -from torch.nn.init import normal_ -from projects.mmdet3d_plugin.models.utils.visual import save_tensor from mmcv.runner.base_module import BaseModule -from torchvision.transforms.functional import rotate -from .temporal_self_attention import TemporalSelfAttention -from .spatial_cross_attention import MSDeformableAttention3D -from .decoder import CustomMSDeformableAttention -from projects.mmdet3d_plugin.models.utils.bricks import run_time from mmcv.runner import force_fp32, auto_fp16 -from mmcv.cnn 
import PLUGIN_LAYERS, Conv2d,Conv3d, ConvModule, caffe2_xavier_init +from mmcv.cnn import PLUGIN_LAYERS, Conv2d,Conv3d, ConvModule +from mmdet.models.utils.builder import TRANSFORMER + +from projects.mmdet3d_plugin.bevformer.modules.temporal_self_attention import TemporalSelfAttention +from projects.mmdet3d_plugin.bevformer.modules.spatial_cross_attention import MSDeformableAttention3D +from projects.mmdet3d_plugin.bevformer.modules.decoder import CustomMSDeformableAttention +from projects.mmdet3d_plugin.bevformer.modules.cost_volume_module import CostVolumeModule @TRANSFORMER.register_module() -class OccTransformer(BaseModule): +class CVTOccTransformer(BaseModule): """Implements the Detr3D transformer. Args: as_two_stage (bool): Generate query from encoder features. @@ -42,38 +42,60 @@ def __init__(self, encoder=None, decoder=None, embed_dims=256, - rotate_prev_bev=True, - use_shift=True, - use_can_bus=True, + bev_h=200, + bev_w=200, + channels=16, + pc_range=None, + voxel_size=None, + rotate_prev_bev=False, + use_shift=False, + use_can_bus=False, can_bus_norm=True, use_cams_embeds=True, + use_free_mask=False, use_3d=False, use_conv=False, rotate_center=[100, 100], num_classes=18, out_dim=32, pillar_h=16, + queue_length=None, + use_padding=False, + use_temporal=None, + scales=None, act_cfg=dict(type='ReLU',inplace=True), norm_cfg=dict(type='BN', ), norm_cfg_3d=dict(type='BN3d', ), **kwargs): - super(OccTransformer, self).__init__(**kwargs) + super(CVTOccTransformer, self).__init__(**kwargs) self.encoder = build_transformer_layer_sequence(encoder) - + self.use_free_mask = use_free_mask self.embed_dims = embed_dims + self.bev_h = bev_h + self.bev_w = bev_w + self.channels = channels + self.pc_range = pc_range + self.voxel_size = voxel_size + self.grid_length = ((pc_range[4] - pc_range[1]) / bev_h, + (pc_range[3] - pc_range[0]) / bev_w) self.num_feature_levels = num_feature_levels self.num_cams = num_cams self.fp16_enabled = False - + if use_free_mask: + num_classes = num_classes - 1 self.rotate_prev_bev = rotate_prev_bev self.use_shift = use_shift self.use_can_bus = use_can_bus self.can_bus_norm = can_bus_norm self.use_cams_embeds = use_cams_embeds - self.use_3d=use_3d - self.use_conv=use_conv + self.use_3d = use_3d + self.use_conv = use_conv self.pillar_h = pillar_h - self.out_dim=out_dim + self.queue_length = queue_length + self.use_padding = use_padding + self.use_temporal = use_temporal + self.scales = scales + self.out_dim = out_dim if not use_3d: if use_conv: use_bias = norm_cfg is None @@ -101,7 +123,7 @@ def __init__(self, self.decoder = nn.Sequential( nn.Linear(self.embed_dims, self.embed_dims * 2), nn.Softplus(), - nn.Linear(self.embed_dims * 2, self.embed_dims*2), + nn.Linear(self.embed_dims * 2, self.embed_dims * 2), ) else: use_bias_3d = norm_cfg_3d is None @@ -137,6 +159,15 @@ def __init__(self, self.two_stage_num_proposals = two_stage_num_proposals self.init_layers() self.rotate_center = rotate_center + if self.use_temporal == "costvolume": + self.costvolume = CostVolumeModule(bev_h=self.bev_h, + bev_w=self.bev_w, + total_z=self.pillar_h, + channels=self.channels, + pc_range=self.pc_range, + voxel_size=self.voxel_size, + sampled_queue_length=self.queue_length, + scales=self.scales,) def init_layers(self): """Initialize layers of the Detr3DTransformer.""" @@ -171,182 +202,240 @@ def init_weights(self): # xavier_init(self.reference_points, distribution='uniform', bias=0.) xavier_init(self.can_bus_mlp, distribution='uniform', bias=0.) 
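# Illustrative sketch: the grid_length set in __init__ above is simply metres per BEV cell, and
# together with the can_bus ego motion it yields the fractional BEV shift used to align the
# previous frame. The pc_range, grid size and ego motion below are assumed values for
# illustration, not taken from this repo's configs.
import numpy as np

pc_range = [-40.0, -40.0, -1.0, 40.0, 40.0, 5.4]
bev_h = bev_w = 200
grid_length_y = (pc_range[4] - pc_range[1]) / bev_h              # 0.4 m per cell along y
grid_length_x = (pc_range[3] - pc_range[0]) / bev_w              # 0.4 m per cell along x

delta_x, delta_y, ego_angle = 1.2, 0.3, 5.0                      # toy ego displacement (m) and yaw (deg)
translation_length = np.sqrt(delta_x ** 2 + delta_y ** 2)
translation_angle = np.arctan2(delta_y, delta_x) / np.pi * 180
bev_angle = ego_angle - translation_angle
shift_y = translation_length * np.cos(bev_angle / 180 * np.pi) / grid_length_y / bev_h
shift_x = translation_length * np.sin(bev_angle / 180 * np.pi) / grid_length_x / bev_w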
- @auto_fp16(apply_to=('mlvl_feats', 'bev_queries', 'prev_bev', 'bev_pos')) - def get_bev_features( - self, - mlvl_feats, - bev_queries, - bev_h, - bev_w, - grid_length=[0.512, 0.512], - bev_pos=None, - prev_bev=None, - **kwargs): + @auto_fp16(apply_to=('multi_level_feats', 'bev_queries', 'prev_bev', 'bev_pos')) + def get_bev_features(self, multi_level_feats, + bev_queries, + bev_pos=None, + cur_img_metas=None, + prev_bev=None, + **kwargs): """ - obtain bev features. + This method is used to obtain Bird's Eye View (BEV) features from multi-level features, BEV queries, and other related parameters. + use bev queries to find feature in multi-view camera img + Args: + multi_level_feats (list[torch.Tensor]): Current multi level img features from the upstream network. + Each is a 5D-tensor img_feats with shape (bs, num_cams, embed_dims, h, w). + cur_img_metas (list[dict]): Meta information of each sample. The list has length of batch size. + bev_queries (torch.Tensor): (bev_h*bev_w, c). used in decoder + bev_pos (torch.Tensor): (bs, embed_dims, bev_h, bev_w). used in decoder + prev_bev (torch.Tensor): BEV features of the previous sample. + Returns: + results (dict): with keys "bev_embed, feat_flatten, spatial_shapes, level_start_index, shift". + bev_embed (torch.Tensor): BEV feature for current frame. + feat_flatten (torch.Tensor): Each level img feature, flattens the height and width dimensions and combine together. + shape (num_cam, bs, h*w, c). h*w are sum of all fallten h*w of all levels = 12750. + spatial_shapes (torch.Tensor): Record the shape of each level img feature. + tensor([[ 80, 120],[ 40, 60],[ 20, 30],[ 10, 15]]). + level_start_index (torch.Tensor): Record the start index of each level img feature in feat_flatten. + tensor([0, 9600, 12000, 12600]). + shift (torch.Tensor): shift of ego car in x and y axis. shape (1, 2). 
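        Example (the spatial_shapes and level_start_index values quoted above can be reproduced
        directly; shown only as an illustration):

            import torch

            spatial_shapes = torch.tensor([[80, 120], [40, 60], [20, 30], [10, 15]])
            level_start_index = torch.cat((spatial_shapes.new_zeros((1,)),
                                           spatial_shapes.prod(1).cumsum(0)[:-1]))
            # tensor([    0,  9600, 12000, 12600]); total flattened tokens = 12750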
""" - bs = mlvl_feats[0].size(0) + # Step 1: obtain parameters + bs = multi_level_feats[0].size(0) bev_queries = bev_queries.unsqueeze(1).repeat(1, bs, 1) bev_pos = bev_pos.flatten(2).permute(2, 0, 1) + grid_length_y = self.grid_length[0] + grid_length_x = self.grid_length[1] - # obtain rotation angle and shift with ego motion - - delta_x = np.array([each['can_bus'][0] - for each in kwargs['img_metas']]) - delta_y = np.array([each['can_bus'][1] - for each in kwargs['img_metas']]) - ego_angle = np.array( - [each['can_bus'][-2] / np.pi * 180 for each in kwargs['img_metas']]) - grid_length_y = grid_length[0] - grid_length_x = grid_length[1] - translation_length = np.sqrt(delta_x ** 2 + delta_y ** 2) - translation_angle = np.arctan2(delta_y, delta_x) / np.pi * 180 - bev_angle = ego_angle - translation_angle - shift_y = translation_length * \ - np.cos(bev_angle / 180 * np.pi) / grid_length_y / bev_h - shift_x = translation_length * \ - np.sin(bev_angle / 180 * np.pi) / grid_length_x / bev_w - shift_y = shift_y * self.use_shift - shift_x = shift_x * self.use_shift - shift = bev_queries.new_tensor( - [shift_x, shift_y]).permute(1, 0) # xy, bs -> bs, xy + # Step 2: obtain rotation angle and shift with ego motion + if self.use_can_bus: + delta_x = np.array([each['can_bus'][0] for each in cur_img_metas]) + delta_y = np.array([each['can_bus'][1] for each in cur_img_metas]) + ego_angle = np.array([each['can_bus'][-2] / np.pi * 180 for each in cur_img_metas]) + translation_length = np.sqrt(delta_x ** 2 + delta_y ** 2) + translation_angle = np.arctan2(delta_y, delta_x) / np.pi * 180 + bev_angle = ego_angle - translation_angle + shift_y = translation_length * \ + np.cos(bev_angle / 180 * np.pi) / grid_length_y / self.bev_h + shift_x = translation_length * \ + np.sin(bev_angle / 180 * np.pi) / grid_length_x / self.bev_w + shift_y = shift_y * self.use_shift + shift_x = shift_x * self.use_shift + shift = bev_queries.new_tensor(np.array([shift_x, shift_y])).permute(1, 0) # xy, bs -> bs, xy + else: + shift = bev_queries.new_zeros(bs, 2) + # Step 3: apply rotation to previous BEV features if prev_bev is not None: - if prev_bev.shape[1] == bev_h * bev_w: - prev_bev = prev_bev.permute(1, 0, 2) + if prev_bev.shape[1] == self.bev_h * self.bev_w: + prev_bev = prev_bev.permute(1, 0, 2) # (bev_h*bev_w, bs, embed_dims) - elif len(prev_bev.shape) == 4: - prev_bev = prev_bev.view(bs,-1,bev_h * bev_w).permute(2, 0, 1) + # elif len(prev_bev.shape) == 4: # (bs, embed_dims, bev_h, bev_w) + # prev_bev = prev_bev.view(bs, -1, self.bev_h * self.bev_w).permute(2, 0, 1) # (bev_h*bev_w, bs, embed_dims) + if self.rotate_prev_bev: for i in range(bs): - # num_prev_bev = prev_bev.size(1) - rotation_angle = kwargs['img_metas'][i]['can_bus'][-1] + rotation_angle = cur_img_metas[i]['can_bus'][-1] tmp_prev_bev = prev_bev[:, i].reshape( - bev_h, bev_w, -1).permute(2, 0, 1) - tmp_prev_bev = rotate(tmp_prev_bev, rotation_angle, - center=self.rotate_center) - tmp_prev_bev = tmp_prev_bev.permute(1, 2, 0).reshape( - bev_h * bev_w, 1, -1) + self.bev_h, self.bev_w, -1).permute(2, 0, 1) + tmp_prev_bev = rotate(tmp_prev_bev, rotation_angle, center=self.rotate_center) + tmp_prev_bev = tmp_prev_bev.permute(1, 2, 0).reshape(self.bev_h * self.bev_w, 1, -1) prev_bev[:, i] = tmp_prev_bev[:, 0] - # add can bus signals - can_bus = bev_queries.new_tensor( - [each['can_bus'] for each in kwargs['img_metas']]) # [:, :] - can_bus = self.can_bus_mlp(can_bus)[None, :, :] - bev_queries = bev_queries + can_bus * self.use_can_bus + # Step 4: apply ego motion shift 
to BEV queries + if self.use_can_bus: + can_bus = bev_queries.new_tensor(np.array([each['can_bus'] for each in cur_img_metas])) + can_bus = self.can_bus_mlp(can_bus)[None, :, :] + bev_queries = bev_queries + can_bus + # Step 5: flatten the multi level image features feat_flatten = [] spatial_shapes = [] - for lvl, feat in enumerate(mlvl_feats): + for lvl, feat in enumerate(multi_level_feats): + # For each level feature, flattens the height and width dimensions (last two dimensions) and permutes the dimensions to make the shape compatible with concatenation. bs, num_cam, c, h, w = feat.shape spatial_shape = (h, w) feat = feat.flatten(3).permute(1, 0, 3, 2) if self.use_cams_embeds: feat = feat + self.cams_embeds[:, None, None, :].to(feat.dtype) - feat = feat + self.level_embeds[None, - None, lvl:lvl + 1, :].to(feat.dtype) + feat = feat + self.level_embeds[None, None, lvl:lvl + 1, :].to(feat.dtype) spatial_shapes.append(spatial_shape) feat_flatten.append(feat) feat_flatten = torch.cat(feat_flatten, 2) - spatial_shapes = torch.as_tensor( - spatial_shapes, dtype=torch.long, device=bev_pos.device) - level_start_index = torch.cat((spatial_shapes.new_zeros( - (1,)), spatial_shapes.prod(1).cumsum(0)[:-1])) + spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=bev_pos.device) + level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])) + feat_flatten = feat_flatten.permute(0, 2, 1, 3) # (num_cam, H*W, bs, embed_dims) - feat_flatten = feat_flatten.permute( - 0, 2, 1, 3) # (num_cam, H*W, bs, embed_dims) + # Step 6: Use the encoder the get the BEV features + bev_embed = self.encoder(bev_query=bev_queries, + key=feat_flatten, + value=feat_flatten, + bev_pos=bev_pos, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + prev_bev=prev_bev, + img_metas=cur_img_metas, + shift=shift, + **kwargs) - bev_embed = self.encoder( - bev_queries, - feat_flatten, - feat_flatten, - bev_h=bev_h, - bev_w=bev_w, - bev_pos=bev_pos, - spatial_shapes=spatial_shapes, - level_start_index=level_start_index, - prev_bev=prev_bev, - shift=shift, - **kwargs - ) + results = { + "bev_embed": bev_embed, + "feat_flatten": feat_flatten, + "spatial_shapes": spatial_shapes, + "level_start_index": level_start_index, + "shift": shift, + } + + return results + + def temporal_fusion(self, cur_bev, cur_img_metas, prev_bev_list, prev_img_metas): + """ + Do Temporal Fusion. + Args: + Returns: + fusion_results (dict): with keys "bev_embed, extra". + bev_embed (torch.Tensor): Updated BEV features after some temporal fusion method. + extra (dict): with keys "refine_feat_w", which is a tensor with shape (w, h, z, 2). + or maybe empty dict. 
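        Example (toy-sized sketch of the padding step enabled by `use_padding`; tensor sizes are
        made up for illustration):

            import torch

            queue_length = 3
            cur_bev = torch.randn(1, 200 * 200, 256)                 # current-frame BEV
            prev_bev_list = [torch.randn(1, 200 * 200, 256)]         # only one history frame available
            bev_list = list(prev_bev_list) + [cur_bev]
            if len(bev_list) < queue_length:
                # repeat the oldest frame so the fusion module always receives queue_length frames
                bev_list = [bev_list[0].clone() for _ in range(queue_length - len(bev_list))] + bev_list
            assert len(bev_list) == queue_length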
+ """ + + # Step 1: prepare parameters + bev_list = list(prev_bev_list) + bev_list.append(cur_bev) + img_metas = list(prev_img_metas) + for batch_idx in range(len(cur_img_metas)): # for i in batch_size + each_batch_img_metas = img_metas[batch_idx] # dict[dict] + each_batch_img_metas[self.queue_length - 1] = cur_img_metas[batch_idx] - return bev_embed + # Step 2: padding(controlled by flag `use_padding`) + if len(bev_list) < self.queue_length and self.use_padding: + bev_list = [bev_list[0].clone() for _ in range(self.queue_length - len(bev_list))] + bev_list + queue_begin = list(img_metas[0].keys())[0] # the min index + for batch_idx in range(len(img_metas)): + for queue_idx in range(0, queue_begin): + img_metas[batch_idx][queue_idx] = img_metas[batch_idx][queue_begin].copy() - @auto_fp16(apply_to=('mlvl_feats', 'bev_queries', 'object_query_embed', 'prev_bev', 'bev_pos')) - def forward(self, - mlvl_feats, + # Step 3: reshape the img_metas to a list + keys_list = list(img_metas[0].keys()) + keys_list.sort() # HERE! + img_metas_list = [] + for key in keys_list: + for batch_idx in range(len(img_metas)): + img_metas_list.append(img_metas[batch_idx][key]) # list[dict] + + # Step 4: do temporal fusion + if self.use_temporal == 'costvolume' and len(bev_list) > 1: + update_bev, extra = self.costvolume(bev_list, img_metas_list) + fusion_results = {'bev_embed': update_bev, 'extra': extra} + + else: + # no fusion + fusion_results = {'bev_embed': cur_bev, 'extra': {}} + + return fusion_results + + @auto_fp16(apply_to=('multi_level_feats', 'bev_queries', 'prev_bev_list', 'bev_pos')) + def forward(self, multi_level_feats, bev_queries, - object_query_embed, - bev_h, - bev_w, - grid_length=[0.512, 0.512], bev_pos=None, - reg_branches=None, - cls_branches=None, - prev_bev=None, + cur_img_metas=None, + prev_bev_list=[], + prev_img_metas=[], **kwargs): - """Forward function for `Detr3DTransformer`. + """ + Forward function for `Detr3DTransformer`. Args: - mlvl_feats (list(Tensor)): Input queries from - different level. Each element has shape - [bs, num_cams, embed_dims, h, w]. - bev_queries (Tensor): (bev_h*bev_w, c) - bev_pos (Tensor): (bs, embed_dims, bev_h, bev_w) - object_query_embed (Tensor): The query embedding for decoder, - with shape [num_query, c]. - reg_branches (obj:`nn.ModuleList`): Regression heads for - feature maps from each decoder layer. Only would - be passed when `with_box_refine` is True. Default to None. + multi_level_feats (list(torch.Tensor)): Current multi level img features from the upstream network. + Each element has shape (bs, num_cams, embed_dims, h, w). + bev_queries (torch.Tensor): bev embedding with shape (hwz, embed_dims). + bev_pos (torch.Tensor): bev position embedding with shape (bs, embed_dims, 1, h, w). + img_metas (list[dict]): current img meta info. The list has length of batch size. + prev_bev_list (list(torch.Tensor)): BEV features of previous frames. Each has shape (bs, bev_h*bev_w, embed_dims). + prev_img_metas (list[dict[dict]]): Meta information of each sample. + The list has length of batch size. + The dict has keys len_queue-1-prev_bev_list_len, ..., len_queue-2. + The element of each key is a dict. + So each dict has length of prev_bev_list_len. Returns: - tuple[Tensor]: results of decoder containing the following tensor. - - bev_embed: BEV features - - inter_states: Outputs from decoder. If - return_intermediate_dec is True output has shape \ - (num_dec_layers, bs, num_query, embed_dims), else has \ - shape (1, bs, num_query, embed_dims). 
- - init_reference_out: The initial value of reference \ - points, has shape (bs, num_queries, 4). - - inter_references_out: The internal value of reference \ - points in decoder, has shape \ - (num_dec_layers, bs,num_query, embed_dims) - - enc_outputs_class: The classification score of \ - proposals generated from \ - encoder's feature maps, has shape \ - (batch, h*w, num_classes). \ - Only would be returned when `as_two_stage` is True, \ - otherwise None. - - enc_outputs_coord_unact: The regression results \ - generated from encoder's feature maps., has shape \ - (batch, h*w, 4). Only would \ - be returned when `as_two_stage` is True, \ - otherwise None. + bev_for_history (torch.Tensor): directly from self.get_bev_features with shape (bs, h*w, embed_dims) only used in inference. + outputs (torch.Tensor): bev_embed after fusion, decoder and predictor. shape (bs, w, h, z, c). + extra (dict): with keys "refine_feat_w", which is a tensor with shape (w, h, z, 2). + or maybe empty dict. """ - bev_embed = self.get_bev_features( - mlvl_feats, - bev_queries, - bev_h, - bev_w, - grid_length=grid_length, - bev_pos=bev_pos, - prev_bev=prev_bev, - **kwargs) # bev_embed shape: bs, bev_h*bev_w, embed_dims + # Step 1: prepare parameters + bev_h = self.bev_h + bev_w = self.bev_w + if len(prev_bev_list) > 0: + prev_bev = prev_bev_list[-1] # (bs, h*w*z, c) + else: + prev_bev = None + + # Step 2: get BEV features + get_bev_features_outputs = self.get_bev_features(multi_level_feats, + bev_queries, + bev_pos, + cur_img_metas, + prev_bev, + **kwargs) + bev_embed = get_bev_features_outputs['bev_embed'] + bev_for_history = bev_embed.clone() # (bs, h*w, embed) - bs = mlvl_feats[0].size(0) - bev_embed = bev_embed.permute(0, 2, 1).view(bs, -1, bev_h, bev_w) + # Step 3: do temporal fusion + outputs = self.temporal_fusion(bev_embed, cur_img_metas, prev_bev_list, prev_img_metas) + bev_embed = outputs['bev_embed'] + extra = outputs['extra'] + + # Step 4: Decoder and predictor + bs = multi_level_feats[0].size(0) + # bev_embed = bev_embed.permute(0, 2, 1).view(bs, -1, bev_h, bev_w) # (bs, embed_dims, h, w) if self.use_3d: - outputs=self.decoder(bev_embed.view(bs,-1,self.pillar_h,bev_h, bev_w)) - outputs=outputs.permute(0,4,3,2,1) + assert NotImplementedError elif self.use_conv: + assert NotImplementedError - outputs = self.decoder(bev_embed) - outputs = outputs.view(bs, -1,self.pillar_h, bev_h, bev_w).permute(0,3,4,2, 1) else: - outputs = self.decoder(bev_embed.permute(0,2,3,1)) - outputs = outputs.view(bs, bev_h, bev_w,self.pillar_h,self.out_dim) - outputs = self.predicter(outputs) - # print('outputs',type(outputs)) - return bev_embed, outputs \ No newline at end of file + bev_embed = bev_embed.permute(0, 2, 1).view(bs, -1, bev_h, bev_w) # (bs, embed_dims, h, w) + # outputs = self.decoder(bev_embed.permute(0,2,3,1)) + outputs = self.decoder(bev_embed.permute(0, 3, 2, 1)) # bs, w, h, embed_dims, + outputs = outputs.view(bs, bev_w, bev_h, self.pillar_h, self.out_dim) + + # outputs = self.decoder(bev_embed) # (bs, bev_h*bev_w, embed_dims * 2) + # outputs = outputs.permute(0, 2, 1).view(bs, self.out_dim, self.pillar_h, bev_h, bev_w) # (bs, out_dim, pillar_h, h, w) + # outputs = outputs.permute(0, 4, 3, 2, 1) # (bs, w, h, pillar_h, out_dim) + outputs = self.predicter(outputs) # (bs, w, h, pillar_h, num_classes) + + return bev_for_history, outputs, extra diff --git a/projects/mmdet3d_plugin/bevformer/modules/occ_transformer_waymo.py b/projects/mmdet3d_plugin/bevformer/modules/occ_transformer_waymo.py new file mode 100644 
index 0000000..4d98194 --- /dev/null +++ b/projects/mmdet3d_plugin/bevformer/modules/occ_transformer_waymo.py @@ -0,0 +1,560 @@ +# --------------------------------------------- +# Copyright (c) OpenMMLab. All rights reserved. +# --------------------------------------------- +# Modified by Zhiqi Li +# --------------------------------------------- + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.nn.init import normal_ +from torchvision.transforms.functional import rotate + +from mmcv.cnn import xavier_init +from mmcv.runner import force_fp32, auto_fp16 +from mmcv.runner.base_module import BaseModule +from mmcv.cnn.bricks.transformer import build_transformer_layer_sequence +from mmcv.runner.base_module import BaseModule +from mmdet.models.utils.builder import TRANSFORMER + +from projects.mmdet3d_plugin.bevformer.modules.temporal_self_attention import TemporalSelfAttention +from projects.mmdet3d_plugin.bevformer.modules.spatial_cross_attention import MSDeformableAttention3D +from projects.mmdet3d_plugin.bevformer.modules.decoder import CustomMSDeformableAttention +from projects.mmdet3d_plugin.bevformer.modules.cost_volume_module import CostVolumeModule +from projects.mmdet3d_plugin.bevformer.modules.concat_conv_module import ConcatConvModule + +@TRANSFORMER.register_module() +class CVTOccTransformerWaymo(BaseModule): + """Implements the Detr3D transformer. + Args: + as_two_stage (bool): Generate query from encoder features. + Default: False. + num_feature_levels (int): Number of feature maps from FPN: + Default: 4. + two_stage_num_proposals (int): Number of proposals when set + `as_two_stage` as True. Default: 300. + """ + + def __init__(self, + volume_flag=True, + num_feature_levels=4, + num_cams=6, + queue_length=3, + sampled_queue_length=1, + two_stage_num_proposals=300, + encoder=None, + decoder=None, + embed_dims=256, + pc_range=None, + voxel_size=None, + occ_voxel_size=None, + use_larger=True, + use_temporal=None, + rotate_prev_bev=False, + use_shift=False, + use_can_bus=False, + can_bus_norm=True, + use_cams_embeds=True, + use_3d_decoder=False, + use_conv_decoder=False, + rotate_center=[100, 100], + scales=[0.8, 0.9, 1.0, 1.1, 1.2], + num_classes=18, + out_dim=32, + pillar_h=16, + bev_z=1, + bev_h=200, + bev_w=200, + total_z=16, + iter_encoders=None, + use_padding=False, + topK_method='foreground', + **kwargs): + super(CVTOccTransformerWaymo, self).__init__(**kwargs) + self.volume_flag = volume_flag + self.encoder = build_transformer_layer_sequence(encoder) + if iter_encoders is not None: # default is None + self.iter_encoders = torch.nn.ModuleList([build_transformer_layer_sequence(encoder) for encoder in iter_encoders]) + self.decoder = build_transformer_layer_sequence(decoder) + self.topK_method = topK_method + self.queue_length = queue_length + self.sampled_queue_length = sampled_queue_length + self.embed_dims = embed_dims + self.num_feature_levels = num_feature_levels + self.num_cams = num_cams + self.fp16_enabled = False + self.pc_range = pc_range + self.voxel_size = voxel_size + self.occ_voxel_size = occ_voxel_size + self.use_larger = use_larger + self.use_temporal = use_temporal + self.rotate_prev_bev = rotate_prev_bev + self.use_shift = use_shift + self.use_can_bus = use_can_bus + self.can_bus_norm = can_bus_norm + self.use_cams_embeds = use_cams_embeds + self.use_3d_decoder=use_3d_decoder + self.use_conv_decoder = use_conv_decoder + self.pillar_h = pillar_h + self.out_dim = out_dim + self.bev_z = bev_z + self.bev_h = bev_h 
+ self.bev_w = bev_w + self.total_z = total_z + self.real_w = self.pc_range[3] - self.pc_range[0] + self.real_h = self.pc_range[4] - self.pc_range[1] + self.grid_length = (self.real_h / self.bev_h, self.real_w / self.bev_w) + self.channels=self.embed_dims//self.total_z # 256//16=16 + self.scales=scales + self.use_padding = use_padding + if self.use_temporal == 'costvolume': + self.costvolume = CostVolumeModule(bev_h=self.bev_h, + bev_w=self.bev_w, + total_z=self.total_z, + channels=self.channels, + pc_range=self.pc_range, + voxel_size=self.voxel_size, + sampled_queue_length=self.sampled_queue_length, + scales=self.scales,) + if self.use_temporal == 'concat_conv': + self.concatconv = ConcatConvModule(bev_h=self.bev_h, + bev_w=self.bev_w, + total_z=self.total_z, + channels=self.channels, + pc_range=self.pc_range, + voxel_size=self.voxel_size, + sampled_queue_length=self.sampled_queue_length,) + + # choose predictor + if not self.use_3d_decoder: + if self.use_conv_decoder: + _out_dim = self.embed_dims*self.pillar_h + # because not use 3d, so total_z=1 + self.predicter = nn.Sequential( + nn.Linear(_out_dim//total_z, self.embed_dims//2), + nn.Softplus(), + nn.Linear(self.embed_dims//2,num_classes), + ) + else:raise NotImplementedError + else: + # use_3d_decoder enter here + _out_dim = self.embed_dims + self.predicter = nn.Sequential( + nn.Linear(_out_dim, self.embed_dims//2), + nn.Softplus(), + nn.Linear(self.embed_dims//2, 2), # binary classify + ) + + self.two_stage_num_proposals = two_stage_num_proposals + self.init_layers() + self.rotate_center = rotate_center + + def init_layers(self): + """Initialize layers of the Detr3DTransformer.""" + self.level_embeds = nn.Parameter(torch.Tensor( + self.num_feature_levels, self.embed_dims)) + self.cams_embeds = nn.Parameter( + torch.Tensor(self.num_cams, self.embed_dims)) + # self.reference_points = nn.Linear(self.embed_dims, 3) + self.can_bus_mlp = nn.Sequential( + nn.Linear(18, self.embed_dims // 2), + nn.ReLU(inplace=True), + nn.Linear(self.embed_dims // 2, self.embed_dims), + nn.ReLU(inplace=True), + ) + if self.can_bus_norm: + self.can_bus_mlp.add_module('norm', nn.LayerNorm(self.embed_dims)) + + def init_weights(self): + """Initialize the transformer weights.""" + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + for m in self.modules(): + if isinstance(m, MSDeformableAttention3D) or isinstance(m, TemporalSelfAttention) \ + or isinstance(m, CustomMSDeformableAttention): + try: + m.init_weight() + except AttributeError: + m.init_weights() + normal_(self.level_embeds) + normal_(self.cams_embeds) + # xavier_init(self.reference_points, distribution='uniform', bias=0.) + xavier_init(self.can_bus_mlp, distribution='uniform', bias=0.) + + @auto_fp16(apply_to=('multi_level_feats', 'bev_queries', 'prev_bev', 'bev_pos')) + def get_bev_features(self, multi_level_feats, + bev_queries, + bev_pos=None, + img_metas=None, + prev_bev=None, + **kwargs): + """ + This method is used to obtain Bird's Eye View (BEV) features from multi-level features, BEV queries, and other related parameters. + use bev queries to find feature in multi-view camera img + Args: + bev_queries (torch.Tensor): (bev_h*bev_w, c). used in decoder + bev_pos (torch.Tensor): (bs, embed_dims, bev_h, bev_w). used in decoder + prev_bev (torch.Tensor): BEV features of the previous sample. + Returns: + results (dict): with keys "bev_embed, feat_flatten, spatial_shapes, level_start_index, shift". + bev_embed (torch.Tensor): BEV feature for current frame. 
+ feat_flatten (torch.Tensor): Each level img feature, flattens the height and width dimensions and combine together. + shape (num_cam, bs, h*w, c). h*w are sum of all fallten h*w of all levels = 12750. + spatial_shapes (torch.Tensor): Record the shape of each level img feature. + tensor([[ 80, 120],[ 40, 60],[ 20, 30],[ 10, 15]]). + level_start_index (torch.Tensor): Record the start index of each level img feature in feat_flatten. + tensor([0, 9600, 12000, 12600]). + shift (torch.Tensor): shift of ego car in x and y axis. shape (1, 2). + """ + + # Step 1: obtain parameters + bev_z = self.bev_z + bev_h = self.bev_h + bev_w = self.bev_w + grid_length = self.grid_length + bs = multi_level_feats[0].size(0) + bev_queries = bev_queries.unsqueeze(1).repeat(1, bs, 1) # TODO why reshape here? + bev_pos = bev_pos.flatten(2).permute(2, 0, 1) + + # Step 2: obtain rotation angle and shift with ego motion + grid_length_y = grid_length[0] # 0.4 + grid_length_x = grid_length[1] # 0.4 + if self.use_can_bus: # Default value is False + delta_x = np.array([each['can_bus'][0]for each in img_metas]) + delta_y = np.array([each['can_bus'][1]for each in img_metas]) + ego_angle = np.array([each['can_bus'][-2] / np.pi * 180 for each in img_metas]) + translation_length = np.sqrt(delta_x ** 2 + delta_y ** 2) + translation_angle = np.arctan2(delta_y, delta_x) / np.pi * 180 + bev_angle = ego_angle - translation_angle + shift_y = translation_length * \ + np.cos(bev_angle / 180 * np.pi) / grid_length_y / bev_h + shift_x = translation_length * \ + np.sin(bev_angle / 180 * np.pi) / grid_length_x / bev_w + shift_y = shift_y * self.use_shift + shift_x = shift_x * self.use_shift + shift = bev_queries.new_tensor([shift_x, shift_y]).permute(1, 0) # (2, 1) -> (1, 2) + else: + shift = bev_queries.new_zeros((1, 2)) + + # Step 3: apply rotation to previous BEV features + if prev_bev is not None: + if self.volume_flag: + if prev_bev.shape[1] == bev_h * bev_w * bev_z: + prev_bev = prev_bev.permute(1, 0, 2) + elif len(prev_bev.shape) == 4: + prev_bev = prev_bev.view(bs,-1,bev_h * bev_w).permute(2, 0, 1) + elif len(prev_bev.shape) == 5: + prev_bev = prev_bev.view(bs, -1,bev_z* bev_h * bev_w).permute(2, 0, 1) + else: + # HERE + if prev_bev.shape[1] == bev_h * bev_w: + prev_bev = prev_bev.permute(1, 0, 2) # (bs, bev_h*bev_w, embed_dims) -> (bev_h*bev_w, bs, embed_dims) + elif len(prev_bev.shape) == 4: # nuscene + prev_bev = prev_bev.view(bs, -1, bev_h * bev_w).permute(2, 0, 1) # (bs, embed_dims, h, w) -> (bev_h*bev_w, bs, embed_dims) + + if self.rotate_prev_bev: # Default value is False + for i in range(bs): + rotation_angle = img_metas[i]['can_bus'][-1] + if self.volume_flag: + tmp_prev_bev = prev_bev[:, i].reshape( + bev_z, bev_h, bev_w, -1).permute(3, 0, 1, 2) + tmp_prev_bev = rotate(tmp_prev_bev, rotation_angle, + center=self.rotate_center) + tmp_prev_bev = tmp_prev_bev.permute(1, 2,3, 0).reshape( + bev_z * bev_h * bev_w, 1, -1) + else: + tmp_prev_bev = prev_bev[:, i].reshape( + bev_h, bev_w, -1).permute(2, 0, 1) + tmp_prev_bev = rotate(tmp_prev_bev, rotation_angle, + center=self.rotate_center) + tmp_prev_bev = tmp_prev_bev.permute(1, 2, 0).reshape( + bev_h * bev_w, 1, -1) + prev_bev[:, i] = tmp_prev_bev[:, 0] + + # Step 4: apply ego motion shift to BEV queries + if self.use_can_bus: # Default value is False + can_bus = bev_queries.new_tensor([each['can_bus'] for each in img_metas]) + can_bus = self.can_bus_mlp(can_bus)[None, :, :] + bev_queries = bev_queries + can_bus + + # Step 5: flatten the multi level image features + 
feat_flatten = [] + spatial_shapes = [] + for lvl, feat in enumerate(multi_level_feats): + # For each level feature, flattens the height and width dimensions (last two dimensions) and permutes the dimensions to make the shape compatible with concatenation. + bs, _, _, h, w = feat.shape # bs, n_views, c, h, w + feat = feat.flatten(3).permute(1, 0, 3, 2) + if self.use_cams_embeds: + feat = feat + self.cams_embeds[:, None, None, :].to(feat.dtype) + feat = feat + self.level_embeds[None,None, lvl:lvl + 1, :].to(feat.dtype) + spatial_shape = (h, w) + spatial_shapes.append(spatial_shape) # list[tuple] + feat_flatten.append(feat) + feat_flatten = torch.cat(feat_flatten, 2) # (num_cam, bs, h*w, c). h*w are sum of all fallten h*w of all levels = 12750 + + spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=bev_queries.device) + level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])) + feat_flatten = feat_flatten.permute(0, 2, 1, 3) # (num_cam, h*w, bs, embed_dims) + + # Step 6: Use the encoder the get the BEV features + bev_embed = self.encoder(bev_query=bev_queries, + key=feat_flatten, + value=feat_flatten, + bev_pos=bev_pos, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + prev_bev=prev_bev, + img_metas=img_metas, + shift=shift, + **kwargs) + + results = { + "bev_embed": bev_embed, + "feat_flatten": feat_flatten, + "spatial_shapes": spatial_shapes, + "level_start_index": level_start_index, + "shift": shift, + } + + return results + + def temporal_fusion(self, cur_bev, prev_bev_list, prev_img_metas, cur_img_metas): + """ + Do Temporal Fusion. + Args: + Returns: + fusion_results (dict): with keys "_bev_embed, extra". + _bev_embed (torch.Tensor): Updated BEV features after some temporal fusion method. + extra (dict): with keys "refine_feat_w", which is a tensor with shape (w, h, z, 2). + or maybe empty dict. + """ + + # Step 1: prepare parameters + bev_list = list(prev_bev_list) + bev_list.append(cur_bev) + img_metas = list(prev_img_metas) + for batch_idx in range(len(cur_img_metas)): # for i in batch_size + each_batch_img_metas = img_metas[batch_idx] # dict[dict] + each_batch_img_metas[self.sampled_queue_length - 1] = cur_img_metas[batch_idx] + + # Step 2: padding(controlled by flag `use_padding`) + if len(bev_list) < self.sampled_queue_length and self.use_padding: + bev_list = [bev_list[0].clone() for _ in range(self.sampled_queue_length - len(bev_list))] + bev_list + queue_begin = list(img_metas[0].keys())[0] # the min index + for batch_idx in range(len(img_metas)): + for queue_idx in range(0, queue_begin): + img_metas[batch_idx][queue_idx] = img_metas[batch_idx][queue_begin].copy() + + # Step 3: reshape the img_metas to a list + keys_list = list(img_metas[0].keys()) + keys_list.sort() # HERE! 
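+        # The keys of each per-sample dict in `img_metas` are queue indices, so sorting
+        # them restores temporal order. The flattened list built below is therefore
+        # frame-major: all batch samples of the oldest frame first, the current frame last,
+        # which is the layout consumed together with `bev_list` by the fusion modules.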
+ img_metas_list = [] + for key in keys_list: + for batch_idx in range(len(img_metas)): + img_metas_list.append(img_metas[batch_idx][key]) # list[dict] + + # Step 4: do temporal fusion + if self.use_temporal == 'costvolume' and len(bev_list) > 1: + update_bev, extra = self.costvolume(bev_list, img_metas_list) + fusion_results = {'_bev_embed': update_bev, 'extra': extra} + + elif self.use_temporal == 'concat_conv' and len(bev_list) > 1: + update_bev = self.concatconv(bev_list, img_metas_list) + fusion_results = {'_bev_embed': update_bev, 'extra': {}} + + else: + # no fusion + fusion_results = {'_bev_embed': cur_bev, 'extra': {}} + + return fusion_results + + @auto_fp16(apply_to=('multi_level_feats', 'bev_queries', 'object_query_embed', 'prev_bev_list', 'bev_pos')) + def forward(self, multi_level_feats, + bev_queries, + bev_pos=None, + img_metas=None, + prev_bev_list=[], + prev_img_metas=[], + **kwargs): + """ + Forward function for `Detr3DTransformer`. + Args: + multi_level_feats (list(torch.Tensor)): Current multi level img features from the upstream network. + Each element has shape (bs, num_cams, embed_dims, h, w). + bev_queries (torch.Tensor): bev embedding with shape (hwz, embed_dims). + bev_pos (torch.Tensor): bev position embedding with shape (bs, embed_dims, 1, h, w). + img_metas (list[dict]): current img meta info. The list has length of batch size. + prev_bev_list (list(torch.Tensor)): BEV features of previous frames. Each has shape (bs, bev_h*bev_w, embed_dims). + prev_img_metas (list[dict[dict]]): Meta information of each sample. + The list has length of batch size. + The dict has keys len_queue-1-prev_bev_list_len, ..., len_queue-2. + The element of each key is a dict. + So each dict has length of prev_bev_list_len. + Returns: + bev_for_history (torch.Tensor): directly from self.get_bev_features with shape (bs, h*w, embed_dims) only used in inference. + outputs (torch.Tensor): bev_embed after fusion, decoder and predictor. shape (bs, w, h, z, c). + extra (dict): with keys "refine_feat_w", which is a tensor with shape (w, h, z, 2). + or maybe empty dict. 
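+                When `use_3d_decoder` is enabled, `extra` additionally carries
+                "bev_embed_list" and "outputs_list", the intermediate features and
+                predictions of each iterative refinement stage.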
+ """ + + # Step 1: prepare parameters + bev_z = self.bev_z + bev_h = self.bev_h + bev_w = self.bev_w + if not self.volume_flag: assert bev_z == 1 + if len(prev_bev_list) > 0: + prev_bev = prev_bev_list[-1] # (bs, h*w*z, c) + else: + prev_bev = None + + # Step 2: get BEV features + get_bev_features_outputs = self.get_bev_features(multi_level_feats, + bev_queries, + bev_pos=bev_pos, + img_metas=img_metas, + prev_bev=prev_bev, + **kwargs) + _bev_embed = get_bev_features_outputs['bev_embed'] + bev_for_history = _bev_embed + + # Step 3: do temporal fusion + outputs = self.temporal_fusion(_bev_embed, prev_bev_list, prev_img_metas, img_metas) + _bev_embed = outputs['_bev_embed'] + extra = outputs['extra'] # a empty dict or containing refine_feat_w + _bev_embed = _bev_embed.to(bev_for_history.dtype) + + # Step 4: Decoder and Predictor + # assert bev_embed in [bs, DHW, C] order + bev_embed_bs_DHW_C = _bev_embed # [bs, 40000, 256] + feat_flatten = get_bev_features_outputs['feat_flatten'] # [num_cams, 12750, bs, 256] + spatial_shapes = get_bev_features_outputs['spatial_shapes'] # [80, 120]=>[10,15] + level_start_index = get_bev_features_outputs['level_start_index'] + shift = get_bev_features_outputs['shift'] + + bs = multi_level_feats[0].size(0) + if self.use_3d_decoder: + zz = bev_z if self.volume_flag else self.pillar_h + bev_embed_bs_C_D_H_W = bev_embed_bs_DHW_C.permute(0, 2, 1).view(bs, -1, zz, bev_h, bev_w) + res_bs_C_D_H_W = self.decoder(bev_embed_bs_C_D_H_W) + bev_embed_bs_C_D_H_W = bev_embed_bs_C_D_H_W + res_bs_C_D_H_W + bev_embed_bs_W_H_D_C = bev_embed_bs_C_D_H_W.permute(0,4,3,2,1) + outputs_bs_W_H_D_C = self.predicter(bev_embed_bs_W_H_D_C) + + bev_embed_list = [bev_embed_bs_W_H_D_C] + outputs_list = [outputs_bs_W_H_D_C] + topk_dim = 1 # 1 for foreground + for iter_i, iter_encoder in enumerate(self.iter_encoders): + # topk voxel + topk_ratio = iter_encoder.topk_ratio + if self.topK_method == 'foreground' or self.topK_method == 'no_cross_atten' or self.topK_method == 'no_conv': + outputs_onedim_bs_W_H_D = outputs_bs_W_H_D_C[:, :, :, :, topk_dim] + outputs_squeeze_bsWHD = outputs_onedim_bs_W_H_D.reshape(-1) + topk_mask_bs_W_H_D = torch.zeros_like(outputs_onedim_bs_W_H_D, dtype=torch.bool) + topk_mask_squeeze_bsWHD = topk_mask_bs_W_H_D.reshape(-1) + topk = int(outputs_squeeze_bsWHD.shape[0] * topk_ratio) + indices = torch.topk(outputs_squeeze_bsWHD, topk).indices + topk_mask_squeeze_bsWHD[indices] = True + elif self.topK_method == 'ambiguous': + scores_bs_W_H_D = outputs_bs_W_H_D_C.softmax(dim=-1)[:, :, :, :, topk_dim] + ambiguous_bs_W_H_D = 1 - torch.abs(0.5 - scores_bs_W_H_D) + ambiguous_squeeze_bsWHD = ambiguous_bs_W_H_D.reshape(-1) + topk_mask_bs_W_H_D = torch.zeros_like(ambiguous_bs_W_H_D, dtype=torch.bool) + topk_mask_squeeze_bsWHD = topk_mask_bs_W_H_D.reshape(-1) + topk = int(ambiguous_squeeze_bsWHD.shape[0] * topk_ratio) + indices = torch.topk(ambiguous_squeeze_bsWHD, topk).indices + topk_mask_squeeze_bsWHD[indices] = True + elif self.topK_method == 'mixed': + scores_bs_W_H_D = outputs_bs_W_H_D_C.softmax(dim=-1)[:, :, :, :, topk_dim] + ambiguous_bs_W_H_D = 1 - torch.abs(0.5 - scores_bs_W_H_D) + ambiguous_squeeze_bsWHD = ambiguous_bs_W_H_D.reshape(-1) + topk_mask_bs_W_H_D = torch.zeros_like(ambiguous_bs_W_H_D, dtype=torch.bool) + topk_mask_squeeze_bsWHD = topk_mask_bs_W_H_D.reshape(-1) + topk = int(ambiguous_squeeze_bsWHD.shape[0] * topk_ratio * 0.5) + indices = torch.topk(ambiguous_squeeze_bsWHD, topk).indices + topk_mask_squeeze_bsWHD[indices] = True + + outputs_onedim_bs_W_H_D = 
outputs_bs_W_H_D_C[:, :, :, :, topk_dim] + outputs_squeeze_bsWHD = outputs_onedim_bs_W_H_D.reshape(-1) + topk = int(outputs_squeeze_bsWHD.shape[0] * topk_ratio * 0.5) + indices = torch.topk(outputs_squeeze_bsWHD, topk).indices + topk_mask_squeeze_bsWHD[indices] = True + elif self.topK_method == 'random': + outputs_onedim_bs_W_H_D = outputs_bs_W_H_D_C[:, :, :, :, topk_dim] + outputs_squeeze_bsWHD = outputs_onedim_bs_W_H_D.reshape(-1) + topk_mask_bs_W_H_D = torch.zeros_like(outputs_onedim_bs_W_H_D, dtype=torch.bool) + topk_mask_squeeze_bsWHD = topk_mask_bs_W_H_D.reshape(-1) + topk = int(outputs_squeeze_bsWHD.shape[0] * topk_ratio) + # indices = torch.topk(outputs_squeeze_bsWHD, topk).indices + indices = torch.randint(low=0, high=outputs_squeeze_bsWHD.shape[0], size=(topk,)).to(topk_mask_squeeze_bsWHD.device) + topk_mask_squeeze_bsWHD[indices] = True + else: + raise NotImplementedError + + # upsample + bs, C, D, H, W = bev_embed_bs_C_D_H_W.shape + tg_D, tg_H, tg_W = iter_encoder.DHW + topk_mask_bs_D_H_W = topk_mask_bs_W_H_D.permute(0, 3, 2, 1) + topk_mask_bs_C_D_H_W = topk_mask_bs_D_H_W.unsqueeze(dim=1) # => bs,1,D,H,W + update_bev_embed_bs_C_D_H_W = F.interpolate(bev_embed_bs_C_D_H_W, size=(tg_D, tg_H, tg_W), mode='trilinear', align_corners=True) + update_topk_bs_C_D_H_W = F.interpolate(topk_mask_bs_C_D_H_W.float(), size=(tg_D, tg_H, tg_W), mode='trilinear', align_corners=True) + update_topk_bs_C_D_H_W = update_topk_bs_C_D_H_W > 0 + update_topk_bs_D_H_W = update_topk_bs_C_D_H_W.squeeze(dim=1) + update_bev_embed_bs_C_DHW = update_bev_embed_bs_C_D_H_W.reshape(bs, C, tg_D*tg_H*tg_W) + update_bev_embed_DHW_bs_C = update_bev_embed_bs_C_DHW.permute(2, 0, 1) # => (DHW, bs, C) + update_topk_bs_DHW = update_topk_bs_D_H_W.reshape(bs, tg_D*tg_H*tg_W) + bev_embed_bs_DHW_C = iter_encoder( + update_bev_embed_DHW_bs_C, + feat_flatten, + feat_flatten, + bev_z=tg_D, + bev_h=tg_H, + bev_w=tg_W, + bev_pos=None, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + prev_bev=None, + shift=shift, + topk_mask=update_topk_bs_DHW, + **kwargs + ) + update_bev_embed_bs_DHW_C = update_bev_embed_bs_C_DHW.permute(0, 2, 1) + if self.topK_method != 'no_cross_atten': + bev_embed_bs_DHW_C = bev_embed_bs_DHW_C + update_bev_embed_bs_DHW_C + else: + bev_embed_bs_DHW_C = update_bev_embed_bs_DHW_C + bev_embed_bs_C_D_H_W = bev_embed_bs_DHW_C.permute(0, 2, 1).view(bs, -1, tg_D, tg_H, tg_W) + if self.topK_method != 'no_conv': + res_bs_C_D_H_W = self.iter_decoders[iter_i](bev_embed_bs_C_D_H_W) + bev_embed_bs_C_D_H_W = bev_embed_bs_C_D_H_W + res_bs_C_D_H_W + bev_embed_bs_W_H_D_C = bev_embed_bs_C_D_H_W.permute(0,4,3,2,1) + outputs_bs_W_H_D_C = self.iter_predicters[iter_i](bev_embed_bs_W_H_D_C) + outputs = outputs_bs_W_H_D_C + # previous binary seg, last semantic seg + if iter_i != len(self.iter_encoders)-1: + bev_embed_list.append(bev_embed_bs_W_H_D_C) + outputs_list.append(outputs_bs_W_H_D_C) + + extra['bev_embed_list'] = bev_embed_list + extra['outputs_list'] = outputs_list + + elif self.use_conv_decoder: + """ + If the `use_conv_decoder` flag is set to True, the BEV features are processed using the conventional convolutional decoder. + The BEV features are reshaped and passed through the decoder and predicter. 
+            """
+
+            bev_embed = bev_embed_bs_DHW_C # [1, 40000, 256]
+            total_z = self.total_z
+            bev_embed = bev_embed.permute(0, 2, 1).view(bs, -1, bev_h, bev_w) # [1, 40000, 256] -> [1, 256, 200, 200]
+            outputs = self.decoder(bev_embed) # [1, 256, 200, 200]
+            outputs = outputs.view(bs, -1, self.total_z, bev_h, bev_w).permute(0,4,3,2,1).contiguous() # [bs, c, z, h, w] -> [bs, w, h, z, c]
+            outputs = outputs.reshape(bs * bev_w * bev_h * total_z, -1) # [640000, 16]
+            outputs = self.predicter(outputs) # [640000, 16]
+            outputs = outputs.view(bs, bev_w, bev_h, total_z, -1) # [1, 200, 200, 16, 16]
+
+        else:
+            raise NotImplementedError
+
+        return bev_for_history, outputs, extra
diff --git a/projects/mmdet3d_plugin/bevformer/modules/pyramid_transformer.py b/projects/mmdet3d_plugin/bevformer/modules/pyramid_transformer.py
new file mode 100644
index 0000000..f19c427
--- /dev/null
+++ b/projects/mmdet3d_plugin/bevformer/modules/pyramid_transformer.py
@@ -0,0 +1,542 @@
+# ---------------------------------------------
+# Copyright (c) OpenMMLab. All rights reserved.
+# ---------------------------------------------
+# Modified by Zhiqi Li
+# ---------------------------------------------
+
+import numpy as np
+import torch
+import torch.nn as nn
+from mmcv.cnn import xavier_init
+from mmcv.cnn.bricks.transformer import build_transformer_layer_sequence, build_positional_encoding
+from mmcv.runner.base_module import BaseModule
+
+from mmdet.models.utils.builder import TRANSFORMER
+from torch.nn.init import normal_
+from projects.mmdet3d_plugin.models.utils.visual import save_tensor
+from mmcv.runner.base_module import BaseModule
+from torchvision.transforms.functional import rotate
+from .temporal_self_attention import TemporalSelfAttention
+from .spatial_cross_attention import MSDeformableAttention3D
+from .decoder import CustomMSDeformableAttention
+from projects.mmdet3d_plugin.models.utils.bricks import run_time
+from mmcv.runner import force_fp32, auto_fp16
+from mmcv.cnn import PLUGIN_LAYERS, Conv2d,Conv3d, ConvModule, caffe2_xavier_init
+
+
+
+@TRANSFORMER.register_module()
+class PyramidTransformer(BaseModule):
+    """Implements the Detr3D transformer.
+    Args:
+        as_two_stage (bool): Generate query from encoder features.
+            Default: False.
+        num_feature_levels (int): Number of feature maps from FPN:
+            Default: 4.
+        two_stage_num_proposals (int): Number of proposals when set
+            `as_two_stage` as True. Default: 300.
+ """ + + def __init__(self, + num_feature_levels=4, + num_cams=6, + two_stage_num_proposals=300, + conv_nums=[4,4], + encoder=None, + decoder=None, + act_cfg=None, + norm_cfg_3d=dict(type='SyncBN', requires_grad=True), + position=None, # positional embedding of query point + pc_range=None, + encoder_embed_dims=[256, 256, 128, 64], + feature_map_z=[1, 4, 8, 16], + dilations=[2,2,2,2], + paddings=[2,2,2,2], + bev_h_list=[50,100,200,200], + bev_w_list=[50,100,200,200], + embed_dims=256, + more_conv=False, + use_conv=False, + rotate_prev_bev=True, + use_shift=True, + use_can_bus=True, + can_bus_norm=True, + use_cams_embeds=True, + rotate_center=[100, 100], + decoder_on_bev=True, + bev_z=16, + **kwargs): + super(PyramidTransformer, self).__init__(**kwargs) + self.pc_range=pc_range + self.real_w = self.pc_range[3] - self.pc_range[0] + self.real_h = self.pc_range[4] - self.pc_range[1] + self.more_conv=more_conv + self.use_conv=use_conv + self.encoders = [] + self.positional_encodings = [] + self.encoder_block_num = len(encoder) + self.encoder_keys = [] + self.feature_map_z = feature_map_z + self.encoder_embed_dims = encoder_embed_dims + self.dilations = dilations + self.paddings=paddings + self.norm_cfg_3d=norm_cfg_3d + self.act_cfg=act_cfg + self.conv_nums=conv_nums + for encoder_key in encoder: + self.encoder_keys.append(encoder_key) + self.encoders.append(build_transformer_layer_sequence(encoder[encoder_key])) + self.positional_encodings.append(build_positional_encoding(position[encoder_key])) + + # register model + for i, layer in enumerate(self.encoders): + self.add_module('encoder_{}'.format(i), layer) + for i, layer in enumerate(self.positional_encodings): + self.add_module('pos_{}'.format(i), layer) + + + self.embed_dims = embed_dims + self.num_feature_levels = num_feature_levels + self.num_cams = num_cams + self.fp16_enabled = False + self.embed_dim_ratios=[ embed_dims//dim for dim in encoder_embed_dims] + + self.bev_h_list = bev_h_list + self.bev_w_list = bev_w_list + + self.rotate_prev_bev = rotate_prev_bev + self.use_shift = use_shift + self.use_can_bus = use_can_bus + self.can_bus_norm = can_bus_norm + self.use_cams_embeds = use_cams_embeds + self.decoder_on_bev = decoder_on_bev + self.bev_z = bev_z + self.two_stage_num_proposals = two_stage_num_proposals + + self.init_layers() + self.rotate_center = rotate_center + + def init_layers(self): + """Initialize layers of the Detr3DTransformer.""" + self.level_embeds = nn.Parameter(torch.Tensor( + self.num_feature_levels, self.embed_dims)) + self.cams_embeds = nn.Parameter( + torch.Tensor(self.num_cams, self.embed_dims)) + # self.reference_points = nn.Linear(self.embed_dims, 3) + + self.can_bus_mlp = nn.Sequential( + nn.Linear(18, self.embed_dims // 2), + nn.ReLU(inplace=True), + nn.Linear(self.embed_dims // 2, self.embed_dims), + nn.ReLU(inplace=True), + ) + if self.can_bus_norm: + self.can_bus_mlp.add_module('norm', nn.LayerNorm(self.embed_dims)) + + + + # mid-stage bev->voxe->voxel-> voxel + + self.convs = [] + self.up_convs=[] + for i in range(self.encoder_block_num): + if self.feature_map_z[i]==1: + conv_cfg=dict(type='Conv2d') + else: + conv_cfg=dict(type='Conv3d') + each_stage_convs=[] + for j in range(self.conv_nums[i]): + dilation=(j+1)%2+1 + each_stage_convs.append( + ConvModule( + self.encoder_embed_dims[i], + self.encoder_embed_dims[i], + kernel_size=3, + stride=1, + padding=dilation, + dilation=dilation, + # bias=use_bias_3d, + conv_cfg=conv_cfg, + norm_cfg=self.norm_cfg_3d, + act_cfg=self.act_cfg), + ) + + 
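+            # dilation = (j + 1) % 2 + 1 alternates between 2 and 1, so every stage
+            # interleaves dilated and plain 3x3 convolutions before they are wrapped
+            # into the nn.Sequential below.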
each_stage_convs=nn.Sequential(*each_stage_convs) + self.convs.append(each_stage_convs) + self.add_module('pyramid_convs_{}'.format(i), each_stage_convs) + if i < self.encoder_block_num - 1: + # kernel_size_z = 2 if self.feature_map_z[i+1] // self.feature_map_z[i] == 2 else 3 + # kernel_size_h = 2 if self.bev_h_list[i + 1] // self.bev_h_list[i] == 2 else 3 + # kernel_size_w = 2 if self.bev_w_list[i + 1] // self.bev_w_list[i] == 2 else 3 + # kernel_size=[kernel_size_z,kernel_size_h,kernel_size_w] + if self.bev_h_list[i+1] // self.bev_h_list[i] == 2: # TODO: change to arbitrary upsample ratio + kernel_size = [2, 2, 2] + stride = [2, 2, 2] + padding = [0, 0, 0] + else: + kernel_size = [2, 3, 3] + stride = [2, 1, 1] + padding = [0, 1, 1] + up_conv=nn.Sequential( + ConvModule( + self.encoder_embed_dims[i], + self.encoder_embed_dims[i + 1], + kernel_size=kernel_size, + stride=stride, + padding=padding, + # bias=use_bias_3d, + conv_cfg=dict(type='ConvTranspose3d'), + norm_cfg=self.norm_cfg_3d, + act_cfg=self.act_cfg), + ConvModule( + self.encoder_embed_dims[i + 1], + self.encoder_embed_dims[i + 1], + kernel_size=3, + stride=1, + padding=self.paddings[i + 1], + dilation=self.dilations[i + 1], + # bias=use_bias_3d, + conv_cfg=dict(type='Conv3d'), + norm_cfg=self.norm_cfg_3d, + act_cfg=self.act_cfg), + ) + + self.up_convs.append(up_conv) + self.add_module('up_convs_{}'.format(i), up_conv) + + + + + self.image_feature_map_1_2 = nn.Sequential( + nn.Linear(self.embed_dims, self.embed_dims//2), + nn.ReLU(inplace=True), + ) + self.image_feature_map_1_4 = nn.Sequential( + nn.Linear(self.embed_dims, self.embed_dims//4), + nn.ReLU(inplace=True), + ) + if 8 in self.embed_dim_ratios: + self.image_feature_map_1_8 = nn.Sequential( + nn.Linear(self.embed_dims, self.embed_dims//8), + nn.ReLU(inplace=True), + ) + else: + self.image_feature_map_1_8 = None + + if 16 in self.embed_dim_ratios: + self.image_feature_map_1_16 = nn.Sequential( + nn.Linear(self.embed_dims, self.embed_dims//16), + nn.ReLU(inplace=True), + ) + else: + self.image_feature_map_1_16 = None + + + def init_weights(self): + """Initialize the transformer weights.""" + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + for m in self.modules(): + if isinstance(m, MSDeformableAttention3D) or isinstance(m, TemporalSelfAttention) \ + or isinstance(m, CustomMSDeformableAttention): + try: + m.init_weight() + except AttributeError: + m.init_weights() + normal_(self.level_embeds) + normal_(self.cams_embeds) + # xavier_init(self.reference_points, distribution='uniform', bias=0.) + xavier_init(self.can_bus_mlp, distribution='uniform', bias=0.) + if self.decoder_on_bev: + xavier_init(self.voxel2bev, distribution='uniform', bias=0.) + if not self.use_conv: + for block in self.bev_voxel_transfers: + xavier_init(block, distribution='uniform', bias=0.) + xavier_init(self.image_feature_map_1_2, distribution='uniform', bias=0.) + xavier_init(self.image_feature_map_1_4, distribution='uniform', bias=0.) + if self.image_feature_map_1_8 is not None: + xavier_init(self.image_feature_map_1_8, distribution='uniform', bias=0.) + if self.image_feature_map_1_16 is not None: + xavier_init(self.image_feature_map_1_16, distribution='uniform', bias=0.) + + @auto_fp16(apply_to=('mlvl_feats', 'bev_queries', 'prev_bev', 'bev_pos')) + def get_voxel_features( + self, + mlvl_feats, + bev_queries, + bev_z, + bev_h, + bev_w, + grid_length=[0.512, 0.512], + bev_pos=None, + prev_bev=None, + **kwargs): + """ + obtain bev features. 
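+        Returns:
+            block_features (list[Tensor]): one feature map per encoder stage after its
+                conv block; a 2D BEV stage yields (bs, C_i, H_i, W_i) and a voxel stage
+                (bs, C_i, Z_i, H_i, W_i), following `encoder_embed_dims`, `feature_map_z`,
+                `bev_h_list` and `bev_w_list`.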
+ """ + + feat_flatten = [] + spatial_shapes = [] + for lvl, feat in enumerate(mlvl_feats): + bs, num_cam, c, h, w = feat.shape + spatial_shape = (h, w) + feat = feat.flatten(3).permute(1, 0, 3, 2) + if self.use_cams_embeds: + feat = feat + self.cams_embeds[:, None, None, :].to(feat.dtype) + feat = feat + self.level_embeds[None, + None, lvl:lvl + 1, :].to(feat.dtype) + spatial_shapes.append(spatial_shape) + feat_flatten.append(feat) + + feat_flatten = torch.cat(feat_flatten, 2) + spatial_shapes = torch.as_tensor( + spatial_shapes, dtype=torch.long, device=bev_queries.device) + level_start_index = torch.cat((spatial_shapes.new_zeros( + (1,)), spatial_shapes.prod(1).cumsum(0)[:-1])) + + feat_flatten_original = feat_flatten.permute( + 0, 2, 1, 3) # (num_cam, H*W, bs, embed_dims) + feat_flatten_map1_2 = self.image_feature_map_1_2(feat_flatten_original) + feat_flatten_map1_4 = self.image_feature_map_1_4(feat_flatten_original) + if self.image_feature_map_1_8 is not None: + feat_flatten_map1_8 = self.image_feature_map_1_8(feat_flatten_original) + else: + feat_flatten_map1_8 = None + if self.image_feature_map_1_16 is not None: + feat_flatten_map1_16 = self.image_feature_map_1_16(feat_flatten_original) + else: + feat_flatten_map1_16 = None + + bs = mlvl_feats[0].size(0) + + bev_queries = bev_queries.unsqueeze(1).repeat(1, bs, 1) # (num_query, bs, embed_dims) + # add can bus signals + can_bus = bev_queries.new_tensor( + [each['can_bus'] for each in kwargs['img_metas']]) # [:, :] + can_bus = self.can_bus_mlp(can_bus)[None, :, :] + bev_queries = bev_queries + can_bus * self.use_can_bus # (query_num, bs, embed_dims) + + block_features = [] + for block_index in range(self.encoder_block_num): + + + bev_pos = None + # bev_pos = bev_pos.flatten(2).permute(2, 0, 1) # (num_query, bs, embed_dims) + + # obtain rotation angle and shift with ego motion + bev_h=self.bev_h_list[block_index] + bev_w=self.bev_w_list[block_index] + delta_x = np.array([each['can_bus'][0] + for each in kwargs['img_metas']]) + delta_y = np.array([each['can_bus'][1] + for each in kwargs['img_metas']]) + ego_angle = np.array( + [each['can_bus'][-2] / np.pi * 180 for each in kwargs['img_metas']]) + + grid_length = [self.real_h / bev_h, + self.real_w / bev_w] + grid_length_y = grid_length[0] + grid_length_x = grid_length[1] + translation_length = np.sqrt(delta_x ** 2 + delta_y ** 2) + translation_angle = np.arctan2(delta_y, delta_x) / np.pi * 180 + bev_angle = ego_angle - translation_angle + shift_y = translation_length * \ + np.cos(bev_angle / 180 * np.pi) / grid_length_y / bev_h + shift_x = translation_length * \ + np.sin(bev_angle / 180 * np.pi) / grid_length_x / bev_w + shift_y = shift_y * self.use_shift + shift_x = shift_x * self.use_shift + shift = bev_queries.new_tensor( + [shift_x, shift_y]).permute(1, 0) # (2, bs) -> (bs, 2) + + + # encoder: BEV -> Voxeli -> Voxelj -> Voxelk + # print('bev_query.shape:', block_index, bev_queries.shape) + block_bev_z = self.feature_map_z[block_index] + block_embed_dims = self.encoder_embed_dims[block_index] + if block_bev_z == 1: + bev_mask = torch.zeros((bs, bev_h, bev_w), + device=bev_queries.device).to(bev_queries.dtype) + else: + bev_mask = torch.zeros((bs, block_bev_z, bev_h, bev_w), + device=bev_queries.device).to(bev_queries.dtype) + pos = self.positional_encodings[block_index](bev_mask).to(bev_queries.dtype) # (bs, embed_dims, h, w) + pos = pos.flatten(2).permute(2, 0, 1) # (query_num, bs, embed_dims) + + + if block_embed_dims == self.embed_dims: + feat_flatten = feat_flatten_original + elif 
block_embed_dims*2 == self.embed_dims: + feat_flatten = feat_flatten_map1_2 + elif block_embed_dims*4 == self.embed_dims: + feat_flatten = feat_flatten_map1_4 + elif block_embed_dims*8 == self.embed_dims: + feat_flatten = feat_flatten_map1_8 + elif block_embed_dims*16 == self.embed_dims: + feat_flatten = feat_flatten_map1_16 + + # if prev_bev is not None: # (bs, num_query, embed_dims) + # stage_prev_bev = prev_bev[block_index] + # if block_bev_z == 1: # 2D BEV + # if stage_prev_bev.shape[1] == bev_h * bev_w: + # stage_prev_bev = stage_prev_bev.permute(1, 0, 2) # (num_query, bs, embed_dims) + # if self.rotate_prev_bev: + # for i in range(bs): + # # num_prev_bev = prev_bev.size(1) + # rotation_angle = kwargs['img_metas'][i]['can_bus'][-1] + # tmp_prev_bev = stage_prev_bev[:, i].reshape( + # bev_h, bev_w, -1).permute(2, 0, 1) # (embed_dims, bev_h, bev_w) + # tmp_prev_bev = rotate(tmp_prev_bev, rotation_angle, + # center=self.rotate_center) # TODO: for 3D voxel + # tmp_prev_bev = tmp_prev_bev.permute(1, 2, 0).reshape( + # bev_h * bev_w, 1, -1) + # stage_prev_bev[:, i] = tmp_prev_bev[:, 0] + # + # else: # 3D Voxel + # if stage_prev_bev.shape[1] == block_bev_z* bev_h * bev_w: + # stage_prev_bev = stage_prev_bev.permute(1, 0, 2) # (num_query, bs, embed_dims) + # if self.rotate_prev_bev: # revise for 3D feature map + # for i in range(bs): + # rotation_angle = kwargs['img_metas'][i]['can_bus'][-1] + # tmp_prev_bev = stage_prev_bev[:, i].reshape(block_bev_z, bev_h, bev_w, -1).permute(3, 0, 1, 2) # (embed_dims, bev_z, bev_h, bev_w) + # tmp_prev_bev = rotate(tmp_prev_bev, rotation_angle, center=self.rotate_center) + # tmp_prev_bev = tmp_prev_bev.permute(1, 2, 3, 0).reshape(block_bev_z * bev_h * bev_w, 1, -1) + # stage_prev_bev[:, i] = tmp_prev_bev[:, 0] + # else: + # stage_prev_bev = None + + if prev_bev is not None: # (bs, num_query, embed_dims) + stage_prev_bev = prev_bev[block_index] + if block_bev_z == 1: # 2D BEV + if self.rotate_prev_bev: + for i in range(bs): + # num_prev_bev = prev_bev.size(1) + rotation_angle = kwargs['img_metas'][i]['can_bus'][-1] + tmp_prev_bev = stage_prev_bev[i] + tmp_prev_bev = rotate(tmp_prev_bev, rotation_angle, + center=self.rotate_center) # TODO: for 3D voxel + stage_prev_bev[i] = tmp_prev_bev + stage_prev_bev = stage_prev_bev.permute(2, 3, 0, 1).reshape( + bev_h * bev_w, bs, -1) + + + else: # 3D Voxel + if self.rotate_prev_bev: # revise for 3D feature map + for i in range(bs): + rotation_angle = kwargs['img_metas'][i]['can_bus'][-1] + tmp_prev_bev = stage_prev_bev[i] + tmp_prev_bev = rotate(tmp_prev_bev, rotation_angle, center=self.rotate_center) + stage_prev_bev[i] = tmp_prev_bev + stage_prev_bev = stage_prev_bev.permute(2,3,4,0,1).reshape(block_bev_z * bev_h * bev_w, bs, -1) + + else: + stage_prev_bev = None + + output = self.encoders[block_index]( + bev_queries, + feat_flatten, + feat_flatten, + bev_z=block_bev_z, + bev_h=bev_h, + bev_w=bev_w, + bev_pos=pos, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + prev_bev=stage_prev_bev, + shift=shift, + **kwargs + ) + # block_features.append(output) + if self.feature_map_z[block_index] == 1: + output = output.view(bev_h, bev_w, bs, self.encoder_embed_dims[block_index]) + output = output.permute(2, 3, 0, 1) + output = self.convs[block_index](output) + block_features.append(output) + + else: + output = output.view(block_bev_z, bev_h, bev_w, bs, self.encoder_embed_dims[block_index]) + output = output.permute(3, 4, 0, 1, 2) + output = self.convs[block_index](output) + block_features.append(output) 
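+            # At this point block_features holds one map per finished stage: a 2D BEV map
+            # when feature_map_z[block_index] == 1, otherwise a 3D voxel volume. The block
+            # below upsamples the current stage with up_convs to form the queries of the
+            # next, finer stage.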
+ + if block_index < self.encoder_block_num-1: # bev-> voxel or voxel_i -> voxel_j + if self.feature_map_z[block_index] == 1: + bev_queries = output.reshape(bs,self.encoder_embed_dims[block_index],1,bev_h, bev_w) + bev_queries = self.up_convs[block_index](bev_queries) + else: + bev_queries = self.up_convs[block_index](output) + bev_queries = bev_queries.view(bs, self.encoder_embed_dims[block_index + 1], + self.feature_map_z[block_index + 1], self.bev_h_list[block_index + 1], self.bev_w_list[block_index + 1], + ) + bev_queries = bev_queries.permute(2, 3, 4, 0, 1) + bev_queries = bev_queries.reshape(-1, bs, self.encoder_embed_dims[ + block_index + 1]) # (num_query, bs, embed_dims) + return block_features # is a list + + + @auto_fp16(apply_to=('mlvl_feats', 'bev_queries', 'object_query_embed', 'prev_bev', 'bev_pos')) + def forward(self, + mlvl_feats, + bev_queries, + object_query_embed, + bev_z, + bev_h, + bev_w, + grid_length=[0.512, 0.512], + bev_pos=None, + reg_branches=None, + cls_branches=None, + prev_bev=None, + **kwargs): + """Forward function for `Detr3DTransformer`. + Args: + mlvl_feats (list(Tensor)): Input queries from + different level. Each element has shape + [bs, num_cams, embed_dims, h, w]. + bev_queries (Tensor): (bev_h*bev_w, c) + bev_pos (Tensor): (bs, embed_dims, bev_h, bev_w) + object_query_embed (Tensor): The query embedding for decoder, + with shape [num_query, c]. + reg_branches (obj:`nn.ModuleList`): Regression heads for + feature maps from each decoder layer. Only would + be passed when `with_box_refine` is True. Default to None. + Returns: + tuple[Tensor]: results of decoder containing the following tensor. + - bev_embed: BEV features + - inter_states: Outputs from decoder. If + return_intermediate_dec is True output has shape \ + (num_dec_layers, bs, num_query, embed_dims), else has \ + shape (1, bs, num_query, embed_dims). + - init_reference_out: The initial value of reference \ + points, has shape (bs, num_queries, 4). + - inter_references_out: The internal value of reference \ + points in decoder, has shape \ + (num_dec_layers, bs,num_query, embed_dims) + - enc_outputs_class: The classification score of \ + proposals generated from \ + encoder's feature maps, has shape \ + (batch, h*w, num_classes). \ + Only would be returned when `as_two_stage` is True, \ + otherwise None. + - enc_outputs_coord_unact: The regression results \ + generated from encoder's feature maps., has shape \ + (batch, h*w, 4). Only would \ + be returned when `as_two_stage` is True, \ + otherwise None. 
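+            Note: this implementation returns `block_features`, the list of per-stage
+            voxel features produced by `self.get_voxel_features`; the decoder outputs
+            listed above are not computed here.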
+ """ + + block_features = self.get_voxel_features( + mlvl_feats, + bev_queries, + bev_z, + bev_h, + bev_w, + grid_length=grid_length, + bev_pos=bev_pos, + prev_bev=prev_bev, + **kwargs) # voxel_embed shape: (bs, num_query, embed_dims) + + return block_features + + diff --git a/projects/mmdet3d_plugin/bevformer/modules/residual_block_3d.py b/projects/mmdet3d_plugin/bevformer/modules/residual_block_3d.py new file mode 100644 index 0000000..bfd143b --- /dev/null +++ b/projects/mmdet3d_plugin/bevformer/modules/residual_block_3d.py @@ -0,0 +1,52 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule + +class ResidualBlock(nn.Module): + def __init__(self, + in_channels, + out_channels, + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d'), + act_cfg=dict(type='ReLU',inplace=True)): + super(ResidualBlock, self).__init__() + self.conv1 = ConvModule( + in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + ) + self.conv2 = ConvModule( + out_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None, + ) + self.downsample = ConvModule( + in_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None, + ) + + def forward(self, x): + out = self.conv1(x) + out = self.conv2(out) + out += self.downsample(x) + out = F.relu(out) + return out + diff --git a/projects/mmdet3d_plugin/bevformer/modules/resnet.py b/projects/mmdet3d_plugin/bevformer/modules/resnet.py new file mode 100644 index 0000000..5a4d021 --- /dev/null +++ b/projects/mmdet3d_plugin/bevformer/modules/resnet.py @@ -0,0 +1,85 @@ +# Copyright (c) Phigent Robotics. All rights reserved. 
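+
+# CustomResNet below stacks plain ResNet stages (BasicBlock or Bottleneck) on top of an
+# arbitrary input channel count and returns the intermediate maps listed in
+# `backbone_output_ids`. A minimal usage sketch; the channel counts and shapes follow the
+# defaults and are illustrative only, not taken from any config in this repository:
+#   net = CustomResNet(numC_input=64, num_layer=[2, 2, 2], stride=[2, 2, 2])
+#   feats = net(x)  # x: (bs, 64, H, W) -> [(bs, 128, H/2, W/2), (bs, 256, H/4, W/4), (bs, 512, H/8, W/8)]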
+ +import torch.utils.checkpoint as checkpoint +from torch import nn + +from mmdet.models import BACKBONES +from mmdet.models.backbones.resnet import BasicBlock, Bottleneck + + +@BACKBONES.register_module() +class CustomResNet(nn.Module): + + def __init__( + self, + numC_input, + num_layer=[2, 2, 2], + num_channels=None, + stride=[2, 2, 2], + backbone_output_ids=None, + norm_cfg=dict(type='BN'), + with_cp=False, + block_type='Basic', + ): + super(CustomResNet, self).__init__() + # build backbone + assert len(num_layer) == len(stride) + num_channels = [numC_input*2**(i+1) for i in range(len(num_layer))] \ + if num_channels is None else num_channels + self.backbone_output_ids = range(len(num_layer)) \ + if backbone_output_ids is None else backbone_output_ids + layers = [] + if block_type == 'BottleNeck': + curr_numC = numC_input + for i in range(len(num_layer)): + layer = [ + Bottleneck( + curr_numC, + num_channels[i] // 4, + stride=stride[i], + downsample=nn.Conv2d(curr_numC, num_channels[i], 3, + stride[i], 1), + norm_cfg=norm_cfg) + ] + curr_numC = num_channels[i] + layer.extend([ + Bottleneck(curr_numC, curr_numC // 4, norm_cfg=norm_cfg) + for _ in range(num_layer[i] - 1) + ]) + layers.append(nn.Sequential(*layer)) + elif block_type == 'Basic': + curr_numC = numC_input + for i in range(len(num_layer)): + layer = [ + BasicBlock( + curr_numC, + num_channels[i], + stride=stride[i], + downsample=nn.Conv2d(curr_numC, num_channels[i], 3, + stride[i], 1), + norm_cfg=norm_cfg) + ] + curr_numC = num_channels[i] + layer.extend([ + BasicBlock(curr_numC, curr_numC, norm_cfg=norm_cfg) + for _ in range(num_layer[i] - 1) + ]) + layers.append(nn.Sequential(*layer)) + else: + assert False + self.layers = nn.Sequential(*layers) + + self.with_cp = with_cp + + def forward(self, x): + feats = [] + x_tmp = x + for lid, layer in enumerate(self.layers): + if self.with_cp: + x_tmp = checkpoint.checkpoint(layer, x_tmp) + else: + x_tmp = layer(x_tmp) + if lid in self.backbone_output_ids: + feats.append(x_tmp) + + return feats diff --git a/projects/mmdet3d_plugin/bevformer/modules/spatial_cross_attention.py b/projects/mmdet3d_plugin/bevformer/modules/spatial_cross_attention.py index 100d94f..04bfb09 100644 --- a/projects/mmdet3d_plugin/bevformer/modules/spatial_cross_attention.py +++ b/projects/mmdet3d_plugin/bevformer/modules/spatial_cross_attention.py @@ -27,7 +27,6 @@ ext_module = ext_loader.load_ext( '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward']) - @ATTENTION.register_module() class SpatialCrossAttention(BaseModule): """An attention module used in BEVFormer. @@ -73,78 +72,61 @@ def init_weight(self): xavier_init(self.output_proj, distribution='uniform', bias=0.) @force_fp32(apply_to=('query', 'key', 'value', 'query_pos', 'reference_points_cam')) - def forward(self, - query, - key, + def forward(self, + query, + key, value, residual=None, - query_pos=None, - key_padding_mask=None, - reference_points=None, spatial_shapes=None, reference_points_cam=None, bev_mask=None, level_start_index=None, + query_pos=None, flag='encoder', **kwargs): """Forward Function of Detr3DCrossAtten. Args: - query (Tensor): Query of Transformer with shape - (num_query, bs, embed_dims). - key (Tensor): The key tensor with shape - `(num_key, bs, embed_dims)`. - value (Tensor): The value tensor with shape - `(num_key, bs, embed_dims)`. (B, N, C, H, W) - residual (Tensor): The tensor used for addition, with the - same shape as `x`. Default None. If None, `x` will be used. 
- query_pos (Tensor): The positional encoding for `query`. - Default: None. - key_pos (Tensor): The positional encoding for `key`. Default - None. - reference_points (Tensor): The normalized reference - points with shape (bs, num_query, 4), - all elements is range in [0, 1], top-left (0,0), - bottom-right (1, 1), including padding area. - or (N, Length_{query}, num_levels, 4), add - additional two dimensions is (w, h) to - form reference boxes. - key_padding_mask (Tensor): ByteTensor for `query`, with - shape [bs, num_key]. - spatial_shapes (Tensor): Spatial shape of features in - different level. With shape (num_levels, 2), - last dimension represent (h, w). + query (Tensor): BEV query of Transformer with shape (bs, num_query, embed_dims). + key (Tensor): The key tensor is flattened multi level image feature with shape (num_cam, num_value, bs, embed_dims). + value (Tensor): The value tensor is the same as `key`. + residual (Tensor): The tensor used for addition, with the same shape as the output `query`. Default None. If None, `query` will be used. + spatial_shapes (Tensor): Spatial shape of features in different level. With shape (num_levels, 2), last dimension represent (h, w). + reference_points_cam (Tensor): projected reference points in the camera coordinate system with shape (num_cam, bs, h*w, num_points_in_pillar, 2). + bev_mask (Tensor): binary mask indicating valid points in `reference_points_cam` with shape (num_cam, bs, h*w, num_points_in_pillar). level_start_index (Tensor): The start index of each level. - A tensor has shape (num_levels) and can be represented - as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...]. + A tensor has shape (num_levels) and can be represented as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...]. Returns: - Tensor: forwarded results with shape [num_query, bs, embed_dims]. + query (Tensor): forwarded results with shape (bs, num_query, embed_dims). """ - - if key is None: + + # Step 1: prepare the input + if key is None: # This is always False in our model key = query - if value is None: + if value is None: # This is always False in our model value = key - if residual is None: inp_residual = query slots = torch.zeros_like(query) if query_pos is not None: query = query + query_pos - bs, num_query, _ = query.size() + num_cams, multi_level_value_num, bs, embed_dims = key.shape + num_points_in_pillar = reference_points_cam.size(3) - D = reference_points_cam.size(3) + # Step 2: keep all non-zero indexes in the bev_mask indexes = [] for i, mask_per_img in enumerate(bev_mask): - index_query_per_img = mask_per_img[0].sum(-1).nonzero().squeeze(-1) + index_query_per_img = mask_per_img[0].sum(-1, keepdim=False) # sum over the num_points_in_pillar dimension + # Here [0] is the index of batch_size dimension, so we should ensure the batch_size is 1. + index_query_per_img = index_query_per_img.nonzero() # (n, 1) return the index of all non-zero elements + index_query_per_img = index_query_per_img.squeeze(-1) indexes.append(index_query_per_img) max_len = max([len(each) for each in indexes]) - # each camera only interacts with its corresponding BEV queries. This step can greatly save GPU memory. - queries_rebatch = query.new_zeros( - [bs, self.num_cams, max_len, self.embed_dims]) - reference_points_rebatch = reference_points_cam.new_zeros( - [bs, self.num_cams, max_len, D, 2]) + # Step 3: use the indexes to re-organize the query, reference_points_cam, and bev_mask. Flitter out the out of range elements. + # each camera only interacts with its corresponding BEV queries. 
This step can greatly save GPU memory. + queries_rebatch = query.new_zeros([bs, num_cams, max_len, embed_dims]) + reference_points_rebatch = reference_points_cam.new_zeros([bs, num_cams, max_len, num_points_in_pillar, 2]) for j in range(bs): for i, reference_points_per_img in enumerate(reference_points_cam): @@ -152,26 +134,31 @@ def forward(self, queries_rebatch[j, i, :len(index_query_per_img)] = query[j, index_query_per_img] reference_points_rebatch[j, i, :len(index_query_per_img)] = reference_points_per_img[j, index_query_per_img] - num_cams, l, bs, embed_dims = key.shape - - key = key.permute(2, 0, 1, 3).reshape( - bs * self.num_cams, l, self.embed_dims) - value = value.permute(2, 0, 1, 3).reshape( - bs * self.num_cams, l, self.embed_dims) + # Step 4: deformable attention + # combine the batch_size and num_cams for all the inputs + key = key.permute(2, 0, 1, 3).reshape(bs*num_cams, multi_level_value_num, embed_dims) + value = value.permute(2, 0, 1, 3).reshape(bs*num_cams, multi_level_value_num, embed_dims) + query_rebatch = queries_rebatch.view(bs*num_cams, max_len, embed_dims) + reference_points_rebatch = reference_points_rebatch.view(bs*num_cams, max_len, num_points_in_pillar, 2) + + queries = self.deformable_attention(query=query_rebatch, + key=key, + value=value, + reference_points=reference_points_rebatch, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index) + + queries = queries.view(bs, num_cams, max_len, embed_dims) - queries = self.deformable_attention(query=queries_rebatch.view(bs*self.num_cams, max_len, self.embed_dims), key=key, value=value, - reference_points=reference_points_rebatch.view(bs*self.num_cams, max_len, D, 2), spatial_shapes=spatial_shapes, - level_start_index=level_start_index).view(bs, self.num_cams, max_len, self.embed_dims) + # Step 5: re-organize the queries back to the original shape for j in range(bs): for i, index_query_per_img in enumerate(indexes): slots[j, index_query_per_img] += queries[j, i, :len(index_query_per_img)] - count = bev_mask.sum(-1) > 0 count = count.permute(1, 2, 0).sum(-1) count = torch.clamp(count, min=1.0) slots = slots / count[..., None] slots = self.output_proj(slots) - return self.dropout(slots) + inp_residual @@ -290,8 +277,7 @@ def forward(self, value (Tensor): The value tensor with shape `(bs, num_key, embed_dims)`. identity (Tensor): The tensor used for addition, with the - same shape as `query`. Default None. If None, - `query` will be used. + same shape as `query`. Default None. If None, `query` will be used. query_pos (Tensor): The positional encoding for `query`. Default: None. key_pos (Tensor): The positional encoding for `key`. 
Default @@ -323,7 +309,6 @@ def forward(self, query = query + query_pos if not self.batch_first: - # change to (bs, num_query ,embed_dims) query = query.permute(1, 0, 2) value = value.permute(1, 0, 2) @@ -335,6 +320,7 @@ def forward(self, if key_padding_mask is not None: value = value.masked_fill(key_padding_mask[..., None], 0.0) value = value.view(bs, num_value, self.num_heads, -1) + sampling_offsets = self.sampling_offsets(query).view( bs, num_query, self.num_heads, self.num_levels, self.num_points, 2) attention_weights = self.attention_weights(query).view( @@ -380,8 +366,7 @@ def forward(self, # sampling_locations.shape: bs, num_query, num_heads, num_levels, num_all_points, 2 # attention_weights.shape: bs, num_query, num_heads, num_levels, num_all_points - # - + if torch.cuda.is_available() and value.is_cuda: if value.dtype == torch.float16: MultiScaleDeformableAttnFunction = MultiScaleDeformableAttnFunction_fp32 diff --git a/projects/mmdet3d_plugin/bevformer/modules/spatial_cross_attention_3d.py b/projects/mmdet3d_plugin/bevformer/modules/spatial_cross_attention_3d.py new file mode 100644 index 0000000..2d15cfb --- /dev/null +++ b/projects/mmdet3d_plugin/bevformer/modules/spatial_cross_attention_3d.py @@ -0,0 +1,399 @@ + +# --------------------------------------------- +# Copyright (c) OpenMMLab. All rights reserved. +# --------------------------------------------- +# Modified by Zhiqi Li +# --------------------------------------------- + +from mmcv.ops.multi_scale_deform_attn import multi_scale_deformable_attn_pytorch +import warnings +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import xavier_init, constant_init +from mmcv.cnn.bricks.registry import (ATTENTION, + TRANSFORMER_LAYER, + TRANSFORMER_LAYER_SEQUENCE) +from mmcv.cnn.bricks.transformer import build_attention +import math +from mmcv.runner import force_fp32, auto_fp16 + +from mmcv.runner.base_module import BaseModule, ModuleList, Sequential + +from mmcv.utils import ext_loader +from .multi_scale_deformable_attn_function import MultiScaleDeformableAttnFunction_fp32, \ + MultiScaleDeformableAttnFunction_fp16 +from projects.mmdet3d_plugin.models.utils.bricks import run_time +ext_module = ext_loader.load_ext( + '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward']) + + +@ATTENTION.register_module() +class SpatialCrossAttention3D(BaseModule): + """An attention module used in BEVFormer. + Args: + embed_dims (int): The embedding dimension of Attention. + Default: 256. + num_cams (int): The number of cameras + dropout (float): A Dropout layer on `inp_residual`. + Default: 0.. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + deformable_attention: (dict): The config for the deformable attention used in SCA. 
+ """ + + def __init__(self, + embed_dims=256, + num_cams=6, + pc_range=None, + dropout=0.1, + init_cfg=None, + batch_first=False, + deformable_attention=dict( + type='MSDeformableAttention3D', + embed_dims=256, + num_levels=4), + **kwargs + ): + super(SpatialCrossAttention3D, self).__init__(init_cfg) + + self.init_cfg = init_cfg + self.dropout = nn.Dropout(dropout) + self.pc_range = pc_range + self.fp16_enabled = False + self.deformable_attention = build_attention(deformable_attention) + self.embed_dims = embed_dims + self.num_cams = num_cams + self.output_proj = nn.Linear(embed_dims, embed_dims) + self.batch_first = batch_first + self.init_weight() + + def init_weight(self): + """Default initialization for Parameters of Module.""" + xavier_init(self.output_proj, distribution='uniform', bias=0.) + + @force_fp32(apply_to=('query', 'key', 'value', 'query_pos', 'reference_points_cam')) + def forward(self, + query, + key, + value, + residual=None, + query_pos=None, + key_padding_mask=None, + reference_points=None, + spatial_shapes=None, + reference_points_cam=None, + bev_mask=None, + level_start_index=None, + flag='encoder', + **kwargs): + """Forward Function of Detr3DCrossAtten. + Args: + query (Tensor): Query of Transformer with shape + (num_query, bs, embed_dims). + key (Tensor): The key tensor with shape + `(num_key, bs, embed_dims)`. + value (Tensor): The value tensor with shape + `(num_key, bs, embed_dims)`. (B, N, C, H, W) + residual (Tensor): The tensor used for addition, with the + same shape as `x`. Default None. If None, `x` will be used. + query_pos (Tensor): The positional encoding for `query`. + Default: None. + key_pos (Tensor): The positional encoding for `key`. Default + None. + reference_points (Tensor): The normalized reference + points with shape (bs, num_query, 4), + all elements is range in [0, 1], top-left (0,0), + bottom-right (1, 1), including padding area. + or (N, Length_{query}, num_levels, 4), add + additional two dimensions is (w, h) to + form reference boxes. + key_padding_mask (Tensor): ByteTensor for `query`, with + shape [bs, num_key]. + spatial_shapes (Tensor): Spatial shape of features in + different level. With shape (num_levels, 2), + last dimension represent (h, w). + level_start_index (Tensor): The start index of each level. + A tensor has shape (num_levels) and can be represented + as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...]. + Returns: + Tensor: forwarded results with shape [num_query, bs, embed_dims]. + """ + + if key is None: + key = query + if value is None: + value = key + + if residual is None: + inp_residual = query + slots = torch.zeros_like(query) + if query_pos is not None: + query = query + query_pos + + bs, num_query, _ = query.size() + # bevformer reference_points_cam shape: (num_cam,bs,h*w,num_points_in_pillar,2) + D = reference_points_cam.size(3) + indexes = [] + for i, mask_per_img in enumerate(bev_mask): + index_query_per_img = mask_per_img[0].sum(-1).nonzero().squeeze(-1) + indexes.append(index_query_per_img) + max_len = max([len(each) for each in indexes]) + + # each camera only interacts with its corresponding BEV queries. This step can greatly save GPU memory. 
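+        # bev_mask marks, per camera, which BEV queries project inside that camera's image.
+        # Only those queries (and their reference points) are gathered into the padded
+        # rebatch tensors below, passed through deformable attention, then scattered back
+        # into `slots` and averaged by the per-query hit count.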
+ queries_rebatch = query.new_zeros( + [bs, self.num_cams, max_len, self.embed_dims]) + reference_points_rebatch = reference_points_cam.new_zeros( + [bs, self.num_cams, max_len, D, 2]) + + for j in range(bs): + for i, reference_points_per_img in enumerate(reference_points_cam): + index_query_per_img = indexes[i] + queries_rebatch[j, i, :len(index_query_per_img)] = query[j, index_query_per_img] + reference_points_rebatch[j, i, :len(index_query_per_img)] = reference_points_per_img[j, index_query_per_img] + + num_cams, l, bs, embed_dims = key.shape + + key = key.permute(2, 0, 1, 3).reshape( + bs * self.num_cams, l, self.embed_dims) + value = value.permute(2, 0, 1, 3).reshape( + bs * self.num_cams, l, self.embed_dims) + + queries = self.deformable_attention(query=queries_rebatch.view(bs*self.num_cams, max_len, self.embed_dims), key=key, value=value, + reference_points=reference_points_rebatch.view(bs*self.num_cams, max_len, D, 2), spatial_shapes=spatial_shapes, + level_start_index=level_start_index).view(bs, self.num_cams, max_len, self.embed_dims) + for j in range(bs): + for i, index_query_per_img in enumerate(indexes): + slots[j, index_query_per_img] += queries[j, i, :len(index_query_per_img)] + + count = bev_mask.sum(-1) > 0 + count = count.permute(1, 2, 0).sum(-1) + count = torch.clamp(count, min=1.0) + slots = slots / count[..., None] + slots = self.output_proj(slots) + + return self.dropout(slots) + inp_residual + + +@ATTENTION.register_module() +class MSDeformableAttention4D(BaseModule): + """An attention module used in BEVFormer based on Deformable-Detr. + `Deformable DETR: Deformable Transformers for End-to-End Object Detection. + `_. + Args: + embed_dims (int): The embedding dimension of Attention. + Default: 256. + num_heads (int): Parallel attention heads. Default: 64. + num_levels (int): The number of feature map used in + Attention. Default: 4. + num_points (int): The number of sampling points for + each query in each head. Default: 4. + im2col_step (int): The step used in image_to_column. + Default: 64. + dropout (float): A Dropout layer on `inp_identity`. + Default: 0.1. + batch_first (bool): Key, Query and Value are shape of + (batch, n, embed_dim) + or (n, batch, embed_dim). Default to False. + norm_cfg (dict): Config dict for normalization layer. + Default: None. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. 
+ """ + + def __init__(self, + embed_dims=256, + num_heads=8, + num_levels=4, + num_points=8, + im2col_step=64, + dropout=0.1, + batch_first=True, + norm_cfg=None, + init_cfg=None): + super().__init__(init_cfg) + if embed_dims % num_heads != 0: + raise ValueError(f'embed_dims must be divisible by num_heads, ' + f'but got {embed_dims} and {num_heads}') + dim_per_head = embed_dims // num_heads + self.norm_cfg = norm_cfg + self.batch_first = batch_first + self.output_proj = None + self.fp16_enabled = False + + # you'd better set dim_per_head to a power of 2 + # which is more efficient in the CUDA implementation + def _is_power_of_2(n): + if (not isinstance(n, int)) or (n < 0): + raise ValueError( + 'invalid input for _is_power_of_2: {} (type: {})'.format( + n, type(n))) + return (n & (n - 1) == 0) and n != 0 + + if not _is_power_of_2(dim_per_head): + warnings.warn( + "You'd better set embed_dims in " + 'MultiScaleDeformAttention to make ' + 'the dimension of each attention head a power of 2 ' + 'which is more efficient in our CUDA implementation.') + + self.im2col_step = im2col_step + self.embed_dims = embed_dims + self.num_levels = num_levels + self.num_heads = num_heads + self.num_points = num_points + self.sampling_offsets = nn.Linear( + embed_dims, num_heads * num_levels * num_points * 2) + self.attention_weights = nn.Linear(embed_dims, + num_heads * num_levels * num_points) + self.value_proj = nn.Linear(embed_dims, embed_dims) + + self.init_weights() + + def init_weights(self): + """Default initialization for Parameters of Module.""" + constant_init(self.sampling_offsets, 0.) + thetas = torch.arange( + self.num_heads, + dtype=torch.float32) * (2.0 * math.pi / self.num_heads) + grid_init = torch.stack([thetas.cos(), thetas.sin()], -1) + grid_init = (grid_init / + grid_init.abs().max(-1, keepdim=True)[0]).view( + self.num_heads, 1, 1, + 2).repeat(1, self.num_levels, self.num_points, 1) + for i in range(self.num_points): + grid_init[:, :, i, :] *= i + 1 + + self.sampling_offsets.bias.data = grid_init.view(-1) + constant_init(self.attention_weights, val=0., bias=0.) + xavier_init(self.value_proj, distribution='uniform', bias=0.) + xavier_init(self.output_proj, distribution='uniform', bias=0.) + self._is_init = True + + def forward(self, + query, + key=None, + value=None, + identity=None, + query_pos=None, + key_padding_mask=None, + reference_points=None, + spatial_shapes=None, + level_start_index=None, + **kwargs): + """Forward Function of MultiScaleDeformAttention. + Args: + query (Tensor): Query of Transformer with shape + ( bs, num_query, embed_dims). + key (Tensor): The key tensor with shape + `(bs, num_key, embed_dims)`. + value (Tensor): The value tensor with shape + `(bs, num_key, embed_dims)`. + identity (Tensor): The tensor used for addition, with the + same shape as `query`. Default None. If None, + `query` will be used. + query_pos (Tensor): The positional encoding for `query`. + Default: None. + key_pos (Tensor): The positional encoding for `key`. Default + None. + reference_points (Tensor): The normalized reference + points with shape (bs, num_query, num_levels, 2), + all elements is range in [0, 1], top-left (0,0), + bottom-right (1, 1), including padding area. + or (N, Length_{query}, num_levels, 4), add + additional two dimensions is (w, h) to + form reference boxes. + key_padding_mask (Tensor): ByteTensor for `query`, with + shape [bs, num_key]. + spatial_shapes (Tensor): Spatial shape of features in + different levels. 
With shape (num_levels, 2), + last dimension represents (h, w). + level_start_index (Tensor): The start index of each level. + A tensor has shape ``(num_levels, )`` and can be represented + as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...]. + Returns: + Tensor: forwarded results with shape [num_query, bs, embed_dims]. + """ + + if value is None: + value = query + if identity is None: + identity = query + if query_pos is not None: + query = query + query_pos + + if not self.batch_first: + # change to (bs, num_query ,embed_dims) + query = query.permute(1, 0, 2) + value = value.permute(1, 0, 2) + + bs, num_query, _ = query.shape + bs, num_value, _ = value.shape + assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value + + value = self.value_proj(value) + if key_padding_mask is not None: + value = value.masked_fill(key_padding_mask[..., None], 0.0) + value = value.view(bs, num_value, self.num_heads, -1) + sampling_offsets = self.sampling_offsets(query).view( + bs, num_query, self.num_heads, self.num_levels, self.num_points, 2) + attention_weights = self.attention_weights(query).view( + bs, num_query, self.num_heads, self.num_levels * self.num_points) + + attention_weights = attention_weights.softmax(-1) + + attention_weights = attention_weights.view(bs, num_query, + self.num_heads, + self.num_levels, + self.num_points) + + if reference_points.shape[-1] == 2: + """ + For each BEV query, it owns `num_Z_anchors` in 3D space that having different heights. + After proejcting, each BEV query has `num_Z_anchors` reference points in each 2D image. + For each referent point, we sample `num_points` sampling points. + For `num_Z_anchors` reference points, it has overall `num_points * num_Z_anchors` sampling points. + """ + offset_normalizer = torch.stack( + [spatial_shapes[..., 1], spatial_shapes[..., 0]], -1) + + bs, num_query, num_Z_anchors, xy = reference_points.shape + reference_points = reference_points[:, :, None, None, None, :, :] + sampling_offsets = sampling_offsets / \ + offset_normalizer[None, None, None, :, None, :] + bs, num_query, num_heads, num_levels, num_all_points, xy = sampling_offsets.shape + sampling_offsets = sampling_offsets.view( + bs, num_query, num_heads, num_levels, num_all_points // num_Z_anchors, num_Z_anchors, xy) + sampling_locations = reference_points + sampling_offsets + bs, num_query, num_heads, num_levels, num_points, num_Z_anchors, xy = sampling_locations.shape + assert num_all_points == num_points * num_Z_anchors + + sampling_locations = sampling_locations.view( + bs, num_query, num_heads, num_levels, num_all_points, xy) + + elif reference_points.shape[-1] == 4: + assert False + else: + raise ValueError( + f'Last dim of reference_points must be' + f' 2 or 4, but get {reference_points.shape[-1]} instead.') + + # sampling_locations.shape: bs, num_query, num_heads, num_levels, num_all_points, 2 + # attention_weights.shape: bs, num_query, num_heads, num_levels, num_all_points + # + + if torch.cuda.is_available() and value.is_cuda: + if value.dtype == torch.float16: + MultiScaleDeformableAttnFunction = MultiScaleDeformableAttnFunction_fp32 + else: + MultiScaleDeformableAttnFunction = MultiScaleDeformableAttnFunction_fp32 + output = MultiScaleDeformableAttnFunction.apply( + value, spatial_shapes, level_start_index, sampling_locations, + attention_weights, self.im2col_step) + else: + output = multi_scale_deformable_attn_pytorch( + value, spatial_shapes, sampling_locations, attention_weights) + if not self.batch_first: + output = output.permute(1, 0, 2) + + return output 
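The reference-point handling in MSDeformableAttention4D.forward above (one set of predicted offsets shared across the `num_Z_anchors` pillar points of every BEV query) can be hard to follow from the diff alone. The following standalone shape sketch mirrors that reshaping; it is not part of the patch and every size in it is made up for illustration:

import torch

bs, num_query, num_heads, num_levels = 1, 3, 2, 1
num_all_points, num_Z_anchors = 8, 4          # 8 predicted offsets -> 2 sampling points per height anchor

spatial_shapes = torch.tensor([[25, 50]])                       # (num_levels, 2) as (h, w)
reference_points = torch.rand(bs, num_query, num_Z_anchors, 2)  # normalized (x, y) per height anchor
sampling_offsets = torch.rand(bs, num_query, num_heads, num_levels, num_all_points, 2)

# normalize the offsets by the (w, h) of each feature level, as in the forward pass
offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
offsets = sampling_offsets / offset_normalizer[None, None, None, :, None, :]

# split the offsets over the height anchors and broadcast the per-anchor reference points
offsets = offsets.view(bs, num_query, num_heads, num_levels,
                       num_all_points // num_Z_anchors, num_Z_anchors, 2)
locations = reference_points[:, :, None, None, None, :, :] + offsets

# flatten back so the attention kernel sees num_points * num_Z_anchors points per level
locations = locations.view(bs, num_query, num_heads, num_levels, num_all_points, 2)
print(locations.shape)  # torch.Size([1, 3, 2, 1, 8, 2])

Note that the same `num_all_points` offsets produced by the offset head are divided among the height anchors rather than being predicted separately per anchor, which is exactly what the view/assert pair in the forward pass enforces.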
diff --git a/projects/mmdet3d_plugin/bevformer/modules/temporal_self_attention.py b/projects/mmdet3d_plugin/bevformer/modules/temporal_self_attention.py index 78fb9f5..cc3637e 100644 --- a/projects/mmdet3d_plugin/bevformer/modules/temporal_self_attention.py +++ b/projects/mmdet3d_plugin/bevformer/modules/temporal_self_attention.py @@ -21,10 +21,9 @@ ext_module = ext_loader.load_ext( '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward']) - @ATTENTION.register_module() class TemporalSelfAttention(BaseModule): - """An attention module used in BEVFormer based on Deformable-Detr. + """An attention module used in BEVFormer based on "Deformable-Detr"!!!. `Deformable DETR: Deformable Transformers for End-to-End Object Detection. `_. @@ -49,7 +48,7 @@ class TemporalSelfAttention(BaseModule): init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. Default: None. num_bev_queue (int): In this version, we only use one history BEV and one currenct BEV. - the length of BEV queue is 2. + the length of BEV queue is 2.!!! """ def __init__(self, @@ -127,66 +126,41 @@ def init_weights(self): def forward(self, query, - key=None, - value=None, + key, + value, identity=None, - query_pos=None, - key_padding_mask=None, reference_points=None, spatial_shapes=None, level_start_index=None, + query_pos=None, + key_padding_mask=None, flag='decoder', - **kwargs): """Forward Function of MultiScaleDeformAttention. - Args: - query (Tensor): Query of Transformer with shape - (num_query, bs, embed_dims). - key (Tensor): The key tensor with shape - `(num_key, bs, embed_dims)`. - value (Tensor): The value tensor with shape - `(num_key, bs, embed_dims)`. - identity (Tensor): The tensor used for addition, with the - same shape as `query`. Default None. If None, - `query` will be used. - query_pos (Tensor): The positional encoding for `query`. - Default: None. - key_pos (Tensor): The positional encoding for `key`. Default - None. - reference_points (Tensor): The normalized reference - points with shape (bs, num_query, num_levels, 2), - all elements is range in [0, 1], top-left (0,0), - bottom-right (1, 1), including padding area. - or (N, Length_{query}, num_levels, 4), add - additional two dimensions is (w, h) to - form reference boxes. - key_padding_mask (Tensor): ByteTensor for `query`, with - shape [bs, num_key]. - spatial_shapes (Tensor): Spatial shape of features in - different levels. With shape (num_levels, 2), - last dimension represents (h, w). - level_start_index (Tensor): The start index of each level. - A tensor has shape ``(num_levels, )`` and can be represented - as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...]. - + query (Tensor): BEV query of Transformer with shape (bs, num_query, embed_dims). + key (Tensor): prev_bev. prev_bev is a Tensor with shape (bs*2, bev_h*bev_w, embed_dims) if use temporal self attention. + value (Tensor): the same as key. + identity (Tensor): The tensor used for addition, with the same shape as `query`. Default None. If None, `query` will be used. + reference_points (Tensor): ref_2d. hybird 2D reference points used in TSA. + If `prev_bev` is None, it has shape (bs, h*w, 1, 2). + else, it has shape (bs*2, h*w, 1, 2). + spatial_shapes (Tensor): Spatial shape of features in different levels. With shape (num_levels, 2), last dimension represents (h, w). + level_start_index (Tensor): The start index of each level. A tensor has shape ``(num_levels, )`` and can be represented as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...]. + Others are None. 
Returns: - Tensor: forwarded results with shape [num_query, bs, embed_dims]. + query (Tensor): forwarded results with shape (bs, num_query, embed_dims). """ - + if value is None: assert self.batch_first bs, len_bev, c = query.shape value = torch.stack([query, query], 1).reshape(bs*2, len_bev, c) - - # value = torch.cat([query, query], 0) - if identity is None: identity = query if query_pos is not None: query = query + query_pos if not self.batch_first: - # change to (bs, num_query ,embed_dims) query = query.permute(1, 0, 2) value = value.permute(1, 0, 2) bs, num_query, embed_dims = query.shape @@ -253,15 +227,15 @@ def forward(self, value, spatial_shapes, sampling_locations, attention_weights) # output shape (bs*num_bev_queue, num_query, embed_dims) - # (bs*num_bev_queue, num_query, embed_dims)-> (num_query, embed_dims, bs*num_bev_queue) + # (bs*num_bev_queue, num_query, embed_dims) -> (num_query, embed_dims, bs*num_bev_queue) output = output.permute(1, 2, 0) # fuse history value and current value - # (num_query, embed_dims, bs*num_bev_queue)-> (num_query, embed_dims, bs, num_bev_queue) + # (num_query, embed_dims, bs*num_bev_queue) -> (num_query, embed_dims, bs, num_bev_queue) output = output.view(num_query, embed_dims, bs, self.num_bev_queue) output = output.mean(-1) - # (num_query, embed_dims, bs)-> (bs, num_query, embed_dims) + # (num_query, embed_dims, bs) -> (bs, num_query, embed_dims) output = output.permute(2, 0, 1) output = self.output_proj(output) diff --git a/projects/mmdet3d_plugin/bevformer/modules/temporal_self_attention_3d.py b/projects/mmdet3d_plugin/bevformer/modules/temporal_self_attention_3d.py new file mode 100644 index 0000000..c0d95c9 --- /dev/null +++ b/projects/mmdet3d_plugin/bevformer/modules/temporal_self_attention_3d.py @@ -0,0 +1,344 @@ +# --------------------------------------------- +# Copyright (c) OpenMMLab. All rights reserved. +# --------------------------------------------- +# Modified by Zhiqi Li +# --------------------------------------------- + +from projects.mmdet3d_plugin.models.utils.bricks import run_time +from .multi_scale_deformable_attn_function import MultiScaleDeformableAttnFunction_fp32 +from mmcv.ops.multi_scale_deform_attn import multi_scale_deformable_attn_pytorch +import warnings +import torch +import torch.nn as nn +from mmcv.cnn import xavier_init, constant_init +from mmcv.cnn.bricks.registry import ATTENTION +import math +from mmcv.runner.base_module import BaseModule, ModuleList, Sequential +from mmcv.utils import (ConfigDict, build_from_cfg, deprecated_api_warning, + to_2tuple) + +from mmcv.utils import ext_loader +import torch.nn.functional as F +ext_module = ext_loader.load_ext( + '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward']) + + +@ATTENTION.register_module() +class TemporalSelfAttention3D(BaseModule): + """An attention module used in BEVFormer based on Deformable-Detr. + + `Deformable DETR: Deformable Transformers for End-to-End Object Detection. + `_. + + Args: + embed_dims (int): The embedding dimension of Attention. + Default: 256. + num_heads (int): Parallel attention heads. Default: 64. + num_levels (int): The number of feature map used in + Attention. Default: 4. + num_points (int): The number of sampling points for + each query in each head. Default: 4. + im2col_step (int): The step used in image_to_column. + Default: 64. + dropout (float): A Dropout layer on `inp_identity`. + Default: 0.1. + batch_first (bool): Key, Query and Value are shape of + (batch, n, embed_dim) + or (n, batch, embed_dim). 
Default to True. + norm_cfg (dict): Config dict for normalization layer. + Default: None. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + num_bev_queue (int): In this version, we only use one history BEV and one currenct BEV. + the length of BEV queue is 2. + """ + + def __init__(self, + embed_dims=256, + num_heads=8, + num_levels=4, + num_points=4, + num_bev_queue=2, + im2col_step=64, + dropout=0.1, + batch_first=True, + norm_cfg=None, + init_cfg=None): + + super().__init__(init_cfg) + if embed_dims % num_heads != 0: + raise ValueError(f'embed_dims must be divisible by num_heads, ' + f'but got {embed_dims} and {num_heads}') + dim_per_head = embed_dims // num_heads + self.norm_cfg = norm_cfg + self.dropout = nn.Dropout(dropout) + self.batch_first = batch_first + self.fp16_enabled = False + + # you'd better set dim_per_head to a power of 2 + # which is more efficient in the CUDA implementation + def _is_power_of_2(n): + if (not isinstance(n, int)) or (n < 0): + raise ValueError( + 'invalid input for _is_power_of_2: {} (type: {})'.format( + n, type(n))) + return (n & (n - 1) == 0) and n != 0 + + if not _is_power_of_2(dim_per_head): + warnings.warn( + "You'd better set embed_dims in " + 'MultiScaleDeformAttention to make ' + 'the dimension of each attention head a power of 2 ' + 'which is more efficient in our CUDA implementation.') + + self.im2col_step = im2col_step + self.embed_dims = embed_dims + self.num_levels = num_levels + self.num_heads = num_heads + self.num_points = num_points + self.num_bev_queue = num_bev_queue + self.sampling_offsets = nn.Linear( + embed_dims*self.num_bev_queue, num_bev_queue*num_heads * num_levels * num_points * 3) + self.attention_weights = nn.Linear(embed_dims*self.num_bev_queue, + num_bev_queue*num_heads * num_levels * num_points) + self.value_proj = nn.Linear(embed_dims, embed_dims) + self.output_proj = nn.Linear(embed_dims, embed_dims) + self.init_weights() + + def init_weights(self): + """Default initialization for Parameters of Module.""" + constant_init(self.sampling_offsets, 0.) + thetas = torch.arange( + self.num_heads, + dtype=torch.float32) * (2.0 * math.pi / self.num_heads) + grid_init = torch.stack([thetas.cos(), thetas.sin(),thetas.cos()+thetas.sin()], -1) + grid_init = (grid_init / + grid_init.abs().max(-1, keepdim=True)[0]).view( + self.num_heads, 1, 1, + 3).repeat(1, self.num_levels*self.num_bev_queue, self.num_points, 1) + + for i in range(self.num_points): + grid_init[:, :, i, :] *= i + 1 + + self.sampling_offsets.bias.data = grid_init.view(-1) + constant_init(self.attention_weights, val=0., bias=0.) + xavier_init(self.value_proj, distribution='uniform', bias=0.) + xavier_init(self.output_proj, distribution='uniform', bias=0.) + self._is_init = True + + def forward(self, + query, + key=None, + value=None, + identity=None, + query_pos=None, + key_padding_mask=None, + reference_points=None, + spatial_shapes=None, + level_start_index=None, + flag='decoder', + + **kwargs): + """Forward Function of MultiScaleDeformAttention. + + Args: + query (Tensor): Query of Transformer with shape + (num_query, bs, embed_dims). + key (Tensor): The key tensor with shape + `(num_key, bs, embed_dims)`. + value (Tensor): The value tensor with shape + `(num_key, bs, embed_dims)`. + identity (Tensor): The tensor used for addition, with the + same shape as `query`. Default None. If None, + `query` will be used. + query_pos (Tensor): The positional encoding for `query`. + Default: None. 
+ key_pos (Tensor): The positional encoding for `key`. Default + None. + reference_points (Tensor): The normalized reference + points with shape (bs, num_query, num_levels, 2), + all elements is range in [0, 1], top-left (0,0), + bottom-right (1, 1), including padding area. + or (N, Length_{query}, num_levels, 4), add + additional two dimensions is (w, h) to + form reference boxes. + key_padding_mask (Tensor): ByteTensor for `query`, with + shape [bs, num_key]. + spatial_shapes (Tensor): Spatial shape of features in + different levels. With shape (num_levels, 2), + last dimension represents (h, w). + level_start_index (Tensor): The start index of each level. + A tensor has shape ``(num_levels, )`` and can be represented + as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...]. + + Returns: + Tensor: forwarded results with shape [num_query, bs, embed_dims]. + """ + + if value is None: + assert self.batch_first + bs, len_bev, c = query.shape + value = torch.stack([query, query], 1).reshape(bs*2, len_bev, c) + + # value = torch.cat([query, query], 0) + + if identity is None: + identity = query + if query_pos is not None: + query = query + query_pos + if not self.batch_first: + # change to (bs, num_query ,embed_dims) + query = query.permute(1, 0, 2) + value = value.permute(1, 0, 2) + bs, num_query, embed_dims = query.shape + _, num_value, _ = value.shape + # print('spatial_shapes',(spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum(),num_value) + assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]* spatial_shapes[:, 2]).sum() == num_value + assert self.num_bev_queue == 2 + + query = torch.cat([value[:bs], query], -1) + value = self.value_proj(value) + + if key_padding_mask is not None: + value = value.masked_fill(key_padding_mask[..., None], 0.0) + + value = value.reshape(bs*self.num_bev_queue, + num_value, self.num_heads, -1) + sampling_offsets = self.sampling_offsets(query) + sampling_offsets = sampling_offsets.view( + bs, num_query, self.num_heads, self.num_bev_queue, self.num_levels, self.num_points, 3) + attention_weights = self.attention_weights(query).view( + bs, num_query, self.num_heads, self.num_bev_queue, self.num_levels * self.num_points) + attention_weights = attention_weights.softmax(-1) + + attention_weights = attention_weights.view(bs, num_query, + self.num_heads, + self.num_bev_queue, + self.num_levels, + self.num_points) + + attention_weights = attention_weights.permute(0, 3, 1, 2, 4, 5)\ + .reshape(bs*self.num_bev_queue, num_query, self.num_heads, self.num_levels, self.num_points).contiguous() + sampling_offsets = sampling_offsets.permute(0, 3, 1, 2, 4, 5, 6)\ + .reshape(bs*self.num_bev_queue, num_query, self.num_heads, self.num_levels, self.num_points, 3) + + if reference_points.shape[-1] == 3: # (2, bev_z*bev_h*bev_w, 1, 3) + offset_normalizer = torch.stack([spatial_shapes[..., 2], spatial_shapes[..., 1], spatial_shapes[..., 0]], -1) + sampling_locations = reference_points[:, :, None, :, None, :] \ + + sampling_offsets \ + / offset_normalizer[None, None, None, :, None, :] + + else: + raise ValueError( + f'Last dim of reference_points must be' + f' 3, but get {reference_points.shape[-1]} instead.') + + # elif reference_points.shape[-1] == 4: + # sampling_locations = reference_points[:, :, None, :, None, :2] \ + # + sampling_offsets / self.num_points \ + # * reference_points[:, :, None, :, None, 2:] \ + # * 0.5 + # else: + # raise ValueError( + # f'Last dim of reference_points must be' + # f' 2 or 4, but get {reference_points.shape[-1]} instead.') + # if torch.cuda.is_available() and 
value.is_cuda: + # + # # using fp16 deformable attention is unstable because it performs many sum operations + # if value.dtype == torch.float16: + # MultiScaleDeformableAttnFunction = MultiScaleDeformableAttnFunction_fp32 + # else: + # MultiScaleDeformableAttnFunction = MultiScaleDeformableAttnFunction_fp32 + # output = MultiScaleDeformableAttnFunction.apply( + # value, spatial_shapes, level_start_index, sampling_locations, + # attention_weights, self.im2col_step) + # else: + # + # output = multi_scale_deformable_attn_pytorch( + # value, spatial_shapes, sampling_locations, attention_weights) + + output = multi_scale_deformable_attn_pytorch_3d(value, spatial_shapes, sampling_locations, attention_weights) + + # output shape (bs*num_bev_queue, num_query, embed_dims) + # (bs*num_bev_queue, num_query, embed_dims)-> (num_query, embed_dims, bs*num_bev_queue) + output = output.permute(1, 2, 0) + + # fuse history value and current value + # (num_query, embed_dims, bs*num_bev_queue)-> (num_query, embed_dims, bs, num_bev_queue) + output = output.view(num_query, embed_dims, bs, self.num_bev_queue) + output = output.mean(-1) + + # (num_query, embed_dims, bs)-> (bs, num_query, embed_dims) + output = output.permute(2, 0, 1) + + output = self.output_proj(output) + + if not self.batch_first: + output = output.permute(1, 0, 2) + + return self.dropout(output) + identity + + +def multi_scale_deformable_attn_pytorch_3d(value, value_spatial_shapes, + sampling_locations, attention_weights): + """multi-scale deformable attention for 3D space + + Args: + value (torch.Tensor): The value has shape + (bs, num_keys, mum_heads, embed_dims//num_heads) + value_spatial_shapes (torch.Tensor): Spatial shape of + each feature map, has shape (num_levels, 3), + last dimension 3 represent (d, h, w) + sampling_locations (torch.Tensor): The location of sampling points, + has shape + (bs ,num_queries, num_heads, num_levels, num_points, 3), + the last dimension 2 represent (x, y, z) + attention_weights (torch.Tensor): The weight of sampling points used + when calculate the attention, has shape + (bs ,num_queries, num_heads, num_levels, num_points), + + Returns: + torch.Tensor: has shape (bs, num_queries, embed_dims) + """ + bs, _, num_heads, embed_dims = value.shape + _, num_queries, num_heads, num_levels, num_points, _ = \ + sampling_locations.shape + value_list = value.split([D_ * H_ * W_ for D_, H_, W_ in value_spatial_shapes], + dim=1) + sampling_grids = 2 * sampling_locations - 1 # (bs ,num_queries, num_heads, num_levels, num_points, 3) + sampling_value_list = [] + for level, (D_, H_, W_) in enumerate(value_spatial_shapes): + # bs, H_*W_, num_heads, embed_dims -> + # bs, H_*W_, num_heads*embed_dims -> + # bs, num_heads*embed_dims, D_*H_*W_ -> + # bs*num_heads, embed_dims, D_, H_, W_ + value_l_ = value_list[level].flatten(2).transpose(1, 2).reshape( + bs * num_heads, embed_dims, D_, H_, W_) + + # bs, num_queries, num_heads, num_points, 3 -> + # bs, num_heads, num_queries, num_points, 3 -> + # bs*num_heads, num_queries, num_points, 3 -> + # bs*num_heads, 1, num_queries, num_points, 3 + sampling_grid_l_ = sampling_grids[:, :, :, level].transpose(1, 2).flatten(0, 1).unsqueeze(1) + + # bs*num_heads, embed_dims, num_queries, num_points + sampling_value_l_ = F.grid_sample( + value_l_, + sampling_grid_l_, + mode='bilinear', + padding_mode='zeros', + align_corners=False).squeeze() + + sampling_value_list.append(sampling_value_l_) + # (bs, num_queries, num_heads, num_levels, num_points) -> + # (bs, num_heads, num_queries, num_levels, 
num_points) -> + # (bs, num_heads, 1, num_queries, num_levels*num_points) + attention_weights = attention_weights.transpose(1, 2).reshape( + bs * num_heads, 1, num_queries, num_levels * num_points) + output = (torch.stack(sampling_value_list, dim=-2).flatten(-2) * + attention_weights).sum(-1).view(bs, num_heads * embed_dims, + num_queries) + + return output.transpose(1, 2).contiguous() + + diff --git a/projects/mmdet3d_plugin/bevformer/modules/unet.py b/projects/mmdet3d_plugin/bevformer/modules/unet.py new file mode 100644 index 0000000..420712d --- /dev/null +++ b/projects/mmdet3d_plugin/bevformer/modules/unet.py @@ -0,0 +1,788 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import (UPSAMPLE_LAYERS, ConvModule, build_activation_layer, + build_norm_layer) +from mmcv.runner import BaseModule +from mmcv.utils.parrots_wrapper import _BatchNorm + +from mmseg.ops import Upsample +from mmdet.models.builder import BACKBONES, HEADS + +from mmcv.cnn import ConvModule, build_upsample_layer +from mmseg.ops import resize +from .decode_head import BaseDecodeHead + + +class UpConvBlock(nn.Module): + """Upsample convolution block in decoder for UNet. + This upsample convolution block consists of one upsample module + followed by one convolution block. The upsample module expands the + high-level low-resolution feature map and the convolution block fuses + the upsampled high-level low-resolution feature map and the low-level + high-resolution feature map from encoder. + Args: + conv_block (nn.Sequential): Sequential of convolutional layers. + in_channels (int): Number of input channels of the high-level + skip_channels (int): Number of input channels of the low-level + high-resolution feature map from encoder. + out_channels (int): Number of output channels. + num_convs (int): Number of convolutional layers in the conv_block. + Default: 2. + stride (int): Stride of convolutional layer in conv_block. Default: 1. + dilation (int): Dilation rate of convolutional layer in conv_block. + Default: 1. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + conv_cfg (dict | None): Config dict for convolution layer. + Default: None. + norm_cfg (dict | None): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict | None): Config dict for activation layer in ConvModule. + Default: dict(type='ReLU'). + upsample_cfg (dict): The upsample config of the upsample module in + decoder. Default: dict(type='MYInterpConv'). If the size of + high-level feature map is the same as that of skip feature map + (low-level feature map from encoder), it does not need upsample the + high-level feature map and the upsample_cfg is None. + dcn (bool): Use deformable convolution in convolutional layer or not. + Default: None. + plugins (dict): plugins for convolutional layers. Default: None. + """ + + def __init__(self, + conv_block, + in_channels, + skip_channels, + out_channels, + num_convs=2, + stride=1, + dilation=1, + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + upsample_cfg=dict(type='MYInterpConv'), + dcn=None, + plugins=None): + super(UpConvBlock, self).__init__() + assert dcn is None, 'Not implemented yet.' + assert plugins is None, 'Not implemented yet.' 
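+        # The upsample module below reduces the high-level map to `skip_channels`, and
+        # forward() concatenates it with the skip feature from the encoder, which is why
+        # the fusing conv block takes 2 * skip_channels input channels.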
+ + self.conv_block = conv_block( + in_channels=2 * skip_channels, + out_channels=out_channels, + num_convs=num_convs, + stride=stride, + dilation=dilation, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + dcn=None, + plugins=None) + if upsample_cfg is not None: + self.upsample = build_upsample_layer( + cfg=upsample_cfg, + in_channels=in_channels, + out_channels=skip_channels, + with_cp=with_cp, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + else: + self.upsample = ConvModule( + in_channels, + skip_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, skip, x): + """Forward function.""" + + x = self.upsample(x) + out = torch.cat([skip, x], dim=1) + out = self.conv_block(out) + + return out + + +class BasicConvBlock(nn.Module): + """Basic convolutional block for UNet. + + This module consists of several plain convolutional layers. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + num_convs (int): Number of convolutional layers. Default: 2. + stride (int): Whether use stride convolution to downsample + the input feature map. If stride=2, it only uses stride convolution + in the first convolutional layer to downsample the input feature + map. Options are 1 or 2. Default: 1. + dilation (int): Whether use dilated convolution to expand the + receptive field. Set dilation rate of each convolutional layer and + the dilation rate of the first convolutional layer is always 1. + Default: 1. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + conv_cfg (dict | None): Config dict for convolution layer. + Default: None. + norm_cfg (dict | None): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict | None): Config dict for activation layer in ConvModule. + Default: dict(type='ReLU'). + dcn (bool): Use deformable convolution in convolutional layer or not. + Default: None. + plugins (dict): plugins for convolutional layers. Default: None. + """ + + def __init__(self, + in_channels, + out_channels, + num_convs=2, + stride=1, + dilation=1, + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + dcn=None, + plugins=None): + super(BasicConvBlock, self).__init__() + assert dcn is None, 'Not implemented yet.' + assert plugins is None, 'Not implemented yet.' + + self.with_cp = with_cp + convs = [] + for i in range(num_convs): + convs.append( + ConvModule( + in_channels=in_channels if i == 0 else out_channels, + out_channels=out_channels, + kernel_size=3, + stride=stride if i == 0 else 1, + dilation=1 if i == 0 else dilation, + padding=1 if i == 0 else dilation, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + self.convs = nn.Sequential(*convs) + + def forward(self, x): + """Forward function.""" + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(self.convs, x) + else: + out = self.convs(x) + return out + + +@UPSAMPLE_LAYERS.register_module() +class MYDeconvModule(nn.Module): + """Deconvolution upsample module in decoder for UNet (2X upsample). + + This module uses deconvolution to upsample feature map in the decoder + of UNet. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. 
+ norm_cfg (dict | None): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict | None): Config dict for activation layer in ConvModule. + Default: dict(type='ReLU'). + kernel_size (int): Kernel size of the convolutional layer. Default: 4. + """ + + def __init__(self, + in_channels, + out_channels, + with_cp=False, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + *, + kernel_size=4, + scale_factor=2): + super(MYDeconvModule, self).__init__() + + assert (kernel_size - scale_factor >= 0) and\ + (kernel_size - scale_factor) % 2 == 0,\ + f'kernel_size should be greater than or equal to scale_factor '\ + f'and (kernel_size - scale_factor) should be even numbers, '\ + f'while the kernel size is {kernel_size} and scale_factor is '\ + f'{scale_factor}.' + + stride = scale_factor + padding = (kernel_size - scale_factor) // 2 + self.with_cp = with_cp + deconv = nn.ConvTranspose2d( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding) + + norm_name, norm = build_norm_layer(norm_cfg, out_channels) + activate = build_activation_layer(act_cfg) + self.deconv_upsamping = nn.Sequential(deconv, norm, activate) + + def forward(self, x): + """Forward function.""" + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(self.deconv_upsamping, x) + else: + out = self.deconv_upsamping(x) + return out + + +@UPSAMPLE_LAYERS.register_module() +class MYInterpConv(nn.Module): + """Interpolation upsample module in decoder for UNet. + + This module uses interpolation to upsample feature map in the decoder + of UNet. It consists of one interpolation upsample layer and one + convolutional layer. It can be one interpolation upsample layer followed + by one convolutional layer (conv_first=False) or one convolutional layer + followed by one interpolation upsample layer (conv_first=True). + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + norm_cfg (dict | None): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict | None): Config dict for activation layer in ConvModule. + Default: dict(type='ReLU'). + conv_cfg (dict | None): Config dict for convolution layer. + Default: None. + conv_first (bool): Whether convolutional layer or interpolation + upsample layer first. Default: False. It means interpolation + upsample layer followed by one convolutional layer. + kernel_size (int): Kernel size of the convolutional layer. Default: 1. + stride (int): Stride of the convolutional layer. Default: 1. + padding (int): Padding of the convolutional layer. Default: 1. + upsample_cfg (dict): Interpolation config of the upsample layer. + Default: dict( + scale_factor=2, mode='bilinear', align_corners=False). 
+ """ + + def __init__(self, + in_channels, + out_channels, + with_cp=False, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + *, + conv_cfg=None, + conv_first=False, + kernel_size=1, + stride=1, + padding=0, + upsample_cfg=dict( + scale_factor=2, mode='bilinear', align_corners=False)): + super(MYInterpConv, self).__init__() + + self.with_cp = with_cp + conv = ConvModule( + in_channels, + out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + upsample = Upsample(**upsample_cfg) + if conv_first: + self.interp_upsample = nn.Sequential(conv, upsample) + else: + self.interp_upsample = nn.Sequential(upsample, conv) + + def forward(self, x): + """Forward function.""" + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(self.interp_upsample, x) + else: + out = self.interp_upsample(x) + return out + + +@BACKBONES.register_module() +class MYUNet(BaseModule): + """UNet backbone. + + This backbone is the implementation of `U-Net: Convolutional Networks + for Biomedical Image Segmentation `_. + + Args: + in_channels (int): Number of input image channels. Default" 3. + base_channels (int): Number of base channels of each stage. + The output channels of the first stage. Default: 64. + num_stages (int): Number of stages in encoder, normally 5. Default: 5. + strides (Sequence[int 1 | 2]): Strides of each stage in encoder. + len(strides) is equal to num_stages. Normally the stride of the + first stage in encoder is 1. If strides[i]=2, it uses stride + convolution to downsample in the correspondence encoder stage. + Default: (1, 1, 1, 1, 1). + enc_num_convs (Sequence[int]): Number of convolutional layers in the + convolution block of the correspondence encoder stage. + Default: (2, 2, 2, 2, 2). + dec_num_convs (Sequence[int]): Number of convolutional layers in the + convolution block of the correspondence decoder stage. + Default: (2, 2, 2, 2). + downsamples (Sequence[int]): Whether use MaxPool to downsample the + feature map after the first stage of encoder + (stages: [1, num_stages)). If the correspondence encoder stage use + stride convolution (strides[i]=2), it will never use MaxPool to + downsample, even downsamples[i-1]=True. + Default: (True, True, True, True). + enc_dilations (Sequence[int]): Dilation rate of each stage in encoder. + Default: (1, 1, 1, 1, 1). + dec_dilations (Sequence[int]): Dilation rate of each stage in decoder. + Default: (1, 1, 1, 1). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + conv_cfg (dict | None): Config dict for convolution layer. + Default: None. + norm_cfg (dict | None): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict | None): Config dict for activation layer in ConvModule. + Default: dict(type='ReLU'). + upsample_cfg (dict): The upsample config of the upsample module in + decoder. Default: dict(type='MYInterpConv'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + dcn (bool): Use deformable convolution in convolutional layer or not. + Default: None. + plugins (dict): plugins for convolutional layers. Default: None. + pretrained (str, optional): model pretrained path. Default: None + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + + Notice: + The input image size should be divisible by the whole downsample rate + of the encoder. More detail of the whole downsample rate can be found + in UNet._check_input_divisible. + """ + + def __init__(self, + in_channels=3, + base_channels=64, + num_stages=5, + strides=(1, 1, 1, 1, 1), + enc_num_convs=(2, 2, 2, 2, 2), + dec_num_convs=(2, 2, 2, 2), + downsamples=(True, True, True, True), + enc_dilations=(1, 1, 1, 1, 1), + dec_dilations=(1, 1, 1, 1), + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + upsample_cfg=dict(type='MYInterpConv'), + norm_eval=False, + dcn=None, + plugins=None, + pretrained=None, + init_cfg=None): + super(MYUNet, self).__init__(init_cfg) + + self.pretrained = pretrained + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be setting at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is a deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is None: + if init_cfg is None: + self.init_cfg = [ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ] + else: + raise TypeError('pretrained must be a str or None') + + assert dcn is None, 'Not implemented yet.' + assert plugins is None, 'Not implemented yet.' + assert len(strides) == num_stages, \ + 'The length of strides should be equal to num_stages, '\ + f'while the strides is {strides}, the length of '\ + f'strides is {len(strides)}, and the num_stages is '\ + f'{num_stages}.' + assert len(enc_num_convs) == num_stages, \ + 'The length of enc_num_convs should be equal to num_stages, '\ + f'while the enc_num_convs is {enc_num_convs}, the length of '\ + f'enc_num_convs is {len(enc_num_convs)}, and the num_stages is '\ + f'{num_stages}.' + assert len(dec_num_convs) == (num_stages-1), \ + 'The length of dec_num_convs should be equal to (num_stages-1), '\ + f'while the dec_num_convs is {dec_num_convs}, the length of '\ + f'dec_num_convs is {len(dec_num_convs)}, and the num_stages is '\ + f'{num_stages}.' + assert len(downsamples) == (num_stages-1), \ + 'The length of downsamples should be equal to (num_stages-1), '\ + f'while the downsamples is {downsamples}, the length of '\ + f'downsamples is {len(downsamples)}, and the num_stages is '\ + f'{num_stages}.' + assert len(enc_dilations) == num_stages, \ + 'The length of enc_dilations should be equal to num_stages, '\ + f'while the enc_dilations is {enc_dilations}, the length of '\ + f'enc_dilations is {len(enc_dilations)}, and the num_stages is '\ + f'{num_stages}.' + assert len(dec_dilations) == (num_stages-1), \ + 'The length of dec_dilations should be equal to (num_stages-1), '\ + f'while the dec_dilations is {dec_dilations}, the length of '\ + f'dec_dilations is {len(dec_dilations)}, and the num_stages is '\ + f'{num_stages}.' 
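+        # Keep the encoder layout around: _check_input_divisible() uses num_stages,
+        # strides and downsamples to verify the input resolution, and train() uses
+        # norm_eval to freeze BatchNorm statistics.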
+ self.num_stages = num_stages + self.strides = strides + self.downsamples = downsamples + self.norm_eval = norm_eval + self.base_channels = base_channels + + self.encoder = nn.ModuleList() + self.decoder = nn.ModuleList() + + for i in range(num_stages): + enc_conv_block = [] + if i != 0: + if strides[i] == 1 and downsamples[i - 1]: + enc_conv_block.append(nn.MaxPool2d(kernel_size=2)) + upsample = (strides[i] != 1 or downsamples[i - 1]) + self.decoder.append( + UpConvBlock( + conv_block=BasicConvBlock, + in_channels=base_channels * 2**i, + skip_channels=base_channels * 2**(i - 1), + out_channels=base_channels * 2**(i - 1), + num_convs=dec_num_convs[i - 1], + stride=1, + dilation=dec_dilations[i - 1], + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + upsample_cfg=upsample_cfg if upsample else None, + dcn=None, + plugins=None)) + + enc_conv_block.append( + BasicConvBlock( + in_channels=in_channels, + out_channels=base_channels * 2**i, + num_convs=enc_num_convs[i], + stride=strides[i], + dilation=enc_dilations[i], + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + dcn=None, + plugins=None)) + self.encoder.append((nn.Sequential(*enc_conv_block))) + in_channels = base_channels * 2**i + + def forward(self, x): + self._check_input_divisible(x) + enc_outs = [] + for enc in self.encoder: + x = enc(x) + enc_outs.append(x) + dec_outs = [x] + for i in reversed(range(len(self.decoder))): + x = self.decoder[i](enc_outs[i], x) + dec_outs.append(x) + + return dec_outs + + def train(self, mode=True): + """Convert the model into training mode while keep normalization layer + freezed.""" + super(MYUNet, self).train(mode) + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + + def _check_input_divisible(self, x): + h, w = x.shape[-2:] + whole_downsample_rate = 1 + for i in range(1, self.num_stages): + if self.strides[i] == 2 or self.downsamples[i - 1]: + whole_downsample_rate *= 2 + assert (h % whole_downsample_rate == 0) \ + and (w % whole_downsample_rate == 0),\ + f'The input image size {(h, w)} should be divisible by the whole '\ + f'downsample rate {whole_downsample_rate}, when num_stages is '\ + f'{self.num_stages}, strides is {self.strides}, and downsamples '\ + f'is {self.downsamples}.' + + +class ASPPModule(nn.ModuleList): + """Atrous Spatial Pyramid Pooling (ASPP) Module. + Args: + dilations (tuple[int]): Dilation rate of each layer. + in_channels (int): Input channels. + channels (int): Channels after modules, before + conv_seg. + conv_cfg (dict|None): Config of conv layers. + norm_cfg (dict|None): Config of norm layers. + act_cfg (dict): Config of activation layers. 
+ """ + + def __init__(self, axis, dilations, in_channels, channels, conv_cfg, norm_cfg, + act_cfg): + super(ASPPModule, self).__init__() + self.dilations = dilations + self.in_channels = in_channels + self.channels = channels + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + for dilation in dilations: + kernel_size = [3 for i in range(3)] + padding = [dilation for i in range(3)] + if axis == 'x': + kernel_size[0] = 1 + padding[0] = 0 + elif axis == 'y': + kernel_size[1] = 1 + padding[1] = 0 + elif axis == 'z': + kernel_size[2] = 1 + padding[2] = 0 + elif axis == '2D': + kernel_size = 1 if dilation == 1 else 3 + padding=0 if dilation == 1 else dilation + else: + raise NotImplementedError + + self.append( + ConvModule( + self.in_channels, + self.channels, + # 1 if dilation == 1 else 3, + kernel_size=kernel_size, + padding=padding, + dilation=dilation, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + + def forward(self, x): + """Forward function.""" + aspp_outs = [] + for aspp_module in self: + aspp_outs.append(aspp_module(x)) + return aspp_outs + + +@HEADS.register_module() +class MYASPPHead(BaseDecodeHead): + """Rethinking Atrous Convolution for Semantic Image Segmentation. + This head is the implementation of `DeepLabV3 + `_. + Args: + dilations (tuple[int]): Dilation rates for ASPP module. + Default: (1, 6, 12, 18). + """ + + def __init__(self, is_volume=True, dilations=(1, 6, 12, 18), **kwargs): + super(MYASPPHead, self).__init__(**kwargs) + assert isinstance(dilations, (list, tuple)) + self.dilations = dilations + # self.image_pool = nn.Sequential( + # nn.AdaptiveAvgPool2d(1), + # ConvModule( + # self.in_channels, + # self.channels, + # 1, + # conv_cfg=self.conv_cfg, + # norm_cfg=self.norm_cfg, + # act_cfg=self.act_cfg)) + self.is_volume = is_volume + if is_volume: + self.aspp_modules_x = ASPPModule( + 'x', + dilations, + self.in_channels, + self.channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.bottleneck_x = ConvModule( + (len(dilations) + 0) * self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.aspp_modules_y = ASPPModule( + 'y', + dilations, + self.in_channels, + self.channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.bottleneck_y = ConvModule( + (len(dilations) + 0) * self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.aspp_modules_z = ASPPModule( + 'z', + dilations, + self.in_channels, + self.channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.bottleneck_z = ConvModule( + (len(dilations) + 0) * self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.fuse_xyz = ConvModule( + 3 * self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + else: + self.aspp_modules = ASPPModule( + '2D', + dilations, + self.in_channels, + self.channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.bottleneck = ConvModule( + (len(dilations) + 0) * self.channels, + self.channels, + 3, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + def _forward_feature(self, inputs): + """Forward function for feature maps before classifying each 
pixel with + ``self.cls_seg`` fc. + Args: + inputs (list[Tensor]): List of multi-level img features. + Returns: + feats (Tensor): A tensor of shape (batch_size, self.channels, + H, W) which is feature map for last layer of decoder head. + """ + x = inputs # self._transform_inputs(inputs) + # aspp_outs = [ + # resize( + # self.image_pool(x), + # size=x.size()[2:], + # mode='bilinear', + # align_corners=self.align_corners) + # ] + if self.is_volume: + aspp_outs_x = torch.cat(self.aspp_modules_x(x), dim=1) + feats_x = self.bottleneck_x(aspp_outs_x) + + aspp_outs_y = torch.cat(self.aspp_modules_y(x), dim=1) + feats_y = self.bottleneck_y(aspp_outs_y) + + aspp_outs_z = torch.cat(self.aspp_modules_z(x), dim=1) + feats_z = self.bottleneck_z(aspp_outs_z) + + feats = torch.cat([feats_x, feats_y, feats_z], dim=1) + feats = self.fuse_xyz(feats) + else: + aspp_outs = torch.cat(self.aspp_modules(x), dim=1) + feats = self.bottleneck(aspp_outs) + + return feats + + def forward(self, inputs): + """Forward function.""" + output = self._forward_feature(inputs) + # output = self.cls_seg(output) + return output + + +if __name__ == '__main__': + def check_3d(): + conv_cfg = dict(type='Conv3d') + norm_cfg = dict(type='BN3d', requires_grad=True) + in_channels = 16 + channels = 16 + model = MYASPPHead( + in_channels=in_channels, + in_index=3, + channels=channels, + dilations=(1, 3, 6, 9), + dropout_ratio=0.1, + num_classes=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + align_corners=False, + # loss_decode=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + ).cuda() + x = torch.zeros((2, in_channels, 8, 256, 256)).cuda() + y = model(x) + print(x.shape, y.shape) + + def check_2d(): + conv_cfg = dict(type='Conv2d') + norm_cfg = dict(type='BN', requires_grad=True) + in_channels = 16 + channels = 16 + model = MYASPPHead( + is_volume=False, + in_channels=in_channels, + in_index=3, + channels=channels, + dilations=(1, 3, 6, 9), + dropout_ratio=0.1, + num_classes=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + align_corners=False, + # loss_decode=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), + ).cuda() + x = torch.zeros((2, in_channels, 256, 256)).cuda() + y = model(x) + print(x.shape, y.shape) + + check_2d() + check_3d() diff --git a/projects/mmdet3d_plugin/bevformer/modules/view_transformer.py b/projects/mmdet3d_plugin/bevformer/modules/view_transformer.py new file mode 100644 index 0000000..f6dc187 --- /dev/null +++ b/projects/mmdet3d_plugin/bevformer/modules/view_transformer.py @@ -0,0 +1,389 @@ +# Copyright (c) Phigent Robotics. All rights reserved. + +import torch +import torch.nn as nn +from mmcv.runner import BaseModule +# from ..builder import NECKS +from mmdet.models import NECKS + +# from projects.mmdet3d_plugin.ops import bev_pool +from mmcv.cnn import build_conv_layer +# from .. 
import builder +from mmdet3d.models import builder + +def gen_dx_bx(xbound, ybound, zbound): + # xbound [-51.2, 51.2, 0.8] + # ybound [-51.2, 51.2, 0.8] + # zbound [-10.0, 10.0, 20.0] + dx = torch.Tensor([row[2] for row in [xbound, ybound, zbound]]) + bx = torch.Tensor([row[0] + row[2]/2.0 for row in [xbound, ybound, zbound]]) + nx = torch.Tensor([(row[1] - row[0]) / row[2] for row in [xbound, ybound, zbound]]) + return dx, bx, nx + + +def cumsum_trick(x, geom_feats, ranks): + x = x.cumsum(0) + kept = torch.ones(x.shape[0], device=x.device, dtype=torch.bool) + kept[:-1] = (ranks[1:] != ranks[:-1]) + x, geom_feats = x[kept], geom_feats[kept] + x = torch.cat((x[:1], x[1:] - x[:-1])) + return x, geom_feats + + +class QuickCumsum(torch.autograd.Function): + @staticmethod + def forward(ctx, x, geom_feats, ranks): + x = x.cumsum(0) + kept = torch.ones(x.shape[0], device=x.device, dtype=torch.bool) + kept[:-1] = (ranks[1:] != ranks[:-1]) + + x, geom_feats = x[kept], geom_feats[kept] + x = torch.cat((x[:1], x[1:] - x[:-1])) + + # save kept for backward + ctx.save_for_backward(kept) + + # no gradient for geom_feats + ctx.mark_non_differentiable(geom_feats) + + return x, geom_feats + + @staticmethod + def backward(ctx, gradx, gradgeom): + kept, = ctx.saved_tensors + back = torch.cumsum(kept, 0) + back[kept] -= 1 + + val = gradx[back] + + return val, None, None + +# def bev_pool(feats, coords, B, D, H, W): +# assert feats.shape[0] == coords.shape[0] + +# ranks = ( +# coords[:, 0] * (W * D * B) +# + coords[:, 1] * (D * B) +# + coords[:, 2] * B +# + coords[:, 3] +# ) +# indices = ranks.argsort() +# feats, coords, ranks = feats[indices], coords[indices], ranks[indices] + +# x = QuickCumsum.apply(feats, coords, ranks) +# x = x.permute(0, 4, 1, 2, 3).contiguous() +# return x + +# @NECKS.register_module() +class ViewTransformerLiftSplatShoot_solo(BaseModule): + def __init__(self, grid_config=None, data_config=None, + numC_input=512, numC_Trans=64, downsample=16, + accelerate=False, max_drop_point_rate=0.0, use_bev_pool=True, + **kwargs): + super(ViewTransformerLiftSplatShoot_solo, self).__init__() + if grid_config is None: + grid_config = { + 'xbound': [-51.2, 51.2, 0.8], + 'ybound': [-51.2, 51.2, 0.8], + 'zbound': [-10.0, 10.0, 20.0], + 'dbound': [1.0, 60.0, 1.0],} + self.grid_config = grid_config + dx, bx, nx = gen_dx_bx(self.grid_config['xbound'], + self.grid_config['ybound'], + self.grid_config['zbound'], + ) + self.dx = nn.Parameter(dx, requires_grad=False) + self.bx = nn.Parameter(bx, requires_grad=False) + self.nx = nn.Parameter(nx, requires_grad=False) + + if data_config is None: + data_config = {'input_size': (256, 704)} + self.data_config = data_config + self.downsample = downsample + + self.frustum = self.create_frustum() + self.D, _, _, _ = self.frustum.shape + self.numC_input = numC_input + self.numC_Trans = numC_Trans + self.depthnet = nn.Conv2d(self.numC_input, self.D + self.numC_Trans, kernel_size=1, padding=0) + self.geom_feats = None + self.accelerate = accelerate + self.max_drop_point_rate = max_drop_point_rate + self.use_bev_pool = use_bev_pool + + def get_depth_dist(self, x): + return x.softmax(dim=1) + + def create_frustum(self): + # make grid in image plane + ogfH, ogfW = self.data_config['input_size'] + fH, fW = ogfH // self.downsample, ogfW // self.downsample + ds = torch.arange(*self.grid_config['dbound'], dtype=torch.float).view(-1, 1, 1).expand(-1, fH, fW) + D, _, _ = ds.shape + xs = torch.linspace(0, ogfW - 1, fW, dtype=torch.float).view(1, 1, fW).expand(D, fH, fW) + ys = 
torch.linspace(0, ogfH - 1, fH, dtype=torch.float).view(1, fH, 1).expand(D, fH, fW) + + # D x H x W x 3 + frustum = torch.stack((xs, ys, ds), -1) + return nn.Parameter(frustum, requires_grad=False) + + def get_geometry(self, rots, trans, intrins, post_rots, post_trans, offset=None): + """Determine the (x,y,z) locations (in the ego frame) + of the points in the point cloud. + Returns B x N x D x H/downsample x W/downsample x 3 + """ + B, N, _ = trans.shape + + # undo post-transformation + # B x N x D x H x W x 3 + points = self.frustum - post_trans.view(B, N, 1, 1, 1, 3) + if offset is not None: + _,D,H,W = offset.shape + points[:,:,:,:,:,2] = points[:,:,:,:,:,2]+offset.view(B,N,D,H,W) + points = torch.inverse(post_rots).view(B, N, 1, 1, 1, 3, 3).matmul(points.unsqueeze(-1)) + + # cam_to_ego + points = torch.cat((points[:, :, :, :, :, :2] * points[:, :, :, :, :, 2:3], + points[:, :, :, :, :, 2:3] + ), 5) + if intrins.shape[3]==4: # for KITTI + shift = intrins[:,:,:3,3] + points = points - shift.view(B,N,1,1,1,3,1) + intrins = intrins[:,:,:3,:3] + combine = rots.matmul(torch.inverse(intrins)) + points = combine.view(B, N, 1, 1, 1, 3, 3).matmul(points).squeeze(-1) + points += trans.view(B, N, 1, 1, 1, 3) + + # points_numpy = points.detach().cpu().numpy() + return points + + # def bev_pool(feats, coords, B, D, H, W): + # assert feats.shape[0] == coords.shape[0] + + # ranks = ( + # coords[:, 0] * (W * D * B) + # + coords[:, 1] * (D * B) + # + coords[:, 2] * B + # + coords[:, 3] + # ) + # indices = ranks.argsort() + # feats, coords, ranks = feats[indices], coords[indices], ranks[indices] + + # x = QuickCumsumCuda.apply(feats, coords, ranks, B, D, H, W) + # x = x.permute(0, 4, 1, 2, 3).contiguous() + # return x + + def voxel_pooling(self, geom_feats, x): + B, N, D, H, W, C = x.shape + Nprime = B * N * D * H * W + nx = self.nx.to(torch.long) + # flatten x + x = x.reshape(Nprime, C) + + # flatten indices + geom_feats = ((geom_feats - (self.bx - self.dx / 2.)) / self.dx).long() + geom_feats = geom_feats.view(Nprime, 3) + batch_ix = torch.cat([torch.full([Nprime // B, 1], ix, + device=x.device, dtype=torch.long) for ix in range(B)]) + geom_feats = torch.cat((geom_feats, batch_ix), 1) + + # filter out points that are outside box + kept = (geom_feats[:, 0] >= 0) & (geom_feats[:, 0] < self.nx[0]) \ + & (geom_feats[:, 1] >= 0) & (geom_feats[:, 1] < self.nx[1]) \ + & (geom_feats[:, 2] >= 0) & (geom_feats[:, 2] < self.nx[2]) + x = x[kept] + geom_feats = geom_feats[kept] + + if self.max_drop_point_rate > 0.0 and self.training: + drop_point_rate = torch.rand(1)*self.max_drop_point_rate + kept = torch.rand(x.shape[0])>drop_point_rate + x, geom_feats = x[kept], geom_feats[kept] + + # DEBUG_TMP = False + # if DEBUG_TMP: + if self.use_bev_pool: + final = bev_pool(x, geom_feats, B, self.nx[2], self.nx[0], self.nx[1]) + final = final.transpose(dim0=-2, dim1=-1) + else: + # get tensors from the same voxel next to each other + ranks = geom_feats[:, 0] * (self.nx[1] * self.nx[2] * B) \ + + geom_feats[:, 1] * (self.nx[2] * B) \ + + geom_feats[:, 2] * B \ + + geom_feats[:, 3] + sorts = ranks.argsort() + x, geom_feats, ranks = x[sorts], geom_feats[sorts], ranks[sorts] + + # cumsum trick + x, geom_feats = QuickCumsum.apply(x, geom_feats, ranks) + + # griddify (B x C x Z x X x Y) + final = torch.zeros((B, C, nx[2], nx[1], nx[0]), device=x.device) + final[geom_feats[:, 3], :, geom_feats[:, 2], geom_feats[:, 1], geom_feats[:, 0]] = x + # collapse Z + final = torch.cat(final.unbind(dim=2), 1) + + return final + + def 
voxel_pooling_accelerated(self, rots, trans, intrins, post_rots, post_trans, x): + B, N, D, H, W, C = x.shape + Nprime = B * N * D * H * W + nx = self.nx.to(torch.long) + # flatten x + x = x.reshape(Nprime, C) + max = 300 + # flatten indices + if self.geom_feats is None: + geom_feats = self.get_geometry(rots, trans, intrins, post_rots, post_trans) + geom_feats = ((geom_feats - (self.bx - self.dx / 2.)) / self.dx).long() + geom_feats = geom_feats.view(Nprime, 3) + batch_ix = torch.cat([torch.full([Nprime // B, 1], ix, + device=x.device, dtype=torch.long) for ix in range(B)]) + geom_feats = torch.cat((geom_feats, batch_ix), 1) + + # filter out points that are outside box + kept1 = (geom_feats[:, 0] >= 0) & (geom_feats[:, 0] < self.nx[0]) \ + & (geom_feats[:, 1] >= 0) & (geom_feats[:, 1] < self.nx[1]) \ + & (geom_feats[:, 2] >= 0) & (geom_feats[:, 2] < self.nx[2]) + idx = torch.range(0, x.shape[0] - 1, dtype=torch.long) + x = x[kept1] + idx = idx[kept1] + geom_feats = geom_feats[kept1] + + # get tensors from the same voxel next to each other + ranks = geom_feats[:, 0] * (self.nx[1] * self.nx[2] * B) \ + + geom_feats[:, 1] * (self.nx[2] * B) \ + + geom_feats[:, 2] * B \ + + geom_feats[:, 3] + sorts = ranks.argsort() + x, geom_feats, ranks, idx = x[sorts], geom_feats[sorts], ranks[sorts], idx[sorts] + repeat_id = torch.ones(geom_feats.shape[0], device=geom_feats.device, dtype=geom_feats.dtype) + curr = 0 + repeat_id[0] = 0 + curr_rank = ranks[0] + + for i in range(1, ranks.shape[0]): + if curr_rank == ranks[i]: + curr += 1 + repeat_id[i] = curr + else: + curr_rank = ranks[i] + curr = 0 + repeat_id[i] = curr + kept2 = repeat_id < max + repeat_id, geom_feats, x, idx = repeat_id[kept2], geom_feats[kept2], x[kept2], idx[kept2] + + geom_feats = torch.cat([geom_feats, repeat_id.unsqueeze(-1)], dim=-1) + self.geom_feats = geom_feats + self.idx = idx + else: + geom_feats = self.geom_feats + idx = self.idx + x = x[idx] + + # griddify (B x C x Z x X x Y) + final = torch.zeros((B, C, nx[2], nx[1], nx[0], max), device=x.device) + final[geom_feats[:, 3], :, geom_feats[:, 2], geom_feats[:, 1], geom_feats[:, 0], geom_feats[:, 4]] = x + final = final.sum(-1) + # collapse Z + final = torch.cat(final.unbind(dim=2), 1) + + return final + + def forward(self, input): + x, rots, trans, intrins, post_rots, post_trans = input + B, N, C, H, W = x.shape + x = x.view(B * N, C, H, W) + x = self.depthnet(x) + depth = self.get_depth_dist(x[:, :self.D]) + img_feat = x[:, self.D:(self.D + self.numC_Trans)] + + # Lift + volume = depth.unsqueeze(1) * img_feat.unsqueeze(2) + volume = volume.view(B, N, self.numC_Trans, self.D, H, W) + volume = volume.permute(0, 1, 3, 4, 5, 2) + + # Splat + if self.accelerate: + bev_feat = self.voxel_pooling_accelerated(rots, trans, intrins, post_rots, post_trans, volume) + else: + geom = self.get_geometry(rots, trans, intrins, post_rots, post_trans) + bev_feat = self.voxel_pooling(geom, volume) + return bev_feat + + +class SELikeModule(nn.Module): + def __init__(self, in_channel=512, feat_channel=256, intrinsic_channel=33): + super(SELikeModule, self).__init__() + self.input_conv = nn.Conv2d(in_channel, feat_channel, kernel_size=1, padding=0) + self.fc = nn.Sequential( + nn.BatchNorm1d(intrinsic_channel), + nn.Linear(intrinsic_channel, feat_channel), + nn.Sigmoid() ) + + def forward(self, x, cam_params): + x = self.input_conv(x) + b,c,_,_ = x.shape + y = self.fc(cam_params).view(b, c, 1, 1) + return x * y.expand_as(x) + + +@NECKS.register_module() +class 
ViewTransformerLSSBEVDepth(ViewTransformerLiftSplatShoot_solo): + def __init__(self, extra_depth_net, loss_depth_weight, se_config=dict(), + dcn_config=dict(bias=True), **kwargs): + super(ViewTransformerLSSBEVDepth, self).__init__(**kwargs) + self.loss_depth_weight = loss_depth_weight + self.extra_depthnet = builder.build_backbone(extra_depth_net) + self.featnet = nn.Conv2d(self.numC_input, + self.numC_Trans, + kernel_size=1, + padding=0) + self.depthnet = nn.Conv2d(extra_depth_net['num_channels'][0], + self.D, + kernel_size=1, + padding=0) + self.dcn = nn.Sequential(*[build_conv_layer(dict(type='DCNv2', + deform_groups=1), + extra_depth_net['num_channels'][0], + extra_depth_net['num_channels'][0], + kernel_size=3, + stride=1, + padding=1, + dilation=1, + **dcn_config), + nn.BatchNorm2d(extra_depth_net['num_channels'][0]) + ]) + self.se = SELikeModule(self.numC_input, + feat_channel=extra_depth_net['num_channels'][0], + **se_config) + + def forward(self, input): + x, rots, trans, intrins, post_rots, post_trans, depth_gt = input + B, N, C, H, W = x.shape + x = x.view(B * N, C, H, W) + + img_feat = self.featnet(x) + depth_feat = x + cam_params = torch.cat([intrins.reshape(B*N,-1), + post_rots.reshape(B*N,-1), + post_trans.reshape(B*N,-1), + rots.reshape(B*N,-1), + trans.reshape(B*N,-1)],dim=1) + depth_feat = self.se(depth_feat, cam_params) + depth_feat = self.extra_depthnet(depth_feat)[0] + depth_feat = self.dcn(depth_feat) + depth_digit = self.depthnet(depth_feat) + depth_prob = self.get_depth_dist(depth_digit) + + # Lift + volume = depth_prob.unsqueeze(1) * img_feat.unsqueeze(2) + volume = volume.view(B, N, self.numC_Trans, self.D, H, W) + volume = volume.permute(0, 1, 3, 4, 5, 2) + + # Splat + if self.accelerate: + bev_feat = self.voxel_pooling_accelerated(rots, trans, intrins, post_rots, post_trans, volume) + else: + geom = self.get_geometry(rots, trans, intrins, post_rots, post_trans) + bev_feat = self.voxel_pooling(geom, volume) + return bev_feat, depth_digit \ No newline at end of file diff --git a/projects/mmdet3d_plugin/bevformer/modules/view_transformer_solo.py b/projects/mmdet3d_plugin/bevformer/modules/view_transformer_solo.py new file mode 100644 index 0000000..578032a --- /dev/null +++ b/projects/mmdet3d_plugin/bevformer/modules/view_transformer_solo.py @@ -0,0 +1,376 @@ + +import os +import copy +import pdb +import torch +import torch.nn as nn +import torch.nn.functional as F +# import torch.cuda.amp as amp +from torch.nn.utils.rnn import pad_sequence +from scipy.ndimage import gaussian_filter1d + +from mmcv.runner import BaseModule, force_fp32, auto_fp16 +# from mmdet3d.ops import bev_pool +from mmcv.cnn import build_conv_layer +from mmcv.cnn import ConvModule + +# from ..builder import NECKS +from mmdet.models import NECKS +# from .. import builder +from mmdet3d.models import builder + +from . import ViewTransformerLiftSplatShoot_solo, SELikeModule +# from ..detectors.solofusion import generate_forward_transformation_matrix + + +def finite_check(x, s="", pdb_save=True): + # return + if int(os.environ.get("DEBUG", "0")) == "1": + if pdb_save: + try: + assert (~torch.isfinite(x)).sum() == 0, "{}: {}, {}".format(s, x.min(), x.max()) + except: breakpoint() + else: + assert (~torch.isfinite(x)).sum() == 0, "{}: {}, {}".format(s, x.min(), x.max()) + +def interp_zeroends(x, xp, fp): + """ + For convenience, assumes the sampling dimension is 0. + This also fills in the ends with 0 + + Args: + x: the :math:`x`-coordinates at which to evaluate the interpolated + values. 
+ xp: the :math:`x`-coordinates of the data points, must be increasing. + fp: the :math:`y`-coordinates of the data points, same length as `xp`. + + Returns: + the interpolated values, same size as `x`. + """ + assert len(x.shape) == len(xp.shape) + assert xp.shape == fp.shape + + m = (fp[1:] - fp[:-1]) / (xp[1:] - xp[:-1]) + b = fp[:-1] - (m * xp[:-1]) + + m = torch.cat([m.new_zeros((1, *m.shape[1:])), m, m.new_zeros((1, *m.shape[1:]))], dim=0) + b = torch.cat([b.new_zeros((1, *b.shape[1:])), b, b.new_zeros((1, *b.shape[1:]))], dim=0) + + indicies = torch.sum(torch.ge(x.unsqueeze(1), xp.unsqueeze(0)), dim=1).long() + + res = torch.gather(m, dim=0, index=indicies) * x + torch.gather(b, dim=0, index=indicies) + res.scatter_(dim=0, index=xp[[-1]].long(), src=fp[[-1]]) + + return res + +@NECKS.register_module() +class ViewTransformerSOLOFusion(ViewTransformerLiftSplatShoot_solo): + def __init__(self, + extra_depth_net, + loss_depth_weight, + se_config=dict(), + + do_history_stereo_fusion=False, + stereo_downsample=4, + stereo_group_num=8, + stereo_sampling_num=7, + + stereo_gauss_bin_stdev=2, + stereo_spread_before_add_type=None, + + **kwargs): + super(ViewTransformerSOLOFusion, self).__init__(**kwargs) + self.loss_depth_weight = loss_depth_weight + self.extra_depthnet = builder.build_backbone(extra_depth_net) + self.featnet = nn.Conv2d(self.numC_input, + self.numC_Trans, + kernel_size=1, + padding=0) + self.depthnet = nn.Conv2d(extra_depth_net['num_channels'][0], + self.D, + kernel_size=1, + padding=0) + self.dcn = nn.Sequential(*[build_conv_layer(dict(type='DCNv2', + deform_groups=1), + extra_depth_net['num_channels'][0], + extra_depth_net['num_channels'][0], + kernel_size=3, + stride=1, + padding=1, + dilation=1, + bias=False), + nn.BatchNorm2d(extra_depth_net['num_channels'][0]) + ]) + self.se = SELikeModule(self.numC_input, + feat_channel=extra_depth_net['num_channels'][0], + **se_config) + + self.do_history_stereo_fusion = do_history_stereo_fusion + if self.do_history_stereo_fusion: + self.stereo_group_num = stereo_group_num + self.similarity_net = nn.Sequential( + ConvModule(in_channels=self.stereo_group_num, + out_channels=16, + kernel_size=1, + stride=(1, 1, 1), + padding=0, + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d'), + act_cfg=dict(type='ReLU', inplace=True)), + ConvModule(in_channels=16, + out_channels=8, + kernel_size=1, + stride=(1, 1, 1), + padding=0, + conv_cfg=dict(type='Conv3d'), + norm_cfg=dict(type='BN3d'), + act_cfg=dict(type='ReLU', inplace=True)), + nn.Conv3d(in_channels=8, + out_channels=1, + kernel_size=1, + stride=1, + padding=0)) + + self.stereo_eps = 1e-5 + self.stereo_downsample = stereo_downsample + self.stereo_sampling_num = stereo_sampling_num + + # Setup gaussian sampling + gaussians = torch.from_numpy(gaussian_filter1d(F.one_hot(torch.arange(self.D)).float().numpy(), stereo_gauss_bin_stdev, mode='constant', cval=0)) + gaussians = gaussians / gaussians.max() + inv_gaussians = 1 - gaussians + log_inv_gaussians = torch.log(inv_gaussians + self.stereo_eps) + log_inv_gaussians[torch.arange(len(log_inv_gaussians)), torch.arange(len(log_inv_gaussians))] = -1000 + self.log_inv_gaussians = nn.Parameter(log_inv_gaussians, requires_grad=False) + self.bin_centers = nn.Parameter(self.get_bin_centers(), requires_grad=False) + + self.fp16_enabled = False + + + def get_bin_centers(self): + depth_bins = torch.arange(self.grid_config['dbound'][0], + self.grid_config['dbound'][1], + self.grid_config['dbound'][2]) # (118, ) + depth_bins = depth_bins + 
self.grid_config['dbound'][2] / 2 # center them + assert len(depth_bins) == self.D + return depth_bins + + @force_fp32(apply_to=('curr_global2img', 'curr_img_forward_aug', 'prev_global2img', 'prev_img_forward_aug', 'curr_unaug_cam_to_prev_unaug_cam')) + def get_prev_meshgrid_sampling_points(self, + depth_bins_to_sample, + curr_global2img, + curr_img_forward_aug, + prev_global2img, + prev_img_forward_aug, + curr_unaug_cam_to_prev_unaug_cam): + B, N, _, stereo_H, stereo_W = depth_bins_to_sample.shape + eps = self.stereo_eps + + ### Sample Stereo feats from prev + ## First, transform curr stereo meshgrid to global + meshgrid = torch.stack(torch.meshgrid(torch.arange(stereo_W), torch.arange(stereo_H), indexing="xy"), dim=2) # Need to be along W for first element in each "2"; fH x fW x 2 + meshgrid = (meshgrid * self.stereo_downsample + self.stereo_downsample / 2).to(curr_global2img) # each pixel exists at its center + meshgrid_xyd1 = torch.cat([ + meshgrid[None, None, None, :, :, :].repeat(B, N, self.stereo_sampling_num, 1, 1, 1), + depth_bins_to_sample[:, :, :, :, :, None], + depth_bins_to_sample.new_ones((B, N, self.stereo_sampling_num, stereo_H, stereo_W, 1)) + ], dim=5) # B x N x 118 x stereo_H x stereo_W x 4 + + curr_unaug_cam_meshgrid_xyd1 = torch.inverse(curr_img_forward_aug)[:, :, None, None, None, :, :] @ meshgrid_xyd1.unsqueeze(-1) # B x N x 118 x stereo_H x stereo_W x 4 x 1 + curr_unaug_cam_meshgrid_xyd1[..., :2, 0] *= curr_unaug_cam_meshgrid_xyd1[..., [2], 0] + + global_meshgrid_xyd1 = torch.inverse(curr_global2img)[:, :, None, None, None, :, :] @ curr_unaug_cam_meshgrid_xyd1 # B x N x 118 x stereo_H x stereo_W x 4 x 1 + finite_check(global_meshgrid_xyd1) + + ## Then, transform it to prev cameras + global_meshgrid_xyd1 = global_meshgrid_xyd1[:, None, :, :, :, :, :, :].repeat(1, N, 1, 1, 1, 1, 1, 1) # B x prev_N x curr_N x 118 x stereo_H x stereo_W x 4 x 1. 
First N is prev cameras + prev_unaug_cam_meshgrid_xyd1 = prev_global2img[:, :, None, None, None, None, :, :] @ global_meshgrid_xyd1 + + + prev_unaug_cam_meshgrid_xyd1[..., :2, 0] /= torch.maximum(prev_unaug_cam_meshgrid_xyd1[..., [2], 0], + torch.ones_like(prev_unaug_cam_meshgrid_xyd1[..., [2], 0]) * eps) + prev_meshgrid_xyd1 = prev_img_forward_aug[:, :, None, None, None, None, :, :] @ prev_unaug_cam_meshgrid_xyd1 # B x prev_N x curr_N x 118 x stereo_H x stereo_W x 4 x 1 + prev_meshgrid_xyd1 = prev_meshgrid_xyd1.squeeze(-1) # B x prev_N x curr_N x 118 x stereo_H x stereo_W x 4 + finite_check(prev_meshgrid_xyd1) + + return prev_meshgrid_xyd1 + + @auto_fp16(apply_to=('curr_sem_feats', )) + def get_mono_depth(self, curr_sem_feats, rots, trans, intrins, post_rots, post_trans): + B, N, sem_C, sem_H, sem_W = curr_sem_feats.shape + curr_sem_feats = curr_sem_feats.view(B * N, sem_C, sem_H, sem_W) + mono_depth_feat = curr_sem_feats + # print("curr_sem_feats.dtype", curr_sem_feats.dtype) + # print("rots.dtype", rots.dtype) + # print("rots.dtype", rots.dtype) + # print("intrins.dtype", intrins.dtype) + # print("post_rots.dtype", post_rots.dtype) + # print("post_trans.dtype", post_trans.dtype) + cam_params = torch.cat([intrins.reshape(B*N,-1), + post_rots.reshape(B*N,-1), + post_trans.reshape(B*N,-1), + rots.reshape(B*N,-1), + trans.reshape(B*N,-1)],dim=1) + mono_depth_feat = self.se(mono_depth_feat, cam_params) + mono_depth_feat = self.extra_depthnet(mono_depth_feat)[0] + mono_depth_feat = self.dcn(mono_depth_feat) + mono_depth_digit = self.depthnet(mono_depth_feat) + + return mono_depth_digit + + + @auto_fp16(apply_to=('curr_sem_feats', 'curr_stereo_feats', 'prev_stereo_feats')) + def forward(self, + curr_sem_feats, + rots, trans, intrins, post_rots, post_trans, + curr_stereo_feats=None, prev_stereo_feats=None, + prev_global2img=None, prev_img_forward_aug=None, curr_global2img=None, curr_img_forward_aug=None, curr_unaug_cam_to_prev_unaug_cam=None): + + B, N, sem_C, sem_H, sem_W = curr_sem_feats.shape + + curr_sem_feats = curr_sem_feats.view(B * N, sem_C, sem_H, sem_W) + curr_img_feat = self.featnet(curr_sem_feats) + mono_depth_digit = self.get_mono_depth(curr_sem_feats.view(B, N, sem_C, sem_H, sem_W), rots, trans, intrins, post_rots, post_trans) + + if not self.do_history_stereo_fusion or prev_stereo_feats is None: + assert curr_stereo_feats is None + depth_digit = mono_depth_digit + else: + # if self.do_history_stereo_fusion and prev_stereo_feats is not None: + # Get stereo fusion feature + B, N, stereo_C, stereo_H, stereo_W = curr_stereo_feats.shape + eps = self.stereo_eps + + assert self.data_config['input_size'][0] // self.stereo_downsample == stereo_H + assert curr_stereo_feats is not None + + # Do stereo + with torch.no_grad(): + ## Stereo Sampling + # First figure out what depths to sample + # Do the gaussian sampling + gauss_sample_distr_log = mono_depth_digit.log_softmax(dim=1) + gauss_sample_depth_idxs = [] + for _ in range(self.stereo_sampling_num): + curr_gauss_sample_depth_idxs = gauss_sample_distr_log.argmax(dim=1) + uncertainty_reduction = self.log_inv_gaussians[curr_gauss_sample_depth_idxs].permute(0, 3, 1, 2) + gauss_sample_distr_log = gauss_sample_distr_log + uncertainty_reduction + gauss_sample_depth_idxs.append(curr_gauss_sample_depth_idxs) + gauss_sample_depth_idxs = torch.stack(gauss_sample_depth_idxs, dim=1) # B*N x k x sem_H x sem_W + gauss_sample_depth_idxs = gauss_sample_depth_idxs.sort(dim=1).values + gauss_sample_depths = self.bin_centers[gauss_sample_depth_idxs] # B*N x 
k x sem_H x sem_W + + # Now we have depth idxs and their depths. upsample it (via repeat) up to stereo_H & stereo_W. + sample_depth_idxs = gauss_sample_depth_idxs.view(B * N, self.stereo_sampling_num, sem_H, sem_W) + sample_depths = F.interpolate(gauss_sample_depths, + scale_factor=(self.downsample // self.stereo_downsample), + mode='nearest').view(B, N, self.stereo_sampling_num, stereo_H, stereo_W) # B x N x k x stereo_H x stereo_W + + # Now get the sampling xyd1 + prev_meshgrid_xyd1 = self.get_prev_meshgrid_sampling_points( + sample_depths, curr_global2img, curr_img_forward_aug, prev_global2img, prev_img_forward_aug, + curr_unaug_cam_to_prev_unaug_cam) + + prev_meshgrid_xyd1 = prev_meshgrid_xyd1.to(curr_sem_feats) # cast back to fp16 + prev_meshgrid_xy = prev_meshgrid_xyd1[..., :2] # B x prev_N x curr_N x k x stereo_H x stereo_W x 2 + prev_meshgrid_d = prev_meshgrid_xyd1[..., 2] + valid_mask = prev_meshgrid_d > eps + del prev_meshgrid_xyd1 + + # At this point, we have sample_depth_idxs, prev_meshgrid_xy, and valid_mask + # Normalize xy + prev_meshgrid_xy_norm = prev_meshgrid_xy + prev_meshgrid_xy_norm[..., 0] /= self.data_config['input_size'][1] + prev_meshgrid_xy_norm[..., 1] /= self.data_config['input_size'][0] + prev_meshgrid_xy_norm = prev_meshgrid_xy_norm * 2 - 1 # B x prev_N x curr_N x k x stereo_H x stereo_W x 2 + + # Update valid_mask + valid_mask = (valid_mask & (prev_meshgrid_xy_norm[..., 0] > -1.0) + & (prev_meshgrid_xy_norm[..., 0] < 1.0) + & (prev_meshgrid_xy_norm[..., 1] > -1.0) + & (prev_meshgrid_xy_norm[..., 1] < 1.0)) # B x prev_N x curr_N x 118 x stereo_H x stereo_W + + ## Now do the sampling + group_size = (stereo_C // self.stereo_group_num) + cost_volume = curr_stereo_feats.new_zeros(B, N, self.stereo_group_num, self.stereo_sampling_num, stereo_H, stereo_W) # N here is curr_N + for prev_cam_idx in range(N): + ## Setup some stuff + # Get prev cam stuff + curr_prev_stereo_feats = prev_stereo_feats[:, prev_cam_idx, :, :, :] # B x C x stereo_H x stereo_W + curr_prev_meshgrid_xy_norm = prev_meshgrid_xy_norm[:, prev_cam_idx, :, :, :, :, :] # B x curr_N x 118 x stereo_H x stereo_W x 2 + curr_valid_mask = valid_mask[:, prev_cam_idx, :, :, :, :] # B x curr_N x 118 x stereo_H x stereo_W + + # Then, want to only get features from curr stereo for valid locations, so need to prepare for padding and unpadding that. + # Need to feed the cost volume afterwards too + curr_valid_mask_where = torch.where(curr_valid_mask) + curr_valid_mask_where_list = [ # get wheres for every sample in the batch. 
+ [dim_where[curr_valid_mask_where[0] == batch_idx] for dim_where in curr_valid_mask_where[1:]] for batch_idx in range(B)] + curr_valid_mask_num_list = [ # num valid per sample in batch + len(tmp[0]) for tmp in curr_valid_mask_where_list] + curr_valid_mask_padded_valid_mask = torch.stack([ # mask on padded version later, True when not a padding value + torch.arange(max(curr_valid_mask_num_list), device=curr_prev_stereo_feats.device) < tmp_len for tmp_len in curr_valid_mask_num_list], dim=0) # B x max_length + + ## Now get the sampled features in padded form + curr_prev_meshgrid_xy_norm_valid_list = [ # List of size B, inner is _ x 2 + tmp[tmp_mask, :] for tmp, tmp_mask in zip(curr_prev_meshgrid_xy_norm, curr_valid_mask)] + curr_prev_meshgrid_xy_norm_valid_padded = pad_sequence(curr_prev_meshgrid_xy_norm_valid_list, batch_first=True) # B x max_length x 2 + curr_prev_sampled_feats_padded = F.grid_sample(curr_prev_stereo_feats, + curr_prev_meshgrid_xy_norm_valid_padded.unsqueeze(2), align_corners=False) # B x C x max_length x 1 + curr_prev_sampled_feats_padded = curr_prev_sampled_feats_padded.squeeze(3).permute(0, 2, 1) # B x C x max_length -> B x max_length x C + + ## Get the corresponding curr features. Doing this to avoid the max-size tensor B x N x C x 118 x stereo_H x stereo_W. + # Biggest tensor we have is B x max_length x C, which should be around B x C x 118 x stereo_H x stereo_W, so without the N factor. + with torch.set_grad_enabled(curr_stereo_feats.requires_grad): + curr_curr_stereo_feats_valid_list = [ # List of size B, inner is _ x C. ignore 118 dimension for now; it's only needed when indexing into cost volume. + tmp[tmp_where[0], :, tmp_where[2], tmp_where[3]] for tmp, tmp_where in zip(curr_stereo_feats, curr_valid_mask_where_list)] + curr_curr_stereo_feats_valid_padded = pad_sequence(curr_curr_stereo_feats_valid_list, batch_first=True) # B x max_length x C + + assert curr_curr_stereo_feats_valid_padded.shape[1] == curr_prev_sampled_feats_padded.shape[1] == curr_valid_mask_padded_valid_mask.shape[1], \ + f"{curr_curr_stereo_feats_valid_padded.shape[1]} vs {curr_prev_sampled_feats_padded.shape[1]} vs {curr_valid_mask_padded_valid_mask.shape[1]}" + + ## Compute the group correlation + curr_cost_volume = curr_prev_sampled_feats_padded * curr_curr_stereo_feats_valid_padded + curr_cost_volume = curr_cost_volume.view(B, curr_cost_volume.shape[1], self.stereo_group_num, group_size) # B x max_Length x group_num x group_size + curr_cost_volume = curr_cost_volume.sum(dim=3) # B x max_length x group_num + + ## Now fill in cost_volume. Add it incrementally for now, will average later. Dot product is commutative with average + cost_volume[curr_valid_mask_where[0], curr_valid_mask_where[1], :, curr_valid_mask_where[2], curr_valid_mask_where[3], curr_valid_mask_where[4]] += \ + curr_cost_volume[curr_valid_mask_padded_valid_mask] + + del curr_cost_volume, curr_prev_sampled_feats_padded, curr_curr_stereo_feats_valid_padded + + with torch.set_grad_enabled(curr_stereo_feats.requires_grad): + ## Some points are projected to multiple prev cameras; average over those. 
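A standalone sketch of the masked averaging carried out in the next few lines: each sample point accumulates a group-correlation score from every previous camera that sees it, and the sum is divided by the number of valid views, clamped to at least one so unseen points stay zero. Shapes below are illustrative placeholders, not the real batch or camera counts.

import torch

B, prev_N, curr_N, k, H, W, G = 1, 2, 2, 3, 4, 4, 8
valid_mask = torch.rand(B, prev_N, curr_N, k, H, W) > 0.5   # which prev cameras see each sample point
cost_sum = torch.randn(B, curr_N, G, k, H, W)               # scores summed over prev cameras

num_valid = valid_mask.float().sum(dim=1)                   # B x curr_N x k x H x W
num_valid = num_valid.unsqueeze(2)                          # broadcast over the group dimension
cost_mean = cost_sum / torch.maximum(num_valid, torch.ones_like(num_valid))
print(cost_mean.shape)                                      # torch.Size([1, 2, 8, 3, 4, 4])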
+ num_valid_per_point = valid_mask.float().sum(dim=1) # B x curr_N x k x stereo_H x stereo_W + num_valid_per_point = num_valid_per_point.unsqueeze(2) # B x curr_N x 1 x k x stereo_H x stereo_W + cost_volume = cost_volume / torch.maximum(num_valid_per_point, torch.ones_like(num_valid_per_point)) + + assert curr_stereo_feats.requires_grad == cost_volume.requires_grad + + ## Get the cost volume logits + cost_volume = self.similarity_net(cost_volume.view(B * N, self.stereo_group_num, self.stereo_sampling_num, stereo_H, stereo_W)) + stereo_depth_digit = cost_volume.squeeze(1) # B*N x k x stereo_H x stereo_W + stereo_depth_digit = F.avg_pool2d(stereo_depth_digit.view(B * N, self.stereo_sampling_num, stereo_H, stereo_W), + self.downsample // self.stereo_downsample, + self.downsample // self.stereo_downsample).view(B, N, self.stereo_sampling_num, sem_H, sem_W) # B x N x k x sem_H x sem_W + stereo_depth_digit = stereo_depth_digit.view(B * N, self.stereo_sampling_num, sem_H, sem_W) + + stereo_depth_digit_interp = interp_zeroends( + torch.arange(self.D).to(sample_depth_idxs.device)[:, None, None, None], + sample_depth_idxs.permute(1, 0, 2, 3), + stereo_depth_digit.permute(1, 0, 2, 3)).permute(1, 0, 2, 3) + + depth_digit = mono_depth_digit + stereo_depth_digit_interp + + depth_prob = self.get_depth_dist(depth_digit) + + ### Lift + volume = depth_prob.unsqueeze(1) * curr_img_feat.unsqueeze(2) + volume = volume.view(B, N, self.numC_Trans, self.D, sem_H, sem_W) + volume = volume.permute(0, 1, 3, 4, 5, 2) + + ### Splat + geom = self.get_geometry(rots, trans, intrins, post_rots, post_trans) + bev_feat = self.voxel_pooling(geom, volume) + + return bev_feat, depth_digit \ No newline at end of file diff --git a/projects/mmdet3d_plugin/bevformer/modules/vol_encoder.py b/projects/mmdet3d_plugin/bevformer/modules/vol_encoder.py new file mode 100644 index 0000000..f7add9e --- /dev/null +++ b/projects/mmdet3d_plugin/bevformer/modules/vol_encoder.py @@ -0,0 +1,476 @@ +# --------------------------------------------- +# Copyright (c) OpenMMLab. All rights reserved. +# --------------------------------------------- +# Modified by Zhiqi Li +# --------------------------------------------- + +from projects.mmdet3d_plugin.models.utils.bricks import run_time +from projects.mmdet3d_plugin.models.utils.visual import save_tensor +from .custom_base_transformer_layer import MyCustomBaseTransformerLayer +import copy +import warnings +from mmcv.cnn.bricks.registry import (ATTENTION, + TRANSFORMER_LAYER, + TRANSFORMER_LAYER_SEQUENCE) +from mmcv.cnn.bricks.transformer import TransformerLayerSequence +from mmcv.runner import force_fp32, auto_fp16 +import numpy as np +import torch +import cv2 as cv +import mmcv +from mmcv.utils import TORCH_VERSION, digit_version +from mmcv.utils import ext_loader +ext_module = ext_loader.load_ext( + '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward']) + +@TRANSFORMER_LAYER_SEQUENCE.register_module() +class VolFormerEncoder(TransformerLayerSequence): + + """ + Attention with both self and cross + Implements the decoder in DETR transformer. + Args: + return_intermediate (bool): Whether to return intermediate outputs. + coder_norm_cfg (dict): Config of last normalization layer. Default: + `LN`. 
+ """ + + def __init__(self, *args, pc_range=None, num_points_in_pillar=4, num_points_in_voxel=1, + return_intermediate=False, dataset_type='nuscenes', + **kwargs): + + super(VolFormerEncoder, self).__init__(*args, **kwargs) + self.return_intermediate = return_intermediate + + self.num_points_in_pillar = num_points_in_pillar + self.num_points_in_voxel = num_points_in_voxel + self.pc_range = pc_range + self.fp16_enabled = False + + @staticmethod + def get_reference_points(bev_z, bev_h, bev_w, num_points_in_voxel=1, dim='3d', bs=1, device='cuda', dtype=torch.float): + """Get the reference points used in SCA and TSA. + Args: + bev_z, bev_h, bev_w: spatial shape of voxel. + D: sample D points uniformly from each voxel. + device (obj:`device`): The device where + reference_points should be. + Returns: + Tensor: reference points used in decoder, has \ + shape (bs, num_keys, num_levels, 2). + """ + + # reference points in 3D space, used in spatial cross-attention (SCA) + if dim == '3d': + # only sample the center from each voxel + zs = torch.linspace(0.5, bev_z - 0.5, bev_z, dtype=dtype, + device=device).view(1, bev_z, 1, 1).expand(1, bev_z, bev_h, bev_w) / bev_z + ys = torch.linspace(0.5, bev_h - 0.5, bev_h, dtype=dtype, + device=device).view(1, 1, bev_h, 1).expand(1, bev_z, bev_h, bev_w) / bev_h + xs = torch.linspace(0.5, bev_w - 0.5, bev_w, dtype=dtype, + device=device).view(1, 1, 1, bev_w).expand(1, bev_z, bev_h, bev_w) / bev_w + + ref_3d = torch.stack((xs, ys, zs), -1) # (D, bev_z, bev_h, bev_w, 3) + ref_3d = ref_3d.permute(0, 4, 1, 2, 3).flatten(2).permute(0, 2, 1) # (D, num_query, 3) + ref_3d = ref_3d[None].repeat(bs, 1, 1, 1) # (bs, D, num_query, 3) + + if num_points_in_voxel > 1: + num = num_points_in_voxel + delta_z, delta_y, delta_x = 0.5/bev_z, 0.5/bev_h, 0.5/bev_w + # the offset of sampling point from the voxel center is in the range [-delta, delta] + zs_offset = torch.linspace(-delta_z, delta_z, num+2, dtype=dtype, device=device)[1:-1] + zs_offset = zs_offset.view(num, 1, 1, 1).expand(num, bev_z, bev_h, bev_w) + + ys_offset = torch.linspace(-delta_y, delta_y, num+2, dtype=dtype, device=device)[1:-1] + ys_offset = ys_offset.view(num, 1, 1, 1).expand(num, bev_z, bev_h, bev_w) + + xs_offset = torch.linspace(-delta_x, delta_x, num+2, dtype=dtype, device=device)[1:-1] + xs_offset = xs_offset.view(num, 1, 1, 1).expand(num, bev_z, bev_h, bev_w) + + offset_3d = torch.stack((xs_offset, ys_offset, zs_offset), -1) # (num, bev_z, bev_h, bev_w, 3) + offset_3d = offset_3d.permute(0, 4, 1, 2, 3).flatten(2).permute(0, 2, 1) # (num, num_query, 3) + offset_3d = offset_3d[None].repeat(bs, 1, 1, 1) # (bs, num, num_query, 3) + ref_3d = offset_3d + ref_3d + + # num = num_points_in_voxel + # delta_z, delta_y, delta_x = 0.5/bev_z, 0.5/bev_h, 0.5/bev_w + # # the offset of sampling point from the voxel center is in the range [-delta, delta] + # zs_offset_p = torch.linspace(0, delta_z, num//2+2, dtype=dtype, device=device)[1:-1] + # zs_offset_m = torch.linspace(-delta_z, 0, num//2+2, dtype=dtype, device=device)[1:-1] + # zs_offset = torch.cat([zs_offset_m, zs_offset_p], axis=0).view(num, 1, 1, 1).expand(num, bev_z, bev_h, bev_w) + + # ys_offset_p = torch.linspace(0, delta_y, num//2+2, dtype=dtype, device=device)[1:-1] + # ys_offset_m = torch.linspace(-delta_y, 0, num//2+2, dtype=dtype, device=device)[1:-1] + # ys_offset = torch.cat([ys_offset_m, ys_offset_p], axis=0).view(num, 1, 1, 1).expand(num, bev_z, bev_h, bev_w) + + # xs_offset_p = torch.linspace(0, delta_x, num//2+2, dtype=dtype, device=device)[1:-1] + 
# xs_offset_m = torch.linspace(-delta_x, 0, num//2+2, dtype=dtype, device=device)[1:-1] + # xs_offset = torch.cat([xs_offset_m, xs_offset_p], axis=0).view(num, 1, 1, 1).expand(num, bev_z, bev_h, bev_w) + + # offset_3d = torch.stack((xs_offset, ys_offset, zs_offset), -1) # (num, bev_z, bev_h, bev_w, 3) + # offset_3d = offset_3d.permute(0, 4, 1, 2, 3).flatten(2).permute(0, 2, 1) # (num, num_query, 3) + # offset_3d = offset_3d[None].repeat(bs, 1, 1, 1) # (bs, num, num_query, 3) + # offset_3d = offset_3d + ref_3d + # ref_3d = torch.cat((ref_3d, offset_3d), dim=1) # (bs, num+1, num_query, 3) + + return ref_3d + + # reference points on 2D bev plane, used in temporal self-attention (TSA). + elif dim == '2d': + # ref_y, ref_x = torch.meshgrid( + # torch.linspace( + # 0.5, bev_h - 0.5, bev_h, dtype=dtype, device=device), + # torch.linspace( + # 0.5, bev_w - 0.5, bev_w, dtype=dtype, device=device) + # ) + ref_z, ref_y, ref_x = torch.meshgrid( + torch.linspace(0.5, + bev_z - 0.5, + bev_z, + dtype=dtype, + device=device), + torch.linspace(0.5, + bev_h - 0.5, + bev_h, + dtype=dtype, + device=device), + torch.linspace(0.5, + bev_w - 0.5, + bev_w, + dtype=dtype, + device=device) + ) # shape: (bev_z, bev_h, bev_w) + ref_z = ref_z.reshape(-1)[None] / bev_z + ref_y = ref_y.reshape(-1)[None] / bev_h + ref_x = ref_x.reshape(-1)[None] / bev_w + ref_2d = torch.stack((ref_x, ref_y, ref_z), -1) + ref_2d = ref_2d.repeat(bs, 1, 1).unsqueeze(2) # (bs, num_query, 1, 3) + return ref_2d + + # This function must use fp32!!! + @force_fp32(apply_to=('reference_points', 'img_metas')) + def point_sampling(self, reference_points, pc_range, img_metas): + ego2lidar = img_metas[0]['ego2lidar'] + lidar2img = [] + for img_meta in img_metas: + lidar2img.append(img_meta['lidar2img']) + lidar2img = np.asarray(lidar2img) + lidar2img = reference_points.new_tensor(lidar2img) # (B, N, 4, 4) + ego2lidar = reference_points.new_tensor(ego2lidar) + + reference_points = reference_points.clone() + + reference_points[..., 0:1] = reference_points[..., 0:1] * \ + (pc_range[3] - pc_range[0]) + pc_range[0] + reference_points[..., 1:2] = reference_points[..., 1:2] * \ + (pc_range[4] - pc_range[1]) + pc_range[1] + reference_points[..., 2:3] = reference_points[..., 2:3] * \ + (pc_range[5] - pc_range[2]) + pc_range[2] + + reference_points = torch.cat( + (reference_points, torch.ones_like(reference_points[..., :1])), -1) # (bs, D, num_query, 4) + + reference_points = reference_points.permute(1, 0, 2, 3) + D, B, num_query = reference_points.size()[:3] + num_cam = lidar2img.size(1) + + reference_points = reference_points.view( + D, B, 1, num_query, 4).repeat(1, 1, num_cam, 1, 1).unsqueeze(-1) + + lidar2img = lidar2img.view( + 1, B, num_cam, 1, 4, 4).repeat(D, 1, 1, num_query, 1, 1) # (D, B, num_cam, num_query, 4, 4) + + ego2lidar = ego2lidar.view(1, 1, 1, 1, 4, 4).repeat(D, 1, num_cam, num_query, 1, 1) + + reference_points_cam = torch.matmul(torch.matmul(lidar2img.to(torch.float32), ego2lidar.to(torch.float32)), + reference_points.to(torch.float32)).squeeze(-1) + + # reference_points_cam = torch.matmul(lidar2img.to(torch.float32), + # reference_points.to(torch.float32)).squeeze(-1) # (D, B, num_cam, num_query, 4) + + eps = 1e-5 + + bev_mask = (reference_points_cam[..., 2:3] > eps) # (D, B, num_cam, num_query, 1) + reference_points_cam = reference_points_cam[..., 0:2] / torch.maximum( + reference_points_cam[..., 2:3], torch.ones_like(reference_points_cam[..., 2:3]) * eps) # in pixel system + reference_points_cam[..., 0] /= img_metas[0]['img_shape'][0][1] 
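The projection and normalization used here can be checked on a single point: chain ego2lidar and lidar2img, perspective-divide with the same eps guard, then divide by image width and height to land in [0, 1]. The matrices and image size in this sketch are placeholders, not values from the dataset.

import torch

p_ego = torch.tensor([12.0, -3.0, 1.5, 1.0])              # homogeneous point in the ego frame
ego2lidar = torch.eye(4)                                   # placeholder rigid transform
lidar2img = torch.eye(4)                                   # placeholder projection matrix
eps = 1e-5

p_img = lidar2img @ ego2lidar @ p_ego                      # (x, y, depth, 1) in image space
uv = p_img[:2] / torch.clamp(p_img[2], min=eps)            # perspective divide, guarded against depth <= 0
img_W, img_H = 1600, 900                                   # placeholder image size
uv_norm = uv / torch.tensor([float(img_W), float(img_H)])  # inside [0, 1] when the point hits the image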
+ reference_points_cam[..., 1] /= img_metas[0]['img_shape'][0][0] + + bev_mask = (bev_mask & (reference_points_cam[..., 1:2] > 0.0) + & (reference_points_cam[..., 1:2] < 1.0) + & (reference_points_cam[..., 0:1] < 1.0) + & (reference_points_cam[..., 0:1] > 0.0)) + if digit_version(TORCH_VERSION) >= digit_version('1.8'): + bev_mask = torch.nan_to_num(bev_mask) + else: + bev_mask = bev_mask.new_tensor( + np.nan_to_num(bev_mask.cpu().numpy())) + + reference_points_cam = reference_points_cam.permute(2, 1, 3, 0, 4) # (num_cam, B, num_query, D, 2) + bev_mask = bev_mask.permute(2, 1, 3, 0, 4).squeeze(-1) # (num_cam, B, num_query, D) + + return reference_points_cam, bev_mask + + @auto_fp16() + def forward(self, + bev_query, + key, + value, + *args, + bev_z=None, + bev_h=None, + bev_w=None, + bev_pos=None, + spatial_shapes=None, + level_start_index=None, + valid_ratios=None, + prev_bev=None, + shift=0., + **kwargs): + """Forward function for `TransformerDecoder`. + Args: + bev_query (Tensor): Input BEV query with shape + `(num_query, bs, embed_dims)`. + key & value (Tensor): Input multi-cameta features with shape + (num_cam, num_value, bs, embed_dims) + reference_points (Tensor): The reference + points of offset. has shape + (bs, num_query, 4) when as_two_stage, + otherwise has shape ((bs, num_query, 2). + valid_ratios (Tensor): The radios of valid + points on the feature map, has shape + (bs, num_levels, 2) + Returns: + Tensor: Results with shape [1, num_query, bs, embed_dims] when + return_intermediate is `False`, otherwise it has shape + [num_layers, num_query, bs, embed_dims]. + """ + + output = bev_query + intermediate = [] + + ref_3d = self.get_reference_points( + bev_z, bev_h, bev_w, self.num_points_in_voxel, dim='3d', bs=bev_query.size(1), device=bev_query.device, dtype=bev_query.dtype) + + # the following ref_2d is actualy 3d: (bs, num_query, 1, 3) + ref_2d = self.get_reference_points( + bev_z, bev_h, bev_w, dim='2d', bs=bev_query.size(1), device=bev_query.device, dtype=bev_query.dtype) + + reference_points_cam, bev_mask = self.point_sampling( + ref_3d, self.pc_range, kwargs['img_metas']) + + # bug: this code should be 'shift_ref_2d = ref_2d.clone()', we keep this bug for reproducing our results in paper. 
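Without the .clone(), shift_ref_2d below aliases ref_2d, so the in-place += shifts ref_2d as well; that is the bug the comment above keeps for reproducibility. A two-line demonstration of the aliasing:

import torch

ref = torch.zeros(2, 3)
shifted = ref        # no .clone(): both names share the same storage
shifted += 1.0       # in-place add
print(ref)           # ref has been shifted too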
+ shift_ref_2d = ref_2d # .clone() + shift3d = shift.new_zeros(1, 3) + shift3d[:, :2] = shift + shift_ref_2d += shift3d[:, None, None, :] + + # (num_query, bs, embed_dims) -> (bs, num_query, embed_dims) + bev_query = bev_query.permute(1, 0, 2) + bev_pos = bev_pos.permute(1, 0, 2) + bs, num_query, num_bev_level, _ = ref_2d.shape # (bs, num_query, 1, 2) + if prev_bev is not None: + prev_bev = prev_bev.permute(1, 0, 2) + prev_bev = torch.stack( + [prev_bev, bev_query], 1).reshape(bs*2, num_query, -1) + hybird_ref_2d = torch.stack([shift_ref_2d, ref_2d], 1).reshape( + bs*2, num_query, num_bev_level, 3) + else: + hybird_ref_2d = torch.stack([ref_2d, ref_2d], 1).reshape( + bs*2, num_query, num_bev_level, 3) + + for lid, layer in enumerate(self.layers): + output = layer( + bev_query, + key, + value, + *args, + bev_pos=bev_pos, + ref_2d=hybird_ref_2d, + ref_3d=ref_3d, + bev_z=bev_z, + bev_h=bev_h, + bev_w=bev_w, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + reference_points_cam=reference_points_cam, + bev_mask=bev_mask, + prev_bev=prev_bev, + **kwargs) + + bev_query = output + if self.return_intermediate: + intermediate.append(output) + + if self.return_intermediate: + return torch.stack(intermediate) + + return output + + +@TRANSFORMER_LAYER.register_module() +class VolFormerLayer(MyCustomBaseTransformerLayer): + """Implements decoder layer in DETR transformer. + Args: + attn_cfgs (list[`mmcv.ConfigDict`] | list[dict] | dict )): + Configs for self_attention or cross_attention, the order + should be consistent with it in `operation_order`. If it is + a dict, it would be expand to the number of attention in + `operation_order`. + feedforward_channels (int): The hidden dimension for FFNs. + ffn_dropout (float): Probability of an element to be zeroed + in ffn. Default 0.0. + operation_order (tuple[str]): The execution order of operation + in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm'). + Default:None + act_cfg (dict): The activation config for FFNs. Default: `LN` + norm_cfg (dict): Config dict for normalization layer. + Default: `LN`. + ffn_num_fcs (int): The number of fully-connected layers in FFNs. + Default:2. + """ + + def __init__(self, + attn_cfgs, + feedforward_channels, + ffn_dropout=0.0, + operation_order=None, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN'), + ffn_num_fcs=2, + **kwargs): + super(VolFormerLayer, self).__init__( + attn_cfgs=attn_cfgs, + feedforward_channels=feedforward_channels, + ffn_dropout=ffn_dropout, + operation_order=operation_order, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + ffn_num_fcs=ffn_num_fcs, + **kwargs) + self.fp16_enabled = False + assert len(operation_order) == 6 + assert set(operation_order) == set( + ['self_attn', 'norm', 'cross_attn', 'ffn']) + + def forward(self, + query, + key=None, + value=None, + bev_pos=None, + query_pos=None, + key_pos=None, + attn_masks=None, + query_key_padding_mask=None, + key_padding_mask=None, + ref_2d=None, + ref_3d=None, + bev_z=None, + bev_h=None, + bev_w=None, + reference_points_cam=None, + mask=None, + spatial_shapes=None, + level_start_index=None, + prev_bev=None, + **kwargs): + """Forward function for `TransformerDecoderLayer`. + + **kwargs contains some specific arguments of attentions. + + Args: + query (Tensor): The input query with shape + [num_queries, bs, embed_dims] if + self.batch_first is False, else + [bs, num_queries embed_dims]. 
+ key (Tensor): The key tensor with shape [num_keys, bs, + embed_dims] if self.batch_first is False, else + [bs, num_keys, embed_dims] . + value (Tensor): The value tensor with same shape as `key`. + query_pos (Tensor): The positional encoding for `query`. + Default: None. + key_pos (Tensor): The positional encoding for `key`. + Default: None. + attn_masks (List[Tensor] | None): 2D Tensor used in + calculation of corresponding attention. The length of + it should equal to the number of `attention` in + `operation_order`. Default: None. + query_key_padding_mask (Tensor): ByteTensor for `query`, with + shape [bs, num_queries]. Only used in `self_attn` layer. + Defaults to None. + key_padding_mask (Tensor): ByteTensor for `query`, with + shape [bs, num_keys]. Default: None. + + Returns: + Tensor: forwarded results with shape [num_queries, bs, embed_dims]. + """ + + norm_index = 0 + attn_index = 0 + ffn_index = 0 + identity = query + if attn_masks is None: + attn_masks = [None for _ in range(self.num_attn)] + elif isinstance(attn_masks, torch.Tensor): + attn_masks = [ + copy.deepcopy(attn_masks) for _ in range(self.num_attn) + ] + warnings.warn(f'Use same attn_mask in all attentions in ' + f'{self.__class__.__name__} ') + else: + assert len(attn_masks) == self.num_attn, f'The length of ' \ + f'attn_masks {len(attn_masks)} must be equal ' \ + f'to the number of attention in ' \ + f'operation_order {self.num_attn}' + + for layer in self.operation_order: + # temporal self attention + if layer == 'self_attn': + + query = self.attentions[attn_index]( + query, + prev_bev, + prev_bev, + identity if self.pre_norm else None, + query_pos=bev_pos, + key_pos=bev_pos, + attn_mask=attn_masks[attn_index], + key_padding_mask=query_key_padding_mask, + reference_points=ref_2d, + spatial_shapes=torch.tensor( + [[bev_z, bev_h, bev_w]], device=query.device), # spatial_shapes + level_start_index=torch.tensor([0], device=query.device), + **kwargs) + attn_index += 1 + identity = query + + elif layer == 'norm': + query = self.norms[norm_index](query) + norm_index += 1 + + # spaital cross attention + elif layer == 'cross_attn': + query = self.attentions[attn_index]( + query, + key, + value, + identity if self.pre_norm else None, + query_pos=query_pos, + key_pos=key_pos, + reference_points=ref_3d, + reference_points_cam=reference_points_cam, + mask=mask, + attn_mask=attn_masks[attn_index], + key_padding_mask=key_padding_mask, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + **kwargs) + attn_index += 1 + identity = query + + elif layer == 'ffn': + query = self.ffns[ffn_index]( + query, identity if self.pre_norm else None) + ffn_index += 1 + + return query diff --git a/projects/mmdet3d_plugin/bevformer/modules/voxel_encoder.py b/projects/mmdet3d_plugin/bevformer/modules/voxel_encoder.py new file mode 100644 index 0000000..ae5a737 --- /dev/null +++ b/projects/mmdet3d_plugin/bevformer/modules/voxel_encoder.py @@ -0,0 +1,470 @@ +# --------------------------------------------- +# Copyright (c) OpenMMLab. All rights reserved. 
+# --------------------------------------------- +# Modified by Zhiqi Li +# --------------------------------------------- + +from projects.mmdet3d_plugin.models.utils.bricks import run_time +from projects.mmdet3d_plugin.models.utils.visual import save_tensor +from .custom_base_transformer_layer import MyCustomBaseTransformerLayer +import copy +import warnings +from mmcv.cnn.bricks.registry import (ATTENTION, + TRANSFORMER_LAYER, + TRANSFORMER_LAYER_SEQUENCE) +from mmcv.cnn.bricks.transformer import TransformerLayerSequence +from mmcv.runner import force_fp32, auto_fp16 +import numpy as np +import torch +import cv2 as cv +import mmcv +from mmcv.utils import TORCH_VERSION, digit_version +from mmcv.utils import ext_loader +ext_module = ext_loader.load_ext( + '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward']) + +@TRANSFORMER_LAYER_SEQUENCE.register_module() +class VoxelFormerEncoder(TransformerLayerSequence): + + """ + Attention with both self and cross + Implements the decoder in DETR transformer. + Args: + return_intermediate (bool): Whether to return intermediate outputs. + coder_norm_cfg (dict): Config of last normalization layer. Default: + `LN`. + """ + + def __init__(self, *args, pc_range=None, num_points_in_pillar=4, num_points_in_voxel=1, + return_intermediate=False, dataset_type='nuscenes', + **kwargs): + + super(VoxelFormerEncoder, self).__init__(*args, **kwargs) + self.return_intermediate = return_intermediate + + self.num_points_in_pillar = num_points_in_pillar + self.num_points_in_voxel = num_points_in_voxel + self.pc_range = pc_range + self.fp16_enabled = False + + @staticmethod + def get_reference_points(bev_z, bev_h, bev_w, num_points_in_voxel=1, dim='3d', bs=1, device='cuda', dtype=torch.float): + """Get the reference points used in SCA and TSA. + Args: + bev_z, bev_h, bev_w: spatial shape of voxel. + D: sample D points uniformly from each voxel. + device (obj:`device`): The device where + reference_points should be. + Returns: + Tensor: reference points used in decoder, has \ + shape (bs, num_keys, num_levels, 2). 
+ """ + + # reference points in 3D space, used in spatial cross-attention (SCA) + if dim == '3d': + # only sample the center from each voxel + zs = torch.linspace(0.5, bev_z - 0.5, bev_z, dtype=dtype, + device=device).view(1, bev_z, 1, 1).expand(1, bev_z, bev_h, bev_w) / bev_z + ys = torch.linspace(0.5, bev_h - 0.5, bev_h, dtype=dtype, + device=device).view(1, 1, bev_h, 1).expand(1, bev_z, bev_h, bev_w) / bev_h + xs = torch.linspace(0.5, bev_w - 0.5, bev_w, dtype=dtype, + device=device).view(1, 1, 1, bev_w).expand(1, bev_z, bev_h, bev_w) / bev_w + + ref_3d = torch.stack((xs, ys, zs), -1) # (D, bev_z, bev_h, bev_w, 3) + ref_3d = ref_3d.permute(0, 4, 1, 2, 3).flatten(2).permute(0, 2, 1) # (D, num_query, 3) + ref_3d = ref_3d[None].repeat(bs, 1, 1, 1) # (bs, D, num_query, 3) + + if num_points_in_voxel > 1: + num = num_points_in_voxel + delta_z, delta_y, delta_x = 0.5/bev_z, 0.5/bev_h, 0.5/bev_w + # the offset of sampling point from the voxel center is in the range [-delta, delta] + zs_offset = torch.linspace(-delta_z, delta_z, num+2, dtype=dtype, device=device)[1:-1] + zs_offset = zs_offset.view(num, 1, 1, 1).expand(num, bev_z, bev_h, bev_w) + + ys_offset = torch.linspace(-delta_y, delta_y, num+2, dtype=dtype, device=device)[1:-1] + ys_offset = ys_offset.view(num, 1, 1, 1).expand(num, bev_z, bev_h, bev_w) + + xs_offset = torch.linspace(-delta_x, delta_x, num+2, dtype=dtype, device=device)[1:-1] + xs_offset = xs_offset.view(num, 1, 1, 1).expand(num, bev_z, bev_h, bev_w) + + offset_3d = torch.stack((xs_offset, ys_offset, zs_offset), -1) # (num, bev_z, bev_h, bev_w, 3) + offset_3d = offset_3d.permute(0, 4, 1, 2, 3).flatten(2).permute(0, 2, 1) # (num, num_query, 3) + offset_3d = offset_3d[None].repeat(bs, 1, 1, 1) # (bs, num, num_query, 3) + ref_3d = offset_3d + ref_3d + + # num = num_points_in_voxel + # delta_z, delta_y, delta_x = 0.5/bev_z, 0.5/bev_h, 0.5/bev_w + # # the offset of sampling point from the voxel center is in the range [-delta, delta] + # zs_offset_p = torch.linspace(0, delta_z, num//2+2, dtype=dtype, device=device)[1:-1] + # zs_offset_m = torch.linspace(-delta_z, 0, num//2+2, dtype=dtype, device=device)[1:-1] + # zs_offset = torch.cat([zs_offset_m, zs_offset_p], axis=0).view(num, 1, 1, 1).expand(num, bev_z, bev_h, bev_w) + + # ys_offset_p = torch.linspace(0, delta_y, num//2+2, dtype=dtype, device=device)[1:-1] + # ys_offset_m = torch.linspace(-delta_y, 0, num//2+2, dtype=dtype, device=device)[1:-1] + # ys_offset = torch.cat([ys_offset_m, ys_offset_p], axis=0).view(num, 1, 1, 1).expand(num, bev_z, bev_h, bev_w) + + # xs_offset_p = torch.linspace(0, delta_x, num//2+2, dtype=dtype, device=device)[1:-1] + # xs_offset_m = torch.linspace(-delta_x, 0, num//2+2, dtype=dtype, device=device)[1:-1] + # xs_offset = torch.cat([xs_offset_m, xs_offset_p], axis=0).view(num, 1, 1, 1).expand(num, bev_z, bev_h, bev_w) + + # offset_3d = torch.stack((xs_offset, ys_offset, zs_offset), -1) # (num, bev_z, bev_h, bev_w, 3) + # offset_3d = offset_3d.permute(0, 4, 1, 2, 3).flatten(2).permute(0, 2, 1) # (num, num_query, 3) + # offset_3d = offset_3d[None].repeat(bs, 1, 1, 1) # (bs, num, num_query, 3) + # offset_3d = offset_3d + ref_3d + # ref_3d = torch.cat((ref_3d, offset_3d), dim=1) # (bs, num+1, num_query, 3) + + return ref_3d + + # reference points on 2D bev plane, used in temporal self-attention (TSA). 
+ elif dim == '2d': + # ref_y, ref_x = torch.meshgrid( + # torch.linspace( + # 0.5, bev_h - 0.5, bev_h, dtype=dtype, device=device), + # torch.linspace( + # 0.5, bev_w - 0.5, bev_w, dtype=dtype, device=device) + # ) + ref_z, ref_y, ref_x = torch.meshgrid( + torch.linspace(0.5, + bev_z - 0.5, + bev_z, + dtype=dtype, + device=device), + torch.linspace(0.5, + bev_h - 0.5, + bev_h, + dtype=dtype, + device=device), + torch.linspace(0.5, + bev_w - 0.5, + bev_w, + dtype=dtype, + device=device) + ) # shape: (bev_z, bev_h, bev_w) + ref_z = ref_z.reshape(-1)[None] / bev_z + ref_y = ref_y.reshape(-1)[None] / bev_h + ref_x = ref_x.reshape(-1)[None] / bev_w + ref_2d = torch.stack((ref_x, ref_y, ref_z), -1) + ref_2d = ref_2d.repeat(bs, 1, 1).unsqueeze(2) # (bs, num_query, 1, 3) + return ref_2d + + # This function must use fp32!!! + @force_fp32(apply_to=('reference_points', 'img_metas')) + def point_sampling(self, reference_points, pc_range, img_metas): + + lidar2img = [] + for img_meta in img_metas: + lidar2img.append(img_meta['lidar2img']) + lidar2img = np.asarray(lidar2img) + lidar2img = reference_points.new_tensor(lidar2img) # (B, N, 4, 4) + reference_points = reference_points.clone() + + reference_points[..., 0:1] = reference_points[..., 0:1] * \ + (pc_range[3] - pc_range[0]) + pc_range[0] + reference_points[..., 1:2] = reference_points[..., 1:2] * \ + (pc_range[4] - pc_range[1]) + pc_range[1] + reference_points[..., 2:3] = reference_points[..., 2:3] * \ + (pc_range[5] - pc_range[2]) + pc_range[2] + + reference_points = torch.cat( + (reference_points, torch.ones_like(reference_points[..., :1])), -1) # (bs, D, num_query, 4) + + reference_points = reference_points.permute(1, 0, 2, 3) + D, B, num_query = reference_points.size()[:3] + num_cam = lidar2img.size(1) + + reference_points = reference_points.view( + D, B, 1, num_query, 4).repeat(1, 1, num_cam, 1, 1).unsqueeze(-1) + + lidar2img = lidar2img.view( + 1, B, num_cam, 1, 4, 4).repeat(D, 1, 1, num_query, 1, 1) # (D, B, num_cam, num_query, 4, 4) + + reference_points_cam = torch.matmul(lidar2img.to(torch.float32), + reference_points.to(torch.float32)).squeeze(-1) # (D, B, num_cam, num_query, 4) + + eps = 1e-5 + + bev_mask = (reference_points_cam[..., 2:3] > eps) # (D, B, num_cam, num_query, 1) + reference_points_cam = reference_points_cam[..., 0:2] / torch.maximum( + reference_points_cam[..., 2:3], torch.ones_like(reference_points_cam[..., 2:3]) * eps) # in pixel system + + reference_points_cam[..., 0] /= img_metas[0]['img_shape'][0][1] + reference_points_cam[..., 1] /= img_metas[0]['img_shape'][0][0] + + bev_mask = (bev_mask & (reference_points_cam[..., 1:2] > 0.0) + & (reference_points_cam[..., 1:2] < 1.0) + & (reference_points_cam[..., 0:1] < 1.0) + & (reference_points_cam[..., 0:1] > 0.0)) + if digit_version(TORCH_VERSION) >= digit_version('1.8'): + bev_mask = torch.nan_to_num(bev_mask) + else: + bev_mask = bev_mask.new_tensor( + np.nan_to_num(bev_mask.cpu().numpy())) + + reference_points_cam = reference_points_cam.permute(2, 1, 3, 0, 4) # (num_cam, B, num_query, D, 2) + bev_mask = bev_mask.permute(2, 1, 3, 0, 4).squeeze(-1) # (num_cam, B, num_query, D) + + return reference_points_cam, bev_mask + + @auto_fp16() + def forward(self, + bev_query, + key, + value, + *args, + bev_z=None, + bev_h=None, + bev_w=None, + bev_pos=None, + spatial_shapes=None, + level_start_index=None, + valid_ratios=None, + prev_bev=None, + shift=0., + **kwargs): + """Forward function for `TransformerDecoder`. 
+ Args: + bev_query (Tensor): Input BEV query with shape + `(num_query, bs, embed_dims)`. + key & value (Tensor): Input multi-cameta features with shape + (num_cam, num_value, bs, embed_dims) + reference_points (Tensor): The reference + points of offset. has shape + (bs, num_query, 4) when as_two_stage, + otherwise has shape ((bs, num_query, 2). + valid_ratios (Tensor): The radios of valid + points on the feature map, has shape + (bs, num_levels, 2) + Returns: + Tensor: Results with shape [1, num_query, bs, embed_dims] when + return_intermediate is `False`, otherwise it has shape + [num_layers, num_query, bs, embed_dims]. + """ + + output = bev_query + intermediate = [] + + ref_3d = self.get_reference_points( + bev_z, bev_h, bev_w, self.num_points_in_voxel, dim='3d', bs=bev_query.size(1), device=bev_query.device, dtype=bev_query.dtype) + + # the following ref_2d is actualy 3d: (bs, num_query, 1, 3) + ref_2d = self.get_reference_points( + bev_z, bev_h, bev_w, dim='2d', bs=bev_query.size(1), device=bev_query.device, dtype=bev_query.dtype) + + reference_points_cam, bev_mask = self.point_sampling( + ref_3d, self.pc_range, kwargs['img_metas']) + + # bug: this code should be 'shift_ref_2d = ref_2d.clone()', we keep this bug for reproducing our results in paper. + shift_ref_2d = ref_2d # .clone() + shift3d = shift.new_zeros(1, 3) + shift3d[:, :2] = shift + shift_ref_2d += shift3d[:, None, None, :] + + # (num_query, bs, embed_dims) -> (bs, num_query, embed_dims) + bev_query = bev_query.permute(1, 0, 2) + bev_pos = bev_pos.permute(1, 0, 2) + bs, num_query, num_bev_level, _ = ref_2d.shape # (bs, num_query, 1, 2) + if prev_bev is not None: + prev_bev = prev_bev.permute(1, 0, 2) + prev_bev = torch.stack( + [prev_bev, bev_query], 1).reshape(bs*2, num_query, -1) + hybird_ref_2d = torch.stack([shift_ref_2d, ref_2d], 1).reshape( + bs*2, num_query, num_bev_level, 3) + else: + hybird_ref_2d = torch.stack([ref_2d, ref_2d], 1).reshape( + bs*2, num_query, num_bev_level, 3) + + for lid, layer in enumerate(self.layers): + output = layer( + bev_query, + key, + value, + *args, + bev_pos=bev_pos, + ref_2d=hybird_ref_2d, + ref_3d=ref_3d, + bev_z=bev_z, + bev_h=bev_h, + bev_w=bev_w, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + reference_points_cam=reference_points_cam, + bev_mask=bev_mask, + prev_bev=prev_bev, + **kwargs) + + bev_query = output + if self.return_intermediate: + intermediate.append(output) + + if self.return_intermediate: + return torch.stack(intermediate) + + return output + + +@TRANSFORMER_LAYER.register_module() +class VoxelFormerLayer(MyCustomBaseTransformerLayer): + """Implements decoder layer in DETR transformer. + Args: + attn_cfgs (list[`mmcv.ConfigDict`] | list[dict] | dict )): + Configs for self_attention or cross_attention, the order + should be consistent with it in `operation_order`. If it is + a dict, it would be expand to the number of attention in + `operation_order`. + feedforward_channels (int): The hidden dimension for FFNs. + ffn_dropout (float): Probability of an element to be zeroed + in ffn. Default 0.0. + operation_order (tuple[str]): The execution order of operation + in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm'). + Default:None + act_cfg (dict): The activation config for FFNs. Default: `LN` + norm_cfg (dict): Config dict for normalization layer. + Default: `LN`. + ffn_num_fcs (int): The number of fully-connected layers in FFNs. + Default:2. 
+ """ + + def __init__(self, + attn_cfgs, + feedforward_channels, + ffn_dropout=0.0, + operation_order=None, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN'), + ffn_num_fcs=2, + **kwargs): + super(VoxelFormerLayer, self).__init__( + attn_cfgs=attn_cfgs, + feedforward_channels=feedforward_channels, + ffn_dropout=ffn_dropout, + operation_order=operation_order, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + ffn_num_fcs=ffn_num_fcs, + **kwargs) + self.fp16_enabled = False + assert len(operation_order) == 6 + assert set(operation_order) == set( + ['self_attn', 'norm', 'cross_attn', 'ffn']) + + def forward(self, + query, + key=None, + value=None, + bev_pos=None, + query_pos=None, + key_pos=None, + attn_masks=None, + query_key_padding_mask=None, + key_padding_mask=None, + ref_2d=None, + ref_3d=None, + bev_z=None, + bev_h=None, + bev_w=None, + reference_points_cam=None, + mask=None, + spatial_shapes=None, + level_start_index=None, + prev_bev=None, + **kwargs): + """Forward function for `TransformerDecoderLayer`. + + **kwargs contains some specific arguments of attentions. + + Args: + query (Tensor): The input query with shape + [num_queries, bs, embed_dims] if + self.batch_first is False, else + [bs, num_queries embed_dims]. + key (Tensor): The key tensor with shape [num_keys, bs, + embed_dims] if self.batch_first is False, else + [bs, num_keys, embed_dims] . + value (Tensor): The value tensor with same shape as `key`. + query_pos (Tensor): The positional encoding for `query`. + Default: None. + key_pos (Tensor): The positional encoding for `key`. + Default: None. + attn_masks (List[Tensor] | None): 2D Tensor used in + calculation of corresponding attention. The length of + it should equal to the number of `attention` in + `operation_order`. Default: None. + query_key_padding_mask (Tensor): ByteTensor for `query`, with + shape [bs, num_queries]. Only used in `self_attn` layer. + Defaults to None. + key_padding_mask (Tensor): ByteTensor for `query`, with + shape [bs, num_keys]. Default: None. + + Returns: + Tensor: forwarded results with shape [num_queries, bs, embed_dims]. 
+ """ + + norm_index = 0 + attn_index = 0 + ffn_index = 0 + identity = query + if attn_masks is None: + attn_masks = [None for _ in range(self.num_attn)] + elif isinstance(attn_masks, torch.Tensor): + attn_masks = [ + copy.deepcopy(attn_masks) for _ in range(self.num_attn) + ] + warnings.warn(f'Use same attn_mask in all attentions in ' + f'{self.__class__.__name__} ') + else: + assert len(attn_masks) == self.num_attn, f'The length of ' \ + f'attn_masks {len(attn_masks)} must be equal ' \ + f'to the number of attention in ' \ + f'operation_order {self.num_attn}' + + for layer in self.operation_order: + # temporal self attention + if layer == 'self_attn': + + query = self.attentions[attn_index]( + query, + prev_bev, + prev_bev, + identity if self.pre_norm else None, + query_pos=bev_pos, + key_pos=bev_pos, + attn_mask=attn_masks[attn_index], + key_padding_mask=query_key_padding_mask, + reference_points=ref_2d, + spatial_shapes=torch.tensor( + [[bev_z, bev_h, bev_w]], device=query.device), # spatial_shapes + level_start_index=torch.tensor([0], device=query.device), + **kwargs) + attn_index += 1 + identity = query + + elif layer == 'norm': + query = self.norms[norm_index](query) + norm_index += 1 + + # spaital cross attention + elif layer == 'cross_attn': + query = self.attentions[attn_index]( + query, + key, + value, + identity if self.pre_norm else None, + query_pos=query_pos, + key_pos=key_pos, + reference_points=ref_3d, + reference_points_cam=reference_points_cam, + mask=mask, + attn_mask=attn_masks[attn_index], + key_padding_mask=key_padding_mask, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + **kwargs) + attn_index += 1 + identity = query + + elif layer == 'ffn': + query = self.ffns[ffn_index]( + query, identity if self.pre_norm else None) + ffn_index += 1 + + return query diff --git a/projects/mmdet3d_plugin/core/evaluation/eval_hooks.py b/projects/mmdet3d_plugin/core/evaluation/eval_hooks.py index 0477213..09af39f 100644 --- a/projects/mmdet3d_plugin/core/evaluation/eval_hooks.py +++ b/projects/mmdet3d_plugin/core/evaluation/eval_hooks.py @@ -1,4 +1,3 @@ - # Note: Considering that MMCV's EvalHook updated its interface in V1.3.16, # in order to avoid strong version dependency, we did not directly # inherit EvalHook but BaseDistEvalHook. 
@@ -28,7 +27,7 @@ def _calc_dynamic_intervals(start_interval, dynamic_interval_list): class CustomDistEvalHook(BaseDistEvalHook): - def __init__(self, *args, dynamic_intervals=None, **kwargs): + def __init__(self, *args, dynamic_intervals=None, **kwargs): super(CustomDistEvalHook, self).__init__(*args, **kwargs) self.use_dynamic_intervals = dynamic_intervals is not None if self.use_dynamic_intervals: @@ -73,7 +72,7 @@ def _do_evaluate(self, runner): if tmpdir is None: tmpdir = osp.join(runner.work_dir, '.eval_hook') - from projects.mmdet3d_plugin.bevformer.apis.test import custom_multi_gpu_test # to solve circlur import + from projects.mmdet3d_plugin.bevformer.apis.test import custom_multi_gpu_test # to solve circlur import results = custom_multi_gpu_test( runner.model, @@ -84,8 +83,8 @@ def _do_evaluate(self, runner): print('\n') runner.log_buffer.output['eval_iter_num'] = len(self.dataloader) - key_score = self.evaluate(runner, results) + # key_score = self.evaluate(runner, results) + self.dataloader.dataset.evaluate(results, runner=runner) + # if self.save_best: + # self._save_ckpt(runner, key_score) - if self.save_best: - self._save_ckpt(runner, key_score) - diff --git a/projects/mmdet3d_plugin/datasets/__init__.py b/projects/mmdet3d_plugin/datasets/__init__.py index 66afd68..0f680f7 100644 --- a/projects/mmdet3d_plugin/datasets/__init__.py +++ b/projects/mmdet3d_plugin/datasets/__init__.py @@ -1,7 +1,6 @@ from .nuscenes_dataset import CustomNuScenesDataset from .nuscenes_occ import NuSceneOcc from .builder import custom_build_dataset +from .waymo_temporal_zlt import CustomWaymoDataset_T -__all__ = [ - 'CustomNuScenesDataset' -] +__all__ = ["CustomNuScenesDataset", "NuSceneOcc", "CustomWaymoDataset_T"] diff --git a/projects/mmdet3d_plugin/datasets/builder.py b/projects/mmdet3d_plugin/datasets/builder.py index 0ad7a92..e592f1e 100644 --- a/projects/mmdet3d_plugin/datasets/builder.py +++ b/projects/mmdet3d_plugin/datasets/builder.py @@ -1,6 +1,5 @@ # Copyright (c) OpenMMLab. All rights reserved. -import copy import platform import random from functools import partial @@ -15,6 +14,7 @@ from projects.mmdet3d_plugin.datasets.samplers.group_sampler import DistributedGroupSampler from projects.mmdet3d_plugin.datasets.samplers.distributed_sampler import DistributedSampler from projects.mmdet3d_plugin.datasets.samplers.sampler import build_sampler +from projects.mmdet3d_plugin.datasets.samplers.my_group_batch_sampler import MyGroupBatchSampler def build_dataloader(dataset, samples_per_gpu, @@ -25,6 +25,8 @@ def build_dataloader(dataset, seed=None, shuffler_sampler=None, nonshuffler_sampler=None, + use_streaming=False, + cfg=None, **kwargs): """Build PyTorch DataLoader. In distributed training, each GPU/process has a dataloader. @@ -44,37 +46,51 @@ def build_dataloader(dataset, DataLoader: A PyTorch dataloader. 
""" rank, world_size = get_dist_info() - if dist: - # DistributedGroupSampler will definitely shuffle the data to satisfy - # that images on each GPU are in the same group - if shuffle: - sampler = build_sampler(shuffler_sampler if shuffler_sampler is not None else dict(type='DistributedGroupSampler'), - dict( - dataset=dataset, - samples_per_gpu=samples_per_gpu, - num_replicas=world_size, - rank=rank, - seed=seed) - ) - - else: - sampler = build_sampler(nonshuffler_sampler if nonshuffler_sampler is not None else dict(type='DistributedSampler'), - dict( - dataset=dataset, - num_replicas=world_size, - rank=rank, - shuffle=shuffle, - seed=seed) - ) - - batch_size = samples_per_gpu + if use_streaming: + # here we use the sequential data loader for streaming inference + batch_sampler = MyGroupBatchSampler( + dataset, + batch_size=1, + world_size=world_size, + rank=rank, + seed=seed, + total_epochs=cfg.total_epochs, + ) + batch_size = 1 num_workers = workers_per_gpu + sampler = None else: - # assert False, 'not support in bevformer' - print('WARNING!!!!, Only can be used for obtain inference speed!!!!') - sampler = GroupSampler(dataset, samples_per_gpu) if shuffle else None - batch_size = num_gpus * samples_per_gpu - num_workers = num_gpus * workers_per_gpu + if dist: + # DistributedGroupSampler will definitely shuffle the data to satisfy + # that images on each GPU are in the same group + if shuffle: + sampler = build_sampler(shuffler_sampler if shuffler_sampler is not None else dict(type='DistributedGroupSampler'), + dict( + dataset=dataset, + samples_per_gpu=samples_per_gpu, + num_replicas=world_size, + rank=rank, + seed=seed) + ) + + else: + sampler = build_sampler(nonshuffler_sampler if nonshuffler_sampler is not None else dict(type='DistributedSampler'), + dict( + dataset=dataset, + num_replicas=world_size, + rank=rank, + shuffle=shuffle, + seed=seed) + ) + + batch_size = samples_per_gpu + num_workers = workers_per_gpu + else: + # assert False, 'not support in bevformer' + print('WARNING!!!!, Only can be used for obtain inference speed!!!!') + sampler = GroupSampler(dataset, samples_per_gpu) if shuffle else None + batch_size = num_gpus * samples_per_gpu + num_workers = num_gpus * workers_per_gpu init_fn = partial( worker_init_fn, num_workers=num_workers, rank=rank, @@ -84,6 +100,7 @@ def build_dataloader(dataset, dataset, batch_size=batch_size, sampler=sampler, + batch_sampler=batch_sampler if use_streaming else None, num_workers=num_workers, collate_fn=partial(collate, samples_per_gpu=samples_per_gpu), pin_memory=False, diff --git a/projects/mmdet3d_plugin/datasets/cdist.py b/projects/mmdet3d_plugin/datasets/cdist.py new file mode 100644 index 0000000..e84cc8c --- /dev/null +++ b/projects/mmdet3d_plugin/datasets/cdist.py @@ -0,0 +1,154 @@ +import numpy as np +import torch +from sklearn.neighbors import NearestNeighbors + +class Metric_CDist: + def __init__(self, + num_classes = 16, + FREE_LABEL = 23, + use_CDist = True, + use_mIoU = True, + use_infov_mask = True, + use_lidar_mask = False, + use_camera_mask = True, + use_binary_mask = False, + use_dynamic_object_mask = True, + ): + self.num_classes = num_classes + self.FREE_LABEL = FREE_LABEL + self.use_CDist = use_CDist + self.use_mIoU = use_mIoU + self.use_infov_mask = use_infov_mask + self.use_lidar_mask = use_lidar_mask + self.use_camera_mask = use_camera_mask + self.use_binary_mask = use_binary_mask + self.use_dynamic_object_mask = use_dynamic_object_mask + self.CLASS_NAMES = [ + 'GO', + 'TYPE_VEHICLE', "TYPE_BICYCLIST", 
"TYPE_PEDESTRIAN", "TYPE_SIGN", + 'TYPE_TRAFFIC_LIGHT', 'TYPE_POLE', 'TYPE_CONSTRUCTION_CONE', 'TYPE_BICYCLE', 'TYPE_MOTORCYCLE', + 'TYPE_BUILDING', 'TYPE_VEGETATION', 'TYPE_TREE_TRUNK', + 'TYPE_ROAD', 'TYPE_WALKABLE', + 'TYPE_FREE', + ], + + def get_mask(self, voxel_semantics, mask_infov, mask_lidar, mask_camera): + mask=torch.ones_like(voxel_semantics) # shape (bs, w, h, z) + if self.use_infov_mask: + mask = torch.logical_and(mask_infov, mask) + if self.use_lidar_mask: + mask = torch.logical_and(mask_lidar, mask) + if self.use_camera_mask: + mask = torch.logical_and(mask_camera, mask) + if self.use_binary_mask: + mask_binary = torch.logical_or(voxel_semantics == 0, voxel_semantics == self.num_classes-1) # 0: general object, 15: free + mask = torch.logical_and(mask_binary, mask) + if self.use_dynamic_object_mask: + classname = self.CLASS_NAMES + dynamic_class = ['TYPE_VEHICLE', 'TYPE_BICYCLIST', 'TYPE_PEDESTRIAN', 'TYPE_BICYCLE', 'TYPE_MOTORCYCLE'] + class_to_index = {class_name: index for index, class_name in enumerate(classname)} + dynamic_semantics_label_list = [class_to_index[class_name] for class_name in dynamic_class] + mask_dynamic_object = torch.ones_like(voxel_semantics, dtype=torch.bool) + for label in dynamic_semantics_label_list: + # for each dynamic object, mask out the corresponding class + mask_dynamic_object = torch.logical_and(mask_dynamic_object, voxel_semantics != label) + mask = torch.logical_and(mask_dynamic_object, mask) + mask = mask.bool() # ensure the mask is boolean, (bs, bev_w, bev_h, bev_z) + return mask + + def compute_CDist(self, gtocc, predocc, mask): + alpha = 1.0 # Hyperparameter + + # Squeeze dimensions + gtocc = gtocc.squeeze(0) + predocc = predocc.squeeze(0) + mask = mask.squeeze(0) + + # Use mask to change unobserved into 16 (out of range) + gtocc = torch.where(mask, gtocc, torch.ones_like(gtocc) * self.num_classes) + predocc = torch.where(mask, predocc, torch.ones_like(predocc) * self.num_classes) + + # Get all unique class labels + labels_tensor = torch.unique(torch.cat((gtocc, predocc), dim=0)) + labels_list = labels_tensor.tolist() + labels_list = [x for x in labels_list if x < (self.num_classes-1)] # skip free type + + CDist_tensor = torch.zeros((self.num_classes-1), device='cuda') + for label in labels_list: + + # Extract points for the current class + labeled_gtocc = torch.nonzero(gtocc == label).float() # (N_1, 3) + labeled_predocc = torch.nonzero(predocc == label).float() # (N_2, 3) + + if labeled_gtocc.shape[0] == 0 or labeled_predocc.shape[0] == 0: + # CDist_tensor[label] = 2 + CDist_tensor[label] = labeled_gtocc.shape[0] + labeled_predocc.shape[0] + continue + + # convert tensor to numpy + labeled_gtocc_np = labeled_gtocc.cpu().numpy() + labeled_predocc_np = labeled_predocc.cpu().numpy() + + # Use sklearn's NearestNeighbors to find nearest neighbors + reference_gt = NearestNeighbors(n_neighbors=1, algorithm='kd_tree').fit(labeled_gtocc_np) + reference_pred = NearestNeighbors(n_neighbors=1, algorithm='kd_tree').fit(labeled_predocc_np) + + dist_pred_to_gt, _ = reference_gt.kneighbors(labeled_predocc_np) + dist_gt_to_pred, _ = reference_pred.kneighbors(labeled_gtocc_np) + + dist_pred_to_gt = torch.from_numpy(dist_pred_to_gt).squeeze().to('cuda') + dist_gt_to_pred = torch.from_numpy(dist_gt_to_pred).squeeze().to('cuda') + + exp_dist1 = 1 - torch.exp(-dist_pred_to_gt * alpha) + exp_dist2 = 1 - torch.exp(-dist_gt_to_pred * alpha) + chamfer_distance = torch.sum(exp_dist1) + torch.sum(exp_dist2) + + CDist_tensor[label] = chamfer_distance.item() + + 
return CDist_tensor + + def compute_count_matrix(self, gtocc, predocc): + n_cl = self.num_classes + count_matrix = torch.zeros((n_cl, n_cl), device='cuda') + assert gtocc.shape == predocc.shape, "ground truth and predition result should share the same shape and same mask" + + # filter out out of bound semantics + correct_idx = (gtocc >= 0) & (gtocc < n_cl) + count_matrix = torch.bincount(n_cl * gtocc[correct_idx].to(torch.int) + predocc[correct_idx].to(torch.int), + weights=None, minlength=n_cl ** 2).reshape(n_cl, n_cl) + return count_matrix + + def eval_metrics(self, voxel_semantics, voxel_semantics_preds, mask_lidar, mask_infov, mask_camera): + """ + Args: + voxel_semantic: bs, w, h, z + other four are the same + """ + # process the data + voxel_semantics[voxel_semantics==self.FREE_LABEL] = self.num_classes-1 + voxel_semantics_preds[voxel_semantics_preds==self.FREE_LABEL] = self.num_classes-1 + mask = self.get_mask(voxel_semantics, mask_infov, mask_lidar, mask_camera) + + # compute chamfer distance + if self.use_CDist: + CDist_tensor = self.compute_CDist(gtocc=voxel_semantics, predocc=voxel_semantics_preds, mask=mask) + else: + CDist_tensor = torch.zeros((self.num_classes), device='cuda') + + if self.use_mIoU: + # compute mIoU + masked_semantics_gt = voxel_semantics[mask] + masked_semantics_pred = voxel_semantics_preds[mask] + count_matrix = self.compute_count_matrix(gtocc=masked_semantics_gt, predocc=masked_semantics_pred) + else: + count_matrix = torch.zeros((self.num_classes, self.num_classes), device='cuda') + + # count dict + # use count matrix is the same + # gt_count = torch.sum(count_matrix, dim=1) + # pred_count = torch.sum(count_matrix, dim=0) + + occ_results = { "CDist_tensor": CDist_tensor.cpu().numpy(), + "count_matrix": count_matrix.cpu().numpy(), } + + return occ_results \ No newline at end of file diff --git a/projects/mmdet3d_plugin/datasets/nuscenes_dataset.py b/projects/mmdet3d_plugin/datasets/nuscenes_dataset.py index 392c32b..2243dfc 100644 --- a/projects/mmdet3d_plugin/datasets/nuscenes_dataset.py +++ b/projects/mmdet3d_plugin/datasets/nuscenes_dataset.py @@ -10,7 +10,7 @@ import numpy as np from nuscenes.eval.common.utils import quaternion_yaw, Quaternion from mmdet3d.core.bbox import Box3DMode, Coord3DMode, LiDARInstance3DBoxes -from .nuscnes_eval import NuScenesEval_custom +from .nuscenes_eval import NuScenesEval_custom from projects.mmdet3d_plugin.models.utils.visual import save_tensor from mmcv.parallel import DataContainer as DC import random diff --git a/projects/mmdet3d_plugin/datasets/nuscnes_eval.py b/projects/mmdet3d_plugin/datasets/nuscenes_eval.py similarity index 100% rename from projects/mmdet3d_plugin/datasets/nuscnes_eval.py rename to projects/mmdet3d_plugin/datasets/nuscenes_eval.py diff --git a/projects/mmdet3d_plugin/datasets/nuscenes_mono_dataset.py b/projects/mmdet3d_plugin/datasets/nuscenes_mono_dataset.py index b036b87..d7a66bd 100644 --- a/projects/mmdet3d_plugin/datasets/nuscenes_mono_dataset.py +++ b/projects/mmdet3d_plugin/datasets/nuscenes_mono_dataset.py @@ -395,7 +395,7 @@ def _evaluate_single(self, """ from nuscenes import NuScenes #from nuscenes.eval.detection.evaluate import NuScenesEval - from .nuscnes_eval import NuScenesEval_custom + from .nuscenes_eval import NuScenesEval_custom output_dir = osp.join(*osp.split(result_path)[:-1]) self.nusc = NuScenes( version=self.version, dataroot=self.data_root, verbose=False) diff --git a/projects/mmdet3d_plugin/datasets/nuscenes_occ.py b/projects/mmdet3d_plugin/datasets/nuscenes_occ.py index 
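The per-class CDist computed in cdist.py above is a saturated (exponential) Chamfer distance: every nearest-neighbour distance d contributes 1 - exp(-alpha * d), so an exactly matched voxel adds 0 and a far-away mismatch adds at most 1. A self-contained toy sketch of the same quantity, assuming made-up arrays and a helper name that are not part of the patch:

import numpy as np
from sklearn.neighbors import NearestNeighbors

def soft_chamfer(gt_xyz, pred_xyz, alpha=1.0):
    # gt_xyz / pred_xyz: (N, 3) voxel coordinates of one semantic class
    nn_gt = NearestNeighbors(n_neighbors=1, algorithm='kd_tree').fit(gt_xyz)
    nn_pred = NearestNeighbors(n_neighbors=1, algorithm='kd_tree').fit(pred_xyz)
    d_pred_to_gt, _ = nn_gt.kneighbors(pred_xyz)   # distance from each predicted voxel to its closest GT voxel
    d_gt_to_pred, _ = nn_pred.kneighbors(gt_xyz)   # distance from each GT voxel to its closest predicted voxel
    return float(np.sum(1 - np.exp(-alpha * d_pred_to_gt)) + np.sum(1 - np.exp(-alpha * d_gt_to_pred)))

gt = np.array([[0., 0., 0.], [1., 0., 0.]])
pred = np.array([[1., 0., 0.], [2., 0., 0.]])   # prediction shifted by one voxel along x
print(soft_chamfer(gt, pred))  # two terms are exact matches (0), two are 1 - exp(-1) ~= 0.632, total ~= 1.26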
253b98e..a845a8b 100644 --- a/projects/mmdet3d_plugin/datasets/nuscenes_occ.py +++ b/projects/mmdet3d_plugin/datasets/nuscenes_occ.py @@ -1,20 +1,18 @@ +import os import copy - +import random import numpy as np -from mmdet.datasets import DATASETS -from mmdet3d.datasets import NuScenesDataset +import torch + +from tqdm import tqdm import mmcv -from os import path as osp +from mmcv.parallel import DataContainer as DC from mmdet.datasets import DATASETS -import torch -import numpy as np +from mmdet3d.datasets import NuScenesDataset + from nuscenes.eval.common.utils import quaternion_yaw, Quaternion -from .nuscnes_eval import NuScenesEval_custom -from projects.mmdet3d_plugin.models.utils.visual import save_tensor -from mmcv.parallel import DataContainer as DC -import random from nuscenes.utils.geometry_utils import transform_matrix - +from projects.mmdet3d_plugin.datasets.occ_metrics import Metric_mIoU, Metric_FScore @DATASETS.register_module() class NuSceneOcc(NuScenesDataset): @@ -23,32 +21,81 @@ class NuSceneOcc(NuScenesDataset): This datset only add camera intrinsics and extrinsics to the results. """ - def __init__(self, queue_length=4, bev_size=(200, 200), overlap_test=False, *args, **kwargs): - super().__init__(*args, **kwargs) + def __init__(self, *args, + queue_length=1, + load_interval=1, + bev_size=(200, 200), + overlap_test=False, + eval_fscore=False, + point_cloud_range=None, + voxel_size=None, + CLASS_NAMES=None, + **kwargs): + self.eval_fscore = eval_fscore self.queue_length = queue_length self.overlap_test = overlap_test self.bev_size = bev_size - self.data_infos = self.load_annotations(self.ann_file) - + self.my_load_interval = load_interval + self.data_infos_full = self.load_annotations(self.ann_file) + self.data_infos = self.data_infos_full[::self.my_load_interval] + self.image_data_root = os.path.join(self.data_root, 'samples') + self.point_cloud_range = point_cloud_range + self.voxel_size = voxel_size + self.CLASS_NAMES = CLASS_NAMES + if hasattr(self, 'flag'): + self.flag = self.flag[::load_interval] + super().__init__(*args, **kwargs) + # def __len__(self): + # return len(self.data_infos) def load_annotations(self, ann_file): - """Load annotations from ann_file. - + """ + Load annotations from ann_file. Args: ann_file (str): Path of the annotation file. - Returns: list[dict]: List of annotations sorted by timestamps. """ + data = mmcv.load(ann_file) - # self.train_split=data['train_split'] - # self.val_split=data['val_split'] data_infos = list(sorted(data['infos'], key=lambda e: e['timestamp'])) - data_infos = data_infos[::self.load_interval] self.metadata = data['metadata'] self.version = self.metadata['version'] return data_infos + + def __getitem__(self, idx): + """ + Get item from infos according to the given index. + Args: + idx (int): Index for accessing the target data. + Returns: + dict: Data dictionary of the corresponding index. + """ + + if self.test_mode: + return self.prepare_test_data(idx) + + while True: + data = self.prepare_train_data(idx) + if data is None: + idx = self._rand_another(idx) + continue + return data + + def prepare_test_data(self, index): + """ + Prepare data for testing. + Args: + index (int): Index for accessing the target data. + Returns: + dict: Testing data dict of the corresponding index. 
+ """ + + input_dict = self.get_data_info(index) + self.pre_pipeline(input_dict) + example = self.pipeline(input_dict) + return example def prepare_train_data(self, index): """ @@ -58,26 +105,45 @@ def prepare_train_data(self, index): Returns: dict: Training data dict of the corresponding index. """ - queue = [] - index_list = list(range(index-self.queue_length, index)) - random.shuffle(index_list) - index_list = sorted(index_list[1:]) - index_list.append(index) + + # Step 1: get the index list of the history data + index *= self.my_load_interval + if self.queue_length <= 1: + index_list = [index] + else: + index_list = list(range(index-self.queue_length, index)) + random.shuffle(index_list) + index_list = sorted(index_list[1:]) + index_list.append(index) + + # Step 2: get the data according to the index list + data_queue = [] for i in index_list: i = max(0, i) input_dict = self.get_data_info(i) if input_dict is None: return None + + # Step 3: prepare the data by dataloader pipeline self.pre_pipeline(input_dict) example = self.pipeline(input_dict) - if self.filter_empty_gt and \ - (example is None or ~(example['gt_labels_3d']._data != -1).any()): - return None - queue.append(example) - return self.union2one(queue) + data_queue.append(example) + # Step 4: union the data_queue into one single sample + return self.union2one(data_queue) def union2one(self, queue): + """ + convert sample queue into one single sample. + Args: + queue (List[Dict]): the sample queue + Returns: + queue (Dict): the single sample + """ + + # Step 1: 1. union the `img` tensor into a single tensor. + # 2. union the `img_metas` dict into a dict[dict] + # 3. add prev_bev_exists and scene_token imgs_list = [each['img'].data for each in queue] metas_map = {} prev_scene_token = None @@ -100,86 +166,91 @@ def union2one(self, queue): metas_map[i]['can_bus'][-1] -= prev_angle prev_pos = copy.deepcopy(tmp_pos) prev_angle = copy.deepcopy(tmp_angle) + + # Step 2: pack them together queue[-1]['img'] = DC(torch.stack(imgs_list), cpu_only=False, stack=True) queue[-1]['img_metas'] = DC(metas_map, cpu_only=True) queue = queue[-1] + return queue def get_data_info(self, index): - """Get data info according to the given index. - + """ + Get data info according to the given index. Args: index (int): Index of the sample data to get. - Returns: - dict: Data information that will be passed to the data \ - preprocessing pipelines. It includes the following keys: - - - sample_idx (str): Sample index. - - pts_filename (str): Filename of point clouds. - - sweeps (list[dict]): Infos of sweeps. - - timestamp (float): Sample timestamp. - - img_filename (str, optional): Image filename. - - lidar2img (list[np.ndarray], optional): Transformations \ - from lidar to different cameras. - - ann_info (dict): Annotation info. + dict: Data information that will be passed to the data preprocessing pipelines. 
""" - info = self.data_infos[index] - # standard protocal modified from SECOND.Pytorch + + # Step 1: get the data info + info = self.data_infos_full[index] + + # Step 2: add some basic info without preprocessing input_dict = dict( occ_gt_path=info['occ_gt_path'], sample_idx=info['token'], pts_filename=info['lidar_path'], - sweeps=info['sweeps'], ego2global_translation=info['ego2global_translation'], ego2global_rotation=info['ego2global_rotation'], - prev_idx=info['prev'], - next_idx=info['next'], scene_token=info['scene_token'], can_bus=info['can_bus'], frame_idx=info['frame_idx'], - timestamp=info['timestamp'] / 1e6, ) + + # Step 3: add the `ego2lidar` transformation matrix lidar2ego_rotation = info['lidar2ego_rotation'] lidar2ego_translation = info['lidar2ego_translation'] ego2lidar = transform_matrix(translation=lidar2ego_translation, rotation=Quaternion(lidar2ego_rotation), inverse=True) - input_dict['ego2lidar']=ego2lidar + input_dict['ego2lidar'] = ego2lidar + + # Step 4: get the image paths, lidar2cam, intrinsics, lidar2img for each image if self.modality['use_camera']: - image_paths = [] + img_filename = [] lidar2img_rts = [] lidar2cam_rts = [] cam_intrinsics = [] for cam_type, cam_info in info['cams'].items(): - image_paths.append(cam_info['data_path']) - # obtain lidar to image transformation matrix + data_path = cam_info['data_path'] + basename = os.path.basename(data_path) + img_filename.append(os.path.join(self.image_data_root, cam_type, basename)) + + # obtain `lidar2cam` transformation matrix lidar2cam_r = np.linalg.inv(cam_info['sensor2lidar_rotation']) - lidar2cam_t = cam_info[ - 'sensor2lidar_translation'] @ lidar2cam_r.T + lidar2cam_t = cam_info['sensor2lidar_translation'] @ lidar2cam_r.T lidar2cam_rt = np.eye(4) lidar2cam_rt[:3, :3] = lidar2cam_r.T lidar2cam_rt[3, :3] = -lidar2cam_t + + # obtain `lidar2img` and `intrinsic` transformation matrix intrinsic = cam_info['cam_intrinsic'] viewpad = np.eye(4) viewpad[:intrinsic.shape[0], :intrinsic.shape[1]] = intrinsic lidar2img_rt = (viewpad @ lidar2cam_rt.T) - lidar2img_rts.append(lidar2img_rt) + lidar2img_rts.append(lidar2img_rt) cam_intrinsics.append(viewpad) lidar2cam_rts.append(lidar2cam_rt.T) + input_dict.update( dict( - img_filename=image_paths, + img_filename=img_filename, lidar2img=lidar2img_rts, cam_intrinsic=cam_intrinsics, lidar2cam=lidar2cam_rts, - )) + ) + ) if not self.test_mode: annos = self.get_ann_info(index) input_dict['ann_info'] = annos + # Step 5: get the `ego2global` transformation matrix rotation = Quaternion(input_dict['ego2global_rotation']) translation = input_dict['ego2global_translation'] + ego2global = transform_matrix(translation=translation, rotation=rotation, inverse=False) + + # Step 6: update the `can_bus` info can_bus = input_dict['can_bus'] can_bus[:3] = translation can_bus[3:7] = rotation @@ -189,77 +260,39 @@ def get_data_info(self, index): can_bus[-2] = patch_angle / 180 * np.pi can_bus[-1] = patch_angle + input_dict.update( + dict( + ego2global=ego2global, + can_bus=can_bus, + ) + ) + return input_dict - def __getitem__(self, idx): - """Get item from infos according to the given index. - Returns: - dict: Data dictionary of the corresponding index. 
- """ - if self.test_mode: - return self.prepare_test_data(idx) - while True: - - data = self.prepare_train_data(idx) - if data is None: - idx = self._rand_another(idx) - continue - return data - - def _evaluate_single(self, - result_path, - logger=None, - metric='bbox', - result_name='pts_bbox'): - """Evaluation for a single model in nuScenes protocol. - - Args: - result_path (str): Path of the result file. - logger (logging.Logger | str | None): Logger used for printing - related information during evaluation. Default: None. - metric (str): Metric name used for evaluation. Default: 'bbox'. - result_name (str): Result name in the metric prefix. - Default: 'pts_bbox'. - - Returns: - dict: Dictionary of evaluation details. - """ - from nuscenes import NuScenes - self.nusc = NuScenes(version=self.version, dataroot=self.data_root, - verbose=True) + def evaluate(self, occ_results, runner=None, **eval_kwargs): + self.occ_eval_metrics = Metric_mIoU( + point_cloud_range=self.point_cloud_range, + voxel_size=self.voxel_size, + CLASS_NAMES=self.CLASS_NAMES, + ) - output_dir = osp.join(*osp.split(result_path)[:-1]) + if self.eval_fscore: # False + self.fscore_eval_metrics=Metric_FScore( + leaf_size=10, + threshold_acc=0.4, + threshold_complete=0.4, + voxel_size=[0.4, 0.4, 0.4], + range=[-40, -40, -1, 40, 40, 5.4], + void=[17, 255], + use_lidar_mask=False, + use_image_mask=True, + ) + + print('\nStarting Evaluation...') + for index, results in enumerate(tqdm(occ_results)): + count_matrix = results['count_matrix'] + scene_id = results['scene_id'] + frame_id = results['frame_id'] + self.occ_eval_metrics.add_batch(count_matrix=count_matrix, scene_id=scene_id, frame_id=frame_id) - eval_set_map = { - 'v1.0-mini': 'mini_val', - 'v1.0-trainval': 'val', - } - self.nusc_eval = NuScenesEval_custom( - self.nusc, - config=self.eval_detection_configs, - result_path=result_path, - eval_set=eval_set_map[self.version], - output_dir=output_dir, - verbose=True, - overlap_test=self.overlap_test, - data_infos=self.data_infos - ) - self.nusc_eval.main(plot_examples=0, render_curves=False) - # record metrics - metrics = mmcv.load(osp.join(output_dir, 'metrics_summary.json')) - detail = dict() - metric_prefix = f'{result_name}_NuScenes' - for name in self.CLASSES: - for k, v in metrics['label_aps'][name].items(): - val = float('{:.4f}'.format(v)) - detail['{}/{}_AP_dist_{}'.format(metric_prefix, name, k)] = val - for k, v in metrics['label_tp_errors'][name].items(): - val = float('{:.4f}'.format(v)) - detail['{}/{}_{}'.format(metric_prefix, name, k)] = val - for k, v in metrics['tp_errors'].items(): - val = float('{:.4f}'.format(v)) - detail['{}/{}'.format(metric_prefix, - self.ErrNameMapping[k])] = val - detail['{}/NDS'.format(metric_prefix)] = metrics['nd_score'] - detail['{}/mAP'.format(metric_prefix)] = metrics['mean_ap'] - return detail + self.occ_eval_metrics.print(runner=runner) diff --git a/projects/mmdet3d_plugin/datasets/occ_metrics.py b/projects/mmdet3d_plugin/datasets/occ_metrics.py new file mode 100644 index 0000000..cf06107 --- /dev/null +++ b/projects/mmdet3d_plugin/datasets/occ_metrics.py @@ -0,0 +1,358 @@ +import os +import math + +from tqdm import tqdm +import numpy as np +from datetime import datetime +from functools import reduce +from sklearn.neighbors import KDTree +from termcolor import colored + +np.seterr(divide='ignore', invalid='ignore') +os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE" + + +def pcolor(string, color, on_color=None, attrs=None): + """ + Produces a colored string for printing + + 
Parameters + ---------- + string : str + String that will be colored + color : str + Color to use + on_color : str + Background color to use + attrs : list of str + Different attributes for the string + + Returns + ------- + string: str + Colored string + """ + return colored(string, color, on_color, attrs) + + +def getCellCoordinates(points, voxelSize): + return (points / voxelSize).astype(np.int) + + +def getNumUniqueCells(cells): + M = cells.max() + 1 + return np.unique(cells[:, 0] + M * cells[:, 1] + M ** 2 * cells[:, 2]).shape[0] + +class Metric_mIoU(): + def __init__(self, + point_cloud_range, + voxel_size, + CLASS_NAMES, + use_CDist=False, + **kwargs, + ): + self.use_CDist = use_CDist + self.num_classes = len(CLASS_NAMES) + self.point_cloud_range = point_cloud_range + self.voxel_size = voxel_size + self.CLASS_NAMES = CLASS_NAMES + self.occ_xdim = int((self.point_cloud_range[3] - self.point_cloud_range[0]) / self.voxel_size[0]) + self.occ_ydim = int((self.point_cloud_range[4] - self.point_cloud_range[1]) / self.voxel_size[1]) + self.occ_zdim = int((self.point_cloud_range[5] - self.point_cloud_range[2]) / self.voxel_size[2]) + self.voxel_num = self.occ_xdim * self.occ_ydim * self.occ_zdim + self.hist = np.zeros((self.num_classes, self.num_classes)) + self.cnt = 0 + self.class_voxel_count_pred = {} + self.class_voxel_count_gt = {} + self.CDist_tensor = np.zeros(self.num_classes-1) + + def hist_info(self, n_cl, pred, gt): + """ + This matrix is called by the function `compute_mIoU`. + But I move the count matrix process to the `forward_test` function in the model. + So this function will not be used. + build confusion matrix + Args: + n_cl (int): num_classes_occupancy + pred (1-d array): pred_occupancy_label + gt (1-d array): gt_occupancu_label + Returns: + tuple:(hist, correctly number_predicted_labels, num_labelled_sample) + """ + + assert pred.shape == gt.shape + k = (gt >= 0) & (gt < n_cl) # exclude 255 + labeled = np.sum(k) + correct = np.sum((pred[k] == gt[k])) + + return ( + np.bincount( + n_cl * gt[k].astype(int) + pred[k].astype(int), minlength=n_cl ** 2 + ).reshape(n_cl, n_cl), + correct, + labeled, + ) + + def per_class_iu(self, hist): + ''' + iou = TP / (TP + FP + FN). + ''' + return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist)) + + def per_class_recall(self, hist): + ''' + recall = TP / (TP + FN). But I do not use it, it can be added easily. + ''' + return np.diag(hist) / hist.sum(0) + + def per_class_precision(self, hist): + ''' + precision = TP / (TP + FP). But I do not use it, it can be added easily. + ''' + return np.diag(hist) / hist.sum(1) + + def compute_mIoU(self, pred, label, n_classes): + ''' + Because I move the compute count matrix process to the `forward_test` function in the model, this function will not be used. + ''' + hist = np.zeros((n_classes, n_classes)) + new_hist, correct, labeled = self.hist_info(n_classes, pred.flatten(), label.flatten()) + hist += new_hist + mIoUs = self.per_class_iu(hist) + return round(np.nanmean(mIoUs) * 100, 2), hist + + def add_batch(self, CDist_tensor=None, count_matrix=None, scene_id=None, frame_id=None): + ''' + The main evalution function. Called by function `eval` in dataset class. + Args: + CDist_tensor (np.array): (num_classes-1, ) + count_matrix (np.array): (num_classes, num_classes) + with scene_id(int) and frame_id(int), we can change our evaluation strategy with a little bit of code. + For example we can only evaluation a specific subset of scenes. 
+ ''' + self.cnt += 1 + self.hist += count_matrix + if self.use_CDist: + self.CDist_tensor += CDist_tensor + + def print_iou(self, hist, mIoU_type_str=None, runner=None): + ''' + print the final IoU results. Called by function `print`. Use mIoU_type_str to control the output. + Args: + hist (np.array): (num_classes, num_classes) count matrix. + mIoU_type_str (str): for flexible control. I do not use it now. + ''' + + print(f"===> {mIoU_type_str} mIoU: ") + # Step 1: count mIoU for each class + mIoU = self.per_class_iu(hist) + file_path = "work_dirs/result.txt" + + # Step 2: print IoU for each class + for ind_class in range(self.num_classes): + class_name = self.CLASS_NAMES[ind_class] + iou_value = mIoU[ind_class] + if not math.isnan(iou_value): + print(f"{class_name} IoU: {round(iou_value * 100, 2)}") + if runner is not None: + runner.log_buffer.output[f"{class_name} IoU"] = round(iou_value * 100, 2) + else: + with open(file_path, "a") as file: + file.write(f"{class_name} IoU: {round(iou_value * 100, 2)}\n") + + # Step 3: print mIoU + + # mIoU_go_free = round(np.nanmean(mIoU[1:-1]) * 100, 2) + mIoU_free = round(np.nanmean(mIoU[:-1]) * 100, 2) + # mIoU_all = round(np.nanmean(mIoU) * 100, 2) + # IoU_go_motor_free = np.concatenate((mIoU[1:9], mIoU[10:-1]), axis=0) + # mIoU_go_motor_free = round(np.nanmean(IoU_go_motor_free) * 100, 2) + IoU_motor_free = np.concatenate((mIoU[0:9], mIoU[10:-1]), axis=0) + mIoU_motor_free = round(np.nanmean(IoU_motor_free) * 100, 2) + # print(f'===> mIoU without general object and free classes: ' + str(mIoU_go_free)) # mIoU without general object and free classes + print(f'===> mIoU of non free class: ' + str(mIoU_free)) # mIoU of non-free classes + # print(f'===> mIoU: ' + str(mIoU_all)) # mIoU of all classes + # print(f'===> mIoU without general object, MOTORCYCLE and free class: ' + str(mIoU_go_motor_free)) + print(f'===> mIoU without MOTORCYCLE class(only for waymo): ' + str(mIoU_motor_free)) + + if runner is not None: + # write the results to the log file and log.json file in work directory + runner.log_buffer.output['mIoU of non free'] = mIoU_free + runner.log_buffer.output['mIoU without MOTORCYCLE(waymo)'] = mIoU_motor_free + runner.log_buffer.ready = True + else: + with open(file_path, "a") as file: + # file.write(f'===> mIoU without general object and free classes: ' + str(mIoU_go_free) + '\n') + file.write(f'===> mIoU of non free class: ' + str(mIoU_free) + '\n') + # file.write(f'===> mIoU: ' + str(mIoU_all) + '\n') + # file.write(f'===> mIoU without general object and MOTORCYCLE class: ' + str(mIoU_go_motor_free) + '\n') + file.write(f'===> mIoU without MOTORCYCLE class: ' + str(mIoU_motor_free) + '\n') + + def print(self, runner=None): # this is important + ''' + compute and print the final results. Called by function `eval` in dataset class. + Args: + None. All information are stored in the member variables by function `add_batch` + Returns: + I want to add some. 
+ ''' + + # Step 1: prepare some parameters + gt_count = self.hist.sum(1) + pred_count = self.hist.sum(0) + total_count = gt_count + pred_count + + # Step 2: compute CDist (controlled by `self.use_CDist`) + if self.use_CDist: + new_array = total_count[:15] + self.CDist_tensor /= new_array + # print CDist_tensor here + for ind_class in range(self.num_classes-1): # without free + class_name = self.CLASS_NAMES[ind_class] + print(f"{class_name} CDist: {round(self.CDist_tensor[ind_class] * 100, 2)}") + + # Step 3: print mIoU and save to file + current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + file_path = "work_dirs/result.txt" + if os.path.exists(file_path): + with open(file_path, "a") as file: + file.write(current_time + "\n") + else: + with open(file_path, "w") as file: + file.write(current_time + "\n") + + self.print_iou(self.hist, mIoU_type_str='all', runner=runner) + + return + + def eval_from_file(self, pred_path, gt_path, load_interval=1): + gts_dict = {} + for scene in os.listdir(gt_path): + for frame in os.listdir(os.path.join(gt_path, scene)): + scene_token = frame + gts_dict[scene_token] = os.path.join(gt_path, scene, frame, 'labels.npz') + print('number of gt samples = {}'.format(len(gts_dict))) + + dirs_list = [ + "work_dirs/bevformer_base_occ_conv3d_waymo_allgift/results_epoch8/", + "work_dirs/bevformer_base_occ_conv3d_waymo_ambiguous/results_epoch8/", + "work_dirs/bevformer_base_occ_conv3d_waymo_noohem/results_epoch8/", + "work_dirs/bevformer_base_occ_conv3d_waymo_no_cross_atten/results_epoch8/", + ] + pred_path = dirs_list[2] # "work_dirs/bevformer_base_occ_conv3d_waymo_ambiguous/results_epoch6/" + union_files = set(os.listdir(dirs_list[0])) + print(pred_path) + for _dir in dirs_list: + union_files = union_files.intersection(set(os.listdir(_dir))) + + preds_dict = {} + for file in os.listdir(pred_path)[::load_interval]: + if file not in union_files: continue + if '.npz' not in file: continue + + scene_token = file.split('.npz')[0] + preds_dict[scene_token] = os.path.join(pred_path, file) + print('number of pred samples = {}'.format(len(preds_dict))) + return gts_dict, preds_dict + + def __call__(self): + gts_dict, preds_dict = self.eval_from_file() + # _mIoU = 0. + for scene_token in tqdm(preds_dict.keys()): + cnt += 1 + # gt = np.load(gts_dict[scene_token]) + # bs,H,W,Z + self.add_batch() + # _mIoU += _miou + + results = self.print() + return results + + +class Metric_FScore(): + def __init__(self, + leaf_size=10, + threshold_acc=0.6, + threshold_complete=0.6, + voxel_size=[0.4, 0.4, 0.4], + range=[-40, -40, -1, 40, 40, 5.4], + void=[17, 255], + use_lidar_mask=False, + use_camera_mask=False, + ) -> None: + + self.leaf_size = leaf_size + self.threshold_acc = threshold_acc + self.threshold_complete = threshold_complete + self.voxel_size = voxel_size + self.range = range + self.void = void + self.use_lidar_mask = use_lidar_mask + self.use_camera_mask = use_camera_mask + self.cnt=0 + self.tot_acc = 0. + self.tot_cmpl = 0. + self.tot_f1_mean = 0. 
+ self.eps = 1e-8 + raise NotImplementedError + + def voxel2points(self, voxel): + # occIdx = torch.where(torch.logical_and(voxel != FREE, voxel != NOT_OBSERVED)) + # if isinstance(voxel, np.ndarray): voxel = torch.from_numpy(voxel) + mask = np.logical_not(reduce(np.logical_or, [voxel == self.void[i] for i in range(len(self.void))])) + occIdx = np.where(mask) + + points = np.concatenate((occIdx[0][:, None] * self.voxel_size[0] + self.voxel_size[0] / 2 + self.range[0], \ + occIdx[1][:, None] * self.voxel_size[1] + self.voxel_size[1] / 2 + self.range[1], \ + occIdx[2][:, None] * self.voxel_size[2] + self.voxel_size[2] / 2 + self.range[2]), + axis=1) + return points + + def add_batch(self,semantics_pred,semantics_gt,mask_lidar,mask_camera): + + # for scene_token in tqdm(preds_dict.keys()): + self.cnt += 1 + + if self.use_camera_mask: + + semantics_gt[mask_camera == False] = 255 + semantics_pred[mask_camera == False] = 255 + elif self.use_lidar_mask: + semantics_gt[mask_lidar == False] = 255 + semantics_pred[mask_lidar == False] = 255 + else: + pass + + ground_truth = self.voxel2points(semantics_gt) + prediction = self.voxel2points(semantics_pred) + if prediction.shape[0] == 0: + accuracy=0 + completeness=0 + fmean=0 + + else: + prediction_tree = KDTree(prediction, leaf_size=self.leaf_size) + ground_truth_tree = KDTree(ground_truth, leaf_size=self.leaf_size) + complete_distance, _ = prediction_tree.query(ground_truth) + complete_distance = complete_distance.flatten() + + accuracy_distance, _ = ground_truth_tree.query(prediction) + accuracy_distance = accuracy_distance.flatten() + + # evaluate completeness + complete_mask = complete_distance < self.threshold_complete + completeness = complete_mask.mean() + + # evalute accuracy + accuracy_mask = accuracy_distance < self.threshold_acc + accuracy = accuracy_mask.mean() + + fmean = 2.0 / (1 / (accuracy+self.eps) + 1 / (completeness+self.eps)) + + self.tot_acc += accuracy + self.tot_cmpl += completeness + self.tot_f1_mean += fmean + + def count_fscore(self,): + base_color, attrs = 'red', ['bold', 'dark'] + print(pcolor('######## F score: {} #######'.format(self.tot_f1_mean / self.cnt), base_color, attrs=attrs)) \ No newline at end of file diff --git a/projects/mmdet3d_plugin/datasets/pipelines/__init__.py b/projects/mmdet3d_plugin/datasets/pipelines/__init__.py index 6d976ae..c8f5b7a 100644 --- a/projects/mmdet3d_plugin/datasets/pipelines/__init__.py +++ b/projects/mmdet3d_plugin/datasets/pipelines/__init__.py @@ -2,8 +2,9 @@ PadMultiViewImage, NormalizeMultiviewImage, PhotoMetricDistortionMultiViewImage, CustomCollect3D, RandomScaleImageMultiViewImage) from .formating import CustomDefaultFormatBundle3D -from .transform_3d import LoadOccGTFromFile +from .loading import LoadOccGTFromFileNuScenes, LoadOccGTFromFileWaymo, MyLoadMultiViewImageFromFiles __all__ = [ 'PadMultiViewImage', 'NormalizeMultiviewImage', - 'PhotoMetricDistortionMultiViewImage', 'CustomDefaultFormatBundle3D', 'CustomCollect3D', 'RandomScaleImageMultiViewImage' + 'PhotoMetricDistortionMultiViewImage', 'CustomDefaultFormatBundle3D', 'CustomCollect3D', 'RandomScaleImageMultiViewImage', + 'MyLoadMultiViewImageFromFiles', ] \ No newline at end of file diff --git a/projects/mmdet3d_plugin/datasets/pipelines/loading.py b/projects/mmdet3d_plugin/datasets/pipelines/loading.py index e69de29..532d32c 100644 --- a/projects/mmdet3d_plugin/datasets/pipelines/loading.py +++ b/projects/mmdet3d_plugin/datasets/pipelines/loading.py @@ -0,0 +1,214 @@ +import numpy as np +from numpy import random 
+import mmcv +from mmdet.datasets.builder import PIPELINES +from mmcv.parallel import DataContainer as DC +import os +from PIL import Image + + + +@PIPELINES.register_module() +class MyLoadMultiViewImageFromFiles(object): + """ + This image file loader is for Waymo dataset. + Load multi channel images from a list of separate channel files. + Expects results['img_filename'] to be a list of filenames. + note that we read image in BGR style to align with opencv.imread + Args: + to_float32 (bool): Whether to convert the img to float32. + Defaults to False. + color_type (str): Color type of the file. Defaults to 'unchanged'. + """ + + def __init__(self, to_float32=False, img_scale=None, color_type='unchanged'): + self.to_float32 = to_float32 + self.img_scale = img_scale + self.color_type = color_type + + def pad(self, img): + # to pad the 5 input images into a same size (for Waymo) + if img.shape[0] != self.img_scale[0]: + padded = np.zeros((self.img_scale[0],self.img_scale[1],3)) + padded[0:img.shape[0], 0:img.shape[1], :] = img + img = padded + return img + + def __call__(self, results): + """ + Call function to load multi-view image from files. + Args: + results (dict): Result dict containing multi-view image filenames. + Returns: + dict: The result dict containing the multi-view image data. + Added keys and values are described below. + - filename (str): Multi-view image filenames. + - img (np.ndarray): Multi-view image arrays. + - img_shape (tuple[int]): Shape of multi-view image arrays. + - ori_shape (tuple[int]): Shape of original image arrays. + - pad_shape (tuple[int]): Shape of padded image arrays. + - img_norm_cfg (dict): Normalization configuration of images. + """ + + # Step 1: load image according to the filename + filename = results['img_filename'] + img = [np.asarray(Image.open(name))[...,::-1] for name in filename] + + # Step 2: record the original shape of the image + results['ori_shape'] = [img_i.shape for img_i in img] + + # Step 3: pad the image + if self.img_scale is not None: + img = [self.pad(img_i) for img_i in img] + + # Step 4: stack the image + img = np.stack(img, axis=-1) + + # Step 5: convert the image to float32 + if self.to_float32: + img = img.astype(np.float32) + + # Step 6: record the filename, image, image shape, and image normalization configuration + results['filename'] = filename + results['img'] = [img[..., i] for i in range(img.shape[-1])] + results['img_shape'] = img.shape + results['pad_shape'] = img.shape + num_channels = 1 if len(img.shape) < 3 else img.shape[2] + results['img_norm_cfg'] = dict(mean=np.zeros(num_channels, dtype=np.float32), + std=np.ones(num_channels, dtype=np.float32), + to_rgb=False) # This will be replaced in `NormalizeMultiviewImage` + + return results + + def __repr__(self): + """str: Return a string that describes the module.""" + return "{} (to_float32={}, color_type='{}')".format(self.__class__.__name__, self.to_float32, self.color_type) + + +@PIPELINES.register_module() +class LoadOccGTFromFileWaymo(object): + """Load multi channel images from a list of separate channel files. + + Expects results['img_filename'] to be a list of filenames. + note that we read image in BGR style to align with opencv.imread + Args: + to_float32 (bool): Whether to convert the img to float32. + Defaults to False. + color_type (str): Color type of the file. Defaults to 'unchanged'. 
+ """ + + def __init__( + self, + data_root, + use_larger=True, + crop_x=False, + use_infov_mask=True, + use_lidar_mask=False, + use_camera_mask=True, + FREE_LABEL=None, + num_classes=None, + ): + self.use_larger=use_larger + self.data_root = data_root # this is occ_gt_data_root in config file + self.crop_x = crop_x + self.use_infov_mask = use_infov_mask + self.use_lidar_mask = use_lidar_mask + self.use_camera_mask = use_camera_mask + self.FREE_LABEL = FREE_LABEL + self.num_classes = num_classes + + def __call__(self, results): + # Step 1: get the occupancy ground truth file path + pts_filename = results['pts_filename'] + basename = os.path.basename(pts_filename) + seq_name = basename[1:4] + frame_name = basename[4:7] + if self.use_larger: + file_path = os.path.join(self.data_root, seq_name, '{}_04.npz'.format(frame_name)) + else: + file_path = os.path.join(self.data_root, seq_name, '{}.npz'.format(frame_name)) + + # Step 2: load the file + occ_labels = np.load(file_path) + semantics = occ_labels['voxel_label'] + mask_infov = occ_labels['infov'].astype(bool) + mask_lidar = occ_labels['origin_voxel_state'].astype(bool) + mask_camera = occ_labels['final_voxel_state'].astype(bool) + + # Step 3: crop the x axis + if self.crop_x: # default is False + w, h, d = semantics.shape + semantics = semantics[w//2:, :, :] + mask_infov = mask_infov[w//2:, :, :] + mask_lidar = mask_lidar[w//2:, :, :] + mask_camera = mask_camera[w//2:, :, :] + + # Step 4: unify the mask + mask = np.ones_like(semantics).astype(bool) # 200, 200, 16 + if self.use_infov_mask: + mask = mask & mask_infov + if self.use_lidar_mask: + mask = mask & mask_lidar + if self.use_camera_mask: + mask = mask & mask_camera + mask = mask.astype(bool) + results['valid_mask'] = mask + + # Step 5: change the FREE_LABEL to num_classes-1 + if self.FREE_LABEL is not None: + semantics[semantics == self.FREE_LABEL] = self.num_classes - 1 + results['voxel_semantics'] = semantics + + return results + + def __repr__(self): + """str: Return a string that describes the module.""" + return "{} (data_root={}')".format( + self.__class__.__name__, self.data_root) + +@PIPELINES.register_module() +class LoadOccGTFromFileNuScenes(object): + """ + Load multi channel images from a list of separate channel files. + Expects results['img_filename'] to be a list of filenames. + note that we read image in BGR style to align with opencv.imread + Args: + to_float32 (bool): Whether to convert the img to float32. + Defaults to False. + color_type (str): Color type of the file. Defaults to 'unchanged'. 
+ """ + + def __init__( + self, + data_root, + ): + self.data_root = data_root + + def __call__(self, results): + # Step 1: get the occ_gt_path + occ_gt_path = results['occ_gt_path'] + occ_gt_path = os.path.join(self.data_root, occ_gt_path) + + # Step 2: parse the scene idx + parts = occ_gt_path.split('/') + scene_part = [part for part in parts if 'scene-' in part] + if scene_part: + scene_number = scene_part[0].split('-')[1] + results['scene_idx'] = int(scene_number) + + # Step 3: load the occ_gt file + occ_labels = np.load(occ_gt_path) + semantics = occ_labels['semantics'] + mask_lidar = occ_labels['mask_lidar'].astype(bool) + mask_camera = occ_labels['mask_camera'].astype(bool) + + results['voxel_semantics'] = semantics + results['mask_lidar'] = mask_lidar + results['mask_camera'] = mask_camera + + return results + + def __repr__(self): + """str: Return a string that describes the module.""" + return "{} (data_root={}')".format( + self.__class__.__name__, self.data_root) \ No newline at end of file diff --git a/projects/mmdet3d_plugin/datasets/pipelines/transform_3d.py b/projects/mmdet3d_plugin/datasets/pipelines/transform_3d.py index 7e14300..9086f35 100644 --- a/projects/mmdet3d_plugin/datasets/pipelines/transform_3d.py +++ b/projects/mmdet3d_plugin/datasets/pipelines/transform_3d.py @@ -6,52 +6,15 @@ import os -@PIPELINES.register_module() -class LoadOccGTFromFile(object): - """Load multi channel images from a list of separate channel files. - - Expects results['img_filename'] to be a list of filenames. - note that we read image in BGR style to align with opencv.imread - Args: - to_float32 (bool): Whether to convert the img to float32. - Defaults to False. - color_type (str): Color type of the file. Defaults to 'unchanged'. - """ - - def __init__( - self, - data_root, - ): - self.data_root = data_root - - def __call__(self, results): - # print(results.keys()) - occ_gt_path = results['occ_gt_path'] - occ_gt_path = os.path.join(self.data_root,occ_gt_path) - - occ_labels = np.load(occ_gt_path) - semantics = occ_labels['semantics'] - mask_lidar = occ_labels['mask_lidar'] - mask_camera = occ_labels['mask_camera'] - - results['voxel_semantics'] = semantics - results['mask_lidar'] = mask_lidar - results['mask_camera'] = mask_camera - return results - - def __repr__(self): - """str: Return a string that describes the module.""" - return "{} (data_root={}')".format( - self.__class__.__name__, self.data_root) - @PIPELINES.register_module() class PadMultiViewImage(object): - """Pad the multi-view image. - There are two padding modes: (1) pad to a fixed size and (2) pad to the - minimum size that is divisible by some number. - Added keys are "pad_shape", "pad_fixed_size", "pad_size_divisor", + """ + Pad the multi-view image and add keys "pad_shape". + There are two padding modes: (1) pad to a fixed size + (2) pad to the minimum size that is divisible by some number. + Args: size (tuple, optional): Fixed padding size. size_divisor (int, optional): The divisor of padded size. 
@@ -67,20 +30,15 @@ def __init__(self, size=None, size_divisor=None, pad_val=0): assert size is None or size_divisor is None def _pad_img(self, results): - """Pad images according to ``self.size``.""" + """Pad images according to `self.size`.""" if self.size is not None: - padded_img = [mmcv.impad( - img, shape=self.size, pad_val=self.pad_val) for img in results['img']] + padded_img = [mmcv.impad(img, shape=self.size, pad_val=self.pad_val) for img in results['img']] elif self.size_divisor is not None: - padded_img = [mmcv.impad_to_multiple( - img, self.size_divisor, pad_val=self.pad_val) for img in results['img']] + padded_img = [mmcv.impad_to_multiple(img, self.size_divisor, pad_val=self.pad_val) for img in results['img']] - results['ori_shape'] = [img.shape for img in results['img']] results['img'] = padded_img results['img_shape'] = [img.shape for img in padded_img] results['pad_shape'] = [img.shape for img in padded_img] - results['pad_fixed_size'] = self.size - results['pad_size_divisor'] = self.size_divisor def __call__(self, results): """Call function to pad images, masks, semantic segmentation maps. @@ -102,8 +60,8 @@ def __repr__(self): @PIPELINES.register_module() class NormalizeMultiviewImage(object): - """Normalize the image. - Added key is "img_norm_cfg". + """ + Normalize the image and add key "img_norm_cfg". Args: mean (sequence): Mean values of 3 channels. std (sequence): Std values of 3 channels. @@ -118,7 +76,8 @@ def __init__(self, mean, std, to_rgb=True): def __call__(self, results): - """Call function to normalize images. + """ + Call function to normalize images. Args: results (dict): Result dict from loading pipeline. Returns: @@ -127,8 +86,7 @@ def __call__(self, results): """ results['img'] = [mmcv.imnormalize(img, self.mean, self.std, self.to_rgb) for img in results['img']] - results['img_norm_cfg'] = dict( - mean=self.mean, std=self.std, to_rgb=self.to_rgb) + results['img_norm_cfg'] = dict(mean=self.mean, std=self.std, to_rgb=self.to_rgb) return results def __repr__(self): @@ -236,81 +194,112 @@ def __repr__(self): repr_str += f'hue_delta={self.hue_delta})' return repr_str +@PIPELINES.register_module() +class RandomScaleImageMultiViewImage(object): + """ + Random scale the image + Args: + scales (list): List of scales to choose from. + If the list contains only one scale, then the scale is fixed. + """ + + def __init__(self, scales=[]): + self.scales = scales + + def __call__(self, results): + """ + Call function to pad images, masks, semantic segmentation maps. + Args: + results (dict): Result dict from loading pipeline. + Returns: + dict: Updated result dict. 
+ """ + + # Step 1: sample a random scale + rand_scale = np.random.choice(self.scales) + + # Step 2: scale the image + y_size = [int(img.shape[0] * rand_scale) for img in results['img']] + x_size = [int(img.shape[1] * rand_scale) for img in results['img']] + results['img'] = [mmcv.imresize(img, (x_size[idx], y_size[idx]), return_scale=False) for idx, img in enumerate(results['img'])] + + # Step 3: update the `lidar2img` and `intrinsic` transformation matrix + scale_factor = np.eye(4) + scale_factor[0, 0] *= rand_scale + scale_factor[1, 1] *= rand_scale + lidar2img = [scale_factor @ l2i for l2i in results['lidar2img']] + results['lidar2img'] = lidar2img + cam_intrinsic = [scale_factor @ cam_intr for cam_intr in results['cam_intrinsic']] + results['cam_intrinsic'] = cam_intrinsic + + # Step 4: update the image shape + results['img_shape'] = [img.shape for img in results['img']] + + return results + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(size={self.scales}, ' + return repr_str + @PIPELINES.register_module() class CustomCollect3D(object): - """Collect data from the loader relevant to the specific task. - This is usually the last stage of the data loader pipeline. Typically keys - is set to some subset of "img", "proposals", "gt_bboxes", - "gt_bboxes_ignore", "gt_labels", and/or "gt_masks". - The "img_meta" item is always populated. The contents of the "img_meta" - dictionary depends on "meta_keys". By default this includes: - - 'img_shape': shape of the image input to the network as a tuple \ - (h, w, c). Note that images may be zero padded on the \ - bottom/right if the batch tensor is larger than this shape. - - 'scale_factor': a float indicating the preprocessing scale - - 'flip': a boolean indicating if image flip transform was used - - 'filename': path to the image file - - 'ori_shape': original shape of the image as a tuple (h, w, c) - - 'pad_shape': image shape after padding - - 'lidar2img': transform from lidar to image - - 'depth2img': transform from depth to image - - 'cam2img': transform from camera to image - - 'pcd_horizontal_flip': a boolean indicating if point cloud is \ - flipped horizontally - - 'pcd_vertical_flip': a boolean indicating if point cloud is \ - flipped vertically - - 'box_mode_3d': 3D box mode - - 'box_type_3d': 3D box type - - 'img_norm_cfg': a dict of normalization information: - - mean: per channel mean subtraction - - std: per channel std divisor - - to_rgb: bool indicating if bgr was converted to rgb - - 'pcd_trans': point cloud transformations - - 'sample_idx': sample index - - 'pcd_scale_factor': point cloud scale factor - - 'pcd_rotation': rotation applied to point cloud - - 'pts_filename': path to point cloud file. + """ + Collect data from the loader relevant to the specific task. + This is usually the last stage of the data loader pipeline. Args: - keys (Sequence[str]): Keys of results to be collected in ``data``. + keys (Sequence[str]): Keys of results to be collected in `data`. meta_keys (Sequence[str], optional): Meta keys to be converted to - ``mmcv.DataContainer`` and collected in ``data[img_metas]``. - Default: ('filename', 'ori_shape', 'img_shape', 'lidar2img', - 'depth2img', 'cam2img', 'pad_shape', 'scale_factor', 'flip', - 'pcd_horizontal_flip', 'pcd_vertical_flip', 'box_mode_3d', - 'box_type_3d', 'img_norm_cfg', 'pcd_trans', - 'sample_idx', 'pcd_scale_factor', 'pcd_rotation', 'pts_filename') + `mmcv.DataContainer` and collected in `data[img_metas]`. 
""" - def __init__(self, - keys, - meta_keys=('filename', 'ori_shape', 'img_shape', 'lidar2img','ego2lidar', - 'depth2img', 'cam2img', 'pad_shape', - 'scale_factor', 'flip', 'pcd_horizontal_flip', - 'pcd_vertical_flip', 'box_mode_3d', 'box_type_3d', - 'img_norm_cfg', 'pcd_trans', 'sample_idx', 'prev_idx', 'next_idx', - 'pcd_scale_factor', 'pcd_rotation', 'pts_filename', - 'transformation_3d_flow', 'scene_token', - 'can_bus', - )): + def __init__(self, keys=None, meta_keys=None,): self.keys = keys self.meta_keys = meta_keys + assert self.meta_keys is not None, 'meta_keys must be set' def __call__(self, results): - """Call function to collect keys in results. The keys in ``meta_keys`` + """ + Call function to collect keys in results. The keys in `meta_keys` will be converted to :obj:`mmcv.DataContainer`. Args: results (dict): Result dict contains the data to collect. Returns: - dict: The result dict contains the following keys - - keys in ``self.keys`` - - ``img_metas`` + dict: The result dict contains the following keys in `self.keys` and `img_metas` """ - + + if 'rots' in self.meta_keys: + sensor2ego_list = results['sensor2ego'] # list of 4x4 matrices + rots_list = [sensor2ego[:3, :3] for sensor2ego in sensor2ego_list] # list of 3x3 matrices + rots = np.stack(rots_list, axis=0) # (num_cams, 3, 3) + results['rots'] = rots + trans_list = [sensor2ego[:3, 3] for sensor2ego in sensor2ego_list] # list of 3x1 vectors + trans = np.stack(trans_list, axis=0) # (num_cams, 3) + results['trans'] = trans + + cam_intrinsic_list = results['cam_intrinsic'] # list of 4x4 matrices + intrins_list = [cam_intrinsic[:3, :3] for cam_intrinsic in cam_intrinsic_list] # list of 3x3 matrices + intrins = np.stack(intrins_list, axis=0) # (num_cams, 3, 3) + results['intrins'] = intrins + + post_rots_list = [np.eye(3) for sensor2ego in sensor2ego_list] # list of 3x3 matrices + post_rots = np.stack(post_rots_list, axis=0) # (num_cams, 3, 3) + results['post_rots'] = post_rots + post_trans_list = [np.zeros(3) for sensor2ego in sensor2ego_list] # list of 3x1 vectors + post_trans = np.stack(post_trans_list, axis=0) # (num_cams, 3) + results['post_trans'] = post_trans + + frame_id = results['sample_idx'] % 1000 + if frame_id == 0: + results['start_of_sequence'] = 1 + else: + results['start_of_sequence'] = 0 + results['sequence_group_idx'] = results['sample_idx'] % 1000000 // 1000 + data = {} img_metas = {} - + for key in self.meta_keys: if key in results: img_metas[key] = results[key] @@ -320,50 +309,8 @@ def __call__(self, results): data[key] = results[key] return data + def __repr__(self): """str: Return a string that describes the module.""" return self.__class__.__name__ + \ - f'(keys={self.keys}, meta_keys={self.meta_keys})' - - - -@PIPELINES.register_module() -class RandomScaleImageMultiViewImage(object): - """Random scale the image - Args: - scales - """ - - def __init__(self, scales=[]): - self.scales = scales - assert len(self.scales)==1 - - def __call__(self, results): - """Call function to pad images, masks, semantic segmentation maps. - Args: - results (dict): Result dict from loading pipeline. - Returns: - dict: Updated result dict. 
- """ - rand_ind = np.random.permutation(range(len(self.scales)))[0] - rand_scale = self.scales[rand_ind] - - y_size = [int(img.shape[0] * rand_scale) for img in results['img']] - x_size = [int(img.shape[1] * rand_scale) for img in results['img']] - scale_factor = np.eye(4) - scale_factor[0, 0] *= rand_scale - scale_factor[1, 1] *= rand_scale - results['img'] = [mmcv.imresize(img, (x_size[idx], y_size[idx]), return_scale=False) for idx, img in - enumerate(results['img'])] - lidar2img = [scale_factor @ l2i for l2i in results['lidar2img']] - results['lidar2img'] = lidar2img - results['img_shape'] = [img.shape for img in results['img']] - results['ori_shape'] = [img.shape for img in results['img']] - - return results - - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(size={self.scales}, ' - return repr_str \ No newline at end of file + f'(keys={self.keys}, meta_keys={self.meta_keys})' \ No newline at end of file diff --git a/projects/mmdet3d_plugin/datasets/samplers/__init__.py b/projects/mmdet3d_plugin/datasets/samplers/__init__.py index bb2a0b1..251ef6c 100644 --- a/projects/mmdet3d_plugin/datasets/samplers/__init__.py +++ b/projects/mmdet3d_plugin/datasets/samplers/__init__.py @@ -1,4 +1,5 @@ from .group_sampler import DistributedGroupSampler from .distributed_sampler import DistributedSampler from .sampler import SAMPLER, build_sampler - +from .my_group_batch_sampler import MyGroupBatchSampler +from .nuscene_dataset_detail import * \ No newline at end of file diff --git a/projects/mmdet3d_plugin/datasets/samplers/my_group_batch_sampler.py b/projects/mmdet3d_plugin/datasets/samplers/my_group_batch_sampler.py new file mode 100644 index 0000000..bd3e2a2 --- /dev/null +++ b/projects/mmdet3d_plugin/datasets/samplers/my_group_batch_sampler.py @@ -0,0 +1,137 @@ +import itertools +import copy + +import numpy as np +import torch +import torch.distributed as dist +from mmcv.runner import get_dist_info +from torch.utils.data.sampler import Sampler +# from .nuscene_dataset_detail import new_train_data_scene_size_list, new_group_idx_to_sample_idxs +from .waymo_dataset_detail import train_data_group_flag, new_group_idx_to_sample_idxs + +def sync_random_seed(seed=None, device='cuda'): + """Make sure different ranks share the same seed. + All workers must call this function, otherwise it will deadlock. + This method is generally used in `DistributedSampler`, + because the seed should be identical across all processes + in the distributed group. + In distributed sampling, different ranks should sample non-overlapped + data in the dataset. Therefore, this function is used to make sure that + each rank shuffles the data indices in the same order based + on the same seed. Then different ranks could use different indices + to select non-overlapped data from the same data list. + Args: + seed (int, Optional): The seed. Default to None. + device (str): The device where the seed will be put on. + Default to 'cuda'. + Returns: + int: Seed to be used. + """ + if seed is None: + seed = np.random.randint(2**31) + assert isinstance(seed, int) + + rank, world_size = get_dist_info() + + if world_size == 1: + return seed + + if rank == 0: + random_num = torch.tensor(seed, dtype=torch.int32, device=device) + else: + random_num = torch.tensor(0, dtype=torch.int32, device=device) + dist.broadcast(random_num, src=0) + return random_num.item() + +class MyGroupBatchSampler(Sampler): + """ + Pardon this horrendous name. Basically, we want every sample to be from its own group. 
+    If batch size is 4 and the number of GPUs is 8, each of these 32 samples should be
+    operating on its own group.
+
+    Shuffling is only done for group order, not within groups.
+    """
+
+    def __init__(self,
+                 dataset,
+                 batch_size=1,
+                 world_size=None,
+                 rank=None,
+                 seed=0,
+                 total_epochs=8,
+                 load_interval=1,
+                 train_process=True):
+
+        _rank, _world_size = get_dist_info()
+        if world_size is None:
+            world_size = _world_size
+        if rank is None:
+            rank = _rank
+        self.total_epochs = total_epochs
+
+        self.final_data_group_size = train_data_group_flag
+
+        self.dataset = dataset
+        self.batch_size = batch_size
+        assert self.batch_size == 1, 'MyGroupBatchSampler only supports batch_size == 1'
+
+        self.world_size = world_size
+        self.rank = rank
+        self.seed = sync_random_seed(seed)
+
+        assert load_interval == 1, 'load_interval != 1 is not supported'
+        self.load_interval = load_interval
+        self.size = len(self.dataset)
+
+        self.group_sizes = np.array(self.final_data_group_size)
+        self.groups_num = len(self.group_sizes)
+        self.global_batch_size = batch_size * world_size
+        assert self.groups_num >= self.global_batch_size
+
+        self.group_idx_to_sample_idxs = new_group_idx_to_sample_idxs
+
+        # Get a generator per sample slot. Considering samples over all
+        # GPUs, each sample position has its own generator.
+        self.group_indices_per_global_sample_idx = [
+            self._group_indices_per_global_sample_idx(self.rank * self.batch_size + local_sample_idx)
+            for local_sample_idx in range(self.batch_size)
+        ]
+
+        # Keep a buffer of dataset sample idxs for each local sample slot, e.g. [[]] when batch_size == 1.
+        self.buffer_per_local_sample = [[] for _ in range(self.batch_size)]
+
+    def _infinite_group_indices(self):
+        total_group_indices_list = []
+        g = torch.Generator()
+        g.manual_seed(self.seed)
+        for i in range(self.total_epochs + 1):  # +1 to cover the final epoch; this sampler is only used for training
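To make the bookkeeping in `__init__` concrete, here is a toy, single-process walk-through: groups map to contiguous sample indices through the same prefix-sum construction that `nuscene_dataset_detail.py` / `waymo_dataset_detail.py` use for `new_group_idx_to_sample_idxs`, the seed shared by `sync_random_seed` drives one `torch.randperm` over groups, and each global sample slot (rank * batch_size + local index) later reads every `global_batch_size`-th entry of that shuffled group stream via `itertools.islice`. All numbers below are made up:

    import itertools
    import torch

    # toy scene lengths; the real lists live in waymo_dataset_detail.py / nuscene_dataset_detail.py
    group_sizes = [3, 2, 4]
    offset, group_idx_to_sample_idxs = 0, {}
    for i, size in enumerate(group_sizes):           # same prefix-sum construction as those modules
        group_idx_to_sample_idxs[i] = list(range(offset, offset + size))
        offset += size
    # {0: [0, 1, 2], 1: [3, 4], 2: [5, 6, 7, 8]}

    g = torch.Generator()
    g.manual_seed(42)                                # stands in for the seed shared via sync_random_seed
    shuffled_groups = torch.randperm(len(group_sizes), generator=g).tolist()

    world_size, batch_size = 2, 1
    global_batch_size = world_size * batch_size
    # rank 0 consumes groups at stream positions 0, 2, 4, ...; rank 1 at positions 1, 3, 5, ...
    rank0_groups = list(itertools.islice(iter(shuffled_groups), 0, None, global_batch_size))
    rank1_groups = list(itertools.islice(iter(shuffled_groups), 1, None, global_batch_size))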
+ each_epoch_group_indices_list = torch.randperm(self.groups_num, generator=g).tolist() + total_group_indices_list += each_epoch_group_indices_list[:self.groups_num // self.load_interval] # self.groups_num // self.load_interval + yield from total_group_indices_list + + def _group_indices_per_global_sample_idx(self, global_sample_idx): + yield from itertools.islice(self._infinite_group_indices(), + global_sample_idx, + None, + self.global_batch_size) + + def __iter__(self): + while True: + curr_batch = [] + for local_sample_idx in range(self.batch_size): + if len(self.buffer_per_local_sample[local_sample_idx]) == 0: + # Finished current group, refill with next group + try: + new_group_idx = next(self.group_indices_per_global_sample_idx[local_sample_idx]) + except StopIteration: + break + self.buffer_per_local_sample[local_sample_idx] = copy.deepcopy(self.group_idx_to_sample_idxs[new_group_idx]) + + curr_batch.append(self.buffer_per_local_sample[local_sample_idx].pop(0)) + if curr_batch == []: + return + yield curr_batch + + def __len__(self): + """Length of base dataset.""" + return self.size + + def set_epoch(self, epoch): + self.epoch = epoch diff --git a/projects/mmdet3d_plugin/datasets/samplers/nuscene_dataset_detail.py b/projects/mmdet3d_plugin/datasets/samplers/nuscene_dataset_detail.py new file mode 100644 index 0000000..dbb8987 --- /dev/null +++ b/projects/mmdet3d_plugin/datasets/samplers/nuscene_dataset_detail.py @@ -0,0 +1,20 @@ +import json +import os + +train_split_scene_idx_list = [1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 138, 139, 149, 150, 151, 152, 154, 155, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 187, 188, 190, 191, 192, 193, 194, 195, 196, 199, 200, 202, 203, 204, 206, 207, 208, 209, 210, 211, 212, 213, 214, 218, 219, 220, 222, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 315, 316, 317, 318, 321, 323, 324, 328, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 405, 406, 407, 408, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 461, 462, 463, 464, 465, 467, 468, 469, 471, 472, 474, 475, 476, 477, 478, 479, 480, 499, 500, 501, 502, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 517, 518, 525, 526, 527, 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 541, 542, 543, 544, 545, 546, 566, 568, 570, 571, 572, 573, 574, 575, 576, 577, 578, 580, 582, 583, 584, 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595, 596, 597, 598, 599, 600, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 
649, 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 670, 671, 672, 673, 674, 675, 676, 677, 678, 679, 681, 683, 684, 685, 686, 687, 688, 689, 695, 696, 697, 698, 700, 701, 703, 704, 705, 706, 707, 708, 709, 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 726, 727, 728, 730, 731, 733, 734, 735, 736, 737, 738, 739, 740, 741, 744, 746, 747, 749, 750, 751, 752, 757, 758, 759, 760, 761, 762, 763, 764, 765, 767, 768, 769, 786, 787, 789, 790, 791, 792, 803, 804, 805, 806, 808, 809, 810, 811, 812, 813, 815, 816, 817, 819, 820, 821, 822, 847, 848, 849, 850, 851, 852, 853, 854, 855, 856, 858, 860, 861, 862, 863, 864, 865, 866, 868, 869, 870, 871, 872, 873, 875, 876, 877, 878, 880, 882, 883, 884, 885, 886, 887, 888, 889, 890, 891, 892, 893, 894, 895, 896, 897, 898, 899, 900, 901, 902, 903, 945, 947, 949, 952, 953, 955, 956, 957, 958, 959, 960, 961, 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 988, 989, 990, 991, 992, 994, 995, 996, 997, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023, 1024, 1025, 1044, 1045, 1046, 1047, 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1074, 1075, 1076, 1077, 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1093, 1094, 1095, 1096, 1097, 1098, 1099, 1100, 1101, 1102, 1104, 1105, 1106, 1107, 1108, 1109, 1110] + +val_split_scene_idx_list = [3, 12, 13, 14, 15, 16, 17, 18, 35, 36, 38, 39, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 221, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 329, 330, 331, 332, 344, 345, 346, 519, 520, 521, 522, 523, 524, 552, 553, 554, 555, 556, 557, 558, 559, 560, 561, 562, 563, 564, 565, 625, 626, 627, 629, 630, 632, 633, 634, 635, 636, 637, 638, 770, 771, 775, 777, 778, 780, 781, 782, 783, 784, 794, 795, 796, 797, 798, 799, 800, 802, 904, 905, 906, 907, 908, 909, 910, 911, 912, 913, 914, 915, 916, 917, 919, 920, 921, 922, 923, 924, 925, 926, 927, 928, 929, 930, 931, 962, 963, 966, 967, 968, 969, 971, 972, 1059, 1060, 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073] + +train_data_scene_size_list = [40, 40, 40, 39, 40, 39, 40, 40, 40, 40, 40, 39, 40, 40, 40, 40, 40, 39, 40, 40, 40, 40, 40, 40, 40, 39, 40, 41, 39, 40, 39, 39, 40, 40, 39, 39, 39, 40, 39, 39, 40, 41, 39, 39, 41, 40, 39, 39, 39, 40, 40, 39, 40, 39, 40, 40, 40, 40, 40, 40, 39, 40, 40, 40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 40, 40, 40, 39, 40, 39, 39, 39, 40, 40, 39, 40, 40, 40, 40, 39, 40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 39, 40, 39, 40, 39, 40, 40, 40, 40, 40, 39, 32, 40, 40, 40, 40, 40, 40, 40, 40, 39, 39, 40, 39, 39, 40, 40, 39, 40, 39, 40, 39, 39, 40, 39, 40, 40, 39, 40, 40, 39, 40, 40, 40, 40, 40, 40, 40, 39, 40, 39, 40, 40, 39, 40, 40, 40, 40, 39, 40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 40, 40, 40, 40, 40, 40, 39, 40, 40, 39, 40, 40, 39, 39, 40, 40, 40, 40, 40, 39, 40, 40, 39, 39, 39, 39, 40, 40, 40, 39, 39, 40, 39, 39, 40, 40, 39, 39, 39, 39, 41, 40, 40, 40, 40, 40, 41, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 41, 40, 41, 40, 40, 39, 41, 40, 40, 40, 40, 40, 40, 41, 40, 40, 40, 40, 40, 40, 40, 40, 40, 41, 41, 40, 40, 40, 41, 40, 40, 40, 41, 40, 41, 41, 41, 41, 40, 41, 40, 41, 41, 41, 41, 40, 41, 41, 41, 41, 41, 41, 40, 41, 41, 40, 41, 41, 40, 40, 40, 41, 41, 41, 40, 41, 41, 41, 40, 41, 40, 41, 41, 40, 40, 40, 41, 
40, 41, 41, 41, 41, 41, 41, 41, 40, 41, 41, 40, 41, 40, 40, 40, 40, 41, 41, 41, 40, 41, 40, 40, 40, 39, 41, 39, 40, 40, 41, 41, 40, 41, 41, 40, 40, 40, 41, 40, 41, 40, 41, 41, 40, 41, 41, 41, 40, 40, 41, 41, 40, 41, 41, 41, 41, 40, 40, 41, 41, 41, 41, 41, 40, 41, 41, 41, 40, 40, 40, 41, 41, 41, 40, 41, 40, 41, 41, 41, 40, 41, 40, 41, 41, 40, 40, 41, 41, 40, 41, 40, 41, 40, 40, 40, 41, 41, 40, 41, 40, 41, 40, 41, 40, 41, 41, 41, 40, 41, 40, 41, 40, 40, 40, 40, 40, 40, 41, 41, 41, 40, 41, 41, 41, 41, 41, 40, 41, 40, 40, 41, 41, 41, 40, 40, 41, 41, 41, 41, 41, 40, 40, 40, 40, 41, 40, 41, 40, 40, 40, 41, 40, 40, 41, 40, 41, 40, 40, 41, 40, 40, 40, 40, 40, 40, 40, 40, 40, 41, 40, 40, 40, 41, 40, 41, 41, 41, 41, 40, 41, 41, 41, 41, 41, 41, 41, 40, 40, 41, 41, 40, 41, 40, 41, 40, 41, 41, 40, 40, 40, 40, 40, 40, 40, 41, 40, 41, 40, 40, 40, 40, 41, 40, 40, 41, 40, 41, 40, 40, 40, 40, 41, 40, 41, 40, 41, 41, 40, 41, 41, 40, 41, 41, 40, 41, 41, 41, 41, 41, 41, 40, 41, 41, 40, 40, 40, 39, 40, 41, 40, 41, 40, 40, 41, 40, 40, 40, 40, 40, 40, 40, 40, 41, 40, 40, 40, 40, 40, 41, 40, 40, 41, 40, 40, 40, 40, 40, 40, 41, 40, 41, 40, 41, 41, 40, 40, 41, 40, 41, 41, 41, 40, 40, 41, 40, 41, 41, 41, 40, 40, 40, 41, 40, 40, 41, 41, 40, 41, 41, 40, 41, 40, 40, 41, 40, 40, 40, 40, 41, 40, 40, 40, 41, 40, 40, 40, 40, 40, 40, 40, 41, 41, 40, 41, 40, 40, 40, 40, 40, 41, 40, 41, 40, 41, 40, 40, 40, 41, 41, 40, 40, 40, 40, 40, 40, 41, 41, 40, 40, 40, 40, 40, 41, 41, 41, 40, 40, 41, 40, 40, 40, 40, 40, 40, 41, 40, 40, 41, 40, 39, 40] + +sorted_scene_idx_list = [161, 162, 163, 164, 165, 166, 167, 168, 170, 171, 172, 173, 174, 175, 176, 1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 199, 200, 202, 203, 204, 206, 207, 208, 209, 210, 211, 212, 213, 214, 315, 316, 317, 318, 177, 178, 179, 180, 181, 182, 183, 184, 185, 187, 188, 218, 220, 222, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 190, 191, 192, 193, 194, 195, 196, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 138, 139, 149, 150, 151, 152, 154, 155, 157, 158, 159, 160, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 382, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 375, 373, 374, 376, 377, 378, 379, 380, 381, 383, 384, 385, 386, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 653, 654, 655, 656, 657, 658, 659, 660, 661, 662, 663, 664, 328, 499, 500, 501, 502, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 665, 666, 667, 668, 669, 670, 671, 672, 673, 321, 674, 675, 676, 677, 678, 679, 681, 683, 684, 685, 686, 323, 687, 688, 689, 324, 514, 515, 517, 518, 695, 696, 697, 698, 700, 701, 703, 704, 705, 706, 707, 708, 709, 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 726, 727, 728, 730, 731, 733, 734, 735, 736, 737, 738, 739, 740, 741, 744, 746, 747, 749, 750, 751, 752, 757, 758, 759, 760, 761, 762, 763, 764, 765, 767, 768, 769, 525, 526, 527, 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 541, 542, 543, 544, 545, 546, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 
459, 461, 462, 463, 464, 465, 467, 468, 469, 471, 472, 474, 475, 476, 477, 478, 479, 480, 566, 568, 570, 571, 572, 573, 574, 575, 576, 577, 578, 580, 582, 583, 584, 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595, 596, 597, 598, 599, 600, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, 868, 869, 870, 871, 872, 873, 875, 876, 877, 878, 880, 882, 883, 884, 885, 886, 887, 888, 889, 890, 891, 892, 893, 894, 895, 896, 897, 898, 899, 900, 901, 902, 903, 803, 804, 805, 806, 808, 809, 810, 811, 812, 813, 815, 816, 817, 819, 820, 821, 822, 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 988, 989, 990, 991, 945, 947, 949, 952, 953, 955, 956, 957, 958, 959, 960, 961, 399, 400, 401, 402, 403, 405, 406, 407, 408, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 786, 787, 789, 790, 791, 792, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 847, 848, 849, 850, 851, 852, 853, 854, 855, 856, 858, 860, 861, 862, 863, 864, 865, 866, 992, 994, 995, 996, 997, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023, 1024, 1025, 1044, 1045, 1046, 1047, 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1074, 1075, 1076, 1077, 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1093, 1094, 1095, 1096, 1097, 1098, 1099, 1100, 1101, 1102, 1104, 1105, 1106, 1107, 1108, 1109, 1110] + +new_train_data_scene_size_list = [40, 40, 40, 39, 40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 39, 40, 40, 40, 39, 40, 39, 40, 40, 40, 40, 40, 39, 40, 40, 40, 40, 40, 39, 40, 40, 40, 40, 40, 40, 40, 39, 40, 41, 39, 40, 39, 39, 40, 40, 39, 39, 39, 40, 39, 39, 40, 41, 39, 39, 41, 40, 39, 40, 39, 39, 40, 39, 39, 40, 40, 39, 40, 39, 40, 39, 39, 39, 39, 39, 39, 40, 39, 40, 39, 40, 40, 40, 40, 40, 39, 32, 39, 40, 40, 40, 39, 40, 40, 39, 40, 40, 40, 40, 40, 40, 40, 39, 40, 39, 40, 40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 40, 40, 40, 39, 40, 39, 39, 39, 40, 40, 39, 40, 39, 39, 40, 40, 39, 40, 39, 40, 40, 40, 40, 40, 40, 39, 40, 39, 40, 39, 39, 40, 40, 39, 39, 39, 39, 41, 40, 40, 40, 40, 40, 40, 41, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 41, 41, 40, 40, 39, 41, 40, 40, 40, 40, 40, 40, 40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 40, 39, 40, 40, 40, 40, 40, 40, 41, 40, 41, 40, 41, 40, 40, 40, 40, 40, 40, 41, 39, 40, 40, 39, 41, 39, 40, 40, 41, 41, 40, 41, 41, 40, 40, 41, 41, 40, 41, 41, 41, 41, 41, 40, 40, 41, 40, 40, 41, 41, 41, 40, 40, 41, 41, 41, 40, 41, 41, 40, 40, 40, 41, 40, 41, 40, 40, 40, 41, 40, 41, 40, 40, 40, 41, 40, 40, 41, 40, 41, 40, 40, 41, 40, 40, 40, 40, 40, 40, 40, 40, 40, 41, 40, 40, 40, 41, 40, 41, 41, 41, 41, 40, 41, 41, 41, 41, 41, 41, 41, 40, 40, 41, 41, 40, 41, 40, 41, 40, 41, 41, 40, 41, 41, 40, 41, 41, 41, 40, 40, 41, 41, 40, 41, 41, 41, 41, 40, 40, 41, 41, 41, 40, 39, 40, 40, 40, 40, 40, 40, 39, 40, 40, 39, 40, 40, 39, 39, 40, 40, 40, 40, 40, 39, 40, 40, 41, 40, 40, 40, 40, 40, 40, 40, 40, 40, 41, 41, 40, 41, 41, 41, 40, 41, 40, 41, 41, 40, 40, 40, 41, 40, 41, 41, 41, 41, 41, 41, 41, 40, 41, 41, 40, 41, 40, 40, 40, 40, 41, 41, 41, 40, 41, 40, 41, 41, 40, 41, 41, 41, 40, 40, 40, 41, 41, 41, 40, 41, 40, 41, 41, 41, 40, 41, 40, 41, 41, 40, 40, 41, 41, 40, 41, 40, 41, 40, 40, 40, 41, 41, 40, 41, 40, 41, 40, 41, 40, 41, 41, 41, 41, 40, 41, 41, 40, 40, 40, 39, 40, 41, 40, 41, 40, 40, 41, 40, 40, 40, 40, 40, 40, 40, 40, 41, 
40, 40, 40, 40, 40, 41, 40, 40, 40, 41, 40, 41, 40, 40, 40, 40, 41, 40, 40, 41, 40, 41, 40, 40, 40, 41, 40, 40, 41, 40, 41, 41, 41, 40, 40, 41, 40, 41, 41, 41, 40, 40, 40, 40, 40, 40, 41, 40, 41, 40, 41, 41, 40, 40, 40, 41, 40, 40, 40, 41, 40, 41, 41, 41, 41, 40, 41, 40, 41, 41, 40, 40, 40, 40, 40, 40, 41, 41, 40, 41, 41, 41, 41, 41, 41, 40, 41, 41, 40, 41, 41, 40, 40, 40, 41, 41, 40, 41, 40, 41, 40, 41, 41, 40, 41, 41, 40, 41, 41, 40, 41, 41, 41, 41, 41, 40, 40, 40, 41, 40, 40, 41, 41, 40, 41, 41, 40, 41, 40, 40, 41, 40, 40, 40, 40, 41, 40, 40, 40, 41, 40, 40, 40, 40, 40, 40, 40, 41, 41, 40, 41, 40, 40, 40, 40, 40, 41, 40, 41, 40, 41, 40, 40, 40, 41, 41, 40, 40, 40, 40, 40, 40, 41, 41, 40, 40, 40, 40, 40, 41, 41, 41, 40, 40, 41, 40, 40, 40, 40, 40, 40, 41, 40, 40, 41, 40, 39, 40] + +sum = 0 +a_dict = {} +for i in range(len(new_train_data_scene_size_list)): + a_dict[i] = list(range(sum, sum + new_train_data_scene_size_list[i])) + sum += new_train_data_scene_size_list[i] + +new_group_idx_to_sample_idxs = a_dict diff --git a/projects/mmdet3d_plugin/datasets/samplers/waymo_dataset_detail.py b/projects/mmdet3d_plugin/datasets/samplers/waymo_dataset_detail.py new file mode 100644 index 0000000..ace07b2 --- /dev/null +++ b/projects/mmdet3d_plugin/datasets/samplers/waymo_dataset_detail.py @@ -0,0 +1,14 @@ +train_data_group_flag = [198, 199, 199, 198, 198, 198, 199, 197, 199, 198, 199, 198, 198, 198, 198, 198, 199, 199, 198, 198, 198, 199, 199, 199, 198, 199, 199, 199, 198, 199, 197, 199, 198, 198, 198, 198, 198, 198, 198, 199, 199, 199, 198, 198, 199, 199, 199, 197, 198, 199, 197, 189, 199, 197, 199, 198, 195, 198, 199, 198, 199, 199, 199, 198, 199, 198, 199, 199, 199, 198, 198, 194, 199, 198, 199, 199, 198, 198, 199, 199, 198, 198, 199, 198, 199, 198, 199, 198, 199, 199, 198, 199, 175, 199, 198, 198, 199, 198, 199, 199, 198, 199, 198, 198, 198, 199, 199, 198, 197, 199, 198, 197, 197, 199, 198, 197, 198, 198, 198, 199, 198, 198, 199, 198, 198, 198, 199, 198, 198, 199, 198, 196, 198, 199, 198, 198, 199, 199, 199, 197, 198, 198, 200, 199, 198, 198, 197, 198, 199, 199, 199, 199, 199, 198, 198, 198, 198, 197, 198, 199, 199, 198, 198, 199, 199, 199, 199, 199, 199, 198, 199, 198, 198, 199, 198, 199, 199, 198, 198, 198, 198, 198, 198, 198, 199, 199, 190, 199, 196, 197, 198, 198, 199, 197, 198, 199, 198, 199, 198, 180, 199, 199, 199, 198, 198, 198, 197, 198, 199, 198, 198, 199, 199, 199, 199, 199, 198, 197, 199, 198, 199, 197, 198, 199, 196, 199, 199, 197, 199, 198, 198, 198, 199, 199, 198, 198, 198, 198, 197, 199, 199, 199, 198, 198, 198, 199, 199, 197, 198, 199, 199, 199, 198, 198, 197, 199, 198, 198, 199, 197, 198, 198, 198, 198, 198, 199, 198, 198, 199, 198, 199, 198, 198, 198, 198, 199, 198, 198, 199, 199, 197, 182, 198, 198, 199, 185, 198, 199, 199, 197, 199, 199, 199, 198, 199, 198, 199, 198, 199, 199, 199, 199, 197, 197, 198, 199, 199, 199, 199, 199, 198, 198, 198, 199, 199, 199, 199, 199, 198, 198, 198, 198, 199, 199, 199, 199, 198, 199, 198, 198, 199, 199, 197, 198, 198, 198, 197, 199, 198, 198, 199, 199, 199, 199, 198, 198, 199, 199, 198, 198, 198, 199, 198, 199, 198, 198, 199, 198, 199, 198, 199, 198, 198, 199, 198, 197, 198, 199, 198, 199, 198, 198, 198, 198, 198, 198, 197, 198, 198, 198, 197, 198, 198, 199, 199, 199, 199, 199, 199, 198, 198, 199, 199, 199, 196, 198, 199, 198, 198, 199, 199, 197, 199, 199, 199, 198, 198, 199, 199, 198, 199, 198, 198, 198, 198, 199, 198, 198, 198, 199, 199, 199, 199, 198, 198, 198, 198, 197, 198, 195, 199, 198, 199, 199, 199, 198, 198, 190, 197, 199, 198, 
198, 199, 199, 198, 199, 198, 198, 198, 198, 198, 199, 198, 198, 198, 199, 198, 199, 199, 198, 197, 198, 198, 198, 198, 199, 197, 199, 197, 198, 199, 198, 198, 199, 199, 199, 199, 198, 199, 199, 198, 199, 197, 198, 198, 199, 198, 199, 199, 199, 198, 198, 199, 198, 199, 198, 198, 198, 198, 191, 199, 199, 199, 199, 197, 198, 199, 199, 198, 199, 194, 198, 196, 193, 196, 199, 199, 197, 197, 199, 199, 199, 198, 198, 199, 198, 198, 199, 198, 199, 199, 198, 198, 197, 199, 198, 198, 198, 199, 199, 199, 199, 199, 198, 196, 199, 198, 199, 198, 198, 198, 199, 198, 198, 198, 198, 198, 198, 199, 198, 199, 198, 199, 198, 198, 198, 198, 198, 198, 198, 199, 198, 199, 198, 199, 198, 199, 198, 197, 196, 198, 197, 199, 199, 198, 198, 199, 199, 199, 199, 199, 198, 199, 199, 198, 199, 198, 199, 199, 198, 199, 198, 171, 198, 199, 199, 196, 199, 198, 198, 198, 198, 195, 199, 199, 198, 198, 199, 198, 194, 198, 198, 199, 198, 199, 197, 198, 199, 198, 199, 199, 198, 198, 198, 198, 199, 199, 198, 199, 198, 198, 199, 197, 197, 198, 198, 198, 199, 199, 199, 199, 198, 199, 199, 197, 198, 198, 198, 198, 198, 199, 199, 197, 198, 198, 199, 198, 198, 199, 195, 199, 198, 199, 198, 198, 198, 197, 198, 198, 197, 198, 199, 199, 199, 198, 198, 199, 199, 199, 199, 198, 199, 198, 198, 199, 197, 199, 198, 199, 199, 198, 198, 199, 199, 197, 193, 186, 197, 199, 199, 198, 197, 196, 199, 198, 198, 199, 199, 199, 198, 199, 198, 199, 198, 199, 199, 199, 199, 199, 199, 198, 198, 199, 199, 199, 199, 198, 198, 197, 196, 199, 198, 199, 198, 198, 198, 198, 199, 197, 198, 198, 199, 198, 199, 199, 198, 197, 199, 190, 198, 198, 198, 199, 199, 199, 199, 198, 199, 197, 198, 196, 198, 198, 199, 199, 199, 198, 198, 198, 199, 198, 198, 199, 198, 199, 198, 198, 199, 199, 198, 199, 199, 198, 199, 198, 199, 198, 199] + +# test_data_group_flag = [198, 199, 198, 198, 198, 199, 183, 198, 198, 198, 199, 191, 199, 199, 199, 197, 198, 198, 198, 199, 197, 196, 198, 198, 198, 199, 199, 197, 198, 198, 198, 199, 198, 198, 199, 198, 198, 199, 198, 199, 198, 199, 193, 199, 199, 197, 198, 194, 198, 198, 197, 199, 199, 199, 199, 199, 199, 199, 197, 198, 198, 198, 197, 181, 199, 199, 198, 198, 199, 199, 199, 198, 198, 198, 197, 199, 199, 198, 197, 197, 198, 199, 198, 199, 197, 194, 199, 199, 198, 197, 199, 198, 192, 198, 198, 196, 199, 199, 198, 199, 198, 197, 198, 198, 199, 199, 199, 199, 199, 198, 196, 197, 197, 199, 199, 199, 199, 199, 197, 197, 198, 199, 198, 199, 198, 198, 199, 199, 198, 198, 198, 199, 198, 199, 199, 199, 198, 198, 198, 198, 199, 199, 199, 198, 199, 196, 199, 199, 198, 199, 198, 197, 196, 198, 198, 198, 199, 198, 197, 198, 199, 197, 199, 199, 199, 198, 198, 198, 199, 199, 199, 198, 199, 198, 198, 199, 198, 198, 199, 199, 199, 199, 198, 198, 196, 199, 198, 199, 197, 198, 198, 198, 197, 199, 198, 198, 198, 198, 199, 198, 199, 199] + +waymo_data_flag_dict = {0: 0, 1: 198, 2: 397, 3: 596, 4: 794, 5: 992, 6: 1190, 7: 1389, 8: 1586, 9: 1785, 10: 1983, 11: 2182, 12: 2380, 13: 2578, 14: 2776, 15: 2974, 16: 3172, 17: 3371, 18: 3570, 19: 3768, 20: 3966, 21: 4164, 22: 4363, 23: 4562, 24: 4761, 25: 4959, 26: 5158, 27: 5357, 28: 5556, 29: 5754, 30: 5953, 31: 6150, 32: 6349, 33: 6547, 34: 6745, 35: 6943, 36: 7141, 37: 7339, 38: 7537, 39: 7735, 40: 7934, 41: 8133, 42: 8332, 43: 8530, 44: 8728, 45: 8927, 46: 9126, 47: 9325, 48: 9522, 49: 9720, 50: 9919, 51: 10116, 52: 10305, 53: 10504, 54: 10701, 55: 10900, 56: 11098, 57: 11293, 58: 11491, 59: 11690, 60: 11888, 61: 12087, 62: 12286, 63: 12485, 64: 12683, 65: 12882, 66: 13080, 67: 13279, 68: 13478, 69: 13677, 
70: 13875, 71: 14073, 72: 14267, 73: 14466, 74: 14664, 75: 14863, 76: 15062, 77: 15260, 78: 15458, 79: 15657, 80: 15856, 81: 16054, 82: 16252, 83: 16451, 84: 16649, 85: 16848, 86: 17046, 87: 17245, 88: 17443, 89: 17642, 90: 17841, 91: 18039, 92: 18238, 93: 18413, 94: 18612, 95: 18810, 96: 19008, 97: 19207, 98: 19405, 99: 19604, 100: 19803, 101: 20001, 102: 20200, 103: 20398, 104: 20596, 105: 20794, 106: 20993, 107: 21192, 108: 21390, 109: 21587, 110: 21786, 111: 21984, 112: 22181, 113: 22378, 114: 22577, 115: 22775, 116: 22972, 117: 23170, 118: 23368, 119: 23566, 120: 23765, 121: 23963, 122: 24161, 123: 24360, 124: 24558, 125: 24756, 126: 24954, 127: 25153, 128: 25351, 129: 25549, 130: 25748, 131: 25946, 132: 26142, 133: 26340, 134: 26539, 135: 26737, 136: 26935, 137: 27134, 138: 27333, 139: 27532, 140: 27729, 141: 27927, 142: 28125, 143: 28325, 144: 28524, 145: 28722, 146: 28920, 147: 29117, 148: 29315, 149: 29514, 150: 29713, 151: 29912, 152: 30111, 153: 30310, 154: 30508, 155: 30706, 156: 30904, 157: 31102, 158: 31299, 159: 31497, 160: 31696, 161: 31895, 162: 32093, 163: 32291, 164: 32490, 165: 32689, 166: 32888, 167: 33087, 168: 33286, 169: 33485, 170: 33683, 171: 33882, 172: 34080, 173: 34278, 174: 34477, 175: 34675, 176: 34874, 177: 35073, 178: 35271, 179: 35469, 180: 35667, 181: 35865, 182: 36063, 183: 36261, 184: 36459, 185: 36658, 186: 36857, 187: 37047, 188: 37246, 189: 37442, 190: 37639, 191: 37837, 192: 38035, 193: 38234, 194: 38431, 195: 38629, 196: 38828, 197: 39026, 198: 39225, 199: 39423, 200: 39603, 201: 39802, 202: 40001, 203: 40200, 204: 40398, 205: 40596, 206: 40794, 207: 40991, 208: 41189, 209: 41388, 210: 41586, 211: 41784, 212: 41983, 213: 42182, 214: 42381, 215: 42580, 216: 42779, 217: 42977, 218: 43174, 219: 43373, 220: 43571, 221: 43770, 222: 43967, 223: 44165, 224: 44364, 225: 44560, 226: 44759, 227: 44958, 228: 45155, 229: 45354, 230: 45552, 231: 45750, 232: 45948, 233: 46147, 234: 46346, 235: 46544, 236: 46742, 237: 46940, 238: 47138, 239: 47335, 240: 47534, 241: 47733, 242: 47932, 243: 48130, 244: 48328, 245: 48526, 246: 48725, 247: 48924, 248: 49121, 249: 49319, 250: 49518, 251: 49717, 252: 49916, 253: 50114, 254: 50312, 255: 50509, 256: 50708, 257: 50906, 258: 51104, 259: 51303, 260: 51500, 261: 51698, 262: 51896, 263: 52094, 264: 52292, 265: 52490, 266: 52689, 267: 52887, 268: 53085, 269: 53284, 270: 53482, 271: 53681, 272: 53879, 273: 54077, 274: 54275, 275: 54473, 276: 54672, 277: 54870, 278: 55068, 279: 55267, 280: 55466, 281: 55663, 282: 55845, 283: 56043, 284: 56241, 285: 56440, 286: 56625, 287: 56823, 288: 57022, 289: 57221, 290: 57418, 291: 57617, 292: 57816, 293: 58015, 294: 58213, 295: 58412, 296: 58610, 297: 58809, 298: 59007, 299: 59206, 300: 59405, 301: 59604, 302: 59803, 303: 60000, 304: 60197, 305: 60395, 306: 60594, 307: 60793, 308: 60992, 309: 61191, 310: 61390, 311: 61588, 312: 61786, 313: 61984, 314: 62183, 315: 62382, 316: 62581, 317: 62780, 318: 62979, 319: 63177, 320: 63375, 321: 63573, 322: 63771, 323: 63970, 324: 64169, 325: 64368, 326: 64567, 327: 64765, 328: 64964, 329: 65162, 330: 65360, 331: 65559, 332: 65758, 333: 65955, 334: 66153, 335: 66351, 336: 66549, 337: 66746, 338: 66945, 339: 67143, 340: 67341, 341: 67540, 342: 67739, 343: 67938, 344: 68137, 345: 68335, 346: 68533, 347: 68732, 348: 68931, 349: 69129, 350: 69327, 351: 69525, 352: 69724, 353: 69922, 354: 70121, 355: 70319, 356: 70517, 357: 70716, 358: 70914, 359: 71113, 360: 71311, 361: 71510, 362: 71708, 363: 71906, 364: 72105, 365: 72303, 366: 72500, 367: 72698, 368: 
72897, 369: 73095, 370: 73294, 371: 73492, 372: 73690, 373: 73888, 374: 74086, 375: 74284, 376: 74482, 377: 74679, 378: 74877, 379: 75075, 380: 75273, 381: 75470, 382: 75668, 383: 75866, 384: 76065, 385: 76264, 386: 76463, 387: 76662, 388: 76861, 389: 77060, 390: 77258, 391: 77456, 392: 77655, 393: 77854, 394: 78053, 395: 78249, 396: 78447, 397: 78646, 398: 78844, 399: 79042, 400: 79241, 401: 79440, 402: 79637, 403: 79836, 404: 80035, 405: 80234, 406: 80432, 407: 80630, 408: 80829, 409: 81028, 410: 81226, 411: 81425, 412: 81623, 413: 81821, 414: 82019, 415: 82217, 416: 82416, 417: 82614, 418: 82812, 419: 83010, 420: 83209, 421: 83408, 422: 83607, 423: 83806, 424: 84004, 425: 84202, 426: 84400, 427: 84598, 428: 84795, 429: 84993, 430: 85188, 431: 85387, 432: 85585, 433: 85784, 434: 85983, 435: 86182, 436: 86380, 437: 86578, 438: 86768, 439: 86965, 440: 87164, 441: 87362, 442: 87560, 443: 87759, 444: 87958, 445: 88156, 446: 88355, 447: 88553, 448: 88751, 449: 88949, 450: 89147, 451: 89345, 452: 89544, 453: 89742, 454: 89940, 455: 90138, 456: 90337, 457: 90535, 458: 90734, 459: 90933, 460: 91131, 461: 91328, 462: 91526, 463: 91724, 464: 91922, 465: 92120, 466: 92319, 467: 92516, 468: 92715, 469: 92912, 470: 93110, 471: 93309, 472: 93507, 473: 93705, 474: 93904, 475: 94103, 476: 94302, 477: 94501, 478: 94699, 479: 94898, 480: 95097, 481: 95295, 482: 95494, 483: 95691, 484: 95889, 485: 96087, 486: 96286, 487: 96484, 488: 96683, 489: 96882, 490: 97081, 491: 97279, 492: 97477, 493: 97676, 494: 97874, 495: 98073, 496: 98271, 497: 98469, 498: 98667, 499: 98865, 500: 99056, 501: 99255, 502: 99454, 503: 99653, 504: 99852, 505: 100049, 506: 100247, 507: 100446, 508: 100645, 509: 100843, 510: 101042, 511: 101236, 512: 101434, 513: 101630, 514: 101823, 515: 102019, 516: 102218, 517: 102417, 518: 102614, 519: 102811, 520: 103010, 521: 103209, 522: 103408, 523: 103606, 524: 103804, 525: 104003, 526: 104201, 527: 104399, 528: 104598, 529: 104796, 530: 104995, 531: 105194, 532: 105392, 533: 105590, 534: 105787, 535: 105986, 536: 106184, 537: 106382, 538: 106580, 539: 106779, 540: 106978, 541: 107177, 542: 107376, 543: 107575, 544: 107773, 545: 107969, 546: 108168, 547: 108366, 548: 108565, 549: 108763, 550: 108961, 551: 109159, 552: 109358, 553: 109556, 554: 109754, 555: 109952, 556: 110150, 557: 110348, 558: 110546, 559: 110745, 560: 110943, 561: 111142, 562: 111340, 563: 111539, 564: 111737, 565: 111935, 566: 112133, 567: 112331, 568: 112529, 569: 112727, 570: 112925, 571: 113124, 572: 113322, 573: 113521, 574: 113719, 575: 113918, 576: 114116, 577: 114315, 578: 114513, 579: 114710, 580: 114906, 581: 115104, 582: 115301, 583: 115500, 584: 115699, 585: 115897, 586: 116095, 587: 116294, 588: 116493, 589: 116692, 590: 116891, 591: 117090, 592: 117288, 593: 117487, 594: 117686, 595: 117884, 596: 118083, 597: 118281, 598: 118480, 599: 118679, 600: 118877, 601: 119076, 602: 119274, 603: 119445, 604: 119643, 605: 119842, 606: 120041, 607: 120237, 608: 120436, 609: 120634, 610: 120832, 611: 121030, 612: 121228, 613: 121423, 614: 121622, 615: 121821, 616: 122019, 617: 122217, 618: 122416, 619: 122614, 620: 122808, 621: 123006, 622: 123204, 623: 123403, 624: 123601, 625: 123800, 626: 123997, 627: 124195, 628: 124394, 629: 124592, 630: 124791, 631: 124990, 632: 125188, 633: 125386, 634: 125584, 635: 125782, 636: 125981, 637: 126180, 638: 126378, 639: 126577, 640: 126775, 641: 126973, 642: 127172, 643: 127369, 644: 127566, 645: 127764, 646: 127962, 647: 128160, 648: 128359, 649: 128558, 650: 128757, 651: 128956, 652: 
129154, 653: 129353, 654: 129552, 655: 129749, 656: 129947, 657: 130145, 658: 130343, 659: 130541, 660: 130739, 661: 130938, 662: 131137, 663: 131334, 664: 131532, 665: 131730, 666: 131929, 667: 132127, 668: 132325, 669: 132524, 670: 132719, 671: 132918, 672: 133116, 673: 133315, 674: 133513, 675: 133711, 676: 133909, 677: 134106, 678: 134304, 679: 134502, 680: 134699, 681: 134897, 682: 135096, 683: 135295, 684: 135494, 685: 135692, 686: 135890, 687: 136089, 688: 136288, 689: 136487, 690: 136686, 691: 136884, 692: 137083, 693: 137281, 694: 137479, 695: 137678, 696: 137875, 697: 138074, 698: 138272, 699: 138471, 700: 138670, 701: 138868, 702: 139066, 703: 139265, 704: 139464, 705: 139661, 706: 139854, 707: 140040, 708: 140237, 709: 140436, 710: 140635, 711: 140833, 712: 141030, 713: 141226, 714: 141425, 715: 141623, 716: 141821, 717: 142020, 718: 142219, 719: 142418, 720: 142616, 721: 142815, 722: 143013, 723: 143212, 724: 143410, 725: 143609, 726: 143808, 727: 144007, 728: 144206, 729: 144405, 730: 144604, 731: 144802, 732: 145000, 733: 145199, 734: 145398, 735: 145597, 736: 145796, 737: 145994, 738: 146192, 739: 146389, 740: 146585, 741: 146784, 742: 146982, 743: 147181, 744: 147379, 745: 147577, 746: 147775, 747: 147973, 748: 148172, 749: 148369, 750: 148567, 751: 148765, 752: 148964, 753: 149162, 754: 149361, 755: 149560, 756: 149758, 757: 149955, 758: 150154, 759: 150344, 760: 150542, 761: 150740, 762: 150938, 763: 151137, 764: 151336, 765: 151535, 766: 151734, 767: 151932, 768: 152131, 769: 152328, 770: 152526, 771: 152722, 772: 152920, 773: 153118, 774: 153317, 775: 153516, 776: 153715, 777: 153913, 778: 154111, 779: 154309, 780: 154508, 781: 154706, 782: 154904, 783: 155103, 784: 155301, 785: 155500, 786: 155698, 787: 155896, 788: 156095, 789: 156294, 790: 156492, 791: 156691, 792: 156890, 793: 157088, 794: 157287, 795: 157485, 796: 157684, 797: 157882} +# the value is the begin index of this scene in the dataset + +sum = 0 +a_dict = {} +for i in range(len(train_data_group_flag)): + a_dict[i] = list(range(sum, sum + train_data_group_flag[i])) + sum += train_data_group_flag[i] + +new_group_idx_to_sample_idxs = a_dict \ No newline at end of file diff --git a/projects/mmdet3d_plugin/datasets/waymo_temporal_zlt.py b/projects/mmdet3d_plugin/datasets/waymo_temporal_zlt.py new file mode 100644 index 0000000..df52bf4 --- /dev/null +++ b/projects/mmdet3d_plugin/datasets/waymo_temporal_zlt.py @@ -0,0 +1,380 @@ +import os +import copy +import random +import pickle +from functools import reduce + +from tqdm import tqdm +import numpy as np +import torch +import mmcv +from mmcv.parallel import DataContainer as DC +from mmcv.utils import print_log +from mmdet3d.datasets import DATASETS +from mmdet3d.core.bbox import Box3DMode, points_cam2img +from mmdet3d.datasets.kitti_dataset import KittiDataset +from mmdet3d.core.bbox import get_box_type +from mmdet3d.core.bbox import (Box3DMode, CameraInstance3DBoxes, Coord3DMode, LiDARInstance3DBoxes, points_cam2img) +from nuscenes.eval.common.utils import quaternion_yaw, Quaternion + +from projects.mmdet3d_plugin.datasets.zltwaymo import CustomWaymoDataset +from projects.mmdet3d_plugin.datasets.occ_metrics import Metric_FScore, Metric_mIoU + +@DATASETS.register_module() +class CustomWaymoDataset_T(CustomWaymoDataset): + + CLASSES = ('Car', 'Pedestrian', 'Sign', 'Cyclist') + + def __init__(self, + *args, + load_interval=1, + history_len=1, + input_sample_policy=None, + skip_len=0, + withimage=True, + pose_file=None, + offset=0, + use_streaming=False, + 
**kwargs): + with open(pose_file, 'rb') as f: + pose_all = pickle.load(f) + self.pose_all = pose_all + self.length_waymo = sum([len(scene) for k, scene in pose_all.items()]) + self.history_len = history_len + self.input_sample_policy = input_sample_policy + self.skip_len = skip_len + self.withimage = withimage + self.load_interval_waymo = load_interval + self.length = self.length_waymo + self.offset = offset + self.evaluation_kwargs = kwargs + self.use_streaming = use_streaming + super().__init__(*args, **kwargs) + + def __len__(self): + return self.length_waymo // self.load_interval_waymo + + def __getitem__(self, idx): + if self.test_mode: + return self.prepare_test_data(idx) + if self.use_streaming: + return self.prepare_streaming_train_data(idx) + + while True: + data = self.prepare_train_data(idx) + if data is None: + idx = self._rand_another(idx) + continue + return data + + def prepare_streaming_train_data(self, index): + index = int(index * self.load_interval_waymo) + input_dict = self.get_data_info(index) + if input_dict is None: + return None + self.pre_pipeline(input_dict) + example = self.pipeline(input_dict) + return example + + def prepare_test_data(self, index): + """ + Prepare data for testing. + Args: + index (int): Index for accessing the target data. + + Returns: + dict: Testing data dict of the corresponding index. + """ + + index += self.offset + input_dict = self.get_data_info(index) + self.pre_pipeline(input_dict) + example = self.pipeline(input_dict) + return example + + def get_input_idx(self, idx_list): + ''' + sample the input index list + Args: + idx_list (List[int]): the index list from `index - self.history_len` to `index`. + It contains current frame index, but it dropped another random frame index to add randomness. + So the length is `self.history_len`. 
+ Returns: + sampled_idx_list (List[int]): the index list after sampling + ''' + + if self.input_sample_policy['type'] == 'normal': + return idx_list + + elif self.input_sample_policy['type'] == 'large interval': + sampled_idx_list = [] + for i in range(0, self.input_sample_policy['number']): + sampled_idx = max(0, self.history_len - 1 - i * self.input_sample_policy['interval']) + sampled_idx_list.append(idx_list[sampled_idx]) + return sorted(sampled_idx_list) + + elif self.input_sample_policy['type'] == 'random interval': + fix_interval = self.input_sample_policy['fix interval'] + slow_interval = random.randint(0, fix_interval-1) + random_interval = random.choice([fix_interval, slow_interval]) + + sampled_idx_list = [] + for i in range(0, self.input_sample_policy['number']): + sampled_idx = max(self.history_len - 1 - i * random_interval, 0) + sampled_idx_list.append(idx_list[sampled_idx]) + + return sorted(sampled_idx_list) + + else: + raise NotImplementedError('not implemented input_sample_policy type') + + def prepare_train_data(self, index): + ''' + prepare data for training + Args: + index (Int): the index of the data + Returns: + data (Dict): the data dict for training + ''' + + # Step 1: get the index list of the history data + index *= self.load_interval_waymo + if self.history_len == 1: + idx_list = [index] + else: + queue_start_index = index - self.history_len + idx_list = list(range(queue_start_index, index)) + random.shuffle(idx_list) + idx_list = sorted(idx_list[1:]) # drop one frame to add some randomness + idx_list.append(index) + + # Step 2: sample the index list + i_list = self.get_input_idx(idx_list) + + # Step 3: get the data info according to the index list + data_queue = [] + for i in i_list: + i = max(0, i) + input_dict = self.get_data_info(i) + if input_dict is None: + return None + + # Step 4: prepare the data by dataloader pipeline + self.pre_pipeline(input_dict) + example = self.pipeline(input_dict) + data_queue.append(example) + + # Step 5: union the data_queue into one single sample + if self.filter_empty_gt and (data_queue[0] is None): + return None + if self.withimage: + return self.union2one(data_queue) + else: + return data_queue[-1] + + def union2one(self, queue): + """ + convert sample queue into one single sample. + Args: + queue (List[Dict]): the sample queue + Returns: + queue (Dict): the single sample + """ + + # Step 1: 1. union the `img` tensor into a single tensor. + # 2. union the `img_metas` dict into a dict[dict] + # 3. add prev_bev_exists and scene_token + prev_scene_token=None + imgs_list = [each['img'].data for each in queue] + metas_map = {} + for i, each in enumerate(queue): + metas_map[i] = each['img_metas'].data + if metas_map[i]['sample_idx']//1000 != prev_scene_token: + metas_map[i]['prev_bev_exists'] = False + prev_scene_token = metas_map[i]['sample_idx'] // 1000 + metas_map[i]['scene_token']= prev_scene_token + + else: + metas_map[i]['scene_token'] = prev_scene_token + metas_map[i]['prev_bev_exists'] = True + + # Step 2: pack them together + queue[-1]['img'] = DC(torch.stack(imgs_list), cpu_only=False, stack=True) + queue[-1]['img_metas'] = DC(metas_map, cpu_only=True) + queue = queue[-1] + + return queue + + + def get_data_info(self, index): + ''' + get the data info according to the index. Most of them are image meta data. + Args: + index (Int): the index of the data. + Returns: + input dict (Dict): the data info dict. 
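As a concrete illustration of the `input_sample_policy` options handled in `get_input_idx` above (assuming `history_len = 8`, so `idx_list` holds eight indices ending with the current frame): 'normal' keeps all of them, 'large interval' walks backwards from the newest entry with a fixed stride, and 'random interval' does the same with a stride drawn per call. A sketch of the 'large interval' case with invented frame indices:

    history_len = 8
    idx_list = list(range(100, 108))             # e.g. frames 100..107, current frame last
    policy = {'type': 'large interval', 'number': 3, 'interval': 2}

    sampled = []
    for i in range(policy['number']):
        pos = max(0, history_len - 1 - i * policy['interval'])
        sampled.append(idx_list[pos])
    print(sorted(sampled))                        # [103, 105, 107]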
+        '''
+
+        # Step 1: get the data info
+        info = self.data_infos_full[index]
+
+        # Step 2: get the image file name and idx
+        sample_idx = info['image']['image_idx']
+        scene_idx = sample_idx % 1000000 // 1000
+        frame_idx = sample_idx % 1000000 % 1000
+        img_filename = os.path.join(self.data_root, info['image']['image_path'])
+
+        # Step 3 (kept for reference): the KITTI-style projection below is superseded by
+        # the per-camera matrices computed from the pose file in Step 4.
+        # rect = info['calib']['R0_rect'].astype(np.float32)
+        # Trv2c = info['calib']['Tr_velo_to_cam'].astype(np.float32)
+        # P0 = info['calib']['P0'].astype(np.float32)
+        # lidar2img = P0 @ rect @ Trv2c
+
+        # Tr_velo_to_cam is computed for all images but only stored in the info file for image 0.
+        # Note the image sizes: images 0-2 are 1280x1920, images 3-4 are 886x1920.
+
+        # Step 4: get the image paths, lidar2img, intrinsics, sensor2ego for each image
+        if self.modality['use_camera']:
+            image_paths = []
+            lidar2img_rts = []
+            intrinsics_rts = []
+            sensor2ego_rts = []
+
+            for idx_img in range(self.num_views):
+                pose = self.pose_all[scene_idx][frame_idx][idx_img]
+
+                intrinsics = pose['intrinsics']  # camera intrinsics (sensor -> image), padded to 4x4
+                sensor2ego = pose['sensor2ego']
+                lidar2img = intrinsics @ np.linalg.inv(sensor2ego)
+                ego2global = pose['ego2global']
+
+                # Note: the camera order in the pose file does not match the image folders,
+                # so cameras 2 and 3 are swapped here.
+                if idx_img == 2:
+                    image_paths.append(img_filename.replace('image_0', 'image_3'))
+                elif idx_img == 3:
+                    image_paths.append(img_filename.replace('image_0', 'image_2'))
+                else:
+                    image_paths.append(img_filename.replace('image_0', f'image_{idx_img}'))
+
+                lidar2img_rts.append(lidar2img)
+                intrinsics_rts.append(intrinsics)
+                sensor2ego_rts.append(sensor2ego)
+
+        # Step 5: get the pts filename via `_get_pts_filename` of `CustomWaymoDataset`
+        pts_filename = self._get_pts_filename(sample_idx)
+
+        # Step 6: pack the data info into a dict
+        input_dict = dict(
+            sample_idx=sample_idx,
+            pts_filename=pts_filename,
+            img_prefix=None,
+        )
+
+        if self.modality['use_camera']:
+            input_dict['img_filename'] = image_paths
+            input_dict['lidar2img'] = lidar2img_rts
+            input_dict['cam_intrinsic'] = intrinsics_rts
+            input_dict['sensor2ego'] = sensor2ego_rts
+            ego2global = self.pose_all[scene_idx][frame_idx][0]['ego2global']
+            input_dict['ego2global'] = ego2global
+            input_dict['global_to_curr_lidar_rt'] = np.linalg.inv(pose['ego2global'])
+
+        # Step 7: get the annotation info
+        annos = self.get_ann_info(index)
+        input_dict['ann_info'] = annos
+
+        # Step 8: get the can_bus info (the Waymo dataset has no can_bus info, so use zeros)
+        can_bus = np.zeros(9)
+        input_dict['can_bus'] = can_bus
+
+        return input_dict
+
+    def get_ann_info(self, index):
+        '''
+        Get the annotation info according to the index.
+        Args:
+            index (Int): the index of the data.
+        Returns:
+            annos (Dict): the annotation info dict.
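The per-camera projection above is built from the pose file rather than the KITTI calib: `sensor2ego` maps camera coordinates into the ego frame, so its inverse takes an ego-frame point into the camera, and the padded intrinsic matrix then maps it to pixels. A toy check of that composition (identity rotation and invented numbers, purely to verify the matrix algebra):

    import numpy as np

    intrinsics = np.eye(4)
    intrinsics[:3, :3] = [[2000, 0, 960], [0, 2000, 640], [0, 0, 1]]
    sensor2ego = np.eye(4)
    sensor2ego[:3, 3] = [0.0, 0.0, 2.0]            # toy extrinsic, identity rotation

    lidar2img = intrinsics @ np.linalg.inv(sensor2ego)

    pt_ego = np.array([1.0, 0.5, 12.0, 1.0])       # homogeneous point in the ego/lidar frame
    uvw = lidar2img @ pt_ego                       # camera-frame depth ends up in uvw[2]
    u, v = uvw[0] / uvw[2], uvw[1] / uvw[2]        # (1160.0, 740.0)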
+        '''
+
+        if self.test_mode:
+            info = self.data_infos[index]
+        else:
+            info = self.data_infos_full[index]
+
+        rect = info['calib']['R0_rect'].astype(np.float32)
+        Trv2c = info['calib']['Tr_velo_to_cam'].astype(np.float32)
+
+        annos = info['annos']
+        # we also need the other objects to avoid collisions when sampling
+        annos = self.remove_dontcare(annos)
+
+        loc = annos['location']
+        dims = annos['dimensions']
+        rots = annos['rotation_y']
+        gt_names = annos['name']
+        gt_bboxes_3d = np.concatenate([loc, dims, rots[..., np.newaxis]],
+                                      axis=1).astype(np.float32)
+
+        gt_bboxes_3d = CameraInstance3DBoxes(gt_bboxes_3d).convert_to(
+            self.box_mode_3d, np.linalg.inv(rect @ Trv2c))
+
+        gt_bboxes = annos['bbox']
+
+        selected = self.drop_arrays_by_name(gt_names, ['DontCare'])
+        gt_bboxes = gt_bboxes[selected].astype('float32')
+        gt_names = gt_names[selected]
+        gt_labels = []
+        for cat in gt_names:
+            if cat in self.CLASSES:
+                gt_labels.append(self.CLASSES.index(cat))
+            else:
+                gt_labels.append(-1)
+        gt_labels = np.array(gt_labels).astype(np.int64)
+        gt_labels_3d = copy.deepcopy(gt_labels)
+
+        anns_results = dict(
+            gt_bboxes_3d=gt_bboxes_3d,
+            gt_labels_3d=gt_labels_3d,
+            bboxes=gt_bboxes,
+            labels=gt_labels,
+            gt_names=gt_names)
+        return anns_results
+
+    def evaluate(self, occ_results, metric='mIoU', runner=None, **eval_kwargs):
+        '''
+        This function is called by `tools/test.py` to evaluate the results.
+        Args:
+            occ_results (List[Dict]): the results of the model.
+                `forward_test` of `occformer_waymo.py` returns each occ_result, and
+                `custom_multi_gpu_test` in `projects/mmdet3d_plugin/bevformer/apis/test.py` packs them into a list.
+            metric (Str): the evaluation metric. Defaults to `mIoU`.
+        Returns:
+            None. `occ_eval_metrics.print()` prints the result directly to the terminal.
+        '''
+
+        def run_eval(occ_eval_metrics, runner=None):
+            print('\nStarting Evaluation...')
+            for index, occ_result in enumerate(tqdm(occ_results)):
+                CDist_tensor = occ_result.get('CDist_tensor', None)
+                count_matrix = occ_result['count_matrix']
+                scene_id = occ_result['scene_id']
+                frame_id = occ_result['frame_id']
+                occ_eval_metrics.add_batch(CDist_tensor, count_matrix, scene_id, frame_id)
+            occ_eval_metrics.print(runner=runner)
+
+        if "mIoU" in metric:
+            # Step 1: initialize the mIoU metric
+            occ_eval_metrics = Metric_mIoU(**self.evaluation_kwargs)
+
+            # Step 2: evaluate the results.
+            # Most of the computation already happens in `forward_test`, which directly
+            # returns the `count_matrix`, so the evaluation here is fast.
+            run_eval(occ_eval_metrics, runner=runner)
+
+        elif "FScore" in metric:
+            occ_eval_metrics = Metric_FScore()
+            run_eval(occ_eval_metrics)
+
+        else:
+            raise NotImplementedError
diff --git a/projects/mmdet3d_plugin/datasets/zltwaymo.py b/projects/mmdet3d_plugin/datasets/zltwaymo.py
new file mode 100644
index 0000000..a86865f
--- /dev/null
+++ b/projects/mmdet3d_plugin/datasets/zltwaymo.py
@@ -0,0 +1,590 @@
+import mmcv
+import numpy as np
+import os
+import tempfile
+import torch
+from mmcv.utils import print_log
+from os import path as osp
+# NOTE: an earlier failure in format_results (around lines 236/331) was worked around by keeping the worker count very small
+from mmdet3d.datasets import DATASETS  # be careful to import from mmdet3d.datasets, not mmdet.datasets
+from mmdet3d.core.bbox import Box3DMode, points_cam2img
+from mmdet3d.datasets.kitti_dataset import KittiDataset
+# from .waymo_let_metric import compute_waymo_let_metric
+
+@DATASETS.register_module()
+class CustomWaymoDataset(KittiDataset):
+    """Waymo Dataset.
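The annotation path in `get_ann_info` above boils down to two steps: stacking location, dimensions and yaw into an (N, 7) camera-frame array, which is then wrapped in `CameraInstance3DBoxes` and converted to the LiDAR box type, and mapping class names onto indices of `CLASSES` (unknown names become -1). A stripped-down sketch of those two steps with toy annotation values:

    import numpy as np

    loc = np.array([[10.0, 1.0, 20.0]])            # toy camera-frame box centers
    dims = np.array([[4.5, 1.8, 1.6]])             # toy box dimensions
    rots = np.array([0.3])                         # toy yaw angles

    gt_bboxes_3d = np.concatenate([loc, dims, rots[..., np.newaxis]],
                                  axis=1).astype(np.float32)   # (N, 7), fed to CameraInstance3DBoxes above

    classes = ('Car', 'Pedestrian', 'Sign', 'Cyclist')
    names = np.array(['Car', 'Bus'])
    labels = np.array([classes.index(n) if n in classes else -1 for n in names])  # [0, -1]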
+ + This class serves as the API for experiments on the Waymo Dataset. + + Please refer to ``_for data downloading. + It is recommended to symlink the dataset root to $MMDETECTION3D/data and + organize them as the doc shows. + + Args: + data_root (str): Path of dataset root. + ann_file (str): Path of annotation file. + split (str): Split of input data. + pts_prefix (str, optional): Prefix of points files. + Defaults to 'velodyne'. + pipeline (list[dict], optional): Pipeline used for data processing. + Defaults to None. + classes (tuple[str], optional): Classes used in the dataset. + Defaults to None. + modality (dict, optional): Modality to specify the sensor data used + as input. Defaults to None. + box_type_3d (str, optional): Type of 3D box of this dataset. + Based on the `box_type_3d`, the dataset will encapsulate the box + to its original format then converted them to `box_type_3d`. + Defaults to 'LiDAR' in this dataset. Available options includes + + - 'LiDAR': box in LiDAR coordinates + - 'Depth': box in depth coordinates, usually for indoor dataset + - 'Camera': box in camera coordinates + filter_empty_gt (bool, optional): Whether to filter empty GT. + Defaults to True. + test_mode (bool, optional): Whether the dataset is in test mode. + Defaults to False. + pcd_limit_range (list): The range of point cloud used to filter + invalid predicted boxes. Default: [-85, -85, -5, 85, 85, 5]. + """ + + CLASSES = ('Car', 'Pedestrian', 'Sign', 'Cyclist') + + def __init__(self, + data_root, + ann_file, + split, + num_views=5, + pts_prefix='velodyne', + pipeline=None, + classes=None, + modality=None, + box_type_3d='LiDAR', + filter_empty_gt=True, + test_mode=False, + load_interval=1, + gt_bin = None, + pcd_limit_range=[-85, -85, -5, 85, 85, 5], + **kwargs): + super().__init__( + data_root=data_root, + ann_file=ann_file, + split=split, + pts_prefix=pts_prefix, + pipeline=pipeline, + classes=classes, + modality=modality, + box_type_3d=box_type_3d, + filter_empty_gt=filter_empty_gt, + test_mode=test_mode, + pcd_limit_range=pcd_limit_range, + ) + + self.num_views = num_views + assert self.num_views <= 5 + # to load a subset, just set the load_interval in the dataset config + self.data_infos_full = self.data_infos + self.load_interval = load_interval + self.data_infos = self.data_infos[::load_interval] + if hasattr(self, 'flag'): + self.flag = self.flag[::load_interval] + if test_mode == True: + + if gt_bin != None: + self.gt_bin = gt_bin + # elif load_interval==1 and 'val' in ann_file: + # self.gt_bin = 'gt.bin' + # elif load_interval==5 and 'val' in ann_file: + # self.gt_bin = 'gt_subset.bin' + # elif load_interval==20 and 'train' in ann_file: + # self.gt_bin = 'gt_train_subset.bin' + # else: + # assert gt_bin == 'wrong' + def _get_pts_filename(self, idx): + pts_filename = osp.join(self.root_split, self.pts_prefix, f'{idx:07d}.bin') + return pts_filename + + def get_data_info(self, index): + """Get data info according to the given index. + + Args: + index (int): Index of the sample data to get. + + Returns: + dict: Standard input_dict consists of the + data information. 
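Two small conventions from the constructor above are easy to miss: `load_interval` subsamples the info list (and the grouping `flag`, when present) with a plain stride, and point cloud files are addressed by a zero-padded seven-digit sample index. A sketch with illustrative values and paths:

    import os.path as osp

    data_infos_full = list(range(100))               # stand-in for the loaded info dicts
    load_interval = 5
    data_infos = data_infos_full[::load_interval]    # every 5th frame, 20 samples kept

    root_split, pts_prefix, idx = 'data/waymo/kitti_format/training', 'velodyne', 1000123
    pts_filename = osp.join(root_split, pts_prefix, f'{idx:07d}.bin')
    # 'data/waymo/kitti_format/training/velodyne/1000123.bin'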
+ + - sample_idx (str): sample index + - pts_filename (str): filename of point clouds + - img_prefix (str | None): prefix of image files + - img_info (dict): image info + - lidar2img (list[np.ndarray], optional): transformations from + lidar to different cameras + - ann_info (dict): annotation info + """ + # index=475 # in infos_train.pkl is index 485 + info = self.data_infos[index] + sample_idx = info['image']['image_idx'] + img_filename = os.path.join(self.data_root, + info['image']['image_path']) + + # TODO: consider use torch.Tensor only + rect = info['calib']['R0_rect'].astype(np.float32) + Trv2c = info['calib']['Tr_velo_to_cam'].astype(np.float32) + P0 = info['calib']['P0'].astype(np.float32) + lidar2img = P0 @ rect @ Trv2c + + # the Tr_velo_to_cam is computed for all images but not saved in .info for img1-4 + # the size of img0-2: 1280x1920; img3-4: 886x1920 + if self.modality['use_camera']: + image_paths = [] + lidar2img_rts = [] + + # load calibration for all 5 images. + calib_path = img_filename.replace('image_0', 'calib').replace('.png', '.txt') + Tr_velo_to_cam_list = [] + with open(calib_path, 'r') as f: + lines = f.readlines() + for line_num in range(6, 6 + self.num_views): + trans = np.array([float(info) for info in lines[line_num].split(' ')[1:13]]).reshape(3, 4) + trans = np.concatenate([trans, np.array([[0., 0., 0., 1.]])], axis=0).astype(np.float32) + Tr_velo_to_cam_list.append(trans) + assert np.allclose(Tr_velo_to_cam_list[0], info['calib']['Tr_velo_to_cam'].astype(np.float32)) + + for idx_img in range(self.num_views): + rect = info['calib']['R0_rect'].astype(np.float32) + # Trv2c = info['calib']['Tr_velo_to_cam'].astype(np.float32) + Trv2c = Tr_velo_to_cam_list[idx_img] + P0 = info['calib'][f'P{idx_img}'].astype(np.float32) + lidar2img = P0 @ rect @ Trv2c + + image_paths.append(img_filename.replace('image_0', f'image_{idx_img}')) + lidar2img_rts.append(lidar2img) + + pts_filename = self._get_pts_filename(sample_idx) + input_dict = dict( + sample_idx=sample_idx, + pts_filename=pts_filename, + img_prefix=None, + ) + if self.modality['use_camera']: + input_dict['img_filename'] = image_paths + input_dict['lidar2img'] = lidar2img_rts + input_dict['pose'] = info['pose'] + if not self.test_mode: + annos = self.get_ann_info(index) + input_dict['ann_info'] = annos + + return input_dict + + def format_results(self, + outputs, + pklfile_prefix=None, + submission_prefix=None, + data_format='waymo'): + """Format the results to pkl file. + + Args: + outputs (list[dict]): Testing results of the dataset. + pklfile_prefix (str | None): The prefix of pkl files. It includes + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Default: None. + submission_prefix (str | None): The prefix of submitted files. It + includes the file path and the prefix of filename, e.g., + "a/b/prefix". If not specified, a temp file will be created. + Default: None. + data_format (str | None): Output data format. Default: 'waymo'. + Another supported choice is 'kitti'. + + Returns: + tuple: (result_files, tmp_dir), result_files is a dict containing + the json filepaths, tmp_dir is the temporal directory created + for saving json files when jsonfile_prefix is not specified. 
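The calib parsing above relies on the KITTI-format Waymo export storing one velodyne-to-camera extrinsic per camera starting at the seventh line of the calib file, which is why the loop slices `lines[6 : 6 + num_views]`; each entry is 12 floats forming a row-major 3x4 matrix that is padded to 4x4 before composing `P @ rect @ Trv2c`. A toy re-creation of that parsing (the calib line and matrices are fabricated):

    import numpy as np

    # one fabricated calib line: a tag followed by 12 floats (row-major 3x4)
    line = 'Tr_velo_to_cam_0: 0 -1 0 0 0 0 -1 0 1 0 0 -1.5'
    vals = np.array([float(v) for v in line.split(' ')[1:13]]).reshape(3, 4)
    Trv2c = np.concatenate([vals, np.array([[0., 0., 0., 1.]])], axis=0).astype(np.float32)

    rect = np.eye(4, dtype=np.float32)               # stand-ins for R0_rect and P0
    P0 = np.eye(4, dtype=np.float32)
    lidar2img = P0 @ rect @ Trv2c                    # same composition as in the code above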
+ """ + if pklfile_prefix is None: + tmp_dir = tempfile.TemporaryDirectory() + pklfile_prefix = osp.join(tmp_dir.name, 'results') + else: + tmp_dir = None + + assert ('waymo' in data_format or 'kitti' in data_format), \ + f'invalid data_format {data_format}' + print("still work before format_results --- if not isinstance") + # np.save('debug_eval/zltwaymo_eval_result_before_format_results',outputs) + # print('saved!') + # exit(0) + if (not isinstance(outputs[0], dict)) or 'img_bbox' in outputs[00]: + raise TypeError('Not supported type for reformat results.') + elif 'pts_bbox' in outputs[0]:#we go this way + result_files = dict() + for name in outputs[0]: + results_ = [out[name] for out in outputs] + pklfile_prefix_ = pklfile_prefix + name #saving path + if submission_prefix is not None: + submission_prefix_ = f'{submission_prefix}_{name}' + else: + submission_prefix_ = None + result_files_ = self.bbox2result_kitti(results_, self.CLASSES, + pklfile_prefix_, + submission_prefix_) + result_files[name] = result_files_ + else: + result_files = self.bbox2result_kitti(outputs, self.CLASSES, + pklfile_prefix, + submission_prefix) + # print(result_files) + # np.save('debug_eval/zltwaymo_eval_result_kitti_format',result_files)## turn into cam-coord, it sucks + # exit(0) + # open('zlt_output_kitti_format_debug.txt','w').write(str(result_files)) #we got absolutely right data + # exit(0) + if 'waymo' in data_format: + from .zlt_kitti2waymo import zlt_KITTI2Waymo as KITTI2Waymo + waymo_root = osp.join( + self.data_root.split('kitti_format')[0], 'waymo_format') + if 'train' in self.ann_file: + waymo_tfrecords_dir = osp.join(waymo_root, 'training') + prefix = '0' + elif self.split == 'training': + waymo_tfrecords_dir = osp.join(waymo_root, 'validation') + prefix = '1' + elif self.split == 'testing': + waymo_tfrecords_dir = osp.join(waymo_root, 'testing') + prefix = '2' + else: + raise ValueError('Not supported split value.') + save_tmp_dir = tempfile.TemporaryDirectory() + waymo_results_save_dir = save_tmp_dir.name + waymo_results_final_path = f'{pklfile_prefix}.bin' + print("still work before converter init!!!") + if 'pts_bbox' in result_files:#result_files deprecated + converter = KITTI2Waymo(result_files['pts_bbox'], + waymo_tfrecords_dir, + waymo_results_save_dir, + waymo_results_final_path, prefix) + else: + converter = KITTI2Waymo(result_files, waymo_tfrecords_dir, + waymo_results_save_dir, + waymo_results_final_path, prefix) + print("still work before converter convert!!!") + print(waymo_tfrecords_dir, waymo_results_save_dir, waymo_results_final_path) + # exit(0) + converter.convert() + print("still work after converter convert!!!") + save_tmp_dir.cleanup() + + return result_files, tmp_dir + + def evaluate(self, + results, + metric='waymo', + logger=None, + + pklfile_prefix=None, + submission_prefix=None, + show=False, + out_dir=None, + jsonfile_prefix=None, + pipeline=None): + """Evaluation in KITTI protocol. + + Args: + results (list[dict]): Testing results of the dataset. + metric (str | list[str]): Metrics to be evaluated. + Default: 'waymo'. Another supported metric is 'kitti'. + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. + pklfile_prefix (str | None): The prefix of pkl files. It includes + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Default: None. + submission_prefix (str | None): The prefix of submission datas. 
+ If not specified, the submission data will not be generated. + show (bool): Whether to visualize. + Default: False. + out_dir (str): Path to save the visualization results. + Default: None. + + Returns: + dict[str: float]: results of each evaluation metric + """ + print("metric here is-----------{}".format(metric)) + # np.save('debug_eval/zltwaymo_eval_result',results) + # print('saved!')## result still correct here! + # exit(0) + assert ('waymo' in metric or 'kitti' in metric), \ + f'invalid metric {metric}' + + if 'waymo' in metric: + waymo_root = osp.join( + self.data_root.split('kitti_format')[0], 'waymo_format') + if pklfile_prefix is None: + eval_tmp_dir = tempfile.TemporaryDirectory() + pklfile_prefix = osp.join(eval_tmp_dir.name, 'results') + else: + eval_tmp_dir = None + result_files, tmp_dir = self.format_results( + results, + pklfile_prefix, + submission_prefix, + data_format='waymo')# xxxxxxx not found inside, maybe it's OK + + import shutil + shutil.copy(f'{pklfile_prefix}.bin', 'work_dirs/result.bin') + from time import time + _ = time() + ap_dict = None + print('time usage of compute_let_metric: {} s'.format(time()-_)) + + if eval_tmp_dir is not None: + eval_tmp_dir.cleanup() + + if tmp_dir is not None: + tmp_dir.cleanup() + + if show: + self.show(results, out_dir,pipeline=pipeline) + return ap_dict + + def just_evaluate(self, + metric='waymo', + logger=None, + + pklfile_prefix=None, + submission_prefix=None, + show=False, + out_dir=None, + jsonfile_prefix=None, + pipeline=None): + """Evaluation in KITTI protocol. + + Args: + results (list[dict]): Testing results of the dataset. + metric (str | list[str]): Metrics to be evaluated. + Default: 'waymo'. Another supported metric is 'kitti'. + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. + pklfile_prefix (str | None): The prefix of pkl files. It includes + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Default: None. + submission_prefix (str | None): The prefix of submission datas. + If not specified, the submission data will not be generated. + show (bool): Whether to visualize. + Default: False. + out_dir (str): Path to save the visualization results. + Default: None. + + Returns: + dict[str: float]: results of each evaluation metric + """ + print("metric here is-----------{}".format(metric)) + # np.save('debug_eval/zltwaymo_eval_result',results) + # print('saved!')## result still correct here! + # exit(0) + assert ('waymo' in metric or 'kitti' in metric), \ + f'invalid metric {metric}' + + if 'waymo' in metric: + + from time import time + _ = time() + ap_dict = None# compute_waymo_let_metric(f'data/waymo/waymo_format/gt.bin', 'work_dirs/result.bin') + print('time usage of compute_let_metric: {} s'.format(time() - _)) + + return ap_dict + + + def bbox2result_kitti(self, + net_outputs, + class_names, + pklfile_prefix=None, + submission_prefix=None): + """Convert results to kitti format for evaluation and test submission. + + Args: + net_outputs (List[np.ndarray]): list of array storing the + bbox and score + class_nanes (List[String]): A list of class names + pklfile_prefix (str | None): The prefix of pkl file. + submission_prefix (str | None): The prefix of submission file. 
+ + Returns: + List[dict]: A list of dict have the kitti 3d format + """ + assert len(net_outputs) == len(self.data_infos), \ + 'invalid list length of network outputs' + if submission_prefix is not None: + mmcv.mkdir_or_exist(submission_prefix) + # np.save('debug_eval/zltwaymo_eval_net_outputs',net_outputs) # data_size * [output one frame] + # exit(0) + det_annos = [] + print('\nConverting prediction to KITTI format') + for idx, pred_dicts in enumerate( + mmcv.track_iter_progress(net_outputs)): + annos = [] + info = self.data_infos[idx] + sample_idx = info['image']['image_idx'] + image_shape = info['image']['image_shape'][:2] + # if you are going to replace final result.bin with gt boxes, do it here + box_dict = self.convert_valid_bboxes(pred_dicts, info) + # np.save('debug_eval/zltwaymo_box_dict',box_dict) + # print(box_dict) + # exit(0) + if len(box_dict['bbox']) > 0: + box_2d_preds = box_dict['bbox'] + box_preds = box_dict['box3d_camera'] + scores = box_dict['scores'] + box_preds_lidar = box_dict['box3d_lidar'] + label_preds = box_dict['label_preds'] + + anno = { + 'name': [], + 'truncated': [], + 'occluded': [], + 'alpha': [], + 'bbox': [], + 'dimensions': [], + 'location': [], + 'rotation_y': [], + 'score': [] + } + + for box, box_lidar, bbox, score, label in zip( + box_preds, box_preds_lidar, box_2d_preds, scores, + label_preds): + bbox[2:] = np.minimum(bbox[2:], image_shape[::-1]) + bbox[:2] = np.maximum(bbox[:2], [0, 0]) + anno['name'].append(class_names[int(label)]) + anno['truncated'].append(0.0) + anno['occluded'].append(0) + anno['alpha'].append( + -np.arctan2(-box_lidar[1], box_lidar[0]) + box[6]) + anno['bbox'].append(bbox) + anno['dimensions'].append(box[3:6]) + anno['location'].append(box[:3]) + anno['rotation_y'].append(box[6]) + anno['score'].append(score) + + anno = {k: np.stack(v) for k, v in anno.items()} + annos.append(anno) + + if submission_prefix is not None: + curr_file = f'{submission_prefix}/{sample_idx:07d}.txt' + with open(curr_file, 'w') as f: + bbox = anno['bbox'] + loc = anno['location'] + dims = anno['dimensions'] # lhw -> hwl + + for idx in range(len(bbox)): + print( + '{} -1 -1 {:.4f} {:.4f} {:.4f} {:.4f} ' + '{:.4f} {:.4f} {:.4f} ' + '{:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}'. + format(anno['name'][idx], anno['alpha'][idx], + bbox[idx][0], bbox[idx][1], + bbox[idx][2], bbox[idx][3], + dims[idx][1], dims[idx][2], + dims[idx][0], loc[idx][0], loc[idx][1], + loc[idx][2], anno['rotation_y'][idx], + anno['score'][idx]), + file=f) + else: + annos.append({ + 'name': np.array([]), + 'truncated': np.array([]), + 'occluded': np.array([]), + 'alpha': np.array([]), + 'bbox': np.zeros([0, 4]), + 'dimensions': np.zeros([0, 3]), + 'location': np.zeros([0, 3]), + 'rotation_y': np.array([]), + 'score': np.array([]), + }) + annos[-1]['sample_idx'] = np.array( + [sample_idx] * len(annos[-1]['score']), dtype=np.int64) + + det_annos += annos + + if pklfile_prefix is not None: + if not pklfile_prefix.endswith(('.pkl', '.pickle')): + out = f'{pklfile_prefix}.pkl' + mmcv.dump(det_annos, out) + print(f'Result is saved to {out}.') + + return det_annos + + def convert_valid_bboxes(self, box_dict, info): + """Convert the boxes into valid format. + + Args: + box_dict (dict): Bounding boxes to be converted. + + - boxes_3d (:obj:``LiDARInstance3DBoxes``): 3D bounding boxes. + - scores_3d (np.ndarray): Scores of predicted boxes. + - labels_3d (np.ndarray): Class labels of predicted boxes. + info (dict): Dataset information dictionary. 
+ + Returns: + dict: Valid boxes after conversion. + + - bbox (np.ndarray): 2D bounding boxes (in camera 0). + - box3d_camera (np.ndarray): 3D boxes in camera coordinates. + - box3d_lidar (np.ndarray): 3D boxes in lidar coordinates. + - scores (np.ndarray): Scores of predicted boxes. + - label_preds (np.ndarray): Class labels of predicted boxes. + - sample_idx (np.ndarray): Sample index. + """ + # TODO: refactor this function + box_preds = box_dict['boxes_3d'] + scores = box_dict['scores_3d'] + labels = box_dict['labels_3d'] + sample_idx = info['image']['image_idx'] + # TODO: remove the hack of yaw + box_preds.limit_yaw(offset=0.5, period=np.pi * 2) + + if len(box_preds) == 0: + return dict( + bbox=np.zeros([0, 4]), + box3d_camera=np.zeros([0, 7]), + box3d_lidar=np.zeros([0, 7]), + scores=np.zeros([0]), + label_preds=np.zeros([0, 4]), + sample_idx=sample_idx) + + rect = info['calib']['R0_rect'].astype(np.float32) + Trv2c = info['calib']['Tr_velo_to_cam'].astype(np.float32) + P0 = info['calib']['P0'].astype(np.float32) + P0 = box_preds.tensor.new_tensor(P0) # that is to say, box_2d_pred only projected to cam0 image! + + box_preds_camera = box_preds.convert_to(Box3DMode.CAM, rect @ Trv2c) #box3d in camera coord + + box_corners = box_preds_camera.corners + box_corners_in_image = points_cam2img(box_corners, P0) + # box_corners_in_image: [N, 8, 2] + minxy = torch.min(box_corners_in_image, dim=1)[0] + maxxy = torch.max(box_corners_in_image, dim=1)[0] + box_2d_preds = torch.cat([minxy, maxxy], dim=1) + # Post-processing + # check box_preds + limit_range = box_preds.tensor.new_tensor(self.pcd_limit_range) + valid_pcd_inds = ((box_preds.center > limit_range[:3]) & + (box_preds.center < limit_range[3:])) + valid_inds = valid_pcd_inds.all(-1) + + if valid_inds.sum() > 0: + return dict( + bbox=box_2d_preds[valid_inds, :].numpy(), + box3d_camera=box_preds_camera[valid_inds].tensor.numpy(), + box3d_lidar=box_preds[valid_inds].tensor.numpy(), + scores=scores[valid_inds].numpy(), + label_preds=labels[valid_inds].numpy(), + sample_idx=sample_idx, + ) + else: + return dict( + bbox=np.zeros([0, 4]), + box3d_camera=np.zeros([0, 7]), + box3d_lidar=np.zeros([0, 7]), + scores=np.zeros([0]), + label_preds=np.zeros([0, 4]), + sample_idx=sample_idx, + ) \ No newline at end of file diff --git a/projects/mmdet3d_plugin/models/backbones/__init__.py b/projects/mmdet3d_plugin/models/backbones/__init__.py index cea72f5..c6d9d6f 100644 --- a/projects/mmdet3d_plugin/models/backbones/__init__.py +++ b/projects/mmdet3d_plugin/models/backbones/__init__.py @@ -1,3 +1,3 @@ from .vovnet import VoVNet - -__all__ = ['VoVNet'] \ No newline at end of file +from .bevdetresnet import ResNetForBEVDet_solo +__all__ = ['VoVNet', 'ResNetForBEVDet_solo'] \ No newline at end of file diff --git a/projects/mmdet3d_plugin/models/backbones/bevdetresnet.py b/projects/mmdet3d_plugin/models/backbones/bevdetresnet.py new file mode 100644 index 0000000..c3a6f92 --- /dev/null +++ b/projects/mmdet3d_plugin/models/backbones/bevdetresnet.py @@ -0,0 +1,75 @@ +# Copyright (c) Phigent Robotics. All rights reserved. + +from torch import nn +from mmdet.models.backbones.resnet import Bottleneck, BasicBlock +import torch.utils.checkpoint as checkpoint + +from mmdet.models import BACKBONES + +from mmcv.cnn.bricks.registry import NORM_LAYERS + + +@NORM_LAYERS.register_module() +class IdentityNormLayer(nn.Module): + # Fake norm layer to easily fit into "norm_cfg". Just an identity layer. 
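+    # build_norm_layer passes num_features positionally, so __init__ simply
+    # accepts and ignores any arguments.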
+ def __init__(self, *args, **kwargs): + super().__init__() + + def forward(self, x): + return x + + +@BACKBONES.register_module() +class ResNetForBEVDet_solo(nn.Module): + def __init__(self, numC_input, num_layer=[2,2,2], num_channels=None, stride=[2,2,2], + backbone_output_ids=None, norm_cfg=dict(type='BN'), + with_cp=False, block_type='Basic'): + super(ResNetForBEVDet_solo, self).__init__() + self.numC_input = numC_input + #build backbone + # assert len(num_layer)>=3 + assert len(num_layer)==len(stride) + num_channels = [numC_input*2**(i+1) for i in range(len(num_layer))] \ + if num_channels is None else num_channels + self.backbone_output_ids = range(len(num_layer)) \ + if backbone_output_ids is None else backbone_output_ids + layers = [] + if block_type == 'BottleNeck': + curr_numC = numC_input + for i in range(len(num_layer)): + layer=[Bottleneck(curr_numC, num_channels[i]//4, stride=stride[i], + downsample=nn.Conv2d(curr_numC,num_channels[i],3,stride[i],1), + norm_cfg=norm_cfg)] + curr_numC= num_channels[i] + layer.extend([Bottleneck(curr_numC, curr_numC//4, + norm_cfg=norm_cfg) for _ in range(num_layer[i]-1)]) + layers.append(nn.Sequential(*layer)) + elif block_type == 'Basic': + curr_numC = numC_input + for i in range(len(num_layer)): + layer=[BasicBlock(curr_numC, num_channels[i], stride=stride[i], + downsample=nn.Conv2d(curr_numC,num_channels[i],3,stride[i],1), + norm_cfg=norm_cfg)] + curr_numC= num_channels[i] + layer.extend([BasicBlock(curr_numC, curr_numC, norm_cfg=norm_cfg) for _ in range(num_layer[i]-1)]) + layers.append(nn.Sequential(*layer)) + else: + assert False + self.layers = nn.Sequential(*layers) + + self.with_cp = with_cp + + def forward(self, x): + feats = [] + x_tmp = x + if -1 in self.backbone_output_ids: + feats.append(x) + + for lid, layer in enumerate(self.layers): + if self.with_cp: + x_tmp = checkpoint.checkpoint(layer, x_tmp) + else: + x_tmp = layer(x_tmp) + if lid in self.backbone_output_ids: + feats.append(x_tmp) + return feats \ No newline at end of file diff --git a/projects/mmdet3d_plugin/models/necks/__init__.py b/projects/mmdet3d_plugin/models/necks/__init__.py new file mode 100644 index 0000000..5557bcc --- /dev/null +++ b/projects/mmdet3d_plugin/models/necks/__init__.py @@ -0,0 +1,3 @@ +from .fpn import CustomFPN +from .view_transformer import LSSViewTransformer +from .second_fpn import SECONDFPN_solo \ No newline at end of file diff --git a/projects/mmdet3d_plugin/models/necks/fpn.py b/projects/mmdet3d_plugin/models/necks/fpn.py new file mode 100644 index 0000000..d380816 --- /dev/null +++ b/projects/mmdet3d_plugin/models/necks/fpn.py @@ -0,0 +1,202 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import ConvModule +from mmcv.runner import BaseModule, auto_fp16 + +from mmdet3d.models.builder import NECKS + + +@NECKS.register_module() +class CustomFPN(BaseModule): + r"""Feature Pyramid Network. + + This is an implementation of paper `Feature Pyramid Networks for Object + Detection `_. + + Args: + in_channels (List[int]): Number of input channels per scale. + out_channels (int): Number of output channels (used at each scale) + num_outs (int): Number of output scales. + start_level (int): Index of the start input backbone level used to + build the feature pyramid. Default: 0. + end_level (int): Index of the end input backbone level (exclusive) to + build the feature pyramid. Default: -1, which means the last level. 
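+        out_ids (list[int]): Indices of the lateral levels that are passed
+            through an fpn conv and collected as outputs; this argument is
+            specific to CustomFPN. Default: [].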
+ add_extra_convs (bool | str): If bool, it decides whether to add conv + layers on top of the original feature maps. Default to False. + If True, it is equivalent to `add_extra_convs='on_input'`. + If str, it specifies the source feature map of the extra convs. + Only the following options are allowed + + - 'on_input': Last feat map of neck inputs (i.e. backbone feature). + - 'on_lateral': Last feature map after lateral convs. + - 'on_output': The last output feature map after fpn convs. + relu_before_extra_convs (bool): Whether to apply relu before the extra + conv. Default: False. + no_norm_on_lateral (bool): Whether to apply norm on lateral. + Default: False. + conv_cfg (dict): Config dict for convolution layer. Default: None. + norm_cfg (dict): Config dict for normalization layer. Default: None. + act_cfg (str): Config dict for activation layer in ConvModule. + Default: None. + upsample_cfg (dict): Config dict for interpolate layer. + Default: `dict(mode='nearest')` + init_cfg (dict or list[dict], optional): Initialization config dict. + + Example: + >>> import torch + >>> in_channels = [2, 3, 5, 7] + >>> scales = [340, 170, 84, 43] + >>> inputs = [torch.rand(1, c, s, s) + ... for c, s in zip(in_channels, scales)] + >>> self = FPN(in_channels, 11, len(in_channels)).eval() + >>> outputs = self.forward(inputs) + >>> for i in range(len(outputs)): + ... print(f'outputs[{i}].shape = {outputs[i].shape}') + outputs[0].shape = torch.Size([1, 11, 340, 340]) + outputs[1].shape = torch.Size([1, 11, 170, 170]) + outputs[2].shape = torch.Size([1, 11, 84, 84]) + outputs[3].shape = torch.Size([1, 11, 43, 43]) + """ + + def __init__(self, + in_channels, + out_channels, + num_outs, + start_level=0, + end_level=-1, + out_ids=[], + add_extra_convs=False, + relu_before_extra_convs=False, + no_norm_on_lateral=False, + conv_cfg=None, + norm_cfg=None, + act_cfg=None, + upsample_cfg=dict(mode='nearest'), + init_cfg=dict( + type='Xavier', layer='Conv2d', distribution='uniform')): + super(CustomFPN, self).__init__(init_cfg) + assert isinstance(in_channels, list) + self.in_channels = in_channels + self.out_channels = out_channels + self.num_ins = len(in_channels) + self.num_outs = num_outs + self.relu_before_extra_convs = relu_before_extra_convs + self.no_norm_on_lateral = no_norm_on_lateral + self.fp16_enabled = False + self.upsample_cfg = upsample_cfg.copy() + self.out_ids = out_ids + if end_level == -1: + self.backbone_end_level = self.num_ins + # assert num_outs >= self.num_ins - start_level + else: + # if end_level < inputs, no extra level is allowed + self.backbone_end_level = end_level + assert end_level <= len(in_channels) + assert num_outs == end_level - start_level + self.start_level = start_level + self.end_level = end_level + self.add_extra_convs = add_extra_convs + assert isinstance(add_extra_convs, (str, bool)) + if isinstance(add_extra_convs, str): + # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output' + assert add_extra_convs in ('on_input', 'on_lateral', 'on_output') + elif add_extra_convs: # True + self.add_extra_convs = 'on_input' + + self.lateral_convs = nn.ModuleList() + self.fpn_convs = nn.ModuleList() + + for i in range(self.start_level, self.backbone_end_level): + l_conv = ConvModule( + in_channels[i], + out_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg if not self.no_norm_on_lateral else None, + act_cfg=act_cfg, + inplace=False) + + self.lateral_convs.append(l_conv) + if i in self.out_ids: + fpn_conv = ConvModule( + out_channels, + out_channels, + 3, + 
padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + inplace=False) + self.fpn_convs.append(fpn_conv) + + # add extra conv layers (e.g., RetinaNet) + extra_levels = num_outs - self.backbone_end_level + self.start_level + if self.add_extra_convs and extra_levels >= 1: + for i in range(extra_levels): + if i == 0 and self.add_extra_convs == 'on_input': + in_channels = self.in_channels[self.backbone_end_level - 1] + else: + in_channels = out_channels + extra_fpn_conv = ConvModule( + in_channels, + out_channels, + 3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + inplace=False) + self.fpn_convs.append(extra_fpn_conv) + + @auto_fp16() + def forward(self, inputs): + """Forward function.""" + assert len(inputs) == len(self.in_channels) + # build laterals + laterals = [ + lateral_conv(inputs[i + self.start_level]) + for i, lateral_conv in enumerate(self.lateral_convs) + ] + + # build top-down path + used_backbone_levels = len(laterals) + for i in range(used_backbone_levels - 1, 0, -1): + # In some cases, fixing `scale factor` (e.g. 2) is preferred, but + # it cannot co-exist with `size` in `F.interpolate`. + if 'scale_factor' in self.upsample_cfg: + laterals[i - 1] += F.interpolate(laterals[i], + **self.upsample_cfg) + else: + prev_shape = laterals[i - 1].shape[2:] + laterals[i - 1] += F.interpolate( + laterals[i], size=prev_shape, **self.upsample_cfg) + + # build outputs + # part 1: from original levels + outs = [self.fpn_convs[i](laterals[i]) for i in self.out_ids] + # part 2: add extra levels + if self.num_outs > len(outs): + # use max pool to get more levels on top of outputs + # (e.g., Faster R-CNN, Mask R-CNN) + if not self.add_extra_convs: + for i in range(self.num_outs - used_backbone_levels): + outs.append(F.max_pool2d(outs[-1], 1, stride=2)) + # add conv layers on top of original feature maps (RetinaNet) + else: + if self.add_extra_convs == 'on_input': + extra_source = inputs[self.backbone_end_level - 1] + elif self.add_extra_convs == 'on_lateral': + extra_source = laterals[-1] + elif self.add_extra_convs == 'on_output': + extra_source = outs[-1] + else: + raise NotImplementedError + outs.append(self.fpn_convs[used_backbone_levels](extra_source)) + for i in range(used_backbone_levels + 1, self.num_outs): + if self.relu_before_extra_convs: + outs.append(self.fpn_convs[i](F.relu(outs[-1]))) + else: + outs.append(self.fpn_convs[i](outs[-1])) + return outs[0] diff --git a/projects/mmdet3d_plugin/models/necks/second_fpn.py b/projects/mmdet3d_plugin/models/necks/second_fpn.py new file mode 100644 index 0000000..1a89192 --- /dev/null +++ b/projects/mmdet3d_plugin/models/necks/second_fpn.py @@ -0,0 +1,105 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch +from mmcv.cnn import build_conv_layer, build_norm_layer, build_upsample_layer +from mmcv.runner import BaseModule, auto_fp16 +from torch import nn as nn +from mmdet.models import NECKS + +@NECKS.register_module() +class SECONDFPN_solo(BaseModule): + """FPN used in SECOND/PointPillars/PartA2/MVXNet. + + Args: + in_channels (list[int]): Input channels of multi-scale feature maps. + out_channels (list[int]): Output channels of feature maps. + upsample_strides (list[int]): Strides used to upsample the + feature maps. + norm_cfg (dict): Config dict of normalization layers. + upsample_cfg (dict): Config dict of upsample layers. + conv_cfg (dict): Config dict of conv layers. + use_conv_for_no_stride (bool): Whether to use conv when stride is 1. 
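+        final_conv_feature_dim (int, optional): If set, an extra conv head
+            projects the concatenated multi-scale features to this number of
+            channels; otherwise the concatenated features are returned as-is.
+            Default: None.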
+ """ + + def __init__(self, + in_channels=[128, 128, 256], + out_channels=[256, 256, 256], + upsample_strides=[1, 2, 4], + norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01), + upsample_cfg=dict(type='deconv', bias=False), + conv_cfg=dict(type='Conv2d', bias=False), + final_conv_feature_dim=None, + use_conv_for_no_stride=False, + init_cfg=None): + # if for GroupNorm, + # cfg is dict(type='GN', num_groups=num_groups, eps=1e-3, affine=True) + super(SECONDFPN_solo, self).__init__(init_cfg=init_cfg) + assert len(out_channels) == len(upsample_strides) == len(in_channels) + self.in_channels = in_channels + self.out_channels = out_channels + self.fp16_enabled = False + + deblocks = [] + for i, out_channel in enumerate(out_channels): + stride = upsample_strides[i] + if stride > 1 or (stride == 1 and not use_conv_for_no_stride): + upsample_layer = build_upsample_layer( + upsample_cfg, + in_channels=in_channels[i], + out_channels=out_channel, + kernel_size=upsample_strides[i], + stride=upsample_strides[i]) + else: + stride = np.round(1 / stride).astype(np.int64) + upsample_layer = build_conv_layer( + conv_cfg, + in_channels=in_channels[i], + out_channels=out_channel, + kernel_size=stride, + stride=stride) + + deblock = nn.Sequential(upsample_layer, + build_norm_layer(norm_cfg, out_channel)[1], + nn.ReLU(inplace=True)) + deblocks.append(deblock) + self.deblocks = nn.ModuleList(deblocks) + + if final_conv_feature_dim is not None: + self.final_feature_dim = final_conv_feature_dim + self.final_conv = nn.Sequential( + build_conv_layer(conv_cfg, in_channels=sum(out_channels), out_channels=sum(out_channels) // 2, kernel_size=3, stride=1, padding=1), + build_norm_layer(norm_cfg, sum(out_channels) // 2)[1], + nn.ReLU(inplace=True), + build_conv_layer(conv_cfg, in_channels=sum(out_channels) // 2, out_channels=final_conv_feature_dim, kernel_size=1, stride=1)) + else: + self.final_feature_dim = sum(out_channels) + self.final_conv = None + + if init_cfg is None: + self.init_cfg = [ + dict(type='Kaiming', layer='ConvTranspose2d'), + dict(type='Constant', layer='NaiveSyncBatchNorm2d', val=1.0) + ] + + @auto_fp16() + def forward(self, x): + """Forward function. + + Args: + x (torch.Tensor): 4D Tensor in (N, C, H, W) shape. + + Returns: + list[torch.Tensor]: Multi-level feature maps. + """ + assert len(x) == len(self.in_channels) + ups = [deblock(x[i]) for i, deblock in enumerate(self.deblocks)] + + if len(ups) > 1: + out = torch.cat(ups, dim=1) + else: + out = ups[0] + + if self.final_conv is not None: + out = self.final_conv(out) + + return [out] diff --git a/projects/mmdet3d_plugin/models/necks/view_transformer.py b/projects/mmdet3d_plugin/models/necks/view_transformer.py new file mode 100644 index 0000000..55aff19 --- /dev/null +++ b/projects/mmdet3d_plugin/models/necks/view_transformer.py @@ -0,0 +1,302 @@ + +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import build_conv_layer +from mmcv.runner import BaseModule, force_fp32 +from torch.cuda.amp.autocast_mode import autocast +from torch.utils.checkpoint import checkpoint + +# from mmdet3d.ops.bev_pool_v2.bev_pool import bev_pool_v2 +from mmdet.models.backbones.resnet import BasicBlock +from mmdet.models.builder import NECKS + +@NECKS.register_module() +class LSSViewTransformer(BaseModule): + r"""Lift-Splat-Shoot view transformer with BEVPoolv2 implementation. 
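+
+    Each image feature map is lifted by predicting a per-pixel categorical
+    depth distribution together with a context feature; the frustum points
+    are transformed into the lidar frame and the depth-weighted context
+    features are pooled ("splatted") into a BEV grid.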
+ + Please refer to the `paper `_ and + `paper ` + + Args: + grid_config (dict): Config of grid alone each axis in format of + (lower_bound, upper_bound, interval). axis in {x,y,z,depth}. + input_size (tuple(int)): Size of input images in format of (height, + width). + downsample (int): Down sample factor from the input size to the feature + size. + in_channels (int): Channels of input feature. + out_channels (int): Channels of transformed feature. + accelerate (bool): Whether the view transformation is conducted with + acceleration. Note: the intrinsic and extrinsic of cameras should + be constant when 'accelerate' is set true. + """ + + def __init__( + self, + grid_config, + input_size, + downsample=16, + in_channels=512, + out_channels=64, + accelerate=False, + ): + super(LSSViewTransformer, self).__init__() + self.grid_config = grid_config + self.downsample = downsample + self.create_grid_infos(**grid_config) + self.create_frustum(grid_config['depth'], input_size, downsample) + self.out_channels = out_channels + self.in_channels = in_channels + self.depth_net = nn.Conv2d( + in_channels, self.D + self.out_channels, kernel_size=1, padding=0) + self.accelerate = accelerate + self.initial_flag = True + + def create_grid_infos(self, x, y, z, **kwargs): + """Generate the grid information including the lower bound, interval, + and size. + + Args: + x (tuple(float)): Config of grid alone x axis in format of + (lower_bound, upper_bound, interval). + y (tuple(float)): Config of grid alone y axis in format of + (lower_bound, upper_bound, interval). + z (tuple(float)): Config of grid alone z axis in format of + (lower_bound, upper_bound, interval). + **kwargs: Container for other potential parameters + """ + self.grid_lower_bound = torch.Tensor([cfg[0] for cfg in [x, y, z]]) + self.grid_interval = torch.Tensor([cfg[2] for cfg in [x, y, z]]) + self.grid_size = torch.Tensor([(cfg[1] - cfg[0]) / cfg[2] + for cfg in [x, y, z]]) + + def create_frustum(self, depth_cfg, input_size, downsample): + """Generate the frustum template for each image. + + Args: + depth_cfg (tuple(float)): Config of grid alone depth axis in format + of (lower_bound, upper_bound, interval). + input_size (tuple(int)): Size of input images in format of (height, + width). + downsample (int): Down sample scale factor from the input size to + the feature size. + """ + H_in, W_in = input_size + H_feat, W_feat = H_in // downsample, W_in // downsample + d = torch.arange(*depth_cfg, dtype=torch.float)\ + .view(-1, 1, 1).expand(-1, H_feat, W_feat) + self.D = d.shape[0] + x = torch.linspace(0, W_in - 1, W_feat, dtype=torch.float)\ + .view(1, 1, W_feat).expand(self.D, H_feat, W_feat) + y = torch.linspace(0, H_in - 1, H_feat, dtype=torch.float)\ + .view(1, H_feat, 1).expand(self.D, H_feat, W_feat) + + # D x H x W x 3 + self.frustum = torch.stack((x, y, d), -1) + + def get_lidar_coor(self, rots, trans, cam2imgs, post_rots, post_trans, + bda): + """Calculate the locations of the frustum points in the lidar + coordinate system. + + Args: + rots (torch.Tensor): Rotation from camera coordinate system to + lidar coordinate system in shape (B, N_cams, 3, 3). + trans (torch.Tensor): Translation from camera coordinate system to + lidar coordinate system in shape (B, N_cams, 3). + cam2imgs (torch.Tensor): Camera intrinsic matrixes in shape + (B, N_cams, 3, 3). + post_rots (torch.Tensor): Rotation in camera coordinate system in + shape (B, N_cams, 3, 3). It is derived from the image view + augmentation. 
+ post_trans (torch.Tensor): Translation in camera coordinate system + derived from image view augmentation in shape (B, N_cams, 3). + + Returns: + torch.tensor: Point coordinates in shape + (B, N_cams, D, ownsample, 3) + """ + B, N, _ = trans.shape + + # post-transformation + # B x N x D x H x W x 3 + points = self.frustum.to(rots) - post_trans.view(B, N, 1, 1, 1, 3) + points = torch.inverse(post_rots).view(B, N, 1, 1, 1, 3, 3)\ + .matmul(points.unsqueeze(-1)) + + # cam_to_ego + points = torch.cat( + (points[..., :2, :] * points[..., 2:3, :], points[..., 2:3, :]), 5) + combine = rots.matmul(torch.inverse(cam2imgs)) + points = combine.view(B, N, 1, 1, 1, 3, 3).matmul(points).squeeze(-1) + points += trans.view(B, N, 1, 1, 1, 3) + points = bda.view(B, 1, 1, 1, 1, 3, + 3).matmul(points.unsqueeze(-1)).squeeze(-1) + return points + + def init_acceleration_v2(self, coor): + """Pre-compute the necessary information in acceleration including the + index of points in the final feature. + + Args: + coor (torch.tensor): Coordinate of points in lidar space in shape + (B, N_cams, D, H, W, 3). + x (torch.tensor): Feature of points in shape + (B, N_cams, D, H, W, C). + """ + + ranks_bev, ranks_depth, ranks_feat, \ + interval_starts, interval_lengths = \ + self.voxel_pooling_prepare_v2(coor) + + self.ranks_bev = ranks_bev.int().contiguous() + self.ranks_feat = ranks_feat.int().contiguous() + self.ranks_depth = ranks_depth.int().contiguous() + self.interval_starts = interval_starts.int().contiguous() + self.interval_lengths = interval_lengths.int().contiguous() + + def voxel_pooling_v2(self, coor, depth, feat): + ranks_bev, ranks_depth, ranks_feat, \ + interval_starts, interval_lengths = \ + self.voxel_pooling_prepare_v2(coor) + if ranks_feat is None: + print('warning ---> no points within the predefined ' + 'bev receptive field') + dummy = torch.zeros(size=[ + feat.shape[0], feat.shape[2], + int(self.grid_size[2]), + int(self.grid_size[0]), + int(self.grid_size[1]) + ]).to(feat) + dummy = torch.cat(dummy.unbind(dim=2), 1) + return dummy + feat = feat.permute(0, 1, 3, 4, 2) + bev_feat_shape = (depth.shape[0], int(self.grid_size[2]), + int(self.grid_size[1]), int(self.grid_size[0]), + feat.shape[-1]) # (B, Z, Y, X, C) + bev_feat = bev_pool_v2(depth, feat, ranks_depth, ranks_feat, ranks_bev, + bev_feat_shape, interval_starts, + interval_lengths) + # collapse Z + bev_feat = torch.cat(bev_feat.unbind(dim=2), 1) + return bev_feat + + def voxel_pooling_prepare_v2(self, coor): + """Data preparation for voxel pooling. + + Args: + coor (torch.tensor): Coordinate of points in the lidar space in + shape (B, N, D, H, W, 3). + + Returns: + tuple[torch.tensor]: Rank of the voxel that a point is belong to + in shape (N_Points); Reserved index of points in the depth + space in shape (N_Points). Reserved index of points in the + feature space in shape (N_Points). + """ + B, N, D, H, W, _ = coor.shape + num_points = B * N * D * H * W + # record the index of selected points for acceleration purpose + ranks_depth = torch.range( + 0, num_points - 1, dtype=torch.int, device=coor.device) + ranks_feat = torch.range( + 0, num_points // D - 1, dtype=torch.int, device=coor.device) + ranks_feat = ranks_feat.reshape(B, N, 1, H, W) + ranks_feat = ranks_feat.expand(B, N, D, H, W).flatten() + # convert coordinate into the voxel space + coor = ((coor - self.grid_lower_bound.to(coor)) / + self.grid_interval.to(coor)) + coor = coor.long().view(num_points, 3) + batch_idx = torch.range(0, B - 1).reshape(B, 1). 
\ + expand(B, num_points // B).reshape(num_points, 1).to(coor) + coor = torch.cat((coor, batch_idx), 1) + + # filter out points that are outside box + kept = (coor[:, 0] >= 0) & (coor[:, 0] < self.grid_size[0]) & \ + (coor[:, 1] >= 0) & (coor[:, 1] < self.grid_size[1]) & \ + (coor[:, 2] >= 0) & (coor[:, 2] < self.grid_size[2]) + if len(kept) == 0: + return None, None, None, None, None + coor, ranks_depth, ranks_feat = \ + coor[kept], ranks_depth[kept], ranks_feat[kept] + # get tensors from the same voxel next to each other + ranks_bev = coor[:, 3] * ( + self.grid_size[2] * self.grid_size[1] * self.grid_size[0]) + ranks_bev += coor[:, 2] * (self.grid_size[1] * self.grid_size[0]) + ranks_bev += coor[:, 1] * self.grid_size[0] + coor[:, 0] + order = ranks_bev.argsort() + ranks_bev, ranks_depth, ranks_feat = \ + ranks_bev[order], ranks_depth[order], ranks_feat[order] + + kept = torch.ones( + ranks_bev.shape[0], device=ranks_bev.device, dtype=torch.bool) + kept[1:] = ranks_bev[1:] != ranks_bev[:-1] + interval_starts = torch.where(kept)[0].int() + if len(interval_starts) == 0: + return None, None, None, None, None + interval_lengths = torch.zeros_like(interval_starts) + interval_lengths[:-1] = interval_starts[1:] - interval_starts[:-1] + interval_lengths[-1] = ranks_bev.shape[0] - interval_starts[-1] + return ranks_bev.int().contiguous(), ranks_depth.int().contiguous( + ), ranks_feat.int().contiguous(), interval_starts.int().contiguous( + ), interval_lengths.int().contiguous() + + def pre_compute(self, input): + if self.initial_flag: + coor = self.get_lidar_coor(*input[1:7]) + self.init_acceleration_v2(coor) + self.initial_flag = False + + def view_transform_core(self, input, depth, tran_feat): + B, N, C, H, W = input[0].shape + + # Lift-Splat + if self.accelerate: + feat = tran_feat.view(B, N, self.out_channels, H, W) + feat = feat.permute(0, 1, 3, 4, 2) + depth = depth.view(B, N, self.D, H, W) + bev_feat_shape = (depth.shape[0], int(self.grid_size[2]), + int(self.grid_size[1]), int(self.grid_size[0]), + feat.shape[-1]) # (B, Z, Y, X, C) + bev_feat = bev_pool_v2(depth, feat, self.ranks_depth, + self.ranks_feat, self.ranks_bev, + bev_feat_shape, self.interval_starts, + self.interval_lengths) + + bev_feat = bev_feat.squeeze(2) + else: + coor = self.get_lidar_coor(*input[1:7]) + bev_feat = self.voxel_pooling_v2( + coor, depth.view(B, N, self.D, H, W), + tran_feat.view(B, N, self.out_channels, H, W)) + return bev_feat, depth + + def view_transform(self, input, depth, tran_feat): + if self.accelerate: + self.pre_compute(input) + return self.view_transform_core(input, depth, tran_feat) + + def forward(self, input): + """Transform image-view feature into bird-eye-view feature. + + Args: + input (list(torch.tensor)): of (image-view feature, rots, trans, + intrins, post_rots, post_trans) + + Returns: + torch.tensor: Bird-eye-view feature in shape (B, C, H_BEV, W_BEV) + """ + x = input[0] + B, N, C, H, W = x.shape + x = x.view(B * N, C, H, W) + x = self.depth_net(x) + + depth_digit = x[:, :self.D, ...] + tran_feat = x[:, self.D:self.D + self.out_channels, ...] 
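+        # The 1x1 depth_net predicts D depth-bin logits followed by
+        # out_channels context channels per pixel; the softmax below turns
+        # the logits into a per-pixel depth distribution that weights the
+        # context features when they are splatted into the BEV grid.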
+ depth = depth_digit.softmax(dim=1) + return self.view_transform(input, depth, tran_feat) + + def get_mlp_input(self, rot, tran, intrin, post_rot, post_tran, bda): + return None diff --git a/projects/mmdet3d_plugin/models/utils/__init__.py b/projects/mmdet3d_plugin/models/utils/__init__.py index 1df10c8..1dcc45e 100644 --- a/projects/mmdet3d_plugin/models/utils/__init__.py +++ b/projects/mmdet3d_plugin/models/utils/__init__.py @@ -2,4 +2,5 @@ from .bricks import run_time from .grid_mask import GridMask from .position_embedding import RelPositionEmbedding -from .visual import save_tensor \ No newline at end of file +from .visual import save_tensor +from .positional_encoding import LearnedPositionalEncoding3D,LearnedPositionalEncoding3DAdd \ No newline at end of file diff --git a/projects/mmdet3d_plugin/models/utils/positional_encoding.py b/projects/mmdet3d_plugin/models/utils/positional_encoding.py new file mode 100644 index 0000000..5c5ead4 --- /dev/null +++ b/projects/mmdet3d_plugin/models/utils/positional_encoding.py @@ -0,0 +1,155 @@ +import math + +import torch +import torch.nn as nn +from mmcv.cnn.bricks.transformer import POSITIONAL_ENCODING +from mmcv.runner import BaseModule + + +@POSITIONAL_ENCODING.register_module() +class LearnedPositionalEncoding3D(BaseModule): + """Position embedding with learnable embedding weights. + + Args: + num_feats (int): The feature dimension for each position + along x-axis or y-axis. The final returned dimension for + each position is 2 times of this value. + row_num_embed (int, optional): The dictionary size of row embeddings. + Default 50. + col_num_embed (int, optional): The dictionary size of col embeddings. + Default 50. + init_cfg (dict or list[dict], optional): Initialization config dict. + """ + + def __init__(self, + num_feats, + row_num_embed=50, + col_num_embed=50, + height_num_embed=50, + init_cfg=dict(type='Uniform', layer='Embedding')): + super(LearnedPositionalEncoding3D, self).__init__(init_cfg) + if isinstance(num_feats, int): + num_feats = [num_feats for i in range(3)] + self.row_embed = nn.Embedding(row_num_embed, num_feats[0]) + self.col_embed = nn.Embedding(col_num_embed, num_feats[1]) + self.height_embed = nn.Embedding(height_num_embed, num_feats[2]) + self.num_feats = num_feats + self.row_num_embed = row_num_embed + self.col_num_embed = col_num_embed + self.height_num_embed = height_num_embed + + def forward(self, mask): + """Forward function for `LearnedPositionalEncoding`. + + Args: + mask (Tensor): ByteTensor mask. Non-zero values representing + ignored positions, while zero values means valid positions + for this image. Shape [bs, h, w]. + + Returns: + pos (Tensor): Returned position embedding with shape + [bs, num_feats*2, h, w]. 
+ """ + l, h, w = mask.shape[-3:] + x = torch.arange(w, device=mask.device) + y = torch.arange(h, device=mask.device) + z = torch.arange(l, device=mask.device) + x_embed = self.col_embed(x) + y_embed = self.row_embed(y) + z_embed = self.height_embed(z) + pos = torch.cat( + (x_embed.unsqueeze(0).unsqueeze(0).repeat(l, h, 1, 1), + y_embed.unsqueeze(1).unsqueeze(0).repeat(l, 1, w, 1), + z_embed.unsqueeze(1).unsqueeze(1).repeat(1, h, w, 1)),dim=-1).permute(3, 0, 1, 2).unsqueeze(0).repeat(mask.shape[0],1, 1, 1, 1) + return pos + + def __repr__(self): + """str: a string that describes the module""" + repr_str = self.__class__.__name__ + repr_str += f'(num_feats={self.num_feats}, ' + repr_str += f'row_num_embed={self.row_num_embed}, ' + repr_str += f'col_num_embed={self.col_num_embed})' + repr_str += f'height_num_embed={self.height_num_embed})' + return repr_str + + +@POSITIONAL_ENCODING.register_module() +class LearnedPositionalEncoding3DAdd(BaseModule): + """Position embedding with learnable embedding weights. + + Args: + num_feats (int): The feature dimension for each position + along x-axis or y-axis. The final returned dimension for + each position is 2 times of this value. + row_num_embed (int, optional): The dictionary size of row embeddings. + Default 50. + col_num_embed (int, optional): The dictionary size of col embeddings. + Default 50. + init_cfg (dict or list[dict], optional): Initialization config dict. + """ + + def __init__(self, + num_feats, + row_num_embed=50, + col_num_embed=50, + height_num_embed=16, + init_cfg=dict(type='Uniform', layer='Embedding')): + super(LearnedPositionalEncoding3DAdd, self).__init__(init_cfg) + self.num_feats = num_feats + num_feats = num_feats * 2 + self.row_embed = nn.Embedding(row_num_embed, num_feats) + self.col_embed = nn.Embedding(col_num_embed, num_feats) + self.height_embed = nn.Embedding(height_num_embed, num_feats) + + self.row_num_embed = row_num_embed + self.col_num_embed = col_num_embed + self.height_num_embed = height_num_embed + + + def forward(self, mask): + """Forward function for `LearnedPositionalEncoding`. + + Args: + mask (Tensor): ByteTensor mask. Non-zero values representing + ignored positions, while zero values means valid positions + for this image. Shape [bs, d, h, w]. + + Returns: + pos (Tensor): Returned position embedding with shape + [bs, num_feats*2, d, h, w]. 
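+                The per-axis embeddings are broadcast and summed element-wise
+                (rather than concatenated), so the channel dimension equals
+                the width of a single embedding table.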
+ """ + d, h, w = mask.shape[-3:] + x = torch.arange(w, device=mask.device) + y = torch.arange(h, device=mask.device) + z = torch.arange(d, device=mask.device) + x_embed = self.col_embed(x) # (n, embed_dims) + y_embed = self.row_embed(y) + z_embed = self.height_embed(z) + + # 2D pos: concatnation + # pos = torch.cat( + # (x_embed.unsqueeze(0).repeat(h, 1, 1), y_embed.unsqueeze(1).repeat( + # 1, w, 1)), + # dim=-1).permute(2, 0, + # 1).unsqueeze(0).repeat(mask.shape[0], 1, 1, 1) # (bs, embed_dims, h, w) + + # 3D pos: element-wise + _x_embed = x_embed[None, None, ...].repeat(d, h, 1, 1) + _y_embed = y_embed[None, :, None, :].repeat(d, 1, w, 1) + _z_embed = z_embed[:, None, None, :].repeat(1, h, w, 1) + xyz_embed = _x_embed + _y_embed + _z_embed + + # # (bs, embed_dims, d, h, w) + pos = xyz_embed.permute(3, 0, 1, 2).unsqueeze(0).repeat(mask.shape[0], 1, 1, 1, 1) + + return pos + + def __repr__(self): + """str: a string that describes the module""" + repr_str = self.__class__.__name__ + repr_str += f'(num_feats={self.num_feats}, ' + repr_str += f'row_num_embed={self.row_num_embed}, ' + repr_str += f'col_num_embed={self.col_num_embed})' + repr_str += f'height_num_embed={self.height_num_embed})' + + return repr_str diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..57e6a87 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,306 @@ +# This file may be used to create an environment using: +# $ conda create --name --file +# platform: linux-64 +_libgcc_mutex=0.1=main +_openmp_mutex=5.1=1_gnu +absl-py=1.4.0=pypi_0 +addict=2.4.0=pypi_0 +altair=5.1.2=pypi_0 +annotated-types=0.6.0=pypi_0 +antlr4-python3-runtime=4.9.3=pypi_0 +anyio=3.6.2=pypi_0 +appdirs=1.4.4=pypi_0 +argon2-cffi=21.3.0=pypi_0 +argon2-cffi-bindings=21.2.0=pypi_0 +arrow=1.2.3=pypi_0 +asttokens=2.2.1=pypi_0 +attrs=23.1.0=pypi_0 +backcall=0.2.0=pypi_0 +backports-zoneinfo=0.2.1=pypi_0 +beautifulsoup4=4.12.2=pypi_0 +black=23.3.0=pypi_0 +bleach=6.0.0=pypi_0 +blinker=1.6.2=pypi_0 +blis=0.7.11=pypi_0 +braceexpand=0.1.7=pypi_0 +ca-certificates=2023.08.22=h06a4308_0 +cachetools=5.3.0=pypi_0 +catalogue=2.0.10=pypi_0 +ccimport=0.4.2=pypi_0 +certifi=2023.5.7=pypi_0 +cffi=1.15.1=pypi_0 +cfgv=3.4.0=pypi_0 +charset-normalizer=3.1.0=pypi_0 +click=8.1.3=pypi_0 +cloudpathlib=0.15.1=pypi_0 +cloudpickle=2.2.1=pypi_0 +colorama=0.4.6=pypi_0 +comm=0.1.3=pypi_0 +confection=0.1.3=pypi_0 +contexttimer=0.3.3=pypi_0 +contourpy=1.0.7=pypi_0 +cumm=0.4.11=pypi_0 +cycler=0.11.0=pypi_0 +cymem=2.0.8=pypi_0 +cython=0.29.34=pypi_0 +dask=2023.3.2=pypi_0 +debugpy=1.6.7=pypi_0 +decorator=5.1.1=pypi_0 +decord=0.6.0=pypi_0 +defusedxml=0.7.1=pypi_0 +descartes=1.1.0=pypi_0 +detectron2=0.6=pypi_0 +distlib=0.3.7=pypi_0 +distro=1.8.0=pypi_0 +dm-tree=0.1.8=pypi_0 +docker-pycreds=0.4.0=pypi_0 +exceptiongroup=1.1.1=pypi_0 +executing=1.2.0=pypi_0 +fairscale=0.4.4=pypi_0 +fastjsonschema=2.16.3=pypi_0 +filelock=3.12.4=pypi_0 +fire=0.5.0=pypi_0 +flake8=5.0.4=pypi_0 +flatbuffers=23.5.26=pypi_0 +fonttools=4.39.3=pypi_0 +fqdn=1.5.1=pypi_0 +fsspec=2023.4.0=pypi_0 +ftfy=6.1.1=pypi_0 +fvcore=0.1.5.post20221221=pypi_0 +gitdb=4.0.10=pypi_0 +gitpython=3.1.31=pypi_0 +google-auth=2.20.0=pypi_0 +google-auth-oauthlib=0.4.1=pypi_0 +googleapis-common-protos=1.59.1=pypi_0 +grpcio=1.54.0=pypi_0 +h11=0.14.0=pypi_0 +h5py=3.8.0=pypi_0 +httpcore=1.0.1=pypi_0 +httpx=0.25.1=pypi_0 +huggingface-hub=0.14.1=pypi_0 +hydra-core=1.3.2=pypi_0 +identify=2.5.30=pypi_0 +idna=3.4=pypi_0 +imageio=2.28.1=pypi_0 +importlib-metadata=6.6.0=pypi_0 +importlib-resources=5.12.0=pypi_0 
+iniconfig=2.0.0=pypi_0 +iopath=0.1.9=pypi_0 +ipykernel=6.22.0=pypi_0 +ipython=8.12.2=pypi_0 +ipython-genutils=0.2.0=pypi_0 +ipywidgets=8.0.6=pypi_0 +isoduration=20.11.0=pypi_0 +isort=5.13.2=pypi_0 +jedi=0.18.2=pypi_0 +jinja2=3.1.2=pypi_0 +joblib=1.2.0=pypi_0 +jsonpointer=2.3=pypi_0 +jsonschema=4.17.3=pypi_0 +jupyter=1.0.0=pypi_0 +jupyter-client=8.2.0=pypi_0 +jupyter-console=6.6.3=pypi_0 +jupyter-core=5.3.0=pypi_0 +jupyter-events=0.6.3=pypi_0 +jupyter-server=2.5.0=pypi_0 +jupyter-server-terminals=0.4.4=pypi_0 +jupyterlab-pygments=0.2.2=pypi_0 +jupyterlab-widgets=3.0.7=pypi_0 +kaggle=1.5.16=pypi_0 +kiwisolver=1.4.4=pypi_0 +kornia=0.5.0=pypi_0 +langcodes=3.3.0=pypi_0 +lark=1.1.7=pypi_0 +lazy-loader=0.2=pypi_0 +ld_impl_linux-64=2.38=h1181459_1 +libclang=16.0.0=pypi_0 +libffi=3.4.4=h6a678d5_0 +libgcc-ng=11.2.0=h1234567_1 +libgomp=11.2.0=h1234567_1 +libstdcxx-ng=11.2.0=h1234567_1 +llvmlite=0.31.0=pypi_0 +lyft-dataset-sdk=0.0.8=pypi_0 +markdown=3.4.3=pypi_0 +markdown-it-py=2.2.0=pypi_0 +markupsafe=2.1.2=pypi_0 +matplotlib=3.2.0=pypi_0 +matplotlib-inline=0.1.6=pypi_0 +mccabe=0.7.0=pypi_0 +mdurl=0.1.2=pypi_0 +mistune=2.0.5=pypi_0 +mmcls=0.25.0=pypi_0 +mmcv-full=1.4.0=pypi_0 +mmdet=2.22.0=dev_0 +mmdet3d=0.17.1=dev_0 +mmsegmentation=0.14.1=pypi_0 +model-index=0.1.11=pypi_0 +murmurhash=1.0.10=pypi_0 +mypy-extensions=1.0.0=pypi_0 +nbclassic=1.0.0=pypi_0 +nbclient=0.7.4=pypi_0 +nbconvert=7.3.1=pypi_0 +nbformat=5.8.0=pypi_0 +ncurses=6.4=h6a678d5_0 +nest-asyncio=1.5.6=pypi_0 +networkx=2.2=pypi_0 +ninja=1.11.1=pypi_0 +nodeenv=1.8.0=pypi_0 +notebook=6.5.4=pypi_0 +notebook-shim=0.2.3=pypi_0 +numba=0.49.0=pypi_0 +numpy=1.19.5=pypi_0 +nuscenes-devkit=1.1.10=pypi_0 +oauthlib=3.2.2=pypi_0 +omegaconf=2.3.0=pypi_0 +open-clip-torch=2.16.0=pypi_0 +openai=1.1.1=pypi_0 +opencv-python=4.7.0.72=pypi_0 +opencv-python-headless=4.5.5.64=pypi_0 +opendatasets=0.1.22=pypi_0 +openexr=1.3.9=pypi_0 +openmim=0.3.7=pypi_0 +openssl=3.0.11=h7f8727e_2 +ordered-set=4.1.0=pypi_0 +packaging=23.1=pypi_0 +pandas=1.4.0=pypi_0 +pandocfilters=1.5.0=pypi_0 +parso=0.8.3=pypi_0 +pathspec=0.11.1=pypi_0 +pathtools=0.1.2=pypi_0 +pathy=0.10.2=pypi_0 +pccm=0.4.8=pypi_0 +pexpect=4.8.0=pypi_0 +pickleshare=0.7.5=pypi_0 +pillow=9.2.0=pypi_0 +pip=23.2.1=pypi_0 +pkgutil-resolve-name=1.3.10=pypi_0 +platformdirs=3.11.0=pypi_0 +plotly=5.13.1=pypi_0 +pluggy=1.0.0=pypi_0 +plyfile=0.9=pypi_0 +portalocker=2.7.0=pypi_0 +pre-commit=3.4.0=pypi_0 +preshed=3.0.9=pypi_0 +prettytable=3.7.0=pypi_0 +prometheus-client=0.16.0=pypi_0 +promise=2.3=pypi_0 +prompt-toolkit=3.0.38=pypi_0 +protobuf=3.20.3=pypi_0 +psutil=5.9.5=pypi_0 +ptyprocess=0.7.0=pypi_0 +pure-eval=0.2.2=pypi_0 +pyarrow=10.0.0=pypi_0 +pyasn1=0.5.0=pypi_0 +pyasn1-modules=0.3.0=pypi_0 +pybind11=2.11.1=pypi_0 +pycocoevalcap=1.2=pypi_0 +pycocotools=2.0.6=pypi_0 +pycodestyle=2.9.1=pypi_0 +pycparser=2.21=pypi_0 +pydantic=2.4.2=pypi_0 +pydantic-core=2.10.1=pypi_0 +pydeck=0.8.1b0=pypi_0 +pyflakes=2.5.0=pypi_0 +pygments=2.15.1=pypi_0 +pyparsing=3.0.9=pypi_0 +pyquaternion=0.9.9=pypi_0 +pyrsistent=0.19.3=pypi_0 +pytest=7.3.1=pypi_0 +python=3.8.18=h955ad1f_0 +python-dateutil=2.8.2=pypi_0 +python-json-logger=2.0.7=pypi_0 +python-magic=0.4.27=pypi_0 +python-slugify=8.0.1=pypi_0 +pytz=2023.3=pypi_0 +pywavelets=1.4.1=pypi_0 +pyyaml=6.0=pypi_0 +pyzmq=25.0.2=pypi_0 +qtconsole=5.4.3=pypi_0 +qtpy=2.3.1=pypi_0 +readline=8.2=h5eee18b_0 +regex=2023.5.5=pypi_0 +requests=2.30.0=pypi_0 +requests-oauthlib=1.3.1=pypi_0 +rfc3339-validator=0.1.4=pypi_0 +rfc3986-validator=0.1.1=pypi_0 +rich=13.3.5=pypi_0 +rsa=4.9=pypi_0 
+salesforce-lavis=1.0.2=pypi_0
+sanmodule=0.1=dev_0
+scikit-image=0.19.3=pypi_0
+scikit-learn=1.2.2=pypi_0
+scipy=1.9.1=pypi_0
+send2trash=1.8.2=pypi_0
+sentencepiece=0.1.99=pypi_0
+sentry-sdk=1.22.1=pypi_0
+setuptools=67.6.0=pypi_0
+shapely=1.8.5=pypi_0
+simplejson=3.19.1=pypi_0
+six=1.16.0=pypi_0
+smart-open=6.4.0=pypi_0
+smmap=5.0.0=pypi_0
+sniffio=1.3.0=pypi_0
+soupsieve=2.4.1=pypi_0
+spacy=3.7.1=pypi_0
+spacy-legacy=3.0.12=pypi_0
+spacy-loggers=1.0.5=pypi_0
+spconv=2.3.6=pypi_0
+sqlite=3.41.2=h5eee18b_0
+srsly=2.4.8=pypi_0
+stack-data=0.6.2=pypi_0
+streamlit=1.27.2=pypi_0
+submitit=1.4.5=pypi_0
+svgwrite=1.4.3=pypi_0
+tabulate=0.9.0=pypi_0
+tenacity=8.2.2=pypi_0
+tensorboard=2.11.2=pypi_0
+tensorboard-data-server=0.6.1=pypi_0
+tensorboard-plugin-wit=1.8.1=pypi_0
+termcolor=2.3.0=pypi_0
+terminado=0.17.1=pypi_0
+terminaltables=3.1.10=pypi_0
+text-unidecode=1.3=pypi_0
+thinc=8.2.1=pypi_0
+threadpoolctl=3.1.0=pypi_0
+tifffile=2023.4.12=pypi_0
+timm=0.4.12=pypi_0
+tinycss2=1.2.1=pypi_0
+tk=8.6.12=h1ccaba5_0
+tokenizers=0.13.3=pypi_0
+toml=0.10.2=pypi_0
+tomli=2.0.1=pypi_0
+torch=1.10.0+cu113=pypi_0
+torchaudio=0.10.0+cu113=pypi_0
+torchvision=0.11.1+cu113=pypi_0
+tornado=6.3.1=pypi_0
+tqdm=4.65.0=pypi_0
+traitlets=5.9.0=pypi_0
+transformers=4.26.1=pypi_0
+tree=0.2.4=pypi_0
+trimesh=2.35.39=pypi_0
+typeguard=2.13.3=pypi_0
+typer=0.9.0=pypi_0
+typing-extensions=4.8.0=pypi_0
+tzlocal=5.1=pypi_0
+uri-template=1.2.0=pypi_0
+urllib3=1.26.15=pypi_0
+validators=0.22.0=pypi_0
+virtualenv=20.24.5=pypi_0
+wandb=0.15.2=pypi_0
+wasabi=1.1.2=pypi_0
+watchdog=3.0.0=pypi_0
+wcwidth=0.2.6=pypi_0
+weasel=0.3.2=pypi_0
+webcolors=1.13=pypi_0
+webdataset=0.2.57=pypi_0
+webencodings=0.5.1=pypi_0
+websocket-client=1.5.1=pypi_0
+werkzeug=2.3.3=pypi_0
+wheel=0.41.2=py38h06a4308_0
+widgetsnbextension=4.0.7=pypi_0
+wrapt=1.15.0=pypi_0
+xz=5.4.2=h5eee18b_0
+yacs=0.1.8=pypi_0
+yapf=0.33.0=pypi_0
+zipp=3.15.0=pypi_0
+zlib=1.2.13=h5eee18b_0
diff --git a/tools/README.md b/tools/README.md
new file mode 100644
index 0000000..c211b1d
--- /dev/null
+++ b/tools/README.md
@@ -0,0 +1,88 @@
+# Tools
+## test_data_pipeline.py
+### usage
+This file allows you to run the entire dataloader pipeline without requiring CUDA, making it very convenient for debugging.
+
+### config parameter
+config: Path to the required config file.
+
+### command line
+```sh
+python -m tools.test_data_pipeline
+```
+
+## condition_benchmark.py
+### usage
+This script reads the pose file of the specified dataset and splits the scenes by the average speed of the ego vehicle. Its output is a list of all scenes whose ego-vehicle speed is larger than the threshold.
+
+### config parameter
+threshold: The speed threshold used for the split.
+pose_file: Path to the dataset pose parameter file.
+
+### command line
+```sh
+python tools/condition_benchmark.py
+```
+
+## train.py
+### usage
+Called by `dist_train.sh` and `slurm_train.sh`. Runs the training process.
+
+## test.py
+### usage
+Called by `dist_test.sh` and `slurm_test.sh`. Runs the evaluation process. It can either report the metrics directly (`--eval mIoU`) or save the occupancy prediction results to files (`--out`).
+
+## dist_train.sh
+### usage
+Run the training process.
+
+### command line
+```sh
+./tools/dist_train.sh path/to/config/file GPU_NUM
+```
+
+## dist_test.sh
+### usage
+Run the evaluation process.
+
+### command line
+```sh
+./tools/dist_test.sh path/to/config/file path/to/checkpoint/file GPU_NUM --eval mIoU
+./tools/dist_test.sh path/to/config/file path/to/checkpoint/file GPU_NUM --out
+```
+
+## slurm_train.sh
+### usage
+Run the training process by `srun`.
+
+### command line
+```sh
+GPUS=${NUM_GPUS} ./tools/slurm_train.sh brie1 train path/to/config/file
+```
+Explanation:
+PARTITION: `brie1`
+JOB_NAME: `train`
+NUM_GPUS: This script expects the number of GPUs to be a multiple of 8. If you want to run with a different number of GPUs, modify the script according to your srun cluster documentation.
+
+## slurm_test.sh
+### usage
+Run the evaluation process by `srun`.
+
+### command line
+```sh
+GPUS=${NUM_GPUS} ./tools/slurm_test.sh brie1 test path/to/config/file path/to/checkpoint/file --eval mIoU
+GPUS=${NUM_GPUS} ./tools/slurm_test.sh brie1 test path/to/config/file path/to/checkpoint/file --out
+```
Explanation:
+PARTITION: `brie1`
+JOB_NAME: `test`
+NUM_GPUS: This script expects the number of GPUs to be a multiple of 8. If you want to run with a different number of GPUs, modify the script according to your srun cluster documentation.
+
+## create_data.py
+### usage
+During dataset preparation, use this script to generate the info files. For more information, see https://github.com/CVPR2023-3D-Occupancy-Prediction/CVPR2023-3D-Occupancy-Prediction/blob/main/docs/getting_started.md
+
+### command line
+```sh
+python tools/create_data.py occ --root-path ./data/occ3d-nus --out-dir ./data/occ3d-nus --extra-tag occ --version v1.0 --canbus ./data --occ-path ./data/occ3d-nus
+```
\ No newline at end of file
diff --git a/tools/__init__.py b/tools/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tools/analysis_tools/analyze_logs.py b/tools/analysis_tools/analyze_logs.py
index 806175f..e86935c 100644
--- a/tools/analysis_tools/analyze_logs.py
+++ b/tools/analysis_tools/analyze_logs.py
@@ -1,9 +1,10 @@
 # Copyright (c) OpenMMLab. All rights reserved.
import argparse import json +from collections import defaultdict + import numpy as np import seaborn as sns -from collections import defaultdict from matplotlib import pyplot as plt @@ -13,20 +14,24 @@ def cal_train_time(log_dicts, args): all_times = [] for epoch in log_dict.keys(): if args.include_outliers: - all_times.append(log_dict[epoch]['time']) + all_times.append(log_dict[epoch]["time"]) else: - all_times.append(log_dict[epoch]['time'][1:]) + all_times.append(log_dict[epoch]["time"][1:]) all_times = np.array(all_times) epoch_ave_time = all_times.mean(-1) slowest_epoch = epoch_ave_time.argmax() fastest_epoch = epoch_ave_time.argmin() std_over_epoch = epoch_ave_time.std() - print(f'slowest epoch {slowest_epoch + 1}, ' - f'average time is {epoch_ave_time[slowest_epoch]:.4f}') - print(f'fastest epoch {fastest_epoch + 1}, ' - f'average time is {epoch_ave_time[fastest_epoch]:.4f}') - print(f'time std over epochs is {std_over_epoch:.4f}') - print(f'average iter time: {np.mean(all_times):.4f} s/iter') + print( + f"slowest epoch {slowest_epoch + 1}, " + f"average time is {epoch_ave_time[slowest_epoch]:.4f}" + ) + print( + f"fastest epoch {fastest_epoch + 1}, " + f"average time is {epoch_ave_time[fastest_epoch]:.4f}" + ) + print(f"time std over epochs is {std_over_epoch:.4f}") + print(f"average iter time: {np.mean(all_times):.4f} s/iter") print() @@ -40,7 +45,7 @@ def plot_curve(log_dicts, args): legend = [] for json_log in args.json_logs: for metric in args.keys: - legend.append(f'{json_log}_{metric}') + legend.append(f"{json_log}_{metric}") assert len(legend) == (len(args.json_logs) * len(args.keys)) metrics = args.keys @@ -48,12 +53,11 @@ def plot_curve(log_dicts, args): for i, log_dict in enumerate(log_dicts): epochs = list(log_dict.keys()) for j, metric in enumerate(metrics): - print(f'plot curve of {args.json_logs[i]}, metric is {metric}') + print(f"plot curve of {args.json_logs[i]}, metric is {metric}") if metric not in log_dict[epochs[args.interval - 1]]: - raise KeyError( - f'{args.json_logs[i]} does not contain metric {metric}') + raise KeyError(f"{args.json_logs[i]} does not contain metric {metric}") - if args.mode == 'eval': + if args.mode == "eval": if min(epochs) == args.interval: x0 = args.interval else: @@ -64,11 +68,10 @@ def plot_curve(log_dicts, args): x0 = min(epochs) else: # find the first epoch that do eval - x0 = min(epochs) + args.interval - \ - min(epochs) % args.interval + x0 = min(epochs) + args.interval - min(epochs) % args.interval xs = np.arange(x0, max(epochs) + 1, args.interval) ys = [] - for epoch in epochs[args.interval - 1::args.interval]: + for epoch in epochs[args.interval - 1 :: args.interval]: ys += log_dict[epoch][metric] # if training is aborted before eval of the last epoch @@ -79,86 +82,76 @@ def plot_curve(log_dicts, args): ax = plt.gca() ax.set_xticks(xs) - plt.xlabel('epoch') - plt.plot(xs, ys, label=legend[i * num_metrics + j], marker='o') + plt.xlabel("epoch") + plt.plot(xs, ys, label=legend[i * num_metrics + j], marker="o") else: xs = [] ys = [] - num_iters_per_epoch = \ - log_dict[epochs[args.interval-1]]['iter'][-1] - for epoch in epochs[args.interval - 1::args.interval]: - iters = log_dict[epoch]['iter'] - if log_dict[epoch]['mode'][-1] == 'val': + num_iters_per_epoch = log_dict[epochs[args.interval - 1]]["iter"][-1] + for epoch in epochs[args.interval - 1 :: args.interval]: + iters = log_dict[epoch]["iter"] + if log_dict[epoch]["mode"][-1] == "val": iters = iters[:-1] - xs.append( - np.array(iters) + (epoch - 1) * num_iters_per_epoch) - 
ys.append(np.array(log_dict[epoch][metric][:len(iters)])) + xs.append(np.array(iters) + (epoch - 1) * num_iters_per_epoch) + ys.append(np.array(log_dict[epoch][metric][: len(iters)])) xs = np.concatenate(xs) ys = np.concatenate(ys) - plt.xlabel('iter') - plt.plot( - xs, ys, label=legend[i * num_metrics + j], linewidth=0.5) + plt.xlabel("iter") + plt.plot(xs, ys, label=legend[i * num_metrics + j], linewidth=0.5) plt.legend() if args.title is not None: plt.title(args.title) if args.out is None: plt.show() else: - print(f'save curve to: {args.out}') + print(f"save curve to: {args.out}") plt.savefig(args.out) plt.cla() def add_plot_parser(subparsers): - parser_plt = subparsers.add_parser( - 'plot_curve', help='parser for plotting curves') - parser_plt.add_argument( - 'json_logs', - type=str, - nargs='+', - help='path of train log in json format') + parser_plt = subparsers.add_parser("plot_curve", help="parser for plotting curves") parser_plt.add_argument( - '--keys', - type=str, - nargs='+', - default=['mAP_0.25'], - help='the metric that you want to plot') - parser_plt.add_argument('--title', type=str, help='title of figure') + "json_logs", type=str, nargs="+", help="path of train log in json format" + ) parser_plt.add_argument( - '--legend', + "--keys", type=str, - nargs='+', - default=None, - help='legend of each plot') + nargs="+", + default=["mAP_0.25"], + help="the metric that you want to plot", + ) + parser_plt.add_argument("--title", type=str, help="title of figure") parser_plt.add_argument( - '--backend', type=str, default=None, help='backend of plt') - parser_plt.add_argument( - '--style', type=str, default='dark', help='style of plt') - parser_plt.add_argument('--out', type=str, default=None) - parser_plt.add_argument('--mode', type=str, default='train') - parser_plt.add_argument('--interval', type=int, default=1) + "--legend", type=str, nargs="+", default=None, help="legend of each plot" + ) + parser_plt.add_argument("--backend", type=str, default=None, help="backend of plt") + parser_plt.add_argument("--style", type=str, default="dark", help="style of plt") + parser_plt.add_argument("--out", type=str, default=None) + parser_plt.add_argument("--mode", type=str, default="train") + parser_plt.add_argument("--interval", type=int, default=1) def add_time_parser(subparsers): parser_time = subparsers.add_parser( - 'cal_train_time', - help='parser for computing the average time per training iteration') + "cal_train_time", + help="parser for computing the average time per training iteration", + ) parser_time.add_argument( - 'json_logs', - type=str, - nargs='+', - help='path of train log in json format') + "json_logs", type=str, nargs="+", help="path of train log in json format" + ) parser_time.add_argument( - '--include-outliers', - action='store_true', - help='include the first value of every epoch when computing ' - 'the average time') + "--include-outliers", + action="store_true", + help="include the first value of every epoch when computing " + "the average time", + ) def parse_args(): - parser = argparse.ArgumentParser(description='Analyze Json Log') + parser = argparse.ArgumentParser(description="Analyze Json Log") # currently only support plot curve and calculate average train time - subparsers = parser.add_subparsers(dest='task', help='task parser') + subparsers = parser.add_subparsers(dest="task", help="task parser") add_plot_parser(subparsers) add_time_parser(subparsers) args = parser.parse_args() @@ -171,13 +164,13 @@ def load_json_logs(json_logs): # value of sub dict is a list 
of corresponding values of all iterations log_dicts = [dict() for _ in json_logs] for json_log, log_dict in zip(json_logs, log_dicts): - with open(json_log, 'r') as log_file: + with open(json_log, "r") as log_file: for line in log_file: log = json.loads(line.strip()) # skip lines without `epoch` field - if 'epoch' not in log: + if "epoch" not in log: continue - epoch = log.pop('epoch') + epoch = log.pop("epoch") if epoch not in log_dict: log_dict[epoch] = defaultdict(list) for k, v in log.items(): @@ -190,12 +183,12 @@ def main(): json_logs = args.json_logs for json_log in json_logs: - assert json_log.endswith('.json') + assert json_log.endswith(".json") log_dicts = load_json_logs(json_logs) eval(args.task)(log_dicts, args) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/tools/analysis_tools/benchmark.py b/tools/analysis_tools/benchmark.py index 487a348..f25d6e4 100644 --- a/tools/analysis_tools/benchmark.py +++ b/tools/analysis_tools/benchmark.py @@ -1,31 +1,35 @@ # Copyright (c) OpenMMLab. All rights reserved. import argparse +import sys import time + import torch from mmcv import Config from mmcv.parallel import MMDataParallel from mmcv.runner import load_checkpoint, wrap_fp16_model -import sys -sys.path.append('.') -from projects.mmdet3d_plugin.datasets.builder import build_dataloader -from projects.mmdet3d_plugin.datasets import custom_build_dataset + +sys.path.append(".") # from mmdet3d.datasets import build_dataloader, build_dataset from mmdet3d.models import build_detector -#from tools.misc.fuse_conv_bn import fuse_module + +from projects.mmdet3d_plugin.datasets import custom_build_dataset +from projects.mmdet3d_plugin.datasets.builder import build_dataloader + +# from tools.misc.fuse_conv_bn import fuse_module def parse_args(): - parser = argparse.ArgumentParser(description='MMDet benchmark a model') - parser.add_argument('config', help='test config file path') - parser.add_argument('--checkpoint', default=None, help='checkpoint file') - parser.add_argument('--samples', default=2000, help='samples to benchmark') - parser.add_argument( - '--log-interval', default=50, help='interval of logging') + parser = argparse.ArgumentParser(description="MMDet benchmark a model") + parser.add_argument("config", help="test config file path") + parser.add_argument("--checkpoint", default=None, help="checkpoint file") + parser.add_argument("--samples", default=2000, help="samples to benchmark") + parser.add_argument("--log-interval", default=50, help="interval of logging") parser.add_argument( - '--fuse-conv-bn', - action='store_true', - help='Whether to fuse conv and bn, this will slightly increase' - 'the inference speed') + "--fuse-conv-bn", + action="store_true", + help="Whether to fuse conv and bn, this will slightly increase" + "the inference speed", + ) args = parser.parse_args() return args @@ -35,7 +39,7 @@ def main(): cfg = Config.fromfile(args.config) # set cudnn_benchmark - if cfg.get('cudnn_benchmark', False): + if cfg.get("cudnn_benchmark", False): torch.backends.cudnn.benchmark = True cfg.model.pretrained = None cfg.data.test.test_mode = True @@ -49,17 +53,18 @@ def main(): samples_per_gpu=1, workers_per_gpu=cfg.data.workers_per_gpu, dist=False, - shuffle=False) + shuffle=False, + ) # build the model and load checkpoint cfg.model.train_cfg = None - model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg')) - fp16_cfg = cfg.get('fp16', None) + model = build_detector(cfg.model, test_cfg=cfg.get("test_cfg")) + fp16_cfg = cfg.get("fp16", None) if 
fp16_cfg is not None: wrap_fp16_model(model) if args.checkpoint is not None: - load_checkpoint(model, args.checkpoint, map_location='cpu') - #if args.fuse_conv_bn: + load_checkpoint(model, args.checkpoint, map_location="cpu") + # if args.fuse_conv_bn: # model = fuse_module(model) model = MMDataParallel(model, device_ids=[0]) @@ -84,15 +89,17 @@ def main(): pure_inf_time += elapsed if (i + 1) % args.log_interval == 0: fps = (i + 1 - num_warmup) / pure_inf_time - print(f'Done image [{i + 1:<3}/ {args.samples}], ' - f'fps: {fps:.1f} img / s') + print( + f"Done image [{i + 1:<3}/ {args.samples}], " + f"fps: {fps:.1f} img / s" + ) if (i + 1) == args.samples: pure_inf_time += elapsed fps = (i + 1 - num_warmup) / pure_inf_time - print(f'Overall fps: {fps:.1f} img / s') + print(f"Overall fps: {fps:.1f} img / s") break -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/tools/analysis_tools/get_params.py b/tools/analysis_tools/get_params.py index fb697ad..8078f22 100644 --- a/tools/analysis_tools/get_params.py +++ b/tools/analysis_tools/get_params.py @@ -1,9 +1,10 @@ import torch -file_path = './ckpts/bevformer_v4.pth' -model = torch.load(file_path, map_location='cpu') + +file_path = "./ckpts/bevformer_v4.pth" +model = torch.load(file_path, map_location="cpu") all = 0 -for key in list(model['state_dict'].keys()): - all += model['state_dict'][key].nelement() +for key in list(model["state_dict"].keys()): + all += model["state_dict"][key].nelement() print(all) # smaller 63374123 diff --git a/tools/analysis_tools/visual.py b/tools/analysis_tools/visual.py index f711b75..9470719 100644 --- a/tools/analysis_tools/visual.py +++ b/tools/analysis_tools/visual.py @@ -3,53 +3,53 @@ # Modified by Zhiqi Li # --------------------------------------------- -import mmcv -from nuscenes.nuscenes import NuScenes -from PIL import Image -from nuscenes.utils.geometry_utils import view_points, box_in_image, BoxVisibility, transform_matrix -from typing import Tuple, List, Iterable +from typing import Iterable, List, Tuple + import matplotlib.pyplot as plt +import mmcv import numpy as np -from PIL import Image from matplotlib import rcParams from matplotlib.axes import Axes -from pyquaternion import Quaternion +from nuscenes.eval.common.data_classes import EvalBox, EvalBoxes +from nuscenes.eval.detection.data_classes import DetectionBox +from nuscenes.eval.detection.render import visualize_sample +from nuscenes.eval.detection.utils import category_to_detection_name +from nuscenes.nuscenes import NuScenes +from nuscenes.utils.data_classes import Box, LidarPointCloud, RadarPointCloud +from nuscenes.utils.geometry_utils import ( + BoxVisibility, + box_in_image, + transform_matrix, + view_points, +) from PIL import Image -from matplotlib import rcParams -from matplotlib.axes import Axes from pyquaternion import Quaternion from tqdm import tqdm -from nuscenes.utils.data_classes import LidarPointCloud, RadarPointCloud, Box -from nuscenes.utils.geometry_utils import view_points, box_in_image, BoxVisibility, transform_matrix -from nuscenes.eval.common.data_classes import EvalBoxes, EvalBox -from nuscenes.eval.detection.data_classes import DetectionBox -from nuscenes.eval.detection.utils import category_to_detection_name -from nuscenes.eval.detection.render import visualize_sample +cams = [ + "CAM_FRONT", + "CAM_FRONT_RIGHT", + "CAM_BACK_RIGHT", + "CAM_BACK", + "CAM_BACK_LEFT", + "CAM_FRONT_LEFT", +] - - -cams = ['CAM_FRONT', - 'CAM_FRONT_RIGHT', - 'CAM_BACK_RIGHT', - 'CAM_BACK', - 'CAM_BACK_LEFT', - 
'CAM_FRONT_LEFT'] - -import numpy as np import matplotlib.pyplot as plt -from nuscenes.utils.data_classes import LidarPointCloud, RadarPointCloud, Box -from PIL import Image +import numpy as np from matplotlib import rcParams +from nuscenes.utils.data_classes import Box, LidarPointCloud, RadarPointCloud +from PIL import Image def render_annotation( - anntoken: str, - margin: float = 10, - view: np.ndarray = np.eye(4), - box_vis_level: BoxVisibility = BoxVisibility.ANY, - out_path: str = 'render.png', - extra_info: bool = False) -> None: + anntoken: str, + margin: float = 10, + view: np.ndarray = np.eye(4), + box_vis_level: BoxVisibility = BoxVisibility.ANY, + out_path: str = "render.png", + extra_info: bool = False, +) -> None: """ Render selected annotation. :param anntoken: Sample_annotation token. @@ -59,18 +59,23 @@ def render_annotation( :param out_path: Optional path to save the rendered figure to disk. :param extra_info: Whether to render extra information below camera view. """ - ann_record = nusc.get('sample_annotation', anntoken) - sample_record = nusc.get('sample', ann_record['sample_token']) - assert 'LIDAR_TOP' in sample_record['data'].keys(), 'Error: No LIDAR_TOP in data, unable to render.' + ann_record = nusc.get("sample_annotation", anntoken) + sample_record = nusc.get("sample", ann_record["sample_token"]) + assert ( + "LIDAR_TOP" in sample_record["data"].keys() + ), "Error: No LIDAR_TOP in data, unable to render." # Figure out which camera the object is fully visible in (this may return nothing). boxes, cam = [], [] - cams = [key for key in sample_record['data'].keys() if 'CAM' in key] + cams = [key for key in sample_record["data"].keys() if "CAM" in key] all_bboxes = [] select_cams = [] for cam in cams: - _, boxes, _ = nusc.get_sample_data(sample_record['data'][cam], box_vis_level=box_vis_level, - selected_anntokens=[anntoken]) + _, boxes, _ = nusc.get_sample_data( + sample_record["data"][cam], + box_vis_level=box_vis_level, + selected_anntokens=[anntoken], + ) if len(boxes) > 0: all_bboxes.append(boxes) select_cams.append(cam) @@ -82,30 +87,38 @@ def render_annotation( num_cam = len(all_bboxes) fig, axes = plt.subplots(1, num_cam + 1, figsize=(18, 9)) - select_cams = [sample_record['data'][cam] for cam in select_cams] - print('bbox in cams:', select_cams) + select_cams = [sample_record["data"][cam] for cam in select_cams] + print("bbox in cams:", select_cams) # Plot LIDAR view. - lidar = sample_record['data']['LIDAR_TOP'] - data_path, boxes, camera_intrinsic = nusc.get_sample_data(lidar, selected_anntokens=[anntoken]) + lidar = sample_record["data"]["LIDAR_TOP"] + data_path, boxes, camera_intrinsic = nusc.get_sample_data( + lidar, selected_anntokens=[anntoken] + ) LidarPointCloud.from_file(data_path).render_height(axes[0], view=view) for box in boxes: c = np.array(get_color(box.name)) / 255.0 box.render(axes[0], view=view, colors=(c, c, c)) corners = view_points(boxes[0].corners(), view, False)[:2, :] - axes[0].set_xlim([np.min(corners[0, :]) - margin, np.max(corners[0, :]) + margin]) - axes[0].set_ylim([np.min(corners[1, :]) - margin, np.max(corners[1, :]) + margin]) - axes[0].axis('off') - axes[0].set_aspect('equal') + axes[0].set_xlim( + [np.min(corners[0, :]) - margin, np.max(corners[0, :]) + margin] + ) + axes[0].set_ylim( + [np.min(corners[1, :]) - margin, np.max(corners[1, :]) + margin] + ) + axes[0].axis("off") + axes[0].set_aspect("equal") # Plot CAMERA view. 
for i in range(1, num_cam + 1): cam = select_cams[i - 1] - data_path, boxes, camera_intrinsic = nusc.get_sample_data(cam, selected_anntokens=[anntoken]) + data_path, boxes, camera_intrinsic = nusc.get_sample_data( + cam, selected_anntokens=[anntoken] + ) im = Image.open(data_path) axes[i].imshow(im) - axes[i].set_title(nusc.get('sample_data', cam)['channel']) - axes[i].axis('off') - axes[i].set_aspect('equal') + axes[i].set_title(nusc.get("sample_data", cam)["channel"]) + axes[i].axis("off") + axes[i].set_aspect("equal") for box in boxes: c = np.array(get_color(box.name)) / 255.0 box.render(axes[i], view=camera_intrinsic, normalize=True, colors=(c, c, c)) @@ -115,39 +128,53 @@ def render_annotation( axes[i].set_ylim(im.size[1], 0) if extra_info: - rcParams['font.family'] = 'monospace' - - w, l, h = ann_record['size'] - category = ann_record['category_name'] - lidar_points = ann_record['num_lidar_pts'] - radar_points = ann_record['num_radar_pts'] - - sample_data_record = nusc.get('sample_data', sample_record['data']['LIDAR_TOP']) - pose_record = nusc.get('ego_pose', sample_data_record['ego_pose_token']) - dist = np.linalg.norm(np.array(pose_record['translation']) - np.array(ann_record['translation'])) - - information = ' \n'.join(['category: {}'.format(category), - '', - '# lidar points: {0:>4}'.format(lidar_points), - '# radar points: {0:>4}'.format(radar_points), - '', - 'distance: {:>7.3f}m'.format(dist), - '', - 'width: {:>7.3f}m'.format(w), - 'length: {:>7.3f}m'.format(l), - 'height: {:>7.3f}m'.format(h)]) - - plt.annotate(information, (0, 0), (0, -20), xycoords='axes fraction', textcoords='offset points', va='top') + rcParams["font.family"] = "monospace" + + w, l, h = ann_record["size"] + category = ann_record["category_name"] + lidar_points = ann_record["num_lidar_pts"] + radar_points = ann_record["num_radar_pts"] + + sample_data_record = nusc.get("sample_data", sample_record["data"]["LIDAR_TOP"]) + pose_record = nusc.get("ego_pose", sample_data_record["ego_pose_token"]) + dist = np.linalg.norm( + np.array(pose_record["translation"]) - np.array(ann_record["translation"]) + ) + + information = " \n".join( + [ + "category: {}".format(category), + "", + "# lidar points: {0:>4}".format(lidar_points), + "# radar points: {0:>4}".format(radar_points), + "", + "distance: {:>7.3f}m".format(dist), + "", + "width: {:>7.3f}m".format(w), + "length: {:>7.3f}m".format(l), + "height: {:>7.3f}m".format(h), + ] + ) + + plt.annotate( + information, + (0, 0), + (0, -20), + xycoords="axes fraction", + textcoords="offset points", + va="top", + ) if out_path is not None: plt.savefig(out_path) - -def get_sample_data(sample_data_token: str, - box_vis_level: BoxVisibility = BoxVisibility.ANY, - selected_anntokens=None, - use_flat_vehicle_coordinates: bool = False): +def get_sample_data( + sample_data_token: str, + box_vis_level: BoxVisibility = BoxVisibility.ANY, + selected_anntokens=None, + use_flat_vehicle_coordinates: bool = False, +): """ Returns the data path as well as all annotations related to that sample_data. Note that the boxes are transformed into the current sensor's coordinate frame. 
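The coordinate handling that `get_sample_data` (and `get_predicted_data` below) applies to each box reduces to a short chain of translations and rotations. The following snippet is an illustrative sketch only, not part of the patch: it assumes the nuscenes-devkit `Box` API and the `ego_pose` / `calibrated_sensor` records used throughout this file, and the helper name `world_to_sensor` is hypothetical.

# Illustrative sketch (not part of the patch): the global -> ego -> sensor chain
# that get_sample_data applies to every nuScenes Box. `pose_record` is an
# `ego_pose` record and `cs_record` a `calibrated_sensor` record, as in the
# surrounding diff; Box comes from the nuscenes-devkit, Quaternion from pyquaternion.
import numpy as np
from pyquaternion import Quaternion


def world_to_sensor(box, pose_record, cs_record):
    # Global frame -> ego vehicle frame.
    box.translate(-np.array(pose_record["translation"]))
    box.rotate(Quaternion(pose_record["rotation"]).inverse)
    # Ego vehicle frame -> sensor (camera / lidar) frame.
    box.translate(-np.array(cs_record["translation"]))
    box.rotate(Quaternion(cs_record["rotation"]).inverse)
    return box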
@@ -160,16 +187,16 @@ def get_sample_data(sample_data_token: str, """ # Retrieve sensor & pose records - sd_record = nusc.get('sample_data', sample_data_token) - cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token']) - sensor_record = nusc.get('sensor', cs_record['sensor_token']) - pose_record = nusc.get('ego_pose', sd_record['ego_pose_token']) + sd_record = nusc.get("sample_data", sample_data_token) + cs_record = nusc.get("calibrated_sensor", sd_record["calibrated_sensor_token"]) + sensor_record = nusc.get("sensor", cs_record["sensor_token"]) + pose_record = nusc.get("ego_pose", sd_record["ego_pose_token"]) data_path = nusc.get_sample_data_path(sample_data_token) - if sensor_record['modality'] == 'camera': - cam_intrinsic = np.array(cs_record['camera_intrinsic']) - imsize = (sd_record['width'], sd_record['height']) + if sensor_record["modality"] == "camera": + cam_intrinsic = np.array(cs_record["camera_intrinsic"]) + imsize = (sd_record["width"], sd_record["height"]) else: cam_intrinsic = None imsize = None @@ -185,20 +212,25 @@ def get_sample_data(sample_data_token: str, for box in boxes: if use_flat_vehicle_coordinates: # Move box to ego vehicle coord system parallel to world z plane. - yaw = Quaternion(pose_record['rotation']).yaw_pitch_roll[0] - box.translate(-np.array(pose_record['translation'])) - box.rotate(Quaternion(scalar=np.cos(yaw / 2), vector=[0, 0, np.sin(yaw / 2)]).inverse) + yaw = Quaternion(pose_record["rotation"]).yaw_pitch_roll[0] + box.translate(-np.array(pose_record["translation"])) + box.rotate( + Quaternion( + scalar=np.cos(yaw / 2), vector=[0, 0, np.sin(yaw / 2)] + ).inverse + ) else: # Move box to ego vehicle coord system. - box.translate(-np.array(pose_record['translation'])) - box.rotate(Quaternion(pose_record['rotation']).inverse) + box.translate(-np.array(pose_record["translation"])) + box.rotate(Quaternion(pose_record["rotation"]).inverse) # Move box to sensor coord system. - box.translate(-np.array(cs_record['translation'])) - box.rotate(Quaternion(cs_record['rotation']).inverse) + box.translate(-np.array(cs_record["translation"])) + box.rotate(Quaternion(cs_record["rotation"]).inverse) - if sensor_record['modality'] == 'camera' and not \ - box_in_image(box, cam_intrinsic, imsize, vis_level=box_vis_level): + if sensor_record["modality"] == "camera" and not box_in_image( + box, cam_intrinsic, imsize, vis_level=box_vis_level + ): continue box_list.append(box) @@ -206,13 +238,13 @@ def get_sample_data(sample_data_token: str, return data_path, box_list, cam_intrinsic - -def get_predicted_data(sample_data_token: str, - box_vis_level: BoxVisibility = BoxVisibility.ANY, - selected_anntokens=None, - use_flat_vehicle_coordinates: bool = False, - pred_anns=None - ): +def get_predicted_data( + sample_data_token: str, + box_vis_level: BoxVisibility = BoxVisibility.ANY, + selected_anntokens=None, + use_flat_vehicle_coordinates: bool = False, + pred_anns=None, +): """ Returns the data path as well as all annotations related to that sample_data. Note that the boxes are transformed into the current sensor's coordinate frame. 
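`get_predicted_data` differs from `get_sample_data` only in that the boxes come from a detection results file rather than from the database. As a hedged sketch of how those boxes are typically built before being passed in via `pred_anns` (the helper name `records_to_boxes` and the `score_thresh` parameter are hypothetical; the 0.2 cut-off mirrors the one hard-coded in `render_sample_data` further down in this file):

# Illustrative sketch (not part of the patch): turning entries of results_nusc.json
# into nuscenes-devkit Box objects, dropping low-confidence detections the same
# way the visualizer below does.
from nuscenes.utils.data_classes import Box
from pyquaternion import Quaternion


def records_to_boxes(records, score_thresh=0.2):
    boxes = []
    for rec in records:
        if rec["detection_score"] <= score_thresh:
            continue  # skip low-confidence detections, as render_sample_data does
        boxes.append(
            Box(
                rec["translation"],
                rec["size"],
                Quaternion(rec["rotation"]),
                name=rec["detection_name"],
                token="predicted",
            )
        )
    return boxes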
@@ -225,16 +257,16 @@ def get_predicted_data(sample_data_token: str, """ # Retrieve sensor & pose records - sd_record = nusc.get('sample_data', sample_data_token) - cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token']) - sensor_record = nusc.get('sensor', cs_record['sensor_token']) - pose_record = nusc.get('ego_pose', sd_record['ego_pose_token']) + sd_record = nusc.get("sample_data", sample_data_token) + cs_record = nusc.get("calibrated_sensor", sd_record["calibrated_sensor_token"]) + sensor_record = nusc.get("sensor", cs_record["sensor_token"]) + pose_record = nusc.get("ego_pose", sd_record["ego_pose_token"]) data_path = nusc.get_sample_data_path(sample_data_token) - if sensor_record['modality'] == 'camera': - cam_intrinsic = np.array(cs_record['camera_intrinsic']) - imsize = (sd_record['width'], sd_record['height']) + if sensor_record["modality"] == "camera": + cam_intrinsic = np.array(cs_record["camera_intrinsic"]) + imsize = (sd_record["width"], sd_record["height"]) else: cam_intrinsic = None imsize = None @@ -250,71 +282,88 @@ def get_predicted_data(sample_data_token: str, for box in boxes: if use_flat_vehicle_coordinates: # Move box to ego vehicle coord system parallel to world z plane. - yaw = Quaternion(pose_record['rotation']).yaw_pitch_roll[0] - box.translate(-np.array(pose_record['translation'])) - box.rotate(Quaternion(scalar=np.cos(yaw / 2), vector=[0, 0, np.sin(yaw / 2)]).inverse) + yaw = Quaternion(pose_record["rotation"]).yaw_pitch_roll[0] + box.translate(-np.array(pose_record["translation"])) + box.rotate( + Quaternion( + scalar=np.cos(yaw / 2), vector=[0, 0, np.sin(yaw / 2)] + ).inverse + ) else: # Move box to ego vehicle coord system. - box.translate(-np.array(pose_record['translation'])) - box.rotate(Quaternion(pose_record['rotation']).inverse) + box.translate(-np.array(pose_record["translation"])) + box.rotate(Quaternion(pose_record["rotation"]).inverse) # Move box to sensor coord system. 
- box.translate(-np.array(cs_record['translation'])) - box.rotate(Quaternion(cs_record['rotation']).inverse) + box.translate(-np.array(cs_record["translation"])) + box.rotate(Quaternion(cs_record["rotation"]).inverse) - if sensor_record['modality'] == 'camera' and not \ - box_in_image(box, cam_intrinsic, imsize, vis_level=box_vis_level): + if sensor_record["modality"] == "camera" and not box_in_image( + box, cam_intrinsic, imsize, vis_level=box_vis_level + ): continue box_list.append(box) return data_path, box_list, cam_intrinsic - - -def lidiar_render(sample_token, data,out_path=None): +def lidiar_render(sample_token, data, out_path=None): bbox_gt_list = [] bbox_pred_list = [] - anns = nusc.get('sample', sample_token)['anns'] + anns = nusc.get("sample", sample_token)["anns"] for ann in anns: - content = nusc.get('sample_annotation', ann) + content = nusc.get("sample_annotation", ann) try: - bbox_gt_list.append(DetectionBox( - sample_token=content['sample_token'], - translation=tuple(content['translation']), - size=tuple(content['size']), - rotation=tuple(content['rotation']), - velocity=nusc.box_velocity(content['token'])[:2], - ego_translation=(0.0, 0.0, 0.0) if 'ego_translation' not in content - else tuple(content['ego_translation']), - num_pts=-1 if 'num_pts' not in content else int(content['num_pts']), - detection_name=category_to_detection_name(content['category_name']), - detection_score=-1.0 if 'detection_score' not in content else float(content['detection_score']), - attribute_name='')) + bbox_gt_list.append( + DetectionBox( + sample_token=content["sample_token"], + translation=tuple(content["translation"]), + size=tuple(content["size"]), + rotation=tuple(content["rotation"]), + velocity=nusc.box_velocity(content["token"])[:2], + ego_translation=(0.0, 0.0, 0.0) + if "ego_translation" not in content + else tuple(content["ego_translation"]), + num_pts=-1 if "num_pts" not in content else int(content["num_pts"]), + detection_name=category_to_detection_name(content["category_name"]), + detection_score=-1.0 + if "detection_score" not in content + else float(content["detection_score"]), + attribute_name="", + ) + ) except: pass - bbox_anns = data['results'][sample_token] + bbox_anns = data["results"][sample_token] for content in bbox_anns: - bbox_pred_list.append(DetectionBox( - sample_token=content['sample_token'], - translation=tuple(content['translation']), - size=tuple(content['size']), - rotation=tuple(content['rotation']), - velocity=tuple(content['velocity']), - ego_translation=(0.0, 0.0, 0.0) if 'ego_translation' not in content - else tuple(content['ego_translation']), - num_pts=-1 if 'num_pts' not in content else int(content['num_pts']), - detection_name=content['detection_name'], - detection_score=-1.0 if 'detection_score' not in content else float(content['detection_score']), - attribute_name=content['attribute_name'])) + bbox_pred_list.append( + DetectionBox( + sample_token=content["sample_token"], + translation=tuple(content["translation"]), + size=tuple(content["size"]), + rotation=tuple(content["rotation"]), + velocity=tuple(content["velocity"]), + ego_translation=(0.0, 0.0, 0.0) + if "ego_translation" not in content + else tuple(content["ego_translation"]), + num_pts=-1 if "num_pts" not in content else int(content["num_pts"]), + detection_name=content["detection_name"], + detection_score=-1.0 + if "detection_score" not in content + else float(content["detection_score"]), + attribute_name=content["attribute_name"], + ) + ) gt_annotations = EvalBoxes() pred_annotations = 
EvalBoxes() gt_annotations.add_boxes(sample_token, bbox_gt_list) pred_annotations.add_boxes(sample_token, bbox_pred_list) - print('green is ground truth') - print('blue is the predited result') - visualize_sample(nusc, sample_token, gt_annotations, pred_annotations, savepath=out_path+'_bev') + print("green is ground truth") + print("blue is the predited result") + visualize_sample( + nusc, sample_token, gt_annotations, pred_annotations, savepath=out_path + "_bev" + ) def get_color(category_name: str): @@ -322,25 +371,59 @@ def get_color(category_name: str): Provides the default colors based on the category names. This method works for the general nuScenes categories, as well as the nuScenes detection categories. """ - a = ['noise', 'animal', 'human.pedestrian.adult', 'human.pedestrian.child', 'human.pedestrian.construction_worker', - 'human.pedestrian.personal_mobility', 'human.pedestrian.police_officer', 'human.pedestrian.stroller', - 'human.pedestrian.wheelchair', 'movable_object.barrier', 'movable_object.debris', - 'movable_object.pushable_pullable', 'movable_object.trafficcone', 'static_object.bicycle_rack', 'vehicle.bicycle', - 'vehicle.bus.bendy', 'vehicle.bus.rigid', 'vehicle.car', 'vehicle.construction', 'vehicle.emergency.ambulance', - 'vehicle.emergency.police', 'vehicle.motorcycle', 'vehicle.trailer', 'vehicle.truck', 'flat.driveable_surface', - 'flat.other', 'flat.sidewalk', 'flat.terrain', 'static.manmade', 'static.other', 'static.vegetation', - 'vehicle.ego'] + a = [ + "noise", + "animal", + "human.pedestrian.adult", + "human.pedestrian.child", + "human.pedestrian.construction_worker", + "human.pedestrian.personal_mobility", + "human.pedestrian.police_officer", + "human.pedestrian.stroller", + "human.pedestrian.wheelchair", + "movable_object.barrier", + "movable_object.debris", + "movable_object.pushable_pullable", + "movable_object.trafficcone", + "static_object.bicycle_rack", + "vehicle.bicycle", + "vehicle.bus.bendy", + "vehicle.bus.rigid", + "vehicle.car", + "vehicle.construction", + "vehicle.emergency.ambulance", + "vehicle.emergency.police", + "vehicle.motorcycle", + "vehicle.trailer", + "vehicle.truck", + "flat.driveable_surface", + "flat.other", + "flat.sidewalk", + "flat.terrain", + "static.manmade", + "static.other", + "static.vegetation", + "vehicle.ego", + ] class_names = [ - 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', - 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' + "car", + "truck", + "construction_vehicle", + "bus", + "trailer", + "barrier", + "motorcycle", + "bicycle", + "pedestrian", + "traffic_cone", ] - #print(category_name) - if category_name == 'bicycle': - return nusc.colormap['vehicle.bicycle'] - elif category_name == 'construction_vehicle': - return nusc.colormap['vehicle.construction'] - elif category_name == 'traffic_cone': - return nusc.colormap['movable_object.trafficcone'] + # print(category_name) + if category_name == "bicycle": + return nusc.colormap["vehicle.bicycle"] + elif category_name == "construction_vehicle": + return nusc.colormap["vehicle.construction"] + elif category_name == "traffic_cone": + return nusc.colormap["movable_object.trafficcone"] for key in nusc.colormap.keys(): if category_name in key: @@ -349,23 +432,23 @@ def get_color(category_name: str): def render_sample_data( - sample_toekn: str, - with_anns: bool = True, - box_vis_level: BoxVisibility = BoxVisibility.ANY, - axes_limit: float = 40, - ax=None, - nsweeps: int = 1, - out_path: str = None, - underlay_map: bool = True, - 
use_flat_vehicle_coordinates: bool = True, - show_lidarseg: bool = False, - show_lidarseg_legend: bool = False, - filter_lidarseg_labels=None, - lidarseg_preds_bin_path: str = None, - verbose: bool = True, - show_panoptic: bool = False, - pred_data=None, - ) -> None: + sample_toekn: str, + with_anns: bool = True, + box_vis_level: BoxVisibility = BoxVisibility.ANY, + axes_limit: float = 40, + ax=None, + nsweeps: int = 1, + out_path: str = None, + underlay_map: bool = True, + use_flat_vehicle_coordinates: bool = True, + show_lidarseg: bool = False, + show_lidarseg_legend: bool = False, + filter_lidarseg_labels=None, + lidarseg_preds_bin_path: str = None, + verbose: bool = True, + show_panoptic: bool = False, + pred_data=None, +) -> None: """ Render sample data onto axis. :param sample_data_token: Sample_data token. @@ -393,36 +476,47 @@ def render_sample_data( If show_lidarseg is True, show_panoptic will be set to False. """ lidiar_render(sample_toekn, pred_data, out_path=out_path) - sample = nusc.get('sample', sample_toekn) + sample = nusc.get("sample", sample_toekn) # sample = data['results'][sample_token_list[0]][0] cams = [ - 'CAM_FRONT_LEFT', - 'CAM_FRONT', - 'CAM_FRONT_RIGHT', - 'CAM_BACK_LEFT', - 'CAM_BACK', - 'CAM_BACK_RIGHT', + "CAM_FRONT_LEFT", + "CAM_FRONT", + "CAM_FRONT_RIGHT", + "CAM_BACK_LEFT", + "CAM_BACK", + "CAM_BACK_RIGHT", ] if ax is None: _, ax = plt.subplots(4, 3, figsize=(24, 18)) j = 0 for ind, cam in enumerate(cams): - sample_data_token = sample['data'][cam] + sample_data_token = sample["data"][cam] - sd_record = nusc.get('sample_data', sample_data_token) - sensor_modality = sd_record['sensor_modality'] + sd_record = nusc.get("sample_data", sample_data_token) + sensor_modality = sd_record["sensor_modality"] - if sensor_modality in ['lidar', 'radar']: + if sensor_modality in ["lidar", "radar"]: assert False - elif sensor_modality == 'camera': + elif sensor_modality == "camera": # Load boxes and image. - boxes = [Box(record['translation'], record['size'], Quaternion(record['rotation']), - name=record['detection_name'], token='predicted') for record in - pred_data['results'][sample_toekn] if record['detection_score'] > 0.2] - - data_path, boxes_pred, camera_intrinsic = get_predicted_data(sample_data_token, - box_vis_level=box_vis_level, pred_anns=boxes) - _, boxes_gt, _ = nusc.get_sample_data(sample_data_token, box_vis_level=box_vis_level) + boxes = [ + Box( + record["translation"], + record["size"], + Quaternion(record["rotation"]), + name=record["detection_name"], + token="predicted", + ) + for record in pred_data["results"][sample_toekn] + if record["detection_score"] > 0.2 + ] + + data_path, boxes_pred, camera_intrinsic = get_predicted_data( + sample_data_token, box_vis_level=box_vis_level, pred_anns=boxes + ) + _, boxes_gt, _ = nusc.get_sample_data( + sample_data_token, box_vis_level=box_vis_level + ) if ind == 3: j += 1 ind = ind % 3 @@ -438,10 +532,20 @@ def render_sample_data( if with_anns: for box in boxes_pred: c = np.array(get_color(box.name)) / 255.0 - box.render(ax[j, ind], view=camera_intrinsic, normalize=True, colors=(c, c, c)) + box.render( + ax[j, ind], + view=camera_intrinsic, + normalize=True, + colors=(c, c, c), + ) for box in boxes_gt: c = np.array(get_color(box.name)) / 255.0 - box.render(ax[j + 2, ind], view=camera_intrinsic, normalize=True, colors=(c, c, c)) + box.render( + ax[j + 2, ind], + view=camera_intrinsic, + normalize=True, + colors=(c, c, c), + ) # Limit visible range. 
         ax[j, ind].set_xlim(0, data.size[0])
@@ -452,26 +556,41 @@ render_sample_data
         else:
             raise ValueError("Error: Unknown sensor modality!")

-        ax[j, ind].axis('off')
-        ax[j, ind].set_title('PRED: {} {labels_type}'.format(
-            sd_record['channel'], labels_type='(predictions)' if lidarseg_preds_bin_path else ''))
-        ax[j, ind].set_aspect('equal')
-
-        ax[j + 2, ind].axis('off')
-        ax[j + 2, ind].set_title('GT:{} {labels_type}'.format(
-            sd_record['channel'], labels_type='(predictions)' if lidarseg_preds_bin_path else ''))
-        ax[j + 2, ind].set_aspect('equal')
+        ax[j, ind].axis("off")
+        ax[j, ind].set_title(
+            "PRED: {} {labels_type}".format(
+                sd_record["channel"],
+                labels_type="(predictions)" if lidarseg_preds_bin_path else "",
+            )
+        )
+        ax[j, ind].set_aspect("equal")
+
+        ax[j + 2, ind].axis("off")
+        ax[j + 2, ind].set_title(
+            "GT:{} {labels_type}".format(
+                sd_record["channel"],
+                labels_type="(predictions)" if lidarseg_preds_bin_path else "",
+            )
+        )
+        ax[j + 2, ind].set_aspect("equal")
     if out_path is not None:
-        plt.savefig(out_path+'_camera', bbox_inches='tight', pad_inches=0, dpi=200)
+        plt.savefig(out_path + "_camera", bbox_inches="tight", pad_inches=0, dpi=200)
     if verbose:
         plt.show()
     plt.close()
-if __name__ == '__main__':
-    nusc = NuScenes(version='v1.0-trainval', dataroot='./data/nuscenes', verbose=True)
+
+if __name__ == "__main__":
+    nusc = NuScenes(version="v1.0-trainval", dataroot="./data/nuscenes", verbose=True)
     # render_annotation('7603b030b42a4b1caa8c443ccc1a7d52')
-    bevformer_results = mmcv.load('test/bevformer_base/Thu_Jun__9_16_22_37_2022/pts_bbox/results_nusc.json')
-    sample_token_list = list(bevformer_results['results'].keys())
+    bevformer_results = mmcv.load(
+        "test/bevformer_base/Thu_Jun__9_16_22_37_2022/pts_bbox/results_nusc.json"
+    )
+    sample_token_list = list(bevformer_results["results"].keys())
     for id in range(0, 10):
-        render_sample_data(sample_token_list[id], pred_data=bevformer_results, out_path=sample_token_list[id])
+        render_sample_data(
+            sample_token_list[id],
+            pred_data=bevformer_results,
+            out_path=sample_token_list[id],
+        )
diff --git a/tools/condition_benchmark.py b/tools/condition_benchmark.py
new file mode 100644
index 0000000..e9f18a6
--- /dev/null
+++ b/tools/condition_benchmark.py
@@ -0,0 +1,26 @@
+"""
+Distinguish scenes where the ego car moves fast from scenes where it moves slowly.
+"""
+
+import pickle as pkl
+import numpy as np
+
+pose_file = "/public/MARS/datasets/waymo_occV2/cam_infos_vali.pkl"
+cam_idx = 0
+
+poses_all = pkl.load(open(pose_file, "rb"))
+
+speed = {}
+for scene_idx, poses in poses_all.items():
+    _len = len(poses)
+    move = poses[_len - 1][cam_idx]["ego2global"] - poses[0][cam_idx]["ego2global"]
+    dist = np.linalg.norm(move[:, 3]) / _len
+    speed[scene_idx] = dist
+
+_list = sorted([v for k, v in speed.items()])
+valid_scene = []
+threshold = 1
+for scene_idx, dist in speed.items():
+    if dist > threshold:
+        valid_scene.append(scene_idx)
+print(valid_scene)
diff --git a/tools/create_data.py b/tools/create_data.py
index 3eac178..9a6ca18 100644
--- a/tools/create_data.py
+++ b/tools/create_data.py
@@ -3,16 +3,18 @@
 # ---------------------------------------------
 # Modified by Zhiqi Li
 # ---------------------------------------------
-from data_converter.create_gt_database import create_groundtruth_database
-from data_converter import nuscenes_converter as nuscenes_converter
-from data_converter import nuscenes_occ_converter_own as occ_converter
-from data_converter import lyft_converter as lyft_converter
-from data_converter import kitti_converter as kitti
-from data_converter import indoor_converter as indoor
 import argparse
-from os import path as osp
 import sys
-sys.path.append('.')
+from os import path as osp
+
+from data_converter import indoor_converter as indoor
+from data_converter import kitti_converter as kitti
+from data_converter import lyft_converter as lyft_converter
+from data_converter import nuscenes_converter as nuscenes_converter
+from data_converter import nuscenes_occ_converter_own as occ_converter
+from data_converter.create_gt_database import create_groundtruth_database
+
+sys.path.append(".")


 def kitti_data_prep(root_path, info_prefix, version, out_dir):
@@ -30,33 +32,35 @@ def kitti_data_prep(root_path, info_prefix, version, out_dir):
     kitti.create_kitti_info_file(root_path, info_prefix)
     kitti.create_reduced_point_cloud(root_path, info_prefix)

-    info_train_path = osp.join(root_path, f'{info_prefix}_infos_train.pkl')
-    info_val_path = osp.join(root_path, f'{info_prefix}_infos_val.pkl')
-    info_trainval_path = osp.join(root_path,
-                                  f'{info_prefix}_infos_trainval.pkl')
-    info_test_path = osp.join(root_path, f'{info_prefix}_infos_test.pkl')
+    info_train_path = osp.join(root_path, f"{info_prefix}_infos_train.pkl")
+    info_val_path = osp.join(root_path, f"{info_prefix}_infos_val.pkl")
+    info_trainval_path = osp.join(root_path, f"{info_prefix}_infos_trainval.pkl")
+    info_test_path = osp.join(root_path, f"{info_prefix}_infos_test.pkl")
     kitti.export_2d_annotation(root_path, info_train_path)
     kitti.export_2d_annotation(root_path, info_val_path)
     kitti.export_2d_annotation(root_path, info_trainval_path)
     kitti.export_2d_annotation(root_path, info_test_path)

     create_groundtruth_database(
-        'KittiDataset',
+        "KittiDataset",
         root_path,
         info_prefix,
-        f'{out_dir}/{info_prefix}_infos_train.pkl',
+        f"{out_dir}/{info_prefix}_infos_train.pkl",
         relative_path=False,
-        mask_anno_path='instances_train.json',
-        with_mask=(version == 'mask'))
-
-
-def nuscenes_data_prep(root_path,
-                       can_bus_root_path,
-                       info_prefix,
-                       version,
-                       dataset_name,
-                       out_dir,
-                       max_sweeps=10):
+        mask_anno_path="instances_train.json",
+        with_mask=(version == "mask"),
+    )
+
+
+def nuscenes_data_prep(
+    root_path,
+    can_bus_root_path,
+    info_prefix,
+    version,
+    dataset_name,
+    out_dir,
+    max_sweeps=10,
+):
     """Prepare data related to nuScenes dataset.
Related data consists of '.pkl' files recording basic infos, @@ -71,34 +75,42 @@ def nuscenes_data_prep(root_path, max_sweeps (int): Number of input consecutive frames. Default: 10 """ nuscenes_converter.create_nuscenes_infos( - root_path, out_dir, can_bus_root_path, info_prefix, version=version, max_sweeps=max_sweeps) - - if version == 'v1.0-test': - info_test_path = osp.join( - out_dir, f'{info_prefix}_infos_temporal_test.pkl') - nuscenes_converter.export_2d_annotation( - root_path, info_test_path, version=version) + root_path, + out_dir, + can_bus_root_path, + info_prefix, + version=version, + max_sweeps=max_sweeps, + ) + + if version == "v1.0-test": + info_test_path = osp.join(out_dir, f"{info_prefix}_infos_temporal_test.pkl") + # nuscenes_converter.export_2d_annotation( + # root_path, info_test_path, version=version + # ) else: - info_train_path = osp.join( - out_dir, f'{info_prefix}_infos_temporal_train.pkl') - info_val_path = osp.join( - out_dir, f'{info_prefix}_infos_temporal_val.pkl') - nuscenes_converter.export_2d_annotation( - root_path, info_train_path, version=version) - nuscenes_converter.export_2d_annotation( - root_path, info_val_path, version=version) + info_train_path = osp.join(out_dir, f"{info_prefix}_infos_temporal_train.pkl") + info_val_path = osp.join(out_dir, f"{info_prefix}_infos_temporal_val.pkl") + # nuscenes_converter.export_2d_annotation( + # root_path, info_train_path, version=version + # ) + # nuscenes_converter.export_2d_annotation( + # root_path, info_val_path, version=version + # ) # create_groundtruth_database(dataset_name, root_path, info_prefix, # f'{out_dir}/{info_prefix}_infos_train.pkl') -def occ_nuscenes_data_prep(root_path, - occ_path, - can_bus_root_path, - info_prefix, - version, - dataset_name, - out_dir, - max_sweeps=10): +def occ_nuscenes_data_prep( + root_path, + occ_path, + can_bus_root_path, + info_prefix, + version, + dataset_name, + out_dir, + max_sweeps=10, +): """Prepare data related to nuScenes dataset. Related data consists of '.pkl' files recording basic infos, @@ -113,27 +125,33 @@ def occ_nuscenes_data_prep(root_path, max_sweeps (int): Number of input consecutive frames. 
Default: 10 """ occ_converter.create_nuscenes_occ_infos( - root_path, occ_path,out_dir, can_bus_root_path, info_prefix, version=version, max_sweeps=max_sweeps) - - if version == 'v1.0-test': - info_test_path = osp.join( - out_dir, f'{info_prefix}_infos_temporal_test.pkl') - nuscenes_converter.export_2d_annotation( - root_path, info_test_path, version=version) + root_path, + occ_path, + out_dir, + can_bus_root_path, + info_prefix, + version=version, + max_sweeps=max_sweeps, + ) + + if version == "v1.0-test": + info_test_path = osp.join(out_dir, f"{info_prefix}_infos_temporal_test.pkl") + # nuscenes_converter.export_2d_annotation( + # root_path, info_test_path, version=version + # ) else: - info_train_path = osp.join( - out_dir, f'{info_prefix}_infos_temporal_train.pkl') - info_val_path = osp.join( - out_dir, f'{info_prefix}_infos_temporal_val.pkl') - nuscenes_converter.export_2d_annotation( - root_path, info_train_path, version=version) - nuscenes_converter.export_2d_annotation( - root_path, info_val_path, version=version) + info_train_path = osp.join(out_dir, f"{info_prefix}_infos_temporal_train.pkl") + info_val_path = osp.join(out_dir, f"{info_prefix}_infos_temporal_val.pkl") + # nuscenes_converter.export_2d_annotation( + # root_path, info_train_path, version=version + # ) # This function `export_2d_annotation` will generate "{info_prefix}_infos_temporal_val_mono3d.coco.json", which is not needed. + # nuscenes_converter.export_2d_annotation( + # root_path, info_val_path, version=version + # ) # create_groundtruth_database(dataset_name, root_path, info_prefix, # f'{out_dir}/{info_prefix}_infos_train.pkl') - def lyft_data_prep(root_path, info_prefix, version, max_sweeps=10): """Prepare data related to Lyft dataset. @@ -149,7 +167,8 @@ def lyft_data_prep(root_path, info_prefix, version, max_sweeps=10): Defaults to 10. """ lyft_converter.create_lyft_infos( - root_path, info_prefix, version=version, max_sweeps=max_sweeps) + root_path, info_prefix, version=version, max_sweeps=max_sweeps + ) def scannet_data_prep(root_path, info_prefix, out_dir, workers): @@ -161,8 +180,7 @@ def scannet_data_prep(root_path, info_prefix, out_dir, workers): out_dir (str): Output directory of the generated info file. workers (int): Number of threads to be used. """ - indoor.create_indoor_info_file( - root_path, info_prefix, out_dir, workers=workers) + indoor.create_indoor_info_file(root_path, info_prefix, out_dir, workers=workers) def s3dis_data_prep(root_path, info_prefix, out_dir, workers): @@ -174,8 +192,7 @@ def s3dis_data_prep(root_path, info_prefix, out_dir, workers): out_dir (str): Output directory of the generated info file. workers (int): Number of threads to be used. """ - indoor.create_indoor_info_file( - root_path, info_prefix, out_dir, workers=workers) + indoor.create_indoor_info_file(root_path, info_prefix, out_dir, workers=workers) def sunrgbd_data_prep(root_path, info_prefix, out_dir, workers): @@ -187,16 +204,10 @@ def sunrgbd_data_prep(root_path, info_prefix, out_dir, workers): out_dir (str): Output directory of the generated info file. workers (int): Number of threads to be used. """ - indoor.create_indoor_info_file( - root_path, info_prefix, out_dir, workers=workers) + indoor.create_indoor_info_file(root_path, info_prefix, out_dir, workers=workers) -def waymo_data_prep(root_path, - info_prefix, - version, - out_dir, - workers, - max_sweeps=5): +def waymo_data_prep(root_path, info_prefix, version, out_dir, workers, max_sweeps=5): """Prepare the info file for waymo dataset. 
Args: @@ -209,123 +220,133 @@ def waymo_data_prep(root_path, """ from tools.data_converter import waymo_converter as waymo - splits = ['training', 'validation', 'testing'] + splits = ["training", "validation", "testing"] for i, split in enumerate(splits): - load_dir = osp.join(root_path, 'waymo_format', split) - if split == 'validation': - save_dir = osp.join(out_dir, 'kitti_format', 'training') + load_dir = osp.join(root_path, "waymo_format", split) + if split == "validation": + save_dir = osp.join(out_dir, "kitti_format", "training") else: - save_dir = osp.join(out_dir, 'kitti_format', split) + save_dir = osp.join(out_dir, "kitti_format", split) converter = waymo.Waymo2KITTI( load_dir, save_dir, prefix=str(i), workers=workers, - test_mode=(split == 'test')) + test_mode=(split == "test"), + ) converter.convert() # Generate waymo infos - out_dir = osp.join(out_dir, 'kitti_format') + out_dir = osp.join(out_dir, "kitti_format") kitti.create_waymo_info_file(out_dir, info_prefix, max_sweeps=max_sweeps) create_groundtruth_database( - 'WaymoDataset', + "WaymoDataset", out_dir, info_prefix, - f'{out_dir}/{info_prefix}_infos_train.pkl', + f"{out_dir}/{info_prefix}_infos_train.pkl", relative_path=False, - with_mask=False) + with_mask=False, + ) -parser = argparse.ArgumentParser(description='Data converter arg parser') -parser.add_argument('dataset', metavar='kitti', help='name of the dataset') +parser = argparse.ArgumentParser(description="Data converter arg parser") +parser.add_argument("dataset", metavar="kitti", help="name of the dataset") parser.add_argument( - '--root-path', + "--root-path", type=str, - default='./data/kitti', - help='specify the root path of dataset') + default="./data/kitti", + help="specify the root path of dataset", +) parser.add_argument( - '--occ-path', - type=str, - default='./data/occ', - help='specify the occ path of dataset') + "--occ-path", type=str, default="./data/occ", help="specify the occ path of dataset" +) parser.add_argument( - '--canbus', + "--canbus", type=str, - default='./data', - help='specify the root path of nuScenes canbus') + default="./data", + help="specify the root path of nuScenes canbus", +) parser.add_argument( - '--version', + "--version", type=str, - default='v1.0', + default="v1.0", required=False, - help='specify the dataset version, no need for kitti') + help="specify the dataset version, no need for kitti", +) parser.add_argument( - '--max-sweeps', + "--max-sweeps", type=int, default=10, required=False, - help='specify sweeps of lidar per example') + help="specify sweeps of lidar per example", +) parser.add_argument( - '--out-dir', + "--out-dir", type=str, - default='./data/kitti', - required='False', - help='name of info pkl') -parser.add_argument('--extra-tag', type=str, default='kitti') + default="./data/kitti", + required="False", + help="name of info pkl", +) +parser.add_argument("--extra-tag", type=str, default="kitti") parser.add_argument( - '--workers', type=int, default=4, help='number of threads to be used') + "--workers", type=int, default=4, help="number of threads to be used" +) args = parser.parse_args() -if __name__ == '__main__': - if args.dataset == 'kitti': +if __name__ == "__main__": + if args.dataset == "kitti": kitti_data_prep( root_path=args.root_path, info_prefix=args.extra_tag, version=args.version, - out_dir=args.out_dir) - elif args.dataset == 'nuscenes' and args.version != 'v1.0-mini': - train_version = f'{args.version}-trainval' + out_dir=args.out_dir, + ) + elif args.dataset == "nuscenes" and args.version != 
"v1.0-mini": + train_version = f"{args.version}-trainval" nuscenes_data_prep( root_path=args.root_path, can_bus_root_path=args.canbus, info_prefix=args.extra_tag, version=train_version, - dataset_name='NuScenesDataset', + dataset_name="NuScenesDataset", out_dir=args.out_dir, - max_sweeps=args.max_sweeps) - test_version = f'{args.version}-test' + max_sweeps=args.max_sweeps, + ) + test_version = f"{args.version}-test" nuscenes_data_prep( root_path=args.root_path, can_bus_root_path=args.canbus, info_prefix=args.extra_tag, version=test_version, - dataset_name='NuScenesDataset', + dataset_name="NuScenesDataset", out_dir=args.out_dir, - max_sweeps=args.max_sweeps) + max_sweeps=args.max_sweeps, + ) - - elif args.dataset == 'nuscenes' and args.version == 'v1.0-mini': - train_version = f'{args.version}' + elif args.dataset == "nuscenes" and args.version == "v1.0-mini": + train_version = f"{args.version}" nuscenes_data_prep( root_path=args.root_path, can_bus_root_path=args.canbus, info_prefix=args.extra_tag, version=train_version, - dataset_name='NuScenesDataset', + dataset_name="NuScenesDataset", out_dir=args.out_dir, - max_sweeps=args.max_sweeps) - elif args.dataset == 'occ' and args.version != 'v1.0-mini': - train_version = f'{args.version}-trainval' + max_sweeps=args.max_sweeps, + ) + elif args.dataset == "occ" and args.version != "v1.0-mini": + train_version = f"{args.version}-trainval" occ_nuscenes_data_prep( root_path=args.root_path, occ_path=args.occ_path, can_bus_root_path=args.canbus, info_prefix=args.extra_tag, version=train_version, - dataset_name='NuScenesDataset', + dataset_name="NuScenesDataset", out_dir=args.out_dir, - max_sweeps=args.max_sweeps) + max_sweeps=args.max_sweeps, + ) # test_version = f'{args.version}-test' # nuscenes_data_prep( # root_path=args.root_path, @@ -335,53 +356,60 @@ def waymo_data_prep(root_path, # dataset_name='NuScenesDataset', # out_dir=args.out_dir, # max_sweeps=args.max_sweeps) - elif args.dataset == 'occ' and args.version == 'v1.0-mini': - train_version = f'{args.version}' + elif args.dataset == "occ" and args.version == "v1.0-mini": + train_version = f"{args.version}" occ_nuscenes_data_prep( root_path=args.root_path, occ_path=args.occ_path, can_bus_root_path=args.canbus, info_prefix=args.extra_tag, version=train_version, - dataset_name='NuScenesDataset', + dataset_name="NuScenesDataset", out_dir=args.out_dir, - max_sweeps=args.max_sweeps) - elif args.dataset == 'lyft': - train_version = f'{args.version}-train' + max_sweeps=args.max_sweeps, + ) + elif args.dataset == "lyft": + train_version = f"{args.version}-train" lyft_data_prep( root_path=args.root_path, info_prefix=args.extra_tag, version=train_version, - max_sweeps=args.max_sweeps) - test_version = f'{args.version}-test' + max_sweeps=args.max_sweeps, + ) + test_version = f"{args.version}-test" lyft_data_prep( root_path=args.root_path, info_prefix=args.extra_tag, version=test_version, - max_sweeps=args.max_sweeps) - elif args.dataset == 'waymo': + max_sweeps=args.max_sweeps, + ) + elif args.dataset == "waymo": waymo_data_prep( root_path=args.root_path, info_prefix=args.extra_tag, version=args.version, out_dir=args.out_dir, workers=args.workers, - max_sweeps=args.max_sweeps) - elif args.dataset == 'scannet': + max_sweeps=args.max_sweeps, + ) + elif args.dataset == "scannet": scannet_data_prep( root_path=args.root_path, info_prefix=args.extra_tag, out_dir=args.out_dir, - workers=args.workers) - elif args.dataset == 's3dis': + workers=args.workers, + ) + elif args.dataset == "s3dis": s3dis_data_prep( 
root_path=args.root_path, info_prefix=args.extra_tag, out_dir=args.out_dir, - workers=args.workers) - elif args.dataset == 'sunrgbd': + workers=args.workers, + ) + elif args.dataset == "sunrgbd": sunrgbd_data_prep( root_path=args.root_path, info_prefix=args.extra_tag, out_dir=args.out_dir, - workers=args.workers) + workers=args.workers, + ) diff --git a/tools/data_converter/create_gt_database.py b/tools/data_converter/create_gt_database.py index 7317ced..8164859 100644 --- a/tools/data_converter/create_gt_database.py +++ b/tools/data_converter/create_gt_database.py @@ -1,16 +1,16 @@ # Copyright (c) OpenMMLab. All rights reserved. +import pickle +from os import path as osp + import mmcv import numpy as np -import pickle from mmcv import track_iter_progress from mmcv.ops import roi_align -from os import path as osp -from pycocotools import mask as maskUtils -from pycocotools.coco import COCO - from mmdet3d.core.bbox import box_np_ops as box_np_ops from mmdet3d.datasets import build_dataset from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps +from pycocotools import mask as maskUtils +from pycocotools.coco import COCO def _poly2mask(mask_ann, img_h, img_w): @@ -19,7 +19,7 @@ def _poly2mask(mask_ann, img_h, img_w): # we merge all parts into one mask rle code rles = maskUtils.frPyObjects(mask_ann, img_h, img_w) rle = maskUtils.merge(rles) - elif isinstance(mask_ann['counts'], list): + elif isinstance(mask_ann["counts"], list): # uncompressed RLE rle = maskUtils.frPyObjects(mask_ann, img_h, img_w) else: @@ -36,17 +36,17 @@ def _parse_coco_ann_info(ann_info): gt_masks_ann = [] for i, ann in enumerate(ann_info): - if ann.get('ignore', False): + if ann.get("ignore", False): continue - x1, y1, w, h = ann['bbox'] - if ann['area'] <= 0: + x1, y1, w, h = ann["bbox"] + if ann["area"] <= 0: continue bbox = [x1, y1, x1 + w, y1 + h] - if ann.get('iscrowd', False): + if ann.get("iscrowd", False): gt_bboxes_ignore.append(bbox) else: gt_bboxes.append(bbox) - gt_masks_ann.append(ann['segmentation']) + gt_masks_ann.append(ann["segmentation"]) if gt_bboxes: gt_bboxes = np.array(gt_bboxes, dtype=np.float32) @@ -60,8 +60,7 @@ def _parse_coco_ann_info(ann_info): else: gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) - ann = dict( - bboxes=gt_bboxes, bboxes_ignore=gt_bboxes_ignore, masks=gt_masks_ann) + ann = dict(bboxes=gt_bboxes, bboxes_ignore=gt_bboxes_ignore, masks=gt_masks_ann) return ann @@ -69,20 +68,23 @@ def _parse_coco_ann_info(ann_info): def crop_image_patch_v2(pos_proposals, pos_assigned_gt_inds, gt_masks): import torch from torch.nn.modules.utils import _pair + device = pos_proposals.device num_pos = pos_proposals.size(0) - fake_inds = ( - torch.arange(num_pos, - device=device).to(dtype=pos_proposals.dtype)[:, None]) + fake_inds = torch.arange(num_pos, device=device).to(dtype=pos_proposals.dtype)[ + :, None + ] rois = torch.cat([fake_inds, pos_proposals], dim=1) # Nx5 mask_size = _pair(28) rois = rois.to(device=device) gt_masks_th = ( - torch.from_numpy(gt_masks).to(device).index_select( - 0, pos_assigned_gt_inds).to(dtype=rois.dtype)) + torch.from_numpy(gt_masks) + .to(device) + .index_select(0, pos_assigned_gt_inds) + .to(dtype=rois.dtype) + ) # Use RoIAlign could apparently accelerate the training (~0.1s/iter) - targets = ( - roi_align(gt_masks_th, rois, mask_size[::-1], 1.0, 0, True).squeeze(1)) + targets = roi_align(gt_masks_th, rois, mask_size[::-1], 1.0, 0, True).squeeze(1) return targets @@ -97,29 +99,31 @@ def crop_image_patch(pos_proposals, gt_masks, pos_assigned_gt_inds, 
org_img): w = np.maximum(x2 - x1 + 1, 1) h = np.maximum(y2 - y1 + 1, 1) - mask_patch = gt_mask[y1:y1 + h, x1:x1 + w] + mask_patch = gt_mask[y1 : y1 + h, x1 : x1 + w] masked_img = gt_mask[..., None] * org_img - img_patch = masked_img[y1:y1 + h, x1:x1 + w] + img_patch = masked_img[y1 : y1 + h, x1 : x1 + w] img_patches.append(img_patch) masks.append(mask_patch) return img_patches, masks -def create_groundtruth_database(dataset_class_name, - data_path, - info_prefix, - info_path=None, - mask_anno_path=None, - used_classes=None, - database_save_path=None, - db_info_save_path=None, - relative_path=True, - add_rgb=False, - lidar_only=False, - bev_only=False, - coors_range=None, - with_mask=False): +def create_groundtruth_database( + dataset_class_name, + data_path, + info_prefix, + info_path=None, + mask_anno_path=None, + used_classes=None, + database_save_path=None, + db_info_save_path=None, + relative_path=True, + add_rgb=False, + lidar_only=False, + bev_only=False, + coors_range=None, + with_mask=False, +): """Given the raw data, generate the ground truth database. Args: @@ -141,14 +145,13 @@ def create_groundtruth_database(dataset_class_name, with_mask (bool): Whether to use mask. Default: False. """ - print(f'Create GT Database of {dataset_class_name}') - dataset_cfg = dict( - type=dataset_class_name, data_root=data_path, ann_file=info_path) - if dataset_class_name == 'KittiDataset': - file_client_args = dict(backend='disk') + print(f"Create GT Database of {dataset_class_name}") + dataset_cfg = dict(type=dataset_class_name, data_root=data_path, ann_file=info_path) + if dataset_class_name == "KittiDataset": + file_client_args = dict(backend="disk") dataset_cfg.update( test_mode=False, - split='training', + split="training", modality=dict( use_lidar=True, use_depth=False, @@ -157,44 +160,44 @@ def create_groundtruth_database(dataset_class_name, ), pipeline=[ dict( - type='LoadPointsFromFile', - coord_type='LIDAR', + type="LoadPointsFromFile", + coord_type="LIDAR", load_dim=4, use_dim=4, - file_client_args=file_client_args), + file_client_args=file_client_args, + ), dict( - type='LoadAnnotations3D', + type="LoadAnnotations3D", with_bbox_3d=True, with_label_3d=True, - file_client_args=file_client_args) - ]) + file_client_args=file_client_args, + ), + ], + ) - elif dataset_class_name == 'NuScenesDataset': + elif dataset_class_name == "NuScenesDataset": dataset_cfg.update( use_valid_flag=True, pipeline=[ dict( - type='LoadPointsFromFile', - coord_type='LIDAR', - load_dim=5, - use_dim=5), + type="LoadPointsFromFile", coord_type="LIDAR", load_dim=5, use_dim=5 + ), dict( - type='LoadPointsFromMultiSweeps', + type="LoadPointsFromMultiSweeps", sweeps_num=10, use_dim=[0, 1, 2, 3, 4], pad_empty_sweeps=True, - remove_close=True), - dict( - type='LoadAnnotations3D', - with_bbox_3d=True, - with_label_3d=True) - ]) - - elif dataset_class_name == 'WaymoDataset': - file_client_args = dict(backend='disk') + remove_close=True, + ), + dict(type="LoadAnnotations3D", with_bbox_3d=True, with_label_3d=True), + ], + ) + + elif dataset_class_name == "WaymoDataset": + file_client_args = dict(backend="disk") dataset_cfg.update( test_mode=False, - split='training', + split="training", modality=dict( use_lidar=True, use_depth=False, @@ -203,25 +206,27 @@ def create_groundtruth_database(dataset_class_name, ), pipeline=[ dict( - type='LoadPointsFromFile', - coord_type='LIDAR', + type="LoadPointsFromFile", + coord_type="LIDAR", load_dim=6, use_dim=5, - file_client_args=file_client_args), + file_client_args=file_client_args, + 
), dict( - type='LoadAnnotations3D', + type="LoadAnnotations3D", with_bbox_3d=True, with_label_3d=True, - file_client_args=file_client_args) - ]) + file_client_args=file_client_args, + ), + ], + ) dataset = build_dataset(dataset_cfg) if database_save_path is None: - database_save_path = osp.join(data_path, f'{info_prefix}_gt_database') + database_save_path = osp.join(data_path, f"{info_prefix}_gt_database") if db_info_save_path is None: - db_info_save_path = osp.join(data_path, - f'{info_prefix}_dbinfos_train.pkl') + db_info_save_path = osp.join(data_path, f"{info_prefix}_dbinfos_train.pkl") mmcv.mkdir_or_exist(database_save_path) all_db_infos = dict() if with_mask: @@ -230,49 +235,47 @@ def create_groundtruth_database(dataset_class_name, file2id = dict() for i in imgIds: info = coco.loadImgs([i])[0] - file2id.update({info['file_name']: i}) + file2id.update({info["file_name"]: i}) group_counter = 0 for j in track_iter_progress(list(range(len(dataset)))): input_dict = dataset.get_data_info(j) dataset.pre_pipeline(input_dict) example = dataset.pipeline(input_dict) - annos = example['ann_info'] - image_idx = example['sample_idx'] - points = example['points'].tensor.numpy() - gt_boxes_3d = annos['gt_bboxes_3d'].tensor.numpy() - names = annos['gt_names'] + annos = example["ann_info"] + image_idx = example["sample_idx"] + points = example["points"].tensor.numpy() + gt_boxes_3d = annos["gt_bboxes_3d"].tensor.numpy() + names = annos["gt_names"] group_dict = dict() - if 'group_ids' in annos: - group_ids = annos['group_ids'] + if "group_ids" in annos: + group_ids = annos["group_ids"] else: group_ids = np.arange(gt_boxes_3d.shape[0], dtype=np.int64) difficulty = np.zeros(gt_boxes_3d.shape[0], dtype=np.int32) - if 'difficulty' in annos: - difficulty = annos['difficulty'] + if "difficulty" in annos: + difficulty = annos["difficulty"] num_obj = gt_boxes_3d.shape[0] point_indices = box_np_ops.points_in_rbbox(points, gt_boxes_3d) if with_mask: # prepare masks - gt_boxes = annos['gt_bboxes'] - img_path = osp.split(example['img_info']['filename'])[-1] + gt_boxes = annos["gt_bboxes"] + img_path = osp.split(example["img_info"]["filename"])[-1] if img_path not in file2id.keys(): - print(f'skip image {img_path} for empty mask') + print(f"skip image {img_path} for empty mask") continue img_id = file2id[img_path] kins_annIds = coco.getAnnIds(imgIds=img_id) kins_raw_info = coco.loadAnns(kins_annIds) kins_ann_info = _parse_coco_ann_info(kins_raw_info) - h, w = annos['img_shape'][:2] - gt_masks = [ - _poly2mask(mask, h, w) for mask in kins_ann_info['masks'] - ] + h, w = annos["img_shape"][:2] + gt_masks = [_poly2mask(mask, h, w) for mask in kins_ann_info["masks"]] # get mask inds based on iou mapping - bbox_iou = bbox_overlaps(kins_ann_info['bboxes'], gt_boxes) + bbox_iou = bbox_overlaps(kins_ann_info["bboxes"], gt_boxes) mask_inds = bbox_iou.argmax(axis=0) - valid_inds = (bbox_iou.max(axis=0) > 0.5) + valid_inds = bbox_iou.max(axis=0) > 0.5 # mask the image # use more precise crop when it is ready @@ -283,12 +286,13 @@ def create_groundtruth_database(dataset_class_name, # torch.Tensor(gt_boxes), # torch.Tensor(mask_inds).long(), object_img_patches) object_img_patches, object_masks = crop_image_patch( - gt_boxes, gt_masks, mask_inds, annos['img']) + gt_boxes, gt_masks, mask_inds, annos["img"] + ) for i in range(num_obj): - filename = f'{image_idx}_{names[i]}_{i}.bin' + filename = f"{image_idx}_{names[i]}_{i}.bin" abs_filepath = osp.join(database_save_path, filename) - rel_filepath = 
osp.join(f'{info_prefix}_gt_database', filename) + rel_filepath = osp.join(f"{info_prefix}_gt_database", filename) # save point clouds and image patches for each object gt_points = points[point_indices[:, i]] @@ -298,41 +302,41 @@ def create_groundtruth_database(dataset_class_name, if object_masks[i].sum() == 0 or not valid_inds[i]: # Skip object for empty or invalid mask continue - img_patch_path = abs_filepath + '.png' - mask_patch_path = abs_filepath + '.mask.png' + img_patch_path = abs_filepath + ".png" + mask_patch_path = abs_filepath + ".mask.png" mmcv.imwrite(object_img_patches[i], img_patch_path) mmcv.imwrite(object_masks[i], mask_patch_path) - with open(abs_filepath, 'w') as f: + with open(abs_filepath, "w") as f: gt_points.tofile(f) if (used_classes is None) or names[i] in used_classes: db_info = { - 'name': names[i], - 'path': rel_filepath, - 'image_idx': image_idx, - 'gt_idx': i, - 'box3d_lidar': gt_boxes_3d[i], - 'num_points_in_gt': gt_points.shape[0], - 'difficulty': difficulty[i], + "name": names[i], + "path": rel_filepath, + "image_idx": image_idx, + "gt_idx": i, + "box3d_lidar": gt_boxes_3d[i], + "num_points_in_gt": gt_points.shape[0], + "difficulty": difficulty[i], } local_group_id = group_ids[i] # if local_group_id >= 0: if local_group_id not in group_dict: group_dict[local_group_id] = group_counter group_counter += 1 - db_info['group_id'] = group_dict[local_group_id] - if 'score' in annos: - db_info['score'] = annos['score'][i] + db_info["group_id"] = group_dict[local_group_id] + if "score" in annos: + db_info["score"] = annos["score"][i] if with_mask: - db_info.update({'box2d_camera': gt_boxes[i]}) + db_info.update({"box2d_camera": gt_boxes[i]}) if names[i] in all_db_infos: all_db_infos[names[i]].append(db_info) else: all_db_infos[names[i]] = [db_info] for k, v in all_db_infos.items(): - print(f'load {len(v)} {k} database infos') + print(f"load {len(v)} {k} database infos") - with open(db_info_save_path, 'wb') as f: + with open(db_info_save_path, "wb") as f: pickle.dump(all_db_infos, f) diff --git a/tools/data_converter/indoor_converter.py b/tools/data_converter/indoor_converter.py index 4072397..956cdc8 100644 --- a/tools/data_converter/indoor_converter.py +++ b/tools/data_converter/indoor_converter.py @@ -1,18 +1,17 @@ # Copyright (c) OpenMMLab. All rights reserved. +import os + import mmcv import numpy as np -import os -from tools.data_converter.s3dis_data_utils import S3DISData, S3DISSegData -from tools.data_converter.scannet_data_utils import ScanNetData, ScanNetSegData -from tools.data_converter.sunrgbd_data_utils import SUNRGBDData +from .s3dis_data_utils import S3DISData, S3DISSegData +from .scannet_data_utils import ScanNetData, ScanNetSegData +from .sunrgbd_data_utils import SUNRGBDData -def create_indoor_info_file(data_path, - pkl_prefix='sunrgbd', - save_path=None, - use_v1=False, - workers=4): +def create_indoor_info_file( + data_path, pkl_prefix="sunrgbd", save_path=None, use_v1=False, workers=4 +): """Create indoor information file. Get information of the raw data and save it to the pkl file. @@ -25,84 +24,85 @@ def create_indoor_info_file(data_path, workers (int): Number of threads to be used. Default: 4. 
""" assert os.path.exists(data_path) - assert pkl_prefix in ['sunrgbd', 'scannet', 's3dis'], \ - f'unsupported indoor dataset {pkl_prefix}' + assert pkl_prefix in [ + "sunrgbd", + "scannet", + "s3dis", + ], f"unsupported indoor dataset {pkl_prefix}" save_path = data_path if save_path is None else save_path assert os.path.exists(save_path) # generate infos for both detection and segmentation task - if pkl_prefix in ['sunrgbd', 'scannet']: - train_filename = os.path.join(save_path, - f'{pkl_prefix}_infos_train.pkl') - val_filename = os.path.join(save_path, f'{pkl_prefix}_infos_val.pkl') - if pkl_prefix == 'sunrgbd': + if pkl_prefix in ["sunrgbd", "scannet"]: + train_filename = os.path.join(save_path, f"{pkl_prefix}_infos_train.pkl") + val_filename = os.path.join(save_path, f"{pkl_prefix}_infos_val.pkl") + if pkl_prefix == "sunrgbd": # SUN RGB-D has a train-val split train_dataset = SUNRGBDData( - root_path=data_path, split='train', use_v1=use_v1) - val_dataset = SUNRGBDData( - root_path=data_path, split='val', use_v1=use_v1) + root_path=data_path, split="train", use_v1=use_v1 + ) + val_dataset = SUNRGBDData(root_path=data_path, split="val", use_v1=use_v1) else: # ScanNet has a train-val-test split - train_dataset = ScanNetData(root_path=data_path, split='train') - val_dataset = ScanNetData(root_path=data_path, split='val') - test_dataset = ScanNetData(root_path=data_path, split='test') - test_filename = os.path.join(save_path, - f'{pkl_prefix}_infos_test.pkl') + train_dataset = ScanNetData(root_path=data_path, split="train") + val_dataset = ScanNetData(root_path=data_path, split="val") + test_dataset = ScanNetData(root_path=data_path, split="test") + test_filename = os.path.join(save_path, f"{pkl_prefix}_infos_test.pkl") - infos_train = train_dataset.get_infos( - num_workers=workers, has_label=True) - mmcv.dump(infos_train, train_filename, 'pkl') - print(f'{pkl_prefix} info train file is saved to {train_filename}') + infos_train = train_dataset.get_infos(num_workers=workers, has_label=True) + mmcv.dump(infos_train, train_filename, "pkl") + print(f"{pkl_prefix} info train file is saved to {train_filename}") infos_val = val_dataset.get_infos(num_workers=workers, has_label=True) - mmcv.dump(infos_val, val_filename, 'pkl') - print(f'{pkl_prefix} info val file is saved to {val_filename}') + mmcv.dump(infos_val, val_filename, "pkl") + print(f"{pkl_prefix} info val file is saved to {val_filename}") - if pkl_prefix == 'scannet': - infos_test = test_dataset.get_infos( - num_workers=workers, has_label=False) - mmcv.dump(infos_test, test_filename, 'pkl') - print(f'{pkl_prefix} info test file is saved to {test_filename}') + if pkl_prefix == "scannet": + infos_test = test_dataset.get_infos(num_workers=workers, has_label=False) + mmcv.dump(infos_test, test_filename, "pkl") + print(f"{pkl_prefix} info test file is saved to {test_filename}") # generate infos for the semantic segmentation task # e.g. 
re-sampled scene indexes and label weights # scene indexes are used to re-sample rooms with different number of points # label weights are used to balance classes with different number of points - if pkl_prefix == 'scannet': + if pkl_prefix == "scannet": # label weight computation function is adopted from # https://github.com/charlesq34/pointnet2/blob/master/scannet/scannet_dataset.py#L24 train_dataset = ScanNetSegData( data_root=data_path, ann_file=train_filename, - split='train', + split="train", num_points=8192, - label_weight_func=lambda x: 1.0 / np.log(1.2 + x)) + label_weight_func=lambda x: 1.0 / np.log(1.2 + x), + ) # TODO: do we need to generate on val set? val_dataset = ScanNetSegData( data_root=data_path, ann_file=val_filename, - split='val', + split="val", num_points=8192, - label_weight_func=lambda x: 1.0 / np.log(1.2 + x)) + label_weight_func=lambda x: 1.0 / np.log(1.2 + x), + ) # no need to generate for test set train_dataset.get_seg_infos() val_dataset.get_seg_infos() - elif pkl_prefix == 's3dis': + elif pkl_prefix == "s3dis": # S3DIS doesn't have a fixed train-val split # it has 6 areas instead, so we generate info file for each of them # in training, we will use dataset to wrap different areas - splits = [f'Area_{i}' for i in [1, 2, 3, 4, 5, 6]] + splits = [f"Area_{i}" for i in [1, 2, 3, 4, 5, 6]] for split in splits: dataset = S3DISData(root_path=data_path, split=split) info = dataset.get_infos(num_workers=workers, has_label=True) - filename = os.path.join(save_path, - f'{pkl_prefix}_infos_{split}.pkl') - mmcv.dump(info, filename, 'pkl') - print(f'{pkl_prefix} info {split} file is saved to {filename}') + filename = os.path.join(save_path, f"{pkl_prefix}_infos_{split}.pkl") + mmcv.dump(info, filename, "pkl") + print(f"{pkl_prefix} info {split} file is saved to {filename}") seg_dataset = S3DISSegData( data_root=data_path, ann_file=filename, split=split, num_points=4096, - label_weight_func=lambda x: 1.0 / np.log(1.2 + x)) + label_weight_func=lambda x: 1.0 / np.log(1.2 + x), + ) seg_dataset.get_seg_infos() diff --git a/tools/data_converter/kitti_converter.py b/tools/data_converter/kitti_converter.py index 47eec6a..65c325a 100644 --- a/tools/data_converter/kitti_converter.py +++ b/tools/data_converter/kitti_converter.py @@ -1,15 +1,16 @@ # Copyright (c) OpenMMLab. All rights reserved. 
-import mmcv -import numpy as np from collections import OrderedDict -from nuscenes.utils.geometry_utils import view_points from pathlib import Path +import mmcv +import numpy as np from mmdet3d.core.bbox import box_np_ops +from nuscenes.utils.geometry_utils import view_points + from .kitti_data_utils import get_kitti_image_info, get_waymo_image_info from .nuscenes_converter import post_process_coords -kitti_categories = ('Pedestrian', 'Cyclist', 'Car') +kitti_categories = ("Pedestrian", "Cyclist", "Car") def convert_to_kitti_info_version2(info): @@ -21,73 +22,69 @@ def convert_to_kitti_info_version2(info): - calib (dict): calibration info - point_cloud (dict): point cloud info """ - if 'image' not in info or 'calib' not in info or 'point_cloud' not in info: - info['image'] = { - 'image_shape': info['img_shape'], - 'image_idx': info['image_idx'], - 'image_path': info['img_path'], + if "image" not in info or "calib" not in info or "point_cloud" not in info: + info["image"] = { + "image_shape": info["img_shape"], + "image_idx": info["image_idx"], + "image_path": info["img_path"], } - info['calib'] = { - 'R0_rect': info['calib/R0_rect'], - 'Tr_velo_to_cam': info['calib/Tr_velo_to_cam'], - 'P2': info['calib/P2'], + info["calib"] = { + "R0_rect": info["calib/R0_rect"], + "Tr_velo_to_cam": info["calib/Tr_velo_to_cam"], + "P2": info["calib/P2"], } - info['point_cloud'] = { - 'velodyne_path': info['velodyne_path'], + info["point_cloud"] = { + "velodyne_path": info["velodyne_path"], } def _read_imageset_file(path): - with open(path, 'r') as f: + with open(path, "r") as f: lines = f.readlines() return [int(line) for line in lines] -def _calculate_num_points_in_gt(data_path, - infos, - relative_path, - remove_outside=True, - num_features=4): +def _calculate_num_points_in_gt( + data_path, infos, relative_path, remove_outside=True, num_features=4 +): for info in mmcv.track_iter_progress(infos): - pc_info = info['point_cloud'] - image_info = info['image'] - calib = info['calib'] + pc_info = info["point_cloud"] + image_info = info["image"] + calib = info["calib"] if relative_path: - v_path = str(Path(data_path) / pc_info['velodyne_path']) + v_path = str(Path(data_path) / pc_info["velodyne_path"]) else: - v_path = pc_info['velodyne_path'] - points_v = np.fromfile( - v_path, dtype=np.float32, count=-1).reshape([-1, num_features]) - rect = calib['R0_rect'] - Trv2c = calib['Tr_velo_to_cam'] - P2 = calib['P2'] + v_path = pc_info["velodyne_path"] + points_v = np.fromfile(v_path, dtype=np.float32, count=-1).reshape( + [-1, num_features] + ) + rect = calib["R0_rect"] + Trv2c = calib["Tr_velo_to_cam"] + P2 = calib["P2"] if remove_outside: points_v = box_np_ops.remove_outside_points( - points_v, rect, Trv2c, P2, image_info['image_shape']) + points_v, rect, Trv2c, P2, image_info["image_shape"] + ) # points_v = points_v[points_v[:, 0] > 0] - annos = info['annos'] - num_obj = len([n for n in annos['name'] if n != 'DontCare']) + annos = info["annos"] + num_obj = len([n for n in annos["name"] if n != "DontCare"]) # annos = kitti.filter_kitti_anno(annos, ['DontCare']) - dims = annos['dimensions'][:num_obj] - loc = annos['location'][:num_obj] - rots = annos['rotation_y'][:num_obj] - gt_boxes_camera = np.concatenate([loc, dims, rots[..., np.newaxis]], - axis=1) - gt_boxes_lidar = box_np_ops.box_camera_to_lidar( - gt_boxes_camera, rect, Trv2c) + dims = annos["dimensions"][:num_obj] + loc = annos["location"][:num_obj] + rots = annos["rotation_y"][:num_obj] + gt_boxes_camera = np.concatenate([loc, dims, rots[..., 
np.newaxis]], axis=1) + gt_boxes_lidar = box_np_ops.box_camera_to_lidar(gt_boxes_camera, rect, Trv2c) indices = box_np_ops.points_in_rbbox(points_v[:, :3], gt_boxes_lidar) num_points_in_gt = indices.sum(0) - num_ignored = len(annos['dimensions']) - num_obj - num_points_in_gt = np.concatenate( - [num_points_in_gt, -np.ones([num_ignored])]) - annos['num_points_in_gt'] = num_points_in_gt.astype(np.int32) + num_ignored = len(annos["dimensions"]) - num_obj + num_points_in_gt = np.concatenate([num_points_in_gt, -np.ones([num_ignored])]) + annos["num_points_in_gt"] = num_points_in_gt.astype(np.int32) -def create_kitti_info_file(data_path, - pkl_prefix='kitti', - save_path=None, - relative_path=True): +def create_kitti_info_file( + data_path, pkl_prefix="kitti", save_path=None, relative_path=True +): """Create info file of KITTI dataset. Given the raw data, generate its related info file in pkl format. @@ -98,13 +95,13 @@ def create_kitti_info_file(data_path, save_path (str): Path to save the info file. relative_path (bool): Whether to use relative path. """ - imageset_folder = Path(data_path) / 'ImageSets' - train_img_ids = _read_imageset_file(str(imageset_folder / 'train.txt')) + imageset_folder = Path(data_path) / "ImageSets" + train_img_ids = _read_imageset_file(str(imageset_folder / "train.txt")) - val_img_ids = _read_imageset_file(str(imageset_folder / 'val.txt')) - test_img_ids = _read_imageset_file(str(imageset_folder / 'test.txt')) + val_img_ids = _read_imageset_file(str(imageset_folder / "val.txt")) + test_img_ids = _read_imageset_file(str(imageset_folder / "test.txt")) - print('Generate info. this may take several minutes.') + print("Generate info. this may take several minutes.") if save_path is None: save_path = Path(data_path) else: @@ -115,10 +112,11 @@ def create_kitti_info_file(data_path, velodyne=True, calib=True, image_ids=train_img_ids, - relative_path=relative_path) + relative_path=relative_path, + ) _calculate_num_points_in_gt(data_path, kitti_infos_train, relative_path) - filename = save_path / f'{pkl_prefix}_infos_train.pkl' - print(f'Kitti info train file is saved to {filename}') + filename = save_path / f"{pkl_prefix}_infos_train.pkl" + print(f"Kitti info train file is saved to {filename}") mmcv.dump(kitti_infos_train, filename) kitti_infos_val = get_kitti_image_info( data_path, @@ -126,13 +124,14 @@ def create_kitti_info_file(data_path, velodyne=True, calib=True, image_ids=val_img_ids, - relative_path=relative_path) + relative_path=relative_path, + ) _calculate_num_points_in_gt(data_path, kitti_infos_val, relative_path) - filename = save_path / f'{pkl_prefix}_infos_val.pkl' - print(f'Kitti info val file is saved to {filename}') + filename = save_path / f"{pkl_prefix}_infos_val.pkl" + print(f"Kitti info val file is saved to {filename}") mmcv.dump(kitti_infos_val, filename) - filename = save_path / f'{pkl_prefix}_infos_trainval.pkl' - print(f'Kitti info trainval file is saved to {filename}') + filename = save_path / f"{pkl_prefix}_infos_trainval.pkl" + print(f"Kitti info trainval file is saved to {filename}") mmcv.dump(kitti_infos_train + kitti_infos_val, filename) kitti_infos_test = get_kitti_image_info( @@ -142,17 +141,16 @@ def create_kitti_info_file(data_path, velodyne=True, calib=True, image_ids=test_img_ids, - relative_path=relative_path) - filename = save_path / f'{pkl_prefix}_infos_test.pkl' - print(f'Kitti info test file is saved to {filename}') + relative_path=relative_path, + ) + filename = save_path / f"{pkl_prefix}_infos_test.pkl" + print(f"Kitti info test 
file is saved to {filename}") mmcv.dump(kitti_infos_test, filename) -def create_waymo_info_file(data_path, - pkl_prefix='waymo', - save_path=None, - relative_path=True, - max_sweeps=5): +def create_waymo_info_file( + data_path, pkl_prefix="waymo", save_path=None, relative_path=True, max_sweeps=5 +): """Create info file of waymo dataset. Given the raw data, generate its related info file in pkl format. @@ -164,12 +162,12 @@ def create_waymo_info_file(data_path, relative_path (bool): Whether to use relative path. max_sweeps (int): Max sweeps before the detection frame to be used. """ - imageset_folder = Path(data_path) / 'ImageSets' - train_img_ids = _read_imageset_file(str(imageset_folder / 'train.txt')) + imageset_folder = Path(data_path) / "ImageSets" + train_img_ids = _read_imageset_file(str(imageset_folder / "train.txt")) # val_img_ids = _read_imageset_file(str(imageset_folder / 'val.txt')) # test_img_ids = _read_imageset_file(str(imageset_folder / 'test.txt')) train_img_ids = [each for each in train_img_ids if each % 5 == 0] - print('Generate info. this may take several minutes.') + print("Generate info. this may take several minutes.") if save_path is None: save_path = Path(data_path) else: @@ -182,15 +180,17 @@ def create_waymo_info_file(data_path, pose=True, image_ids=train_img_ids, relative_path=relative_path, - max_sweeps=max_sweeps) + max_sweeps=max_sweeps, + ) _calculate_num_points_in_gt( data_path, waymo_infos_train, relative_path, num_features=6, - remove_outside=False) - filename = save_path / f'{pkl_prefix}_infos_train.pkl' - print(f'Waymo info train file is saved to {filename}') + remove_outside=False, + ) + filename = save_path / f"{pkl_prefix}_infos_train.pkl" + print(f"Waymo info train file is saved to {filename}") mmcv.dump(waymo_infos_train, filename) # # waymo_infos_val = get_waymo_image_info( @@ -229,12 +229,9 @@ def create_waymo_info_file(data_path, # mmcv.dump(waymo_infos_test, filename) -def _create_reduced_point_cloud(data_path, - info_path, - save_path=None, - back=False, - num_features=4, - front_camera_id=2): +def _create_reduced_point_cloud( + data_path, info_path, save_path=None, back=False, num_features=4, front_camera_id=2 +): """Create reduced point clouds for given info. Args: @@ -249,52 +246,55 @@ def _create_reduced_point_cloud(data_path, kitti_infos = mmcv.load(info_path) for info in mmcv.track_iter_progress(kitti_infos): - pc_info = info['point_cloud'] - image_info = info['image'] - calib = info['calib'] + pc_info = info["point_cloud"] + image_info = info["image"] + calib = info["calib"] - v_path = pc_info['velodyne_path'] + v_path = pc_info["velodyne_path"] v_path = Path(data_path) / v_path - points_v = np.fromfile( - str(v_path), dtype=np.float32, - count=-1).reshape([-1, num_features]) - rect = calib['R0_rect'] + points_v = np.fromfile(str(v_path), dtype=np.float32, count=-1).reshape( + [-1, num_features] + ) + rect = calib["R0_rect"] if front_camera_id == 2: - P2 = calib['P2'] + P2 = calib["P2"] else: - P2 = calib[f'P{str(front_camera_id)}'] - Trv2c = calib['Tr_velo_to_cam'] + P2 = calib[f"P{str(front_camera_id)}"] + Trv2c = calib["Tr_velo_to_cam"] # first remove z < 0 points # keep = points_v[:, -1] > 0 # points_v = points_v[keep] # then remove outside. 
if back: points_v[:, 0] = -points_v[:, 0] - points_v = box_np_ops.remove_outside_points(points_v, rect, Trv2c, P2, - image_info['image_shape']) + points_v = box_np_ops.remove_outside_points( + points_v, rect, Trv2c, P2, image_info["image_shape"] + ) if save_path is None: - save_dir = v_path.parent.parent / (v_path.parent.stem + '_reduced') + save_dir = v_path.parent.parent / (v_path.parent.stem + "_reduced") if not save_dir.exists(): save_dir.mkdir() save_filename = save_dir / v_path.name # save_filename = str(v_path) + '_reduced' if back: - save_filename += '_back' + save_filename += "_back" else: save_filename = str(Path(save_path) / v_path.name) if back: - save_filename += '_back' - with open(save_filename, 'w') as f: + save_filename += "_back" + with open(save_filename, "w") as f: points_v.tofile(f) -def create_reduced_point_cloud(data_path, - pkl_prefix, - train_info_path=None, - val_info_path=None, - test_info_path=None, - save_path=None, - with_back=False): +def create_reduced_point_cloud( + data_path, + pkl_prefix, + train_info_path=None, + val_info_path=None, + test_info_path=None, + save_path=None, + with_back=False, +): """Create reduced point clouds for training/validation/testing. Args: @@ -310,25 +310,22 @@ def create_reduced_point_cloud(data_path, with_back (bool): Whether to flip the points to back. """ if train_info_path is None: - train_info_path = Path(data_path) / f'{pkl_prefix}_infos_train.pkl' + train_info_path = Path(data_path) / f"{pkl_prefix}_infos_train.pkl" if val_info_path is None: - val_info_path = Path(data_path) / f'{pkl_prefix}_infos_val.pkl' + val_info_path = Path(data_path) / f"{pkl_prefix}_infos_val.pkl" if test_info_path is None: - test_info_path = Path(data_path) / f'{pkl_prefix}_infos_test.pkl' + test_info_path = Path(data_path) / f"{pkl_prefix}_infos_test.pkl" - print('create reduced point cloud for training set') + print("create reduced point cloud for training set") _create_reduced_point_cloud(data_path, train_info_path, save_path) - print('create reduced point cloud for validation set') + print("create reduced point cloud for validation set") _create_reduced_point_cloud(data_path, val_info_path, save_path) - print('create reduced point cloud for testing set') + print("create reduced point cloud for testing set") _create_reduced_point_cloud(data_path, test_info_path, save_path) if with_back: - _create_reduced_point_cloud( - data_path, train_info_path, save_path, back=True) - _create_reduced_point_cloud( - data_path, val_info_path, save_path, back=True) - _create_reduced_point_cloud( - data_path, test_info_path, save_path, back=True) + _create_reduced_point_cloud(data_path, train_info_path, save_path, back=True) + _create_reduced_point_cloud(data_path, val_info_path, save_path, back=True) + _create_reduced_point_cloud(data_path, test_info_path, save_path, back=True) def export_2d_annotation(root_path, info_path, mono3d=True): @@ -348,34 +345,37 @@ def export_2d_annotation(root_path, info_path, mono3d=True): coco_ann_id = 0 coco_2d_dict = dict(annotations=[], images=[], categories=cat2Ids) from os import path as osp + for info in mmcv.track_iter_progress(kitti_infos): coco_infos = get_2d_boxes(info, occluded=[0, 1, 2, 3], mono3d=mono3d) - (height, width, - _) = mmcv.imread(osp.join(root_path, - info['image']['image_path'])).shape - coco_2d_dict['images'].append( + (height, width, _) = mmcv.imread( + osp.join(root_path, info["image"]["image_path"]) + ).shape + coco_2d_dict["images"].append( dict( - file_name=info['image']['image_path'], - 
id=info['image']['image_idx'], - Tri2v=info['calib']['Tr_imu_to_velo'], - Trv2c=info['calib']['Tr_velo_to_cam'], - rect=info['calib']['R0_rect'], - cam_intrinsic=info['calib']['P2'], + file_name=info["image"]["image_path"], + id=info["image"]["image_idx"], + Tri2v=info["calib"]["Tr_imu_to_velo"], + Trv2c=info["calib"]["Tr_velo_to_cam"], + rect=info["calib"]["R0_rect"], + cam_intrinsic=info["calib"]["P2"], width=width, - height=height)) + height=height, + ) + ) for coco_info in coco_infos: if coco_info is None: continue # add an empty key for coco format - coco_info['segmentation'] = [] - coco_info['id'] = coco_ann_id - coco_2d_dict['annotations'].append(coco_info) + coco_info["segmentation"] = [] + coco_info["id"] = coco_ann_id + coco_2d_dict["annotations"].append(coco_info) coco_ann_id += 1 if mono3d: - json_prefix = f'{info_path[:-4]}_mono3d' + json_prefix = f"{info_path[:-4]}_mono3d" else: - json_prefix = f'{info_path[:-4]}' - mmcv.dump(coco_2d_dict, f'{json_prefix}.coco.json') + json_prefix = f"{info_path[:-4]}" + mmcv.dump(coco_2d_dict, f"{json_prefix}.coco.json") def get_2d_boxes(info, occluded, mono3d=True): @@ -393,22 +393,22 @@ def get_2d_boxes(info, occluded, mono3d=True): `sample_data_token`. """ # Get calibration information - P2 = info['calib']['P2'] + P2 = info["calib"]["P2"] repro_recs = [] # if no annotations in info (test dataset), then return - if 'annos' not in info: + if "annos" not in info: return repro_recs # Get all the annotation with the specified visibilties. - ann_dicts = info['annos'] - mask = [(ocld in occluded) for ocld in ann_dicts['occluded']] + ann_dicts = info["annos"] + mask = [(ocld in occluded) for ocld in ann_dicts["occluded"]] for k in ann_dicts.keys(): ann_dicts[k] = ann_dicts[k][mask] # convert dict of list to list of dict ann_recs = [] - for i in range(len(ann_dicts['occluded'])): + for i in range(len(ann_dicts["occluded"])): ann_rec = {} for k in ann_dicts.keys(): ann_rec[k] = ann_dicts[k][i] @@ -416,20 +416,20 @@ def get_2d_boxes(info, occluded, mono3d=True): for ann_idx, ann_rec in enumerate(ann_recs): # Augment sample_annotation with token information. 
- ann_rec['sample_annotation_token'] = \ - f"{info['image']['image_idx']}.{ann_idx}" - ann_rec['sample_data_token'] = info['image']['image_idx'] - sample_data_token = info['image']['image_idx'] - - loc = ann_rec['location'][np.newaxis, :] - dim = ann_rec['dimensions'][np.newaxis, :] - rot = ann_rec['rotation_y'][np.newaxis, np.newaxis] + ann_rec["sample_annotation_token"] = f"{info['image']['image_idx']}.{ann_idx}" + ann_rec["sample_data_token"] = info["image"]["image_idx"] + sample_data_token = info["image"]["image_idx"] + + loc = ann_rec["location"][np.newaxis, :] + dim = ann_rec["dimensions"][np.newaxis, :] + rot = ann_rec["rotation_y"][np.newaxis, np.newaxis] # transform the center from [0.5, 1.0, 0.5] to [0.5, 0.5, 0.5] dst = np.array([0.5, 0.5, 0.5]) src = np.array([0.5, 1.0, 0.5]) loc = loc + dim * (dst - src) - offset = (info['calib']['P2'][0, 3] - info['calib']['P0'][0, 3]) \ - / info['calib']['P2'][0, 0] + offset = (info["calib"]["P2"][0, 3] - info["calib"]["P0"][0, 3]) / info[ + "calib" + ]["P2"][0, 0] loc_3d = np.copy(loc) loc_3d[0, 0] += offset gt_bbox_3d = np.concatenate([loc, dim, rot], axis=1).astype(np.float32) @@ -439,16 +439,19 @@ def get_2d_boxes(info, occluded, mono3d=True): corners_3d = box_np_ops.center_to_corner_box3d( gt_bbox_3d[:, :3], gt_bbox_3d[:, 3:6], - gt_bbox_3d[:, 6], [0.5, 0.5, 0.5], - axis=1) + gt_bbox_3d[:, 6], + [0.5, 0.5, 0.5], + axis=1, + ) corners_3d = corners_3d[0].T # (1, 8, 3) -> (3, 8) in_front = np.argwhere(corners_3d[2, :] > 0).flatten() corners_3d = corners_3d[:, in_front] # Project 3d box to 2d. camera_intrinsic = P2 - corner_coords = view_points(corners_3d, camera_intrinsic, - True).T[:, :2].tolist() + corner_coords = ( + view_points(corners_3d, camera_intrinsic, True).T[:, :2].tolist() + ) # Keep only corners that fall within the image. final_coords = post_process_coords(corner_coords) @@ -461,28 +464,38 @@ def get_2d_boxes(info, occluded, mono3d=True): min_x, min_y, max_x, max_y = final_coords # Generate dictionary record to be included in the .json file. 
- repro_rec = generate_record(ann_rec, min_x, min_y, max_x, max_y, - sample_data_token, - info['image']['image_path']) + repro_rec = generate_record( + ann_rec, + min_x, + min_y, + max_x, + max_y, + sample_data_token, + info["image"]["image_path"], + ) # If mono3d=True, add 3D annotations in camera coordinates if mono3d and (repro_rec is not None): - repro_rec['bbox_cam3d'] = np.concatenate( - [loc_3d, dim, rot], - axis=1).astype(np.float32).squeeze().tolist() - repro_rec['velo_cam3d'] = -1 # no velocity in KITTI + repro_rec["bbox_cam3d"] = ( + np.concatenate([loc_3d, dim, rot], axis=1) + .astype(np.float32) + .squeeze() + .tolist() + ) + repro_rec["velo_cam3d"] = -1 # no velocity in KITTI center3d = np.array(loc).reshape([1, 3]) center2d = box_np_ops.points_cam2img( - center3d, camera_intrinsic, with_depth=True) - repro_rec['center2d'] = center2d.squeeze().tolist() + center3d, camera_intrinsic, with_depth=True + ) + repro_rec["center2d"] = center2d.squeeze().tolist() # normalized center2D + depth # samples with depth < 0 will be removed - if repro_rec['center2d'][2] <= 0: + if repro_rec["center2d"][2] <= 0: continue - repro_rec['attribute_name'] = -1 # no attribute in KITTI - repro_rec['attribute_id'] = -1 + repro_rec["attribute_name"] = -1 # no attribute in KITTI + repro_rec["attribute_id"] = -1 repro_recs.append(repro_rec) @@ -514,33 +527,33 @@ def generate_record(ann_rec, x1, y1, x2, y2, sample_data_token, filename): - iscrowd (int): whether the area is crowd """ repro_rec = OrderedDict() - repro_rec['sample_data_token'] = sample_data_token + repro_rec["sample_data_token"] = sample_data_token coco_rec = dict() key_mapping = { - 'name': 'category_name', - 'num_points_in_gt': 'num_lidar_pts', - 'sample_annotation_token': 'sample_annotation_token', - 'sample_data_token': 'sample_data_token', + "name": "category_name", + "num_points_in_gt": "num_lidar_pts", + "sample_annotation_token": "sample_annotation_token", + "sample_data_token": "sample_data_token", } for key, value in ann_rec.items(): if key in key_mapping.keys(): repro_rec[key_mapping[key]] = value - repro_rec['bbox_corners'] = [x1, y1, x2, y2] - repro_rec['filename'] = filename + repro_rec["bbox_corners"] = [x1, y1, x2, y2] + repro_rec["filename"] = filename - coco_rec['file_name'] = filename - coco_rec['image_id'] = sample_data_token - coco_rec['area'] = (y2 - y1) * (x2 - x1) + coco_rec["file_name"] = filename + coco_rec["image_id"] = sample_data_token + coco_rec["area"] = (y2 - y1) * (x2 - x1) - if repro_rec['category_name'] not in kitti_categories: + if repro_rec["category_name"] not in kitti_categories: return None - cat_name = repro_rec['category_name'] - coco_rec['category_name'] = cat_name - coco_rec['category_id'] = kitti_categories.index(cat_name) - coco_rec['bbox'] = [x1, y1, x2 - x1, y2 - y1] - coco_rec['iscrowd'] = 0 + cat_name = repro_rec["category_name"] + coco_rec["category_name"] = cat_name + coco_rec["category_id"] = kitti_categories.index(cat_name) + coco_rec["bbox"] = [x1, y1, x2 - x1, y2 - y1] + coco_rec["iscrowd"] = 0 return coco_rec diff --git a/tools/data_converter/kitti_data_utils.py b/tools/data_converter/kitti_data_utils.py index 01538e0..cafbbb4 100644 --- a/tools/data_converter/kitti_data_utils.py +++ b/tools/data_converter/kitti_data_utils.py @@ -1,153 +1,204 @@ # Copyright (c) OpenMMLab. All rights reserved. 
-import numpy as np from collections import OrderedDict from concurrent import futures as futures from os import path as osp from pathlib import Path + +import numpy as np from skimage import io def get_image_index_str(img_idx, use_prefix_id=False): if use_prefix_id: - return '{:07d}'.format(img_idx) + return "{:07d}".format(img_idx) else: - return '{:06d}'.format(img_idx) - - -def get_kitti_info_path(idx, - prefix, - info_type='image_2', - file_tail='.png', - training=True, - relative_path=True, - exist_check=True, - use_prefix_id=False): + return "{:06d}".format(img_idx) + + +def get_kitti_info_path( + idx, + prefix, + info_type="image_2", + file_tail=".png", + training=True, + relative_path=True, + exist_check=True, + use_prefix_id=False, +): img_idx_str = get_image_index_str(idx, use_prefix_id) img_idx_str += file_tail prefix = Path(prefix) if training: - file_path = Path('training') / info_type / img_idx_str + file_path = Path("training") / info_type / img_idx_str else: - file_path = Path('testing') / info_type / img_idx_str + file_path = Path("testing") / info_type / img_idx_str if exist_check and not (prefix / file_path).exists(): - raise ValueError('file not exist: {}'.format(file_path)) + raise ValueError("file not exist: {}".format(file_path)) if relative_path: return str(file_path) else: return str(prefix / file_path) -def get_image_path(idx, - prefix, - training=True, - relative_path=True, - exist_check=True, - info_type='image_2', - use_prefix_id=False): - return get_kitti_info_path(idx, prefix, info_type, '.png', training, - relative_path, exist_check, use_prefix_id) - - -def get_label_path(idx, - prefix, - training=True, - relative_path=True, - exist_check=True, - info_type='label_2', - use_prefix_id=False): - return get_kitti_info_path(idx, prefix, info_type, '.txt', training, - relative_path, exist_check, use_prefix_id) - - -def get_velodyne_path(idx, - prefix, - training=True, - relative_path=True, - exist_check=True, - use_prefix_id=False): - return get_kitti_info_path(idx, prefix, 'velodyne', '.bin', training, - relative_path, exist_check, use_prefix_id) - - -def get_calib_path(idx, - prefix, - training=True, - relative_path=True, - exist_check=True, - use_prefix_id=False): - return get_kitti_info_path(idx, prefix, 'calib', '.txt', training, - relative_path, exist_check, use_prefix_id) - - -def get_pose_path(idx, - prefix, - training=True, - relative_path=True, - exist_check=True, - use_prefix_id=False): - return get_kitti_info_path(idx, prefix, 'pose', '.txt', training, - relative_path, exist_check, use_prefix_id) +def get_image_path( + idx, + prefix, + training=True, + relative_path=True, + exist_check=True, + info_type="image_2", + use_prefix_id=False, +): + return get_kitti_info_path( + idx, + prefix, + info_type, + ".png", + training, + relative_path, + exist_check, + use_prefix_id, + ) + + +def get_label_path( + idx, + prefix, + training=True, + relative_path=True, + exist_check=True, + info_type="label_2", + use_prefix_id=False, +): + return get_kitti_info_path( + idx, + prefix, + info_type, + ".txt", + training, + relative_path, + exist_check, + use_prefix_id, + ) + + +def get_velodyne_path( + idx, + prefix, + training=True, + relative_path=True, + exist_check=True, + use_prefix_id=False, +): + return get_kitti_info_path( + idx, + prefix, + "velodyne", + ".bin", + training, + relative_path, + exist_check, + use_prefix_id, + ) + + +def get_calib_path( + idx, + prefix, + training=True, + relative_path=True, + exist_check=True, + use_prefix_id=False, +): + return 
get_kitti_info_path( + idx, + prefix, + "calib", + ".txt", + training, + relative_path, + exist_check, + use_prefix_id, + ) + + +def get_pose_path( + idx, + prefix, + training=True, + relative_path=True, + exist_check=True, + use_prefix_id=False, +): + return get_kitti_info_path( + idx, prefix, "pose", ".txt", training, relative_path, exist_check, use_prefix_id + ) def get_label_anno(label_path): annotations = {} - annotations.update({ - 'name': [], - 'truncated': [], - 'occluded': [], - 'alpha': [], - 'bbox': [], - 'dimensions': [], - 'location': [], - 'rotation_y': [] - }) - with open(label_path, 'r') as f: + annotations.update( + { + "name": [], + "truncated": [], + "occluded": [], + "alpha": [], + "bbox": [], + "dimensions": [], + "location": [], + "rotation_y": [], + } + ) + with open(label_path, "r") as f: lines = f.readlines() # if len(lines) == 0 or len(lines[0]) < 15: # content = [] # else: - content = [line.strip().split(' ') for line in lines] - num_objects = len([x[0] for x in content if x[0] != 'DontCare']) - annotations['name'] = np.array([x[0] for x in content]) - num_gt = len(annotations['name']) - annotations['truncated'] = np.array([float(x[1]) for x in content]) - annotations['occluded'] = np.array([int(x[2]) for x in content]) - annotations['alpha'] = np.array([float(x[3]) for x in content]) - annotations['bbox'] = np.array([[float(info) for info in x[4:8]] - for x in content]).reshape(-1, 4) + content = [line.strip().split(" ") for line in lines] + num_objects = len([x[0] for x in content if x[0] != "DontCare"]) + annotations["name"] = np.array([x[0] for x in content]) + num_gt = len(annotations["name"]) + annotations["truncated"] = np.array([float(x[1]) for x in content]) + annotations["occluded"] = np.array([int(x[2]) for x in content]) + annotations["alpha"] = np.array([float(x[3]) for x in content]) + annotations["bbox"] = np.array( + [[float(info) for info in x[4:8]] for x in content] + ).reshape(-1, 4) # dimensions will convert hwl format to standard lhw(camera) format. 
- annotations['dimensions'] = np.array([[float(info) for info in x[8:11]] - for x in content - ]).reshape(-1, 3)[:, [2, 0, 1]] - annotations['location'] = np.array([[float(info) for info in x[11:14]] - for x in content]).reshape(-1, 3) - annotations['rotation_y'] = np.array([float(x[14]) - for x in content]).reshape(-1) + annotations["dimensions"] = np.array( + [[float(info) for info in x[8:11]] for x in content] + ).reshape(-1, 3)[:, [2, 0, 1]] + annotations["location"] = np.array( + [[float(info) for info in x[11:14]] for x in content] + ).reshape(-1, 3) + annotations["rotation_y"] = np.array([float(x[14]) for x in content]).reshape(-1) if len(content) != 0 and len(content[0]) == 16: # have score - annotations['score'] = np.array([float(x[15]) for x in content]) + annotations["score"] = np.array([float(x[15]) for x in content]) else: - annotations['score'] = np.zeros((annotations['bbox'].shape[0], )) + annotations["score"] = np.zeros((annotations["bbox"].shape[0],)) index = list(range(num_objects)) + [-1] * (num_gt - num_objects) - annotations['index'] = np.array(index, dtype=np.int32) - annotations['group_ids'] = np.arange(num_gt, dtype=np.int32) + annotations["index"] = np.array(index, dtype=np.int32) + annotations["group_ids"] = np.arange(num_gt, dtype=np.int32) return annotations def _extend_matrix(mat): - mat = np.concatenate([mat, np.array([[0., 0., 0., 1.]])], axis=0) + mat = np.concatenate([mat, np.array([[0.0, 0.0, 0.0, 1.0]])], axis=0) return mat -def get_kitti_image_info(path, - training=True, - label_info=True, - velodyne=False, - calib=False, - image_ids=7481, - extend_matrix=True, - num_worker=8, - relative_path=True, - with_imageshape=True): +def get_kitti_image_info( + path, + training=True, + label_info=True, + velodyne=False, + calib=False, + image_ids=7481, + extend_matrix=True, + num_worker=8, + relative_path=True, + with_imageshape=True, +): """ KITTI annotation format version 2: { @@ -182,77 +233,81 @@ def get_kitti_image_info(path, def map_func(idx): info = {} - pc_info = {'num_features': 4} + pc_info = {"num_features": 4} calib_info = {} - image_info = {'image_idx': idx} + image_info = {"image_idx": idx} annotations = None if velodyne: - pc_info['velodyne_path'] = get_velodyne_path( - idx, path, training, relative_path) - image_info['image_path'] = get_image_path(idx, path, training, - relative_path) + pc_info["velodyne_path"] = get_velodyne_path( + idx, path, training, relative_path + ) + image_info["image_path"] = get_image_path(idx, path, training, relative_path) if with_imageshape: - img_path = image_info['image_path'] + img_path = image_info["image_path"] if relative_path: img_path = str(root_path / img_path) - image_info['image_shape'] = np.array( - io.imread(img_path).shape[:2], dtype=np.int32) + image_info["image_shape"] = np.array( + io.imread(img_path).shape[:2], dtype=np.int32 + ) if label_info: label_path = get_label_path(idx, path, training, relative_path) if relative_path: label_path = str(root_path / label_path) annotations = get_label_anno(label_path) - info['image'] = image_info - info['point_cloud'] = pc_info + info["image"] = image_info + info["point_cloud"] = pc_info if calib: - calib_path = get_calib_path( - idx, path, training, relative_path=False) - with open(calib_path, 'r') as f: + calib_path = get_calib_path(idx, path, training, relative_path=False) + with open(calib_path, "r") as f: lines = f.readlines() - P0 = np.array([float(info) for info in lines[0].split(' ')[1:13] - ]).reshape([3, 4]) - P1 = np.array([float(info) for info in 
lines[1].split(' ')[1:13] - ]).reshape([3, 4]) - P2 = np.array([float(info) for info in lines[2].split(' ')[1:13] - ]).reshape([3, 4]) - P3 = np.array([float(info) for info in lines[3].split(' ')[1:13] - ]).reshape([3, 4]) + P0 = np.array([float(info) for info in lines[0].split(" ")[1:13]]).reshape( + [3, 4] + ) + P1 = np.array([float(info) for info in lines[1].split(" ")[1:13]]).reshape( + [3, 4] + ) + P2 = np.array([float(info) for info in lines[2].split(" ")[1:13]]).reshape( + [3, 4] + ) + P3 = np.array([float(info) for info in lines[3].split(" ")[1:13]]).reshape( + [3, 4] + ) if extend_matrix: P0 = _extend_matrix(P0) P1 = _extend_matrix(P1) P2 = _extend_matrix(P2) P3 = _extend_matrix(P3) - R0_rect = np.array([ - float(info) for info in lines[4].split(' ')[1:10] - ]).reshape([3, 3]) + R0_rect = np.array( + [float(info) for info in lines[4].split(" ")[1:10]] + ).reshape([3, 3]) if extend_matrix: rect_4x4 = np.zeros([4, 4], dtype=R0_rect.dtype) - rect_4x4[3, 3] = 1. + rect_4x4[3, 3] = 1.0 rect_4x4[:3, :3] = R0_rect else: rect_4x4 = R0_rect - Tr_velo_to_cam = np.array([ - float(info) for info in lines[5].split(' ')[1:13] - ]).reshape([3, 4]) - Tr_imu_to_velo = np.array([ - float(info) for info in lines[6].split(' ')[1:13] - ]).reshape([3, 4]) + Tr_velo_to_cam = np.array( + [float(info) for info in lines[5].split(" ")[1:13]] + ).reshape([3, 4]) + Tr_imu_to_velo = np.array( + [float(info) for info in lines[6].split(" ")[1:13]] + ).reshape([3, 4]) if extend_matrix: Tr_velo_to_cam = _extend_matrix(Tr_velo_to_cam) Tr_imu_to_velo = _extend_matrix(Tr_imu_to_velo) - calib_info['P0'] = P0 - calib_info['P1'] = P1 - calib_info['P2'] = P2 - calib_info['P3'] = P3 - calib_info['R0_rect'] = rect_4x4 - calib_info['Tr_velo_to_cam'] = Tr_velo_to_cam - calib_info['Tr_imu_to_velo'] = Tr_imu_to_velo - info['calib'] = calib_info + calib_info["P0"] = P0 + calib_info["P1"] = P1 + calib_info["P2"] = P2 + calib_info["P3"] = P3 + calib_info["R0_rect"] = rect_4x4 + calib_info["Tr_velo_to_cam"] = Tr_velo_to_cam + calib_info["Tr_imu_to_velo"] = Tr_imu_to_velo + info["calib"] = calib_info if annotations is not None: - info['annos'] = annotations + info["annos"] = annotations add_difficulty_to_annos(info) return info @@ -262,18 +317,20 @@ def map_func(idx): return list(image_infos) -def get_waymo_image_info(path, - training=True, - label_info=True, - velodyne=False, - calib=False, - pose=False, - image_ids=7481, - extend_matrix=True, - num_worker=8, - relative_path=True, - with_imageshape=True, - max_sweeps=5): +def get_waymo_image_info( + path, + training=True, + label_info=True, + velodyne=False, + calib=False, + pose=False, + image_ids=7481, + extend_matrix=True, + num_worker=8, + relative_path=True, + with_imageshape=True, + max_sweeps=5, +): """ Waymo annotation format version like KITTI: { @@ -308,97 +365,104 @@ def get_waymo_image_info(path, def map_func(idx): info = {} - pc_info = {'num_features': 6} + pc_info = {"num_features": 6} calib_info = {} - image_info = {'image_idx': idx} + image_info = {"image_idx": idx} annotations = None if velodyne: - pc_info['velodyne_path'] = get_velodyne_path( - idx, path, training, relative_path, use_prefix_id=True) + pc_info["velodyne_path"] = get_velodyne_path( + idx, path, training, relative_path, use_prefix_id=True + ) points = np.fromfile( - Path(path) / pc_info['velodyne_path'], dtype=np.float32) - points = np.copy(points).reshape(-1, pc_info['num_features']) - info['timestamp'] = np.int64(points[0, -1]) + Path(path) / pc_info["velodyne_path"], dtype=np.float32 + ) + points 
= np.copy(points).reshape(-1, pc_info["num_features"]) + info["timestamp"] = np.int64(points[0, -1]) # values of the last dim are all the timestamp - image_info['image_path'] = get_image_path( - idx, - path, - training, - relative_path, - info_type='image_0', - use_prefix_id=True) + image_info["image_path"] = get_image_path( + idx, path, training, relative_path, info_type="image_0", use_prefix_id=True + ) if with_imageshape: - img_path = image_info['image_path'] + img_path = image_info["image_path"] if relative_path: img_path = str(root_path / img_path) - image_info['image_shape'] = np.array( - io.imread(img_path).shape[:2], dtype=np.int32) + image_info["image_shape"] = np.array( + io.imread(img_path).shape[:2], dtype=np.int32 + ) if label_info: label_path = get_label_path( idx, path, training, relative_path, - info_type='label_all', - use_prefix_id=True) + info_type="label_all", + use_prefix_id=True, + ) if relative_path: label_path = str(root_path / label_path) annotations = get_label_anno(label_path) - info['image'] = image_info - info['point_cloud'] = pc_info + info["image"] = image_info + info["point_cloud"] = pc_info if calib: calib_path = get_calib_path( - idx, path, training, relative_path=False, use_prefix_id=True) - with open(calib_path, 'r') as f: + idx, path, training, relative_path=False, use_prefix_id=True + ) + with open(calib_path, "r") as f: lines = f.readlines() - P0 = np.array([float(info) for info in lines[0].split(' ')[1:13] - ]).reshape([3, 4]) - P1 = np.array([float(info) for info in lines[1].split(' ')[1:13] - ]).reshape([3, 4]) - P2 = np.array([float(info) for info in lines[2].split(' ')[1:13] - ]).reshape([3, 4]) - P3 = np.array([float(info) for info in lines[3].split(' ')[1:13] - ]).reshape([3, 4]) - P4 = np.array([float(info) for info in lines[4].split(' ')[1:13] - ]).reshape([3, 4]) + P0 = np.array([float(info) for info in lines[0].split(" ")[1:13]]).reshape( + [3, 4] + ) + P1 = np.array([float(info) for info in lines[1].split(" ")[1:13]]).reshape( + [3, 4] + ) + P2 = np.array([float(info) for info in lines[2].split(" ")[1:13]]).reshape( + [3, 4] + ) + P3 = np.array([float(info) for info in lines[3].split(" ")[1:13]]).reshape( + [3, 4] + ) + P4 = np.array([float(info) for info in lines[4].split(" ")[1:13]]).reshape( + [3, 4] + ) if extend_matrix: P0 = _extend_matrix(P0) P1 = _extend_matrix(P1) P2 = _extend_matrix(P2) P3 = _extend_matrix(P3) P4 = _extend_matrix(P4) - R0_rect = np.array([ - float(info) for info in lines[5].split(' ')[1:10] - ]).reshape([3, 3]) + R0_rect = np.array( + [float(info) for info in lines[5].split(" ")[1:10]] + ).reshape([3, 3]) if extend_matrix: rect_4x4 = np.zeros([4, 4], dtype=R0_rect.dtype) - rect_4x4[3, 3] = 1. 
+ rect_4x4[3, 3] = 1.0 rect_4x4[:3, :3] = R0_rect else: rect_4x4 = R0_rect - Tr_velo_to_cam = np.array([ - float(info) for info in lines[6].split(' ')[1:13] - ]).reshape([3, 4]) + Tr_velo_to_cam = np.array( + [float(info) for info in lines[6].split(" ")[1:13]] + ).reshape([3, 4]) if extend_matrix: Tr_velo_to_cam = _extend_matrix(Tr_velo_to_cam) - calib_info['P0'] = P0 - calib_info['P1'] = P1 - calib_info['P2'] = P2 - calib_info['P3'] = P3 - calib_info['P4'] = P4 - calib_info['R0_rect'] = rect_4x4 - calib_info['Tr_velo_to_cam'] = Tr_velo_to_cam - info['calib'] = calib_info + calib_info["P0"] = P0 + calib_info["P1"] = P1 + calib_info["P2"] = P2 + calib_info["P3"] = P3 + calib_info["P4"] = P4 + calib_info["R0_rect"] = rect_4x4 + calib_info["Tr_velo_to_cam"] = Tr_velo_to_cam + info["calib"] = calib_info if pose: pose_path = get_pose_path( - idx, path, training, relative_path=False, use_prefix_id=True) - info['pose'] = np.loadtxt(pose_path) + idx, path, training, relative_path=False, use_prefix_id=True + ) + info["pose"] = np.loadtxt(pose_path) if annotations is not None: - info['annos'] = annotations - info['annos']['camera_id'] = info['annos'].pop('score') + info["annos"] = annotations + info["annos"]["camera_id"] = info["annos"].pop("score") add_difficulty_to_annos(info) sweeps = [] @@ -406,32 +470,29 @@ def map_func(idx): while len(sweeps) < max_sweeps: prev_info = {} prev_idx -= 1 - prev_info['velodyne_path'] = get_velodyne_path( + prev_info["velodyne_path"] = get_velodyne_path( prev_idx, path, training, relative_path, exist_check=False, - use_prefix_id=True) - if_prev_exists = osp.exists( - Path(path) / prev_info['velodyne_path']) + use_prefix_id=True, + ) + if_prev_exists = osp.exists(Path(path) / prev_info["velodyne_path"]) if if_prev_exists: prev_points = np.fromfile( - Path(path) / prev_info['velodyne_path'], dtype=np.float32) - prev_points = np.copy(prev_points).reshape( - -1, pc_info['num_features']) - prev_info['timestamp'] = np.int64(prev_points[0, -1]) + Path(path) / prev_info["velodyne_path"], dtype=np.float32 + ) + prev_points = np.copy(prev_points).reshape(-1, pc_info["num_features"]) + prev_info["timestamp"] = np.int64(prev_points[0, -1]) prev_pose_path = get_pose_path( - prev_idx, - path, - training, - relative_path=False, - use_prefix_id=True) - prev_info['pose'] = np.loadtxt(prev_pose_path) + prev_idx, path, training, relative_path=False, use_prefix_id=True + ) + prev_info["pose"] = np.loadtxt(prev_pose_path) sweeps.append(prev_info) else: break - info['sweeps'] = sweeps + info["sweeps"] = sweeps return info @@ -444,45 +505,48 @@ def map_func(idx): def kitti_anno_to_label_file(annos, folder): folder = Path(folder) for anno in annos: - image_idx = anno['metadata']['image_idx'] + image_idx = anno["metadata"]["image_idx"] label_lines = [] - for j in range(anno['bbox'].shape[0]): + for j in range(anno["bbox"].shape[0]): label_dict = { - 'name': anno['name'][j], - 'alpha': anno['alpha'][j], - 'bbox': anno['bbox'][j], - 'location': anno['location'][j], - 'dimensions': anno['dimensions'][j], - 'rotation_y': anno['rotation_y'][j], - 'score': anno['score'][j], + "name": anno["name"][j], + "alpha": anno["alpha"][j], + "bbox": anno["bbox"][j], + "location": anno["location"][j], + "dimensions": anno["dimensions"][j], + "rotation_y": anno["rotation_y"][j], + "score": anno["score"][j], } label_line = kitti_result_line(label_dict) label_lines.append(label_line) - label_file = folder / f'{get_image_index_str(image_idx)}.txt' - label_str = '\n'.join(label_lines) - with open(label_file, 
'w') as f: + label_file = folder / f"{get_image_index_str(image_idx)}.txt" + label_str = "\n".join(label_lines) + with open(label_file, "w") as f: f.write(label_str) def add_difficulty_to_annos(info): - min_height = [40, 25, - 25] # minimum height for evaluated groundtruth/detections + min_height = [40, 25, 25] # minimum height for evaluated groundtruth/detections max_occlusion = [ - 0, 1, 2 + 0, + 1, + 2, ] # maximum occlusion level of the groundtruth used for evaluation max_trunc = [ - 0.15, 0.3, 0.5 + 0.15, + 0.3, + 0.5, ] # maximum truncation level of the groundtruth used for evaluation - annos = info['annos'] - dims = annos['dimensions'] # lhw format - bbox = annos['bbox'] + annos = info["annos"] + dims = annos["dimensions"] # lhw format + bbox = annos["bbox"] height = bbox[:, 3] - bbox[:, 1] - occlusion = annos['occluded'] - truncation = annos['truncated'] + occlusion = annos["occluded"] + truncation = annos["truncated"] diff = [] - easy_mask = np.ones((len(dims), ), dtype=np.bool) - moderate_mask = np.ones((len(dims), ), dtype=np.bool) - hard_mask = np.ones((len(dims), ), dtype=np.bool) + easy_mask = np.ones((len(dims),), dtype=np.bool) + moderate_mask = np.ones((len(dims),), dtype=np.bool) + hard_mask = np.ones((len(dims),), dtype=np.bool) i = 0 for h, o, t in zip(height, occlusion, truncation): if o > max_occlusion[0] or h <= min_height[0] or t > max_trunc[0]: @@ -505,50 +569,51 @@ def add_difficulty_to_annos(info): diff.append(2) else: diff.append(-1) - annos['difficulty'] = np.array(diff, np.int32) + annos["difficulty"] = np.array(diff, np.int32) return diff def kitti_result_line(result_dict, precision=4): - prec_float = '{' + ':.{}f'.format(precision) + '}' + prec_float = "{" + ":.{}f".format(precision) + "}" res_line = [] - all_field_default = OrderedDict([ - ('name', None), - ('truncated', -1), - ('occluded', -1), - ('alpha', -10), - ('bbox', None), - ('dimensions', [-1, -1, -1]), - ('location', [-1000, -1000, -1000]), - ('rotation_y', -10), - ('score', 0.0), - ]) + all_field_default = OrderedDict( + [ + ("name", None), + ("truncated", -1), + ("occluded", -1), + ("alpha", -10), + ("bbox", None), + ("dimensions", [-1, -1, -1]), + ("location", [-1000, -1000, -1000]), + ("rotation_y", -10), + ("score", 0.0), + ] + ) res_dict = [(key, None) for key, val in all_field_default.items()] res_dict = OrderedDict(res_dict) for key, val in result_dict.items(): if all_field_default[key] is None and val is None: - raise ValueError('you must specify a value for {}'.format(key)) + raise ValueError("you must specify a value for {}".format(key)) res_dict[key] = val for key, val in res_dict.items(): - if key == 'name': + if key == "name": res_line.append(val) - elif key in ['truncated', 'alpha', 'rotation_y', 'score']: + elif key in ["truncated", "alpha", "rotation_y", "score"]: if val is None: res_line.append(str(all_field_default[key])) else: res_line.append(prec_float.format(val)) - elif key == 'occluded': + elif key == "occluded": if val is None: res_line.append(str(all_field_default[key])) else: - res_line.append('{}'.format(val)) - elif key in ['bbox', 'dimensions', 'location']: + res_line.append("{}".format(val)) + elif key in ["bbox", "dimensions", "location"]: if val is None: res_line += [str(v) for v in all_field_default[key]] else: res_line += [prec_float.format(v) for v in val] else: - raise ValueError('unknown key. supported key:{}'.format( - res_dict.keys())) - return ' '.join(res_line) + raise ValueError("unknown key. 
supported key:{}".format(res_dict.keys())) + return " ".join(res_line) diff --git a/tools/data_converter/lyft_converter.py b/tools/data_converter/lyft_converter.py index 1fc1555..685bd1b 100644 --- a/tools/data_converter/lyft_converter.py +++ b/tools/data_converter/lyft_converter.py @@ -1,24 +1,30 @@ # Copyright (c) OpenMMLab. All rights reserved. -import mmcv -import numpy as np import os from logging import warning -from lyft_dataset_sdk.lyftdataset import LyftDataset as Lyft from os import path as osp -from pyquaternion import Quaternion +import mmcv +import numpy as np +from lyft_dataset_sdk.lyftdataset import LyftDataset as Lyft from mmdet3d.datasets import LyftDataset -from .nuscenes_converter import (get_2d_boxes, get_available_scenes, - obtain_sensor2top) +from pyquaternion import Quaternion -lyft_categories = ('car', 'truck', 'bus', 'emergency_vehicle', 'other_vehicle', - 'motorcycle', 'bicycle', 'pedestrian', 'animal') +from .nuscenes_converter import get_2d_boxes, get_available_scenes, obtain_sensor2top +lyft_categories = ( + "car", + "truck", + "bus", + "emergency_vehicle", + "other_vehicle", + "motorcycle", + "bicycle", + "pedestrian", + "animal", +) -def create_lyft_infos(root_path, - info_prefix, - version='v1.01-train', - max_sweeps=10): + +def create_lyft_infos(root_path, info_prefix, version="v1.01-train", max_sweeps=10): """Create info file of lyft dataset. Given the raw data, generate its related info file in pkl format. @@ -34,67 +40,69 @@ def create_lyft_infos(root_path, lyft = Lyft( data_path=osp.join(root_path, version), json_path=osp.join(root_path, version, version), - verbose=True) - available_vers = ['v1.01-train', 'v1.01-test'] + verbose=True, + ) + available_vers = ["v1.01-train", "v1.01-test"] assert version in available_vers - if version == 'v1.01-train': - train_scenes = mmcv.list_from_file('data/lyft/train.txt') - val_scenes = mmcv.list_from_file('data/lyft/val.txt') - elif version == 'v1.01-test': - train_scenes = mmcv.list_from_file('data/lyft/test.txt') + if version == "v1.01-train": + train_scenes = mmcv.list_from_file("data/lyft/train.txt") + val_scenes = mmcv.list_from_file("data/lyft/val.txt") + elif version == "v1.01-test": + train_scenes = mmcv.list_from_file("data/lyft/test.txt") val_scenes = [] else: - raise ValueError('unknown') + raise ValueError("unknown") # filter existing scenes. 
available_scenes = get_available_scenes(lyft) - available_scene_names = [s['name'] for s in available_scenes] - train_scenes = list( - filter(lambda x: x in available_scene_names, train_scenes)) + available_scene_names = [s["name"] for s in available_scenes] + train_scenes = list(filter(lambda x: x in available_scene_names, train_scenes)) val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes)) - train_scenes = set([ - available_scenes[available_scene_names.index(s)]['token'] - for s in train_scenes - ]) - val_scenes = set([ - available_scenes[available_scene_names.index(s)]['token'] - for s in val_scenes - ]) + train_scenes = set( + [ + available_scenes[available_scene_names.index(s)]["token"] + for s in train_scenes + ] + ) + val_scenes = set( + [available_scenes[available_scene_names.index(s)]["token"] for s in val_scenes] + ) - test = 'test' in version + test = "test" in version if test: - print(f'test scene: {len(train_scenes)}') + print(f"test scene: {len(train_scenes)}") else: - print(f'train scene: {len(train_scenes)}, \ - val scene: {len(val_scenes)}') + print( + f"train scene: {len(train_scenes)}, \ + val scene: {len(val_scenes)}" + ) train_lyft_infos, val_lyft_infos = _fill_trainval_infos( - lyft, train_scenes, val_scenes, test, max_sweeps=max_sweeps) + lyft, train_scenes, val_scenes, test, max_sweeps=max_sweeps + ) metadata = dict(version=version) if test: - print(f'test sample: {len(train_lyft_infos)}') + print(f"test sample: {len(train_lyft_infos)}") data = dict(infos=train_lyft_infos, metadata=metadata) - info_name = f'{info_prefix}_infos_test' - info_path = osp.join(root_path, f'{info_name}.pkl') + info_name = f"{info_prefix}_infos_test" + info_path = osp.join(root_path, f"{info_name}.pkl") mmcv.dump(data, info_path) else: - print(f'train sample: {len(train_lyft_infos)}, \ - val sample: {len(val_lyft_infos)}') + print( + f"train sample: {len(train_lyft_infos)}, \ + val sample: {len(val_lyft_infos)}" + ) data = dict(infos=train_lyft_infos, metadata=metadata) - train_info_name = f'{info_prefix}_infos_train' - info_path = osp.join(root_path, f'{train_info_name}.pkl') + train_info_name = f"{info_prefix}_infos_train" + info_path = osp.join(root_path, f"{train_info_name}.pkl") mmcv.dump(data, info_path) - data['infos'] = val_lyft_infos - val_info_name = f'{info_prefix}_infos_val' - info_val_path = osp.join(root_path, f'{val_info_name}.pkl') + data["infos"] = val_lyft_infos + val_info_name = f"{info_prefix}_infos_val" + info_val_path = osp.join(root_path, f"{val_info_name}.pkl") mmcv.dump(data, info_val_path) -def _fill_trainval_infos(lyft, - train_scenes, - val_scenes, - test=False, - max_sweeps=10): +def _fill_trainval_infos(lyft, train_scenes, val_scenes, test=False, max_sweeps=10): """Generate the train/val infos from the raw data. 
Args: @@ -113,78 +121,79 @@ def _fill_trainval_infos(lyft, val_lyft_infos = [] for sample in mmcv.track_iter_progress(lyft.sample): - lidar_token = sample['data']['LIDAR_TOP'] - sd_rec = lyft.get('sample_data', sample['data']['LIDAR_TOP']) - cs_record = lyft.get('calibrated_sensor', - sd_rec['calibrated_sensor_token']) - pose_record = lyft.get('ego_pose', sd_rec['ego_pose_token']) + lidar_token = sample["data"]["LIDAR_TOP"] + sd_rec = lyft.get("sample_data", sample["data"]["LIDAR_TOP"]) + cs_record = lyft.get("calibrated_sensor", sd_rec["calibrated_sensor_token"]) + pose_record = lyft.get("ego_pose", sd_rec["ego_pose_token"]) abs_lidar_path, boxes, _ = lyft.get_sample_data(lidar_token) # nuScenes devkit returns more convenient relative paths while # lyft devkit returns absolute paths abs_lidar_path = str(abs_lidar_path) # absolute path - lidar_path = abs_lidar_path.split(f'{os.getcwd()}/')[-1] + lidar_path = abs_lidar_path.split(f"{os.getcwd()}/")[-1] # relative path mmcv.check_file_exist(lidar_path) info = { - 'lidar_path': lidar_path, - 'token': sample['token'], - 'sweeps': [], - 'cams': dict(), - 'lidar2ego_translation': cs_record['translation'], - 'lidar2ego_rotation': cs_record['rotation'], - 'ego2global_translation': pose_record['translation'], - 'ego2global_rotation': pose_record['rotation'], - 'timestamp': sample['timestamp'], + "lidar_path": lidar_path, + "token": sample["token"], + "sweeps": [], + "cams": dict(), + "lidar2ego_translation": cs_record["translation"], + "lidar2ego_rotation": cs_record["rotation"], + "ego2global_translation": pose_record["translation"], + "ego2global_rotation": pose_record["rotation"], + "timestamp": sample["timestamp"], } - l2e_r = info['lidar2ego_rotation'] - l2e_t = info['lidar2ego_translation'] - e2g_r = info['ego2global_rotation'] - e2g_t = info['ego2global_translation'] + l2e_r = info["lidar2ego_rotation"] + l2e_t = info["lidar2ego_translation"] + e2g_r = info["ego2global_rotation"] + e2g_t = info["ego2global_translation"] l2e_r_mat = Quaternion(l2e_r).rotation_matrix e2g_r_mat = Quaternion(e2g_r).rotation_matrix # obtain 6 image's information per frame camera_types = [ - 'CAM_FRONT', - 'CAM_FRONT_RIGHT', - 'CAM_FRONT_LEFT', - 'CAM_BACK', - 'CAM_BACK_LEFT', - 'CAM_BACK_RIGHT', + "CAM_FRONT", + "CAM_FRONT_RIGHT", + "CAM_FRONT_LEFT", + "CAM_BACK", + "CAM_BACK_LEFT", + "CAM_BACK_RIGHT", ] for cam in camera_types: - cam_token = sample['data'][cam] + cam_token = sample["data"][cam] cam_path, _, cam_intrinsic = lyft.get_sample_data(cam_token) - cam_info = obtain_sensor2top(lyft, cam_token, l2e_t, l2e_r_mat, - e2g_t, e2g_r_mat, cam) + cam_info = obtain_sensor2top( + lyft, cam_token, l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, cam + ) cam_info.update(cam_intrinsic=cam_intrinsic) - info['cams'].update({cam: cam_info}) + info["cams"].update({cam: cam_info}) # obtain sweeps for a single key-frame - sd_rec = lyft.get('sample_data', sample['data']['LIDAR_TOP']) + sd_rec = lyft.get("sample_data", sample["data"]["LIDAR_TOP"]) sweeps = [] while len(sweeps) < max_sweeps: - if not sd_rec['prev'] == '': - sweep = obtain_sensor2top(lyft, sd_rec['prev'], l2e_t, - l2e_r_mat, e2g_t, e2g_r_mat, 'lidar') + if not sd_rec["prev"] == "": + sweep = obtain_sensor2top( + lyft, sd_rec["prev"], l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, "lidar" + ) sweeps.append(sweep) - sd_rec = lyft.get('sample_data', sd_rec['prev']) + sd_rec = lyft.get("sample_data", sd_rec["prev"]) else: break - info['sweeps'] = sweeps + info["sweeps"] = sweeps # obtain annotation if not test: annotations = [ - 
lyft.get('sample_annotation', token) - for token in sample['anns'] + lyft.get("sample_annotation", token) for token in sample["anns"] ] locs = np.array([b.center for b in boxes]).reshape(-1, 3) dims = np.array([b.wlh for b in boxes]).reshape(-1, 3) - rots = np.array([b.orientation.yaw_pitch_roll[0] - for b in boxes]).reshape(-1, 1) + rots = np.array([b.orientation.yaw_pitch_roll[0] for b in boxes]).reshape( + -1, 1 + ) names = [b.name for b in boxes] for i in range(len(names)): @@ -195,15 +204,14 @@ def _fill_trainval_infos(lyft, # we need to convert rot to SECOND format. gt_boxes = np.concatenate([locs, dims, -rots - np.pi / 2], axis=1) assert len(gt_boxes) == len( - annotations), f'{len(gt_boxes)}, {len(annotations)}' - info['gt_boxes'] = gt_boxes - info['gt_names'] = names - info['num_lidar_pts'] = np.array( - [a['num_lidar_pts'] for a in annotations]) - info['num_radar_pts'] = np.array( - [a['num_radar_pts'] for a in annotations]) + annotations + ), f"{len(gt_boxes)}, {len(annotations)}" + info["gt_boxes"] = gt_boxes + info["gt_names"] = names + info["num_lidar_pts"] = np.array([a["num_lidar_pts"] for a in annotations]) + info["num_radar_pts"] = np.array([a["num_radar_pts"] for a in annotations]) - if sample['scene_token'] in train_scenes: + if sample["scene_token"] in train_scenes: train_lyft_infos.append(info) else: val_lyft_infos.append(info) @@ -219,23 +227,26 @@ def export_2d_annotation(root_path, info_path, version): info_path (str): Path of the info file. version (str): Dataset version. """ - warning.warn('DeprecationWarning: 2D annotations are not used on the ' - 'Lyft dataset. The function export_2d_annotation will be ' - 'deprecated.') + warning.warn( + "DeprecationWarning: 2D annotations are not used on the " + "Lyft dataset. The function export_2d_annotation will be " + "deprecated." 
+ ) # get bbox annotations for camera camera_types = [ - 'CAM_FRONT', - 'CAM_FRONT_RIGHT', - 'CAM_FRONT_LEFT', - 'CAM_BACK', - 'CAM_BACK_LEFT', - 'CAM_BACK_RIGHT', + "CAM_FRONT", + "CAM_FRONT_RIGHT", + "CAM_FRONT_LEFT", + "CAM_BACK", + "CAM_BACK_LEFT", + "CAM_BACK_RIGHT", ] - lyft_infos = mmcv.load(info_path)['infos'] + lyft_infos = mmcv.load(info_path)["infos"] lyft = Lyft( data_path=osp.join(root_path, version), json_path=osp.join(root_path, version, version), - verbose=True) + verbose=True, + ) # info_2d_list = [] cat2Ids = [ dict(id=lyft_categories.index(cat_name), name=cat_name) @@ -245,24 +256,27 @@ def export_2d_annotation(root_path, info_path, version): coco_2d_dict = dict(annotations=[], images=[], categories=cat2Ids) for info in mmcv.track_iter_progress(lyft_infos): for cam in camera_types: - cam_info = info['cams'][cam] + cam_info = info["cams"][cam] coco_infos = get_2d_boxes( lyft, - cam_info['sample_data_token'], - visibilities=['', '1', '2', '3', '4']) - (height, width, _) = mmcv.imread(cam_info['data_path']).shape - coco_2d_dict['images'].append( + cam_info["sample_data_token"], + visibilities=["", "1", "2", "3", "4"], + ) + (height, width, _) = mmcv.imread(cam_info["data_path"]).shape + coco_2d_dict["images"].append( dict( - file_name=cam_info['data_path'], - id=cam_info['sample_data_token'], + file_name=cam_info["data_path"], + id=cam_info["sample_data_token"], width=width, - height=height)) + height=height, + ) + ) for coco_info in coco_infos: if coco_info is None: continue # add an empty key for coco format - coco_info['segmentation'] = [] - coco_info['id'] = coco_ann_id - coco_2d_dict['annotations'].append(coco_info) + coco_info["segmentation"] = [] + coco_info["id"] = coco_ann_id + coco_2d_dict["annotations"].append(coco_info) coco_ann_id += 1 - mmcv.dump(coco_2d_dict, f'{info_path[:-4]}.coco.json') + mmcv.dump(coco_2d_dict, f"{info_path[:-4]}.coco.json") diff --git a/tools/data_converter/lyft_data_fixer.py b/tools/data_converter/lyft_data_fixer.py index 4207049..425a0d1 100644 --- a/tools/data_converter/lyft_data_fixer.py +++ b/tools/data_converter/lyft_data_fixer.py @@ -1,38 +1,40 @@ # Copyright (c) OpenMMLab. All rights reserved. import argparse -import numpy as np import os +import numpy as np + -def fix_lyft(root_folder='./data/lyft', version='v1.01'): +def fix_lyft(root_folder="./data/lyft", version="v1.01"): # refer to https://www.kaggle.com/c/3d-object-detection-for-autonomous-vehicles/discussion/110000 # noqa - lidar_path = 'lidar/host-a011_lidar1_1233090652702363606.bin' - root_folder = os.path.join(root_folder, f'{version}-train') + lidar_path = "lidar/host-a011_lidar1_1233090652702363606.bin" + root_folder = os.path.join(root_folder, f"{version}-train") lidar_path = os.path.join(root_folder, lidar_path) - assert os.path.isfile(lidar_path), f'Please download the complete Lyft ' \ - f'dataset and make sure {lidar_path} is present.' + assert os.path.isfile(lidar_path), ( + f"Please download the complete Lyft " + f"dataset and make sure {lidar_path} is present." 
+ ) points = np.fromfile(lidar_path, dtype=np.float32, count=-1) try: points.reshape([-1, 5]) - print(f'This fix is not required for version {version}.') + print(f"This fix is not required for version {version}.") except ValueError: - new_points = np.array(list(points) + [100.0, 1.0], dtype='float32') + new_points = np.array(list(points) + [100.0, 1.0], dtype="float32") new_points.tofile(lidar_path) - print(f'Appended 100.0 and 1.0 to the end of {lidar_path}.') + print(f"Appended 100.0 and 1.0 to the end of {lidar_path}.") -parser = argparse.ArgumentParser(description='Lyft dataset fixer arg parser') +parser = argparse.ArgumentParser(description="Lyft dataset fixer arg parser") parser.add_argument( - '--root-folder', + "--root-folder", type=str, - default='./data/lyft', - help='specify the root path of Lyft dataset') + default="./data/lyft", + help="specify the root path of Lyft dataset", +) parser.add_argument( - '--version', - type=str, - default='v1.01', - help='specify Lyft dataset version') + "--version", type=str, default="v1.01", help="specify Lyft dataset version" +) args = parser.parse_args() -if __name__ == '__main__': +if __name__ == "__main__": fix_lyft(root_folder=args.root_folder, version=args.version) diff --git a/tools/data_converter/nuimage_converter.py b/tools/data_converter/nuimage_converter.py index 92be1de..e9cd79c 100644 --- a/tools/data_converter/nuimage_converter.py +++ b/tools/data_converter/nuimage_converter.py @@ -1,61 +1,75 @@ # Copyright (c) OpenMMLab. All rights reserved. import argparse import base64 +from os import path as osp + import mmcv import numpy as np from nuimages import NuImages from nuimages.utils.utils import mask_decode, name_to_index_mapping -from os import path as osp -nus_categories = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle', - 'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone', - 'barrier') +nus_categories = ( + "car", + "truck", + "trailer", + "bus", + "construction_vehicle", + "bicycle", + "motorcycle", + "pedestrian", + "traffic_cone", + "barrier", +) NAME_MAPPING = { - 'movable_object.barrier': 'barrier', - 'vehicle.bicycle': 'bicycle', - 'vehicle.bus.bendy': 'bus', - 'vehicle.bus.rigid': 'bus', - 'vehicle.car': 'car', - 'vehicle.construction': 'construction_vehicle', - 'vehicle.motorcycle': 'motorcycle', - 'human.pedestrian.adult': 'pedestrian', - 'human.pedestrian.child': 'pedestrian', - 'human.pedestrian.construction_worker': 'pedestrian', - 'human.pedestrian.police_officer': 'pedestrian', - 'movable_object.trafficcone': 'traffic_cone', - 'vehicle.trailer': 'trailer', - 'vehicle.truck': 'truck', + "movable_object.barrier": "barrier", + "vehicle.bicycle": "bicycle", + "vehicle.bus.bendy": "bus", + "vehicle.bus.rigid": "bus", + "vehicle.car": "car", + "vehicle.construction": "construction_vehicle", + "vehicle.motorcycle": "motorcycle", + "human.pedestrian.adult": "pedestrian", + "human.pedestrian.child": "pedestrian", + "human.pedestrian.construction_worker": "pedestrian", + "human.pedestrian.police_officer": "pedestrian", + "movable_object.trafficcone": "traffic_cone", + "vehicle.trailer": "trailer", + "vehicle.truck": "truck", } def parse_args(): - parser = argparse.ArgumentParser(description='Data converter arg parser') + parser = argparse.ArgumentParser(description="Data converter arg parser") parser.add_argument( - '--data-root', + "--data-root", type=str, - default='./data/nuimages', - help='specify the root path of dataset') + default="./data/nuimages", + help="specify the root path of dataset", + ) 
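
The fixer above targets one Lyft point-cloud file that is known to be truncated: each point is stored as five float32 values, so a file whose float count is not divisible by five cannot be reshaped to [-1, 5], and the script appends the two missing values (100.0 and 1.0). A small sketch of the same divisibility check, usable to scan a directory for other truncated files (the directory path is an assumption based on the default layout):

    import glob
    import os

    import numpy as np

    lidar_dir = "./data/lyft/v1.01-train/lidar"  # assumed default layout
    for path in glob.glob(os.path.join(lidar_dir, "*.bin")):
        n = np.fromfile(path, dtype=np.float32).size
        if n % 5 != 0:
            print(f"{path}: {n} floats, {5 - n % 5} value(s) short of a full point")
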
parser.add_argument( - '--version', + "--version", type=str, - nargs='+', - default=['v1.0-mini'], + nargs="+", + default=["v1.0-mini"], required=False, - help='specify the dataset version') + help="specify the dataset version", + ) parser.add_argument( - '--out-dir', + "--out-dir", type=str, - default='./data/nuimages/annotations/', + default="./data/nuimages/annotations/", required=False, - help='path to save the exported json') + help="path to save the exported json", + ) parser.add_argument( - '--nproc', + "--nproc", type=int, default=4, required=False, - help='workers to process semantic masks') - parser.add_argument('--extra-tag', type=str, default='nuimages') + help="workers to process semantic masks", + ) + parser.add_argument("--extra-tag", type=str, default="nuimages") args = parser.parse_args() return args @@ -70,39 +84,35 @@ def get_img_annos(nuim, img_info, cat2id, out_dir, data_root, seg_root): Returns: np.ndarray: Semantic segmentation map of the image """ - sd_token = img_info['token'] - image_id = img_info['id'] + sd_token = img_info["token"] + image_id = img_info["id"] name_to_index = name_to_index_mapping(nuim.category) # Get image data. - width, height = img_info['width'], img_info['height'] - semseg_mask = np.zeros((height, width)).astype('uint8') + width, height = img_info["width"], img_info["height"] + semseg_mask = np.zeros((height, width)).astype("uint8") # Load stuff / surface regions. - surface_anns = [ - o for o in nuim.surface_ann if o['sample_data_token'] == sd_token - ] + surface_anns = [o for o in nuim.surface_ann if o["sample_data_token"] == sd_token] # Draw stuff / surface regions. for ann in surface_anns: # Get color and mask. - category_token = ann['category_token'] - category_name = nuim.get('category', category_token)['name'] - if ann['mask'] is None: + category_token = ann["category_token"] + category_name = nuim.get("category", category_token)["name"] + if ann["mask"] is None: continue - mask = mask_decode(ann['mask']) + mask = mask_decode(ann["mask"]) # Draw mask for semantic segmentation. semseg_mask[mask == 1] = name_to_index[category_name] # Load object instances. - object_anns = [ - o for o in nuim.object_ann if o['sample_data_token'] == sd_token - ] + object_anns = [o for o in nuim.object_ann if o["sample_data_token"] == sd_token] # Sort by token to ensure that objects always appear in the # instance mask in the same order. - object_anns = sorted(object_anns, key=lambda k: k['token']) + object_anns = sorted(object_anns, key=lambda k: k["token"]) # Draw object instances. # The 0 index is reserved for background; thus, the instances @@ -110,11 +120,11 @@ def get_img_annos(nuim, img_info, cat2id, out_dir, data_root, seg_root): annotations = [] for i, ann in enumerate(object_anns, start=1): # Get color, box, mask and name. - category_token = ann['category_token'] - category_name = nuim.get('category', category_token)['name'] - if ann['mask'] is None: + category_token = ann["category_token"] + category_name = nuim.get("category", category_token)["name"] + if ann["mask"] is None: continue - mask = mask_decode(ann['mask']) + mask = mask_decode(ann["mask"]) # Draw masks for semantic segmentation and instance segmentation. 
semseg_mask[mask == 1] = name_to_index[category_name] @@ -123,12 +133,11 @@ def get_img_annos(nuim, img_info, cat2id, out_dir, data_root, seg_root): cat_name = NAME_MAPPING[category_name] cat_id = cat2id[cat_name] - x_min, y_min, x_max, y_max = ann['bbox'] + x_min, y_min, x_max, y_max = ann["bbox"] # encode calibrated instance mask mask_anno = dict() - mask_anno['counts'] = base64.b64decode( - ann['mask']['counts']).decode() - mask_anno['size'] = ann['mask']['size'] + mask_anno["counts"] = base64.b64decode(ann["mask"]["counts"]).decode() + mask_anno["size"] = ann["mask"]["size"] data_anno = dict( image_id=image_id, @@ -136,62 +145,64 @@ def get_img_annos(nuim, img_info, cat2id, out_dir, data_root, seg_root): bbox=[x_min, y_min, x_max - x_min, y_max - y_min], area=(x_max - x_min) * (y_max - y_min), segmentation=mask_anno, - iscrowd=0) + iscrowd=0, + ) annotations.append(data_anno) # after process, save semantic masks - img_filename = img_info['file_name'] - seg_filename = img_filename.replace('jpg', 'png') + img_filename = img_info["file_name"] + seg_filename = img_filename.replace("jpg", "png") seg_filename = osp.join(seg_root, seg_filename) mmcv.imwrite(semseg_mask, seg_filename) return annotations, np.max(semseg_mask) def export_nuim_to_coco(nuim, data_root, out_dir, extra_tag, version, nproc): - print('Process category information') + print("Process category information") categories = [] categories = [ dict(id=nus_categories.index(cat_name), name=cat_name) for cat_name in nus_categories ] - cat2id = {k_v['name']: k_v['id'] for k_v in categories} + cat2id = {k_v["name"]: k_v["id"] for k_v in categories} images = [] - print('Process image meta information...') + print("Process image meta information...") for sample_info in mmcv.track_iter_progress(nuim.sample_data): - if sample_info['is_key_frame']: + if sample_info["is_key_frame"]: img_idx = len(images) images.append( dict( id=img_idx, - token=sample_info['token'], - file_name=sample_info['filename'], - width=sample_info['width'], - height=sample_info['height'])) - - seg_root = f'{out_dir}semantic_masks' + token=sample_info["token"], + file_name=sample_info["filename"], + width=sample_info["width"], + height=sample_info["height"], + ) + ) + + seg_root = f"{out_dir}semantic_masks" mmcv.mkdir_or_exist(seg_root) - mmcv.mkdir_or_exist(osp.join(data_root, 'calibrated')) + mmcv.mkdir_or_exist(osp.join(data_root, "calibrated")) global process_img_anno def process_img_anno(img_info): - single_img_annos, max_cls_id = get_img_annos(nuim, img_info, cat2id, - out_dir, data_root, - seg_root) + single_img_annos, max_cls_id = get_img_annos( + nuim, img_info, cat2id, out_dir, data_root, seg_root + ) return single_img_annos, max_cls_id - print('Process img annotations...') + print("Process img annotations...") if nproc > 1: - outputs = mmcv.track_parallel_progress( - process_img_anno, images, nproc=nproc) + outputs = mmcv.track_parallel_progress(process_img_anno, images, nproc=nproc) else: outputs = [] for img_info in mmcv.track_iter_progress(images): outputs.append(process_img_anno(img_info)) # Determine the index of object annotation - print('Process annotation information...') + print("Process annotation information...") annotations = [] max_cls_ids = [] for single_img_annos, max_cls_id in outputs: @@ -201,14 +212,15 @@ def process_img_anno(img_info): annotations.append(img_anno) max_cls_id = max(max_cls_ids) - print(f'Max ID of class in the semantic map: {max_cls_id}') + print(f"Max ID of class in the semantic map: {max_cls_id}") coco_format_json = 
dict( - images=images, annotations=annotations, categories=categories) + images=images, annotations=annotations, categories=categories + ) mmcv.mkdir_or_exist(out_dir) - out_file = osp.join(out_dir, f'{extra_tag}_{version}.json') - print(f'Annotation dumped to {out_file}') + out_file = osp.join(out_dir, f"{extra_tag}_{version}.json") + print(f"Annotation dumped to {out_file}") mmcv.dump(coco_format_json, out_file) @@ -216,10 +228,12 @@ def main(): args = parse_args() for version in args.version: nuim = NuImages( - dataroot=args.data_root, version=version, verbose=True, lazy=True) - export_nuim_to_coco(nuim, args.data_root, args.out_dir, args.extra_tag, - version, args.nproc) + dataroot=args.data_root, version=version, verbose=True, lazy=True + ) + export_nuim_to_coco( + nuim, args.data_root, args.out_dir, args.extra_tag, version, args.nproc + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/tools/data_converter/nuscenes_converter.py b/tools/data_converter/nuscenes_converter.py index 42931ab..b3b9613 100644 --- a/tools/data_converter/nuscenes_converter.py +++ b/tools/data_converter/nuscenes_converter.py @@ -3,36 +3,54 @@ # --------------------------------------------- # Modified by Zhiqi Li # --------------------------------------------- -import mmcv -import numpy as np import os from collections import OrderedDict -from nuscenes.nuscenes import NuScenes -from nuscenes.utils.geometry_utils import view_points from os import path as osp -from pyquaternion import Quaternion -from shapely.geometry import MultiPoint, box from typing import List, Tuple, Union +import mmcv +import numpy as np from mmdet3d.core.bbox.box_np_ops import points_cam2img from mmdet3d.datasets import NuScenesDataset +from nuscenes.nuscenes import NuScenes +from nuscenes.utils.geometry_utils import view_points +from pyquaternion import Quaternion +from shapely.geometry import MultiPoint, box -nus_categories = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle', - 'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone', - 'barrier') - -nus_attributes = ('cycle.with_rider', 'cycle.without_rider', - 'pedestrian.moving', 'pedestrian.standing', - 'pedestrian.sitting_lying_down', 'vehicle.moving', - 'vehicle.parked', 'vehicle.stopped', 'None') - - -def create_nuscenes_infos(root_path, - out_path, - can_bus_root_path, - info_prefix, - version='v1.0-trainval', - max_sweeps=10): +nus_categories = ( + "car", + "truck", + "trailer", + "bus", + "construction_vehicle", + "bicycle", + "motorcycle", + "pedestrian", + "traffic_cone", + "barrier", +) + +nus_attributes = ( + "cycle.with_rider", + "cycle.without_rider", + "pedestrian.moving", + "pedestrian.standing", + "pedestrian.sitting_lying_down", + "vehicle.moving", + "vehicle.parked", + "vehicle.stopped", + "None", +) + + +def create_nuscenes_infos( + root_path, + out_path, + can_bus_root_path, + info_prefix, + version="v1.0-trainval", + max_sweeps=10, +): """Create info file of nuscene dataset. Given the raw data, generate its related info file in pkl format. @@ -45,68 +63,76 @@ def create_nuscenes_infos(root_path, max_sweeps (int): Max number of sweeps. 
Default: 10 """ - from nuscenes.nuscenes import NuScenes from nuscenes.can_bus.can_bus_api import NuScenesCanBus + from nuscenes.nuscenes import NuScenes + print(version, root_path) nusc = NuScenes(version=version, dataroot=root_path, verbose=True) nusc_can_bus = NuScenesCanBus(dataroot=can_bus_root_path) from nuscenes.utils import splits - available_vers = ['v1.0-trainval', 'v1.0-test', 'v1.0-mini'] + + available_vers = ["v1.0-trainval", "v1.0-test", "v1.0-mini"] assert version in available_vers - if version == 'v1.0-trainval': + if version == "v1.0-trainval": train_scenes = splits.train val_scenes = splits.val - elif version == 'v1.0-test': + elif version == "v1.0-test": train_scenes = splits.test val_scenes = [] - elif version == 'v1.0-mini': + elif version == "v1.0-mini": train_scenes = splits.mini_train val_scenes = splits.mini_val else: - raise ValueError('unknown') + raise ValueError("unknown") # filter existing scenes. available_scenes = get_available_scenes(nusc) - available_scene_names = [s['name'] for s in available_scenes] - train_scenes = list( - filter(lambda x: x in available_scene_names, train_scenes)) + available_scene_names = [s["name"] for s in available_scenes] + train_scenes = list(filter(lambda x: x in available_scene_names, train_scenes)) val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes)) - train_scenes = set([ - available_scenes[available_scene_names.index(s)]['token'] - for s in train_scenes - ]) - val_scenes = set([ - available_scenes[available_scene_names.index(s)]['token'] - for s in val_scenes - ]) - - test = 'test' in version + train_scenes = set( + [ + available_scenes[available_scene_names.index(s)]["token"] + for s in train_scenes + ] + ) + val_scenes = set( + [available_scenes[available_scene_names.index(s)]["token"] for s in val_scenes] + ) + + test = "test" in version if test: - print('test scene: {}'.format(len(train_scenes))) + print("test scene: {}".format(len(train_scenes))) else: - print('train scene: {}, val scene: {}'.format( - len(train_scenes), len(val_scenes))) + print( + "train scene: {}, val scene: {}".format(len(train_scenes), len(val_scenes)) + ) train_nusc_infos, val_nusc_infos = _fill_trainval_infos( - nusc, nusc_can_bus, train_scenes, val_scenes, test, max_sweeps=max_sweeps) + nusc, nusc_can_bus, train_scenes, val_scenes, test, max_sweeps=max_sweeps + ) metadata = dict(version=version) if test: - print('test sample: {}'.format(len(train_nusc_infos))) + print("test sample: {}".format(len(train_nusc_infos))) data = dict(infos=train_nusc_infos, metadata=metadata) - info_path = osp.join(out_path, - '{}_infos_temporal_test.pkl'.format(info_prefix)) + info_path = osp.join(out_path, "{}_infos_temporal_test.pkl".format(info_prefix)) mmcv.dump(data, info_path) else: - print('train sample: {}, val sample: {}'.format( - len(train_nusc_infos), len(val_nusc_infos))) + print( + "train sample: {}, val sample: {}".format( + len(train_nusc_infos), len(val_nusc_infos) + ) + ) data = dict(infos=train_nusc_infos, metadata=metadata) - info_path = osp.join(out_path, - '{}_infos_temporal_train.pkl'.format(info_prefix)) + info_path = osp.join( + out_path, "{}_infos_temporal_train.pkl".format(info_prefix) + ) mmcv.dump(data, info_path) - data['infos'] = val_nusc_infos - info_val_path = osp.join(out_path, - '{}_infos_temporal_val.pkl'.format(info_prefix)) + data["infos"] = val_nusc_infos + info_val_path = osp.join( + out_path, "{}_infos_temporal_val.pkl".format(info_prefix) + ) mmcv.dump(data, info_val_path) @@ -124,20 +150,20 @@ def 
get_available_scenes(nusc): available scenes. """ available_scenes = [] - print('total scene num: {}'.format(len(nusc.scene))) + print("total scene num: {}".format(len(nusc.scene))) for scene in nusc.scene: - scene_token = scene['token'] - scene_rec = nusc.get('scene', scene_token) - sample_rec = nusc.get('sample', scene_rec['first_sample_token']) - sd_rec = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP']) + scene_token = scene["token"] + scene_rec = nusc.get("scene", scene_token) + sample_rec = nusc.get("sample", scene_rec["first_sample_token"]) + sd_rec = nusc.get("sample_data", sample_rec["data"]["LIDAR_TOP"]) has_more_frames = True scene_not_exist = False while has_more_frames: - lidar_path, boxes, _ = nusc.get_sample_data(sd_rec['token']) + lidar_path, boxes, _ = nusc.get_sample_data(sd_rec["token"]) lidar_path = str(lidar_path) if os.getcwd() in lidar_path: # path from lyftdataset is absolute path - lidar_path = lidar_path.split(f'{os.getcwd()}/')[-1] + lidar_path = lidar_path.split(f"{os.getcwd()}/")[-1] # relative path if not mmcv.is_filepath(lidar_path): scene_not_exist = True @@ -147,41 +173,38 @@ def get_available_scenes(nusc): if scene_not_exist: continue available_scenes.append(scene) - print('exist scene num: {}'.format(len(available_scenes))) + print("exist scene num: {}".format(len(available_scenes))) return available_scenes def _get_can_bus_info(nusc, nusc_can_bus, sample): - scene_name = nusc.get('scene', sample['scene_token'])['name'] - sample_timestamp = sample['timestamp'] + scene_name = nusc.get("scene", sample["scene_token"])["name"] + sample_timestamp = sample["timestamp"] try: - pose_list = nusc_can_bus.get_messages(scene_name, 'pose') + pose_list = nusc_can_bus.get_messages(scene_name, "pose") except: return np.zeros(18) # server scenes do not have can bus information. can_bus = [] # during each scene, the first timestamp of can_bus may be large than the first sample's timestamp last_pose = pose_list[0] for i, pose in enumerate(pose_list): - if pose['utime'] > sample_timestamp: + if pose["utime"] > sample_timestamp: break last_pose = pose - _ = last_pose.pop('utime') # useless - pos = last_pose.pop('pos') - rotation = last_pose.pop('orientation') + _ = last_pose.pop("utime") # useless + pos = last_pose.pop("pos") + rotation = last_pose.pop("orientation") can_bus.extend(pos) can_bus.extend(rotation) for key in last_pose.keys(): can_bus.extend(pose[key]) # 16 elements - can_bus.extend([0., 0.]) + can_bus.extend([0.0, 0.0]) return np.array(can_bus) -def _fill_trainval_infos(nusc, - nusc_can_bus, - train_scenes, - val_scenes, - test=False, - max_sweeps=10): +def _fill_trainval_infos( + nusc, nusc_can_bus, train_scenes, val_scenes, test=False, max_sweeps=10 +): """Generate the train/val infos from the raw data. 
Args: @@ -200,95 +223,99 @@ def _fill_trainval_infos(nusc, val_nusc_infos = [] frame_idx = 0 for sample in mmcv.track_iter_progress(nusc.sample): - lidar_token = sample['data']['LIDAR_TOP'] - sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP']) - cs_record = nusc.get('calibrated_sensor', - sd_rec['calibrated_sensor_token']) - pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token']) + lidar_token = sample["data"]["LIDAR_TOP"] + sd_rec = nusc.get("sample_data", sample["data"]["LIDAR_TOP"]) + cs_record = nusc.get("calibrated_sensor", sd_rec["calibrated_sensor_token"]) + pose_record = nusc.get("ego_pose", sd_rec["ego_pose_token"]) lidar_path, boxes, _ = nusc.get_sample_data(lidar_token) mmcv.check_file_exist(lidar_path) can_bus = _get_can_bus_info(nusc, nusc_can_bus, sample) ## info = { - 'lidar_path': lidar_path, - 'token': sample['token'], - 'prev': sample['prev'], - 'next': sample['next'], - 'can_bus': can_bus, - 'frame_idx': frame_idx, # temporal related info - 'sweeps': [], - 'cams': dict(), - 'scene_token': sample['scene_token'], # temporal related info - 'lidar2ego_translation': cs_record['translation'], - 'lidar2ego_rotation': cs_record['rotation'], - 'ego2global_translation': pose_record['translation'], - 'ego2global_rotation': pose_record['rotation'], - 'timestamp': sample['timestamp'], + "lidar_path": lidar_path, + "token": sample["token"], + "prev": sample["prev"], + "next": sample["next"], + "can_bus": can_bus, + "frame_idx": frame_idx, # temporal related info + "sweeps": [], + "cams": dict(), + "scene_token": sample["scene_token"], # temporal related info + "lidar2ego_translation": cs_record["translation"], + "lidar2ego_rotation": cs_record["rotation"], + "ego2global_translation": pose_record["translation"], + "ego2global_rotation": pose_record["rotation"], + "timestamp": sample["timestamp"], } - if sample['next'] == '': + if sample["next"] == "": frame_idx = 0 else: frame_idx += 1 - l2e_r = info['lidar2ego_rotation'] - l2e_t = info['lidar2ego_translation'] - e2g_r = info['ego2global_rotation'] - e2g_t = info['ego2global_translation'] + l2e_r = info["lidar2ego_rotation"] + l2e_t = info["lidar2ego_translation"] + e2g_r = info["ego2global_rotation"] + e2g_t = info["ego2global_translation"] l2e_r_mat = Quaternion(l2e_r).rotation_matrix e2g_r_mat = Quaternion(e2g_r).rotation_matrix # obtain 6 image's information per frame camera_types = [ - 'CAM_FRONT', - 'CAM_FRONT_RIGHT', - 'CAM_FRONT_LEFT', - 'CAM_BACK', - 'CAM_BACK_LEFT', - 'CAM_BACK_RIGHT', + "CAM_FRONT", + "CAM_FRONT_RIGHT", + "CAM_FRONT_LEFT", + "CAM_BACK", + "CAM_BACK_LEFT", + "CAM_BACK_RIGHT", ] for cam in camera_types: - cam_token = sample['data'][cam] + cam_token = sample["data"][cam] cam_path, _, cam_intrinsic = nusc.get_sample_data(cam_token) - cam_info = obtain_sensor2top(nusc, cam_token, l2e_t, l2e_r_mat, - e2g_t, e2g_r_mat, cam) + cam_info = obtain_sensor2top( + nusc, cam_token, l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, cam + ) cam_info.update(cam_intrinsic=cam_intrinsic) - info['cams'].update({cam: cam_info}) + info["cams"].update({cam: cam_info}) # obtain sweeps for a single key-frame - sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP']) + sd_rec = nusc.get("sample_data", sample["data"]["LIDAR_TOP"]) sweeps = [] while len(sweeps) < max_sweeps: - if not sd_rec['prev'] == '': - sweep = obtain_sensor2top(nusc, sd_rec['prev'], l2e_t, - l2e_r_mat, e2g_t, e2g_r_mat, 'lidar') + if not sd_rec["prev"] == "": + sweep = obtain_sensor2top( + nusc, sd_rec["prev"], l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, "lidar" + ) 
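
The `obtain_sensor2top` call above (the helper itself is defined further down in this file) fuses four rigid transforms, sweep sensor -> sweep ego -> global -> key-frame ego -> key-frame lidar, into a single rotation/translation pair applied in the row-vector convention `points @ R.T + T`. As a cross-check, the same mapping can be written step by step; `sensor_to_keyframe_lidar` below is an illustrative name, not part of the converter:

    import numpy as np
    from pyquaternion import Quaternion

    def sensor_to_keyframe_lidar(p_sensor, l2e_r_s, l2e_t_s, e2g_r_s, e2g_t_s,
                                 l2e_r, l2e_t, e2g_r, e2g_t):
        """Map (N, 3) points from a sweep sensor frame to the key-frame lidar
        frame, step by step, from the same quaternion/translation records that
        obtain_sensor2top consumes (row-vector convention)."""
        p = p_sensor @ Quaternion(l2e_r_s).rotation_matrix.T + np.asarray(l2e_t_s)
        p = p @ Quaternion(e2g_r_s).rotation_matrix.T + np.asarray(e2g_t_s)
        p = (p - np.asarray(e2g_t)) @ np.linalg.inv(Quaternion(e2g_r).rotation_matrix).T
        p = (p - np.asarray(l2e_t)) @ np.linalg.inv(Quaternion(l2e_r).rotation_matrix).T
        return p

Expanding these four steps algebraically gives exactly the fused `R`/`T` that `obtain_sensor2top` stores as `sensor2lidar_rotation` (transposed) and `sensor2lidar_translation`.
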
sweeps.append(sweep) - sd_rec = nusc.get('sample_data', sd_rec['prev']) + sd_rec = nusc.get("sample_data", sd_rec["prev"]) else: break - info['sweeps'] = sweeps + info["sweeps"] = sweeps # obtain annotation if not test: annotations = [ - nusc.get('sample_annotation', token) - for token in sample['anns'] + nusc.get("sample_annotation", token) for token in sample["anns"] ] locs = np.array([b.center for b in boxes]).reshape(-1, 3) dims = np.array([b.wlh for b in boxes]).reshape(-1, 3) - rots = np.array([b.orientation.yaw_pitch_roll[0] - for b in boxes]).reshape(-1, 1) + rots = np.array([b.orientation.yaw_pitch_roll[0] for b in boxes]).reshape( + -1, 1 + ) velocity = np.array( - [nusc.box_velocity(token)[:2] for token in sample['anns']]) + [nusc.box_velocity(token)[:2] for token in sample["anns"]] + ) valid_flag = np.array( - [(anno['num_lidar_pts'] + anno['num_radar_pts']) > 0 - for anno in annotations], - dtype=bool).reshape(-1) + [ + (anno["num_lidar_pts"] + anno["num_radar_pts"]) > 0 + for anno in annotations + ], + dtype=bool, + ).reshape(-1) # convert velo from global to lidar for i in range(len(boxes)): velo = np.array([*velocity[i], 0.0]) - velo = velo @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv( - l2e_r_mat).T + velo = velo @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T velocity[i] = velo[:2] names = [b.name for b in boxes] @@ -299,17 +326,16 @@ def _fill_trainval_infos(nusc, # we need to convert rot to SECOND format. gt_boxes = np.concatenate([locs, dims, -rots - np.pi / 2], axis=1) assert len(gt_boxes) == len( - annotations), f'{len(gt_boxes)}, {len(annotations)}' - info['gt_boxes'] = gt_boxes - info['gt_names'] = names - info['gt_velocity'] = velocity.reshape(-1, 2) - info['num_lidar_pts'] = np.array( - [a['num_lidar_pts'] for a in annotations]) - info['num_radar_pts'] = np.array( - [a['num_radar_pts'] for a in annotations]) - info['valid_flag'] = valid_flag - - if sample['scene_token'] in train_scenes: + annotations + ), f"{len(gt_boxes)}, {len(annotations)}" + info["gt_boxes"] = gt_boxes + info["gt_names"] = names + info["gt_velocity"] = velocity.reshape(-1, 2) + info["num_lidar_pts"] = np.array([a["num_lidar_pts"] for a in annotations]) + info["num_radar_pts"] = np.array([a["num_radar_pts"] for a in annotations]) + info["valid_flag"] = valid_flag + + if sample["scene_token"] in train_scenes: train_nusc_infos.append(info) else: val_nusc_infos.append(info) @@ -317,13 +343,9 @@ def _fill_trainval_infos(nusc, return train_nusc_infos, val_nusc_infos -def obtain_sensor2top(nusc, - sensor_token, - l2e_t, - l2e_r_mat, - e2g_t, - e2g_r_mat, - sensor_type='lidar'): +def obtain_sensor2top( + nusc, sensor_token, l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, sensor_type="lidar" +): """Obtain the info with RT matric from general sensor to Top LiDAR. Args: @@ -341,41 +363,44 @@ def obtain_sensor2top(nusc, Returns: sweep (dict): Sweep information after transformation. 
""" - sd_rec = nusc.get('sample_data', sensor_token) - cs_record = nusc.get('calibrated_sensor', - sd_rec['calibrated_sensor_token']) - pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token']) - data_path = str(nusc.get_sample_data_path(sd_rec['token'])) + sd_rec = nusc.get("sample_data", sensor_token) + cs_record = nusc.get("calibrated_sensor", sd_rec["calibrated_sensor_token"]) + pose_record = nusc.get("ego_pose", sd_rec["ego_pose_token"]) + data_path = str(nusc.get_sample_data_path(sd_rec["token"])) if os.getcwd() in data_path: # path from lyftdataset is absolute path - data_path = data_path.split(f'{os.getcwd()}/')[-1] # relative path + data_path = data_path.split(f"{os.getcwd()}/")[-1] # relative path sweep = { - 'data_path': data_path, - 'type': sensor_type, - 'sample_data_token': sd_rec['token'], - 'sensor2ego_translation': cs_record['translation'], - 'sensor2ego_rotation': cs_record['rotation'], - 'ego2global_translation': pose_record['translation'], - 'ego2global_rotation': pose_record['rotation'], - 'timestamp': sd_rec['timestamp'] + "data_path": data_path, + "type": sensor_type, + "sample_data_token": sd_rec["token"], + "sensor2ego_translation": cs_record["translation"], + "sensor2ego_rotation": cs_record["rotation"], + "ego2global_translation": pose_record["translation"], + "ego2global_rotation": pose_record["rotation"], + "timestamp": sd_rec["timestamp"], } - l2e_r_s = sweep['sensor2ego_rotation'] - l2e_t_s = sweep['sensor2ego_translation'] - e2g_r_s = sweep['ego2global_rotation'] - e2g_t_s = sweep['ego2global_translation'] + l2e_r_s = sweep["sensor2ego_rotation"] + l2e_t_s = sweep["sensor2ego_translation"] + e2g_r_s = sweep["ego2global_rotation"] + e2g_t_s = sweep["ego2global_translation"] # obtain the RT from sensor to Top LiDAR # sweep->ego->global->ego'->lidar l2e_r_s_mat = Quaternion(l2e_r_s).rotation_matrix e2g_r_s_mat = Quaternion(e2g_r_s).rotation_matrix R = (l2e_r_s_mat.T @ e2g_r_s_mat.T) @ ( - np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) + np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T + ) T = (l2e_t_s @ e2g_r_s_mat.T + e2g_t_s) @ ( - np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) - T -= e2g_t @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T - ) + l2e_t @ np.linalg.inv(l2e_r_mat).T - sweep['sensor2lidar_rotation'] = R.T # points @ R.T + T - sweep['sensor2lidar_translation'] = T + np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T + ) + T -= ( + e2g_t @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) + + l2e_t @ np.linalg.inv(l2e_r_mat).T + ) + sweep["sensor2lidar_rotation"] = R.T # points @ R.T + T + sweep["sensor2lidar_translation"] = T return sweep @@ -390,14 +415,14 @@ def export_2d_annotation(root_path, info_path, version, mono3d=True): """ # get bbox annotations for camera camera_types = [ - 'CAM_FRONT', - 'CAM_FRONT_RIGHT', - 'CAM_FRONT_LEFT', - 'CAM_BACK', - 'CAM_BACK_LEFT', - 'CAM_BACK_RIGHT', + "CAM_FRONT", + "CAM_FRONT_RIGHT", + "CAM_FRONT_LEFT", + "CAM_BACK", + "CAM_BACK_LEFT", + "CAM_BACK_RIGHT", ] - nusc_infos = mmcv.load(info_path)['infos'] + nusc_infos = mmcv.load(info_path)["infos"] nusc = NuScenes(version=version, dataroot=root_path, verbose=True) # info_2d_list = [] cat2Ids = [ @@ -408,45 +433,44 @@ def export_2d_annotation(root_path, info_path, version, mono3d=True): coco_2d_dict = dict(annotations=[], images=[], categories=cat2Ids) for info in mmcv.track_iter_progress(nusc_infos): for cam in camera_types: - cam_info = info['cams'][cam] + cam_info = info["cams"][cam] coco_infos = 
get_2d_boxes( nusc, - cam_info['sample_data_token'], - visibilities=['', '1', '2', '3', '4'], - mono3d=mono3d) - (height, width, _) = mmcv.imread(cam_info['data_path']).shape - coco_2d_dict['images'].append( + cam_info["sample_data_token"], + visibilities=["", "1", "2", "3", "4"], + mono3d=mono3d, + ) + (height, width, _) = mmcv.imread(cam_info["data_path"]).shape + coco_2d_dict["images"].append( dict( - file_name=cam_info['data_path'].split('data/nuscenes/') - [-1], - id=cam_info['sample_data_token'], - token=info['token'], - cam2ego_rotation=cam_info['sensor2ego_rotation'], - cam2ego_translation=cam_info['sensor2ego_translation'], - ego2global_rotation=info['ego2global_rotation'], - ego2global_translation=info['ego2global_translation'], - cam_intrinsic=cam_info['cam_intrinsic'], + file_name=cam_info["data_path"].split("data/nuscenes/")[-1], + id=cam_info["sample_data_token"], + token=info["token"], + cam2ego_rotation=cam_info["sensor2ego_rotation"], + cam2ego_translation=cam_info["sensor2ego_translation"], + ego2global_rotation=info["ego2global_rotation"], + ego2global_translation=info["ego2global_translation"], + cam_intrinsic=cam_info["cam_intrinsic"], width=width, - height=height)) + height=height, + ) + ) for coco_info in coco_infos: if coco_info is None: continue # add an empty key for coco format - coco_info['segmentation'] = [] - coco_info['id'] = coco_ann_id - coco_2d_dict['annotations'].append(coco_info) + coco_info["segmentation"] = [] + coco_info["id"] = coco_ann_id + coco_2d_dict["annotations"].append(coco_info) coco_ann_id += 1 if mono3d: - json_prefix = f'{info_path[:-4]}_mono3d' + json_prefix = f"{info_path[:-4]}_mono3d" else: - json_prefix = f'{info_path[:-4]}' - mmcv.dump(coco_2d_dict, f'{json_prefix}.coco.json') + json_prefix = f"{info_path[:-4]}" + mmcv.dump(coco_2d_dict, f"{json_prefix}.coco.json") -def get_2d_boxes(nusc, - sample_data_token: str, - visibilities: List[str], - mono3d=True): +def get_2d_boxes(nusc, sample_data_token: str, visibilities: List[str], mono3d=True): """Get the 2D annotation records for a given `sample_data_token`. Args: @@ -461,49 +485,45 @@ def get_2d_boxes(nusc, """ # Get the sample data and the sample corresponding to that sample data. - sd_rec = nusc.get('sample_data', sample_data_token) + sd_rec = nusc.get("sample_data", sample_data_token) - assert sd_rec[ - 'sensor_modality'] == 'camera', 'Error: get_2d_boxes only works' \ - ' for camera sample_data!' - if not sd_rec['is_key_frame']: - raise ValueError( - 'The 2D re-projections are available only for keyframes.') + assert sd_rec["sensor_modality"] == "camera", ( + "Error: get_2d_boxes only works" " for camera sample_data!" + ) + if not sd_rec["is_key_frame"]: + raise ValueError("The 2D re-projections are available only for keyframes.") - s_rec = nusc.get('sample', sd_rec['sample_token']) + s_rec = nusc.get("sample", sd_rec["sample_token"]) # Get the calibrated sensor and ego pose # record to get the transformation matrices. - cs_rec = nusc.get('calibrated_sensor', sd_rec['calibrated_sensor_token']) - pose_rec = nusc.get('ego_pose', sd_rec['ego_pose_token']) - camera_intrinsic = np.array(cs_rec['camera_intrinsic']) + cs_rec = nusc.get("calibrated_sensor", sd_rec["calibrated_sensor_token"]) + pose_rec = nusc.get("ego_pose", sd_rec["ego_pose_token"]) + camera_intrinsic = np.array(cs_rec["camera_intrinsic"]) # Get all the annotation with the specified visibilties. 
+ ann_recs = [nusc.get("sample_annotation", token) for token in s_rec["anns"]] ann_recs = [ - nusc.get('sample_annotation', token) for token in s_rec['anns'] - ] - ann_recs = [ - ann_rec for ann_rec in ann_recs - if (ann_rec['visibility_token'] in visibilities) + ann_rec for ann_rec in ann_recs if (ann_rec["visibility_token"] in visibilities) ] repro_recs = [] for ann_rec in ann_recs: # Augment sample_annotation with token information. - ann_rec['sample_annotation_token'] = ann_rec['token'] - ann_rec['sample_data_token'] = sample_data_token + ann_rec["sample_annotation_token"] = ann_rec["token"] + ann_rec["sample_data_token"] = sample_data_token # Get the box in global coordinates. - box = nusc.get_box(ann_rec['token']) + box = nusc.get_box(ann_rec["token"]) # Move them to the ego-pose frame. - box.translate(-np.array(pose_rec['translation'])) - box.rotate(Quaternion(pose_rec['rotation']).inverse) + box.translate(-np.array(pose_rec["translation"])) + box.rotate(Quaternion(pose_rec["rotation"]).inverse) # Move them to the calibrated sensor frame. - box.translate(-np.array(cs_rec['translation'])) - box.rotate(Quaternion(cs_rec['rotation']).inverse) + box.translate(-np.array(cs_rec["translation"])) + box.rotate(Quaternion(cs_rec["rotation"]).inverse) # Filter out the corners that are not in front of the calibrated # sensor. @@ -512,8 +532,9 @@ def get_2d_boxes(nusc, corners_3d = corners_3d[:, in_front] # Project 3d box to 2d. - corner_coords = view_points(corners_3d, camera_intrinsic, - True).T[:, :2].tolist() + corner_coords = ( + view_points(corners_3d, camera_intrinsic, True).T[:, :2].tolist() + ) # Keep only corners that fall within the image. final_coords = post_process_coords(corner_coords) @@ -526,8 +547,9 @@ def get_2d_boxes(nusc, min_x, min_y, max_x, max_y = final_coords # Generate dictionary record to be included in the .json file. 
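
In the projection above, `view_points` from the nuScenes devkit is a plain pinhole projection: apply the 3x3 camera intrinsics and divide by depth. A minimal equivalent, with `project_pinhole` as an illustrative name (corners kept by the `in_front` filter are in front of the camera, so the division is safe):

    import numpy as np

    def project_pinhole(corners_3d, cam_intrinsic):
        """corners_3d: (3, N) points in the camera frame with positive depth.
        Returns (N, 2) pixel coordinates, equivalent to
        view_points(corners_3d, cam_intrinsic, True).T[:, :2]."""
        pts = cam_intrinsic @ corners_3d   # (3, N)
        pts = pts / pts[2:3, :]            # normalize by depth
        return pts.T[:, :2]

The projected corners are then reduced to an axis-aligned box by `post_process_coords`, which intersects the polygon spanned by the corners with the image canvas rather than naively clipping, so boxes that only partially project into the image still get a tight 2D extent.
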
- repro_rec = generate_record(ann_rec, min_x, min_y, max_x, max_y, - sample_data_token, sd_rec['filename']) + repro_rec = generate_record( + ann_rec, min_x, min_y, max_x, max_y, sample_data_token, sd_rec["filename"] + ) # If mono3d=True, add 3D annotations in camera coordinates if mono3d and (repro_rec is not None): @@ -542,33 +564,32 @@ def get_2d_boxes(nusc, global_velo2d = nusc.box_velocity(box.token)[:2] global_velo3d = np.array([*global_velo2d, 0.0]) - e2g_r_mat = Quaternion(pose_rec['rotation']).rotation_matrix - c2e_r_mat = Quaternion(cs_rec['rotation']).rotation_matrix - cam_velo3d = global_velo3d @ np.linalg.inv( - e2g_r_mat).T @ np.linalg.inv(c2e_r_mat).T + e2g_r_mat = Quaternion(pose_rec["rotation"]).rotation_matrix + c2e_r_mat = Quaternion(cs_rec["rotation"]).rotation_matrix + cam_velo3d = ( + global_velo3d @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(c2e_r_mat).T + ) velo = cam_velo3d[0::2].tolist() - repro_rec['bbox_cam3d'] = loc + dim + rot - repro_rec['velo_cam3d'] = velo + repro_rec["bbox_cam3d"] = loc + dim + rot + repro_rec["velo_cam3d"] = velo center3d = np.array(loc).reshape([1, 3]) - center2d = points_cam2img( - center3d, camera_intrinsic, with_depth=True) - repro_rec['center2d'] = center2d.squeeze().tolist() + center2d = points_cam2img(center3d, camera_intrinsic, with_depth=True) + repro_rec["center2d"] = center2d.squeeze().tolist() # normalized center2D + depth # if samples with depth < 0 will be removed - if repro_rec['center2d'][2] <= 0: + if repro_rec["center2d"][2] <= 0: continue - ann_token = nusc.get('sample_annotation', - box.token)['attribute_tokens'] + ann_token = nusc.get("sample_annotation", box.token)["attribute_tokens"] if len(ann_token) == 0: - attr_name = 'None' + attr_name = "None" else: - attr_name = nusc.get('attribute', ann_token[0])['name'] + attr_name = nusc.get("attribute", ann_token[0])["name"] attr_id = nus_attributes.index(attr_name) - repro_rec['attribute_name'] = attr_name - repro_rec['attribute_id'] = attr_id + repro_rec["attribute_name"] = attr_name + repro_rec["attribute_id"] = attr_id repro_recs.append(repro_rec) @@ -596,7 +617,8 @@ def post_process_coords( if polygon_from_2d_box.intersects(img_canvas): img_intersection = polygon_from_2d_box.intersection(img_canvas) intersection_coords = np.array( - [coord for coord in img_intersection.exterior.coords]) + [coord for coord in img_intersection.exterior.coords] + ) min_x = min(intersection_coords[:, 0]) min_y = min(intersection_coords[:, 1]) @@ -608,8 +630,15 @@ def post_process_coords( return None -def generate_record(ann_rec: dict, x1: float, y1: float, x2: float, y2: float, - sample_data_token: str, filename: str) -> OrderedDict: +def generate_record( + ann_rec: dict, + x1: float, + y1: float, + x2: float, + y2: float, + sample_data_token: str, + filename: str, +) -> OrderedDict: """Generate one 2D annotation record given various informations on top of the 2D bounding box coordinates. 
@@ -634,39 +663,39 @@ def generate_record(ann_rec: dict, x1: float, y1: float, x2: float, y2: float, - iscrowd (int): whether the area is crowd """ repro_rec = OrderedDict() - repro_rec['sample_data_token'] = sample_data_token + repro_rec["sample_data_token"] = sample_data_token coco_rec = dict() relevant_keys = [ - 'attribute_tokens', - 'category_name', - 'instance_token', - 'next', - 'num_lidar_pts', - 'num_radar_pts', - 'prev', - 'sample_annotation_token', - 'sample_data_token', - 'visibility_token', + "attribute_tokens", + "category_name", + "instance_token", + "next", + "num_lidar_pts", + "num_radar_pts", + "prev", + "sample_annotation_token", + "sample_data_token", + "visibility_token", ] for key, value in ann_rec.items(): if key in relevant_keys: repro_rec[key] = value - repro_rec['bbox_corners'] = [x1, y1, x2, y2] - repro_rec['filename'] = filename + repro_rec["bbox_corners"] = [x1, y1, x2, y2] + repro_rec["filename"] = filename - coco_rec['file_name'] = filename - coco_rec['image_id'] = sample_data_token - coco_rec['area'] = (y2 - y1) * (x2 - x1) + coco_rec["file_name"] = filename + coco_rec["image_id"] = sample_data_token + coco_rec["area"] = (y2 - y1) * (x2 - x1) - if repro_rec['category_name'] not in NuScenesDataset.NameMapping: + if repro_rec["category_name"] not in NuScenesDataset.NameMapping: return None - cat_name = NuScenesDataset.NameMapping[repro_rec['category_name']] - coco_rec['category_name'] = cat_name - coco_rec['category_id'] = nus_categories.index(cat_name) - coco_rec['bbox'] = [x1, y1, x2 - x1, y2 - y1] - coco_rec['iscrowd'] = 0 + cat_name = NuScenesDataset.NameMapping[repro_rec["category_name"]] + coco_rec["category_name"] = cat_name + coco_rec["category_id"] = nus_categories.index(cat_name) + coco_rec["bbox"] = [x1, y1, x2 - x1, y2 - y1] + coco_rec["iscrowd"] = 0 return coco_rec diff --git a/tools/data_converter/nuscenes_occ_converter.py b/tools/data_converter/nuscenes_occ_converter.py index 9fa897b..af09f25 100644 --- a/tools/data_converter/nuscenes_occ_converter.py +++ b/tools/data_converter/nuscenes_occ_converter.py @@ -3,36 +3,54 @@ # --------------------------------------------- # Modified by Zhiqi Li # --------------------------------------------- -import mmcv -import numpy as np import os from collections import OrderedDict -from nuscenes.nuscenes import NuScenes -from nuscenes.utils.geometry_utils import view_points from os import path as osp -from pyquaternion import Quaternion -from shapely.geometry import MultiPoint, box from typing import List, Tuple, Union +import mmcv +import numpy as np from mmdet3d.core.bbox.box_np_ops import points_cam2img from mmdet3d.datasets import NuScenesDataset +from nuscenes.nuscenes import NuScenes +from nuscenes.utils.geometry_utils import view_points +from pyquaternion import Quaternion +from shapely.geometry import MultiPoint, box -nus_categories = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle', - 'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone', - 'barrier') - -nus_attributes = ('cycle.with_rider', 'cycle.without_rider', - 'pedestrian.moving', 'pedestrian.standing', - 'pedestrian.sitting_lying_down', 'vehicle.moving', - 'vehicle.parked', 'vehicle.stopped', 'None') - - -def create_nuscenes_infos(root_path, - out_path, - can_bus_root_path, - info_prefix, - version='v1.0-trainval', - max_sweeps=10): +nus_categories = ( + "car", + "truck", + "trailer", + "bus", + "construction_vehicle", + "bicycle", + "motorcycle", + "pedestrian", + "traffic_cone", + "barrier", +) + +nus_attributes = ( + 
"cycle.with_rider", + "cycle.without_rider", + "pedestrian.moving", + "pedestrian.standing", + "pedestrian.sitting_lying_down", + "vehicle.moving", + "vehicle.parked", + "vehicle.stopped", + "None", +) + + +def create_nuscenes_infos( + root_path, + out_path, + can_bus_root_path, + info_prefix, + version="v1.0-trainval", + max_sweeps=10, +): """Create info file of nuscene dataset. Given the raw data, generate its related info file in pkl format. @@ -45,69 +63,77 @@ def create_nuscenes_infos(root_path, max_sweeps (int): Max number of sweeps. Default: 10 """ - from nuscenes.nuscenes import NuScenes from nuscenes.can_bus.can_bus_api import NuScenesCanBus + from nuscenes.nuscenes import NuScenes + print(version, root_path) nusc = NuScenes(version=version, dataroot=root_path, verbose=True) nusc_can_bus = NuScenesCanBus(dataroot=can_bus_root_path) from nuscenes.utils import splits - available_vers = ['v1.0-trainval', 'v1.0-test', 'v1.0-mini'] + + available_vers = ["v1.0-trainval", "v1.0-test", "v1.0-mini"] assert version in available_vers - if version == 'v1.0-trainval': + if version == "v1.0-trainval": train_scenes = splits.train val_scenes = splits.val - elif version == 'v1.0-test': + elif version == "v1.0-test": train_scenes = splits.test val_scenes = [] - elif version == 'v1.0-mini': + elif version == "v1.0-mini": train_scenes = splits.mini_train val_scenes = splits.mini_val else: - raise ValueError('unknown') + raise ValueError("unknown") # filter existing scenes. available_scenes = get_available_scenes(nusc) - available_scene_names = [s['name'] for s in available_scenes] - train_scenes = list( - filter(lambda x: x in available_scene_names, train_scenes)) + available_scene_names = [s["name"] for s in available_scenes] + train_scenes = list(filter(lambda x: x in available_scene_names, train_scenes)) val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes)) - train_scenes = set([ - available_scenes[available_scene_names.index(s)]['token'] - for s in train_scenes - ]) - val_scenes = set([ - available_scenes[available_scene_names.index(s)]['token'] - for s in val_scenes - ]) - - test = 'test' in version + train_scenes = set( + [ + available_scenes[available_scene_names.index(s)]["token"] + for s in train_scenes + ] + ) + val_scenes = set( + [available_scenes[available_scene_names.index(s)]["token"] for s in val_scenes] + ) + + test = "test" in version if test: - print('test scene: {}'.format(len(train_scenes))) + print("test scene: {}".format(len(train_scenes))) else: - print('train scene: {}, val scene: {}'.format( - len(train_scenes), len(val_scenes))) + print( + "train scene: {}, val scene: {}".format(len(train_scenes), len(val_scenes)) + ) train_nusc_infos, val_nusc_infos = _fill_trainval_infos( - nusc, nusc_can_bus, train_scenes, val_scenes, test, max_sweeps=max_sweeps) + nusc, nusc_can_bus, train_scenes, val_scenes, test, max_sweeps=max_sweeps + ) metadata = dict(version=version) if test: - print('test sample: {}'.format(len(train_nusc_infos))) + print("test sample: {}".format(len(train_nusc_infos))) data = dict(infos=train_nusc_infos, metadata=metadata) - info_path = osp.join(out_path, - '{}_infos_temporal_test.pkl'.format(info_prefix)) + info_path = osp.join(out_path, "{}_infos_temporal_test.pkl".format(info_prefix)) mmcv.dump(data, info_path) else: - print('train sample: {}, val sample: {}'.format( - len(train_nusc_infos), len(val_nusc_infos))) + print( + "train sample: {}, val sample: {}".format( + len(train_nusc_infos), len(val_nusc_infos) + ) + ) data = 
dict(infos=train_nusc_infos, metadata=metadata) - info_path = osp.join(out_path, - '{}_infos_temporal_train.pkl'.format(info_prefix)) + info_path = osp.join( + out_path, "{}_infos_temporal_train.pkl".format(info_prefix) + ) mmcv.dump(data, info_path) - data['infos'] = val_nusc_infos - info_val_path = osp.join(out_path, - '{}_infos_temporal_val.pkl'.format(info_prefix)) + data["infos"] = val_nusc_infos + info_val_path = osp.join( + out_path, "{}_infos_temporal_val.pkl".format(info_prefix) + ) mmcv.dump(data, info_val_path) @@ -125,20 +151,20 @@ def get_available_scenes(nusc): available scenes. """ available_scenes = [] - print('total scene num: {}'.format(len(nusc.scene))) + print("total scene num: {}".format(len(nusc.scene))) for scene in nusc.scene: - scene_token = scene['token'] - scene_rec = nusc.get('scene', scene_token) - sample_rec = nusc.get('sample', scene_rec['first_sample_token']) - sd_rec = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP']) + scene_token = scene["token"] + scene_rec = nusc.get("scene", scene_token) + sample_rec = nusc.get("sample", scene_rec["first_sample_token"]) + sd_rec = nusc.get("sample_data", sample_rec["data"]["LIDAR_TOP"]) has_more_frames = True scene_not_exist = False while has_more_frames: - lidar_path, boxes, _ = nusc.get_sample_data(sd_rec['token']) + lidar_path, boxes, _ = nusc.get_sample_data(sd_rec["token"]) lidar_path = str(lidar_path) if os.getcwd() in lidar_path: # path from lyftdataset is absolute path - lidar_path = lidar_path.split(f'{os.getcwd()}/')[-1] + lidar_path = lidar_path.split(f"{os.getcwd()}/")[-1] # relative path if not mmcv.is_filepath(lidar_path): scene_not_exist = True @@ -148,41 +174,38 @@ def get_available_scenes(nusc): if scene_not_exist: continue available_scenes.append(scene) - print('exist scene num: {}'.format(len(available_scenes))) + print("exist scene num: {}".format(len(available_scenes))) return available_scenes def _get_can_bus_info(nusc, nusc_can_bus, sample): - scene_name = nusc.get('scene', sample['scene_token'])['name'] - sample_timestamp = sample['timestamp'] + scene_name = nusc.get("scene", sample["scene_token"])["name"] + sample_timestamp = sample["timestamp"] try: - pose_list = nusc_can_bus.get_messages(scene_name, 'pose') + pose_list = nusc_can_bus.get_messages(scene_name, "pose") except: return np.zeros(18) # server scenes do not have can bus information. can_bus = [] # during each scene, the first timestamp of can_bus may be large than the first sample's timestamp last_pose = pose_list[0] for i, pose in enumerate(pose_list): - if pose['utime'] > sample_timestamp: + if pose["utime"] > sample_timestamp: break last_pose = pose - _ = last_pose.pop('utime') # useless - pos = last_pose.pop('pos') - rotation = last_pose.pop('orientation') + _ = last_pose.pop("utime") # useless + pos = last_pose.pop("pos") + rotation = last_pose.pop("orientation") can_bus.extend(pos) can_bus.extend(rotation) for key in last_pose.keys(): can_bus.extend(pose[key]) # 16 elements - can_bus.extend([0., 0.]) + can_bus.extend([0.0, 0.0]) return np.array(can_bus) -def _fill_trainval_infos(nusc, - nusc_can_bus, - train_scenes, - val_scenes, - test=False, - max_sweeps=10): +def _fill_trainval_infos( + nusc, nusc_can_bus, train_scenes, val_scenes, test=False, max_sweeps=10 +): """Generate the train/val infos from the raw data. 
Args: @@ -201,95 +224,99 @@ def _fill_trainval_infos(nusc, val_nusc_infos = [] frame_idx = 0 for sample in mmcv.track_iter_progress(nusc.sample): - lidar_token = sample['data']['LIDAR_TOP'] - sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP']) - cs_record = nusc.get('calibrated_sensor', - sd_rec['calibrated_sensor_token']) - pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token']) + lidar_token = sample["data"]["LIDAR_TOP"] + sd_rec = nusc.get("sample_data", sample["data"]["LIDAR_TOP"]) + cs_record = nusc.get("calibrated_sensor", sd_rec["calibrated_sensor_token"]) + pose_record = nusc.get("ego_pose", sd_rec["ego_pose_token"]) lidar_path, boxes, _ = nusc.get_sample_data(lidar_token) mmcv.check_file_exist(lidar_path) can_bus = _get_can_bus_info(nusc, nusc_can_bus, sample) ## info = { - 'lidar_path': lidar_path, - 'token': sample['token'], - 'prev': sample['prev'], - 'next': sample['next'], - 'can_bus': can_bus, - 'frame_idx': frame_idx, # temporal related info - 'sweeps': [], - 'cams': dict(), - 'scene_token': sample['scene_token'], # temporal related info - 'lidar2ego_translation': cs_record['translation'], - 'lidar2ego_rotation': cs_record['rotation'], - 'ego2global_translation': pose_record['translation'], - 'ego2global_rotation': pose_record['rotation'], - 'timestamp': sample['timestamp'], + "lidar_path": lidar_path, + "token": sample["token"], + "prev": sample["prev"], + "next": sample["next"], + "can_bus": can_bus, + "frame_idx": frame_idx, # temporal related info + "sweeps": [], + "cams": dict(), + "scene_token": sample["scene_token"], # temporal related info + "lidar2ego_translation": cs_record["translation"], + "lidar2ego_rotation": cs_record["rotation"], + "ego2global_translation": pose_record["translation"], + "ego2global_rotation": pose_record["rotation"], + "timestamp": sample["timestamp"], } - if sample['next'] == '': + if sample["next"] == "": frame_idx = 0 else: frame_idx += 1 - l2e_r = info['lidar2ego_rotation'] - l2e_t = info['lidar2ego_translation'] - e2g_r = info['ego2global_rotation'] - e2g_t = info['ego2global_translation'] + l2e_r = info["lidar2ego_rotation"] + l2e_t = info["lidar2ego_translation"] + e2g_r = info["ego2global_rotation"] + e2g_t = info["ego2global_translation"] l2e_r_mat = Quaternion(l2e_r).rotation_matrix e2g_r_mat = Quaternion(e2g_r).rotation_matrix # obtain 6 image's information per frame camera_types = [ - 'CAM_FRONT', - 'CAM_FRONT_RIGHT', - 'CAM_FRONT_LEFT', - 'CAM_BACK', - 'CAM_BACK_LEFT', - 'CAM_BACK_RIGHT', + "CAM_FRONT", + "CAM_FRONT_RIGHT", + "CAM_FRONT_LEFT", + "CAM_BACK", + "CAM_BACK_LEFT", + "CAM_BACK_RIGHT", ] for cam in camera_types: - cam_token = sample['data'][cam] + cam_token = sample["data"][cam] cam_path, _, cam_intrinsic = nusc.get_sample_data(cam_token) - cam_info = obtain_sensor2top(nusc, cam_token, l2e_t, l2e_r_mat, - e2g_t, e2g_r_mat, cam) + cam_info = obtain_sensor2top( + nusc, cam_token, l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, cam + ) cam_info.update(cam_intrinsic=cam_intrinsic) - info['cams'].update({cam: cam_info}) + info["cams"].update({cam: cam_info}) # obtain sweeps for a single key-frame - sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP']) + sd_rec = nusc.get("sample_data", sample["data"]["LIDAR_TOP"]) sweeps = [] while len(sweeps) < max_sweeps: - if not sd_rec['prev'] == '': - sweep = obtain_sensor2top(nusc, sd_rec['prev'], l2e_t, - l2e_r_mat, e2g_t, e2g_r_mat, 'lidar') + if not sd_rec["prev"] == "": + sweep = obtain_sensor2top( + nusc, sd_rec["prev"], l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, "lidar" + ) 
sweeps.append(sweep) - sd_rec = nusc.get('sample_data', sd_rec['prev']) + sd_rec = nusc.get("sample_data", sd_rec["prev"]) else: break - info['sweeps'] = sweeps + info["sweeps"] = sweeps # obtain annotation if not test: annotations = [ - nusc.get('sample_annotation', token) - for token in sample['anns'] + nusc.get("sample_annotation", token) for token in sample["anns"] ] locs = np.array([b.center for b in boxes]).reshape(-1, 3) dims = np.array([b.wlh for b in boxes]).reshape(-1, 3) - rots = np.array([b.orientation.yaw_pitch_roll[0] - for b in boxes]).reshape(-1, 1) + rots = np.array([b.orientation.yaw_pitch_roll[0] for b in boxes]).reshape( + -1, 1 + ) velocity = np.array( - [nusc.box_velocity(token)[:2] for token in sample['anns']]) + [nusc.box_velocity(token)[:2] for token in sample["anns"]] + ) valid_flag = np.array( - [(anno['num_lidar_pts'] + anno['num_radar_pts']) > 0 - for anno in annotations], - dtype=bool).reshape(-1) + [ + (anno["num_lidar_pts"] + anno["num_radar_pts"]) > 0 + for anno in annotations + ], + dtype=bool, + ).reshape(-1) # convert velo from global to lidar for i in range(len(boxes)): velo = np.array([*velocity[i], 0.0]) - velo = velo @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv( - l2e_r_mat).T + velo = velo @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T velocity[i] = velo[:2] names = [b.name for b in boxes] @@ -300,17 +327,16 @@ def _fill_trainval_infos(nusc, # we need to convert rot to SECOND format. gt_boxes = np.concatenate([locs, dims, -rots - np.pi / 2], axis=1) assert len(gt_boxes) == len( - annotations), f'{len(gt_boxes)}, {len(annotations)}' - info['gt_boxes'] = gt_boxes - info['gt_names'] = names - info['gt_velocity'] = velocity.reshape(-1, 2) - info['num_lidar_pts'] = np.array( - [a['num_lidar_pts'] for a in annotations]) - info['num_radar_pts'] = np.array( - [a['num_radar_pts'] for a in annotations]) - info['valid_flag'] = valid_flag - - if sample['scene_token'] in train_scenes: + annotations + ), f"{len(gt_boxes)}, {len(annotations)}" + info["gt_boxes"] = gt_boxes + info["gt_names"] = names + info["gt_velocity"] = velocity.reshape(-1, 2) + info["num_lidar_pts"] = np.array([a["num_lidar_pts"] for a in annotations]) + info["num_radar_pts"] = np.array([a["num_radar_pts"] for a in annotations]) + info["valid_flag"] = valid_flag + + if sample["scene_token"] in train_scenes: train_nusc_infos.append(info) else: val_nusc_infos.append(info) @@ -318,13 +344,9 @@ def _fill_trainval_infos(nusc, return train_nusc_infos, val_nusc_infos -def obtain_sensor2top(nusc, - sensor_token, - l2e_t, - l2e_r_mat, - e2g_t, - e2g_r_mat, - sensor_type='lidar'): +def obtain_sensor2top( + nusc, sensor_token, l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, sensor_type="lidar" +): """Obtain the info with RT matric from general sensor to Top LiDAR. Args: @@ -342,41 +364,44 @@ def obtain_sensor2top(nusc, Returns: sweep (dict): Sweep information after transformation. 
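    A hedged summary of the math implemented in the body below: R and T compose the
    chain sweep sensor -> sweep ego -> global -> key-frame ego -> key-frame top LiDAR.
    Written with 4x4 homogeneous matrices (sensor2ego / ego2global taken from the sweep
    record, lidar2ego_key / ego2global_key from the key frame), this is equivalent to

        sensor2lidar = inv(lidar2ego_key) @ inv(ego2global_key) @ ego2global @ sensor2ego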
""" - sd_rec = nusc.get('sample_data', sensor_token) - cs_record = nusc.get('calibrated_sensor', - sd_rec['calibrated_sensor_token']) - pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token']) - data_path = str(nusc.get_sample_data_path(sd_rec['token'])) + sd_rec = nusc.get("sample_data", sensor_token) + cs_record = nusc.get("calibrated_sensor", sd_rec["calibrated_sensor_token"]) + pose_record = nusc.get("ego_pose", sd_rec["ego_pose_token"]) + data_path = str(nusc.get_sample_data_path(sd_rec["token"])) if os.getcwd() in data_path: # path from lyftdataset is absolute path - data_path = data_path.split(f'{os.getcwd()}/')[-1] # relative path + data_path = data_path.split(f"{os.getcwd()}/")[-1] # relative path sweep = { - 'data_path': data_path, - 'type': sensor_type, - 'sample_data_token': sd_rec['token'], - 'sensor2ego_translation': cs_record['translation'], - 'sensor2ego_rotation': cs_record['rotation'], - 'ego2global_translation': pose_record['translation'], - 'ego2global_rotation': pose_record['rotation'], - 'timestamp': sd_rec['timestamp'] + "data_path": data_path, + "type": sensor_type, + "sample_data_token": sd_rec["token"], + "sensor2ego_translation": cs_record["translation"], + "sensor2ego_rotation": cs_record["rotation"], + "ego2global_translation": pose_record["translation"], + "ego2global_rotation": pose_record["rotation"], + "timestamp": sd_rec["timestamp"], } - l2e_r_s = sweep['sensor2ego_rotation'] - l2e_t_s = sweep['sensor2ego_translation'] - e2g_r_s = sweep['ego2global_rotation'] - e2g_t_s = sweep['ego2global_translation'] + l2e_r_s = sweep["sensor2ego_rotation"] + l2e_t_s = sweep["sensor2ego_translation"] + e2g_r_s = sweep["ego2global_rotation"] + e2g_t_s = sweep["ego2global_translation"] # obtain the RT from sensor to Top LiDAR # sweep->ego->global->ego'->lidar l2e_r_s_mat = Quaternion(l2e_r_s).rotation_matrix e2g_r_s_mat = Quaternion(e2g_r_s).rotation_matrix R = (l2e_r_s_mat.T @ e2g_r_s_mat.T) @ ( - np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) + np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T + ) T = (l2e_t_s @ e2g_r_s_mat.T + e2g_t_s) @ ( - np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) - T -= e2g_t @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T - ) + l2e_t @ np.linalg.inv(l2e_r_mat).T - sweep['sensor2lidar_rotation'] = R.T # points @ R.T + T - sweep['sensor2lidar_translation'] = T + np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T + ) + T -= ( + e2g_t @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) + + l2e_t @ np.linalg.inv(l2e_r_mat).T + ) + sweep["sensor2lidar_rotation"] = R.T # points @ R.T + T + sweep["sensor2lidar_translation"] = T return sweep @@ -391,14 +416,14 @@ def export_2d_annotation(root_path, info_path, version, mono3d=True): """ # get bbox annotations for camera camera_types = [ - 'CAM_FRONT', - 'CAM_FRONT_RIGHT', - 'CAM_FRONT_LEFT', - 'CAM_BACK', - 'CAM_BACK_LEFT', - 'CAM_BACK_RIGHT', + "CAM_FRONT", + "CAM_FRONT_RIGHT", + "CAM_FRONT_LEFT", + "CAM_BACK", + "CAM_BACK_LEFT", + "CAM_BACK_RIGHT", ] - nusc_infos = mmcv.load(info_path)['infos'] + nusc_infos = mmcv.load(info_path)["infos"] nusc = NuScenes(version=version, dataroot=root_path, verbose=True) # info_2d_list = [] cat2Ids = [ @@ -409,45 +434,44 @@ def export_2d_annotation(root_path, info_path, version, mono3d=True): coco_2d_dict = dict(annotations=[], images=[], categories=cat2Ids) for info in mmcv.track_iter_progress(nusc_infos): for cam in camera_types: - cam_info = info['cams'][cam] + cam_info = info["cams"][cam] coco_infos = 
get_2d_boxes( nusc, - cam_info['sample_data_token'], - visibilities=['', '1', '2', '3', '4'], - mono3d=mono3d) - (height, width, _) = mmcv.imread(cam_info['data_path']).shape - coco_2d_dict['images'].append( + cam_info["sample_data_token"], + visibilities=["", "1", "2", "3", "4"], + mono3d=mono3d, + ) + (height, width, _) = mmcv.imread(cam_info["data_path"]).shape + coco_2d_dict["images"].append( dict( - file_name=cam_info['data_path'].split('data/nuscenes/') - [-1], - id=cam_info['sample_data_token'], - token=info['token'], - cam2ego_rotation=cam_info['sensor2ego_rotation'], - cam2ego_translation=cam_info['sensor2ego_translation'], - ego2global_rotation=info['ego2global_rotation'], - ego2global_translation=info['ego2global_translation'], - cam_intrinsic=cam_info['cam_intrinsic'], + file_name=cam_info["data_path"].split("data/nuscenes/")[-1], + id=cam_info["sample_data_token"], + token=info["token"], + cam2ego_rotation=cam_info["sensor2ego_rotation"], + cam2ego_translation=cam_info["sensor2ego_translation"], + ego2global_rotation=info["ego2global_rotation"], + ego2global_translation=info["ego2global_translation"], + cam_intrinsic=cam_info["cam_intrinsic"], width=width, - height=height)) + height=height, + ) + ) for coco_info in coco_infos: if coco_info is None: continue # add an empty key for coco format - coco_info['segmentation'] = [] - coco_info['id'] = coco_ann_id - coco_2d_dict['annotations'].append(coco_info) + coco_info["segmentation"] = [] + coco_info["id"] = coco_ann_id + coco_2d_dict["annotations"].append(coco_info) coco_ann_id += 1 if mono3d: - json_prefix = f'{info_path[:-4]}_mono3d' + json_prefix = f"{info_path[:-4]}_mono3d" else: - json_prefix = f'{info_path[:-4]}' - mmcv.dump(coco_2d_dict, f'{json_prefix}.coco.json') + json_prefix = f"{info_path[:-4]}" + mmcv.dump(coco_2d_dict, f"{json_prefix}.coco.json") -def get_2d_boxes(nusc, - sample_data_token: str, - visibilities: List[str], - mono3d=True): +def get_2d_boxes(nusc, sample_data_token: str, visibilities: List[str], mono3d=True): """Get the 2D annotation records for a given `sample_data_token`. Args: @@ -462,49 +486,45 @@ def get_2d_boxes(nusc, """ # Get the sample data and the sample corresponding to that sample data. - sd_rec = nusc.get('sample_data', sample_data_token) + sd_rec = nusc.get("sample_data", sample_data_token) - assert sd_rec[ - 'sensor_modality'] == 'camera', 'Error: get_2d_boxes only works' \ - ' for camera sample_data!' - if not sd_rec['is_key_frame']: - raise ValueError( - 'The 2D re-projections are available only for keyframes.') + assert sd_rec["sensor_modality"] == "camera", ( + "Error: get_2d_boxes only works" " for camera sample_data!" + ) + if not sd_rec["is_key_frame"]: + raise ValueError("The 2D re-projections are available only for keyframes.") - s_rec = nusc.get('sample', sd_rec['sample_token']) + s_rec = nusc.get("sample", sd_rec["sample_token"]) # Get the calibrated sensor and ego pose # record to get the transformation matrices. - cs_rec = nusc.get('calibrated_sensor', sd_rec['calibrated_sensor_token']) - pose_rec = nusc.get('ego_pose', sd_rec['ego_pose_token']) - camera_intrinsic = np.array(cs_rec['camera_intrinsic']) + cs_rec = nusc.get("calibrated_sensor", sd_rec["calibrated_sensor_token"]) + pose_rec = nusc.get("ego_pose", sd_rec["ego_pose_token"]) + camera_intrinsic = np.array(cs_rec["camera_intrinsic"]) # Get all the annotation with the specified visibilties. 
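# A hedged aside on the filter applied next: nuScenes visibility_token levels '1'-'4'
# roughly mean 0-40%, 40-60%, 60-80% and 80-100% of the box is visible across the six
# cameras, and the empty string keeps annotations that carry no visibility record.
# Minimal stand-alone equivalent (the function name is illustrative only):
def keep_visible(ann_recs, visibilities=("", "1", "2", "3", "4")):
    return [rec for rec in ann_recs if rec["visibility_token"] in visibilities]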
+ ann_recs = [nusc.get("sample_annotation", token) for token in s_rec["anns"]] ann_recs = [ - nusc.get('sample_annotation', token) for token in s_rec['anns'] - ] - ann_recs = [ - ann_rec for ann_rec in ann_recs - if (ann_rec['visibility_token'] in visibilities) + ann_rec for ann_rec in ann_recs if (ann_rec["visibility_token"] in visibilities) ] repro_recs = [] for ann_rec in ann_recs: # Augment sample_annotation with token information. - ann_rec['sample_annotation_token'] = ann_rec['token'] - ann_rec['sample_data_token'] = sample_data_token + ann_rec["sample_annotation_token"] = ann_rec["token"] + ann_rec["sample_data_token"] = sample_data_token # Get the box in global coordinates. - box = nusc.get_box(ann_rec['token']) + box = nusc.get_box(ann_rec["token"]) # Move them to the ego-pose frame. - box.translate(-np.array(pose_rec['translation'])) - box.rotate(Quaternion(pose_rec['rotation']).inverse) + box.translate(-np.array(pose_rec["translation"])) + box.rotate(Quaternion(pose_rec["rotation"]).inverse) # Move them to the calibrated sensor frame. - box.translate(-np.array(cs_rec['translation'])) - box.rotate(Quaternion(cs_rec['rotation']).inverse) + box.translate(-np.array(cs_rec["translation"])) + box.rotate(Quaternion(cs_rec["rotation"]).inverse) # Filter out the corners that are not in front of the calibrated # sensor. @@ -513,8 +533,9 @@ def get_2d_boxes(nusc, corners_3d = corners_3d[:, in_front] # Project 3d box to 2d. - corner_coords = view_points(corners_3d, camera_intrinsic, - True).T[:, :2].tolist() + corner_coords = ( + view_points(corners_3d, camera_intrinsic, True).T[:, :2].tolist() + ) # Keep only corners that fall within the image. final_coords = post_process_coords(corner_coords) @@ -527,8 +548,9 @@ def get_2d_boxes(nusc, min_x, min_y, max_x, max_y = final_coords # Generate dictionary record to be included in the .json file. 
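# Before the record below is generated, a hedged recap of the projection steps above as
# a stand-alone sketch (names are illustrative, not the converter's API): the global-frame
# box is moved into the camera frame by undoing the ego pose and the camera calibration,
# corners behind the camera are dropped, and the rest are projected with the pinhole
# intrinsics.
import numpy as np
from nuscenes.utils.geometry_utils import view_points
from pyquaternion import Quaternion

def project_box_corners(box, pose_rec, cs_rec, camera_intrinsic):
    box.translate(-np.array(pose_rec["translation"]))   # global -> ego
    box.rotate(Quaternion(pose_rec["rotation"]).inverse)
    box.translate(-np.array(cs_rec["translation"]))     # ego -> camera
    box.rotate(Quaternion(cs_rec["rotation"]).inverse)
    corners_3d = box.corners()                           # (3, 8) corner coordinates
    corners_3d = corners_3d[:, corners_3d[2, :] > 0]     # keep corners in front of the camera
    return view_points(corners_3d, camera_intrinsic, True).T[:, :2]  # (M, 2) pixel coords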
- repro_rec = generate_record(ann_rec, min_x, min_y, max_x, max_y, - sample_data_token, sd_rec['filename']) + repro_rec = generate_record( + ann_rec, min_x, min_y, max_x, max_y, sample_data_token, sd_rec["filename"] + ) # If mono3d=True, add 3D annotations in camera coordinates if mono3d and (repro_rec is not None): @@ -543,33 +565,32 @@ def get_2d_boxes(nusc, global_velo2d = nusc.box_velocity(box.token)[:2] global_velo3d = np.array([*global_velo2d, 0.0]) - e2g_r_mat = Quaternion(pose_rec['rotation']).rotation_matrix - c2e_r_mat = Quaternion(cs_rec['rotation']).rotation_matrix - cam_velo3d = global_velo3d @ np.linalg.inv( - e2g_r_mat).T @ np.linalg.inv(c2e_r_mat).T + e2g_r_mat = Quaternion(pose_rec["rotation"]).rotation_matrix + c2e_r_mat = Quaternion(cs_rec["rotation"]).rotation_matrix + cam_velo3d = ( + global_velo3d @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(c2e_r_mat).T + ) velo = cam_velo3d[0::2].tolist() - repro_rec['bbox_cam3d'] = loc + dim + rot - repro_rec['velo_cam3d'] = velo + repro_rec["bbox_cam3d"] = loc + dim + rot + repro_rec["velo_cam3d"] = velo center3d = np.array(loc).reshape([1, 3]) - center2d = points_cam2img( - center3d, camera_intrinsic, with_depth=True) - repro_rec['center2d'] = center2d.squeeze().tolist() + center2d = points_cam2img(center3d, camera_intrinsic, with_depth=True) + repro_rec["center2d"] = center2d.squeeze().tolist() # normalized center2D + depth # if samples with depth < 0 will be removed - if repro_rec['center2d'][2] <= 0: + if repro_rec["center2d"][2] <= 0: continue - ann_token = nusc.get('sample_annotation', - box.token)['attribute_tokens'] + ann_token = nusc.get("sample_annotation", box.token)["attribute_tokens"] if len(ann_token) == 0: - attr_name = 'None' + attr_name = "None" else: - attr_name = nusc.get('attribute', ann_token[0])['name'] + attr_name = nusc.get("attribute", ann_token[0])["name"] attr_id = nus_attributes.index(attr_name) - repro_rec['attribute_name'] = attr_name - repro_rec['attribute_id'] = attr_id + repro_rec["attribute_name"] = attr_name + repro_rec["attribute_id"] = attr_id repro_recs.append(repro_rec) @@ -597,7 +618,8 @@ def post_process_coords( if polygon_from_2d_box.intersects(img_canvas): img_intersection = polygon_from_2d_box.intersection(img_canvas) intersection_coords = np.array( - [coord for coord in img_intersection.exterior.coords]) + [coord for coord in img_intersection.exterior.coords] + ) min_x = min(intersection_coords[:, 0]) min_y = min(intersection_coords[:, 1]) @@ -609,8 +631,15 @@ def post_process_coords( return None -def generate_record(ann_rec: dict, x1: float, y1: float, x2: float, y2: float, - sample_data_token: str, filename: str) -> OrderedDict: +def generate_record( + ann_rec: dict, + x1: float, + y1: float, + x2: float, + y2: float, + sample_data_token: str, + filename: str, +) -> OrderedDict: """Generate one 2D annotation record given various informations on top of the 2D bounding box coordinates. 
@@ -635,39 +664,39 @@ def generate_record(ann_rec: dict, x1: float, y1: float, x2: float, y2: float, - iscrowd (int): whether the area is crowd """ repro_rec = OrderedDict() - repro_rec['sample_data_token'] = sample_data_token + repro_rec["sample_data_token"] = sample_data_token coco_rec = dict() relevant_keys = [ - 'attribute_tokens', - 'category_name', - 'instance_token', - 'next', - 'num_lidar_pts', - 'num_radar_pts', - 'prev', - 'sample_annotation_token', - 'sample_data_token', - 'visibility_token', + "attribute_tokens", + "category_name", + "instance_token", + "next", + "num_lidar_pts", + "num_radar_pts", + "prev", + "sample_annotation_token", + "sample_data_token", + "visibility_token", ] for key, value in ann_rec.items(): if key in relevant_keys: repro_rec[key] = value - repro_rec['bbox_corners'] = [x1, y1, x2, y2] - repro_rec['filename'] = filename + repro_rec["bbox_corners"] = [x1, y1, x2, y2] + repro_rec["filename"] = filename - coco_rec['file_name'] = filename - coco_rec['image_id'] = sample_data_token - coco_rec['area'] = (y2 - y1) * (x2 - x1) + coco_rec["file_name"] = filename + coco_rec["image_id"] = sample_data_token + coco_rec["area"] = (y2 - y1) * (x2 - x1) - if repro_rec['category_name'] not in NuScenesDataset.NameMapping: + if repro_rec["category_name"] not in NuScenesDataset.NameMapping: return None - cat_name = NuScenesDataset.NameMapping[repro_rec['category_name']] - coco_rec['category_name'] = cat_name - coco_rec['category_id'] = nus_categories.index(cat_name) - coco_rec['bbox'] = [x1, y1, x2 - x1, y2 - y1] - coco_rec['iscrowd'] = 0 + cat_name = NuScenesDataset.NameMapping[repro_rec["category_name"]] + coco_rec["category_name"] = cat_name + coco_rec["category_id"] = nus_categories.index(cat_name) + coco_rec["bbox"] = [x1, y1, x2 - x1, y2 - y1] + coco_rec["iscrowd"] = 0 return coco_rec diff --git a/tools/data_converter/nuscenes_occ_converter_own.py b/tools/data_converter/nuscenes_occ_converter_own.py index be18c31..f958f41 100644 --- a/tools/data_converter/nuscenes_occ_converter_own.py +++ b/tools/data_converter/nuscenes_occ_converter_own.py @@ -3,38 +3,56 @@ # --------------------------------------------- # Modified by Zhiqi Li # --------------------------------------------- -import mmcv -import numpy as np import os from collections import OrderedDict -from nuscenes.nuscenes import NuScenes -from nuscenes.utils.geometry_utils import view_points from os import path as osp -from pyquaternion import Quaternion -from shapely.geometry import MultiPoint, box from typing import List, Tuple, Union +import mmcv +import numpy as np +import simplejson as json from mmdet3d.core.bbox.box_np_ops import points_cam2img from mmdet3d.datasets import NuScenesDataset -import simplejson as json - -nus_categories = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle', - 'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone', - 'barrier') - -nus_attributes = ('cycle.with_rider', 'cycle.without_rider', - 'pedestrian.moving', 'pedestrian.standing', - 'pedestrian.sitting_lying_down', 'vehicle.moving', - 'vehicle.parked', 'vehicle.stopped', 'None') - +from nuscenes.nuscenes import NuScenes +from nuscenes.utils.geometry_utils import view_points +from pyquaternion import Quaternion +from shapely.geometry import MultiPoint, box -def create_nuscenes_occ_infos(root_path, - occ_path, - out_path, - can_bus_root_path, - info_prefix, - version='v1.0-trainval', - max_sweeps=10): +nus_categories = ( + "car", + "truck", + "trailer", + "bus", + "construction_vehicle", + "bicycle", + "motorcycle", + 
"pedestrian", + "traffic_cone", + "barrier", +) + +nus_attributes = ( + "cycle.with_rider", + "cycle.without_rider", + "pedestrian.moving", + "pedestrian.standing", + "pedestrian.sitting_lying_down", + "vehicle.moving", + "vehicle.parked", + "vehicle.stopped", + "None", +) + + +def create_nuscenes_occ_infos( + root_path, + occ_path, + out_path, + can_bus_root_path, + info_prefix, + version="v1.0-trainval", + max_sweeps=10, +): """Create info file of nuscene dataset. Given the raw data, generate its related info file in pkl format. @@ -48,77 +66,91 @@ def create_nuscenes_occ_infos(root_path, Default: 10 """ - from nuscenes.nuscenes import NuScenes from nuscenes.can_bus.can_bus_api import NuScenesCanBus + from nuscenes.nuscenes import NuScenes + print(version, root_path) nusc = NuScenes(version=version, dataroot=root_path, verbose=True) nusc_can_bus = NuScenesCanBus(dataroot=can_bus_root_path) print(type(nusc_can_bus)) from nuscenes.utils import splits - available_vers = ['v1.0-trainval', 'v1.0-test', 'v1.0-mini'] + + available_vers = ["v1.0-trainval", "v1.0-test", "v1.0-mini"] assert version in available_vers - with open(os.path.join(occ_path,'annotations.json'),'r') as f: + with open(os.path.join(occ_path, "annotations.json"), "r") as f: occ_anno = json.load(f) - if version == 'v1.0-trainval': + if version == "v1.0-trainval": train_scenes = splits.train val_scenes = splits.val - elif version == 'v1.0-test': + elif version == "v1.0-test": train_scenes = splits.test val_scenes = [] - elif version == 'v1.0-mini': + elif version == "v1.0-mini": train_scenes = splits.mini_train val_scenes = splits.mini_val else: - raise ValueError('unknown') + raise ValueError("unknown") # filter existing scenes. available_scenes = get_available_scenes(nusc) - available_scene_names = [s['name'] for s in available_scenes] - train_scenes = list( - filter(lambda x: x in available_scene_names, train_scenes)) + available_scene_names = [s["name"] for s in available_scenes] + train_scenes = list(filter(lambda x: x in available_scene_names, train_scenes)) val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes)) - train_scenes = set([ - available_scenes[available_scene_names.index(s)]['token'] - for s in train_scenes - ]) - val_scenes = set([ - available_scenes[available_scene_names.index(s)]['token'] - for s in val_scenes - ]) + train_scenes = set( + [ + available_scenes[available_scene_names.index(s)]["token"] + for s in train_scenes + ] + ) + val_scenes = set( + [available_scenes[available_scene_names.index(s)]["token"] for s in val_scenes] + ) token2name = dict() for scene in nusc.scene: - token2name[scene['token']]=scene['name'] - + token2name[scene["token"]] = scene["name"] - test = 'test' in version + test = "test" in version if test: - print('test scene: {}'.format(len(train_scenes))) + print("test scene: {}".format(len(train_scenes))) else: - print('train scene: {}, val scene: {}'.format( - len(train_scenes), len(val_scenes))) + print( + "train scene: {}, val scene: {}".format(len(train_scenes), len(val_scenes)) + ) train_nusc_infos, val_nusc_infos = _fill_occ_trainval_infos( - nusc,occ_anno,token2name, nusc_can_bus, train_scenes, val_scenes, test, max_sweeps=max_sweeps) + nusc, + occ_anno, + token2name, + nusc_can_bus, + train_scenes, + val_scenes, + test, + max_sweeps=max_sweeps, + ) metadata = dict(version=version) if test: - print('test sample: {}'.format(len(train_nusc_infos))) + print("test sample: {}".format(len(train_nusc_infos))) data = dict(infos=train_nusc_infos, metadata=metadata) 
- info_path = osp.join(out_path, - '{}_infos_temporal_test.pkl'.format(info_prefix)) + info_path = osp.join(out_path, "{}_infos_temporal_test.pkl".format(info_prefix)) mmcv.dump(data, info_path) else: - print('train sample: {}, val sample: {}'.format( - len(train_nusc_infos), len(val_nusc_infos))) + print( + "train sample: {}, val sample: {}".format( + len(train_nusc_infos), len(val_nusc_infos) + ) + ) data = dict(infos=train_nusc_infos, metadata=metadata) - info_path = osp.join(out_path, - '{}_infos_temporal_train.pkl'.format(info_prefix)) + info_path = osp.join( + out_path, "{}_infos_temporal_train.pkl".format(info_prefix) + ) mmcv.dump(data, info_path) - data['infos'] = val_nusc_infos - info_val_path = osp.join(out_path, - '{}_infos_temporal_val.pkl'.format(info_prefix)) + data["infos"] = val_nusc_infos + info_val_path = osp.join( + out_path, "{}_infos_temporal_val.pkl".format(info_prefix) + ) mmcv.dump(data, info_val_path) @@ -136,20 +168,20 @@ def get_available_scenes(nusc): available scenes. """ available_scenes = [] - print('total scene num: {}'.format(len(nusc.scene))) + print("total scene num: {}".format(len(nusc.scene))) for scene in nusc.scene: - scene_token = scene['token'] - scene_rec = nusc.get('scene', scene_token) - sample_rec = nusc.get('sample', scene_rec['first_sample_token']) - sd_rec = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP']) + scene_token = scene["token"] + scene_rec = nusc.get("scene", scene_token) + sample_rec = nusc.get("sample", scene_rec["first_sample_token"]) + sd_rec = nusc.get("sample_data", sample_rec["data"]["LIDAR_TOP"]) has_more_frames = True scene_not_exist = False while has_more_frames: - lidar_path, boxes, _ = nusc.get_sample_data(sd_rec['token']) + lidar_path, boxes, _ = nusc.get_sample_data(sd_rec["token"]) lidar_path = str(lidar_path) if os.getcwd() in lidar_path: # path from lyftdataset is absolute path - lidar_path = lidar_path.split(f'{os.getcwd()}/')[-1] + lidar_path = lidar_path.split(f"{os.getcwd()}/")[-1] # relative path if not mmcv.is_filepath(lidar_path): scene_not_exist = True @@ -159,43 +191,45 @@ def get_available_scenes(nusc): if scene_not_exist: continue available_scenes.append(scene) - print('exist scene num: {}'.format(len(available_scenes))) + print("exist scene num: {}".format(len(available_scenes))) return available_scenes def _get_can_bus_info(nusc, nusc_can_bus, sample): - scene_name = nusc.get('scene', sample['scene_token'])['name'] - sample_timestamp = sample['timestamp'] + scene_name = nusc.get("scene", sample["scene_token"])["name"] + sample_timestamp = sample["timestamp"] try: - pose_list = nusc_can_bus.get_messages(scene_name, 'pose') + pose_list = nusc_can_bus.get_messages(scene_name, "pose") except: return np.zeros(18) # server scenes do not have can bus information. 
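# A hedged sketch of the 18-dim vector assembled in the rest of this function, assuming
# the standard nuScenes CAN bus 'pose' message (keys remaining after popping 'utime',
# 'pos' and 'orientation' are 'accel', 'rotation_rate' and 'vel'):
#   [x, y, z,            # pos (3)
#    qw, qx, qy, qz,     # orientation quaternion (4)
#    ax, ay, az,         # accel (3)
#    wx, wy, wz,         # rotation_rate (3)
#    vx, vy, vz,         # vel (3)
#    0.0, 0.0]           # two zero placeholders, giving 18 values in total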
can_bus = [] # during each scene, the first timestamp of can_bus may be large than the first sample's timestamp last_pose = pose_list[0] for i, pose in enumerate(pose_list): - if pose['utime'] > sample_timestamp: + if pose["utime"] > sample_timestamp: break last_pose = pose - _ = last_pose.pop('utime') # useless - pos = last_pose.pop('pos') - rotation = last_pose.pop('orientation') + _ = last_pose.pop("utime") # useless + pos = last_pose.pop("pos") + rotation = last_pose.pop("orientation") can_bus.extend(pos) can_bus.extend(rotation) for key in last_pose.keys(): can_bus.extend(pose[key]) # 16 elements - can_bus.extend([0., 0.]) + can_bus.extend([0.0, 0.0]) return np.array(can_bus) -def _fill_occ_trainval_infos(nusc, - occ_anno, - token2name, - nusc_can_bus, - train_scenes, - val_scenes, - test=False, - max_sweeps=10): +def _fill_occ_trainval_infos( + nusc, + occ_anno, + token2name, + nusc_can_bus, + train_scenes, + val_scenes, + test=False, + max_sweeps=10, +): """Generate the train/val infos from the raw data. Args: @@ -213,110 +247,111 @@ def _fill_occ_trainval_infos(nusc, train_nusc_infos = [] val_nusc_infos = [] frame_idx = 0 - scene_infos=occ_anno['scene_infos'] + scene_infos = occ_anno["scene_infos"] for sample in mmcv.track_iter_progress(nusc.sample): + lidar_token = sample["data"]["LIDAR_TOP"] + sd_rec = nusc.get("sample_data", sample["data"]["LIDAR_TOP"]) - - - lidar_token = sample['data']['LIDAR_TOP'] - sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP']) - - scene_token = sample['scene_token'] + scene_token = sample["scene_token"] scene_name = token2name[scene_token] - sample_token=sd_rec['sample_token'] + sample_token = sd_rec["sample_token"] if sample_token in scene_infos[scene_name].keys(): - occ_sample=scene_infos[scene_name][sample_token] + occ_sample = scene_infos[scene_name][sample_token] else: continue - cs_record = nusc.get('calibrated_sensor', - sd_rec['calibrated_sensor_token']) - pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token']) + cs_record = nusc.get("calibrated_sensor", sd_rec["calibrated_sensor_token"]) + pose_record = nusc.get("ego_pose", sd_rec["ego_pose_token"]) lidar_path, boxes, _ = nusc.get_sample_data(lidar_token) mmcv.check_file_exist(lidar_path) can_bus = _get_can_bus_info(nusc, nusc_can_bus, sample) ## info = { - 'lidar_path': lidar_path, - 'token': sample['token'], - 'prev': sample['prev'], - 'next': sample['next'], - 'can_bus': can_bus, - 'frame_idx': frame_idx, # temporal related info - 'sweeps': [], - 'cams': dict(), - 'scene_token': sample['scene_token'], # temporal related info - 'lidar2ego_translation': cs_record['translation'], - 'lidar2ego_rotation': cs_record['rotation'], - 'ego2global_translation': pose_record['translation'], - 'ego2global_rotation': pose_record['rotation'], - 'timestamp': sample['timestamp'], + "lidar_path": lidar_path, + "token": sample["token"], + "prev": sample["prev"], + "next": sample["next"], + "can_bus": can_bus, + "frame_idx": frame_idx, # temporal related info + "sweeps": [], + "cams": dict(), + "scene_token": sample["scene_token"], # temporal related info + "lidar2ego_translation": cs_record["translation"], + "lidar2ego_rotation": cs_record["rotation"], + "ego2global_translation": pose_record["translation"], + "ego2global_rotation": pose_record["rotation"], + "timestamp": sample["timestamp"], } - info['occ_gt_path'] = occ_sample['gt_path'] - if sample['next'] == '': + info["occ_gt_path"] = occ_sample["gt_path"] + if sample["next"] == "": frame_idx = 0 else: frame_idx += 1 - l2e_r = 
info['lidar2ego_rotation'] - l2e_t = info['lidar2ego_translation'] - e2g_r = info['ego2global_rotation'] - e2g_t = info['ego2global_translation'] + l2e_r = info["lidar2ego_rotation"] + l2e_t = info["lidar2ego_translation"] + e2g_r = info["ego2global_rotation"] + e2g_t = info["ego2global_translation"] l2e_r_mat = Quaternion(l2e_r).rotation_matrix e2g_r_mat = Quaternion(e2g_r).rotation_matrix # obtain 6 image's information per frame camera_types = [ - 'CAM_FRONT', - 'CAM_FRONT_RIGHT', - 'CAM_FRONT_LEFT', - 'CAM_BACK', - 'CAM_BACK_LEFT', - 'CAM_BACK_RIGHT', + "CAM_FRONT", + "CAM_FRONT_RIGHT", + "CAM_FRONT_LEFT", + "CAM_BACK", + "CAM_BACK_LEFT", + "CAM_BACK_RIGHT", ] for cam in camera_types: - cam_token = sample['data'][cam] + cam_token = sample["data"][cam] cam_path, _, cam_intrinsic = nusc.get_sample_data(cam_token) - cam_info = obtain_sensor2top(nusc, cam_token, l2e_t, l2e_r_mat, - e2g_t, e2g_r_mat, cam) + cam_info = obtain_sensor2top( + nusc, cam_token, l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, cam + ) cam_info.update(cam_intrinsic=cam_intrinsic) - info['cams'].update({cam: cam_info}) + info["cams"].update({cam: cam_info}) # obtain sweeps for a single key-frame - sd_rec = nusc.get('sample_data', sample['data']['LIDAR_TOP']) + sd_rec = nusc.get("sample_data", sample["data"]["LIDAR_TOP"]) sweeps = [] while len(sweeps) < max_sweeps: - if not sd_rec['prev'] == '': - sweep = obtain_sensor2top(nusc, sd_rec['prev'], l2e_t, - l2e_r_mat, e2g_t, e2g_r_mat, 'lidar') + if not sd_rec["prev"] == "": + sweep = obtain_sensor2top( + nusc, sd_rec["prev"], l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, "lidar" + ) sweeps.append(sweep) - sd_rec = nusc.get('sample_data', sd_rec['prev']) + sd_rec = nusc.get("sample_data", sd_rec["prev"]) else: break - info['sweeps'] = sweeps + info["sweeps"] = sweeps # obtain annotation if not test: annotations = [ - nusc.get('sample_annotation', token) - for token in sample['anns'] + nusc.get("sample_annotation", token) for token in sample["anns"] ] locs = np.array([b.center for b in boxes]).reshape(-1, 3) dims = np.array([b.wlh for b in boxes]).reshape(-1, 3) - rots = np.array([b.orientation.yaw_pitch_roll[0] - for b in boxes]).reshape(-1, 1) + rots = np.array([b.orientation.yaw_pitch_roll[0] for b in boxes]).reshape( + -1, 1 + ) velocity = np.array( - [nusc.box_velocity(token)[:2] for token in sample['anns']]) + [nusc.box_velocity(token)[:2] for token in sample["anns"]] + ) valid_flag = np.array( - [(anno['num_lidar_pts'] + anno['num_radar_pts']) > 0 - for anno in annotations], - dtype=bool).reshape(-1) + [ + (anno["num_lidar_pts"] + anno["num_radar_pts"]) > 0 + for anno in annotations + ], + dtype=bool, + ).reshape(-1) # convert velo from global to lidar for i in range(len(boxes)): velo = np.array([*velocity[i], 0.0]) - velo = velo @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv( - l2e_r_mat).T + velo = velo @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T velocity[i] = velo[:2] names = [b.name for b in boxes] @@ -327,17 +362,16 @@ def _fill_occ_trainval_infos(nusc, # we need to convert rot to SECOND format. 
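# Hedged elaboration of the note above: the concatenation below maps the nuScenes yaw
# (rotation about +z) to the SECOND-style convention expected downstream,
#   yaw_second = -yaw_nusc - pi / 2    (e.g. yaw_nusc = 0 -> yaw_second = -pi/2),
# so every row of gt_boxes reads [x, y, z, w, l, h, yaw_second], while per-box
# velocities are stored separately in info['gt_velocity'].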
gt_boxes = np.concatenate([locs, dims, -rots - np.pi / 2], axis=1) assert len(gt_boxes) == len( - annotations), f'{len(gt_boxes)}, {len(annotations)}' - info['gt_boxes'] = gt_boxes - info['gt_names'] = names - info['gt_velocity'] = velocity.reshape(-1, 2) - info['num_lidar_pts'] = np.array( - [a['num_lidar_pts'] for a in annotations]) - info['num_radar_pts'] = np.array( - [a['num_radar_pts'] for a in annotations]) - info['valid_flag'] = valid_flag - - if sample['scene_token'] in train_scenes: + annotations + ), f"{len(gt_boxes)}, {len(annotations)}" + info["gt_boxes"] = gt_boxes + info["gt_names"] = names + info["gt_velocity"] = velocity.reshape(-1, 2) + info["num_lidar_pts"] = np.array([a["num_lidar_pts"] for a in annotations]) + info["num_radar_pts"] = np.array([a["num_radar_pts"] for a in annotations]) + info["valid_flag"] = valid_flag + + if sample["scene_token"] in train_scenes: train_nusc_infos.append(info) else: val_nusc_infos.append(info) @@ -345,13 +379,9 @@ def _fill_occ_trainval_infos(nusc, return train_nusc_infos, val_nusc_infos -def obtain_sensor2top(nusc, - sensor_token, - l2e_t, - l2e_r_mat, - e2g_t, - e2g_r_mat, - sensor_type='lidar'): +def obtain_sensor2top( + nusc, sensor_token, l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, sensor_type="lidar" +): """Obtain the info with RT matric from general sensor to Top LiDAR. Args: @@ -369,41 +399,44 @@ def obtain_sensor2top(nusc, Returns: sweep (dict): Sweep information after transformation. """ - sd_rec = nusc.get('sample_data', sensor_token) - cs_record = nusc.get('calibrated_sensor', - sd_rec['calibrated_sensor_token']) - pose_record = nusc.get('ego_pose', sd_rec['ego_pose_token']) - data_path = str(nusc.get_sample_data_path(sd_rec['token'])) + sd_rec = nusc.get("sample_data", sensor_token) + cs_record = nusc.get("calibrated_sensor", sd_rec["calibrated_sensor_token"]) + pose_record = nusc.get("ego_pose", sd_rec["ego_pose_token"]) + data_path = str(nusc.get_sample_data_path(sd_rec["token"])) if os.getcwd() in data_path: # path from lyftdataset is absolute path - data_path = data_path.split(f'{os.getcwd()}/')[-1] # relative path + data_path = data_path.split(f"{os.getcwd()}/")[-1] # relative path sweep = { - 'data_path': data_path, - 'type': sensor_type, - 'sample_data_token': sd_rec['token'], - 'sensor2ego_translation': cs_record['translation'], - 'sensor2ego_rotation': cs_record['rotation'], - 'ego2global_translation': pose_record['translation'], - 'ego2global_rotation': pose_record['rotation'], - 'timestamp': sd_rec['timestamp'] + "data_path": data_path, + "type": sensor_type, + "sample_data_token": sd_rec["token"], + "sensor2ego_translation": cs_record["translation"], + "sensor2ego_rotation": cs_record["rotation"], + "ego2global_translation": pose_record["translation"], + "ego2global_rotation": pose_record["rotation"], + "timestamp": sd_rec["timestamp"], } - l2e_r_s = sweep['sensor2ego_rotation'] - l2e_t_s = sweep['sensor2ego_translation'] - e2g_r_s = sweep['ego2global_rotation'] - e2g_t_s = sweep['ego2global_translation'] + l2e_r_s = sweep["sensor2ego_rotation"] + l2e_t_s = sweep["sensor2ego_translation"] + e2g_r_s = sweep["ego2global_rotation"] + e2g_t_s = sweep["ego2global_translation"] # obtain the RT from sensor to Top LiDAR # sweep->ego->global->ego'->lidar l2e_r_s_mat = Quaternion(l2e_r_s).rotation_matrix e2g_r_s_mat = Quaternion(e2g_r_s).rotation_matrix R = (l2e_r_s_mat.T @ e2g_r_s_mat.T) @ ( - np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) + np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T + ) T = (l2e_t_s @ 
e2g_r_s_mat.T + e2g_t_s) @ ( - np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) - T -= e2g_t @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T - ) + l2e_t @ np.linalg.inv(l2e_r_mat).T - sweep['sensor2lidar_rotation'] = R.T # points @ R.T + T - sweep['sensor2lidar_translation'] = T + np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T + ) + T -= ( + e2g_t @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) + + l2e_t @ np.linalg.inv(l2e_r_mat).T + ) + sweep["sensor2lidar_rotation"] = R.T # points @ R.T + T + sweep["sensor2lidar_translation"] = T return sweep @@ -418,14 +451,14 @@ def export_2d_annotation(root_path, info_path, version, mono3d=True): """ # get bbox annotations for camera camera_types = [ - 'CAM_FRONT', - 'CAM_FRONT_RIGHT', - 'CAM_FRONT_LEFT', - 'CAM_BACK', - 'CAM_BACK_LEFT', - 'CAM_BACK_RIGHT', + "CAM_FRONT", + "CAM_FRONT_RIGHT", + "CAM_FRONT_LEFT", + "CAM_BACK", + "CAM_BACK_LEFT", + "CAM_BACK_RIGHT", ] - nusc_infos = mmcv.load(info_path)['infos'] + nusc_infos = mmcv.load(info_path)["infos"] nusc = NuScenes(version=version, dataroot=root_path, verbose=True) # info_2d_list = [] cat2Ids = [ @@ -436,45 +469,44 @@ def export_2d_annotation(root_path, info_path, version, mono3d=True): coco_2d_dict = dict(annotations=[], images=[], categories=cat2Ids) for info in mmcv.track_iter_progress(nusc_infos): for cam in camera_types: - cam_info = info['cams'][cam] + cam_info = info["cams"][cam] coco_infos = get_2d_boxes( nusc, - cam_info['sample_data_token'], - visibilities=['', '1', '2', '3', '4'], - mono3d=mono3d) - (height, width, _) = mmcv.imread(cam_info['data_path']).shape - coco_2d_dict['images'].append( + cam_info["sample_data_token"], + visibilities=["", "1", "2", "3", "4"], + mono3d=mono3d, + ) + (height, width, _) = mmcv.imread(cam_info["data_path"]).shape + coco_2d_dict["images"].append( dict( - file_name=cam_info['data_path'].split('data/nuscenes/') - [-1], - id=cam_info['sample_data_token'], - token=info['token'], - cam2ego_rotation=cam_info['sensor2ego_rotation'], - cam2ego_translation=cam_info['sensor2ego_translation'], - ego2global_rotation=info['ego2global_rotation'], - ego2global_translation=info['ego2global_translation'], - cam_intrinsic=cam_info['cam_intrinsic'], + file_name=cam_info["data_path"].split("data/nuscenes/")[-1], + id=cam_info["sample_data_token"], + token=info["token"], + cam2ego_rotation=cam_info["sensor2ego_rotation"], + cam2ego_translation=cam_info["sensor2ego_translation"], + ego2global_rotation=info["ego2global_rotation"], + ego2global_translation=info["ego2global_translation"], + cam_intrinsic=cam_info["cam_intrinsic"], width=width, - height=height)) + height=height, + ) + ) for coco_info in coco_infos: if coco_info is None: continue # add an empty key for coco format - coco_info['segmentation'] = [] - coco_info['id'] = coco_ann_id - coco_2d_dict['annotations'].append(coco_info) + coco_info["segmentation"] = [] + coco_info["id"] = coco_ann_id + coco_2d_dict["annotations"].append(coco_info) coco_ann_id += 1 if mono3d: - json_prefix = f'{info_path[:-4]}_mono3d' + json_prefix = f"{info_path[:-4]}_mono3d" else: - json_prefix = f'{info_path[:-4]}' - mmcv.dump(coco_2d_dict, f'{json_prefix}.coco.json') + json_prefix = f"{info_path[:-4]}" + mmcv.dump(coco_2d_dict, f"{json_prefix}.coco.json") -def get_2d_boxes(nusc, - sample_data_token: str, - visibilities: List[str], - mono3d=True): +def get_2d_boxes(nusc, sample_data_token: str, visibilities: List[str], mono3d=True): """Get the 2D annotation records for a given `sample_data_token`. 
Args: @@ -489,49 +521,45 @@ def get_2d_boxes(nusc, """ # Get the sample data and the sample corresponding to that sample data. - sd_rec = nusc.get('sample_data', sample_data_token) + sd_rec = nusc.get("sample_data", sample_data_token) - assert sd_rec[ - 'sensor_modality'] == 'camera', 'Error: get_2d_boxes only works' \ - ' for camera sample_data!' - if not sd_rec['is_key_frame']: - raise ValueError( - 'The 2D re-projections are available only for keyframes.') + assert sd_rec["sensor_modality"] == "camera", ( + "Error: get_2d_boxes only works" " for camera sample_data!" + ) + if not sd_rec["is_key_frame"]: + raise ValueError("The 2D re-projections are available only for keyframes.") - s_rec = nusc.get('sample', sd_rec['sample_token']) + s_rec = nusc.get("sample", sd_rec["sample_token"]) # Get the calibrated sensor and ego pose # record to get the transformation matrices. - cs_rec = nusc.get('calibrated_sensor', sd_rec['calibrated_sensor_token']) - pose_rec = nusc.get('ego_pose', sd_rec['ego_pose_token']) - camera_intrinsic = np.array(cs_rec['camera_intrinsic']) + cs_rec = nusc.get("calibrated_sensor", sd_rec["calibrated_sensor_token"]) + pose_rec = nusc.get("ego_pose", sd_rec["ego_pose_token"]) + camera_intrinsic = np.array(cs_rec["camera_intrinsic"]) # Get all the annotation with the specified visibilties. + ann_recs = [nusc.get("sample_annotation", token) for token in s_rec["anns"]] ann_recs = [ - nusc.get('sample_annotation', token) for token in s_rec['anns'] - ] - ann_recs = [ - ann_rec for ann_rec in ann_recs - if (ann_rec['visibility_token'] in visibilities) + ann_rec for ann_rec in ann_recs if (ann_rec["visibility_token"] in visibilities) ] repro_recs = [] for ann_rec in ann_recs: # Augment sample_annotation with token information. - ann_rec['sample_annotation_token'] = ann_rec['token'] - ann_rec['sample_data_token'] = sample_data_token + ann_rec["sample_annotation_token"] = ann_rec["token"] + ann_rec["sample_data_token"] = sample_data_token # Get the box in global coordinates. - box = nusc.get_box(ann_rec['token']) + box = nusc.get_box(ann_rec["token"]) # Move them to the ego-pose frame. - box.translate(-np.array(pose_rec['translation'])) - box.rotate(Quaternion(pose_rec['rotation']).inverse) + box.translate(-np.array(pose_rec["translation"])) + box.rotate(Quaternion(pose_rec["rotation"]).inverse) # Move them to the calibrated sensor frame. - box.translate(-np.array(cs_rec['translation'])) - box.rotate(Quaternion(cs_rec['rotation']).inverse) + box.translate(-np.array(cs_rec["translation"])) + box.rotate(Quaternion(cs_rec["rotation"]).inverse) # Filter out the corners that are not in front of the calibrated # sensor. @@ -540,8 +568,9 @@ def get_2d_boxes(nusc, corners_3d = corners_3d[:, in_front] # Project 3d box to 2d. - corner_coords = view_points(corners_3d, camera_intrinsic, - True).T[:, :2].tolist() + corner_coords = ( + view_points(corners_3d, camera_intrinsic, True).T[:, :2].tolist() + ) # Keep only corners that fall within the image. final_coords = post_process_coords(corner_coords) @@ -554,8 +583,9 @@ def get_2d_boxes(nusc, min_x, min_y, max_x, max_y = final_coords # Generate dictionary record to be included in the .json file. 
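# A hedged stand-alone sketch of the clipping that post_process_coords (called above,
# defined later in this file) performs: intersect the convex hull of the projected
# corners with the image canvas and return axis-aligned bounds, or None when the box
# falls completely outside the image. The default canvas size is an assumption matching
# the nuScenes camera resolution.
from shapely.geometry import MultiPoint, box as make_canvas

def clip_corners_to_image(corner_coords, imsize=(1600, 900)):
    hull = MultiPoint(corner_coords).convex_hull
    canvas = make_canvas(0, 0, imsize[0], imsize[1])
    if not hull.intersects(canvas):
        return None
    intersection = hull.intersection(canvas)
    xs, ys = zip(*intersection.exterior.coords)  # vertices of the clipped polygon
    return min(xs), min(ys), max(xs), max(ys)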
- repro_rec = generate_record(ann_rec, min_x, min_y, max_x, max_y, - sample_data_token, sd_rec['filename']) + repro_rec = generate_record( + ann_rec, min_x, min_y, max_x, max_y, sample_data_token, sd_rec["filename"] + ) # If mono3d=True, add 3D annotations in camera coordinates if mono3d and (repro_rec is not None): @@ -570,33 +600,32 @@ def get_2d_boxes(nusc, global_velo2d = nusc.box_velocity(box.token)[:2] global_velo3d = np.array([*global_velo2d, 0.0]) - e2g_r_mat = Quaternion(pose_rec['rotation']).rotation_matrix - c2e_r_mat = Quaternion(cs_rec['rotation']).rotation_matrix - cam_velo3d = global_velo3d @ np.linalg.inv( - e2g_r_mat).T @ np.linalg.inv(c2e_r_mat).T + e2g_r_mat = Quaternion(pose_rec["rotation"]).rotation_matrix + c2e_r_mat = Quaternion(cs_rec["rotation"]).rotation_matrix + cam_velo3d = ( + global_velo3d @ np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(c2e_r_mat).T + ) velo = cam_velo3d[0::2].tolist() - repro_rec['bbox_cam3d'] = loc + dim + rot - repro_rec['velo_cam3d'] = velo + repro_rec["bbox_cam3d"] = loc + dim + rot + repro_rec["velo_cam3d"] = velo center3d = np.array(loc).reshape([1, 3]) - center2d = points_cam2img( - center3d, camera_intrinsic, with_depth=True) - repro_rec['center2d'] = center2d.squeeze().tolist() + center2d = points_cam2img(center3d, camera_intrinsic, with_depth=True) + repro_rec["center2d"] = center2d.squeeze().tolist() # normalized center2D + depth # if samples with depth < 0 will be removed - if repro_rec['center2d'][2] <= 0: + if repro_rec["center2d"][2] <= 0: continue - ann_token = nusc.get('sample_annotation', - box.token)['attribute_tokens'] + ann_token = nusc.get("sample_annotation", box.token)["attribute_tokens"] if len(ann_token) == 0: - attr_name = 'None' + attr_name = "None" else: - attr_name = nusc.get('attribute', ann_token[0])['name'] + attr_name = nusc.get("attribute", ann_token[0])["name"] attr_id = nus_attributes.index(attr_name) - repro_rec['attribute_name'] = attr_name - repro_rec['attribute_id'] = attr_id + repro_rec["attribute_name"] = attr_name + repro_rec["attribute_id"] = attr_id repro_recs.append(repro_rec) @@ -624,7 +653,8 @@ def post_process_coords( if polygon_from_2d_box.intersects(img_canvas): img_intersection = polygon_from_2d_box.intersection(img_canvas) intersection_coords = np.array( - [coord for coord in img_intersection.exterior.coords]) + [coord for coord in img_intersection.exterior.coords] + ) min_x = min(intersection_coords[:, 0]) min_y = min(intersection_coords[:, 1]) @@ -636,8 +666,15 @@ def post_process_coords( return None -def generate_record(ann_rec: dict, x1: float, y1: float, x2: float, y2: float, - sample_data_token: str, filename: str) -> OrderedDict: +def generate_record( + ann_rec: dict, + x1: float, + y1: float, + x2: float, + y2: float, + sample_data_token: str, + filename: str, +) -> OrderedDict: """Generate one 2D annotation record given various informations on top of the 2D bounding box coordinates. 
@@ -662,39 +699,39 @@ def generate_record(ann_rec: dict, x1: float, y1: float, x2: float, y2: float, - iscrowd (int): whether the area is crowd """ repro_rec = OrderedDict() - repro_rec['sample_data_token'] = sample_data_token + repro_rec["sample_data_token"] = sample_data_token coco_rec = dict() relevant_keys = [ - 'attribute_tokens', - 'category_name', - 'instance_token', - 'next', - 'num_lidar_pts', - 'num_radar_pts', - 'prev', - 'sample_annotation_token', - 'sample_data_token', - 'visibility_token', + "attribute_tokens", + "category_name", + "instance_token", + "next", + "num_lidar_pts", + "num_radar_pts", + "prev", + "sample_annotation_token", + "sample_data_token", + "visibility_token", ] for key, value in ann_rec.items(): if key in relevant_keys: repro_rec[key] = value - repro_rec['bbox_corners'] = [x1, y1, x2, y2] - repro_rec['filename'] = filename + repro_rec["bbox_corners"] = [x1, y1, x2, y2] + repro_rec["filename"] = filename - coco_rec['file_name'] = filename - coco_rec['image_id'] = sample_data_token - coco_rec['area'] = (y2 - y1) * (x2 - x1) + coco_rec["file_name"] = filename + coco_rec["image_id"] = sample_data_token + coco_rec["area"] = (y2 - y1) * (x2 - x1) - if repro_rec['category_name'] not in NuScenesDataset.NameMapping: + if repro_rec["category_name"] not in NuScenesDataset.NameMapping: return None - cat_name = NuScenesDataset.NameMapping[repro_rec['category_name']] - coco_rec['category_name'] = cat_name - coco_rec['category_id'] = nus_categories.index(cat_name) - coco_rec['bbox'] = [x1, y1, x2 - x1, y2 - y1] - coco_rec['iscrowd'] = 0 + cat_name = NuScenesDataset.NameMapping[repro_rec["category_name"]] + coco_rec["category_name"] = cat_name + coco_rec["category_id"] = nus_categories.index(cat_name) + coco_rec["bbox"] = [x1, y1, x2 - x1, y2 - y1] + coco_rec["iscrowd"] = 0 return coco_rec diff --git a/tools/data_converter/s3dis_data_utils.py b/tools/data_converter/s3dis_data_utils.py index d2b6b77..19ee9d0 100644 --- a/tools/data_converter/s3dis_data_utils.py +++ b/tools/data_converter/s3dis_data_utils.py @@ -1,10 +1,11 @@ # Copyright (c) OpenMMLab. All rights reserved. -import mmcv -import numpy as np import os from concurrent import futures as futures from os import path as osp +import mmcv +import numpy as np + class S3DISData(object): """S3DIS data. @@ -16,25 +17,20 @@ class S3DISData(object): split (str): Set split type of the data. Default: 'Area_1'. """ - def __init__(self, root_path, split='Area_1'): + def __init__(self, root_path, split="Area_1"): self.root_dir = root_path self.split = split - self.data_dir = osp.join(root_path, - 'Stanford3dDataset_v1.2_Aligned_Version') + self.data_dir = osp.join(root_path, "Stanford3dDataset_v1.2_Aligned_Version") # Following `GSDN `_, use 5 furniture # classes for detection: table, chair, sofa, bookcase, board. 
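# Hedged illustration of the mapping built next: the five detection categories keep
# their S3DIS semantic ids 7..11 (table, chair, sofa, bookcase, board) and
# cat_ids2class remaps them to contiguous training labels, i.e.
#   {7: 0, 8: 1, 9: 2, 10: 3, 11: 4}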
self.cat_ids = np.array([7, 8, 9, 10, 11]) - self.cat_ids2class = { - cat_id: i - for i, cat_id in enumerate(list(self.cat_ids)) - } - - assert split in [ - 'Area_1', 'Area_2', 'Area_3', 'Area_4', 'Area_5', 'Area_6' - ] - self.sample_id_list = os.listdir(osp.join(self.data_dir, - split)) # conferenceRoom_1 + self.cat_ids2class = {cat_id: i for i, cat_id in enumerate(list(self.cat_ids))} + + assert split in ["Area_1", "Area_2", "Area_3", "Area_4", "Area_5", "Area_6"] + self.sample_id_list = os.listdir( + osp.join(self.data_dir, split) + ) # conferenceRoom_1 for sample_id in self.sample_id_list: if os.path.isfile(osp.join(self.data_dir, split, sample_id)): self.sample_id_list.remove(sample_id) @@ -58,53 +54,58 @@ def get_infos(self, num_workers=4, has_label=True, sample_id_list=None): """ def process_single_scene(sample_idx): - print(f'{self.split} sample_idx: {sample_idx}') + print(f"{self.split} sample_idx: {sample_idx}") info = dict() - pc_info = { - 'num_features': 6, - 'lidar_idx': f'{self.split}_{sample_idx}' - } - info['point_cloud'] = pc_info - pts_filename = osp.join(self.root_dir, 's3dis_data', - f'{self.split}_{sample_idx}_point.npy') + pc_info = {"num_features": 6, "lidar_idx": f"{self.split}_{sample_idx}"} + info["point_cloud"] = pc_info + pts_filename = osp.join( + self.root_dir, "s3dis_data", f"{self.split}_{sample_idx}_point.npy" + ) pts_instance_mask_path = osp.join( - self.root_dir, 's3dis_data', - f'{self.split}_{sample_idx}_ins_label.npy') + self.root_dir, "s3dis_data", f"{self.split}_{sample_idx}_ins_label.npy" + ) pts_semantic_mask_path = osp.join( - self.root_dir, 's3dis_data', - f'{self.split}_{sample_idx}_sem_label.npy') + self.root_dir, "s3dis_data", f"{self.split}_{sample_idx}_sem_label.npy" + ) points = np.load(pts_filename).astype(np.float32) pts_instance_mask = np.load(pts_instance_mask_path).astype(np.int) pts_semantic_mask = np.load(pts_semantic_mask_path).astype(np.int) - mmcv.mkdir_or_exist(osp.join(self.root_dir, 'points')) - mmcv.mkdir_or_exist(osp.join(self.root_dir, 'instance_mask')) - mmcv.mkdir_or_exist(osp.join(self.root_dir, 'semantic_mask')) + mmcv.mkdir_or_exist(osp.join(self.root_dir, "points")) + mmcv.mkdir_or_exist(osp.join(self.root_dir, "instance_mask")) + mmcv.mkdir_or_exist(osp.join(self.root_dir, "semantic_mask")) points.tofile( - osp.join(self.root_dir, 'points', - f'{self.split}_{sample_idx}.bin')) + osp.join(self.root_dir, "points", f"{self.split}_{sample_idx}.bin") + ) pts_instance_mask.tofile( - osp.join(self.root_dir, 'instance_mask', - f'{self.split}_{sample_idx}.bin')) + osp.join( + self.root_dir, "instance_mask", f"{self.split}_{sample_idx}.bin" + ) + ) pts_semantic_mask.tofile( - osp.join(self.root_dir, 'semantic_mask', - f'{self.split}_{sample_idx}.bin')) - - info['pts_path'] = osp.join('points', - f'{self.split}_{sample_idx}.bin') - info['pts_instance_mask_path'] = osp.join( - 'instance_mask', f'{self.split}_{sample_idx}.bin') - info['pts_semantic_mask_path'] = osp.join( - 'semantic_mask', f'{self.split}_{sample_idx}.bin') - info['annos'] = self.get_bboxes(points, pts_instance_mask, - pts_semantic_mask) + osp.join( + self.root_dir, "semantic_mask", f"{self.split}_{sample_idx}.bin" + ) + ) + + info["pts_path"] = osp.join("points", f"{self.split}_{sample_idx}.bin") + info["pts_instance_mask_path"] = osp.join( + "instance_mask", f"{self.split}_{sample_idx}.bin" + ) + info["pts_semantic_mask_path"] = osp.join( + "semantic_mask", f"{self.split}_{sample_idx}.bin" + ) + info["annos"] = self.get_bboxes( + points, pts_instance_mask, 
pts_semantic_mask + ) return info - sample_id_list = sample_id_list if sample_id_list is not None \ - else self.sample_id_list + sample_id_list = ( + sample_id_list if sample_id_list is not None else self.sample_id_list + ) with futures.ThreadPoolExecutor(num_workers) as executor: infos = executor.map(process_single_scene, sample_id_list) return list(infos) @@ -142,9 +143,9 @@ def get_bboxes(self, points, pts_instance_mask, pts_semantic_mask): bboxes.append(np.concatenate((locations, dimensions))) annotation = dict() # follow ScanNet and SUN RGB-D keys - annotation['gt_boxes_upright_depth'] = np.array(bboxes) - annotation['class'] = np.array(labels) - annotation['gt_num'] = len(labels) + annotation["gt_boxes_upright_depth"] = np.array(bboxes) + annotation["class"] = np.array(labels) + annotation["gt_num"] = len(labels) return annotation @@ -160,48 +161,53 @@ class S3DISSegData(object): Default: None. """ - def __init__(self, - data_root, - ann_file, - split='Area_1', - num_points=4096, - label_weight_func=None): + def __init__( + self, + data_root, + ann_file, + split="Area_1", + num_points=4096, + label_weight_func=None, + ): self.data_root = data_root self.data_infos = mmcv.load(ann_file) self.split = split self.num_points = num_points self.all_ids = np.arange(13) # all possible ids - self.cat_ids = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, - 12]) # used for seg task + self.cat_ids = np.array( + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] + ) # used for seg task self.ignore_index = len(self.cat_ids) - self.cat_id2class = np.ones((self.all_ids.shape[0],), dtype=np.int) * \ - self.ignore_index + self.cat_id2class = ( + np.ones((self.all_ids.shape[0],), dtype=np.int) * self.ignore_index + ) for i, cat_id in enumerate(self.cat_ids): self.cat_id2class[cat_id] = i # label weighting function is taken from # https://github.com/charlesq34/pointnet2/blob/master/scannet/scannet_dataset.py#L24 - self.label_weight_func = (lambda x: 1.0 / np.log(1.2 + x)) if \ - label_weight_func is None else label_weight_func + self.label_weight_func = ( + (lambda x: 1.0 / np.log(1.2 + x)) + if label_weight_func is None + else label_weight_func + ) def get_seg_infos(self): scene_idxs, label_weight = self.get_scene_idxs_and_label_weight() - save_folder = osp.join(self.data_root, 'seg_info') + save_folder = osp.join(self.data_root, "seg_info") mmcv.mkdir_or_exist(save_folder) np.save( - osp.join(save_folder, f'{self.split}_resampled_scene_idxs.npy'), - scene_idxs) - np.save( - osp.join(save_folder, f'{self.split}_label_weight.npy'), - label_weight) - print(f'{self.split} resampled scene index and label weight saved') + osp.join(save_folder, f"{self.split}_resampled_scene_idxs.npy"), scene_idxs + ) + np.save(osp.join(save_folder, f"{self.split}_label_weight.npy"), label_weight) + print(f"{self.split} resampled scene index and label weight saved") def _convert_to_label(self, mask): """Convert class_id in loaded segmentation mask to label.""" if isinstance(mask, str): - if mask.endswith('npy'): + if mask.endswith("npy"): mask = np.load(mask) else: mask = np.fromfile(mask, dtype=np.long) @@ -217,10 +223,11 @@ def get_scene_idxs_and_label_weight(self): """ num_classes = len(self.cat_ids) num_point_all = [] - label_weight = np.zeros((num_classes + 1, )) # ignore_index + label_weight = np.zeros((num_classes + 1,)) # ignore_index for data_info in self.data_infos: label = self._convert_to_label( - osp.join(self.data_root, data_info['pts_semantic_mask_path'])) + osp.join(self.data_root, data_info["pts_semantic_mask_path"]) 
+ ) num_point_all.append(label.shape[0]) class_count, _ = np.histogram(label, range(num_classes + 2)) label_weight += class_count diff --git a/tools/data_converter/scannet_data_utils.py b/tools/data_converter/scannet_data_utils.py index a437fe0..c500ad4 100644 --- a/tools/data_converter/scannet_data_utils.py +++ b/tools/data_converter/scannet_data_utils.py @@ -1,10 +1,11 @@ # Copyright (c) OpenMMLab. All rights reserved. -import mmcv -import numpy as np import os from concurrent import futures as futures from os import path as osp +import mmcv +import numpy as np + class ScanNetData(object): """ScanNet data. @@ -16,71 +17,86 @@ class ScanNetData(object): split (str): Set split type of the data. Default: 'train'. """ - def __init__(self, root_path, split='train'): + def __init__(self, root_path, split="train"): self.root_dir = root_path self.split = split self.split_dir = osp.join(root_path) self.classes = [ - 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', - 'bookshelf', 'picture', 'counter', 'desk', 'curtain', - 'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub', - 'garbagebin' + "cabinet", + "bed", + "chair", + "sofa", + "table", + "door", + "window", + "bookshelf", + "picture", + "counter", + "desk", + "curtain", + "refrigerator", + "showercurtrain", + "toilet", + "sink", + "bathtub", + "garbagebin", ] self.cat2label = {cat: self.classes.index(cat) for cat in self.classes} self.label2cat = {self.cat2label[t]: t for t in self.cat2label} self.cat_ids = np.array( - [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]) + [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39] + ) self.cat_ids2class = { - nyu40id: i - for i, nyu40id in enumerate(list(self.cat_ids)) + nyu40id: i for i, nyu40id in enumerate(list(self.cat_ids)) } - assert split in ['train', 'val', 'test'] - split_file = osp.join(self.root_dir, 'meta_data', - f'scannetv2_{split}.txt') + assert split in ["train", "val", "test"] + split_file = osp.join(self.root_dir, "meta_data", f"scannetv2_{split}.txt") mmcv.check_file_exist(split_file) self.sample_id_list = mmcv.list_from_file(split_file) - self.test_mode = (split == 'test') + self.test_mode = split == "test" def __len__(self): return len(self.sample_id_list) def get_aligned_box_label(self, idx): - box_file = osp.join(self.root_dir, 'scannet_instance_data', - f'{idx}_aligned_bbox.npy') + box_file = osp.join( + self.root_dir, "scannet_instance_data", f"{idx}_aligned_bbox.npy" + ) mmcv.check_file_exist(box_file) return np.load(box_file) def get_unaligned_box_label(self, idx): - box_file = osp.join(self.root_dir, 'scannet_instance_data', - f'{idx}_unaligned_bbox.npy') + box_file = osp.join( + self.root_dir, "scannet_instance_data", f"{idx}_unaligned_bbox.npy" + ) mmcv.check_file_exist(box_file) return np.load(box_file) def get_axis_align_matrix(self, idx): - matrix_file = osp.join(self.root_dir, 'scannet_instance_data', - f'{idx}_axis_align_matrix.npy') + matrix_file = osp.join( + self.root_dir, "scannet_instance_data", f"{idx}_axis_align_matrix.npy" + ) mmcv.check_file_exist(matrix_file) return np.load(matrix_file) def get_images(self, idx): paths = [] - path = osp.join(self.root_dir, 'posed_images', idx) + path = osp.join(self.root_dir, "posed_images", idx) for file in sorted(os.listdir(path)): - if file.endswith('.jpg'): - paths.append(osp.join('posed_images', idx, file)) + if file.endswith(".jpg"): + paths.append(osp.join("posed_images", idx, file)) return paths def get_extrinsics(self, idx): extrinsics = [] - path = 
osp.join(self.root_dir, 'posed_images', idx) + path = osp.join(self.root_dir, "posed_images", idx) for file in sorted(os.listdir(path)): - if file.endswith('.txt') and not file == 'intrinsic.txt': + if file.endswith(".txt") and not file == "intrinsic.txt": extrinsics.append(np.loadtxt(osp.join(path, file))) return extrinsics def get_intrinsics(self, idx): - matrix_file = osp.join(self.root_dir, 'posed_images', idx, - 'intrinsic.txt') + matrix_file = osp.join(self.root_dir, "posed_images", idx, "intrinsic.txt") mmcv.check_file_exist(matrix_file) return np.loadtxt(matrix_file) @@ -100,21 +116,21 @@ def get_infos(self, num_workers=4, has_label=True, sample_id_list=None): """ def process_single_scene(sample_idx): - print(f'{self.split} sample_idx: {sample_idx}') + print(f"{self.split} sample_idx: {sample_idx}") info = dict() - pc_info = {'num_features': 6, 'lidar_idx': sample_idx} - info['point_cloud'] = pc_info - pts_filename = osp.join(self.root_dir, 'scannet_instance_data', - f'{sample_idx}_vert.npy') + pc_info = {"num_features": 6, "lidar_idx": sample_idx} + info["point_cloud"] = pc_info + pts_filename = osp.join( + self.root_dir, "scannet_instance_data", f"{sample_idx}_vert.npy" + ) points = np.load(pts_filename) - mmcv.mkdir_or_exist(osp.join(self.root_dir, 'points')) - points.tofile( - osp.join(self.root_dir, 'points', f'{sample_idx}.bin')) - info['pts_path'] = osp.join('points', f'{sample_idx}.bin') + mmcv.mkdir_or_exist(osp.join(self.root_dir, "points")) + points.tofile(osp.join(self.root_dir, "points", f"{sample_idx}.bin")) + info["pts_path"] = osp.join("points", f"{sample_idx}.bin") # update with RGB image paths if exist - if os.path.exists(osp.join(self.root_dir, 'posed_images')): - info['intrinsics'] = self.get_intrinsics(sample_idx) + if os.path.exists(osp.join(self.root_dir, "posed_images")): + info["intrinsics"] = self.get_intrinsics(sample_idx) all_extrinsics = self.get_extrinsics(sample_idx) all_img_paths = self.get_images(sample_idx) # some poses in ScanNet are invalid @@ -123,73 +139,82 @@ def process_single_scene(sample_idx): if np.all(np.isfinite(extrinsic)): img_paths.append(img_path) extrinsics.append(extrinsic) - info['extrinsics'] = extrinsics - info['img_paths'] = img_paths + info["extrinsics"] = extrinsics + info["img_paths"] = img_paths if not self.test_mode: pts_instance_mask_path = osp.join( - self.root_dir, 'scannet_instance_data', - f'{sample_idx}_ins_label.npy') + self.root_dir, + "scannet_instance_data", + f"{sample_idx}_ins_label.npy", + ) pts_semantic_mask_path = osp.join( - self.root_dir, 'scannet_instance_data', - f'{sample_idx}_sem_label.npy') + self.root_dir, + "scannet_instance_data", + f"{sample_idx}_sem_label.npy", + ) - pts_instance_mask = np.load(pts_instance_mask_path).astype( - np.long) - pts_semantic_mask = np.load(pts_semantic_mask_path).astype( - np.long) + pts_instance_mask = np.load(pts_instance_mask_path).astype(np.long) + pts_semantic_mask = np.load(pts_semantic_mask_path).astype(np.long) - mmcv.mkdir_or_exist(osp.join(self.root_dir, 'instance_mask')) - mmcv.mkdir_or_exist(osp.join(self.root_dir, 'semantic_mask')) + mmcv.mkdir_or_exist(osp.join(self.root_dir, "instance_mask")) + mmcv.mkdir_or_exist(osp.join(self.root_dir, "semantic_mask")) pts_instance_mask.tofile( - osp.join(self.root_dir, 'instance_mask', - f'{sample_idx}.bin')) + osp.join(self.root_dir, "instance_mask", f"{sample_idx}.bin") + ) pts_semantic_mask.tofile( - osp.join(self.root_dir, 'semantic_mask', - f'{sample_idx}.bin')) + osp.join(self.root_dir, "semantic_mask", 
f"{sample_idx}.bin") + ) - info['pts_instance_mask_path'] = osp.join( - 'instance_mask', f'{sample_idx}.bin') - info['pts_semantic_mask_path'] = osp.join( - 'semantic_mask', f'{sample_idx}.bin') + info["pts_instance_mask_path"] = osp.join( + "instance_mask", f"{sample_idx}.bin" + ) + info["pts_semantic_mask_path"] = osp.join( + "semantic_mask", f"{sample_idx}.bin" + ) if has_label: annotations = {} # box is of shape [k, 6 + class] aligned_box_label = self.get_aligned_box_label(sample_idx) unaligned_box_label = self.get_unaligned_box_label(sample_idx) - annotations['gt_num'] = aligned_box_label.shape[0] - if annotations['gt_num'] != 0: + annotations["gt_num"] = aligned_box_label.shape[0] + if annotations["gt_num"] != 0: aligned_box = aligned_box_label[:, :-1] # k, 6 unaligned_box = unaligned_box_label[:, :-1] classes = aligned_box_label[:, -1] # k - annotations['name'] = np.array([ - self.label2cat[self.cat_ids2class[classes[i]]] - for i in range(annotations['gt_num']) - ]) + annotations["name"] = np.array( + [ + self.label2cat[self.cat_ids2class[classes[i]]] + for i in range(annotations["gt_num"]) + ] + ) # default names are given to aligned bbox for compatibility # we also save unaligned bbox info with marked names - annotations['location'] = aligned_box[:, :3] - annotations['dimensions'] = aligned_box[:, 3:6] - annotations['gt_boxes_upright_depth'] = aligned_box - annotations['unaligned_location'] = unaligned_box[:, :3] - annotations['unaligned_dimensions'] = unaligned_box[:, 3:6] - annotations[ - 'unaligned_gt_boxes_upright_depth'] = unaligned_box - annotations['index'] = np.arange( - annotations['gt_num'], dtype=np.int32) - annotations['class'] = np.array([ - self.cat_ids2class[classes[i]] - for i in range(annotations['gt_num']) - ]) + annotations["location"] = aligned_box[:, :3] + annotations["dimensions"] = aligned_box[:, 3:6] + annotations["gt_boxes_upright_depth"] = aligned_box + annotations["unaligned_location"] = unaligned_box[:, :3] + annotations["unaligned_dimensions"] = unaligned_box[:, 3:6] + annotations["unaligned_gt_boxes_upright_depth"] = unaligned_box + annotations["index"] = np.arange( + annotations["gt_num"], dtype=np.int32 + ) + annotations["class"] = np.array( + [ + self.cat_ids2class[classes[i]] + for i in range(annotations["gt_num"]) + ] + ) axis_align_matrix = self.get_axis_align_matrix(sample_idx) - annotations['axis_align_matrix'] = axis_align_matrix # 4x4 - info['annos'] = annotations + annotations["axis_align_matrix"] = axis_align_matrix # 4x4 + info["annos"] = annotations return info - sample_id_list = sample_id_list if sample_id_list is not None \ - else self.sample_id_list + sample_id_list = ( + sample_id_list if sample_id_list is not None else self.sample_id_list + ) with futures.ThreadPoolExecutor(num_workers) as executor: infos = executor.map(process_single_scene, sample_id_list) return list(infos) @@ -207,53 +232,56 @@ class ScanNetSegData(object): Default: None. 
""" - def __init__(self, - data_root, - ann_file, - split='train', - num_points=8192, - label_weight_func=None): + def __init__( + self, + data_root, + ann_file, + split="train", + num_points=8192, + label_weight_func=None, + ): self.data_root = data_root self.data_infos = mmcv.load(ann_file) self.split = split - assert split in ['train', 'val', 'test'] + assert split in ["train", "val", "test"] self.num_points = num_points self.all_ids = np.arange(41) # all possible ids - self.cat_ids = np.array([ - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, - 39 - ]) # used for seg task + self.cat_ids = np.array( + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39] + ) # used for seg task self.ignore_index = len(self.cat_ids) - self.cat_id2class = np.ones((self.all_ids.shape[0],), dtype=np.int) * \ - self.ignore_index + self.cat_id2class = ( + np.ones((self.all_ids.shape[0],), dtype=np.int) * self.ignore_index + ) for i, cat_id in enumerate(self.cat_ids): self.cat_id2class[cat_id] = i # label weighting function is taken from # https://github.com/charlesq34/pointnet2/blob/master/scannet/scannet_dataset.py#L24 - self.label_weight_func = (lambda x: 1.0 / np.log(1.2 + x)) if \ - label_weight_func is None else label_weight_func + self.label_weight_func = ( + (lambda x: 1.0 / np.log(1.2 + x)) + if label_weight_func is None + else label_weight_func + ) def get_seg_infos(self): - if self.split == 'test': + if self.split == "test": return scene_idxs, label_weight = self.get_scene_idxs_and_label_weight() - save_folder = osp.join(self.data_root, 'seg_info') + save_folder = osp.join(self.data_root, "seg_info") mmcv.mkdir_or_exist(save_folder) np.save( - osp.join(save_folder, f'{self.split}_resampled_scene_idxs.npy'), - scene_idxs) - np.save( - osp.join(save_folder, f'{self.split}_label_weight.npy'), - label_weight) - print(f'{self.split} resampled scene index and label weight saved') + osp.join(save_folder, f"{self.split}_resampled_scene_idxs.npy"), scene_idxs + ) + np.save(osp.join(save_folder, f"{self.split}_label_weight.npy"), label_weight) + print(f"{self.split} resampled scene index and label weight saved") def _convert_to_label(self, mask): """Convert class_id in loaded segmentation mask to label.""" if isinstance(mask, str): - if mask.endswith('npy'): + if mask.endswith("npy"): mask = np.load(mask) else: mask = np.fromfile(mask, dtype=np.long) @@ -269,10 +297,11 @@ def get_scene_idxs_and_label_weight(self): """ num_classes = len(self.cat_ids) num_point_all = [] - label_weight = np.zeros((num_classes + 1, )) # ignore_index + label_weight = np.zeros((num_classes + 1,)) # ignore_index for data_info in self.data_infos: label = self._convert_to_label( - osp.join(self.data_root, data_info['pts_semantic_mask_path'])) + osp.join(self.data_root, data_info["pts_semantic_mask_path"]) + ) num_point_all.append(label.shape[0]) class_count, _ = np.histogram(label, range(num_classes + 2)) label_weight += class_count diff --git a/tools/data_converter/sunrgbd_data_utils.py b/tools/data_converter/sunrgbd_data_utils.py index 9f8a502..e5bc8e1 100644 --- a/tools/data_converter/sunrgbd_data_utils.py +++ b/tools/data_converter/sunrgbd_data_utils.py @@ -1,8 +1,9 @@ # Copyright (c) OpenMMLab. All rights reserved. 
-import mmcv -import numpy as np from concurrent import futures as futures from os import path as osp + +import mmcv +import numpy as np from scipy import io as sio @@ -22,7 +23,7 @@ def random_sampling(points, num_points, replace=None, return_choices=False): """ if replace is None: - replace = (points.shape[0] < num_points) + replace = points.shape[0] < num_points choices = np.random.choice(points.shape[0], num_points, replace=replace) if return_choices: return points[choices], choices @@ -31,9 +32,8 @@ def random_sampling(points, num_points, replace=None, return_choices=False): class SUNRGBDInstance(object): - def __init__(self, line): - data = line.split(' ') + data = line.split(" ") data[1:] = [float(x) for x in data[1:]] self.classname = data[0] self.xmin = data[1] @@ -45,15 +45,16 @@ def __init__(self, line): self.w = data[8] self.l = data[9] # noqa: E741 self.h = data[10] - self.orientation = np.zeros((3, )) + self.orientation = np.zeros((3,)) self.orientation[0] = data[11] self.orientation[1] = data[12] - self.heading_angle = -1 * np.arctan2(self.orientation[1], - self.orientation[0]) - self.box3d = np.concatenate([ - self.centroid, - np.array([self.l * 2, self.w * 2, self.h * 2, self.heading_angle]) - ]) + self.heading_angle = -1 * np.arctan2(self.orientation[1], self.orientation[0]) + self.box3d = np.concatenate( + [ + self.centroid, + np.array([self.l * 2, self.w * 2, self.h * 2, self.heading_angle]), + ] + ) class SUNRGBDData(object): @@ -67,36 +68,43 @@ class SUNRGBDData(object): use_v1 (bool): Whether to use v1. Default: False. """ - def __init__(self, root_path, split='train', use_v1=False): + def __init__(self, root_path, split="train", use_v1=False): self.root_dir = root_path self.split = split - self.split_dir = osp.join(root_path, 'sunrgbd_trainval') + self.split_dir = osp.join(root_path, "sunrgbd_trainval") self.classes = [ - 'bed', 'table', 'sofa', 'chair', 'toilet', 'desk', 'dresser', - 'night_stand', 'bookshelf', 'bathtub' + "bed", + "table", + "sofa", + "chair", + "toilet", + "desk", + "dresser", + "night_stand", + "bookshelf", + "bathtub", ] self.cat2label = {cat: self.classes.index(cat) for cat in self.classes} self.label2cat = { - label: self.classes[label] - for label in range(len(self.classes)) + label: self.classes[label] for label in range(len(self.classes)) } - assert split in ['train', 'val', 'test'] - split_file = osp.join(self.split_dir, f'{split}_data_idx.txt') + assert split in ["train", "val", "test"] + split_file = osp.join(self.split_dir, f"{split}_data_idx.txt") mmcv.check_file_exist(split_file) self.sample_id_list = map(int, mmcv.list_from_file(split_file)) - self.image_dir = osp.join(self.split_dir, 'image') - self.calib_dir = osp.join(self.split_dir, 'calib') - self.depth_dir = osp.join(self.split_dir, 'depth') + self.image_dir = osp.join(self.split_dir, "image") + self.calib_dir = osp.join(self.split_dir, "calib") + self.depth_dir = osp.join(self.split_dir, "depth") if use_v1: - self.label_dir = osp.join(self.split_dir, 'label_v1') + self.label_dir = osp.join(self.split_dir, "label_v1") else: - self.label_dir = osp.join(self.split_dir, 'label') + self.label_dir = osp.join(self.split_dir, "label") def __len__(self): return len(self.sample_id_list) def get_image(self, idx): - img_filename = osp.join(self.image_dir, f'{idx:06d}.jpg') + img_filename = osp.join(self.image_dir, f"{idx:06d}.jpg") return mmcv.imread(img_filename) def get_image_shape(self, idx): @@ -104,21 +112,21 @@ def get_image_shape(self, idx): return np.array(image.shape[:2], 
dtype=np.int32) def get_depth(self, idx): - depth_filename = osp.join(self.depth_dir, f'{idx:06d}.mat') - depth = sio.loadmat(depth_filename)['instance'] + depth_filename = osp.join(self.depth_dir, f"{idx:06d}.mat") + depth = sio.loadmat(depth_filename)["instance"] return depth def get_calibration(self, idx): - calib_filepath = osp.join(self.calib_dir, f'{idx:06d}.txt') + calib_filepath = osp.join(self.calib_dir, f"{idx:06d}.txt") lines = [line.rstrip() for line in open(calib_filepath)] - Rt = np.array([float(x) for x in lines[0].split(' ')]) - Rt = np.reshape(Rt, (3, 3), order='F').astype(np.float32) - K = np.array([float(x) for x in lines[1].split(' ')]) - K = np.reshape(K, (3, 3), order='F').astype(np.float32) + Rt = np.array([float(x) for x in lines[0].split(" ")]) + Rt = np.reshape(Rt, (3, 3), order="F").astype(np.float32) + K = np.array([float(x) for x in lines[1].split(" ")]) + K = np.reshape(K, (3, 3), order="F").astype(np.float32) return K, Rt def get_label_objects(self, idx): - label_filename = osp.join(self.label_dir, f'{idx:06d}.txt') + label_filename = osp.join(self.label_dir, f"{idx:06d}.txt") lines = [line.rstrip() for line in open(label_filename)] objects = [SUNRGBDInstance(line) for line in lines] return objects @@ -139,83 +147,106 @@ def get_infos(self, num_workers=4, has_label=True, sample_id_list=None): """ def process_single_scene(sample_idx): - print(f'{self.split} sample_idx: {sample_idx}') + print(f"{self.split} sample_idx: {sample_idx}") # convert depth to points SAMPLE_NUM = 50000 # TODO: Check whether can move the point # sampling process during training. pc_upright_depth = self.get_depth(sample_idx) - pc_upright_depth_subsampled = random_sampling( - pc_upright_depth, SAMPLE_NUM) + pc_upright_depth_subsampled = random_sampling(pc_upright_depth, SAMPLE_NUM) info = dict() - pc_info = {'num_features': 6, 'lidar_idx': sample_idx} - info['point_cloud'] = pc_info + pc_info = {"num_features": 6, "lidar_idx": sample_idx} + info["point_cloud"] = pc_info - mmcv.mkdir_or_exist(osp.join(self.root_dir, 'points')) + mmcv.mkdir_or_exist(osp.join(self.root_dir, "points")) pc_upright_depth_subsampled.tofile( - osp.join(self.root_dir, 'points', f'{sample_idx:06d}.bin')) + osp.join(self.root_dir, "points", f"{sample_idx:06d}.bin") + ) - info['pts_path'] = osp.join('points', f'{sample_idx:06d}.bin') - img_path = osp.join('image', f'{sample_idx:06d}.jpg') + info["pts_path"] = osp.join("points", f"{sample_idx:06d}.bin") + img_path = osp.join("image", f"{sample_idx:06d}.jpg") image_info = { - 'image_idx': sample_idx, - 'image_shape': self.get_image_shape(sample_idx), - 'image_path': img_path + "image_idx": sample_idx, + "image_shape": self.get_image_shape(sample_idx), + "image_path": img_path, } - info['image'] = image_info + info["image"] = image_info K, Rt = self.get_calibration(sample_idx) - calib_info = {'K': K, 'Rt': Rt} - info['calib'] = calib_info + calib_info = {"K": K, "Rt": Rt} + info["calib"] = calib_info if has_label: obj_list = self.get_label_objects(sample_idx) annotations = {} - annotations['gt_num'] = len([ - obj.classname for obj in obj_list - if obj.classname in self.cat2label.keys() - ]) - if annotations['gt_num'] != 0: - annotations['name'] = np.array([ - obj.classname for obj in obj_list - if obj.classname in self.cat2label.keys() - ]) - annotations['bbox'] = np.concatenate([ - obj.box2d.reshape(1, 4) for obj in obj_list + annotations["gt_num"] = len( + [ + obj.classname + for obj in obj_list if obj.classname in self.cat2label.keys() - ], - axis=0) - 
annotations['location'] = np.concatenate([ - obj.centroid.reshape(1, 3) for obj in obj_list - if obj.classname in self.cat2label.keys() - ], - axis=0) - annotations['dimensions'] = 2 * np.array([ - [obj.l, obj.w, obj.h] for obj in obj_list - if obj.classname in self.cat2label.keys() - ]) # lwh (depth) format - annotations['rotation_y'] = np.array([ - obj.heading_angle for obj in obj_list - if obj.classname in self.cat2label.keys() - ]) - annotations['index'] = np.arange( - len(obj_list), dtype=np.int32) - annotations['class'] = np.array([ - self.cat2label[obj.classname] for obj in obj_list - if obj.classname in self.cat2label.keys() - ]) - annotations['gt_boxes_upright_depth'] = np.stack( + ] + ) + if annotations["gt_num"] != 0: + annotations["name"] = np.array( + [ + obj.classname + for obj in obj_list + if obj.classname in self.cat2label.keys() + ] + ) + annotations["bbox"] = np.concatenate( + [ + obj.box2d.reshape(1, 4) + for obj in obj_list + if obj.classname in self.cat2label.keys() + ], + axis=0, + ) + annotations["location"] = np.concatenate( + [ + obj.centroid.reshape(1, 3) + for obj in obj_list + if obj.classname in self.cat2label.keys() + ], + axis=0, + ) + annotations["dimensions"] = 2 * np.array( + [ + [obj.l, obj.w, obj.h] + for obj in obj_list + if obj.classname in self.cat2label.keys() + ] + ) # lwh (depth) format + annotations["rotation_y"] = np.array( + [ + obj.heading_angle + for obj in obj_list + if obj.classname in self.cat2label.keys() + ] + ) + annotations["index"] = np.arange(len(obj_list), dtype=np.int32) + annotations["class"] = np.array( + [ + self.cat2label[obj.classname] + for obj in obj_list + if obj.classname in self.cat2label.keys() + ] + ) + annotations["gt_boxes_upright_depth"] = np.stack( [ - obj.box3d for obj in obj_list + obj.box3d + for obj in obj_list if obj.classname in self.cat2label.keys() ], - axis=0) # (K,8) - info['annos'] = annotations + axis=0, + ) # (K,8) + info["annos"] = annotations return info - sample_id_list = sample_id_list if \ - sample_id_list is not None else self.sample_id_list + sample_id_list = ( + sample_id_list if sample_id_list is not None else self.sample_id_list + ) with futures.ThreadPoolExecutor(num_workers) as executor: infos = executor.map(process_single_scene, sample_id_list) return list(infos) diff --git a/tools/data_converter/test_nus.py b/tools/data_converter/test_nus.py index 996f3c6..8cc8dc7 100644 --- a/tools/data_converter/test_nus.py +++ b/tools/data_converter/test_nus.py @@ -12,8 +12,11 @@ # # print(my_sample['scene_token'],scene['token']) import simplejson as json + # with open ('D:/vis_occ/mini-data/annotations.json','r') as f: -with open ('D:/annotations.json','r') as f: - data=json.load(f) - print(data['scene_infos']['scene-0061']['ca9a282c9e77460f8360f564131a8af5']['gt_path']) - # print(data.keys()) \ No newline at end of file +with open("D:/annotations.json", "r") as f: + data = json.load(f) + print( + data["scene_infos"]["scene-0061"]["ca9a282c9e77460f8360f564131a8af5"]["gt_path"] + ) + # print(data.keys()) diff --git a/tools/data_converter/waymo_converter.py b/tools/data_converter/waymo_converter.py index 94fcae1..07a4c53 100644 --- a/tools/data_converter/waymo_converter.py +++ b/tools/data_converter/waymo_converter.py @@ -8,16 +8,17 @@ except ImportError: raise ImportError( 'Please run "pip install waymo-open-dataset-tf-2-2-0==1.2.0" ' - 'to install the official devkit first.') + "to install the official devkit first." 
+ ) + +from glob import glob +from os.path import join import mmcv import numpy as np import tensorflow as tf -from glob import glob -from os.path import join from waymo_open_dataset.utils import range_image_utils, transform_utils -from waymo_open_dataset.utils.frame_utils import \ - parse_range_image_and_camera_projection +from waymo_open_dataset.utils.frame_utils import parse_range_image_and_camera_projection class Waymo2KITTI(object): @@ -35,16 +36,11 @@ class Waymo2KITTI(object): test_mode (bool): Whether in the test_mode. Default: False. """ - def __init__(self, - load_dir, - save_dir, - prefix, - workers=64, - test_mode=False): + def __init__(self, load_dir, save_dir, prefix, workers=64, test_mode=False): self.filter_empty_3dboxes = True self.filter_no_label_zone_points = True - self.selected_waymo_classes = ['VEHICLE', 'PEDESTRIAN', 'CYCLIST'] + self.selected_waymo_classes = ["VEHICLE", "PEDESTRIAN", "CYCLIST"] # Only data collected in specific locations will be converted # If set None, this filter is disabled @@ -53,22 +49,23 @@ def __init__(self, self.save_track_id = False # turn on eager execution for older tensorflow versions - if int(tf.__version__.split('.')[0]) < 2: + if int(tf.__version__.split(".")[0]) < 2: tf.enable_eager_execution() self.lidar_list = [ - '_FRONT', '_FRONT_RIGHT', '_FRONT_LEFT', '_SIDE_RIGHT', - '_SIDE_LEFT' - ] - self.type_list = [ - 'UNKNOWN', 'VEHICLE', 'PEDESTRIAN', 'SIGN', 'CYCLIST' + "_FRONT", + "_FRONT_RIGHT", + "_FRONT_LEFT", + "_SIDE_RIGHT", + "_SIDE_LEFT", ] + self.type_list = ["UNKNOWN", "VEHICLE", "PEDESTRIAN", "SIGN", "CYCLIST"] self.waymo_to_kitti_class_map = { - 'UNKNOWN': 'DontCare', - 'PEDESTRIAN': 'Pedestrian', - 'VEHICLE': 'Car', - 'CYCLIST': 'Cyclist', - 'SIGN': 'Sign' # not in kitti + "UNKNOWN": "DontCare", + "PEDESTRIAN": "Pedestrian", + "VEHICLE": "Car", + "CYCLIST": "Cyclist", + "SIGN": "Sign", # not in kitti } self.load_dir = load_dir @@ -77,24 +74,22 @@ def __init__(self, self.workers = int(workers) self.test_mode = test_mode - self.tfrecord_pathnames = sorted( - glob(join(self.load_dir, '*.tfrecord'))) + self.tfrecord_pathnames = sorted(glob(join(self.load_dir, "*.tfrecord"))) - self.label_save_dir = f'{self.save_dir}/label_' - self.label_all_save_dir = f'{self.save_dir}/label_all' - self.image_save_dir = f'{self.save_dir}/image_' - self.calib_save_dir = f'{self.save_dir}/calib' - self.point_cloud_save_dir = f'{self.save_dir}/velodyne' - self.pose_save_dir = f'{self.save_dir}/pose' + self.label_save_dir = f"{self.save_dir}/label_" + self.label_all_save_dir = f"{self.save_dir}/label_all" + self.image_save_dir = f"{self.save_dir}/image_" + self.calib_save_dir = f"{self.save_dir}/calib" + self.point_cloud_save_dir = f"{self.save_dir}/velodyne" + self.pose_save_dir = f"{self.save_dir}/pose" self.create_folder() def convert(self): """Convert action.""" - print('Start converting ...') - mmcv.track_parallel_progress(self.convert_one, range(len(self)), - self.workers) - print('\nFinished ...') + print("Start converting ...") + mmcv.track_parallel_progress(self.convert_one, range(len(self)), self.workers) + print("\nFinished ...") def convert_one(self, file_idx): """Convert action for single file. @@ -103,18 +98,18 @@ def convert_one(self, file_idx): file_idx (int): Index of the file to be converted. 
""" pathname = self.tfrecord_pathnames[file_idx] - dataset = tf.data.TFRecordDataset(pathname, compression_type='') + dataset = tf.data.TFRecordDataset(pathname, compression_type="") for frame_idx, data in enumerate(dataset): - if frame_idx % 5 != 0: continue # print(frame_idx) frame = dataset_pb2.Frame() frame.ParseFromString(bytearray(data.numpy())) - if (self.selected_waymo_locations is not None - and frame.context.stats.location - not in self.selected_waymo_locations): + if ( + self.selected_waymo_locations is not None + and frame.context.stats.location not in self.selected_waymo_locations + ): continue self.save_image(frame, file_idx, frame_idx) @@ -138,9 +133,11 @@ def save_image(self, frame, file_idx, frame_idx): frame_idx (int): Current frame index. """ for img in frame.images: - img_path = f'{self.image_save_dir}{str(img.name - 1)}/' + \ - f'{self.prefix}{str(file_idx).zfill(3)}' + \ - f'{str(frame_idx).zfill(3)}.png' + img_path = ( + f"{self.image_save_dir}{str(img.name - 1)}/" + + f"{self.prefix}{str(file_idx).zfill(3)}" + + f"{str(frame_idx).zfill(3)}.png" + ) img = mmcv.imfrombytes(img.image) mmcv.imwrite(img, img_path) @@ -153,24 +150,23 @@ def save_calib(self, frame, file_idx, frame_idx): frame_idx (int): Current frame index. """ # waymo front camera to kitti reference camera - T_front_cam_to_ref = np.array([[0.0, -1.0, 0.0], [0.0, 0.0, -1.0], - [1.0, 0.0, 0.0]]) + T_front_cam_to_ref = np.array( + [[0.0, -1.0, 0.0], [0.0, 0.0, -1.0], [1.0, 0.0, 0.0]] + ) camera_calibs = [] - R0_rect = [f'{i:e}' for i in np.eye(3).flatten()] + R0_rect = [f"{i:e}" for i in np.eye(3).flatten()] Tr_velo_to_cams = [] - calib_context = '' + calib_context = "" for camera in frame.context.camera_calibrations: # extrinsic parameters - T_cam_to_vehicle = np.array(camera.extrinsic.transform).reshape( - 4, 4) + T_cam_to_vehicle = np.array(camera.extrinsic.transform).reshape(4, 4) T_vehicle_to_cam = np.linalg.inv(T_cam_to_vehicle) - Tr_velo_to_cam = \ - self.cart_to_homo(T_front_cam_to_ref) @ T_vehicle_to_cam + Tr_velo_to_cam = self.cart_to_homo(T_front_cam_to_ref) @ T_vehicle_to_cam if camera.name == 1: # FRONT = 1, see dataset.proto for details self.T_velo_to_front_cam = Tr_velo_to_cam.copy() - Tr_velo_to_cam = Tr_velo_to_cam[:3, :].reshape((12, )) - Tr_velo_to_cams.append([f'{i:e}' for i in Tr_velo_to_cam]) + Tr_velo_to_cam = Tr_velo_to_cam[:3, :].reshape((12,)) + Tr_velo_to_cams.append([f"{i:e}" for i in Tr_velo_to_cam]) # intrinsic parameters camera_calib = np.zeros((3, 4)) @@ -180,23 +176,24 @@ def save_calib(self, frame, file_idx, frame_idx): camera_calib[1, 2] = camera.intrinsic[3] camera_calib[2, 2] = 1 camera_calib = list(camera_calib.reshape(12)) - camera_calib = [f'{i:e}' for i in camera_calib] + camera_calib = [f"{i:e}" for i in camera_calib] camera_calibs.append(camera_calib) # all camera ids are saved as id-1 in the result because # camera 0 is unknown in the proto for i in range(5): - calib_context += 'P' + str(i) + ': ' + \ - ' '.join(camera_calibs[i]) + '\n' - calib_context += 'R0_rect' + ': ' + ' '.join(R0_rect) + '\n' + calib_context += "P" + str(i) + ": " + " ".join(camera_calibs[i]) + "\n" + calib_context += "R0_rect" + ": " + " ".join(R0_rect) + "\n" for i in range(5): - calib_context += 'Tr_velo_to_cam_' + str(i) + ': ' + \ - ' '.join(Tr_velo_to_cams[i]) + '\n' + calib_context += ( + "Tr_velo_to_cam_" + str(i) + ": " + " ".join(Tr_velo_to_cams[i]) + "\n" + ) with open( - f'{self.calib_save_dir}/{self.prefix}' + - f'{str(file_idx).zfill(3)}{str(frame_idx).zfill(3)}.txt', - 'w+') as 
fp_calib: + f"{self.calib_save_dir}/{self.prefix}" + + f"{str(file_idx).zfill(3)}{str(frame_idx).zfill(3)}.txt", + "w+", + ) as fp_calib: fp_calib.write(calib_context) fp_calib.close() @@ -208,31 +205,34 @@ def save_lidar(self, frame, file_idx, frame_idx): file_idx (int): Current file index. frame_idx (int): Current frame index. """ - range_images, camera_projections, range_image_top_pose = \ - parse_range_image_and_camera_projection(frame) + ( + range_images, + camera_projections, + range_image_top_pose, + ) = parse_range_image_and_camera_projection(frame) # First return - points_0, cp_points_0, intensity_0, elongation_0 = \ - self.convert_range_image_to_point_cloud( - frame, - range_images, - camera_projections, - range_image_top_pose, - ri_index=0 - ) + ( + points_0, + cp_points_0, + intensity_0, + elongation_0, + ) = self.convert_range_image_to_point_cloud( + frame, range_images, camera_projections, range_image_top_pose, ri_index=0 + ) points_0 = np.concatenate(points_0, axis=0) intensity_0 = np.concatenate(intensity_0, axis=0) elongation_0 = np.concatenate(elongation_0, axis=0) # Second return - points_1, cp_points_1, intensity_1, elongation_1 = \ - self.convert_range_image_to_point_cloud( - frame, - range_images, - camera_projections, - range_image_top_pose, - ri_index=1 - ) + ( + points_1, + cp_points_1, + intensity_1, + elongation_1, + ) = self.convert_range_image_to_point_cloud( + frame, range_images, camera_projections, range_image_top_pose, ri_index=1 + ) points_1 = np.concatenate(points_1, axis=0) intensity_1 = np.concatenate(intensity_1, axis=0) elongation_1 = np.concatenate(elongation_1, axis=0) @@ -243,11 +243,12 @@ def save_lidar(self, frame, file_idx, frame_idx): timestamp = frame.timestamp_micros * np.ones_like(intensity) # concatenate x,y,z, intensity, elongation, timestamp (6-dim) - point_cloud = np.column_stack( - (points, intensity, elongation, timestamp)) + point_cloud = np.column_stack((points, intensity, elongation, timestamp)) - pc_path = f'{self.point_cloud_save_dir}/{self.prefix}' + \ - f'{str(file_idx).zfill(3)}{str(frame_idx).zfill(3)}.bin' + pc_path = ( + f"{self.point_cloud_save_dir}/{self.prefix}" + + f"{str(file_idx).zfill(3)}{str(frame_idx).zfill(3)}.bin" + ) point_cloud.astype(np.float32).tofile(pc_path) def save_label(self, frame, file_idx, frame_idx): @@ -264,8 +265,10 @@ def save_label(self, frame, file_idx, frame_idx): frame_idx (int): Current frame index. 
""" fp_label_all = open( - f'{self.label_all_save_dir}/{self.prefix}' + - f'{str(file_idx).zfill(3)}{str(frame_idx).zfill(3)}.txt', 'w+') + f"{self.label_all_save_dir}/{self.prefix}" + + f"{str(file_idx).zfill(3)}{str(frame_idx).zfill(3)}.txt", + "w+", + ) id_to_bbox = dict() id_to_name = dict() for labels in frame.projected_lidar_labels: @@ -276,7 +279,7 @@ def save_label(self, frame, file_idx, frame_idx): label.box.center_x - label.box.length / 2, label.box.center_y - label.box.width / 2, label.box.center_x + label.box.length / 2, - label.box.center_y + label.box.width / 2 + label.box.center_y + label.box.width / 2, ] id_to_bbox[label.id] = bbox id_to_name[label.id] = name - 1 @@ -292,7 +295,7 @@ def save_label(self, frame, file_idx, frame_idx): break if bounding_box is None or name is None: - name = '0' + name = "0" bounding_box = (0, 0, 0, 0) my_type = self.type_list[obj.type] @@ -314,8 +317,7 @@ def save_label(self, frame, file_idx, frame_idx): z = obj.box.center_z - height / 2 # project bounding box to the virtual reference frame - pt_ref = self.T_velo_to_front_cam @ \ - np.array([x, y, z, 1]).reshape((4, 1)) + pt_ref = self.T_velo_to_front_cam @ np.array([x, y, z, 1]).reshape((4, 1)) x, y, z, _ = pt_ref.flatten().tolist() rotation_y = -obj.box.heading - np.pi / 2 @@ -326,23 +328,33 @@ def save_label(self, frame, file_idx, frame_idx): occluded = 0 alpha = -10 - line = my_type + \ - ' {} {} {} {} {} {} {} {} {} {} {} {} {} {}\n'.format( - round(truncated, 2), occluded, round(alpha, 2), - round(bounding_box[0], 2), round(bounding_box[1], 2), - round(bounding_box[2], 2), round(bounding_box[3], 2), - round(height, 2), round(width, 2), round(length, 2), - round(x, 2), round(y, 2), round(z, 2), - round(rotation_y, 2)) + line = my_type + " {} {} {} {} {} {} {} {} {} {} {} {} {} {}\n".format( + round(truncated, 2), + occluded, + round(alpha, 2), + round(bounding_box[0], 2), + round(bounding_box[1], 2), + round(bounding_box[2], 2), + round(bounding_box[3], 2), + round(height, 2), + round(width, 2), + round(length, 2), + round(x, 2), + round(y, 2), + round(z, 2), + round(rotation_y, 2), + ) if self.save_track_id: - line_all = line[:-1] + ' ' + name + ' ' + track_id + '\n' + line_all = line[:-1] + " " + name + " " + track_id + "\n" else: - line_all = line[:-1] + ' ' + name + '\n' + line_all = line[:-1] + " " + name + "\n" fp_label = open( - f'{self.label_save_dir}{name}/{self.prefix}' + - f'{str(file_idx).zfill(3)}{str(frame_idx).zfill(3)}.txt', 'a') + f"{self.label_save_dir}{name}/{self.prefix}" + + f"{str(file_idx).zfill(3)}{str(frame_idx).zfill(3)}.txt", + "a", + ) fp_label.write(line) fp_label.close() @@ -365,36 +377,39 @@ def save_pose(self, frame, file_idx, frame_idx): """ pose = np.array(frame.pose.transform).reshape(4, 4) np.savetxt( - join(f'{self.pose_save_dir}/{self.prefix}' + - f'{str(file_idx).zfill(3)}{str(frame_idx).zfill(3)}.txt'), - pose) + join( + f"{self.pose_save_dir}/{self.prefix}" + + f"{str(file_idx).zfill(3)}{str(frame_idx).zfill(3)}.txt" + ), + pose, + ) def create_folder(self): """Create folder for data preprocessing.""" if not self.test_mode: dir_list1 = [ - self.label_all_save_dir, self.calib_save_dir, - self.point_cloud_save_dir, self.pose_save_dir + self.label_all_save_dir, + self.calib_save_dir, + self.point_cloud_save_dir, + self.pose_save_dir, ] dir_list2 = [self.label_save_dir, self.image_save_dir] else: dir_list1 = [ - self.calib_save_dir, self.point_cloud_save_dir, - self.pose_save_dir + self.calib_save_dir, + self.point_cloud_save_dir, + self.pose_save_dir, 
] dir_list2 = [self.image_save_dir] for d in dir_list1: mmcv.mkdir_or_exist(d) for d in dir_list2: for i in range(5): - mmcv.mkdir_or_exist(f'{d}{str(i)}') - - def convert_range_image_to_point_cloud(self, - frame, - range_images, - camera_projections, - range_image_top_pose, - ri_index=0): + mmcv.mkdir_or_exist(f"{d}{str(i)}") + + def convert_range_image_to_point_cloud( + self, frame, range_images, camera_projections, range_image_top_pose, ri_index=0 + ): """Convert range images to point cloud. Args: @@ -414,37 +429,38 @@ def convert_range_image_to_point_cloud(self, with shape [N, 1], elongation with shape [N, 1]). All the lists have the length of lidar numbers (5). """ - calibrations = sorted( - frame.context.laser_calibrations, key=lambda c: c.name) + calibrations = sorted(frame.context.laser_calibrations, key=lambda c: c.name) points = [] cp_points = [] intensity = [] elongation = [] frame_pose = tf.convert_to_tensor( - value=np.reshape(np.array(frame.pose.transform), [4, 4])) + value=np.reshape(np.array(frame.pose.transform), [4, 4]) + ) # [H, W, 6] range_image_top_pose_tensor = tf.reshape( tf.convert_to_tensor(value=range_image_top_pose.data), - range_image_top_pose.shape.dims) + range_image_top_pose.shape.dims, + ) # [H, W, 3, 3] - range_image_top_pose_tensor_rotation = \ - transform_utils.get_rotation_matrix( - range_image_top_pose_tensor[..., 0], - range_image_top_pose_tensor[..., 1], - range_image_top_pose_tensor[..., 2]) - range_image_top_pose_tensor_translation = \ - range_image_top_pose_tensor[..., 3:] + range_image_top_pose_tensor_rotation = transform_utils.get_rotation_matrix( + range_image_top_pose_tensor[..., 0], + range_image_top_pose_tensor[..., 1], + range_image_top_pose_tensor[..., 2], + ) + range_image_top_pose_tensor_translation = range_image_top_pose_tensor[..., 3:] range_image_top_pose_tensor = transform_utils.get_transform( range_image_top_pose_tensor_rotation, - range_image_top_pose_tensor_translation) + range_image_top_pose_tensor_translation, + ) for c in calibrations: range_image = range_images[c.name][ri_index] if len(c.beam_inclinations) == 0: beam_inclinations = range_image_utils.compute_inclination( - tf.constant( - [c.beam_inclination_min, c.beam_inclination_max]), - height=range_image.shape.dims[0]) + tf.constant([c.beam_inclination_min, c.beam_inclination_max]), + height=range_image.shape.dims[0], + ) else: beam_inclinations = tf.constant(c.beam_inclinations) @@ -452,8 +468,8 @@ def convert_range_image_to_point_cloud(self, extrinsic = np.reshape(np.array(c.extrinsic.transform), [4, 4]) range_image_tensor = tf.reshape( - tf.convert_to_tensor(value=range_image.data), - range_image.shape.dims) + tf.convert_to_tensor(value=range_image.data), range_image.shape.dims + ) pixel_pose_local = None frame_pose_local = None if c.name == dataset_pb2.LaserName.TOP: @@ -466,33 +482,39 @@ def convert_range_image_to_point_cloud(self, nlz_mask = range_image_tensor[..., 3] != 1.0 # 1.0: in NLZ range_image_mask = range_image_mask & nlz_mask - range_image_cartesian = \ + range_image_cartesian = ( range_image_utils.extract_point_cloud_from_range_image( tf.expand_dims(range_image_tensor[..., 0], axis=0), tf.expand_dims(extrinsic, axis=0), - tf.expand_dims(tf.convert_to_tensor( - value=beam_inclinations), axis=0), + tf.expand_dims( + tf.convert_to_tensor(value=beam_inclinations), axis=0 + ), pixel_pose=pixel_pose_local, - frame_pose=frame_pose_local) + frame_pose=frame_pose_local, + ) + ) range_image_cartesian = tf.squeeze(range_image_cartesian, axis=0) - points_tensor = 
tf.gather_nd(range_image_cartesian, - tf.compat.v1.where(range_image_mask)) + points_tensor = tf.gather_nd( + range_image_cartesian, tf.compat.v1.where(range_image_mask) + ) cp = camera_projections[c.name][ri_index] - cp_tensor = tf.reshape( - tf.convert_to_tensor(value=cp.data), cp.shape.dims) + cp_tensor = tf.reshape(tf.convert_to_tensor(value=cp.data), cp.shape.dims) cp_points_tensor = tf.gather_nd( - cp_tensor, tf.compat.v1.where(range_image_mask)) + cp_tensor, tf.compat.v1.where(range_image_mask) + ) points.append(points_tensor.numpy()) cp_points.append(cp_points_tensor.numpy()) - intensity_tensor = tf.gather_nd(range_image_tensor[..., 1], - tf.where(range_image_mask)) + intensity_tensor = tf.gather_nd( + range_image_tensor[..., 1], tf.where(range_image_mask) + ) intensity.append(intensity_tensor.numpy()) - elongation_tensor = tf.gather_nd(range_image_tensor[..., 2], - tf.where(range_image_mask)) + elongation_tensor = tf.gather_nd( + range_image_tensor[..., 2], tf.where(range_image_mask) + ) elongation.append(elongation_tensor.numpy()) return points, cp_points, intensity, elongation diff --git a/tools/dist_test.sh b/tools/dist_test.sh index 99ebea5..e573d80 100755 --- a/tools/dist_test.sh +++ b/tools/dist_test.sh @@ -3,8 +3,8 @@ CONFIG=$1 CHECKPOINT=$2 GPUS=$3 -PORT=${PORT:-29503} +PORT=${PORT:-$((RANDOM % 10000 + 10000))} PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ -python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ - $(dirname "$0")/test_occ.py $CONFIG $CHECKPOINT --launcher pytorch ${@:4} --eval bbox +python -m torch.distributed.run --nproc_per_node=$GPUS --master_port=$PORT \ + $(dirname "$0")/test.py $CONFIG $CHECKPOINT --launcher pytorch ${@:4} diff --git a/tools/dist_train.sh b/tools/dist_train.sh index 141b284..e75d1a4 100755 --- a/tools/dist_train.sh +++ b/tools/dist_train.sh @@ -2,7 +2,7 @@ CONFIG=$1 GPUS=$2 -PORT=${PORT:-28509} +PORT=${PORT:-$((RANDOM % 10000 + 10000))} PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ diff --git a/tools/fp16/train.py b/tools/fp16/train.py index eddc349..42a0451 100644 --- a/tools/fp16/train.py +++ b/tools/fp16/train.py @@ -3,91 +3,97 @@ import argparse import copy -import mmcv import os import time -import torch import warnings -from mmcv import Config, DictAction -from mmcv.runner import get_dist_info, init_dist, wrap_fp16_model from os import path as osp +import mmcv +import torch +from mmcv import Config, DictAction +from mmcv.runner import get_dist_info, init_dist, wrap_fp16_model +from mmcv.utils import TORCH_VERSION, digit_version from mmdet import __version__ as mmdet_version from mmdet3d import __version__ as mmdet3d_version -#from mmdet3d.apis import train_model - from mmdet3d.datasets import build_dataset from mmdet3d.models import build_model from mmdet3d.utils import collect_env, get_root_logger from mmdet.apis import set_random_seed from mmseg import __version__ as mmseg_version -from mmcv.utils import TORCH_VERSION, digit_version +# from mmdet3d.apis import train_model + def parse_args(): - parser = argparse.ArgumentParser(description='Train a detector') - parser.add_argument('config', help='train config file path') - parser.add_argument('--work-dir', help='the dir to save logs and models') + parser = argparse.ArgumentParser(description="Train a detector") + parser.add_argument("config", help="train config file path") + parser.add_argument("--work-dir", help="the dir to save logs and models") + 
parser.add_argument("--resume-from", help="the checkpoint file to resume from") parser.add_argument( - '--resume-from', help='the checkpoint file to resume from') - parser.add_argument( - '--no-validate', - action='store_true', - help='whether not to evaluate the checkpoint during training') + "--no-validate", + action="store_true", + help="whether not to evaluate the checkpoint during training", + ) group_gpus = parser.add_mutually_exclusive_group() group_gpus.add_argument( - '--gpus', + "--gpus", type=int, - help='number of gpus to use ' - '(only applicable to non-distributed training)') + help="number of gpus to use " "(only applicable to non-distributed training)", + ) group_gpus.add_argument( - '--gpu-ids', + "--gpu-ids", type=int, - nargs='+', - help='ids of gpus to use ' - '(only applicable to non-distributed training)') - parser.add_argument('--seed', type=int, default=0, help='random seed') + nargs="+", + help="ids of gpus to use " "(only applicable to non-distributed training)", + ) + parser.add_argument("--seed", type=int, default=0, help="random seed") parser.add_argument( - '--deterministic', - action='store_true', - help='whether to set deterministic options for CUDNN backend.') + "--deterministic", + action="store_true", + help="whether to set deterministic options for CUDNN backend.", + ) parser.add_argument( - '--options', - nargs='+', + "--options", + nargs="+", action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file (deprecate), ' - 'change to --cfg-options instead.') + help="override some settings in the used config, the key-value pair " + "in xxx=yyy format will be merged into config file (deprecate), " + "change to --cfg-options instead.", + ) parser.add_argument( - '--cfg-options', - nargs='+', + "--cfg-options", + nargs="+", action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. If the value to ' + help="override some settings in the used config, the key-value pair " + "in xxx=yyy format will be merged into config file. If the value to " 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' - 'Note that the quotation marks are necessary and that no white space ' - 'is allowed.') + "Note that the quotation marks are necessary and that no white space " + "is allowed.", + ) parser.add_argument( - '--launcher', - choices=['none', 'pytorch', 'slurm', 'mpi'], - default='none', - help='job launcher') - parser.add_argument('--local_rank', type=int, default=0) + "--launcher", + choices=["none", "pytorch", "slurm", "mpi"], + default="none", + help="job launcher", + ) + parser.add_argument("--local_rank", type=int, default=0) parser.add_argument( - '--autoscale-lr', - action='store_true', - help='automatically scale lr with the number of gpus') + "--autoscale-lr", + action="store_true", + help="automatically scale lr with the number of gpus", + ) args = parser.parse_args() - if 'LOCAL_RANK' not in os.environ: - os.environ['LOCAL_RANK'] = str(args.local_rank) + if "LOCAL_RANK" not in os.environ: + os.environ["LOCAL_RANK"] = str(args.local_rank) if args.options and args.cfg_options: raise ValueError( - '--options and --cfg-options cannot be both specified, ' - '--options is deprecated in favor of --cfg-options') + "--options and --cfg-options cannot be both specified, " + "--options is deprecated in favor of --cfg-options" + ) if args.options: - warnings.warn('--options is deprecated in favor of --cfg-options') + warnings.warn("--options is deprecated in favor of --cfg-options") args.cfg_options = args.options return args @@ -100,65 +106,68 @@ def main(): if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) # import modules from string list. - if cfg.get('custom_imports', None): + if cfg.get("custom_imports", None): from mmcv.utils import import_modules_from_strings - import_modules_from_strings(**cfg['custom_imports']) + + import_modules_from_strings(**cfg["custom_imports"]) # import modules from plguin/xx, registry will be updated - if hasattr(cfg, 'plugin'): + if hasattr(cfg, "plugin"): if cfg.plugin: import importlib - if hasattr(cfg, 'plugin_dir'): + + if hasattr(cfg, "plugin_dir"): plugin_dir = cfg.plugin_dir _module_dir = os.path.dirname(plugin_dir) - _module_dir = _module_dir.split('/') + _module_dir = _module_dir.split("/") _module_path = _module_dir[0] for m in _module_dir[1:]: - _module_path = _module_path + '.' + m + _module_path = _module_path + "." + m print(_module_path) plg_lib = importlib.import_module(_module_path) else: # import dir is the dirpath for the config file _module_dir = os.path.dirname(args.config) - _module_dir = _module_dir.split('/') + _module_dir = _module_dir.split("/") _module_path = _module_dir[0] for m in _module_dir[1:]: - _module_path = _module_path + '.' + m + _module_path = _module_path + "." 
+ m print(_module_path) plg_lib = importlib.import_module(_module_path) - + from projects.mmdet3d_plugin.bevformer.apis import custom_train_model # set cudnn_benchmark - if cfg.get('cudnn_benchmark', False): + if cfg.get("cudnn_benchmark", False): torch.backends.cudnn.benchmark = True # work_dir is determined in this priority: CLI > segment in file > filename if args.work_dir is not None: # update configs according to CLI args if args.work_dir is not None cfg.work_dir = args.work_dir - elif cfg.get('work_dir', None) is None: + elif cfg.get("work_dir", None) is None: # use config filename as default work_dir if cfg.work_dir is None - cfg.work_dir = osp.join('./work_dirs', - osp.splitext(osp.basename(args.config))[0]) - #if args.resume_from is not None: + cfg.work_dir = osp.join( + "./work_dirs", osp.splitext(osp.basename(args.config))[0] + ) + # if args.resume_from is not None: - if args.resume_from is not None and osp.isfile(args.resume_from): + if args.resume_from is not None and osp.isfile(args.resume_from): cfg.resume_from = args.resume_from if args.gpu_ids is not None: cfg.gpu_ids = args.gpu_ids else: cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus) - if digit_version(TORCH_VERSION) != digit_version('1.8.1'): - cfg.optimizer['type'] = 'AdamW' + if digit_version(TORCH_VERSION) != digit_version("1.8.1"): + cfg.optimizer["type"] = "AdamW" if args.autoscale_lr: # apply the linear scaling rule (https://arxiv.org/abs/1706.02677) - cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8 + cfg.optimizer["lr"] = cfg.optimizer["lr"] * len(cfg.gpu_ids) / 8 # init distributed env first, since logger depends on the dist info. - if args.launcher == 'none': - assert False, 'DOT NOT SUPPORT!!!' + if args.launcher == "none": + assert False, "DOT NOT SUPPORT!!!" 
distributed = False else: distributed = True @@ -172,69 +181,69 @@ def main(): # dump config cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config))) # init the logger before other steps - timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) - log_file = osp.join(cfg.work_dir, f'{timestamp}.log') + timestamp = time.strftime("%Y%m%d_%H%M%S", time.localtime()) + log_file = osp.join(cfg.work_dir, f"{timestamp}.log") # specify logger name, if we still use 'mmdet', the output info will be # filtered and won't be saved in the log_file # TODO: ugly workaround to judge whether we are training det or seg model - if cfg.model.type in ['EncoderDecoder3D']: - logger_name = 'mmseg' + if cfg.model.type in ["EncoderDecoder3D"]: + logger_name = "mmseg" else: - logger_name = 'mmdet' + logger_name = "mmdet" logger = get_root_logger( - log_file=log_file, log_level=cfg.log_level, name=logger_name) + log_file=log_file, log_level=cfg.log_level, name=logger_name + ) # init the meta dict to record some important information such as # environment info and seed, which will be logged meta = dict() # log env info env_info_dict = collect_env() - env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()]) - dash_line = '-' * 60 + '\n' - logger.info('Environment info:\n' + dash_line + env_info + '\n' + - dash_line) - meta['env_info'] = env_info - meta['config'] = cfg.pretty_text + env_info = "\n".join([(f"{k}: {v}") for k, v in env_info_dict.items()]) + dash_line = "-" * 60 + "\n" + logger.info("Environment info:\n" + dash_line + env_info + "\n" + dash_line) + meta["env_info"] = env_info + meta["config"] = cfg.pretty_text # log some basic info - logger.info(f'Distributed training: {distributed}') - logger.info(f'Config:\n{cfg.pretty_text}') + logger.info(f"Distributed training: {distributed}") + logger.info(f"Config:\n{cfg.pretty_text}") # set random seeds if args.seed is not None: - logger.info(f'Set random seed to {args.seed}, ' - f'deterministic: {args.deterministic}') + logger.info( + f"Set random seed to {args.seed}, " f"deterministic: {args.deterministic}" + ) set_random_seed(args.seed, deterministic=args.deterministic) cfg.seed = args.seed - meta['seed'] = args.seed - meta['exp_name'] = osp.basename(args.config) + meta["seed"] = args.seed + meta["exp_name"] = osp.basename(args.config) model = build_model( - cfg.model, - train_cfg=cfg.get('train_cfg'), - test_cfg=cfg.get('test_cfg')) + cfg.model, train_cfg=cfg.get("train_cfg"), test_cfg=cfg.get("test_cfg") + ) model.init_weights() eval_model_config = copy.deepcopy(cfg.model) eval_model = build_model( - eval_model_config, - train_cfg=cfg.get('train_cfg'), - test_cfg=cfg.get('test_cfg')) - - fp16_cfg = cfg.get('fp16', None) + eval_model_config, train_cfg=cfg.get("train_cfg"), test_cfg=cfg.get("test_cfg") + ) + + fp16_cfg = cfg.get("fp16", None) if fp16_cfg is not None: wrap_fp16_model(eval_model) - #eval_model.init_weights() + # eval_model.init_weights() eval_model.load_state_dict(model.state_dict()) - logger.info(f'Model:\n{model}') + logger.info(f"Model:\n{model}") from projects.mmdet3d_plugin.datasets import custom_build_dataset + datasets = [custom_build_dataset(cfg.data.train)] if len(cfg.workflow) == 2: val_dataset = copy.deepcopy(cfg.data.val) # in case we use a dataset wrapper - if 'dataset' in cfg.data.train: + if "dataset" in cfg.data.train: val_dataset.pipeline = cfg.data.train.dataset.pipeline else: val_dataset.pipeline = cfg.data.train.pipeline @@ -253,7 +262,9 @@ def main(): config=cfg.pretty_text, CLASSES=datasets[0].CLASSES, 
PALETTE=datasets[0].PALETTE # for segmentors - if hasattr(datasets[0], 'PALETTE') else None) + if hasattr(datasets[0], "PALETTE") + else None, + ) # add an attribute for visualization convenience model.CLASSES = datasets[0].CLASSES custom_train_model( @@ -264,8 +275,9 @@ def main(): distributed=distributed, validate=(not args.no_validate), timestamp=timestamp, - meta=meta) + meta=meta, + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/tools/misc/browse_dataset.py b/tools/misc/browse_dataset.py index e3419f6..1802d01 100644 --- a/tools/misc/browse_dataset.py +++ b/tools/misc/browse_dataset.py @@ -1,51 +1,64 @@ # Copyright (c) OpenMMLab. All rights reserved. import argparse -import numpy as np import warnings -from mmcv import Config, DictAction, mkdir_or_exist, track_iter_progress from os import path as osp -from mmdet3d.core.bbox import (Box3DMode, CameraInstance3DBoxes, Coord3DMode, - DepthInstance3DBoxes, LiDARInstance3DBoxes) -from mmdet3d.core.visualizer import (show_multi_modality_result, show_result, - show_seg_result) +import numpy as np +from mmcv import Config, DictAction, mkdir_or_exist, track_iter_progress +from mmdet3d.core.bbox import ( + Box3DMode, + CameraInstance3DBoxes, + Coord3DMode, + DepthInstance3DBoxes, + LiDARInstance3DBoxes, +) +from mmdet3d.core.visualizer import ( + show_multi_modality_result, + show_result, + show_seg_result, +) from mmdet3d.datasets import build_dataset def parse_args(): - parser = argparse.ArgumentParser(description='Browse a dataset') - parser.add_argument('config', help='train config file path') + parser = argparse.ArgumentParser(description="Browse a dataset") + parser.add_argument("config", help="train config file path") parser.add_argument( - '--skip-type', + "--skip-type", type=str, - nargs='+', - default=['Normalize'], - help='skip some useless pipeline') + nargs="+", + default=["Normalize"], + help="skip some useless pipeline", + ) parser.add_argument( - '--output-dir', + "--output-dir", default=None, type=str, - help='If there is no display interface, you can save it') + help="If there is no display interface, you can save it", + ) parser.add_argument( - '--task', + "--task", type=str, - choices=['det', 'seg', 'multi_modality-det', 'mono-det'], - help='Determine the visualization method depending on the task.') + choices=["det", "seg", "multi_modality-det", "mono-det"], + help="Determine the visualization method depending on the task.", + ) parser.add_argument( - '--online', - action='store_true', - help='Whether to perform online visualization. Note that you often ' - 'need a monitor to do so.') + "--online", + action="store_true", + help="Whether to perform online visualization. Note that you often " + "need a monitor to do so.", + ) parser.add_argument( - '--cfg-options', - nargs='+', + "--cfg-options", + nargs="+", action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. If the value to ' + help="override some settings in the used config, the key-value pair " + "in xxx=yyy format will be merged into config file. If the value to " 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' - 'Note that the quotation marks are necessary and that no white space ' - 'is allowed.') + "Note that the quotation marks are necessary and that no white space " + "is allowed.", + ) args = parser.parse_args() return args @@ -56,21 +69,22 @@ def build_data_cfg(config_path, skip_type, cfg_options): if cfg_options is not None: cfg.merge_from_dict(cfg_options) # import modules from string list. - if cfg.get('custom_imports', None): + if cfg.get("custom_imports", None): from mmcv.utils import import_modules_from_strings - import_modules_from_strings(**cfg['custom_imports']) + + import_modules_from_strings(**cfg["custom_imports"]) # extract inner dataset of `RepeatDataset` as `cfg.data.train` # so we don't need to worry about it later - if cfg.data.train['type'] == 'RepeatDataset': + if cfg.data.train["type"] == "RepeatDataset": cfg.data.train = cfg.data.train.dataset # use only first dataset for `ConcatDataset` - if cfg.data.train['type'] == 'ConcatDataset': + if cfg.data.train["type"] == "ConcatDataset": cfg.data.train = cfg.data.train.datasets[0] train_data_cfg = cfg.data.train # eval_pipeline purely consists of loading functions # use eval_pipeline for data loading - train_data_cfg['pipeline'] = [ - x for x in cfg.eval_pipeline if x['type'] not in skip_type + train_data_cfg["pipeline"] = [ + x for x in cfg.eval_pipeline if x["type"] not in skip_type ] return cfg @@ -79,36 +93,31 @@ def build_data_cfg(config_path, skip_type, cfg_options): def to_depth_mode(points, bboxes): """Convert points and bboxes to Depth Coord and Depth Box mode.""" if points is not None: - points = Coord3DMode.convert_point(points.copy(), Coord3DMode.LIDAR, - Coord3DMode.DEPTH) + points = Coord3DMode.convert_point( + points.copy(), Coord3DMode.LIDAR, Coord3DMode.DEPTH + ) if bboxes is not None: - bboxes = Box3DMode.convert(bboxes.clone(), Box3DMode.LIDAR, - Box3DMode.DEPTH) + bboxes = Box3DMode.convert(bboxes.clone(), Box3DMode.LIDAR, Box3DMode.DEPTH) return points, bboxes def show_det_data(idx, dataset, out_dir, filename, show=False): """Visualize 3D point cloud and 3D bboxes.""" example = dataset.prepare_train_data(idx) - points = example['points']._data.numpy() - gt_bboxes = dataset.get_ann_info(idx)['gt_bboxes_3d'].tensor + points = example["points"]._data.numpy() + gt_bboxes = dataset.get_ann_info(idx)["gt_bboxes_3d"].tensor if dataset.box_mode_3d != Box3DMode.DEPTH: points, gt_bboxes = to_depth_mode(points, gt_bboxes) show_result( - points, - gt_bboxes.clone(), - None, - out_dir, - filename, - show=show, - snapshot=True) + points, gt_bboxes.clone(), None, out_dir, filename, show=show, snapshot=True + ) def show_seg_data(idx, dataset, out_dir, filename, show=False): """Visualize 3D point cloud and segmentation mask.""" example = dataset.prepare_train_data(idx) - points = example['points']._data.numpy() - gt_seg = example['pts_semantic_mask']._data.numpy() + points = example["points"]._data.numpy() + gt_seg = example["pts_semantic_mask"]._data.numpy() show_seg_result( points, gt_seg.copy(), @@ -118,23 +127,19 @@ def show_seg_data(idx, dataset, out_dir, filename, show=False): np.array(dataset.PALETTE), dataset.ignore_index, show=show, - snapshot=True) + snapshot=True, + ) -def show_proj_bbox_img(idx, - dataset, - out_dir, - filename, - show=False, - is_nus_mono=False): +def show_proj_bbox_img(idx, dataset, out_dir, filename, show=False, is_nus_mono=False): """Visualize 3D bboxes on 2D image by projection.""" try: example = dataset.prepare_train_data(idx) except AttributeError: # for Mono-3D 
datasets example = dataset.prepare_train_img(idx) - gt_bboxes = dataset.get_ann_info(idx)['gt_bboxes_3d'] - img_metas = example['img_metas']._data - img = example['img']._data.numpy() + gt_bboxes = dataset.get_ann_info(idx)["gt_bboxes_3d"] + img_metas = example["img_metas"]._data + img = example["img"]._data.numpy() # need to transpose channel to first dim img = img.transpose(1, 2, 0) # no 3D gt bboxes, just show img @@ -148,37 +153,38 @@ def show_proj_bbox_img(idx, None, out_dir, filename, - box_mode='depth', + box_mode="depth", img_metas=img_metas, - show=show) + show=show, + ) elif isinstance(gt_bboxes, LiDARInstance3DBoxes): show_multi_modality_result( img, gt_bboxes, None, - img_metas['lidar2img'], + img_metas["lidar2img"], out_dir, filename, - box_mode='lidar', + box_mode="lidar", img_metas=img_metas, - show=show) + show=show, + ) elif isinstance(gt_bboxes, CameraInstance3DBoxes): show_multi_modality_result( img, gt_bboxes, None, - img_metas['cam2img'], + img_metas["cam2img"], out_dir, filename, - box_mode='camera', + box_mode="camera", img_metas=img_metas, - show=show) + show=show, + ) else: # can't project, just show img - warnings.warn( - f'unrecognized gt box type {type(gt_bboxes)}, only show image') - show_multi_modality_result( - img, None, None, None, out_dir, filename, show=show) + warnings.warn(f"unrecognized gt box type {type(gt_bboxes)}, only show image") + show_multi_modality_result(img, None, None, None, out_dir, filename, show=show) def main(): @@ -190,7 +196,8 @@ def main(): cfg = build_data_cfg(args.config, args.skip_type, args.cfg_options) try: dataset = build_dataset( - cfg.data.train, default_args=dict(filter_empty_gt=False)) + cfg.data.train, default_args=dict(filter_empty_gt=False) + ) except TypeError: # seg dataset doesn't have `filter_empty_gt` key dataset = build_dataset(cfg.data.train) data_infos = dataset.data_infos @@ -200,28 +207,29 @@ def main(): vis_task = args.task # 'det', 'seg', 'multi_modality-det', 'mono-det' for idx, data_info in enumerate(track_iter_progress(data_infos)): - if dataset_type in ['KittiDataset', 'WaymoDataset']: - data_path = data_info['point_cloud']['velodyne_path'] + if dataset_type in ["KittiDataset", "WaymoDataset"]: + data_path = data_info["point_cloud"]["velodyne_path"] elif dataset_type in [ - 'ScanNetDataset', 'SUNRGBDDataset', 'ScanNetSegDataset', - 'S3DISSegDataset', 'S3DISDataset' + "ScanNetDataset", + "SUNRGBDDataset", + "ScanNetSegDataset", + "S3DISSegDataset", + "S3DISDataset", ]: - data_path = data_info['pts_path'] - elif dataset_type in ['NuScenesDataset', 'LyftDataset']: - data_path = data_info['lidar_path'] - elif dataset_type in ['NuScenesMonoDataset']: - data_path = data_info['file_name'] + data_path = data_info["pts_path"] + elif dataset_type in ["NuScenesDataset", "LyftDataset"]: + data_path = data_info["lidar_path"] + elif dataset_type in ["NuScenesMonoDataset"]: + data_path = data_info["file_name"] else: - raise NotImplementedError( - f'unsupported dataset type {dataset_type}') + raise NotImplementedError(f"unsupported dataset type {dataset_type}") file_name = osp.splitext(osp.basename(data_path))[0] - if vis_task in ['det', 'multi_modality-det']: + if vis_task in ["det", "multi_modality-det"]: # show 3D bboxes on 3D point clouds - show_det_data( - idx, dataset, args.output_dir, file_name, show=args.online) - if vis_task in ['multi_modality-det', 'mono-det']: + show_det_data(idx, dataset, args.output_dir, file_name, show=args.online) + if vis_task in ["multi_modality-det", "mono-det"]: # project 3D bboxes to 
2D image show_proj_bbox_img( idx, @@ -229,12 +237,12 @@ def main(): args.output_dir, file_name, show=args.online, - is_nus_mono=(dataset_type == 'NuScenesMonoDataset')) - elif vis_task in ['seg']: + is_nus_mono=(dataset_type == "NuScenesMonoDataset"), + ) + elif vis_task in ["seg"]: # show 3D segmentation mask on 3D point clouds - show_seg_data( - idx, dataset, args.output_dir, file_name, show=args.online) + show_seg_data(idx, dataset, args.output_dir, file_name, show=args.online) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/tools/misc/fuse_conv_bn.py b/tools/misc/fuse_conv_bn.py index d4e2201..c76fb14 100644 --- a/tools/misc/fuse_conv_bn.py +++ b/tools/misc/fuse_conv_bn.py @@ -1,10 +1,10 @@ # Copyright (c) OpenMMLab. All rights reserved. import argparse + import torch from mmcv.runner import save_checkpoint -from torch import nn as nn - from mmdet.apis import init_model +from torch import nn as nn def fuse_conv_bn(conv, bn): @@ -13,12 +13,10 @@ def fuse_conv_bn(conv, bn): fuse it with the preceding conv layers to save computations and simplify network structures.""" conv_w = conv.weight - conv_b = conv.bias if conv.bias is not None else torch.zeros_like( - bn.running_mean) + conv_b = conv.bias if conv.bias is not None else torch.zeros_like(bn.running_mean) factor = bn.weight / torch.sqrt(bn.running_var + bn.eps) - conv.weight = nn.Parameter(conv_w * - factor.reshape([conv.out_channels, 1, 1, 1])) + conv.weight = nn.Parameter(conv_w * factor.reshape([conv.out_channels, 1, 1, 1])) conv.bias = nn.Parameter((conv_b - bn.running_mean) * factor + bn.bias) return conv @@ -45,11 +43,10 @@ def fuse_module(m): def parse_args(): - parser = argparse.ArgumentParser( - description='fuse Conv and BN layers in a model') - parser.add_argument('config', help='config file path') - parser.add_argument('checkpoint', help='checkpoint file path') - parser.add_argument('out', help='output path of the converted model') + parser = argparse.ArgumentParser(description="fuse Conv and BN layers in a model") + parser.add_argument("config", help="config file path") + parser.add_argument("checkpoint", help="checkpoint file path") + parser.add_argument("out", help="output path of the converted model") args = parser.parse_args() return args @@ -63,5 +60,5 @@ def main(): save_checkpoint(fused_model, args.out) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/tools/misc/print_config.py b/tools/misc/print_config.py index 3100fc3..3c72b54 100644 --- a/tools/misc/print_config.py +++ b/tools/misc/print_config.py @@ -1,13 +1,15 @@ # Copyright (c) OpenMMLab. All rights reserved. 
import argparse + from mmcv import Config, DictAction def parse_args(): - parser = argparse.ArgumentParser(description='Print the whole config') - parser.add_argument('config', help='config file path') + parser = argparse.ArgumentParser(description="Print the whole config") + parser.add_argument("config", help="config file path") parser.add_argument( - '--options', nargs='+', action=DictAction, help='arguments in dict') + "--options", nargs="+", action=DictAction, help="arguments in dict" + ) args = parser.parse_args() return args @@ -19,8 +21,8 @@ def main(): cfg = Config.fromfile(args.config) if args.options is not None: cfg.merge_from_dict(args.options) - print(f'Config:\n{cfg.pretty_text}') + print(f"Config:\n{cfg.pretty_text}") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/tools/misc/visualize_results.py b/tools/misc/visualize_results.py index 302adc5..fbdb9d9 100644 --- a/tools/misc/visualize_results.py +++ b/tools/misc/visualize_results.py @@ -1,18 +1,18 @@ # Copyright (c) OpenMMLab. All rights reserved. import argparse + import mmcv from mmcv import Config - from mmdet3d.datasets import build_dataset def parse_args(): - parser = argparse.ArgumentParser( - description='MMDet3D visualize the results') - parser.add_argument('config', help='test config file path') - parser.add_argument('--result', help='results file in pickle format') + parser = argparse.ArgumentParser(description="MMDet3D visualize the results") + parser.add_argument("config", help="test config file path") + parser.add_argument("--result", help="results file in pickle format") parser.add_argument( - '--show-dir', help='directory where visualize results will be saved') + "--show-dir", help="directory where visualize results will be saved" + ) args = parser.parse_args() return args @@ -21,9 +21,8 @@ def parse_args(): def main(): args = parse_args() - if args.result is not None and \ - not args.result.endswith(('.pkl', '.pickle')): - raise ValueError('The results file must be a pkl file.') + if args.result is not None and not args.result.endswith((".pkl", ".pickle")): + raise ValueError("The results file must be a pkl file.") cfg = Config.fromfile(args.config) cfg.data.test.test_mode = True @@ -32,18 +31,18 @@ def main(): dataset = build_dataset(cfg.data.test) results = mmcv.load(args.result) - if getattr(dataset, 'show', None) is not None: + if getattr(dataset, "show", None) is not None: # data loading pipeline for showing - eval_pipeline = cfg.get('eval_pipeline', {}) + eval_pipeline = cfg.get("eval_pipeline", {}) if eval_pipeline: dataset.show(results, args.show_dir, pipeline=eval_pipeline) else: dataset.show(results, args.show_dir) # use default pipeline else: raise NotImplementedError( - 'Show is not implemented for dataset {}!'.format( - type(dataset).__name__)) + "Show is not implemented for dataset {}!".format(type(dataset).__name__) + ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/tools/model_converters/convert_votenet_checkpoints.py b/tools/model_converters/convert_votenet_checkpoints.py index 33792b0..61bb5f0 100644 --- a/tools/model_converters/convert_votenet_checkpoints.py +++ b/tools/model_converters/convert_votenet_checkpoints.py @@ -1,18 +1,19 @@ # Copyright (c) OpenMMLab. All rights reserved. 
import argparse import tempfile + import torch from mmcv import Config from mmcv.runner import load_state_dict - from mmdet3d.models import build_detector def parse_args(): parser = argparse.ArgumentParser( - description='MMDet3D upgrade model version(before v0.6.0) of VoteNet') - parser.add_argument('checkpoint', help='checkpoint file') - parser.add_argument('--out', help='path of the output checkpoint file') + description="MMDet3D upgrade model version(before v0.6.0) of VoteNet" + ) + parser.add_argument("checkpoint", help="checkpoint file") + parser.add_argument("--out", help="path of the output checkpoint file") args = parser.parse_args() return args @@ -27,37 +28,37 @@ def parse_config(config_strings): Config: model config """ temp_file = tempfile.NamedTemporaryFile() - config_path = f'{temp_file.name}.py' - with open(config_path, 'w') as f: + config_path = f"{temp_file.name}.py" + with open(config_path, "w") as f: f.write(config_strings) config = Config.fromfile(config_path) # Update backbone config - if 'pool_mod' in config.model.backbone: - config.model.backbone.pop('pool_mod') + if "pool_mod" in config.model.backbone: + config.model.backbone.pop("pool_mod") - if 'sa_cfg' not in config.model.backbone: - config.model.backbone['sa_cfg'] = dict( - type='PointSAModule', - pool_mod='max', - use_xyz=True, - normalize_xyz=True) + if "sa_cfg" not in config.model.backbone: + config.model.backbone["sa_cfg"] = dict( + type="PointSAModule", pool_mod="max", use_xyz=True, normalize_xyz=True + ) - if 'type' not in config.model.bbox_head.vote_aggregation_cfg: - config.model.bbox_head.vote_aggregation_cfg['type'] = 'PointSAModule' + if "type" not in config.model.bbox_head.vote_aggregation_cfg: + config.model.bbox_head.vote_aggregation_cfg["type"] = "PointSAModule" # Update bbox_head config - if 'pred_layer_cfg' not in config.model.bbox_head: - config.model.bbox_head['pred_layer_cfg'] = dict( - in_channels=128, shared_conv_channels=(128, 128), bias=True) + if "pred_layer_cfg" not in config.model.bbox_head: + config.model.bbox_head["pred_layer_cfg"] = dict( + in_channels=128, shared_conv_channels=(128, 128), bias=True + ) - if 'feat_channels' in config.model.bbox_head: - config.model.bbox_head.pop('feat_channels') + if "feat_channels" in config.model.bbox_head: + config.model.bbox_head.pop("feat_channels") - if 'vote_moudule_cfg' in config.model.bbox_head: - config.model.bbox_head['vote_module_cfg'] = config.model.bbox_head.pop( - 'vote_moudule_cfg') + if "vote_moudule_cfg" in config.model.bbox_head: + config.model.bbox_head["vote_module_cfg"] = config.model.bbox_head.pop( + "vote_moudule_cfg" + ) if config.model.bbox_head.vote_aggregation_cfg.use_xyz: config.model.bbox_head.vote_aggregation_cfg.mlp_channels[0] -= 3 @@ -76,41 +77,48 @@ def main(): """ args = parse_args() checkpoint = torch.load(args.checkpoint) - cfg = parse_config(checkpoint['meta']['config']) + cfg = parse_config(checkpoint["meta"]["config"]) # Build the model and load checkpoint model = build_detector( - cfg.model, - train_cfg=cfg.get('train_cfg'), - test_cfg=cfg.get('test_cfg')) - orig_ckpt = checkpoint['state_dict'] + cfg.model, train_cfg=cfg.get("train_cfg"), test_cfg=cfg.get("test_cfg") + ) + orig_ckpt = checkpoint["state_dict"] converted_ckpt = orig_ckpt.copy() - if cfg['dataset_type'] == 'ScanNetDataset': + if cfg["dataset_type"] == "ScanNetDataset": NUM_CLASSES = 18 - elif cfg['dataset_type'] == 'SUNRGBDDataset': + elif cfg["dataset_type"] == "SUNRGBDDataset": NUM_CLASSES = 10 else: raise NotImplementedError RENAME_PREFIX = 
{ - 'bbox_head.conv_pred.0': 'bbox_head.conv_pred.shared_convs.layer0', - 'bbox_head.conv_pred.1': 'bbox_head.conv_pred.shared_convs.layer1' + "bbox_head.conv_pred.0": "bbox_head.conv_pred.shared_convs.layer0", + "bbox_head.conv_pred.1": "bbox_head.conv_pred.shared_convs.layer1", } DEL_KEYS = [ - 'bbox_head.conv_pred.0.bn.num_batches_tracked', - 'bbox_head.conv_pred.1.bn.num_batches_tracked' + "bbox_head.conv_pred.0.bn.num_batches_tracked", + "bbox_head.conv_pred.1.bn.num_batches_tracked", ] EXTRACT_KEYS = { - 'bbox_head.conv_pred.conv_cls.weight': - ('bbox_head.conv_pred.conv_out.weight', [(0, 2), (-NUM_CLASSES, -1)]), - 'bbox_head.conv_pred.conv_cls.bias': - ('bbox_head.conv_pred.conv_out.bias', [(0, 2), (-NUM_CLASSES, -1)]), - 'bbox_head.conv_pred.conv_reg.weight': - ('bbox_head.conv_pred.conv_out.weight', [(2, -NUM_CLASSES)]), - 'bbox_head.conv_pred.conv_reg.bias': - ('bbox_head.conv_pred.conv_out.bias', [(2, -NUM_CLASSES)]) + "bbox_head.conv_pred.conv_cls.weight": ( + "bbox_head.conv_pred.conv_out.weight", + [(0, 2), (-NUM_CLASSES, -1)], + ), + "bbox_head.conv_pred.conv_cls.bias": ( + "bbox_head.conv_pred.conv_out.bias", + [(0, 2), (-NUM_CLASSES, -1)], + ), + "bbox_head.conv_pred.conv_reg.weight": ( + "bbox_head.conv_pred.conv_out.weight", + [(2, -NUM_CLASSES)], + ), + "bbox_head.conv_pred.conv_reg.bias": ( + "bbox_head.conv_pred.conv_out.bias", + [(2, -NUM_CLASSES)], + ), } # Delete some useless keys @@ -122,8 +130,7 @@ def main(): for old_key in converted_ckpt.keys(): for rename_prefix in RENAME_PREFIX.keys(): if rename_prefix in old_key: - new_key = old_key.replace(rename_prefix, - RENAME_PREFIX[rename_prefix]) + new_key = old_key.replace(rename_prefix, RENAME_PREFIX[rename_prefix]) RENAME_KEYS[new_key] = old_key for new_key, old_key in RENAME_KEYS.items(): converted_ckpt[new_key] = converted_ckpt.pop(old_key) @@ -132,7 +139,7 @@ def main(): for new_key, (old_key, indices) in EXTRACT_KEYS.items(): cur_layers = orig_ckpt[old_key] converted_layers = [] - for (start, end) in indices: + for start, end in indices: if end != -1: converted_layers.append(cur_layers[start:end]) else: @@ -144,9 +151,9 @@ def main(): # Check the converted checkpoint by loading to the model load_state_dict(model, converted_ckpt, strict=True) - checkpoint['state_dict'] = converted_ckpt + checkpoint["state_dict"] = converted_ckpt torch.save(checkpoint, args.out) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/tools/model_converters/publish_model.py b/tools/model_converters/publish_model.py index 318fd46..1acf9f0 100644 --- a/tools/model_converters/publish_model.py +++ b/tools/model_converters/publish_model.py @@ -1,29 +1,29 @@ # Copyright (c) OpenMMLab. All rights reserved. 
import argparse import subprocess + import torch def parse_args(): - parser = argparse.ArgumentParser( - description='Process a checkpoint to be published') - parser.add_argument('in_file', help='input checkpoint filename') - parser.add_argument('out_file', help='output checkpoint filename') + parser = argparse.ArgumentParser(description="Process a checkpoint to be published") + parser.add_argument("in_file", help="input checkpoint filename") + parser.add_argument("out_file", help="output checkpoint filename") args = parser.parse_args() return args def process_checkpoint(in_file, out_file): - checkpoint = torch.load(in_file, map_location='cpu') + checkpoint = torch.load(in_file, map_location="cpu") # remove optimizer for smaller file size - if 'optimizer' in checkpoint: - del checkpoint['optimizer'] + if "optimizer" in checkpoint: + del checkpoint["optimizer"] # if it is necessary to remove some sensitive data in checkpoint['meta'], # add the code here. torch.save(checkpoint, out_file) - sha = subprocess.check_output(['sha256sum', out_file]).decode() - final_file = out_file.rstrip('.pth') + '-{}.pth'.format(sha[:8]) - subprocess.Popen(['mv', out_file, final_file]) + sha = subprocess.check_output(["sha256sum", out_file]).decode() + final_file = out_file.rstrip(".pth") + "-{}.pth".format(sha[:8]) + subprocess.Popen(["mv", out_file, final_file]) def main(): @@ -31,5 +31,5 @@ def main(): process_checkpoint(args.in_file, args.out_file) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/tools/model_converters/regnet2mmdet.py b/tools/model_converters/regnet2mmdet.py index 9dee3c8..caed92b 100644 --- a/tools/model_converters/regnet2mmdet.py +++ b/tools/model_converters/regnet2mmdet.py @@ -1,52 +1,53 @@ # Copyright (c) OpenMMLab. All rights reserved. 
import argparse -import torch from collections import OrderedDict +import torch + def convert_stem(model_key, model_weight, state_dict, converted_names): - new_key = model_key.replace('stem.conv', 'conv1') - new_key = new_key.replace('stem.bn', 'bn1') + new_key = model_key.replace("stem.conv", "conv1") + new_key = new_key.replace("stem.bn", "bn1") state_dict[new_key] = model_weight converted_names.add(model_key) - print(f'Convert {model_key} to {new_key}') + print(f"Convert {model_key} to {new_key}") def convert_head(model_key, model_weight, state_dict, converted_names): - new_key = model_key.replace('head.fc', 'fc') + new_key = model_key.replace("head.fc", "fc") state_dict[new_key] = model_weight converted_names.add(model_key) - print(f'Convert {model_key} to {new_key}') + print(f"Convert {model_key} to {new_key}") def convert_reslayer(model_key, model_weight, state_dict, converted_names): - split_keys = model_key.split('.') + split_keys = model_key.split(".") layer, block, module = split_keys[:3] block_id = int(block[1:]) - layer_name = f'layer{int(layer[1:])}' - block_name = f'{block_id - 1}' + layer_name = f"layer{int(layer[1:])}" + block_name = f"{block_id - 1}" - if block_id == 1 and module == 'bn': - new_key = f'{layer_name}.{block_name}.downsample.1.{split_keys[-1]}' - elif block_id == 1 and module == 'proj': - new_key = f'{layer_name}.{block_name}.downsample.0.{split_keys[-1]}' - elif module == 'f': - if split_keys[3] == 'a_bn': - module_name = 'bn1' - elif split_keys[3] == 'b_bn': - module_name = 'bn2' - elif split_keys[3] == 'c_bn': - module_name = 'bn3' - elif split_keys[3] == 'a': - module_name = 'conv1' - elif split_keys[3] == 'b': - module_name = 'conv2' - elif split_keys[3] == 'c': - module_name = 'conv3' - new_key = f'{layer_name}.{block_name}.{module_name}.{split_keys[-1]}' + if block_id == 1 and module == "bn": + new_key = f"{layer_name}.{block_name}.downsample.1.{split_keys[-1]}" + elif block_id == 1 and module == "proj": + new_key = f"{layer_name}.{block_name}.downsample.0.{split_keys[-1]}" + elif module == "f": + if split_keys[3] == "a_bn": + module_name = "bn1" + elif split_keys[3] == "b_bn": + module_name = "bn2" + elif split_keys[3] == "c_bn": + module_name = "bn3" + elif split_keys[3] == "a": + module_name = "conv1" + elif split_keys[3] == "b": + module_name = "conv2" + elif split_keys[3] == "c": + module_name = "conv3" + new_key = f"{layer_name}.{block_name}.{module_name}.{split_keys[-1]}" else: - raise ValueError(f'Unsupported conversion of key {model_key}') - print(f'Convert {model_key} to {new_key}') + raise ValueError(f"Unsupported conversion of key {model_key}") + print(f"Convert {model_key} to {new_key}") state_dict[new_key] = model_weight converted_names.add(model_key) @@ -55,35 +56,35 @@ def convert(src, dst): """Convert keys in pycls pretrained RegNet models to mmdet style.""" # load caffe model regnet_model = torch.load(src) - blobs = regnet_model['model_state'] + blobs = regnet_model["model_state"] # convert to pytorch style state_dict = OrderedDict() converted_names = set() for key, weight in blobs.items(): - if 'stem' in key: + if "stem" in key: convert_stem(key, weight, state_dict, converted_names) - elif 'head' in key: + elif "head" in key: convert_head(key, weight, state_dict, converted_names) - elif key.startswith('s'): + elif key.startswith("s"): convert_reslayer(key, weight, state_dict, converted_names) # check if all layers are converted for key in blobs: if key not in converted_names: - print(f'not converted: {key}') + print(f"not converted: 
{key}") # save checkpoint checkpoint = dict() - checkpoint['state_dict'] = state_dict + checkpoint["state_dict"] = state_dict torch.save(checkpoint, dst) def main(): - parser = argparse.ArgumentParser(description='Convert model keys') - parser.add_argument('src', help='src detectron model path') - parser.add_argument('dst', help='save path') + parser = argparse.ArgumentParser(description="Convert model keys") + parser.add_argument("src", help="src detectron model path") + parser.add_argument("dst", help="save path") args = parser.parse_args() convert(args.src, args.dst) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/tools/my_dist_train.sh b/tools/my_dist_train.sh deleted file mode 100755 index 669a2aa..0000000 --- a/tools/my_dist_train.sh +++ /dev/null @@ -1,20 +0,0 @@ -# !/usr/bin/env bash -CONFIG=$1 -GPUS=$2 -NNODES=${NNODES:-1} -NODE_RANK=${NODE_RANK:-0} -PORT=${PORT:-29500} -MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} - - -PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ -python -m torch.distributed.launch \ - --nnodes=$NNODES \ - --node_rank=$NODE_RANK \ - --master_addr=$MASTER_ADDR \ - --nproc_per_node=$GPUS \ - --master_port=$PORT \ - $(dirname "$0")/train.py \ - $CONFIG \ - --seed 0 \ - --launcher pytorch ${@:3} \ No newline at end of file diff --git a/tools/slurm_test.sh b/tools/slurm_test.sh new file mode 100755 index 0000000..caa6945 --- /dev/null +++ b/tools/slurm_test.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +set -x +export CUDA_HOME='/mnt/lustre/share/cuda-11.1' +PARTITION=$1 +JOB_NAME=$2 +CONFIG=$3 +CHECKPOINT=$4 + +GPUS=${GPUS:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:5} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u tools/test.py ${CONFIG} $CHECKPOINT --launcher="slurm" ${PY_ARGS} diff --git a/tools/slurm_train.sh b/tools/slurm_train.sh new file mode 100755 index 0000000..4064443 --- /dev/null +++ b/tools/slurm_train.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +set -x +# export CUDA_HOME=/mnt/lustre/share/cuda-11.1 + +PARTITION=$1 +JOB_NAME=$2 +CONFIG=$3 + +GPUS=${GPUS:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:5} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + -x SH-IDC1-10-140-0-199 \ + ${SRUN_ARGS} \ + python -u tools/train.py ${CONFIG} --launcher="slurm" ${PY_ARGS} diff --git a/tools/test.py b/tools/test.py index fd2cf45..5aa7dda 100644 --- a/tools/test.py +++ b/tools/test.py @@ -4,105 +4,114 @@ # Modified by Zhiqi Li # --------------------------------------------- import argparse -import mmcv import os -import torch +import os.path as osp +import time import warnings + +import mmcv +import torch from mmcv import Config, DictAction from mmcv.cnn import fuse_conv_bn from mmcv.parallel import MMDataParallel, MMDistributedDataParallel -from mmcv.runner import (get_dist_info, init_dist, load_checkpoint, - wrap_fp16_model) - +from mmcv.runner import get_dist_info, init_dist, load_checkpoint, wrap_fp16_model from mmdet3d.apis import single_gpu_test from mmdet3d.datasets import 
build_dataset -from projects.mmdet3d_plugin.datasets.builder import build_dataloader from mmdet3d.models import build_model from mmdet.apis import set_random_seed -from projects.mmdet3d_plugin.bevformer.apis.test import custom_multi_gpu_test from mmdet.datasets import replace_ImageToTensor -import time -import os.path as osp + +from projects.mmdet3d_plugin.bevformer.apis.test import custom_multi_gpu_test +from projects.mmdet3d_plugin.datasets.builder import build_dataloader def parse_args(): - parser = argparse.ArgumentParser( - description='MMDet test (and eval) a model') - parser.add_argument('config', help='test config file path') - parser.add_argument('checkpoint', help='checkpoint file') - parser.add_argument('--out', help='output result file in pickle format') + parser = argparse.ArgumentParser(description="MMDet test (and eval) a model") + parser.add_argument("config", help="test config file path") + parser.add_argument("checkpoint", help="checkpoint file") + parser.add_argument("--out", help="output result file in pickle format") parser.add_argument( - '--fuse-conv-bn', - action='store_true', - help='Whether to fuse conv and bn, this will slightly increase' - 'the inference speed') + "--fuse-conv-bn", + action="store_true", + help="Whether to fuse conv and bn, this will slightly increase" + "the inference speed", + ) parser.add_argument( - '--format-only', - action='store_true', - help='Format the output results without perform evaluation. It is' - 'useful when you want to format the result to a specific format and ' - 'submit it to the test server') + "--format-only", + action="store_true", + help="Format the output results without perform evaluation. It is" + "useful when you want to format the result to a specific format and " + "submit it to the test server", + ) parser.add_argument( - '--eval', + "--eval", type=str, - nargs='+', + nargs="+", help='evaluation metrics, which depends on the dataset, e.g., "bbox",' - ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC') - parser.add_argument('--show', action='store_true', help='show results') - parser.add_argument( - '--show-dir', help='directory where results will be saved') + ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC', + ) + parser.add_argument("--show", action="store_true", help="show results") + parser.add_argument("--show-dir", help="directory where results will be saved") parser.add_argument( - '--gpu-collect', - action='store_true', - help='whether to use gpu to collect results.') + "--gpu-collect", + action="store_true", + help="whether to use gpu to collect results.", + ) parser.add_argument( - '--tmpdir', - help='tmp directory used for collecting results from multiple ' - 'workers, available when gpu-collect is not specified') - parser.add_argument('--seed', type=int, default=0, help='random seed') + "--tmpdir", + help="tmp directory used for collecting results from multiple " + "workers, available when gpu-collect is not specified", + ) + parser.add_argument("--seed", type=int, default=0, help="random seed") parser.add_argument( - '--deterministic', - action='store_true', - help='whether to set deterministic options for CUDNN backend.') + "--deterministic", + action="store_true", + help="whether to set deterministic options for CUDNN backend.", + ) parser.add_argument( - '--cfg-options', - nargs='+', + "--cfg-options", + nargs="+", action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. 
If the value to ' + help="override some settings in the used config, the key-value pair " + "in xxx=yyy format will be merged into config file. If the value to " 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' - 'Note that the quotation marks are necessary and that no white space ' - 'is allowed.') + "Note that the quotation marks are necessary and that no white space " + "is allowed.", + ) parser.add_argument( - '--options', - nargs='+', + "--options", + nargs="+", action=DictAction, - help='custom options for evaluation, the key-value pair in xxx=yyy ' - 'format will be kwargs for dataset.evaluate() function (deprecate), ' - 'change to --eval-options instead.') + help="custom options for evaluation, the key-value pair in xxx=yyy " + "format will be kwargs for dataset.evaluate() function (deprecate), " + "change to --eval-options instead.", + ) parser.add_argument( - '--eval-options', - nargs='+', + "--eval-options", + nargs="+", action=DictAction, - help='custom options for evaluation, the key-value pair in xxx=yyy ' - 'format will be kwargs for dataset.evaluate() function') + help="custom options for evaluation, the key-value pair in xxx=yyy " + "format will be kwargs for dataset.evaluate() function", + ) parser.add_argument( - '--launcher', - choices=['none', 'pytorch', 'slurm', 'mpi'], - default='none', - help='job launcher') - parser.add_argument('--local_rank', type=int, default=0) + "--launcher", + choices=["none", "pytorch", "slurm", "mpi"], + default="none", + help="job launcher", + ) + parser.add_argument("--local_rank", type=int, default=0) args = parser.parse_args() - if 'LOCAL_RANK' not in os.environ: - os.environ['LOCAL_RANK'] = str(args.local_rank) + if "LOCAL_RANK" not in os.environ: + os.environ["LOCAL_RANK"] = str(args.local_rank) if args.options and args.eval_options: raise ValueError( - '--options and --eval-options cannot be both specified, ' - '--options is deprecated in favor of --eval-options') + "--options and --eval-options cannot be both specified, " + "--options is deprecated in favor of --eval-options" + ) if args.options: - warnings.warn('--options is deprecated in favor of --eval-options') + warnings.warn("--options is deprecated in favor of --eval-options") args.eval_options = args.options return args @@ -110,52 +119,54 @@ def parse_args(): def main(): args = parse_args() - assert args.out or args.eval or args.format_only or args.show \ - or args.show_dir, \ - ('Please specify at least one operation (save/eval/format/show the ' - 'results / save the results) with the argument "--out", "--eval"' - ', "--format-only", "--show" or "--show-dir"') + assert args.out or args.eval or args.format_only or args.show or args.show_dir, ( + "Please specify at least one operation (save/eval/format/show the " + 'results / save the results) with the argument "--out", "--eval"' + ', "--format-only", "--show" or "--show-dir"' + ) if args.eval and args.format_only: - raise ValueError('--eval and --format_only cannot be both specified') + raise ValueError("--eval and --format_only cannot be both specified") - if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): - raise ValueError('The output file must be a pkl file.') + if args.out is not None and not args.out.endswith((".pkl", ".pickle")): + raise ValueError("The output file must be a pkl file.") cfg = Config.fromfile(args.config) if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) # import modules 
from string list. - if cfg.get('custom_imports', None): + if cfg.get("custom_imports", None): from mmcv.utils import import_modules_from_strings - import_modules_from_strings(**cfg['custom_imports']) + + import_modules_from_strings(**cfg["custom_imports"]) # import modules from plguin/xx, registry will be updated - if hasattr(cfg, 'plugin'): + if hasattr(cfg, "plugin"): if cfg.plugin: import importlib - if hasattr(cfg, 'plugin_dir'): + + if hasattr(cfg, "plugin_dir"): plugin_dir = cfg.plugin_dir _module_dir = os.path.dirname(plugin_dir) - _module_dir = _module_dir.split('/') + _module_dir = _module_dir.split("/") _module_path = _module_dir[0] for m in _module_dir[1:]: - _module_path = _module_path + '.' + m + _module_path = _module_path + "." + m print(_module_path) plg_lib = importlib.import_module(_module_path) else: # import dir is the dirpath for the config file _module_dir = os.path.dirname(args.config) - _module_dir = _module_dir.split('/') + _module_dir = _module_dir.split("/") _module_path = _module_dir[0] for m in _module_dir[1:]: - _module_path = _module_path + '.' + m + _module_path = _module_path + "." + m print(_module_path) plg_lib = importlib.import_module(_module_path) # set cudnn_benchmark - if cfg.get('cudnn_benchmark', False): + if cfg.get("cudnn_benchmark", False): torch.backends.cudnn.benchmark = True cfg.model.pretrained = None @@ -163,22 +174,22 @@ def main(): samples_per_gpu = 1 if isinstance(cfg.data.test, dict): cfg.data.test.test_mode = True - samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1) + samples_per_gpu = cfg.data.test.pop("samples_per_gpu", 1) if samples_per_gpu > 1: # Replace 'ImageToTensor' to 'DefaultFormatBundle' - cfg.data.test.pipeline = replace_ImageToTensor( - cfg.data.test.pipeline) + cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline) elif isinstance(cfg.data.test, list): for ds_cfg in cfg.data.test: ds_cfg.test_mode = True samples_per_gpu = max( - [ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test]) + [ds_cfg.pop("samples_per_gpu", 1) for ds_cfg in cfg.data.test] + ) if samples_per_gpu > 1: for ds_cfg in cfg.data.test: ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline) # init distributed env first, since logger depends on the dist info. 
- if args.launcher == 'none': + if args.launcher == "none": distributed = False else: distributed = True @@ -197,27 +208,29 @@ def main(): dist=distributed, shuffle=False, nonshuffler_sampler=cfg.data.nonshuffler_sampler, + use_streaming=False, + cfg=None, ) # build the model and load checkpoint cfg.model.train_cfg = None - model = build_model(cfg.model, test_cfg=cfg.get('test_cfg')) - fp16_cfg = cfg.get('fp16', None) + model = build_model(cfg.model, test_cfg=cfg.get("test_cfg")) + fp16_cfg = cfg.get("fp16", None) if fp16_cfg is not None: wrap_fp16_model(model) - checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu') + checkpoint = load_checkpoint(model, args.checkpoint, map_location="cpu") if args.fuse_conv_bn: model = fuse_conv_bn(model) # old versions did not save class info in checkpoints, this walkaround is # for backward compatibility - if 'CLASSES' in checkpoint.get('meta', {}): - model.CLASSES = checkpoint['meta']['CLASSES'] + if "CLASSES" in checkpoint.get("meta", {}): + model.CLASSES = checkpoint["meta"]["CLASSES"] else: model.CLASSES = dataset.CLASSES # palette for visualization in segmentation tasks - if 'PALETTE' in checkpoint.get('meta', {}): - model.PALETTE = checkpoint['meta']['PALETTE'] - elif hasattr(dataset, 'PALETTE'): + if "PALETTE" in checkpoint.get("meta", {}): + model.PALETTE = checkpoint["meta"]["PALETTE"] + elif hasattr(dataset, "PALETTE"): # segmentation dataset has `PALETTE` attribute model.PALETTE = dataset.PALETTE @@ -229,34 +242,41 @@ def main(): model = MMDistributedDataParallel( model.cuda(), device_ids=[torch.cuda.current_device()], - broadcast_buffers=False) - outputs = custom_multi_gpu_test(model, data_loader, args.tmpdir, - args.gpu_collect) + broadcast_buffers=False, + ) + outputs = custom_multi_gpu_test( + model, data_loader, args.tmpdir, args.gpu_collect + ) rank, _ = get_dist_info() if rank == 0: if args.out: - print(f'\nwriting results to {args.out}') - assert False - #mmcv.dump(outputs['bbox_results'], args.out) + print(f"\nwriting results to {args.out}") + mmcv.dump(outputs, args.out) kwargs = {} if args.eval_options is None else args.eval_options - kwargs['jsonfile_prefix'] = osp.join('test', args.config.split( - '/')[-1].split('.')[-2], time.ctime().replace(' ', '_').replace(':', '_')) + kwargs["jsonfile_prefix"] = osp.join( + "test", + args.config.split("/")[-1].split(".")[-2], + time.ctime().replace(" ", "_").replace(":", "_"), + ) if args.format_only: dataset.format_results(outputs, **kwargs) if args.eval: - eval_kwargs = cfg.get('evaluation', {}).copy() + eval_kwargs = cfg.get("evaluation", {}).copy() # hard-code way to remove EvalHook args for key in [ - 'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', - 'rule' + "interval", + "tmpdir", + "start", + "gpu_collect", + "save_best", + "rule", ]: eval_kwargs.pop(key, None) eval_kwargs.update(dict(metric=args.eval, **kwargs)) - print(dataset.evaluate(outputs, **eval_kwargs)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/tools/test_code.py b/tools/test_code.py deleted file mode 100644 index 7b8b714..0000000 --- a/tools/test_code.py +++ /dev/null @@ -1,15 +0,0 @@ -import torch -num_voxel=4 -num_points_in_pillar=8 -num_points_in_voxel=num_points_in_pillar//num_voxel -Z=8 -H=2 -W=2 -zs = torch.linspace(0, Z - 0, num_points_in_pillar, ).view(num_voxel,num_points_in_voxel, 1, 1).permute(1,0,2,3).expand(num_points_in_voxel,num_voxel, H, W) / Z -xs = torch.linspace(0, W - 0, W, ).view(1,1, 1, W).expand(num_points_in_voxel,num_voxel, H, W) / W -ys 
= torch.linspace(0, H - 0, H,).view(1, 1,H, 1).expand(num_points_in_voxel,num_voxel, H, W) / H
-ref_3d = torch.stack((xs, ys, zs), -1)
-print(ref_3d[0])
-ref_3d = ref_3d.permute(0, 4, 1, 2, 3).flatten(2).permute(0, 2, 1)
-
-print(ref_3d.shape)
diff --git a/tools/test_data_pipeline.py b/tools/test_data_pipeline.py
new file mode 100644
index 0000000..17f5cf2
--- /dev/null
+++ b/tools/test_data_pipeline.py
@@ -0,0 +1,42 @@
+"""
+Run the dataloader pipeline to get the data or examine the pipeline without running the model.
+"""
+
+import os
+import matplotlib.pyplot as plt
+import numpy as np
+import PIL
+import torch
+from mmcv import Config, DictAction
+from mmdet3d.datasets import build_dataset
+from projects.mmdet3d_plugin.datasets import nuscenes_occ
+
+config = "projects/configs/cvtocc/bevformer_nuscenes.py"
+cfg = Config.fromfile(config)
+
+dataset = build_dataset(cfg.data.train)
+result = dataset.__getitem__(200)
+
+# waymo
+# imgs = result["img"].data[-1]
+# lidar2img = result["img_metas"].data[2]["lidar2img"]
+# voxel_label = result["voxel_semantics"]
+
+breakpoint()
+print(result['img_metas'].data.keys()) # dict_keys([0, 1, 2])
+print(result['img_metas'].data[2].keys())
+# dict_keys(['filename', 'pts_filename', 'occ_gt_path', 'scene_token', 'frame_idx', 'scene_idx', 'sample_idx', 'ori_shape', 'img_shape', 'pad_shape', 'lidar2img', 'ego2lidar', 'cam_intrinsic', 'lidar2cam', 'can_bus', 'prev_bev_exists'])
+'''
+(Pdb) print(result['img_metas'].data[2]['pts_filename'])
+./data/occ3d-nus/samples/LIDAR_TOP/n008-2018-05-21-11-06-59-0400__LIDAR_TOP__1526915375547671.pcd.bin
+(Pdb) print(result['img_metas'].data[2]['frame_idx'])
+1
+(Pdb) print(result['img_metas'].data[2]['scene_idx'])
+166
+(Pdb) print(result['img_metas'].data[2]['sample_idx'])
+e981a119b19040159fe112adca805119
+(Pdb) print(result['img_metas'].data[2]['scene_token'])
+15e1fa06e30e438a98430cc1fd0e8a69
+(Pdb) print(result['img_metas'].data[2]['occ_gt_path'])
+gts/scene-0166/e981a119b19040159fe112adca805119/labels.npz
+'''
\ No newline at end of file
diff --git a/tools/test_occ.py b/tools/test_occ.py
deleted file mode 100644
index 1043a8a..0000000
--- a/tools/test_occ.py
+++ /dev/null
@@ -1,262 +0,0 @@
-# ---------------------------------------------
-# Copyright (c) OpenMMLab. All rights reserved.
-# --------------------------------------------- -# Modified by Zhiqi Li -# --------------------------------------------- -import argparse -import mmcv -import os -import torch -import warnings -from mmcv import Config, DictAction -from mmcv.cnn import fuse_conv_bn -from mmcv.parallel import MMDataParallel, MMDistributedDataParallel -from mmcv.runner import (get_dist_info, init_dist, load_checkpoint, - wrap_fp16_model) - -from mmdet3d.apis import single_gpu_test -from mmdet3d.datasets import build_dataset -from projects.mmdet3d_plugin.datasets.builder import build_dataloader -from mmdet3d.models import build_model -from mmdet.apis import set_random_seed -from projects.mmdet3d_plugin.bevformer.apis.test_occ import custom_multi_gpu_test -from mmdet.datasets import replace_ImageToTensor -import time -import os.path as osp - - -def parse_args(): - parser = argparse.ArgumentParser( - description='MMDet test (and eval) a model') - parser.add_argument('config', help='test config file path') - parser.add_argument('checkpoint', help='checkpoint file') - parser.add_argument('--out', help='output result file in pickle format') - parser.add_argument( - '--fuse-conv-bn', - action='store_true', - help='Whether to fuse conv and bn, this will slightly increase' - 'the inference speed') - parser.add_argument( - '--format-only', - action='store_true', - help='Format the output results without perform evaluation. It is' - 'useful when you want to format the result to a specific format and ' - 'submit it to the test server') - parser.add_argument( - '--eval', - type=str, - nargs='+', - help='evaluation metrics, which depends on the dataset, e.g., "bbox",' - ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC') - parser.add_argument('--show', action='store_true', help='show results') - parser.add_argument( - '--show-dir', help='directory where results will be saved') - parser.add_argument( - '--gpu-collect', - action='store_true', - help='whether to use gpu to collect results.') - parser.add_argument( - '--tmpdir', - help='tmp directory used for collecting results from multiple ' - 'workers, available when gpu-collect is not specified') - parser.add_argument('--seed', type=int, default=0, help='random seed') - parser.add_argument( - '--deterministic', - action='store_true', - help='whether to set deterministic options for CUDNN backend.') - parser.add_argument( - '--cfg-options', - nargs='+', - action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. If the value to ' - 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' - 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' - 'Note that the quotation marks are necessary and that no white space ' - 'is allowed.') - parser.add_argument( - '--options', - nargs='+', - action=DictAction, - help='custom options for evaluation, the key-value pair in xxx=yyy ' - 'format will be kwargs for dataset.evaluate() function (deprecate), ' - 'change to --eval-options instead.') - parser.add_argument( - '--eval-options', - nargs='+', - action=DictAction, - help='custom options for evaluation, the key-value pair in xxx=yyy ' - 'format will be kwargs for dataset.evaluate() function') - parser.add_argument( - '--launcher', - choices=['none', 'pytorch', 'slurm', 'mpi'], - default='none', - help='job launcher') - parser.add_argument('--local_rank', type=int, default=0) - args = parser.parse_args() - if 'LOCAL_RANK' not in os.environ: - os.environ['LOCAL_RANK'] = str(args.local_rank) - - if args.options and args.eval_options: - raise ValueError( - '--options and --eval-options cannot be both specified, ' - '--options is deprecated in favor of --eval-options') - if args.options: - warnings.warn('--options is deprecated in favor of --eval-options') - args.eval_options = args.options - return args - - -def main(): - args = parse_args() - - assert args.out or args.eval or args.format_only or args.show \ - or args.show_dir, \ - ('Please specify at least one operation (save/eval/format/show the ' - 'results / save the results) with the argument "--out", "--eval"' - ', "--format-only", "--show" or "--show-dir"') - - if args.eval and args.format_only: - raise ValueError('--eval and --format_only cannot be both specified') - - if args.out is not None and not args.out.endswith(('.pkl', '.pickle')): - raise ValueError('The output file must be a pkl file.') - - cfg = Config.fromfile(args.config) - if args.cfg_options is not None: - cfg.merge_from_dict(args.cfg_options) - # import modules from string list. - if cfg.get('custom_imports', None): - from mmcv.utils import import_modules_from_strings - import_modules_from_strings(**cfg['custom_imports']) - - # import modules from plguin/xx, registry will be updated - if hasattr(cfg, 'plugin'): - if cfg.plugin: - import importlib - if hasattr(cfg, 'plugin_dir'): - plugin_dir = cfg.plugin_dir - _module_dir = os.path.dirname(plugin_dir) - _module_dir = _module_dir.split('/') - _module_path = _module_dir[0] - - for m in _module_dir[1:]: - _module_path = _module_path + '.' + m - print(_module_path) - plg_lib = importlib.import_module(_module_path) - else: - # import dir is the dirpath for the config file - _module_dir = os.path.dirname(args.config) - _module_dir = _module_dir.split('/') - _module_path = _module_dir[0] - for m in _module_dir[1:]: - _module_path = _module_path + '.' 
+ m - print(_module_path) - plg_lib = importlib.import_module(_module_path) - - # set cudnn_benchmark - if cfg.get('cudnn_benchmark', False): - torch.backends.cudnn.benchmark = True - - cfg.model.pretrained = None - # in case the test dataset is concatenated - samples_per_gpu = 1 - if isinstance(cfg.data.test, dict): - cfg.data.test.test_mode = True - samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1) - if samples_per_gpu > 1: - # Replace 'ImageToTensor' to 'DefaultFormatBundle' - cfg.data.test.pipeline = replace_ImageToTensor( - cfg.data.test.pipeline) - elif isinstance(cfg.data.test, list): - for ds_cfg in cfg.data.test: - ds_cfg.test_mode = True - samples_per_gpu = max( - [ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test]) - if samples_per_gpu > 1: - for ds_cfg in cfg.data.test: - ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline) - - # init distributed env first, since logger depends on the dist info. - if args.launcher == 'none': - distributed = False - else: - distributed = True - init_dist(args.launcher, **cfg.dist_params) - - # set random seeds - if args.seed is not None: - set_random_seed(args.seed, deterministic=args.deterministic) - - # build the dataloader - dataset = build_dataset(cfg.data.test) - data_loader = build_dataloader( - dataset, - samples_per_gpu=samples_per_gpu, - workers_per_gpu=cfg.data.workers_per_gpu, - dist=distributed, - shuffle=False, - nonshuffler_sampler=cfg.data.nonshuffler_sampler, - ) - - # build the model and load checkpoint - cfg.model.train_cfg = None - model = build_model(cfg.model, test_cfg=cfg.get('test_cfg')) - fp16_cfg = cfg.get('fp16', None) - if fp16_cfg is not None: - wrap_fp16_model(model) - checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu') - if args.fuse_conv_bn: - model = fuse_conv_bn(model) - # old versions did not save class info in checkpoints, this walkaround is - # for backward compatibility - if 'CLASSES' in checkpoint.get('meta', {}): - model.CLASSES = checkpoint['meta']['CLASSES'] - else: - model.CLASSES = dataset.CLASSES - # palette for visualization in segmentation tasks - if 'PALETTE' in checkpoint.get('meta', {}): - model.PALETTE = checkpoint['meta']['PALETTE'] - elif hasattr(dataset, 'PALETTE'): - # segmentation dataset has `PALETTE` attribute - model.PALETTE = dataset.PALETTE - - if not distributed: - assert False - # model = MMDataParallel(model, device_ids=[0]) - # outputs = single_gpu_test(model, data_loader, args.show, args.show_dir) - else: - model = MMDistributedDataParallel( - model.cuda(), - device_ids=[torch.cuda.current_device()], - broadcast_buffers=False) - outputs = custom_multi_gpu_test(model, data_loader, args.tmpdir, - args.gpu_collect) - - rank, _ = get_dist_info() - if rank == 0: - if args.out: - print(f'\nwriting results to {args.out}') - assert False - #mmcv.dump(outputs['bbox_results'], args.out) - kwargs = {} if args.eval_options is None else args.eval_options - kwargs['jsonfile_prefix'] = osp.join('test', args.config.split( - '/')[-1].split('.')[-2], time.ctime().replace(' ', '_').replace(':', '_')) - if args.format_only: - dataset.format_results(outputs, **kwargs) - - if args.eval: - eval_kwargs = cfg.get('evaluation', {}).copy() - # hard-code way to remove EvalHook args - for key in [ - 'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', - 'rule' - ]: - eval_kwargs.pop(key, None) - eval_kwargs.update(dict(metric=args.eval, **kwargs)) - - print(dataset.evaluate(outputs, **eval_kwargs)) - - -if __name__ == '__main__': - main() diff --git 
a/tools/train.py b/tools/train.py index da1f761..0dfbd10 100644 --- a/tools/train.py +++ b/tools/train.py @@ -3,97 +3,100 @@ # --------------------------------------------- # Modified by Zhiqi Li # --------------------------------------------- - + from __future__ import division import argparse import copy -import mmcv import os import time -import torch import warnings -from mmcv import Config, DictAction -from mmcv.runner import get_dist_info, init_dist from os import path as osp +import mmcv +import torch +from mmcv import Config, DictAction +from mmcv.runner import get_dist_info, init_dist +from mmcv.utils import TORCH_VERSION, digit_version from mmdet import __version__ as mmdet_version from mmdet3d import __version__ as mmdet3d_version -#from mmdet3d.apis import train_model - from mmdet3d.datasets import build_dataset from mmdet3d.models import build_model from mmdet3d.utils import collect_env, get_root_logger from mmdet.apis import set_random_seed from mmseg import __version__ as mmseg_version -from mmcv.utils import TORCH_VERSION, digit_version - def parse_args(): - parser = argparse.ArgumentParser(description='Train a detector') - parser.add_argument('config', help='train config file path') - parser.add_argument('--work-dir', help='the dir to save logs and models') - parser.add_argument( - '--resume-from', help='the checkpoint file to resume from') + parser = argparse.ArgumentParser(description="Train a detector") + parser.add_argument("config", help="train config file path") + parser.add_argument("--work-dir", help="the dir to save logs and models") + parser.add_argument("--resume-from", help="the checkpoint file to resume from") parser.add_argument( - '--no-validate', - action='store_true', - help='whether not to evaluate the checkpoint during training') + "--no-validate", + action="store_true", + help="whether not to evaluate the checkpoint during training", + ) group_gpus = parser.add_mutually_exclusive_group() group_gpus.add_argument( - '--gpus', + "--gpus", type=int, - help='number of gpus to use ' - '(only applicable to non-distributed training)') + help="number of gpus to use " "(only applicable to non-distributed training)", + ) group_gpus.add_argument( - '--gpu-ids', + "--gpu-ids", type=int, - nargs='+', - help='ids of gpus to use ' - '(only applicable to non-distributed training)') - parser.add_argument('--seed', type=int, default=0, help='random seed') + nargs="+", + help="ids of gpus to use " "(only applicable to non-distributed training)", + ) + parser.add_argument("--seed", type=int, default=0, help="random seed") parser.add_argument( - '--deterministic', - action='store_true', - help='whether to set deterministic options for CUDNN backend.') + "--deterministic", + action="store_true", + help="whether to set deterministic options for CUDNN backend.", + ) parser.add_argument( - '--options', - nargs='+', + "--options", + nargs="+", action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file (deprecate), ' - 'change to --cfg-options instead.') + help="override some settings in the used config, the key-value pair " + "in xxx=yyy format will be merged into config file (deprecate), " + "change to --cfg-options instead.", + ) parser.add_argument( - '--cfg-options', - nargs='+', + "--cfg-options", + nargs="+", action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. 
If the value to ' + help="override some settings in the used config, the key-value pair " + "in xxx=yyy format will be merged into config file. If the value to " 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' - 'Note that the quotation marks are necessary and that no white space ' - 'is allowed.') + "Note that the quotation marks are necessary and that no white space " + "is allowed.", + ) parser.add_argument( - '--launcher', - choices=['none', 'pytorch', 'slurm', 'mpi'], - default='none', - help='job launcher') - parser.add_argument('--local_rank', type=int, default=0) + "--launcher", + choices=["none", "pytorch", "slurm", "mpi"], + default="none", + help="job launcher", + ) + parser.add_argument("--local_rank", type=int, default=0) parser.add_argument( - '--autoscale-lr', - action='store_true', - help='automatically scale lr with the number of gpus') + "--autoscale-lr", + action="store_true", + help="automatically scale lr with the number of gpus", + ) args = parser.parse_args() - if 'LOCAL_RANK' not in os.environ: - os.environ['LOCAL_RANK'] = str(args.local_rank) + if "LOCAL_RANK" not in os.environ: + os.environ["LOCAL_RANK"] = str(args.local_rank) if args.options and args.cfg_options: raise ValueError( - '--options and --cfg-options cannot be both specified, ' - '--options is deprecated in favor of --cfg-options') + "--options and --cfg-options cannot be both specified, " + "--options is deprecated in favor of --cfg-options" + ) if args.options: - warnings.warn('--options is deprecated in favor of --cfg-options') + warnings.warn("--options is deprecated in favor of --cfg-options") args.cfg_options = args.options return args @@ -106,47 +109,50 @@ def main(): if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) # import modules from string list. - if cfg.get('custom_imports', None): + if cfg.get("custom_imports", None): from mmcv.utils import import_modules_from_strings - import_modules_from_strings(**cfg['custom_imports']) + + import_modules_from_strings(**cfg["custom_imports"]) # import modules from plguin/xx, registry will be updated - if hasattr(cfg, 'plugin'): + if hasattr(cfg, "plugin"): if cfg.plugin: import importlib - if hasattr(cfg, 'plugin_dir'): + + if hasattr(cfg, "plugin_dir"): plugin_dir = cfg.plugin_dir _module_dir = os.path.dirname(plugin_dir) - _module_dir = _module_dir.split('/') + _module_dir = _module_dir.split("/") _module_path = _module_dir[0] for m in _module_dir[1:]: - _module_path = _module_path + '.' + m + _module_path = _module_path + "." + m print(_module_path) plg_lib = importlib.import_module(_module_path) else: # import dir is the dirpath for the config file _module_dir = os.path.dirname(args.config) - _module_dir = _module_dir.split('/') + _module_dir = _module_dir.split("/") _module_path = _module_dir[0] for m in _module_dir[1:]: - _module_path = _module_path + '.' + m + _module_path = _module_path + "." 
+ m print(_module_path) plg_lib = importlib.import_module(_module_path) from projects.mmdet3d_plugin.bevformer.apis.train import custom_train_model # set cudnn_benchmark - if cfg.get('cudnn_benchmark', False): + if cfg.get("cudnn_benchmark", False): torch.backends.cudnn.benchmark = True # work_dir is determined in this priority: CLI > segment in file > filename if args.work_dir is not None: # update configs according to CLI args if args.work_dir is not None cfg.work_dir = args.work_dir - elif cfg.get('work_dir', None) is None: + elif cfg.get("work_dir", None) is None: # use config filename as default work_dir if cfg.work_dir is None - cfg.work_dir = osp.join('./work_dirs', - osp.splitext(osp.basename(args.config))[0]) + cfg.work_dir = osp.join( + "./work_dirs", osp.splitext(osp.basename(args.config))[0] + ) # if args.resume_from is not None: if args.resume_from is not None and osp.isfile(args.resume_from): cfg.resume_from = args.resume_from @@ -154,14 +160,17 @@ def main(): cfg.gpu_ids = args.gpu_ids else: cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus) - if digit_version(TORCH_VERSION) == digit_version('1.8.1') and cfg.optimizer['type'] == 'AdamW': - cfg.optimizer['type'] = 'AdamW2' # fix bug in Adamw + if ( + digit_version(TORCH_VERSION) == digit_version("1.8.1") + and cfg.optimizer["type"] == "AdamW" + ): + cfg.optimizer["type"] = "AdamW2" # fix bug in Adamw if args.autoscale_lr: # apply the linear scaling rule (https://arxiv.org/abs/1706.02677) - cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8 + cfg.optimizer["lr"] = cfg.optimizer["lr"] * len(cfg.gpu_ids) / 8 # init distributed env first, since logger depends on the dist info. - if args.launcher == 'none': + if args.launcher == "none": distributed = False else: distributed = True @@ -175,47 +184,47 @@ def main(): # dump config cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config))) # init the logger before other steps - timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) - log_file = osp.join(cfg.work_dir, f'{timestamp}.log') + timestamp = time.strftime("%Y%m%d_%H%M%S", time.localtime()) + log_file = osp.join(cfg.work_dir, f"{timestamp}.log") # specify logger name, if we still use 'mmdet', the output info will be # filtered and won't be saved in the log_file # TODO: ugly workaround to judge whether we are training det or seg model - if cfg.model.type in ['EncoderDecoder3D']: - logger_name = 'mmseg' + if cfg.model.type in ["EncoderDecoder3D"]: + logger_name = "mmseg" else: - logger_name = 'mmdet' + logger_name = "mmdet" logger = get_root_logger( - log_file=log_file, log_level=cfg.log_level, name=logger_name) + log_file=log_file, log_level=cfg.log_level, name=logger_name + ) # init the meta dict to record some important information such as # environment info and seed, which will be logged meta = dict() # log env info env_info_dict = collect_env() - env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()]) - dash_line = '-' * 60 + '\n' - logger.info('Environment info:\n' + dash_line + env_info + '\n' + - dash_line) - meta['env_info'] = env_info - meta['config'] = cfg.pretty_text + env_info = "\n".join([(f"{k}: {v}") for k, v in env_info_dict.items()]) + dash_line = "-" * 60 + "\n" + logger.info("Environment info:\n" + dash_line + env_info + "\n" + dash_line) + meta["env_info"] = env_info + meta["config"] = cfg.pretty_text # log some basic info - logger.info(f'Distributed training: {distributed}') + logger.info(f"Distributed training: {distributed}") 
     logger.info(f'Config:\n{cfg.pretty_text}')
 
     # set random seeds
     if args.seed is not None:
-        logger.info(f'Set random seed to {args.seed}, '
-                    f'deterministic: {args.deterministic}')
+        logger.info(
+            f"Set random seed to {args.seed}, " f"deterministic: {args.deterministic}"
+        )
         set_random_seed(args.seed, deterministic=args.deterministic)
     cfg.seed = args.seed
-    meta['seed'] = args.seed
-    meta['exp_name'] = osp.basename(args.config)
+    meta["seed"] = args.seed
+    meta["exp_name"] = osp.basename(args.config)
 
     model = build_model(
-        cfg.model,
-        train_cfg=cfg.get('train_cfg'),
-        test_cfg=cfg.get('test_cfg'))
+        cfg.model, train_cfg=cfg.get("train_cfg"), test_cfg=cfg.get("test_cfg")
+    )
     model.init_weights()
 
     logger.info(f'Model:\n{model}')
@@ -223,7 +232,7 @@ def main():
     if len(cfg.workflow) == 2:
         val_dataset = copy.deepcopy(cfg.data.val)
         # in case we use a dataset wrapper
-        if 'dataset' in cfg.data.train:
+        if "dataset" in cfg.data.train:
             val_dataset.pipeline = cfg.data.train.dataset.pipeline
         else:
             val_dataset.pipeline = cfg.data.train.pipeline
@@ -242,7 +251,9 @@ def main():
             config=cfg.pretty_text,
             CLASSES=datasets[0].CLASSES,
             PALETTE=datasets[0].PALETTE  # for segmentors
-            if hasattr(datasets[0], 'PALETTE') else None)
+            if hasattr(datasets[0], "PALETTE")
+            else None,
+        )
     # add an attribute for visualization convenience
     model.CLASSES = datasets[0].CLASSES
     custom_train_model(
@@ -252,8 +263,9 @@ def main():
         distributed=distributed,
         validate=(not args.no_validate),
         timestamp=timestamp,
-        meta=meta)
+        meta=meta,
+    )


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/tools/vis_tools/README.md b/tools/vis_tools/README.md
new file mode 100644
index 0000000..8e626fd
--- /dev/null
+++ b/tools/vis_tools/README.md
@@ -0,0 +1,79 @@
+# Visualization Tools
+## vis_ref.py
+### usage
+This script processes the `Waymo` or `NuScenes` dataset and generates images with the 3D reference points drawn on them. The dataset configuration is switched with the `IS_WAYMO` variable.
+
+### config parameter
+IS_WAYMO: Boolean flag; set it to `True` to process the Waymo dataset or to `False` to process the NuScenes dataset.
+Other configuration parameters such as NOT_OBSERVED, FREE, and OCCUPIED are defined in the script with dataset-specific values.
+
+### input
+ref_file: Path to the reference file; the file is selected according to the value of IS_WAYMO (e.g., work_dirs/ref_waymo.pkl or work_dirs/ref_nuscene.pkl).
+metas_file: Path to the metadata file; the file is selected according to the value of IS_WAYMO (e.g., work_dirs/metas_waymo.pkl or work_dirs/metas_nuscene.pkl).
+
+### output
+"work_dirs/waymo{}_debug_{}.jpg".format(IS_WAYMO, cam_id)
+
+### command line
+```sh
+python tools/vis_tools/vis_ref.py
+```
+
+## vis_ref_dataloader.py
+### usage
+Similar to vis_ref.py, but this script fetches data directly through a dataloader instead of reading saved files.
+
+### config parameter
+config: Path to the required config file.
+
+### output
+"work_dirs/", "{}_{}.jpg".format("ref", img_idx)
+
+### command line
+```sh
+python -m tools.vis_tools.vis_ref_dataloader
+```
+
+## vis_occ.py
+
+### usage
+This script visualizes the ground-truth source files of Occ3D-Waymo. With minor modifications it can serve a variety of occupancy visualization needs.
+
+
+### config parameter
+data_dir: Directory containing the npz files to be visualized; the script renders files 0 through 99 in that directory. The npz files contain the keys "voxel_label", "origin_voxel_state", "final_voxel_state", "infov", and "ego2global".
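+
+A minimal sketch (not part of the script) of inspecting one ground-truth frame, assuming the keys listed above; the file path is only a placeholder:
+
+```python
+import numpy as np
+
+data = np.load("path/to/voxel/000/000.npz")  # placeholder; use your data_dir
+voxel_label = data["voxel_label"]            # per-voxel semantic labels
+lidar_mask = data["origin_voxel_state"]      # LiDAR visibility state
+camera_mask = data["final_voxel_state"]      # camera visibility state
+infov = data["infov"]                        # field-of-view mask
+ego2global = data["ego2global"]              # ego-to-global transform
+print(voxel_label.shape, np.unique(voxel_label))
+```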
+
+### command line
+```sh
+python tools/vis_tools/vis_occ.py
+```
+
+## vis_pose.py
+
+### usage
+This script projects point cloud data onto images and generates image files with the point cloud visualization results.
+
+### config parameter
+#### constant
+
+- `point_cloud_range`: Defines the coordinate range of the point cloud.
+- `voxel_size`: Defines the size of a voxel.
+- `IS_WAYMO`: Flag indicating whether the dataset is Waymo; different parameters are set based on this flag.
+
+#### directory
+- `pcd_path`: Path to the point cloud data.
+- `img_path`: Path to the image data.
+- `voxel_path`: Path to the voxel label data.
+
+### output
+"work_dirs/", "{}_{}.jpg".format("src", cam_idx)
+
+### command line
+```sh
+python -m tools.vis_tools.vis_pose
+```
+
+## utils.py
+
+### usage
+This is a utility file defining the helper functions used by the other visualization scripts, including `get_cv_color`, `get_open3d_color`, `display_laser_on_image`, and `volume2points`.
diff --git a/tools/vis_tools/__init__.py b/tools/vis_tools/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tools/vis_tools/utils.py b/tools/vis_tools/utils.py
new file mode 100644
index 0000000..7a474b5
--- /dev/null
+++ b/tools/vis_tools/utils.py
@@ -0,0 +1,101 @@
+import numpy as np
+import torch
+
+PALETTE = np.asarray(
+    [
+        [0.0, 0.0, 0.0],
+        [0.2745098, 0.50980392, 0.70588235],
+        [0.0, 0.0, 0.90196078],
+        [0.52941176, 0.80784314, 0.92156863],
+        [0.39215686, 0.58431373, 0.92941176],
+        [0.85882353, 0.43921569, 0.57647059],
+        [0.0, 0.0, 0.50196078],
+        [0.94117647, 0.50196078, 0.50196078],
+        [0.54117647, 0.16862745, 0.88627451],
+        [0.43921569, 0.50196078, 0.56470588],
+        [0.82352941, 0.41176471, 0.11764706],
+        [255 / 255, 0, 255 / 255],
+        [0.18431373, 0.30980392, 0.30980392],
+        [0.7372549, 0.56078431, 0.56078431],
+        [0.8627451, 0.07843137, 0.23529412],
+        [1.0, 0.49803922, 0.31372549],
+        [0, 175 / 255, 0],
+        [1.0, 1, 1.0],
+        [0.5, 0.5, 0.5],
+        [1.0, 0.3254902, 0.0],
+        [1.0, 0.84313725, 0.0],
+        [1.0, 0.23921569, 0.38823529],
+        [1.0, 0.54901961, 0.0],
+        [1.0, 0.38823529, 0.27843137],
+        [0.0, 0.81176471, 0.74901961],
+        [0.68627451, 0.0, 0.29411765],
+        [0.29411765, 0.0, 0.29411765],
+        [0.43921569, 0.70588235, 0.23529412],
+        [0.87058824, 0.72156863, 0.52941176],
+        [1.0, 0.89411765, 0.76862745],
+        [0.0, 0.68627451, 0.0],
+        [1.0, 0.94117647, 0.96078431],
+    ]
+)
+
+
+def get_cv_color(i, begin=0):
+    return PALETTE[begin + i % (len(PALETTE) - begin)] * 255
+
+
+def get_open3d_color(i, begin=0):
+    return PALETTE[begin + i % (len(PALETTE) - begin)]
+
+
+def display_laser_on_image(img, pcl, vehicle_to_image):
+    """
+    pcl: ego frame
+    """
+    # Convert the pointcloud to homogeneous coordinates.
+    pcl1 = np.concatenate((pcl, np.ones_like(pcl[:, 0:1])), axis=1)
+
+    # Transform the point cloud to image space.
+    proj_pcl = np.einsum("ij,bj->bi", vehicle_to_image, pcl1)
+
+    # Filter LIDAR points which are behind the camera.
+    mask = np.ones_like(proj_pcl[:, 0], dtype=bool)
+    mask = np.logical_and(mask, proj_pcl[:, 2] > 0)
+    # mask = proj_pcl[:,2] > 0
+    # proj_pcl = proj_pcl[mask]
+
+    # Project the point cloud onto the image.
+    proj_pcl = proj_pcl[:, :2] / proj_pcl[:, 2:3]
+    mask = np.logical_and(mask, proj_pcl[:, 0] > 1)
+    mask = np.logical_and(mask, proj_pcl[:, 0] < img.shape[1] - 1)
+    mask = np.logical_and(mask, proj_pcl[:, 1] > 1)
+    mask = np.logical_and(mask, proj_pcl[:, 1] < img.shape[0] - 1)
+    # Filter points which are outside the image.
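+    # NOTE: the commented-out block below is an in-place variant of the bounds check
+    # above; the active code instead returns `mask` so callers can filter points and
+    # their colors together.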
+ # mask = np.logical_and( + # np.logical_and(proj_pcl[:,0] > 0, proj_pcl[:,0] < img.shape[1]), + # np.logical_and(proj_pcl[:,1] > 0, proj_pcl[:,1] < img.shape[1])) + + # proj_pcl = proj_pcl[mask] + return proj_pcl, mask + + +def volume2points(voxel, voxel_size, point_cloud_range): + is_numpy = False + if isinstance(voxel, np.ndarray): + voxel = torch.Tensor(voxel) + is_numpy = True + _device = voxel.device + voxel_size_device = torch.tensor(voxel_size).to(_device) + point_cloud_range_device = torch.tensor(point_cloud_range).to(_device) + xx = torch.arange(0, voxel.shape[0]).to(_device).to(torch.float32) + yy = torch.arange(0, voxel.shape[1]).to(_device).to(torch.float32) + zz = torch.arange(0, voxel.shape[2]).to(_device).to(torch.float32) + # zz = torch.linspace(0, 6.4 - 1, 16).to(_device).to(torch.float32) + grid_x, grid_y, grid_z = torch.meshgrid(xx, yy, zz, indexing="ij") + voxel_coors = torch.stack([grid_x, grid_y, grid_z], axis=-1) + voxel_locs = (voxel_coors + 0.5) + voxel_locs = voxel_locs * voxel_size_device + voxel_locs = voxel_locs + point_cloud_range_device[:3] + + if is_numpy: + voxel_locs = voxel_locs.cpu().numpy() + return voxel_locs diff --git a/tools/vis_tools/vis_occ.py b/tools/vis_tools/vis_occ.py new file mode 100644 index 0000000..2835a6e --- /dev/null +++ b/tools/vis_tools/vis_occ.py @@ -0,0 +1,521 @@ +""" +This is a utility code for visualizing occupancy voxel. +""" + +import math +import os +import pickle +from glob import glob +from typing import Dict, Iterable, List, Tuple + +import cv2 +import numpy as np +import open3d as o3d +import torch +from PIL import Image +from tqdm import tqdm + +NOT_OBSERVED = -1 +FREE = 0 +OCCUPIED = 1 + +colormap_to_colors = np.array( + [ + [0, 0, 0, 255], # 0 undefined + [255, 158, 0, 255], # 1 car orange + [0, 0, 230, 255], # 2 pedestrian Blue + [47, 79, 79, 255], # 3 sign Darkslategrey + [220, 20, 60, 255], # 4 CYCLIST Crimson + [255, 69, 0, 255], # 5 traiffic_light Orangered + [255, 140, 0, 255], # 6 pole Darkorange + [233, 150, 70, 255], # 7 construction_cone Darksalmon + [255, 61, 99, 255], # 8 bycycle Red + [112, 128, 144, 255], # 9 motorcycle Slategrey + [222, 184, 135, 255], # 10 building Burlywood + [0, 175, 0, 255], # 11 vegetation Green + [165, 42, 42, 255], # 12 trunk nuTonomy green + [0, 207, 191, 255], # 13 curb, road, lane_marker, other_ground + [75, 0, 75, 255], # 14 walkable, sidewalk + [255, 0, 0, 255], # 15 unobsrvd + [128, 128, 128, 255], # 16 for vis + ], + dtype=np.float32, +) + +LINE_SEGMENTS = [ + [4, 0], + [3, 7], + [5, 1], + [6, 2], # lines along x-axis + [5, 4], + [5, 6], + [6, 7], + [7, 4], # lines along x-axis + [0, 1], + [1, 2], + [2, 3], + [3, 0], +] # lines along y-axis + + +def _expand_dim(array): + return np.concatenate((array, np.ones_like(array)[:, :1]), axis=1) + + +def voxel2points(voxel, occ_show, voxelSize): + occIdx = torch.where(occ_show) + points = torch.cat( + ( + occIdx[0][:, None] * voxelSize[0], + occIdx[1][:, None] * voxelSize[1], + occIdx[2][:, None] * voxelSize[2], + ), + dim=1, + ) + return points, voxel[occIdx], occIdx + + +def voxel_profile(voxel, voxel_size): + centers = torch.cat((voxel[:, :2], voxel[:, 2][:, None] - voxel_size[2] / 2), dim=1) + wlh = torch.cat( + ( + torch.tensor(voxel_size[0]).repeat(centers.shape[0])[:, None], + torch.tensor(voxel_size[1]).repeat(centers.shape[0])[:, None], + torch.tensor(voxel_size[2]).repeat(centers.shape[0])[:, None], + ), + dim=1, + ) + yaw = torch.full_like(centers[:, 0:1], 0) + return torch.cat((centers, wlh, yaw), dim=1) + + +def 
rotz(t): + """Rotation about the z-axis.""" + c = torch.cos(t) + s = torch.sin(t) + return torch.tensor([[c, -s, 0], [s, c, 0], [0, 0, 1]]) + + +def my_compute_box_3d(center, size, heading_angle): + h, w, l = size[:, 2], size[:, 0], size[:, 1] + heading_angle = -heading_angle - math.pi / 2 + center[:, 2] = center[:, 2] + h / 2 + # R = rotz(1 * heading_angle) + l, w, h = (l / 2).unsqueeze(1), (w / 2).unsqueeze(1), (h / 2).unsqueeze(1) + x_corners = torch.cat([-l, l, l, -l, -l, l, l, -l], dim=1)[..., None] + y_corners = torch.cat([w, w, -w, -w, w, w, -w, -w], dim=1)[..., None] + z_corners = torch.cat([h, h, h, h, -h, -h, -h, -h], dim=1)[..., None] + # corners_3d = R @ torch.vstack([x_corners, y_corners, z_corners]) + corners_3d = torch.cat([x_corners, y_corners, z_corners], dim=2) + corners_3d[..., 0] += center[:, 0:1] + corners_3d[..., 1] += center[:, 1:2] + corners_3d[..., 2] += center[:, 2:3] + return corners_3d + + +def show_point_cloud( + points: np.ndarray, + colors=True, + points_colors=None, + bbox3d=None, + voxelize=False, + bbox_corners=None, + linesets=None, + vis=None, + offset=[0, 0, 0], + visible=True, +) -> None: + """ + :param points: + :param colors: false # do not show the color of point + :param points_colors: + :param bbox3d: voxel boundary Nx7 (center, wlh, yaw=0) + :param voxelize: false # do not draw the voxel boundary + :return: + """ + if vis is None: + vis = o3d.visualization.VisualizerWithKeyCallback() + vis.create_window(visible=visible) + if isinstance(offset, list) or isinstance(offset, tuple): + offset = np.array(offset) + + opt = vis.get_render_option() + opt.background_color = np.asarray([1, 1, 1]) + # opt.background_color = np.asarray([0, 0, 0]) + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(points + offset) + if colors: + pcd.colors = o3d.utility.Vector3dVector(points_colors[:, :3]) + mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame( + size=1.6, origin=[0, 0, 0] + ) + + vis.add_geometry(pcd) + if voxelize: + line_sets = o3d.geometry.LineSet() + line_sets.points = o3d.open3d.utility.Vector3dVector( + bbox_corners.reshape((-1, 3)) + offset + ) + line_sets.lines = o3d.open3d.utility.Vector2iVector(linesets.reshape((-1, 2))) + line_sets.paint_uniform_color((0, 0, 0)) + # line_sets.colors = o3d.open3d.utility.Vector3dVector(colors) + # linesets = _draw_bboxes(bbox3d, vis) + + vis.add_geometry(mesh_frame) + vis.add_geometry(line_sets) + # vis.run() + return vis + + +def main(occ_state, occ_show, voxel_size, vis=None, offset=[0, 0, 0]): + # occ_state, voxel_size = data['occ_state'].cpu(), data['voxel_size'] + colors = colormap_to_colors / 255 + pcd, labels, occIdx = voxel2points(occ_state, occ_show, voxel_size) + _labels = labels % len(colors) + pcds_colors = colors[_labels] + bboxes = voxel_profile(pcd, voxel_size) + bboxes_corners = my_compute_box_3d(bboxes[:, 0:3], bboxes[:, 3:6], bboxes[:, 6:7]) + # bboxes_corners = torch.cat([my_compute_box_3d(box[0:3], box[3:6], box[6:7])[None, ...] 
for box in bboxes], dim=0) + bases_ = torch.arange(0, bboxes_corners.shape[0] * 8, 8) + edges = torch.tensor( + [ + [0, 1], + [1, 2], + [2, 3], + [3, 0], + [4, 5], + [5, 6], + [6, 7], + [7, 4], + [0, 4], + [1, 5], + [2, 6], + [3, 7], + ] + ) # lines along y-axis + edges = edges.reshape((1, 12, 2)).repeat(bboxes_corners.shape[0], 1, 1) + edges = edges + bases_[:, None, None] + vis = show_point_cloud( + points=pcd.numpy(), + colors=True, + points_colors=pcds_colors, + voxelize=True, + bbox3d=bboxes.numpy(), + bbox_corners=bboxes_corners.numpy(), + linesets=edges.numpy(), + vis=vis, + offset=offset, + ) + return vis + + +def generate_the_ego_car(): + ego_range = [-2, -1, 0, 2, 1, 1.5] + ego_voxel_size = [0.1, 0.1, 0.1] + ego_xdim = int((ego_range[3] - ego_range[0]) / ego_voxel_size[0]) + ego_ydim = int((ego_range[4] - ego_range[1]) / ego_voxel_size[1]) + ego_zdim = int((ego_range[5] - ego_range[2]) / ego_voxel_size[2]) + ego_voxel_num = ego_xdim * ego_ydim * ego_zdim + temp_x = np.arange(ego_xdim) + temp_y = np.arange(ego_ydim) + temp_z = np.arange(ego_zdim) + ego_xyz = np.stack(np.meshgrid(temp_y, temp_x, temp_z), axis=-1).reshape(-1, 3) + ego_point_x = (ego_xyz[:, 0:1] + 0.5) / ego_xdim * ( + ego_range[3] - ego_range[0] + ) + ego_range[0] + ego_point_y = (ego_xyz[:, 1:2] + 0.5) / ego_ydim * ( + ego_range[4] - ego_range[1] + ) + ego_range[1] + ego_point_z = (ego_xyz[:, 2:3] + 0.5) / ego_zdim * ( + ego_range[5] - ego_range[2] + ) + ego_range[2] + ego_point_xyz = np.concatenate((ego_point_y, ego_point_x, ego_point_z), axis=-1) + ego_points_label = (np.ones((ego_point_xyz.shape[0])) * 16).astype(np.uint8) + ego_dict = {} + ego_dict["point"] = ego_point_xyz + ego_dict["label"] = ego_points_label + return ego_point_xyz + + +def increase_brightness(img, value=30): + hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) + h, s, v = cv2.split(hsv) + + lim = 255 - value + v[v > lim] = 255 + v[v <= lim] += value + + final_hsv = cv2.merge((h, s, v)) + img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR) + return img + + +def load_view_point(pcd, filename): + vis = o3d.visualization.Visualizer() + vis.create_window() + ctr = vis.get_view_control() + trajectory = o3d.io.read_pinhole_camera_trajectory(filename) + vis.add_geometry(pcd) + ctr.convert_from_pinhole_camera_parameters( + trajectory.intrinsic, trajectory.extrinsic[0] + ) + vis.run() + vis.destroy_window() + + +def video(voxel_path, save_path, vis_voxel01=True, interval=5, vis_3d=False): + ROAD_LABEL_START = 13 + ROAD_LABEL_STOP = 14 + FILL_ROAD = True + FREE_LABEL = 23 + fps = 10 // interval + + # you can use Ctrl+c and Ctrl+v to get vis_param + vis_param = { + "class_name": "ViewTrajectory", + "interval": 29, + "is_loop": False, + "trajectory": [ + { + "boundingbox_max": [ + 80.000015258789062, + 80.000015258789062, + 7.8000006675720215, + ], + "boundingbox_min": [-80.0, -80.0, -5.0], + "field_of_view": 60.0, + "front": [ + -0.63153029317382392, + -0.0044086852255532157, + 0.77533866942025165, + ], + "lookat": [9.0382292077897386, 0.44317978063165703, 7.0656591627410501], + "up": [0.77529242561670686, 0.0087221808444446977, 0.63154222213774414], + "zoom": 0.080000000000000002, + } + ], + "version_major": 1, + "version_minor": 0, + } + voxelsize = [0.1, 0.1, 0.2] if vis_voxel01 else [0.4, 0.4, 0.4] + point_cloud_range = ( + [-80, -80, -5, 80, 80, 7.8] if vis_voxel01 else [-40, -40, -1, 40, 40, 5.4] + ) + + color = colormap_to_colors / 255 + if not os.path.exists(save_path): + os.makedirs(save_path) + + front_vec = 
np.array(vis_param["trajectory"][0]["front"])[..., None] + up_vec = np.array(vis_param["trajectory"][0]["up"])[..., None] + zoom = vis_param["trajectory"][0]["zoom"] + lookat = vis_param["trajectory"][0]["lookat"] + + npz_file_list = sorted(glob(os.path.join(voxel_path, "*npz"))) + large_voxel_npz_file_list = sorted(glob(os.path.join(voxel_path, "*_04.npz"))) + if not vis_voxel01: + voxel_files = large_voxel_npz_file_list + else: + voxel_files = sorted( + list(set(npz_file_list).difference(set(large_voxel_npz_file_list))) + ) + for frame_idx, voxel_file in enumerate(tqdm(voxel_files[::interval])): + voxel = np.load(voxel_file)["voxel_label"] + if FILL_ROAD: + # fill road for vis + road = voxel == ROAD_LABEL_START + # road_level=torch.argmax(torch.bincount(torch.nonzero(road)[:, 2])) + counts = np.bincount(np.nonzero(road)[2]) + road_level = max(np.argmax(counts) - 5, 0) + voxel[:, :, road_level] = 16 # gray color + + voxel = torch.from_numpy(voxel) + occ_show = voxel != FREE_LABEL + points, labels, occIdx = voxel2points(voxel, occ_show, voxelsize) + points[:, 0] += point_cloud_range[0] + points[:, 1] += point_cloud_range[1] + points[:, 2] += point_cloud_range[2] + + pcd_colors = color[labels.numpy().astype(int)] + bboxes = voxel_profile(torch.tensor(points), voxelsize) + bboxes_corners = my_compute_box_3d( + bboxes[:, 0:3], bboxes[:, 3:6], bboxes[:, 6:7] + ) + bases_ = torch.arange(0, bboxes_corners.shape[0] * 8, 8) + edges = torch.tensor( + [ + [0, 1], + [1, 2], + [2, 3], + [3, 0], + [4, 5], + [5, 6], + [6, 7], + [7, 4], + [0, 4], + [1, 5], + [2, 6], + [3, 7], + ] + ) # lines along y-axis + edges = edges.reshape((1, 12, 2)).repeat(bboxes_corners.shape[0], 1, 1) + edges = edges + bases_[:, None, None] + vis = show_point_cloud( + points=points.numpy(), + colors=True, + points_colors=pcd_colors, + voxelize=True, + bbox_corners=bboxes_corners.numpy(), + linesets=edges.numpy(), + visible=vis_3d, + ) + + ego_pcd = o3d.geometry.PointCloud() + ego_points = generate_the_ego_car() + ego_pcd.points = o3d.utility.Vector3dVector(ego_points) + vis.add_geometry(ego_pcd) + + # view control + view_control = vis.get_view_control() + view_control.set_zoom(zoom) + view_control.set_up(up_vec) + view_control.set_front(front_vec) + view_control.set_lookat(lookat) + vis.poll_events() + vis.update_renderer() + vis.get_render_option().point_size = 8 + + if vis_3d: + vis.run() + # save to image + img = vis.capture_screen_float_buffer(True) + img = np.array(img) + img = (img * 255).astype(np.uint8) + + img = increase_brightness(img, value=20) + im = Image.fromarray(img) + out_file = os.path.join(save_path, f"{str(frame_idx).zfill(3)}.jpg") + im.save(out_file) + print("save image to ", out_file) + im.close() + del im + + vis.clear_geometries() + vis.destroy_window() + del view_control + del vis + + # CALL THIS COMAND TO GENERATE VIDEO + command = 'ffmpeg -r {} -i {}/%03d.jpg -c:v libx264 -vf "fps={},format=yuv420p" -preset medium -crf 30 {}/video.mp4'.format( + fps, save_path, fps, save_path + ) + print(os.popen(command).read()) + + +if __name__ == "__main__": + data_dir = "/home/user/tmp/voxel/000/" + + NOT_OBSERVED = -1 + FREE = 0 + OCCUPIED = 1 + FREE_LABEL = 23 + MAX_POINT_NUM = 10 + ROAD_LABEL_START_BEFORE_REMAP = 24 + ROAD_LABEL_STOP_BEFORE_REMAP = 27 + ROAD_LABEL_START = 13 + ROAD_LABEL_STOP = 14 + BINARY_OBSERVED = 1 + BINARY_NOT_OBSERVED = 0 + STUFF_START = 9 # 0-10 thing 11-17 stuff + + VOXEL_SIZE = [0.1, 0.1, 0.2] + POINT_CLOUD_RANGE = [-80, -80, -5, 80, 80, 7.8] + SPTIAL_SHAPE = [1600, 1600, 64] + 
TGT_VOXEL_SIZE = [0.4, 0.4, 0.4] + TGT_POINT_CLOUD_RANGE = [-40, -40, -1, 40, 40, 5.4] + VIS = False + FILL_ROAD = False + + voxel_size = VOXEL_SIZE + point_cloud_range = POINT_CLOUD_RANGE + for idx in range(100): + file = os.path.join(data_dir, f"{str(idx).zfill(3)}.npz") + data = np.load(file) + voxel_label = data["voxel_label"] + lidar_mask = data["origin_voxel_state"] + camera_mask = data["final_voxel_state"] + infov = data["infov"] + ego2global = data["ego2global"] + + if FILL_ROAD: + # fill road for vis + road = voxel_label == ROAD_LABEL_START + # road_level=torch.argmax(torch.bincount(torch.nonzero(road)[:, 2])) + road_level = (np.nonzero(road)[2]).min() + voxel_label[:, :, road_level] = 16 # gray color + + ignore_labels = [FREE_LABEL] + mask = np.zeros_like(voxel_label, dtype=np.bool) + for ignore_label in ignore_labels: + mask = np.logical_or(voxel_label == ignore_label, mask) + mask = np.logical_not(mask) + + voxel_state = lidar_mask + voxel_label_vis = voxel_label + voxel_show = voxel_label != FREE_LABEL + vis = main( + torch.from_numpy(voxel_label_vis), + torch.from_numpy(voxel_show), + voxel_size=voxel_size, + vis=None, + offset=[voxel_state.shape[0] * voxel_size[0] * 1.2 * 0, 0, 0], + ) + + voxel_label_vis = voxel_label + voxel_show = np.logical_and( + voxel_label != FREE_LABEL, lidar_mask == BINARY_OBSERVED + ) + vis = main( + torch.from_numpy(voxel_label_vis), + torch.from_numpy(voxel_show), + voxel_size=voxel_size, + vis=vis, + offset=[voxel_state.shape[0] * voxel_size[0] * 1.2 * 1, 0, 0], + ) + + voxel_label_vis = voxel_label + voxel_show = np.logical_and( + voxel_label != FREE_LABEL, camera_mask == BINARY_OBSERVED + ) + vis = main( + torch.from_numpy(voxel_label_vis), + torch.from_numpy(voxel_show), + voxel_size=voxel_size, + vis=vis, + offset=[voxel_state.shape[0] * voxel_size[0] * 1.2 * 2, 0, 0], + ) + + voxel_label_vis = voxel_label + voxel_show = np.logical_and(voxel_label != FREE_LABEL, infov == True) + vis = main( + torch.from_numpy(voxel_label_vis), + torch.from_numpy(voxel_show), + voxel_size=voxel_size, + vis=vis, + offset=[voxel_state.shape[0] * voxel_size[0] * 1.2 * 3, 0, 0], + ) + + ego_point = generate_the_ego_car() + ego_point[:, 0] += point_cloud_range[3] + ego_point[:, 1] += point_cloud_range[4] + ego_point[:, 2] += point_cloud_range[5] + ego_pcd = o3d.geometry.PointCloud() + ego_pcd.points = o3d.utility.Vector3dVector(ego_point) + vis.add_geometry(ego_pcd) + + vis.run() + vis.poll_events() + vis.update_renderer() + # vis.capture_screen_image(f'output/ray.jpg') + + del vis diff --git a/tools/vis_tools/vis_pose.py b/tools/vis_tools/vis_pose.py new file mode 100644 index 0000000..867001f --- /dev/null +++ b/tools/vis_tools/vis_pose.py @@ -0,0 +1,120 @@ +# Copyright 2022 tao.jiang +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +''' +The script is used to project the point cloud data onto the image and generate an image file with the result of the point cloud visualization. 
+''' +import os +import pickle +from pathlib import Path + +import numpy as np +import PIL +import torch +from matplotlib import pyplot as plt + +from .utils import * + +pcd_path = Path( + "/public/MARS/datasets/waymo_v1.3.1_untar/kitti_format/training/velodyne/0001050.bin" +) +img_path = Path("/public/MARS/datasets/waymo_v1.4.0/gt_points/001/050_cam.pkl") +voxel_path = Path("/public/MARS/datasets/waymo_v1.4.0/voxel/001/050.npz") +point_cloud_range = [-80, -80, -5.0, 80, 80, 7.8] +voxel_size = [0.1, 0.1, 0.2] + +IS_WAYMO = True +if IS_WAYMO: + NOT_OBSERVED = -1 + FREE = 0 + OCCUPIED = 1 + FREE_LABEL = 23 + MAX_POINT_NUM = 10 + ROAD_LABEL_START_BEFORE_REMAP = 24 + ROAD_LABEL_STOP_BEFORE_REMAP = 27 + ROAD_LABEL_START = 13 + ROAD_LABEL_STOP = 14 + BINARY_OBSERVED = 1 + BINARY_NOT_OBSERVED = 0 + STUFF_START = 9 # 0-10 thing 11-17 stuff + # DO NOT CHANGE + FLT_MAX = 1e9 + RAY_STOP_DISTANCE_VOXEL = 1 + DISTANCE_THESHOLD_IGNORE = 1.0 + RAY_ROAD_IGNORE_DISTANCE = 1.0 + + num_cams = 5 +else: + NOT_OBSERVED = -1 + FREE = 0 + OCCUPIED = 1 + FREE_LABEL = 17 + MAX_POINT_NUM = 10 + ROAD_LABEL_START_BEFORE_REMAP = 24 + ROAD_LABEL_STOP_BEFORE_REMAP = 27 + ROAD_LABEL_START = 11 + ROAD_LABEL_STOP = 14 + BINARY_OBSERVED = 1 + BINARY_NOT_OBSERVED = 0 + STUFF_START = 10 # 0-10 thing 11-17 stuff + num_cams = 6 + +if __name__ == "__main__": + with open(img_path, "rb") as f: + img_infos = pickle.load(f) + + pcd = np.fromfile(pcd_path, dtype=np.float32).reshape((-1, 6))[:, :3] # ego frame + labels = np.load(voxel_path) + voxel_label = labels["voxel_label"] + origin_voxel_state = labels["origin_voxel_state"] + final_voxel_state = labels["final_voxel_state"] + # ego2global = labels['ego2global'] + points = volume2points(voxel_label, voxel_size, point_cloud_range) + points = points.reshape(-1, 3) + points_label = voxel_label.reshape(-1) + # points = pcd + points_colors = np.zeros((points_label.shape[0], 3)) + if points_label is not None: + for idx in np.unique(points_label): + if idx == FREE_LABEL: + continue + points_colors[points_label == idx] = get_cv_color(idx, begin=1) + points_colors = points_colors / 255.0 + mask = points_label != FREE_LABEL + points = points[mask] + points_label = points_label[mask] + points_colors = points_colors[mask] + + for cam_idx in range(5): + img_info = img_infos[cam_idx] + vehicle2image = img_info["intrinsics"] @ np.linalg.inv(img_info["sensor2ego"]) + img = np.array(img_info["img"]) + pts, mask = display_laser_on_image(img, points, vehicle2image) + pts = pts[mask] + pts_colors = points_colors[mask] + fig, ax = plt.subplots(1, 2, figsize=(18, 8)) + ax[0].imshow(img) + ax[1].imshow(img) + ax[1].scatter(pts[:, 0], pts[:, 1], c=pts_colors, s=1) + ax[0].axis("off") + ax[1].axis("off") + # plt.show() + # plt save too slow, use PIL + # If we haven't already shown or saved the plot, then we need to draw the figure first... + fig.canvas.draw() + img = PIL.Image.frombytes( + "RGB", fig.canvas.get_width_height(), fig.canvas.tostring_rgb() + ) + img.save(os.path.join("work_dirs/", "{}_{}.jpg".format("src", cam_idx))) + plt.close("all") diff --git a/tools/vis_tools/vis_ref.py b/tools/vis_tools/vis_ref.py new file mode 100644 index 0000000..7143905 --- /dev/null +++ b/tools/vis_tools/vis_ref.py @@ -0,0 +1,143 @@ +#!/usr/bin/env python3 +""" +This script processes data from Waymo or NuScenes datasets and generates annotated images. + +Usage: + python script_name.py + +Configuration: + IS_WAYMO: Boolean flag to toggle between Waymo and NuScenes dataset configurations. 
+ ref_file: reference file containing 3D reference points and camera reference points. + metas_file: metadata file containing img_metas. +""" + +import os +import pickle as pkl +import cv2 +import numpy as np +import torch + +# Configuration for Waymo and NuScenes datasets +IS_WAYMO = True +if IS_WAYMO: + NOT_OBSERVED = -1 + FREE = 0 + OCCUPIED = 1 + FREE_LABEL = 23 + MAX_POINT_NUM = 10 + ROAD_LABEL_START_BEFORE_REMAP = 24 + ROAD_LABEL_STOP_BEFORE_REMAP = 27 + ROAD_LABEL_START = 13 + ROAD_LABEL_STOP = 14 + BINARY_OBSERVED = 1 + BINARY_NOT_OBSERVED = 0 + STUFF_START = 9 + FLT_MAX = 1e9 + RAY_STOP_DISTANCE_VOXEL = 1 + DISTANCE_THESHOLD_IGNORE = 1.0 + RAY_ROAD_IGNORE_DISTANCE = 1.0 + num_cams = 5 + ref_file = "work_dirs/ref_waymo.pkl" + metas_file = "work_dirs/metas_waymo.pkl" +else: + NOT_OBSERVED = -1 + FREE = 0 + OCCUPIED = 1 + FREE_LABEL = 17 + MAX_POINT_NUM = 10 + ROAD_LABEL_START_BEFORE_REMAP = 24 + ROAD_LABEL_STOP_BEFORE_REMAP = 27 + ROAD_LABEL_START = 11 + ROAD_LABEL_STOP = 14 + BINARY_OBSERVED = 1 + BINARY_NOT_OBSERVED = 0 + STUFF_START = 10 + num_cams = 6 + ref_file = "work_dirs/ref_nuscene.pkl" + metas_file = "work_dirs/metas_nuscene.pkl" + +# Load reference and metadata files +with open(ref_file, "rb") as f: + data = pkl.load(f) + ref_3d = data["ref_3d"] + reference_points_cam = data["reference_points_cam"] + bev_mask = data["bev_mask"] +with open(metas_file, "rb") as f: + data = pkl.load(f) + semantics = data["voxel_semantics"][0] + imgs = data["imgs"][0] + +# Configuration parameters +embed_dims = 1 +bs = 1 +ref_num_voxel = 2 +bev_z, bev_h, bev_w = 16, 200, 200 +num_query = bev_z * bev_h * bev_w +_device = torch.device("cuda:0") +mean = [103.530, 116.280, 123.675] + +# Reshape semantics data +semantics = semantics.reshape(bev_w, bev_h, bev_z, ref_num_voxel) +query_labels = ( + semantics.permute(2, 1, 0, 3) + .contiguous() + .reshape(bev_w * bev_h * bev_z, ref_num_voxel) + .long() +) + +# Process images +D = reference_points_cam.size(3) +indexes = [] +imgs[:, 0] += mean[0] +imgs[:, 1] += mean[1] +imgs[:, 2] += mean[2] +imgs = torch.clip(imgs, min=0, max=255) + +for cam_id in range(num_cams): + img_cur = imgs[cam_id] + W, H = img_cur.shape[2], img_cur.shape[1] + img_cur_squeeze = img_cur.reshape(-1) + mask_per_img = bev_mask[cam_id] + ref_num = mask_per_img.shape[2] + + for ref_idx in range(ref_num): + print(ref_idx) + index_query_per_img = mask_per_img[0, :, ref_idx].nonzero().squeeze(-1) + query_labels_cur = query_labels[index_query_per_img, ref_idx] + reference_points_cam_cur = reference_points_cam[ + cam_id, 0, index_query_per_img, ref_idx + ] + + uu, vv = reference_points_cam_cur[:, 0] * W, reference_points_cam_cur[:, 1] * H + uu, vv = uu.long(), vv.long() + scalar = (uu + vv * W).long() + + scalar_ = scalar[query_labels_cur < ROAD_LABEL_START] + img_cur_squeeze[0 * H * W + scalar_] = 0 + img_cur_squeeze[1 * H * W + scalar_] = 0 + img_cur_squeeze[2 * H * W + scalar_] = 255 + + scalar_ = scalar[ + torch.logical_and( + query_labels_cur >= ROAD_LABEL_START, + query_labels_cur <= ROAD_LABEL_STOP, + ) + ] + img_cur_squeeze[0 * H * W + scalar_] = 0 + img_cur_squeeze[1 * H * W + scalar_] = 255 + img_cur_squeeze[2 * H * W + scalar_] = 0 + + scalar_ = scalar[ + torch.logical_and( + query_labels_cur > ROAD_LABEL_STOP, query_labels_cur != FREE_LABEL + ) + ] + img_cur_squeeze[0 * H * W + scalar_] = 255 + img_cur_squeeze[1 * H * W + scalar_] = 0 + img_cur_squeeze[2 * H * W + scalar_] = 0 + + img_cur = img_cur_squeeze.reshape(3, H, W).permute(1, 2, 0) + img_show = 
img_cur.cpu().numpy().astype(np.uint8) + cv2.imwrite("work_dirs/waymo{}_debug_{}.jpg".format(IS_WAYMO, cam_id), img_show) + # cv2.imshow("image", img_show) + # cv2.waitKey() diff --git a/tools/vis_tools/vis_ref_dataloader.py b/tools/vis_tools/vis_ref_dataloader.py new file mode 100644 index 0000000..5a0cbd2 --- /dev/null +++ b/tools/vis_tools/vis_ref_dataloader.py @@ -0,0 +1,162 @@ +# Copyright 2022 tao.jiang +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This script processes LiDAR data from the NuScenes or Waymo dataset and visualizes the voxel labels. + +Usage: + python script_name.py + +Configuration: + IS_WAYMO: Boolean flag to toggle between Waymo and NuScenes dataset configurations. +""" +import os + +import matplotlib.pyplot as plt +import numpy as np +import PIL +import torch +from mmcv import Config, DictAction +from mmdet3d.datasets import build_dataset + +from projects.mmdet3d_plugin.datasets import nuscenes_occ +from projects.mmdet3d_plugin.datasets import waymo_temporal_zlt +from .utils import * + +config = "projects/configs/bevformer/bevformer_waymo.py" +voxel_size = [0.4, 0.4, 0.4] +IS_WAYMO = True +cfg = Config.fromfile(config) +point_cloud_range = cfg.point_cloud_range + +IS_WAYMO = True +if IS_WAYMO: + NOT_OBSERVED = -1 + FREE = 0 + OCCUPIED = 1 + FREE_LABEL = 23 + MAX_POINT_NUM = 10 + ROAD_LABEL_START_BEFORE_REMAP = 24 + ROAD_LABEL_STOP_BEFORE_REMAP = 27 + ROAD_LABEL_START = 13 + ROAD_LABEL_STOP = 14 + BINARY_OBSERVED = 1 + BINARY_NOT_OBSERVED = 0 + STUFF_START = 9 # 0-10 thing 11-17 stuff + # DO NOT CHANGE + FLT_MAX = 1e9 + RAY_STOP_DISTANCE_VOXEL = 1 + DISTANCE_THESHOLD_IGNORE = 1.0 + RAY_ROAD_IGNORE_DISTANCE = 1.0 + + num_cams = 5 +else: + NOT_OBSERVED = -1 + FREE = 0 + OCCUPIED = 1 + FREE_LABEL = 17 + MAX_POINT_NUM = 10 + ROAD_LABEL_START_BEFORE_REMAP = 24 + ROAD_LABEL_STOP_BEFORE_REMAP = 27 + ROAD_LABEL_START = 11 + ROAD_LABEL_STOP = 14 + BINARY_OBSERVED = 1 + BINARY_NOT_OBSERVED = 0 + STUFF_START = 10 # 0-10 thing 11-17 stuff + num_cams = 6 +mean = [103.530, 116.280, 123.675] + + +dataset = build_dataset(cfg.data.train) +result = dataset.__getitem__(1000) +print(result["img_metas"].data.keys()) +imgs = result["img"].data[-1] +lidar2img = result["img_metas"].data[2]["lidar2img"] # DEBUG_TMP +print(result["img_metas"].data[2]["filename"]) +voxel_label = result["voxel_semantics"] +voxel_locs = volume2points(voxel_label, voxel_size, point_cloud_range) +points = voxel_locs.reshape(-1, 3) +points_label = voxel_label.reshape(-1) +points_colors = np.zeros((points_label.shape[0], 3)) +if points_label is not None: + for idx in np.unique(points_label): + if idx == FREE_LABEL: + continue + points_colors[points_label == idx] = get_cv_color(idx, begin=1) +points_colors = points_colors / 255.0 +mask = points_label != (FREE_LABEL+1) # Here we filter out the free label +points = points[mask] +points_label = points_label[mask] +points_colors = points_colors[mask] + +points = np.concatenate([points, np.ones_like(points[:, :1])], axis=-1) +points = points.reshape(-1, 4, 1) + + 
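+# Add the per-channel mean (subtracted during image normalization) back and clip to
+# [0, 255] so the images can be displayed with their original intensities.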
+imgs[:, 0] += mean[0] +imgs[:, 1] += mean[1] +imgs[:, 2] += mean[2] +imgs = torch.clip(imgs, min=0, max=255) +for img_idx in range(len(imgs)): + img = imgs[img_idx] + lidar2img_ = lidar2img[img_idx] + lidar2img_ = torch.Tensor(lidar2img_) + H, W = img.shape[1], img.shape[2] + + n = points.shape[0] + lidar2img_ = lidar2img_.view(1, 4, 4).repeat(n, 1, 1) + points_ = torch.Tensor(points) + reference_points_cam = torch.matmul( + lidar2img_.to(torch.float32), points_.to(torch.float32) + ).squeeze(-1) + eps = 1e-5 + bev_mask = reference_points_cam[..., 2:3] > eps + reference_points_cam = reference_points_cam[..., 0:2] / torch.maximum( + reference_points_cam[..., 2:3], + torch.ones_like(reference_points_cam[..., 2:3]) * eps, + ) + reference_points_cam[..., 0] /= W + reference_points_cam[..., 1] /= H + bev_mask = ( + bev_mask + & (reference_points_cam[..., 1:2] > 0.0) + & (reference_points_cam[..., 1:2] < 1) + & (reference_points_cam[..., 0:1] < 1) + & (reference_points_cam[..., 0:1] > 0.0) + ) + bev_mask = bev_mask.reshape(-1) + + reference_points_cam_ = reference_points_cam.cpu().numpy() + bev_mask = bev_mask.cpu().numpy() + + im = img.permute(1, 2, 0).cpu().numpy().astype(np.uint8) + uu = reference_points_cam[bev_mask, 0] * W + vv = reference_points_cam[bev_mask, 1] * H + + fig, ax = plt.subplots(1, 2, figsize=(18, 8)) + fig.tight_layout() + # plt.subplots_adjust(wspace=0, hspace=0) + ax[0].imshow(im) + ax[0].axis("off") + ax[1].imshow(im) + ax[1].scatter(uu, vv, c=points_colors[bev_mask], s=1) + ax[1].axis("off") + # plt save too slow, use PIL + # If we haven't already shown or saved the plot, then we need to draw the figure first... + fig.canvas.draw() + img = PIL.Image.frombytes( + "RGB", fig.canvas.get_width_height(), fig.canvas.tostring_rgb() + ) + img.save(os.path.join("work_dirs/", "{}_{}.jpg".format("ref", img_idx))) + plt.close("all")