From c984503557405d7bb8f2c5ebe3e5fc18755b3fd1 Mon Sep 17 00:00:00 2001 From: Charmve Date: Sun, 4 Feb 2024 23:46:14 +0800 Subject: [PATCH] format code --- .flake8 | 2 +- .gitmodules | 9 +- .../README.md" | 23 ++ .../configs/bevformer/bevformer_base.py" | 5 +- .../configs/bevformer/bevformer_small.py" | 5 +- .../configs/bevformer/bevformer_tiny.py" | 5 +- .../README.md" | 24 +- .../configs/_base_/datasets/coco_instance.py | 66 +-- .../_base_/datasets/kitti-3d-3class.py | 128 +++--- .../configs/_base_/datasets/kitti-3d-car.py | 125 +++--- .../configs/_base_/datasets/lyft-3d.py | 126 +++--- .../configs/_base_/datasets/nuim_instance.py | 77 ++-- .../configs/_base_/datasets/nus-3d.py | 129 +++--- .../configs/_base_/datasets/nus-mono3d.py | 103 +++-- .../_base_/datasets/range100_lyft-3d.py | 126 +++--- .../_base_/datasets/s3dis-3d-5class.py | 103 ++--- .../_base_/datasets/s3dis_seg-3d-13class.py | 127 +++--- .../_base_/datasets/scannet-3d-18class.py | 146 ++++--- .../_base_/datasets/scannet_seg-3d-20class.py | 179 +++++--- .../_base_/datasets/sunrgbd-3d-10class.py | 107 ++--- .../_base_/datasets/waymoD5-3d-3class.py | 127 +++--- .../configs/_base_/datasets/waymoD5-3d-car.py | 124 +++--- .../configs/_base_/default_runtime.py | 13 +- .../projects/configs/_base_/models/3dssd.py | 91 ++--- .../models/cascade_mask_rcnn_r50_fpn.py | 181 +++++---- .../centerpoint_01voxel_second_secfpn_nus.py | 76 ++-- .../centerpoint_02pillar_second_secfpn_nus.py | 74 ++-- .../projects/configs/_base_/models/fcos3d.py | 64 +-- .../configs/_base_/models/groupfree3d.py | 76 ++-- .../projects/configs/_base_/models/h3dnet.py | 357 ++++++++-------- .../_base_/models/hv_pointpillars_fpn_lyft.py | 17 +- .../_base_/models/hv_pointpillars_fpn_nus.py | 73 ++-- .../hv_pointpillars_fpn_range100_lyft.py | 17 +- .../models/hv_pointpillars_secfpn_kitti.py | 72 ++-- .../models/hv_pointpillars_secfpn_waymo.py | 89 ++-- .../_base_/models/hv_second_secfpn_kitti.py | 72 ++-- .../_base_/models/hv_second_secfpn_waymo.py | 86 ++-- .../configs/_base_/models/imvotenet_image.py | 105 ++--- .../_base_/models/mask_rcnn_r50_fpn.py | 115 +++--- .../configs/_base_/models/paconv_cuda_ssg.py | 8 +- .../configs/_base_/models/paconv_ssg.py | 52 ++- .../projects/configs/_base_/models/parta2.py | 172 ++++---- .../configs/_base_/models/pointnet2_msg.py | 33 +- .../configs/_base_/models/pointnet2_ssg.py | 41 +- .../projects/configs/_base_/models/votenet.py | 86 ++-- .../configs/_base_/schedules/cosine.py | 14 +- .../configs/_base_/schedules/cyclic_20e.py | 14 +- .../configs/_base_/schedules/cyclic_40e.py | 14 +- .../_base_/schedules/mmdet_schedule_1x.py | 11 +- .../configs/_base_/schedules/schedule_2x.py | 11 +- .../configs/_base_/schedules/schedule_3x.py | 6 +- .../_base_/schedules/seg_cosine_150e.py | 6 +- .../_base_/schedules/seg_cosine_200e.py | 6 +- .../_base_/schedules/seg_cosine_50e.py | 6 +- .../configs/bevformer/bev_base_occ.py | 283 +++++++------ .../bevformer/bev_base_occ_intern_s.py | 276 +++++++------ .../configs/bevformer/bev_tiny_det.py | 284 +++++++------ .../configs/bevformer/bev_tiny_det_occ.py | 295 ++++++++------ .../bevformer/bev_tiny_det_occ_flow.py | 318 +++++++++------ .../configs/bevformer/bev_tiny_occ.py | 276 +++++++------ .../bevformer/bev_tiny_occ_intern_s.py | 279 +++++++------ .../configs/datasets/custom_lyft-3d.py | 128 +++--- .../configs/datasets/custom_nus-3d.py | 129 +++--- .../configs/datasets/custom_waymo-3d.py | 95 +++-- .../configs/hybrid/hybrid_base_occ.py | 383 ++++++++++-------- .../configs/hybrid/hybrid_tiny_occ.py 
| 370 +++++++++-------- .../hybrid/hybrid_tiny_occ_intern_s.py | 374 +++++++++-------- .../configs/voxelformer/voxel_base_occ.py | 289 +++++++------ .../configs/voxelformer/voxel_tiny_occ.py | 280 +++++++------ .../Awesome-BEV-Perception-Multi-Cameras.md | 3 + dataset/README.md | 38 ++ src/imgs/nuscenes-struct.png | Bin 0 -> 87251 bytes viz/README.md | 27 ++ viz/carla_apollo_bridge | 1 + viz/selfdrivingtech/README.md | 51 +++ .../streetscape.gl_demo/README.md | 54 +++ .../streetscape.gl_demo/get-started/README.md | 10 + .../get-started/index.html | 26 ++ .../get-started/package.json | 40 ++ .../get-started/src/app.js | 220 ++++++++++ .../get-started/src/constants.js | 64 +++ .../get-started/src/log-from-file.js | 84 ++++ .../get-started/src/log-from-live.js | 32 ++ .../get-started/src/log-from-stream.js | 37 ++ .../get-started/webpack.config.js | 76 ++++ viz/streetscape.gl | 1 + 86 files changed, 5242 insertions(+), 3505 deletions(-) create mode 100644 dataset/README.md create mode 100644 src/imgs/nuscenes-struct.png create mode 100644 viz/README.md create mode 160000 viz/carla_apollo_bridge create mode 100644 viz/selfdrivingtech/README.md create mode 100644 viz/selfdrivingtech/streetscape.gl_demo/README.md create mode 100644 viz/selfdrivingtech/streetscape.gl_demo/get-started/README.md create mode 100644 viz/selfdrivingtech/streetscape.gl_demo/get-started/index.html create mode 100644 viz/selfdrivingtech/streetscape.gl_demo/get-started/package.json create mode 100644 viz/selfdrivingtech/streetscape.gl_demo/get-started/src/app.js create mode 100644 viz/selfdrivingtech/streetscape.gl_demo/get-started/src/constants.js create mode 100644 viz/selfdrivingtech/streetscape.gl_demo/get-started/src/log-from-file.js create mode 100644 viz/selfdrivingtech/streetscape.gl_demo/get-started/src/log-from-live.js create mode 100644 viz/selfdrivingtech/streetscape.gl_demo/get-started/src/log-from-stream.js create mode 100644 viz/selfdrivingtech/streetscape.gl_demo/get-started/webpack.config.js create mode 160000 viz/streetscape.gl diff --git a/.flake8 b/.flake8 index ad29022..69e7ffa 100644 --- a/.flake8 +++ b/.flake8 @@ -1,4 +1,4 @@ [flake8] max-line-length = 90 -exclude = file1.py, **/__init__.py +exclude = ./Chapter08-FinalProject/OccNet/projects/mmdet3d_plugin/bevformer/modules/voxel_encoder.py, **/__init__.py diff --git a/.gitmodules b/.gitmodules index a5f0f15..860e65c 100644 --- a/.gitmodules +++ b/.gitmodules @@ -49,14 +49,17 @@ url = https://github.com/OpenDriveLab/UniAD [submodule "SurroundOcc"] - path = code/SurroundOcc + path = dataset/SurroundOcc url = https://github.com/weiyithu/SurroundOcc [submodule "Occ3D"] - path = code/Occ3D + path = dataset/Occ3D url = https://github.com/Tsinghua-MARS-Lab/Occ3D [submodule "OpenScene"] - path = code/OpenScene + path = dataset/OpenScene url = https://github.com/OpenDriveLab/OpenScene +[submodule "OpenOccupancy"] + path = dataset/OpenOccupancy + url = https://github.com/JeffWang987/OpenOccupancy [submodule "Deformable-DETR"] diff --git "a/Chapter03-\351\207\215\350\246\201\346\225\260\346\215\256\351\233\206\345\222\214Benchmark\344\273\213\347\273\215/README.md" "b/Chapter03-\351\207\215\350\246\201\346\225\260\346\215\256\351\233\206\345\222\214Benchmark\344\273\213\347\273\215/README.md" index 2225f09..c9bf304 100644 --- "a/Chapter03-\351\207\215\350\246\201\346\225\260\346\215\256\351\233\206\345\222\214Benchmark\344\273\213\347\273\215/README.md" +++ 
"b/Chapter03-\351\207\215\350\246\201\346\225\260\346\215\256\351\233\206\345\222\214Benchmark\344\273\213\347\273\215/README.md" @@ -1,3 +1,26 @@ +## nuScenes: A Multimodal Dataset for Autonomous Driving + +https://www.youtube.com/watch?v=C6KbbndonGg + +https://www.nuscenes.org/nuscenes#download + +### Viz + +- streetscape.gl + - https://www.youtube.com/watch?v=irS9H0fU-ig + - https://github.com/prodramp/DeepWorks/tree/main/selfdrivingtech/streetscape.gl_demo +- apollo-DreamView + - https://github.com/ApolloAuto/apollo/tree/master/modules/dreamview +- Carla + - https://www.bilibili.com/video/BV1eN4y1Z7Zy +- lgsvl https://github.com/lgsvl/simulator +- Foxglove Studio https://github.com/foxglove/studio + - https://github.com/foxglove/nuscenes2mcap +## Occ3D datasets + +- Occ3D +- OpenOccupancy +- SurroundOcc ### 3D Occupancy Prediction Challenge at CVPR 2023 (Server remains `active`) diff --git "a/Chapter06-\345\215\240\346\215\256\347\275\221\347\273\234\351\203\250\347\275\262\345\260\217\350\257\225\357\274\232\346\250\241\345\236\213\351\207\217\345\214\226\345\212\240\351\200\237\344\270\216\351\203\250\347\275\262/BEVFormer_tensorrt/configs/bevformer/bevformer_base.py" "b/Chapter06-\345\215\240\346\215\256\347\275\221\347\273\234\351\203\250\347\275\262\345\260\217\350\257\225\357\274\232\346\250\241\345\236\213\351\207\217\345\214\226\345\212\240\351\200\237\344\270\216\351\203\250\347\275\262/BEVFormer_tensorrt/configs/bevformer/bevformer_base.py" index 9821ad7..52170fe 100644 --- "a/Chapter06-\345\215\240\346\215\256\347\275\221\347\273\234\351\203\250\347\275\262\345\260\217\350\257\225\357\274\232\346\250\241\345\236\213\351\207\217\345\214\226\345\212\240\351\200\237\344\270\216\351\203\250\347\275\262/BEVFormer_tensorrt/configs/bevformer/bevformer_base.py" +++ "b/Chapter06-\345\215\240\346\215\256\347\275\221\347\273\234\351\203\250\347\275\262\345\260\217\350\257\225\357\274\232\346\250\241\345\236\213\351\207\217\345\214\226\345\212\240\351\200\237\344\270\216\351\203\250\347\275\262/BEVFormer_tensorrt/configs/bevformer/bevformer_base.py" @@ -286,7 +286,10 @@ optimizer = dict( type="AdamW", lr=2e-4, - paramwise_cfg=dict(custom_keys={"img_backbone": dict(lr_mult=0.1),}), + paramwise_cfg=dict( + custom_keys={ + 'img_backbone': dict(lr_mult=0.1), + }), weight_decay=0.01, ) diff --git "a/Chapter06-\345\215\240\346\215\256\347\275\221\347\273\234\351\203\250\347\275\262\345\260\217\350\257\225\357\274\232\346\250\241\345\236\213\351\207\217\345\214\226\345\212\240\351\200\237\344\270\216\351\203\250\347\275\262/BEVFormer_tensorrt/configs/bevformer/bevformer_small.py" "b/Chapter06-\345\215\240\346\215\256\347\275\221\347\273\234\351\203\250\347\275\262\345\260\217\350\257\225\357\274\232\346\250\241\345\236\213\351\207\217\345\214\226\345\212\240\351\200\237\344\270\216\351\203\250\347\275\262/BEVFormer_tensorrt/configs/bevformer/bevformer_small.py" index 42906ee..f02a620 100644 --- "a/Chapter06-\345\215\240\346\215\256\347\275\221\347\273\234\351\203\250\347\275\262\345\260\217\350\257\225\357\274\232\346\250\241\345\236\213\351\207\217\345\214\226\345\212\240\351\200\237\344\270\216\351\203\250\347\275\262/BEVFormer_tensorrt/configs/bevformer/bevformer_small.py" +++ "b/Chapter06-\345\215\240\346\215\256\347\275\221\347\273\234\351\203\250\347\275\262\345\260\217\350\257\225\357\274\232\346\250\241\345\236\213\351\207\217\345\214\226\345\212\240\351\200\237\344\270\216\351\203\250\347\275\262/BEVFormer_tensorrt/configs/bevformer/bevformer_small.py" @@ -298,7 +298,10 @@ optimizer = 
dict( type="AdamW", lr=2e-4, - paramwise_cfg=dict(custom_keys={"img_backbone": dict(lr_mult=0.1),}), + paramwise_cfg=dict( + custom_keys={ + 'img_backbone': dict(lr_mult=0.1), + }), weight_decay=0.01, ) diff --git "a/Chapter06-\345\215\240\346\215\256\347\275\221\347\273\234\351\203\250\347\275\262\345\260\217\350\257\225\357\274\232\346\250\241\345\236\213\351\207\217\345\214\226\345\212\240\351\200\237\344\270\216\351\203\250\347\275\262/BEVFormer_tensorrt/configs/bevformer/bevformer_tiny.py" "b/Chapter06-\345\215\240\346\215\256\347\275\221\347\273\234\351\203\250\347\275\262\345\260\217\350\257\225\357\274\232\346\250\241\345\236\213\351\207\217\345\214\226\345\212\240\351\200\237\344\270\216\351\203\250\347\275\262/BEVFormer_tensorrt/configs/bevformer/bevformer_tiny.py" index 295a7d5..1b570d2 100644 --- "a/Chapter06-\345\215\240\346\215\256\347\275\221\347\273\234\351\203\250\347\275\262\345\260\217\350\257\225\357\274\232\346\250\241\345\236\213\351\207\217\345\214\226\345\212\240\351\200\237\344\270\216\351\203\250\347\275\262/BEVFormer_tensorrt/configs/bevformer/bevformer_tiny.py" +++ "b/Chapter06-\345\215\240\346\215\256\347\275\221\347\273\234\351\203\250\347\275\262\345\260\217\350\257\225\357\274\232\346\250\241\345\236\213\351\207\217\345\214\226\345\212\240\351\200\237\344\270\216\351\203\250\347\275\262/BEVFormer_tensorrt/configs/bevformer/bevformer_tiny.py" @@ -295,7 +295,10 @@ optimizer = dict( type="AdamW", lr=2e-4, - paramwise_cfg=dict(custom_keys={"img_backbone": dict(lr_mult=0.1),}), + paramwise_cfg=dict( + custom_keys={ + 'img_backbone': dict(lr_mult=0.1), + }), weight_decay=0.01, ) diff --git "a/Chapter07-\350\257\276\347\250\213\345\261\225\346\234\233\344\270\216\346\200\273\347\273\223/README.md" "b/Chapter07-\350\257\276\347\250\213\345\261\225\346\234\233\344\270\216\346\200\273\347\273\223/README.md" index 95f0ed0..58a37dd 100644 --- "a/Chapter07-\350\257\276\347\250\213\345\261\225\346\234\233\344\270\216\346\200\273\347\273\223/README.md" +++ "b/Chapter07-\350\257\276\347\250\213\345\261\225\346\234\233\344\270\216\346\200\273\347\273\223/README.md" @@ -2,7 +2,7 @@ * @Author: Charmve yidazhang1@gmail.com * @Date: 2023-10-10 10:49:13 * @LastEditors: Charmve yidazhang1@gmail.com - * @LastEditTime: 2024-01-31 22:58:00 + * @LastEditTime: 2024-02-02 01:16:50 * @FilePath: /OccNet-Course/Chapter07-课程展望与总结/README.md * @Version: 1.0.1 * @Blogs: charmve.blog.csdn.net @@ -16,12 +16,19 @@ 在本专题课程的课程展望和总结中,主要从算法框架、数据、仿真和其他四个方面做未来展望,以及对本课程做一个总结。 -- 算法框 - - 数据驱动的端到端 UniAD +- 算法框架 + - 数据驱动的端到端 [UniAD](https://github.com/OpenDriveLab/UniAD) + - https://mp.weixin.qq.com/s/qcNtRsBD5aadkavU9TfpFA - https://github.com/OpenDriveLab/End-to-end-Autonomous-Driving + - End-to-end Interpretable Neural Motion Planner [paper](https://arxiv.org/abs/2101.06679) + - End-to-End Learning of Driving Models with Surround-View Cameras and Route Planners [paper](https://arxiv.org/abs/1803.10158) - https://github.com/E2E-AD/AD-MLP - - https://github.com/OpenDriveLab/ST-P3 - - 大模型 LMDrive [关于大模型和自动驾驶的几个迷思](关于大模型和自动驾驶的几个迷思.md) + - ST-P3 [paper](https://arxiv.org/abs/2207.07601) | [code](https://github.com/OpenDriveLab/ST-P3) + - MP3 [paper](https://arxiv.org/abs/2101.06806) | [video](https://www.bilibili.com/video/BV1tQ4y1k7BX) + - TCP [NeurIPS 2022] Trajectory-guided Control Prediction for End-to-end Autonomous Driving: A Simple yet Strong Baseline. 
[paper](https://arxiv.org/abs/2206.08129) | [video](https://www.bilibili.com/video/BV1Pe4y1x7E3/?spm_id_from=333.337.search-card.all.click&vd_source=57394ba751fad8e6886be567cccfa5bb) |[code](https://github.com/OpenDriveLab/TCP) + - 鉴智机器人 GraphAD + - + - 大模型 [LMDrive](https://github.com/opendilab/LMDrive) [关于大模型和自动驾驶的几个迷思](关于大模型和自动驾驶的几个迷思.md) - 世界模型:Drive-WM、DriveDreamer - 矢量地图在线建图:MapTRv2、ScalableMap、VectorMapNet、HDMapNet、GeMap、MapEX - BEV-OCC-Transformer: OccFormer、OccWorld、Occupancy Flow @@ -30,10 +37,11 @@ - 4D数据自动标注: - OCC与Nerf联合标注 - [面向BEV感知的4D标注方案](https://zhuanlan.zhihu.com/p/642735557?utm_psn=1706841959639998464) - - 数据生成:DrivingDiffusion、[MagicDrive](https://zhuanlan.zhihu.com/p/675303127)、UrbanSyn + - 数据合成:DrivingDiffusion、[MagicDrive](https://zhuanlan.zhihu.com/p/675303127)、UrbanSyn + - https://github.com/runnanchen/CLIP2Scene - 仿真 - - UniSim + - [UniSim](https://waabi.ai/unisim/) - DRIVE Sim - 其他 @@ -41,3 +49,5 @@ - AI 编译器: MLIR、TVM、XLA、Triton - 模型剪枝、模型蒸馏、模型压缩、模型量化(PTQ、QAT) + +关注科技前沿公司:[Waabi](https://waabi.ai/unisim/)、[Wayve](https://wayve.ai/) \ No newline at end of file diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/coco_instance.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/coco_instance.py index f6ea4f4..16d8494 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/coco_instance.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/coco_instance.py @@ -1,48 +1,54 @@ -dataset_type = 'CocoDataset' -data_root = 'data/coco/' +dataset_type = "CocoDataset" +data_root = "data/coco/" img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True +) train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), + dict(type="LoadImageFromFile"), + dict(type="LoadAnnotations", with_bbox=True, with_mask=True), + dict(type="Resize", img_scale=(1333, 800), keep_ratio=True), + dict(type="RandomFlip", flip_ratio=0.5), + dict(type="Normalize", **img_norm_cfg), + dict(type="Pad", size_divisor=32), + dict(type="DefaultFormatBundle"), + dict(type="Collect", keys=["img", "gt_bboxes", "gt_labels", "gt_masks"]), ] test_pipeline = [ - dict(type='LoadImageFromFile'), + dict(type="LoadImageFromFile"), dict( - type='MultiScaleFlipAug', + type="MultiScaleFlipAug", img_scale=(1333, 800), flip=False, transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) + dict(type="Resize", keep_ratio=True), + dict(type="RandomFlip"), + dict(type="Normalize", **img_norm_cfg), + dict(type="Pad", size_divisor=32), + dict(type="ImageToTensor", keys=["img"]), + dict(type="Collect", keys=["img"]), + ], + ), ] data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - pipeline=train_pipeline), + ann_file=data_root + "annotations/instances_train2017.json", + 
img_prefix=data_root + "train2017/", + pipeline=train_pipeline, + ), val=dict( type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline), + ann_file=data_root + "annotations/instances_val2017.json", + img_prefix=data_root + "val2017/", + pipeline=test_pipeline, + ), test=dict( type=dataset_type, - ann_file=data_root + 'annotations/instances_val2017.json', - img_prefix=data_root + 'val2017/', - pipeline=test_pipeline)) -evaluation = dict(metric=['bbox', 'segm']) + ann_file=data_root + "annotations/instances_val2017.json", + img_prefix=data_root + "val2017/", + pipeline=test_pipeline, + ), +) +evaluation = dict(metric=["bbox", "segm"]) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/kitti-3d-3class.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/kitti-3d-3class.py index 1822af4..342ecfb 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/kitti-3d-3class.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/kitti-3d-3class.py @@ -1,20 +1,22 @@ # dataset settings -dataset_type = 'KittiDataset' -data_root = 'data/kitti/' -class_names = ['Pedestrian', 'Cyclist', 'Car'] +dataset_type = "KittiDataset" +data_root = "data/kitti/" +class_names = ["Pedestrian", "Cyclist", "Car"] point_cloud_range = [0, -40, -3, 70.4, 40, 1] input_modality = dict(use_lidar=True, use_camera=False) db_sampler = dict( data_root=data_root, - info_path=data_root + 'kitti_dbinfos_train.pkl', + info_path=data_root + "kitti_dbinfos_train.pkl", rate=1.0, prepare=dict( filter_by_difficulty=[-1], - filter_by_min_points=dict(Car=5, Pedestrian=10, Cyclist=10)), + filter_by_min_points=dict(Car=5, Pedestrian=10, Cyclist=10), + ), classes=class_names, - sample_groups=dict(Car=12, Pedestrian=6, Cyclist=6)) + sample_groups=dict(Car=12, Pedestrian=6, Cyclist=6), +) -file_client_args = dict(backend='disk') +file_client_args = dict(backend="disk") # Uncomment the following if use ceph or other file clients. # See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient # for more details. 
@@ -23,118 +25,126 @@ train_pipeline = [ dict( - type='LoadPointsFromFile', - coord_type='LIDAR', + type="LoadPointsFromFile", + coord_type="LIDAR", load_dim=4, use_dim=4, - file_client_args=file_client_args), + file_client_args=file_client_args, + ), dict( - type='LoadAnnotations3D', + type="LoadAnnotations3D", with_bbox_3d=True, with_label_3d=True, - file_client_args=file_client_args), - dict(type='ObjectSample', db_sampler=db_sampler), + file_client_args=file_client_args, + ), + dict(type="ObjectSample", db_sampler=db_sampler), dict( - type='ObjectNoise', + type="ObjectNoise", num_try=100, translation_std=[1.0, 1.0, 0.5], global_rot_range=[0.0, 0.0], - rot_range=[-0.78539816, 0.78539816]), - dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + rot_range=[-0.78539816, 0.78539816], + ), + dict(type="RandomFlip3D", flip_ratio_bev_horizontal=0.5), dict( - type='GlobalRotScaleTrans', + type="GlobalRotScaleTrans", rot_range=[-0.78539816, 0.78539816], - scale_ratio_range=[0.95, 1.05]), - dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), - dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), - dict(type='PointShuffle'), - dict(type='DefaultFormatBundle3D', class_names=class_names), - dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) + scale_ratio_range=[0.95, 1.05], + ), + dict(type="PointsRangeFilter", point_cloud_range=point_cloud_range), + dict(type="ObjectRangeFilter", point_cloud_range=point_cloud_range), + dict(type="PointShuffle"), + dict(type="DefaultFormatBundle3D", class_names=class_names), + dict(type="Collect3D", keys=["points", "gt_bboxes_3d", "gt_labels_3d"]), ] test_pipeline = [ dict( - type='LoadPointsFromFile', - coord_type='LIDAR', + type="LoadPointsFromFile", + coord_type="LIDAR", load_dim=4, use_dim=4, - file_client_args=file_client_args), + file_client_args=file_client_args, + ), dict( - type='MultiScaleFlipAug3D', + type="MultiScaleFlipAug3D", img_scale=(1333, 800), pts_scale_ratio=1, flip=False, transforms=[ dict( - type='GlobalRotScaleTrans', + type="GlobalRotScaleTrans", rot_range=[0, 0], - scale_ratio_range=[1., 1.], - translation_std=[0, 0, 0]), - dict(type='RandomFlip3D'), - dict( - type='PointsRangeFilter', point_cloud_range=point_cloud_range), + scale_ratio_range=[1.0, 1.0], + translation_std=[0, 0, 0], + ), + dict(type="RandomFlip3D"), + dict(type="PointsRangeFilter", point_cloud_range=point_cloud_range), dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='Collect3D', keys=['points']) - ]) + type="DefaultFormatBundle3D", class_names=class_names, with_label=False + ), + dict(type="Collect3D", keys=["points"]), + ], + ), ] # construct a pipeline for data and gt loading in show function # please keep its loading function consistent with test_pipeline (e.g. 
client) eval_pipeline = [ dict( - type='LoadPointsFromFile', - coord_type='LIDAR', + type="LoadPointsFromFile", + coord_type="LIDAR", load_dim=4, use_dim=4, - file_client_args=file_client_args), - dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='Collect3D', keys=['points']) + file_client_args=file_client_args, + ), + dict(type="DefaultFormatBundle3D", class_names=class_names, with_label=False), + dict(type="Collect3D", keys=["points"]), ] data = dict( samples_per_gpu=6, workers_per_gpu=4, train=dict( - type='RepeatDataset', + type="RepeatDataset", times=2, dataset=dict( type=dataset_type, data_root=data_root, - ann_file=data_root + 'kitti_infos_train.pkl', - split='training', - pts_prefix='velodyne_reduced', + ann_file=data_root + "kitti_infos_train.pkl", + split="training", + pts_prefix="velodyne_reduced", pipeline=train_pipeline, modality=input_modality, classes=class_names, test_mode=False, # we use box_type_3d='LiDAR' in kitti and nuscenes dataset # and box_type_3d='Depth' in sunrgbd and scannet dataset. - box_type_3d='LiDAR')), + box_type_3d="LiDAR", + ), + ), val=dict( type=dataset_type, data_root=data_root, - ann_file=data_root + 'kitti_infos_val.pkl', - split='training', - pts_prefix='velodyne_reduced', + ann_file=data_root + "kitti_infos_val.pkl", + split="training", + pts_prefix="velodyne_reduced", pipeline=test_pipeline, modality=input_modality, classes=class_names, test_mode=True, - box_type_3d='LiDAR'), + box_type_3d="LiDAR", + ), test=dict( type=dataset_type, data_root=data_root, - ann_file=data_root + 'kitti_infos_val.pkl', - split='training', - pts_prefix='velodyne_reduced', + ann_file=data_root + "kitti_infos_val.pkl", + split="training", + pts_prefix="velodyne_reduced", pipeline=test_pipeline, modality=input_modality, classes=class_names, test_mode=True, - box_type_3d='LiDAR')) + box_type_3d="LiDAR", + ), +) evaluation = dict(interval=1, pipeline=eval_pipeline) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/kitti-3d-car.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/kitti-3d-car.py index 1e81226..bb07ff4 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/kitti-3d-car.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/kitti-3d-car.py @@ -1,18 +1,19 @@ # dataset settings -dataset_type = 'KittiDataset' -data_root = 'data/kitti/' -class_names = ['Car'] +dataset_type = "KittiDataset" +data_root = "data/kitti/" +class_names = ["Car"] point_cloud_range = [0, -40, -3, 70.4, 40, 1] input_modality = dict(use_lidar=True, use_camera=False) db_sampler = dict( data_root=data_root, - info_path=data_root + 'kitti_dbinfos_train.pkl', + info_path=data_root + "kitti_dbinfos_train.pkl", rate=1.0, prepare=dict(filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)), classes=class_names, - sample_groups=dict(Car=15)) + sample_groups=dict(Car=15), +) -file_client_args = dict(backend='disk') +file_client_args = dict(backend="disk") # Uncomment the following if use ceph or other file clients. # See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient # for more details. 
@@ -21,118 +22,126 @@ train_pipeline = [ dict( - type='LoadPointsFromFile', - coord_type='LIDAR', + type="LoadPointsFromFile", + coord_type="LIDAR", load_dim=4, use_dim=4, - file_client_args=file_client_args), + file_client_args=file_client_args, + ), dict( - type='LoadAnnotations3D', + type="LoadAnnotations3D", with_bbox_3d=True, with_label_3d=True, - file_client_args=file_client_args), - dict(type='ObjectSample', db_sampler=db_sampler), + file_client_args=file_client_args, + ), + dict(type="ObjectSample", db_sampler=db_sampler), dict( - type='ObjectNoise', + type="ObjectNoise", num_try=100, translation_std=[1.0, 1.0, 0.5], global_rot_range=[0.0, 0.0], - rot_range=[-0.78539816, 0.78539816]), - dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), + rot_range=[-0.78539816, 0.78539816], + ), + dict(type="RandomFlip3D", flip_ratio_bev_horizontal=0.5), dict( - type='GlobalRotScaleTrans', + type="GlobalRotScaleTrans", rot_range=[-0.78539816, 0.78539816], - scale_ratio_range=[0.95, 1.05]), - dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), - dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), - dict(type='PointShuffle'), - dict(type='DefaultFormatBundle3D', class_names=class_names), - dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) + scale_ratio_range=[0.95, 1.05], + ), + dict(type="PointsRangeFilter", point_cloud_range=point_cloud_range), + dict(type="ObjectRangeFilter", point_cloud_range=point_cloud_range), + dict(type="PointShuffle"), + dict(type="DefaultFormatBundle3D", class_names=class_names), + dict(type="Collect3D", keys=["points", "gt_bboxes_3d", "gt_labels_3d"]), ] test_pipeline = [ dict( - type='LoadPointsFromFile', - coord_type='LIDAR', + type="LoadPointsFromFile", + coord_type="LIDAR", load_dim=4, use_dim=4, - file_client_args=file_client_args), + file_client_args=file_client_args, + ), dict( - type='MultiScaleFlipAug3D', + type="MultiScaleFlipAug3D", img_scale=(1333, 800), pts_scale_ratio=1, flip=False, transforms=[ dict( - type='GlobalRotScaleTrans', + type="GlobalRotScaleTrans", rot_range=[0, 0], - scale_ratio_range=[1., 1.], - translation_std=[0, 0, 0]), - dict(type='RandomFlip3D'), - dict( - type='PointsRangeFilter', point_cloud_range=point_cloud_range), + scale_ratio_range=[1.0, 1.0], + translation_std=[0, 0, 0], + ), + dict(type="RandomFlip3D"), + dict(type="PointsRangeFilter", point_cloud_range=point_cloud_range), dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='Collect3D', keys=['points']) - ]) + type="DefaultFormatBundle3D", class_names=class_names, with_label=False + ), + dict(type="Collect3D", keys=["points"]), + ], + ), ] # construct a pipeline for data and gt loading in show function # please keep its loading function consistent with test_pipeline (e.g. 
client) eval_pipeline = [ dict( - type='LoadPointsFromFile', - coord_type='LIDAR', + type="LoadPointsFromFile", + coord_type="LIDAR", load_dim=4, use_dim=4, - file_client_args=file_client_args), - dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='Collect3D', keys=['points']) + file_client_args=file_client_args, + ), + dict(type="DefaultFormatBundle3D", class_names=class_names, with_label=False), + dict(type="Collect3D", keys=["points"]), ] data = dict( samples_per_gpu=6, workers_per_gpu=4, train=dict( - type='RepeatDataset', + type="RepeatDataset", times=2, dataset=dict( type=dataset_type, data_root=data_root, - ann_file=data_root + 'kitti_infos_train.pkl', - split='training', - pts_prefix='velodyne_reduced', + ann_file=data_root + "kitti_infos_train.pkl", + split="training", + pts_prefix="velodyne_reduced", pipeline=train_pipeline, modality=input_modality, classes=class_names, test_mode=False, # we use box_type_3d='LiDAR' in kitti and nuscenes dataset # and box_type_3d='Depth' in sunrgbd and scannet dataset. - box_type_3d='LiDAR')), + box_type_3d="LiDAR", + ), + ), val=dict( type=dataset_type, data_root=data_root, - ann_file=data_root + 'kitti_infos_val.pkl', - split='training', - pts_prefix='velodyne_reduced', + ann_file=data_root + "kitti_infos_val.pkl", + split="training", + pts_prefix="velodyne_reduced", pipeline=test_pipeline, modality=input_modality, classes=class_names, test_mode=True, - box_type_3d='LiDAR'), + box_type_3d="LiDAR", + ), test=dict( type=dataset_type, data_root=data_root, - ann_file=data_root + 'kitti_infos_val.pkl', - split='training', - pts_prefix='velodyne_reduced', + ann_file=data_root + "kitti_infos_val.pkl", + split="training", + pts_prefix="velodyne_reduced", pipeline=test_pipeline, modality=input_modality, classes=class_names, test_mode=True, - box_type_3d='LiDAR')) + box_type_3d="LiDAR", + ), +) evaluation = dict(interval=1, pipeline=eval_pipeline) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/lyft-3d.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/lyft-3d.py index 71baff0..501c35d 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/lyft-3d.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/lyft-3d.py @@ -3,20 +3,24 @@ point_cloud_range = [-80, -80, -5, 80, 80, 3] # For Lyft we usually do 9-class detection class_names = [ - 'car', 'truck', 'bus', 'emergency_vehicle', 'other_vehicle', 'motorcycle', - 'bicycle', 'pedestrian', 'animal' + "car", + "truck", + "bus", + "emergency_vehicle", + "other_vehicle", + "motorcycle", + "bicycle", + "pedestrian", + "animal", ] -dataset_type = 'LyftDataset' -data_root = 'data/lyft/' +dataset_type = "LyftDataset" +data_root = "data/lyft/" # Input modality for Lyft dataset, this is consistent with the submission # format which requires the information in input_modality. input_modality = dict( - use_lidar=True, - use_camera=False, - use_radar=False, - use_map=False, - use_external=False) -file_client_args = dict(backend='disk') + use_lidar=True, use_camera=False, use_radar=False, use_map=False, use_external=False +) +file_client_args = dict(backend="disk") # Uncomment the following if use ceph or other file clients. # See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient # for more details. 
@@ -28,78 +32,82 @@ # })) train_pipeline = [ dict( - type='LoadPointsFromFile', - coord_type='LIDAR', + type="LoadPointsFromFile", + coord_type="LIDAR", load_dim=5, use_dim=5, - file_client_args=file_client_args), + file_client_args=file_client_args, + ), dict( - type='LoadPointsFromMultiSweeps', + type="LoadPointsFromMultiSweeps", sweeps_num=10, - file_client_args=file_client_args), - dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + file_client_args=file_client_args, + ), + dict(type="LoadAnnotations3D", with_bbox_3d=True, with_label_3d=True), dict( - type='GlobalRotScaleTrans', + type="GlobalRotScaleTrans", rot_range=[-0.3925, 0.3925], scale_ratio_range=[0.95, 1.05], - translation_std=[0, 0, 0]), - dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), - dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), - dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), - dict(type='PointShuffle'), - dict(type='DefaultFormatBundle3D', class_names=class_names), - dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) + translation_std=[0, 0, 0], + ), + dict(type="RandomFlip3D", flip_ratio_bev_horizontal=0.5), + dict(type="PointsRangeFilter", point_cloud_range=point_cloud_range), + dict(type="ObjectRangeFilter", point_cloud_range=point_cloud_range), + dict(type="PointShuffle"), + dict(type="DefaultFormatBundle3D", class_names=class_names), + dict(type="Collect3D", keys=["points", "gt_bboxes_3d", "gt_labels_3d"]), ] test_pipeline = [ dict( - type='LoadPointsFromFile', - coord_type='LIDAR', + type="LoadPointsFromFile", + coord_type="LIDAR", load_dim=5, use_dim=5, - file_client_args=file_client_args), + file_client_args=file_client_args, + ), dict( - type='LoadPointsFromMultiSweeps', + type="LoadPointsFromMultiSweeps", sweeps_num=10, - file_client_args=file_client_args), + file_client_args=file_client_args, + ), dict( - type='MultiScaleFlipAug3D', + type="MultiScaleFlipAug3D", img_scale=(1333, 800), pts_scale_ratio=1, flip=False, transforms=[ dict( - type='GlobalRotScaleTrans', + type="GlobalRotScaleTrans", rot_range=[0, 0], - scale_ratio_range=[1., 1.], - translation_std=[0, 0, 0]), - dict(type='RandomFlip3D'), + scale_ratio_range=[1.0, 1.0], + translation_std=[0, 0, 0], + ), + dict(type="RandomFlip3D"), + dict(type="PointsRangeFilter", point_cloud_range=point_cloud_range), dict( - type='PointsRangeFilter', point_cloud_range=point_cloud_range), - dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='Collect3D', keys=['points']) - ]) + type="DefaultFormatBundle3D", class_names=class_names, with_label=False + ), + dict(type="Collect3D", keys=["points"]), + ], + ), ] # construct a pipeline for data and gt loading in show function # please keep its loading function consistent with test_pipeline (e.g. 
client) eval_pipeline = [ dict( - type='LoadPointsFromFile', - coord_type='LIDAR', + type="LoadPointsFromFile", + coord_type="LIDAR", load_dim=5, use_dim=5, - file_client_args=file_client_args), + file_client_args=file_client_args, + ), dict( - type='LoadPointsFromMultiSweeps', + type="LoadPointsFromMultiSweeps", sweeps_num=10, - file_client_args=file_client_args), - dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='Collect3D', keys=['points']) + file_client_args=file_client_args, + ), + dict(type="DefaultFormatBundle3D", class_names=class_names, with_label=False), + dict(type="Collect3D", keys=["points"]), ] data = dict( @@ -108,27 +116,31 @@ train=dict( type=dataset_type, data_root=data_root, - ann_file=data_root + 'lyft_infos_train.pkl', + ann_file=data_root + "lyft_infos_train.pkl", pipeline=train_pipeline, classes=class_names, modality=input_modality, - test_mode=False), + test_mode=False, + ), val=dict( type=dataset_type, data_root=data_root, - ann_file=data_root + 'lyft_infos_val.pkl', + ann_file=data_root + "lyft_infos_val.pkl", pipeline=test_pipeline, classes=class_names, modality=input_modality, - test_mode=True), + test_mode=True, + ), test=dict( type=dataset_type, data_root=data_root, - ann_file=data_root + 'lyft_infos_test.pkl', + ann_file=data_root + "lyft_infos_test.pkl", pipeline=test_pipeline, classes=class_names, modality=input_modality, - test_mode=True)) + test_mode=True, + ), +) # For Lyft dataset, we usually evaluate the model at the end of training. # Since the models are trained by 24 epochs by default, we set evaluation # interval to be 24. Please change the interval accordingly if you do not diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/nuim_instance.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/nuim_instance.py index 82fce56..66df1eb 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/nuim_instance.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/nuim_instance.py @@ -1,59 +1,74 @@ -dataset_type = 'CocoDataset' -data_root = 'data/nuimages/' +dataset_type = "CocoDataset" +data_root = "data/nuimages/" class_names = [ - 'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle', - 'motorcycle', 'pedestrian', 'traffic_cone', 'barrier' + "car", + "truck", + "trailer", + "bus", + "construction_vehicle", + "bicycle", + "motorcycle", + "pedestrian", + "traffic_cone", + "barrier", ] img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True +) train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict(type="LoadImageFromFile"), + dict(type="LoadAnnotations", with_bbox=True, with_mask=True), dict( - type='Resize', + type="Resize", img_scale=[(1280, 720), (1920, 1080)], - multiscale_mode='range', - keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), + multiscale_mode="range", + keep_ratio=True, + ), + dict(type="RandomFlip", flip_ratio=0.5), + dict(type="Normalize", **img_norm_cfg), + dict(type="Pad", size_divisor=32), + dict(type="DefaultFormatBundle"), + dict(type="Collect", keys=["img", "gt_bboxes", "gt_labels", "gt_masks"]), ] test_pipeline = [ - 
dict(type='LoadImageFromFile'), + dict(type="LoadImageFromFile"), dict( - type='MultiScaleFlipAug', + type="MultiScaleFlipAug", img_scale=(1600, 900), flip=False, transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) + dict(type="Resize", keep_ratio=True), + dict(type="RandomFlip"), + dict(type="Normalize", **img_norm_cfg), + dict(type="Pad", size_divisor=32), + dict(type="ImageToTensor", keys=["img"]), + dict(type="Collect", keys=["img"]), + ], + ), ] data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( type=dataset_type, - ann_file=data_root + 'annotations/nuimages_v1.0-train.json', + ann_file=data_root + "annotations/nuimages_v1.0-train.json", img_prefix=data_root, classes=class_names, - pipeline=train_pipeline), + pipeline=train_pipeline, + ), val=dict( type=dataset_type, - ann_file=data_root + 'annotations/nuimages_v1.0-val.json', + ann_file=data_root + "annotations/nuimages_v1.0-val.json", img_prefix=data_root, classes=class_names, - pipeline=test_pipeline), + pipeline=test_pipeline, + ), test=dict( type=dataset_type, - ann_file=data_root + 'annotations/nuimages_v1.0-val.json', + ann_file=data_root + "annotations/nuimages_v1.0-val.json", img_prefix=data_root, classes=class_names, - pipeline=test_pipeline)) -evaluation = dict(metric=['bbox', 'segm']) + pipeline=test_pipeline, + ), +) +evaluation = dict(metric=["bbox", "segm"]) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/nus-3d.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/nus-3d.py index 1548171..397cb41 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/nus-3d.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/nus-3d.py @@ -3,20 +3,25 @@ point_cloud_range = [-50, -50, -5, 50, 50, 3] # For nuScenes we usually do 10-class detection class_names = [ - 'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle', - 'motorcycle', 'pedestrian', 'traffic_cone', 'barrier' + "car", + "truck", + "trailer", + "bus", + "construction_vehicle", + "bicycle", + "motorcycle", + "pedestrian", + "traffic_cone", + "barrier", ] -dataset_type = 'NuScenesDataset' -data_root = 'data/nuscenes/' +dataset_type = "NuScenesDataset" +data_root = "data/nuscenes/" # Input modality for nuScenes dataset, this is consistent with the submission # format which requires the information in input_modality. input_modality = dict( - use_lidar=True, - use_camera=False, - use_radar=False, - use_map=False, - use_external=False) -file_client_args = dict(backend='disk') + use_lidar=True, use_camera=False, use_radar=False, use_map=False, use_external=False +) +file_client_args = dict(backend="disk") # Uncomment the following if use ceph or other file clients. # See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient # for more details. 
@@ -28,79 +33,83 @@ # })) train_pipeline = [ dict( - type='LoadPointsFromFile', - coord_type='LIDAR', + type="LoadPointsFromFile", + coord_type="LIDAR", load_dim=5, use_dim=5, - file_client_args=file_client_args), + file_client_args=file_client_args, + ), dict( - type='LoadPointsFromMultiSweeps', + type="LoadPointsFromMultiSweeps", sweeps_num=10, - file_client_args=file_client_args), - dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + file_client_args=file_client_args, + ), + dict(type="LoadAnnotations3D", with_bbox_3d=True, with_label_3d=True), dict( - type='GlobalRotScaleTrans', + type="GlobalRotScaleTrans", rot_range=[-0.3925, 0.3925], scale_ratio_range=[0.95, 1.05], - translation_std=[0, 0, 0]), - dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), - dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), - dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), - dict(type='ObjectNameFilter', classes=class_names), - dict(type='PointShuffle'), - dict(type='DefaultFormatBundle3D', class_names=class_names), - dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) + translation_std=[0, 0, 0], + ), + dict(type="RandomFlip3D", flip_ratio_bev_horizontal=0.5), + dict(type="PointsRangeFilter", point_cloud_range=point_cloud_range), + dict(type="ObjectRangeFilter", point_cloud_range=point_cloud_range), + dict(type="ObjectNameFilter", classes=class_names), + dict(type="PointShuffle"), + dict(type="DefaultFormatBundle3D", class_names=class_names), + dict(type="Collect3D", keys=["points", "gt_bboxes_3d", "gt_labels_3d"]), ] test_pipeline = [ dict( - type='LoadPointsFromFile', - coord_type='LIDAR', + type="LoadPointsFromFile", + coord_type="LIDAR", load_dim=5, use_dim=5, - file_client_args=file_client_args), + file_client_args=file_client_args, + ), dict( - type='LoadPointsFromMultiSweeps', + type="LoadPointsFromMultiSweeps", sweeps_num=10, - file_client_args=file_client_args), + file_client_args=file_client_args, + ), dict( - type='MultiScaleFlipAug3D', + type="MultiScaleFlipAug3D", img_scale=(1333, 800), pts_scale_ratio=1, flip=False, transforms=[ dict( - type='GlobalRotScaleTrans', + type="GlobalRotScaleTrans", rot_range=[0, 0], - scale_ratio_range=[1., 1.], - translation_std=[0, 0, 0]), - dict(type='RandomFlip3D'), + scale_ratio_range=[1.0, 1.0], + translation_std=[0, 0, 0], + ), + dict(type="RandomFlip3D"), + dict(type="PointsRangeFilter", point_cloud_range=point_cloud_range), dict( - type='PointsRangeFilter', point_cloud_range=point_cloud_range), - dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='Collect3D', keys=['points']) - ]) + type="DefaultFormatBundle3D", class_names=class_names, with_label=False + ), + dict(type="Collect3D", keys=["points"]), + ], + ), ] # construct a pipeline for data and gt loading in show function # please keep its loading function consistent with test_pipeline (e.g. 
client) eval_pipeline = [ dict( - type='LoadPointsFromFile', - coord_type='LIDAR', + type="LoadPointsFromFile", + coord_type="LIDAR", load_dim=5, use_dim=5, - file_client_args=file_client_args), + file_client_args=file_client_args, + ), dict( - type='LoadPointsFromMultiSweeps', + type="LoadPointsFromMultiSweeps", sweeps_num=10, - file_client_args=file_client_args), - dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='Collect3D', keys=['points']) + file_client_args=file_client_args, + ), + dict(type="DefaultFormatBundle3D", class_names=class_names, with_label=False), + dict(type="Collect3D", keys=["points"]), ] data = dict( @@ -109,32 +118,36 @@ train=dict( type=dataset_type, data_root=data_root, - ann_file=data_root + 'nuscenes_infos_train.pkl', + ann_file=data_root + "nuscenes_infos_train.pkl", pipeline=train_pipeline, classes=class_names, modality=input_modality, test_mode=False, # we use box_type_3d='LiDAR' in kitti and nuscenes dataset # and box_type_3d='Depth' in sunrgbd and scannet dataset. - box_type_3d='LiDAR'), + box_type_3d="LiDAR", + ), val=dict( type=dataset_type, data_root=data_root, - ann_file=data_root + 'nuscenes_infos_val.pkl', + ann_file=data_root + "nuscenes_infos_val.pkl", pipeline=test_pipeline, classes=class_names, modality=input_modality, test_mode=True, - box_type_3d='LiDAR'), + box_type_3d="LiDAR", + ), test=dict( type=dataset_type, data_root=data_root, - ann_file=data_root + 'nuscenes_infos_val.pkl', + ann_file=data_root + "nuscenes_infos_val.pkl", pipeline=test_pipeline, classes=class_names, modality=input_modality, test_mode=True, - box_type_3d='LiDAR')) + box_type_3d="LiDAR", + ), +) # For nuScenes dataset, we usually evaluate the model at the end of training. # Since the models are trained by 24 epochs by default, we set evaluation # interval to be 24. Please change the interval accordingly if you do not diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/nus-mono3d.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/nus-mono3d.py index 1363a94..7282953 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/nus-mono3d.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/nus-mono3d.py @@ -1,67 +1,78 @@ -dataset_type = 'CustomNuScenesMonoDataset' -data_root = 'data/nuscenes/' +dataset_type = "CustomNuScenesMonoDataset" +data_root = "data/nuscenes/" class_names = [ - 'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle', - 'motorcycle', 'pedestrian', 'traffic_cone', 'barrier' + "car", + "truck", + "trailer", + "bus", + "construction_vehicle", + "bicycle", + "motorcycle", + "pedestrian", + "traffic_cone", + "barrier", ] # Input modality for nuScenes dataset, this is consistent with the submission # format which requires the information in input_modality. 
input_modality = dict( - use_lidar=False, - use_camera=True, - use_radar=False, - use_map=False, - use_external=False) + use_lidar=False, use_camera=True, use_radar=False, use_map=False, use_external=False +) img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True +) train_pipeline = [ - dict(type='LoadImageFromFileMono3D'), + dict(type="LoadImageFromFileMono3D"), dict( - type='LoadAnnotations3D', + type="LoadAnnotations3D", with_bbox=True, with_label=True, with_attr_label=True, with_bbox_3d=True, with_label_3d=True, - with_bbox_depth=True), - dict(type='Resize', img_scale=(1600, 900), keep_ratio=True), - dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle3D', class_names=class_names), + with_bbox_depth=True, + ), + dict(type="Resize", img_scale=(1600, 900), keep_ratio=True), + dict(type="RandomFlip3D", flip_ratio_bev_horizontal=0.5), + dict(type="Normalize", **img_norm_cfg), + dict(type="Pad", size_divisor=32), + dict(type="DefaultFormatBundle3D", class_names=class_names), dict( - type='Collect3D', + type="Collect3D", keys=[ - 'img', 'gt_bboxes', 'gt_labels', 'attr_labels', 'gt_bboxes_3d', - 'gt_labels_3d', 'centers2d', 'depths' - ]), + "img", + "gt_bboxes", + "gt_labels", + "attr_labels", + "gt_bboxes_3d", + "gt_labels_3d", + "centers2d", + "depths", + ], + ), ] test_pipeline = [ - dict(type='LoadImageFromFileMono3D'), + dict(type="LoadImageFromFileMono3D"), dict( - type='MultiScaleFlipAug', + type="MultiScaleFlipAug", scale_factor=1.0, flip=False, transforms=[ - dict(type='RandomFlip3D'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), + dict(type="RandomFlip3D"), + dict(type="Normalize", **img_norm_cfg), + dict(type="Pad", size_divisor=32), dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='Collect3D', keys=['img']), - ]) + type="DefaultFormatBundle3D", class_names=class_names, with_label=False + ), + dict(type="Collect3D", keys=["img"]), + ], + ), ] # construct a pipeline for data and gt loading in show function # please keep its loading function consistent with test_pipeline (e.g. 
client) eval_pipeline = [ - dict(type='LoadImageFromFileMono3D'), - dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='Collect3D', keys=['img']) + dict(type="LoadImageFromFileMono3D"), + dict(type="DefaultFormatBundle3D", class_names=class_names, with_label=False), + dict(type="Collect3D", keys=["img"]), ] data = dict( @@ -70,31 +81,35 @@ train=dict( type=dataset_type, data_root=data_root, - ann_file=data_root + 'nuscenes_infos_train_mono3d.coco.json', + ann_file=data_root + "nuscenes_infos_train_mono3d.coco.json", img_prefix=data_root, classes=class_names, pipeline=train_pipeline, modality=input_modality, test_mode=False, - box_type_3d='Camera'), + box_type_3d="Camera", + ), val=dict( type=dataset_type, data_root=data_root, - ann_file=data_root + 'nuscenes_infos_val_mono3d.coco.json', + ann_file=data_root + "nuscenes_infos_val_mono3d.coco.json", img_prefix=data_root, classes=class_names, pipeline=test_pipeline, modality=input_modality, test_mode=True, - box_type_3d='Camera'), + box_type_3d="Camera", + ), test=dict( type=dataset_type, data_root=data_root, - ann_file=data_root + 'nuscenes_infos_val_mono3d.coco.json', + ann_file=data_root + "nuscenes_infos_val_mono3d.coco.json", img_prefix=data_root, classes=class_names, pipeline=test_pipeline, modality=input_modality, test_mode=True, - box_type_3d='Camera')) + box_type_3d="Camera", + ), +) evaluation = dict(interval=2) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/range100_lyft-3d.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/range100_lyft-3d.py index efa63ea..9162c62 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/range100_lyft-3d.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/range100_lyft-3d.py @@ -3,20 +3,24 @@ point_cloud_range = [-100, -100, -5, 100, 100, 3] # For Lyft we usually do 9-class detection class_names = [ - 'car', 'truck', 'bus', 'emergency_vehicle', 'other_vehicle', 'motorcycle', - 'bicycle', 'pedestrian', 'animal' + "car", + "truck", + "bus", + "emergency_vehicle", + "other_vehicle", + "motorcycle", + "bicycle", + "pedestrian", + "animal", ] -dataset_type = 'LyftDataset' -data_root = 'data/lyft/' +dataset_type = "LyftDataset" +data_root = "data/lyft/" # Input modality for Lyft dataset, this is consistent with the submission # format which requires the information in input_modality. input_modality = dict( - use_lidar=True, - use_camera=False, - use_radar=False, - use_map=False, - use_external=False) -file_client_args = dict(backend='disk') + use_lidar=True, use_camera=False, use_radar=False, use_map=False, use_external=False +) +file_client_args = dict(backend="disk") # Uncomment the following if use ceph or other file clients. # See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient # for more details. 
@@ -28,78 +32,82 @@ # })) train_pipeline = [ dict( - type='LoadPointsFromFile', - coord_type='LIDAR', + type="LoadPointsFromFile", + coord_type="LIDAR", load_dim=5, use_dim=5, - file_client_args=file_client_args), + file_client_args=file_client_args, + ), dict( - type='LoadPointsFromMultiSweeps', + type="LoadPointsFromMultiSweeps", sweeps_num=10, - file_client_args=file_client_args), - dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + file_client_args=file_client_args, + ), + dict(type="LoadAnnotations3D", with_bbox_3d=True, with_label_3d=True), dict( - type='GlobalRotScaleTrans', + type="GlobalRotScaleTrans", rot_range=[-0.3925, 0.3925], scale_ratio_range=[0.95, 1.05], - translation_std=[0, 0, 0]), - dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), - dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), - dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), - dict(type='PointShuffle'), - dict(type='DefaultFormatBundle3D', class_names=class_names), - dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) + translation_std=[0, 0, 0], + ), + dict(type="RandomFlip3D", flip_ratio_bev_horizontal=0.5), + dict(type="PointsRangeFilter", point_cloud_range=point_cloud_range), + dict(type="ObjectRangeFilter", point_cloud_range=point_cloud_range), + dict(type="PointShuffle"), + dict(type="DefaultFormatBundle3D", class_names=class_names), + dict(type="Collect3D", keys=["points", "gt_bboxes_3d", "gt_labels_3d"]), ] test_pipeline = [ dict( - type='LoadPointsFromFile', - coord_type='LIDAR', + type="LoadPointsFromFile", + coord_type="LIDAR", load_dim=5, use_dim=5, - file_client_args=file_client_args), + file_client_args=file_client_args, + ), dict( - type='LoadPointsFromMultiSweeps', + type="LoadPointsFromMultiSweeps", sweeps_num=10, - file_client_args=file_client_args), + file_client_args=file_client_args, + ), dict( - type='MultiScaleFlipAug3D', + type="MultiScaleFlipAug3D", img_scale=(1333, 800), pts_scale_ratio=1, flip=False, transforms=[ dict( - type='GlobalRotScaleTrans', + type="GlobalRotScaleTrans", rot_range=[0, 0], - scale_ratio_range=[1., 1.], - translation_std=[0, 0, 0]), - dict(type='RandomFlip3D'), + scale_ratio_range=[1.0, 1.0], + translation_std=[0, 0, 0], + ), + dict(type="RandomFlip3D"), + dict(type="PointsRangeFilter", point_cloud_range=point_cloud_range), dict( - type='PointsRangeFilter', point_cloud_range=point_cloud_range), - dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='Collect3D', keys=['points']) - ]) + type="DefaultFormatBundle3D", class_names=class_names, with_label=False + ), + dict(type="Collect3D", keys=["points"]), + ], + ), ] # construct a pipeline for data and gt loading in show function # please keep its loading function consistent with test_pipeline (e.g. 
client) eval_pipeline = [ dict( - type='LoadPointsFromFile', - coord_type='LIDAR', + type="LoadPointsFromFile", + coord_type="LIDAR", load_dim=5, use_dim=5, - file_client_args=file_client_args), + file_client_args=file_client_args, + ), dict( - type='LoadPointsFromMultiSweeps', + type="LoadPointsFromMultiSweeps", sweeps_num=10, - file_client_args=file_client_args), - dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='Collect3D', keys=['points']) + file_client_args=file_client_args, + ), + dict(type="DefaultFormatBundle3D", class_names=class_names, with_label=False), + dict(type="Collect3D", keys=["points"]), ] data = dict( @@ -108,27 +116,31 @@ train=dict( type=dataset_type, data_root=data_root, - ann_file=data_root + 'lyft_infos_train.pkl', + ann_file=data_root + "lyft_infos_train.pkl", pipeline=train_pipeline, classes=class_names, modality=input_modality, - test_mode=False), + test_mode=False, + ), val=dict( type=dataset_type, data_root=data_root, - ann_file=data_root + 'lyft_infos_val.pkl', + ann_file=data_root + "lyft_infos_val.pkl", pipeline=test_pipeline, classes=class_names, modality=input_modality, - test_mode=True), + test_mode=True, + ), test=dict( type=dataset_type, data_root=data_root, - ann_file=data_root + 'lyft_infos_test.pkl', + ann_file=data_root + "lyft_infos_test.pkl", pipeline=test_pipeline, classes=class_names, modality=input_modality, - test_mode=True)) + test_mode=True, + ), +) # For Lyft dataset, we usually evaluate the model at the end of training. # Since the models are trained by 24 epochs by default, we set evaluation # interval to be 24. Please change the interval accordingly if you do not diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/s3dis-3d-5class.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/s3dis-3d-5class.py index 2422766..edbab86 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/s3dis-3d-5class.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/s3dis-3d-5class.py @@ -1,114 +1,125 @@ # dataset settings -dataset_type = 'S3DISDataset' -data_root = './data/s3dis/' -class_names = ('table', 'chair', 'sofa', 'bookcase', 'board') +dataset_type = "S3DISDataset" +data_root = "./data/s3dis/" +class_names = ("table", "chair", "sofa", "bookcase", "board") train_area = [1, 2, 3, 4, 6] test_area = 5 train_pipeline = [ dict( - type='LoadPointsFromFile', - coord_type='DEPTH', + type="LoadPointsFromFile", + coord_type="DEPTH", shift_height=True, load_dim=6, - use_dim=[0, 1, 2, 3, 4, 5]), - dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), - dict(type='PointSample', num_points=40000), + use_dim=[0, 1, 2, 3, 4, 5], + ), + dict(type="LoadAnnotations3D", with_bbox_3d=True, with_label_3d=True), + dict(type="PointSample", num_points=40000), dict( - type='RandomFlip3D', + type="RandomFlip3D", sync_2d=False, flip_ratio_bev_horizontal=0.5, - flip_ratio_bev_vertical=0.5), + flip_ratio_bev_vertical=0.5, + ), dict( - type='GlobalRotScaleTrans', + type="GlobalRotScaleTrans", # following ScanNet dataset the rotation range is 5 degrees rot_range=[-0.087266, 0.087266], scale_ratio_range=[1.0, 1.0], - shift_height=True), - dict(type='DefaultFormatBundle3D', class_names=class_names), - dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) + shift_height=True, + ), + dict(type="DefaultFormatBundle3D", class_names=class_names), + dict(type="Collect3D", keys=["points", "gt_bboxes_3d", "gt_labels_3d"]), ] 
 test_pipeline = [
     dict(
-        type='LoadPointsFromFile',
-        coord_type='DEPTH',
+        type="LoadPointsFromFile",
+        coord_type="DEPTH",
         shift_height=True,
         load_dim=6,
-        use_dim=[0, 1, 2, 3, 4, 5]),
+        use_dim=[0, 1, 2, 3, 4, 5],
+    ),
     dict(
-        type='MultiScaleFlipAug3D',
+        type="MultiScaleFlipAug3D",
         img_scale=(1333, 800),
         pts_scale_ratio=1,
         flip=False,
         transforms=[
             dict(
-                type='GlobalRotScaleTrans',
+                type="GlobalRotScaleTrans",
                 rot_range=[0, 0],
-                scale_ratio_range=[1., 1.],
-                translation_std=[0, 0, 0]),
+                scale_ratio_range=[1.0, 1.0],
+                translation_std=[0, 0, 0],
+            ),
             dict(
-                type='RandomFlip3D',
+                type="RandomFlip3D",
                 sync_2d=False,
                 flip_ratio_bev_horizontal=0.5,
-                flip_ratio_bev_vertical=0.5),
-            dict(type='PointSample', num_points=40000),
+                flip_ratio_bev_vertical=0.5,
+            ),
+            dict(type="PointSample", num_points=40000),
             dict(
-                type='DefaultFormatBundle3D',
-                class_names=class_names,
-                with_label=False),
-            dict(type='Collect3D', keys=['points'])
-        ])
+                type="DefaultFormatBundle3D", class_names=class_names, with_label=False
+            ),
+            dict(type="Collect3D", keys=["points"]),
+        ],
+    ),
 ]
 # construct a pipeline for data and gt loading in show function
 # please keep its loading function consistent with test_pipeline (e.g. client)
 eval_pipeline = [
     dict(
-        type='LoadPointsFromFile',
-        coord_type='DEPTH',
+        type="LoadPointsFromFile",
+        coord_type="DEPTH",
         shift_height=False,
         load_dim=6,
-        use_dim=[0, 1, 2, 3, 4, 5]),
-    dict(
-        type='DefaultFormatBundle3D',
-        class_names=class_names,
-        with_label=False),
-    dict(type='Collect3D', keys=['points'])
+        use_dim=[0, 1, 2, 3, 4, 5],
+    ),
+    dict(type="DefaultFormatBundle3D", class_names=class_names, with_label=False),
+    dict(type="Collect3D", keys=["points"]),
 ]
 
 data = dict(
     samples_per_gpu=8,
     workers_per_gpu=4,
     train=dict(
-        type='RepeatDataset',
+        type="RepeatDataset",
         times=5,
         dataset=dict(
-            type='ConcatDataset',
+            type="ConcatDataset",
             datasets=[
                 dict(
                     type=dataset_type,
                     data_root=data_root,
-                    ann_file=data_root + f's3dis_infos_Area_{i}.pkl',
+                    ann_file=data_root + f"s3dis_infos_Area_{i}.pkl",
                     pipeline=train_pipeline,
                     filter_empty_gt=False,
                     classes=class_names,
-                    box_type_3d='Depth') for i in train_area
+                    box_type_3d="Depth",
+                )
+                for i in train_area
             ],
-            separate_eval=False)),
+            separate_eval=False,
+        ),
+    ),
     val=dict(
         type=dataset_type,
         data_root=data_root,
-        ann_file=data_root + f's3dis_infos_Area_{test_area}.pkl',
+        ann_file=data_root + f"s3dis_infos_Area_{test_area}.pkl",
         pipeline=test_pipeline,
         classes=class_names,
         test_mode=True,
-        box_type_3d='Depth'),
+        box_type_3d="Depth",
+    ),
     test=dict(
         type=dataset_type,
         data_root=data_root,
-        ann_file=data_root + f's3dis_infos_Area_{test_area}.pkl',
+        ann_file=data_root + f"s3dis_infos_Area_{test_area}.pkl",
        pipeline=test_pipeline,
         classes=class_names,
         test_mode=True,
-        box_type_3d='Depth'))
+        box_type_3d="Depth",
+    ),
+)
 
 evaluation = dict(pipeline=eval_pipeline)
diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/s3dis_seg-3d-13class.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/s3dis_seg-3d-13class.py
index 39bf556..67aad37 100644
--- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/s3dis_seg-3d-13class.py
+++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/s3dis_seg-3d-13class.py
@@ -1,101 +1,121 @@
 # dataset settings
-dataset_type = 'S3DISSegDataset'
-data_root = './data/s3dis/'
-class_names = ('ceiling', 'floor', 'wall', 'beam', 'column', 'window', 'door',
-               'table', 'chair', 'sofa', 'bookcase', 'board', 'clutter')
+dataset_type = "S3DISSegDataset"
+data_root = "./data/s3dis/"
+class_names = (
+    "ceiling",
+    "floor",
+    "wall",
+    "beam",
+    "column",
+    "window",
+    "door",
+    "table",
+    "chair",
+    "sofa",
+    "bookcase",
+    "board",
+    "clutter",
+)
 num_points = 4096
 train_area = [1, 2, 3, 4, 6]
 test_area = 5
 
 train_pipeline = [
     dict(
-        type='LoadPointsFromFile',
-        coord_type='DEPTH',
+        type="LoadPointsFromFile",
+        coord_type="DEPTH",
         shift_height=False,
         use_color=True,
         load_dim=6,
-        use_dim=[0, 1, 2, 3, 4, 5]),
+        use_dim=[0, 1, 2, 3, 4, 5],
+    ),
     dict(
-        type='LoadAnnotations3D',
+        type="LoadAnnotations3D",
         with_bbox_3d=False,
         with_label_3d=False,
         with_mask_3d=False,
-        with_seg_3d=True),
+        with_seg_3d=True,
+    ),
     dict(
-        type='PointSegClassMapping',
+        type="PointSegClassMapping",
         valid_cat_ids=tuple(range(len(class_names))),
-        max_cat_id=13),
+        max_cat_id=13,
+    ),
     dict(
-        type='IndoorPatchPointSample',
+        type="IndoorPatchPointSample",
         num_points=num_points,
         block_size=1.0,
         ignore_index=len(class_names),
         use_normalized_coord=True,
         enlarge_size=0.2,
-        min_unique_num=None),
-    dict(type='NormalizePointsColor', color_mean=None),
-    dict(type='DefaultFormatBundle3D', class_names=class_names),
-    dict(type='Collect3D', keys=['points', 'pts_semantic_mask'])
+        min_unique_num=None,
+    ),
+    dict(type="NormalizePointsColor", color_mean=None),
+    dict(type="DefaultFormatBundle3D", class_names=class_names),
+    dict(type="Collect3D", keys=["points", "pts_semantic_mask"]),
 ]
 
 test_pipeline = [
     dict(
-        type='LoadPointsFromFile',
-        coord_type='DEPTH',
+        type="LoadPointsFromFile",
+        coord_type="DEPTH",
         shift_height=False,
         use_color=True,
         load_dim=6,
-        use_dim=[0, 1, 2, 3, 4, 5]),
-    dict(type='NormalizePointsColor', color_mean=None),
+        use_dim=[0, 1, 2, 3, 4, 5],
+    ),
+    dict(type="NormalizePointsColor", color_mean=None),
     dict(
         # a wrapper in order to successfully call test function
         # actually we don't perform test-time-aug
-        type='MultiScaleFlipAug3D',
+        type="MultiScaleFlipAug3D",
         img_scale=(1333, 800),
         pts_scale_ratio=1,
         flip=False,
         transforms=[
             dict(
-                type='GlobalRotScaleTrans',
+                type="GlobalRotScaleTrans",
                 rot_range=[0, 0],
-                scale_ratio_range=[1., 1.],
-                translation_std=[0, 0, 0]),
+                scale_ratio_range=[1.0, 1.0],
+                translation_std=[0, 0, 0],
+            ),
             dict(
-                type='RandomFlip3D',
+                type="RandomFlip3D",
                 sync_2d=False,
                 flip_ratio_bev_horizontal=0.0,
-                flip_ratio_bev_vertical=0.0),
+                flip_ratio_bev_vertical=0.0,
+            ),
             dict(
-                type='DefaultFormatBundle3D',
-                class_names=class_names,
-                with_label=False),
-            dict(type='Collect3D', keys=['points'])
-        ])
+                type="DefaultFormatBundle3D", class_names=class_names, with_label=False
+            ),
+            dict(type="Collect3D", keys=["points"]),
+        ],
+    ),
 ]
 # construct a pipeline for data and gt loading in show function
 # please keep its loading function consistent with test_pipeline (e.g. client)
 # we need to load gt seg_mask!
 eval_pipeline = [
     dict(
-        type='LoadPointsFromFile',
-        coord_type='DEPTH',
+        type="LoadPointsFromFile",
+        coord_type="DEPTH",
         shift_height=False,
         use_color=True,
         load_dim=6,
-        use_dim=[0, 1, 2, 3, 4, 5]),
+        use_dim=[0, 1, 2, 3, 4, 5],
+    ),
     dict(
-        type='LoadAnnotations3D',
+        type="LoadAnnotations3D",
         with_bbox_3d=False,
         with_label_3d=False,
         with_mask_3d=False,
-        with_seg_3d=True),
+        with_seg_3d=True,
+    ),
     dict(
-        type='PointSegClassMapping',
+        type="PointSegClassMapping",
         valid_cat_ids=tuple(range(len(class_names))),
-        max_cat_id=13),
-    dict(
-        type='DefaultFormatBundle3D',
-        with_label=False,
-        class_names=class_names),
-    dict(type='Collect3D', keys=['points', 'pts_semantic_mask'])
+        max_cat_id=13,
+    ),
+    dict(type="DefaultFormatBundle3D", with_label=False, class_names=class_names),
+    dict(type="Collect3D", keys=["points", "pts_semantic_mask"]),
 ]
 
 data = dict(
@@ -106,34 +126,35 @@
     train=dict(
         type=dataset_type,
         data_root=data_root,
-        ann_files=[
-            data_root + f's3dis_infos_Area_{i}.pkl' for i in train_area
-        ],
+        ann_files=[data_root + f"s3dis_infos_Area_{i}.pkl" for i in train_area],
         pipeline=train_pipeline,
         classes=class_names,
         test_mode=False,
         ignore_index=len(class_names),
         scene_idxs=[
-            data_root + f'seg_info/Area_{i}_resampled_scene_idxs.npy'
+            data_root + f"seg_info/Area_{i}_resampled_scene_idxs.npy"
             for i in train_area
-        ]),
+        ],
+    ),
     val=dict(
         type=dataset_type,
         data_root=data_root,
-        ann_files=data_root + f's3dis_infos_Area_{test_area}.pkl',
+        ann_files=data_root + f"s3dis_infos_Area_{test_area}.pkl",
         pipeline=test_pipeline,
         classes=class_names,
         test_mode=True,
         ignore_index=len(class_names),
-        scene_idxs=data_root +
-        f'seg_info/Area_{test_area}_resampled_scene_idxs.npy'),
+        scene_idxs=data_root + f"seg_info/Area_{test_area}_resampled_scene_idxs.npy",
+    ),
     test=dict(
         type=dataset_type,
         data_root=data_root,
-        ann_files=data_root + f's3dis_infos_Area_{test_area}.pkl',
+        ann_files=data_root + f"s3dis_infos_Area_{test_area}.pkl",
         pipeline=test_pipeline,
         classes=class_names,
         test_mode=True,
-        ignore_index=len(class_names)))
+        ignore_index=len(class_names),
+    ),
+)
 
 evaluation = dict(pipeline=eval_pipeline)
diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/scannet-3d-18class.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/scannet-3d-18class.py
index 93da1e5..c2ad623 100644
--- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/scannet-3d-18class.py
+++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/scannet-3d-18class.py
@@ -1,128 +1,158 @@
 # dataset settings
-dataset_type = 'ScanNetDataset'
-data_root = './data/scannet/'
-class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',
-               'bookshelf', 'picture', 'counter', 'desk', 'curtain',
-               'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub',
-               'garbagebin')
+dataset_type = "ScanNetDataset"
+data_root = "./data/scannet/"
+class_names = (
+    "cabinet",
+    "bed",
+    "chair",
+    "sofa",
+    "table",
+    "door",
+    "window",
+    "bookshelf",
+    "picture",
+    "counter",
+    "desk",
+    "curtain",
+    "refrigerator",
+    "showercurtrain",
+    "toilet",
+    "sink",
+    "bathtub",
+    "garbagebin",
+)
 train_pipeline = [
     dict(
-        type='LoadPointsFromFile',
-        coord_type='DEPTH',
+        type="LoadPointsFromFile",
+        coord_type="DEPTH",
         shift_height=True,
         load_dim=6,
-        use_dim=[0, 1, 2]),
+        use_dim=[0, 1, 2],
+    ),
     dict(
-        type='LoadAnnotations3D',
+        type="LoadAnnotations3D",
         with_bbox_3d=True,
         with_label_3d=True,
         with_mask_3d=True,
-        with_seg_3d=True),
-    dict(type='GlobalAlignment', rotation_axis=2),
+        with_seg_3d=True,
+    ),
+    dict(type="GlobalAlignment", rotation_axis=2),
     dict(
-        type='PointSegClassMapping',
-        valid_cat_ids=(3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34,
-                       36, 39),
-        max_cat_id=40),
-    dict(type='PointSample', num_points=40000),
+        type="PointSegClassMapping",
+        valid_cat_ids=(3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39),
+        max_cat_id=40,
+    ),
+    dict(type="PointSample", num_points=40000),
     dict(
-        type='RandomFlip3D',
+        type="RandomFlip3D",
         sync_2d=False,
         flip_ratio_bev_horizontal=0.5,
-        flip_ratio_bev_vertical=0.5),
+        flip_ratio_bev_vertical=0.5,
+    ),
     dict(
-        type='GlobalRotScaleTrans',
+        type="GlobalRotScaleTrans",
         rot_range=[-0.087266, 0.087266],
         scale_ratio_range=[1.0, 1.0],
-        shift_height=True),
-    dict(type='DefaultFormatBundle3D', class_names=class_names),
+        shift_height=True,
+    ),
+    dict(type="DefaultFormatBundle3D", class_names=class_names),
     dict(
-        type='Collect3D',
+        type="Collect3D",
         keys=[
-            'points', 'gt_bboxes_3d', 'gt_labels_3d', 'pts_semantic_mask',
-            'pts_instance_mask'
-        ])
+            "points",
+            "gt_bboxes_3d",
+            "gt_labels_3d",
+            "pts_semantic_mask",
+            "pts_instance_mask",
+        ],
+    ),
 ]
 test_pipeline = [
     dict(
-        type='LoadPointsFromFile',
-        coord_type='DEPTH',
+        type="LoadPointsFromFile",
+        coord_type="DEPTH",
         shift_height=True,
         load_dim=6,
-        use_dim=[0, 1, 2]),
-    dict(type='GlobalAlignment', rotation_axis=2),
+        use_dim=[0, 1, 2],
+    ),
+    dict(type="GlobalAlignment", rotation_axis=2),
     dict(
-        type='MultiScaleFlipAug3D',
+        type="MultiScaleFlipAug3D",
         img_scale=(1333, 800),
         pts_scale_ratio=1,
         flip=False,
         transforms=[
             dict(
-                type='GlobalRotScaleTrans',
+                type="GlobalRotScaleTrans",
                 rot_range=[0, 0],
-                scale_ratio_range=[1., 1.],
-                translation_std=[0, 0, 0]),
+                scale_ratio_range=[1.0, 1.0],
+                translation_std=[0, 0, 0],
+            ),
             dict(
-                type='RandomFlip3D',
+                type="RandomFlip3D",
                 sync_2d=False,
                 flip_ratio_bev_horizontal=0.5,
-                flip_ratio_bev_vertical=0.5),
-            dict(type='PointSample', num_points=40000),
+                flip_ratio_bev_vertical=0.5,
+            ),
+            dict(type="PointSample", num_points=40000),
             dict(
-                type='DefaultFormatBundle3D',
-                class_names=class_names,
-                with_label=False),
-            dict(type='Collect3D', keys=['points'])
-        ])
+                type="DefaultFormatBundle3D", class_names=class_names, with_label=False
+            ),
+            dict(type="Collect3D", keys=["points"]),
+        ],
+    ),
 ]
 # construct a pipeline for data and gt loading in show function
 # please keep its loading function consistent with test_pipeline (e.g. client)
 eval_pipeline = [
     dict(
-        type='LoadPointsFromFile',
-        coord_type='DEPTH',
+        type="LoadPointsFromFile",
+        coord_type="DEPTH",
         shift_height=False,
         load_dim=6,
-        use_dim=[0, 1, 2]),
-    dict(type='GlobalAlignment', rotation_axis=2),
-    dict(
-        type='DefaultFormatBundle3D',
-        class_names=class_names,
-        with_label=False),
-    dict(type='Collect3D', keys=['points'])
+        use_dim=[0, 1, 2],
+    ),
+    dict(type="GlobalAlignment", rotation_axis=2),
+    dict(type="DefaultFormatBundle3D", class_names=class_names, with_label=False),
+    dict(type="Collect3D", keys=["points"]),
 ]
 data = dict(
     samples_per_gpu=8,
     workers_per_gpu=4,
     train=dict(
-        type='RepeatDataset',
+        type="RepeatDataset",
         times=5,
         dataset=dict(
             type=dataset_type,
             data_root=data_root,
-            ann_file=data_root + 'scannet_infos_train.pkl',
+            ann_file=data_root + "scannet_infos_train.pkl",
             pipeline=train_pipeline,
             filter_empty_gt=False,
             classes=class_names,
             # we use box_type_3d='LiDAR' in kitti and nuscenes dataset
             # and box_type_3d='Depth' in sunrgbd and scannet dataset.
-            box_type_3d='Depth')),
+            box_type_3d="Depth",
+        ),
+    ),
     val=dict(
         type=dataset_type,
         data_root=data_root,
-        ann_file=data_root + 'scannet_infos_val.pkl',
+        ann_file=data_root + "scannet_infos_val.pkl",
         pipeline=test_pipeline,
         classes=class_names,
         test_mode=True,
-        box_type_3d='Depth'),
+        box_type_3d="Depth",
+    ),
     test=dict(
         type=dataset_type,
         data_root=data_root,
-        ann_file=data_root + 'scannet_infos_val.pkl',
+        ann_file=data_root + "scannet_infos_val.pkl",
         pipeline=test_pipeline,
         classes=class_names,
         test_mode=True,
-        box_type_3d='Depth'))
+        box_type_3d="Depth",
+    ),
+)
 evaluation = dict(pipeline=eval_pipeline)
diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/scannet_seg-3d-20class.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/scannet_seg-3d-20class.py
index cf73b09..a7cabf1 100644
--- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/scannet_seg-3d-20class.py
+++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/scannet_seg-3d-20class.py
@@ -1,103 +1,168 @@
 # dataset settings
-dataset_type = 'ScanNetSegDataset'
-data_root = './data/scannet/'
-class_names = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table',
-               'door', 'window', 'bookshelf', 'picture', 'counter', 'desk',
-               'curtain', 'refrigerator', 'showercurtrain', 'toilet', 'sink',
-               'bathtub', 'otherfurniture')
+dataset_type = "ScanNetSegDataset"
+data_root = "./data/scannet/"
+class_names = (
+    "wall",
+    "floor",
+    "cabinet",
+    "bed",
+    "chair",
+    "sofa",
+    "table",
+    "door",
+    "window",
+    "bookshelf",
+    "picture",
+    "counter",
+    "desk",
+    "curtain",
+    "refrigerator",
+    "showercurtrain",
+    "toilet",
+    "sink",
+    "bathtub",
+    "otherfurniture",
+)
 num_points = 8192
 train_pipeline = [
     dict(
-        type='LoadPointsFromFile',
-        coord_type='DEPTH',
+        type="LoadPointsFromFile",
+        coord_type="DEPTH",
         shift_height=False,
         use_color=True,
         load_dim=6,
-        use_dim=[0, 1, 2, 3, 4, 5]),
+        use_dim=[0, 1, 2, 3, 4, 5],
+    ),
     dict(
-        type='LoadAnnotations3D',
+        type="LoadAnnotations3D",
         with_bbox_3d=False,
         with_label_3d=False,
         with_mask_3d=False,
-        with_seg_3d=True),
+        with_seg_3d=True,
+    ),
     dict(
-        type='PointSegClassMapping',
-        valid_cat_ids=(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28,
-                       33, 34, 36, 39),
-        max_cat_id=40),
+        type="PointSegClassMapping",
+        valid_cat_ids=(
+            1,
+            2,
+            3,
+            4,
+            5,
+            6,
+            7,
+            8,
+            9,
+            10,
+            11,
+            12,
+            14,
+            16,
+            24,
+            28,
+            33,
+            34,
+            36,
+            39,
+        ),
+        max_cat_id=40,
+    ),
     dict(
-        type='IndoorPatchPointSample',
+        type="IndoorPatchPointSample",
         num_points=num_points,
         block_size=1.5,
         ignore_index=len(class_names),
         use_normalized_coord=False,
         enlarge_size=0.2,
-        min_unique_num=None),
-    dict(type='NormalizePointsColor', color_mean=None),
-    dict(type='DefaultFormatBundle3D', class_names=class_names),
-    dict(type='Collect3D', keys=['points', 'pts_semantic_mask'])
+        min_unique_num=None,
+    ),
+    dict(type="NormalizePointsColor", color_mean=None),
+    dict(type="DefaultFormatBundle3D", class_names=class_names),
+    dict(type="Collect3D", keys=["points", "pts_semantic_mask"]),
 ]
 test_pipeline = [
     dict(
-        type='LoadPointsFromFile',
-        coord_type='DEPTH',
+        type="LoadPointsFromFile",
+        coord_type="DEPTH",
         shift_height=False,
         use_color=True,
         load_dim=6,
-        use_dim=[0, 1, 2, 3, 4, 5]),
-    dict(type='NormalizePointsColor', color_mean=None),
+        use_dim=[0, 1, 2, 3, 4, 5],
+    ),
+    dict(type="NormalizePointsColor", color_mean=None),
     dict(
         # a wrapper in order to successfully call test function
         # actually we don't perform test-time-aug
-        type='MultiScaleFlipAug3D',
+        type="MultiScaleFlipAug3D",
         img_scale=(1333, 800),
         pts_scale_ratio=1,
         flip=False,
         transforms=[
             dict(
-                type='GlobalRotScaleTrans',
+                type="GlobalRotScaleTrans",
                 rot_range=[0, 0],
-                scale_ratio_range=[1., 1.],
-                translation_std=[0, 0, 0]),
+                scale_ratio_range=[1.0, 1.0],
+                translation_std=[0, 0, 0],
+            ),
             dict(
-                type='RandomFlip3D',
+                type="RandomFlip3D",
                 sync_2d=False,
                 flip_ratio_bev_horizontal=0.0,
-                flip_ratio_bev_vertical=0.0),
+                flip_ratio_bev_vertical=0.0,
+            ),
             dict(
-                type='DefaultFormatBundle3D',
-                class_names=class_names,
-                with_label=False),
-            dict(type='Collect3D', keys=['points'])
-        ])
+                type="DefaultFormatBundle3D", class_names=class_names, with_label=False
+            ),
+            dict(type="Collect3D", keys=["points"]),
+        ],
+    ),
 ]
 # construct a pipeline for data and gt loading in show function
 # please keep its loading function consistent with test_pipeline (e.g. client)
 # we need to load gt seg_mask!
 eval_pipeline = [
     dict(
-        type='LoadPointsFromFile',
-        coord_type='DEPTH',
+        type="LoadPointsFromFile",
+        coord_type="DEPTH",
         shift_height=False,
         use_color=True,
         load_dim=6,
-        use_dim=[0, 1, 2, 3, 4, 5]),
+        use_dim=[0, 1, 2, 3, 4, 5],
+    ),
     dict(
-        type='LoadAnnotations3D',
+        type="LoadAnnotations3D",
         with_bbox_3d=False,
         with_label_3d=False,
         with_mask_3d=False,
-        with_seg_3d=True),
+        with_seg_3d=True,
+    ),
     dict(
-        type='PointSegClassMapping',
-        valid_cat_ids=(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28,
-                       33, 34, 36, 39),
-        max_cat_id=40),
-    dict(
-        type='DefaultFormatBundle3D',
-        with_label=False,
-        class_names=class_names),
-    dict(type='Collect3D', keys=['points', 'pts_semantic_mask'])
+        type="PointSegClassMapping",
+        valid_cat_ids=(
+            1,
+            2,
+            3,
+            4,
+            5,
+            6,
+            7,
+            8,
+            9,
+            10,
+            11,
+            12,
+            14,
+            16,
+            24,
+            28,
+            33,
+            34,
+            36,
+            39,
+        ),
+        max_cat_id=40,
+    ),
+    dict(type="DefaultFormatBundle3D", with_label=False, class_names=class_names),
+    dict(type="Collect3D", keys=["points", "pts_semantic_mask"]),
 ]
 
 data = dict(
@@ -106,27 +171,31 @@
     train=dict(
         type=dataset_type,
         data_root=data_root,
-        ann_file=data_root + 'scannet_infos_train.pkl',
+        ann_file=data_root + "scannet_infos_train.pkl",
         pipeline=train_pipeline,
         classes=class_names,
         test_mode=False,
         ignore_index=len(class_names),
-        scene_idxs=data_root + 'seg_info/train_resampled_scene_idxs.npy'),
+        scene_idxs=data_root + "seg_info/train_resampled_scene_idxs.npy",
+    ),
     val=dict(
         type=dataset_type,
         data_root=data_root,
-        ann_file=data_root + 'scannet_infos_val.pkl',
+        ann_file=data_root + "scannet_infos_val.pkl",
         pipeline=test_pipeline,
         classes=class_names,
         test_mode=True,
-        ignore_index=len(class_names)),
+        ignore_index=len(class_names),
+    ),
     test=dict(
         type=dataset_type,
         data_root=data_root,
-        ann_file=data_root + 'scannet_infos_val.pkl',
+        ann_file=data_root + "scannet_infos_val.pkl",
         pipeline=test_pipeline,
         classes=class_names,
         test_mode=True,
-        ignore_index=len(class_names)))
+        ignore_index=len(class_names),
+    ),
+)
 
 evaluation = dict(pipeline=eval_pipeline)
diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/sunrgbd-3d-10class.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/sunrgbd-3d-10class.py
index 7121b75..86793a1 100644
--- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/sunrgbd-3d-10class.py
+++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/sunrgbd-3d-10class.py
@@ -1,107 +1,116 @@
-dataset_type = 'SUNRGBDDataset'
-data_root = 'data/sunrgbd/'
-class_names = ('bed', 'table', 'sofa', 'chair', 'toilet', 'desk', 'dresser',
-               'night_stand', 'bookshelf', 'bathtub')
+dataset_type = "SUNRGBDDataset"
+data_root = "data/sunrgbd/"
+class_names = (
+    "bed",
+    "table",
+    "sofa",
+    "chair",
+    "toilet",
+    "desk",
+    "dresser",
+    "night_stand",
+    "bookshelf",
+    "bathtub",
+)
 train_pipeline = [
     dict(
-        type='LoadPointsFromFile',
-        coord_type='DEPTH',
+        type="LoadPointsFromFile",
+        coord_type="DEPTH",
         shift_height=True,
         load_dim=6,
-        use_dim=[0, 1, 2]),
-    dict(type='LoadAnnotations3D'),
-    dict(
-        type='RandomFlip3D',
-        sync_2d=False,
-        flip_ratio_bev_horizontal=0.5,
+        use_dim=[0, 1, 2],
     ),
+    dict(type="LoadAnnotations3D"),
+    dict(type="RandomFlip3D", sync_2d=False, flip_ratio_bev_horizontal=0.5,),
     dict(
-        type='GlobalRotScaleTrans',
+        type="GlobalRotScaleTrans",
         rot_range=[-0.523599, 0.523599],
         scale_ratio_range=[0.85, 1.15],
-        shift_height=True),
-    dict(type='PointSample', num_points=20000),
-    dict(type='DefaultFormatBundle3D', class_names=class_names),
-    dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
+        shift_height=True,
+    ),
+    dict(type="PointSample", num_points=20000),
+    dict(type="DefaultFormatBundle3D", class_names=class_names),
+    dict(type="Collect3D", keys=["points", "gt_bboxes_3d", "gt_labels_3d"]),
 ]
 test_pipeline = [
     dict(
-        type='LoadPointsFromFile',
-        coord_type='DEPTH',
+        type="LoadPointsFromFile",
+        coord_type="DEPTH",
         shift_height=True,
         load_dim=6,
-        use_dim=[0, 1, 2]),
+        use_dim=[0, 1, 2],
+    ),
     dict(
-        type='MultiScaleFlipAug3D',
+        type="MultiScaleFlipAug3D",
         img_scale=(1333, 800),
         pts_scale_ratio=1,
         flip=False,
         transforms=[
             dict(
-                type='GlobalRotScaleTrans',
+                type="GlobalRotScaleTrans",
                 rot_range=[0, 0],
-                scale_ratio_range=[1., 1.],
-                translation_std=[0, 0, 0]),
-            dict(
-                type='RandomFlip3D',
-                sync_2d=False,
-                flip_ratio_bev_horizontal=0.5,
+                scale_ratio_range=[1.0, 1.0],
+                translation_std=[0, 0, 0],
             ),
-            dict(type='PointSample', num_points=20000),
+            dict(type="RandomFlip3D", sync_2d=False, flip_ratio_bev_horizontal=0.5,),
+            dict(type="PointSample", num_points=20000),
             dict(
-                type='DefaultFormatBundle3D',
-                class_names=class_names,
-                with_label=False),
-            dict(type='Collect3D', keys=['points'])
-        ])
+                type="DefaultFormatBundle3D", class_names=class_names, with_label=False
+            ),
+            dict(type="Collect3D", keys=["points"]),
+        ],
+    ),
 ]
 # construct a pipeline for data and gt loading in show function
 # please keep its loading function consistent with test_pipeline (e.g. client)
 eval_pipeline = [
     dict(
-        type='LoadPointsFromFile',
-        coord_type='DEPTH',
+        type="LoadPointsFromFile",
+        coord_type="DEPTH",
         shift_height=False,
         load_dim=6,
-        use_dim=[0, 1, 2]),
-    dict(
-        type='DefaultFormatBundle3D',
-        class_names=class_names,
-        with_label=False),
-    dict(type='Collect3D', keys=['points'])
+        use_dim=[0, 1, 2],
+    ),
+    dict(type="DefaultFormatBundle3D", class_names=class_names, with_label=False),
+    dict(type="Collect3D", keys=["points"]),
 ]
 data = dict(
     samples_per_gpu=16,
     workers_per_gpu=4,
     train=dict(
-        type='RepeatDataset',
+        type="RepeatDataset",
         times=5,
         dataset=dict(
             type=dataset_type,
             data_root=data_root,
-            ann_file=data_root + 'sunrgbd_infos_train.pkl',
+            ann_file=data_root + "sunrgbd_infos_train.pkl",
             pipeline=train_pipeline,
             classes=class_names,
             filter_empty_gt=False,
             # we use box_type_3d='LiDAR' in kitti and nuscenes dataset
             # and box_type_3d='Depth' in sunrgbd and scannet dataset.
-            box_type_3d='Depth')),
+            box_type_3d="Depth",
+        ),
+    ),
     val=dict(
         type=dataset_type,
         data_root=data_root,
-        ann_file=data_root + 'sunrgbd_infos_val.pkl',
+        ann_file=data_root + "sunrgbd_infos_val.pkl",
         pipeline=test_pipeline,
         classes=class_names,
         test_mode=True,
-        box_type_3d='Depth'),
+        box_type_3d="Depth",
+    ),
     test=dict(
         type=dataset_type,
         data_root=data_root,
-        ann_file=data_root + 'sunrgbd_infos_val.pkl',
+        ann_file=data_root + "sunrgbd_infos_val.pkl",
         pipeline=test_pipeline,
         classes=class_names,
         test_mode=True,
-        box_type_3d='Depth'))
+        box_type_3d="Depth",
+    ),
+)
 
 evaluation = dict(pipeline=eval_pipeline)
diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/waymoD5-3d-3class.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/waymoD5-3d-3class.py
index 920ac15..7b48426 100644
--- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/waymoD5-3d-3class.py
+++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/waymoD5-3d-3class.py
@@ -1,145 +1,156 @@
 # dataset settings
 # D5 in the config name means the whole dataset is divided into 5 folds
 # We only use one fold for efficient experiments
-dataset_type = 'LidarWaymoDataset'
-data_root = 'data/waymo-full/kitti_format/'
-file_client_args = dict(backend='disk')
+dataset_type = "LidarWaymoDataset"
+data_root = "data/waymo-full/kitti_format/"
+file_client_args = dict(backend="disk")
 # Uncomment the following if use ceph or other file clients.
 # See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient
 # for more details.
 # file_client_args = dict(
 #     backend='petrel', path_mapping=dict(data='s3://waymo_data/'))
 
-class_names = ['Car', 'Pedestrian', 'Cyclist']
+class_names = ["Car", "Pedestrian", "Cyclist"]
 point_cloud_range = [-74.88, -74.88, -2, 74.88, 74.88, 4]
 input_modality = dict(use_lidar=True, use_camera=False)
 db_sampler = dict(
     data_root=data_root,
-    info_path=data_root + 'waymo_dbinfos_train.pkl',
+    info_path=data_root + "waymo_dbinfos_train.pkl",
     rate=1.0,
     prepare=dict(
         filter_by_difficulty=[-1],
-        filter_by_min_points=dict(Car=5, Pedestrian=10, Cyclist=10)),
+        filter_by_min_points=dict(Car=5, Pedestrian=10, Cyclist=10),
+    ),
     classes=class_names,
     sample_groups=dict(Car=15, Pedestrian=10, Cyclist=10),
     points_loader=dict(
-        type='LoadPointsFromFile',
-        coord_type='LIDAR',
+        type="LoadPointsFromFile",
+        coord_type="LIDAR",
         load_dim=5,
         use_dim=[0, 1, 2, 3, 4],
-        file_client_args=file_client_args))
+        file_client_args=file_client_args,
+    ),
+)
 
 train_pipeline = [
     dict(
-        type='LoadPointsFromFile',
-        coord_type='LIDAR',
+        type="LoadPointsFromFile",
+        coord_type="LIDAR",
         load_dim=6,
         use_dim=5,
-        file_client_args=file_client_args),
+        file_client_args=file_client_args,
+    ),
     dict(
-        type='LoadAnnotations3D',
+        type="LoadAnnotations3D",
         with_bbox_3d=True,
         with_label_3d=True,
-        file_client_args=file_client_args),
-    dict(type='ObjectSample', db_sampler=db_sampler),
+        file_client_args=file_client_args,
+    ),
+    dict(type="ObjectSample", db_sampler=db_sampler),
     dict(
-        type='RandomFlip3D',
+        type="RandomFlip3D",
         sync_2d=False,
         flip_ratio_bev_horizontal=0.5,
-        flip_ratio_bev_vertical=0.5),
+        flip_ratio_bev_vertical=0.5,
+    ),
     dict(
-        type='GlobalRotScaleTrans',
+        type="GlobalRotScaleTrans",
         rot_range=[-0.78539816, 0.78539816],
-        scale_ratio_range=[0.95, 1.05]),
-    dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
-    dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
-    dict(type='PointShuffle'),
-    dict(type='DefaultFormatBundle3D', class_names=class_names),
-    dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
+        scale_ratio_range=[0.95, 1.05],
+    ),
+    dict(type="PointsRangeFilter", point_cloud_range=point_cloud_range),
+    dict(type="ObjectRangeFilter", point_cloud_range=point_cloud_range),
+    dict(type="PointShuffle"),
+    dict(type="DefaultFormatBundle3D", class_names=class_names),
+    dict(type="Collect3D", keys=["points", "gt_bboxes_3d", "gt_labels_3d"]),
 ]
 test_pipeline = [
     dict(
-        type='LoadPointsFromFile',
-        coord_type='LIDAR',
+        type="LoadPointsFromFile",
+        coord_type="LIDAR",
         load_dim=6,
         use_dim=5,
-        file_client_args=file_client_args),
+        file_client_args=file_client_args,
+    ),
     dict(
-        type='MultiScaleFlipAug3D',
+        type="MultiScaleFlipAug3D",
         img_scale=(1333, 800),
         pts_scale_ratio=1,
         flip=False,
         transforms=[
             dict(
-                type='GlobalRotScaleTrans',
+                type="GlobalRotScaleTrans",
                 rot_range=[0, 0],
-                scale_ratio_range=[1., 1.],
-                translation_std=[0, 0, 0]),
-            dict(type='RandomFlip3D'),
+                scale_ratio_range=[1.0, 1.0],
+                translation_std=[0, 0, 0],
+            ),
+            dict(type="RandomFlip3D"),
+            dict(type="PointsRangeFilter", point_cloud_range=point_cloud_range),
             dict(
-                type='PointsRangeFilter', point_cloud_range=point_cloud_range),
-            dict(
-                type='DefaultFormatBundle3D',
-                class_names=class_names,
-                with_label=False),
-            dict(type='Collect3D', keys=['points'])
-        ])
+                type="DefaultFormatBundle3D", class_names=class_names, with_label=False
+            ),
+            dict(type="Collect3D", keys=["points"]),
+        ],
+    ),
 ]
 # construct a pipeline for data and gt loading in show function
 # please keep its loading function consistent with test_pipeline (e.g. client)
 eval_pipeline = [
     dict(
-        type='LoadPointsFromFile',
-        coord_type='LIDAR',
+        type="LoadPointsFromFile",
+        coord_type="LIDAR",
         load_dim=6,
         use_dim=5,
-        file_client_args=file_client_args),
-    dict(
-        type='DefaultFormatBundle3D',
-        class_names=class_names,
-        with_label=False),
-    dict(type='Collect3D', keys=['points'])
+        file_client_args=file_client_args,
+    ),
+    dict(type="DefaultFormatBundle3D", class_names=class_names, with_label=False),
+    dict(type="Collect3D", keys=["points"]),
 ]
 
 data = dict(
     samples_per_gpu=2,
     workers_per_gpu=4,
     train=dict(
-        type='RepeatDataset',
+        type="RepeatDataset",
        times=2,
         dataset=dict(
             type=dataset_type,
             data_root=data_root,
-            ann_file=data_root + 'waymo_infos_train.pkl',
-            split='training',
+            ann_file=data_root + "waymo_infos_train.pkl",
+            split="training",
             pipeline=train_pipeline,
             modality=input_modality,
             classes=class_names,
             test_mode=False,
             # we use box_type_3d='LiDAR' in kitti and nuscenes dataset
             # and box_type_3d='Depth' in sunrgbd and scannet dataset.
-            box_type_3d='LiDAR',
+            box_type_3d="LiDAR",
             # load one frame every five frames
-            load_interval=5)),
+            load_interval=5,
+        ),
+    ),
     val=dict(
         type=dataset_type,
         data_root=data_root,
-        ann_file=data_root + 'waymo_infos_val.pkl',
-        split='training',
+        ann_file=data_root + "waymo_infos_val.pkl",
+        split="training",
         pipeline=test_pipeline,
         modality=input_modality,
         classes=class_names,
         test_mode=True,
-        box_type_3d='LiDAR'),
+        box_type_3d="LiDAR",
+    ),
     test=dict(
         type=dataset_type,
         data_root=data_root,
-        ann_file=data_root + 'waymo_infos_val.pkl',
-        split='training',
+        ann_file=data_root + "waymo_infos_val.pkl",
+        split="training",
         pipeline=test_pipeline,
         modality=input_modality,
         classes=class_names,
         test_mode=True,
-        box_type_3d='LiDAR'))
+        box_type_3d="LiDAR",
+    ),
+)
 
 evaluation = dict(interval=24, pipeline=eval_pipeline)
diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/waymoD5-3d-car.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/waymoD5-3d-car.py
index 02e2627..0c367b7 100644
--- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/waymoD5-3d-car.py
+++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/datasets/waymoD5-3d-car.py
@@ -1,143 +1,153 @@
 # dataset settings
 # D5 in the config name means the whole dataset is divided into 5 folds
 # We only use one fold for efficient experiments
-dataset_type = 'WaymoDataset'
-data_root = 'data/waymo/kitti_format/'
-file_client_args = dict(backend='disk')
+dataset_type = "WaymoDataset"
+data_root = "data/waymo/kitti_format/"
+file_client_args = dict(backend="disk")
 # Uncomment the following if use ceph or other file clients.
 # See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient
 # for more details.
 # file_client_args = dict(
 #     backend='petrel', path_mapping=dict(data='s3://waymo_data/'))
 
-class_names = ['Car']
+class_names = ["Car"]
 point_cloud_range = [-74.88, -74.88, -2, 74.88, 74.88, 4]
 input_modality = dict(use_lidar=True, use_camera=False)
 db_sampler = dict(
     data_root=data_root,
-    info_path=data_root + 'waymo_dbinfos_train.pkl',
+    info_path=data_root + "waymo_dbinfos_train.pkl",
     rate=1.0,
     prepare=dict(filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)),
     classes=class_names,
     sample_groups=dict(Car=15),
     points_loader=dict(
-        type='LoadPointsFromFile',
-        coord_type='LIDAR',
+        type="LoadPointsFromFile",
+        coord_type="LIDAR",
         load_dim=5,
         use_dim=[0, 1, 2, 3, 4],
-        file_client_args=file_client_args))
+        file_client_args=file_client_args,
+    ),
+)
 
 train_pipeline = [
     dict(
-        type='LoadPointsFromFile',
-        coord_type='LIDAR',
+        type="LoadPointsFromFile",
+        coord_type="LIDAR",
         load_dim=6,
         use_dim=5,
-        file_client_args=file_client_args),
+        file_client_args=file_client_args,
+    ),
     dict(
-        type='LoadAnnotations3D',
+        type="LoadAnnotations3D",
         with_bbox_3d=True,
         with_label_3d=True,
-        file_client_args=file_client_args),
-    dict(type='ObjectSample', db_sampler=db_sampler),
+        file_client_args=file_client_args,
+    ),
+    dict(type="ObjectSample", db_sampler=db_sampler),
     dict(
-        type='RandomFlip3D',
+        type="RandomFlip3D",
         sync_2d=False,
         flip_ratio_bev_horizontal=0.5,
-        flip_ratio_bev_vertical=0.5),
+        flip_ratio_bev_vertical=0.5,
+    ),
     dict(
-        type='GlobalRotScaleTrans',
+        type="GlobalRotScaleTrans",
         rot_range=[-0.78539816, 0.78539816],
-        scale_ratio_range=[0.95, 1.05]),
-    dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
-    dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
-    dict(type='PointShuffle'),
-    dict(type='DefaultFormatBundle3D', class_names=class_names),
-    dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
+        scale_ratio_range=[0.95, 1.05],
+    ),
+    dict(type="PointsRangeFilter", point_cloud_range=point_cloud_range),
+    dict(type="ObjectRangeFilter", point_cloud_range=point_cloud_range),
+    dict(type="PointShuffle"),
+    dict(type="DefaultFormatBundle3D", class_names=class_names),
+    dict(type="Collect3D", keys=["points", "gt_bboxes_3d", "gt_labels_3d"]),
 ]
 test_pipeline = [
     dict(
-        type='LoadPointsFromFile',
-        coord_type='LIDAR',
+        type="LoadPointsFromFile",
+        coord_type="LIDAR",
         load_dim=6,
         use_dim=5,
-        file_client_args=file_client_args),
+        file_client_args=file_client_args,
+    ),
     dict(
-        type='MultiScaleFlipAug3D',
+        type="MultiScaleFlipAug3D",
         img_scale=(1333, 800),
         pts_scale_ratio=1,
         flip=False,
         transforms=[
             dict(
-                type='GlobalRotScaleTrans',
+                type="GlobalRotScaleTrans",
                 rot_range=[0, 0],
-                scale_ratio_range=[1., 1.],
-                translation_std=[0, 0, 0]),
-            dict(type='RandomFlip3D'),
+                scale_ratio_range=[1.0, 1.0],
+                translation_std=[0, 0, 0],
+            ),
+            dict(type="RandomFlip3D"),
+            dict(type="PointsRangeFilter", point_cloud_range=point_cloud_range),
             dict(
-                type='PointsRangeFilter', point_cloud_range=point_cloud_range),
-            dict(
-                type='DefaultFormatBundle3D',
-                class_names=class_names,
-                with_label=False),
-            dict(type='Collect3D', keys=['points'])
-        ])
+                type="DefaultFormatBundle3D", class_names=class_names, with_label=False
+            ),
+            dict(type="Collect3D", keys=["points"]),
+        ],
+    ),
 ]
 # construct a pipeline for data and gt loading in show function
 # please keep its loading function consistent with test_pipeline (e.g. client)
 eval_pipeline = [
     dict(
-        type='LoadPointsFromFile',
-        coord_type='LIDAR',
+        type="LoadPointsFromFile",
+        coord_type="LIDAR",
         load_dim=6,
         use_dim=5,
-        file_client_args=file_client_args),
-    dict(
-        type='DefaultFormatBundle3D',
-        class_names=class_names,
-        with_label=False),
-    dict(type='Collect3D', keys=['points'])
+        file_client_args=file_client_args,
+    ),
+    dict(type="DefaultFormatBundle3D", class_names=class_names, with_label=False),
+    dict(type="Collect3D", keys=["points"]),
 ]
 
 data = dict(
     samples_per_gpu=2,
     workers_per_gpu=4,
     train=dict(
-        type='RepeatDataset',
+        type="RepeatDataset",
         times=2,
         dataset=dict(
             type=dataset_type,
             data_root=data_root,
-            ann_file=data_root + 'waymo_infos_train.pkl',
-            split='training',
+            ann_file=data_root + "waymo_infos_train.pkl",
+            split="training",
             pipeline=train_pipeline,
             modality=input_modality,
             classes=class_names,
             test_mode=False,
             # we use box_type_3d='LiDAR' in kitti and nuscenes dataset
             # and box_type_3d='Depth' in sunrgbd and scannet dataset.
-            box_type_3d='LiDAR',
+            box_type_3d="LiDAR",
             # load one frame every five frames
-            load_interval=5)),
+            load_interval=5,
+        ),
+    ),
     val=dict(
         type=dataset_type,
         data_root=data_root,
-        ann_file=data_root + 'waymo_infos_val.pkl',
-        split='training',
+        ann_file=data_root + "waymo_infos_val.pkl",
+        split="training",
         pipeline=test_pipeline,
         modality=input_modality,
         classes=class_names,
         test_mode=True,
-        box_type_3d='LiDAR'),
+        box_type_3d="LiDAR",
+    ),
     test=dict(
         type=dataset_type,
         data_root=data_root,
-        ann_file=data_root + 'waymo_infos_val.pkl',
-        split='training',
+        ann_file=data_root + "waymo_infos_val.pkl",
+        split="training",
         pipeline=test_pipeline,
         modality=input_modality,
         classes=class_names,
         test_mode=True,
-        box_type_3d='LiDAR'))
+        box_type_3d="LiDAR",
+    ),
+)
 
 evaluation = dict(interval=24, pipeline=eval_pipeline)
diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/default_runtime.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/default_runtime.py
index 4e85b69..7ef3a2f 100644
--- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/default_runtime.py
+++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/default_runtime.py
@@ -4,15 +4,12 @@
 # For more loggers see
 # https://mmcv.readthedocs.io/en/latest/api.html#mmcv.runner.LoggerHook
 log_config = dict(
-    interval=50,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHook')
-    ])
+    interval=50, hooks=[dict(type="TextLoggerHook"), dict(type="TensorboardLoggerHook")]
+)
 # yapf:enable
-dist_params = dict(backend='nccl')
-log_level = 'INFO'
+dist_params = dict(backend="nccl")
+log_level = "INFO"
 work_dir = None
 load_from = None
 resume_from = None
-workflow = [('train', 1)]
+workflow = [("train", 1)]
diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/3dssd.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/3dssd.py
index 55344c7..d206d57 100644
--- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/3dssd.py
+++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/3dssd.py
@@ -1,77 +1,76 @@
 model = dict(
-    type='SSD3DNet',
+    type="SSD3DNet",
     backbone=dict(
-        type='PointNet2SAMSG',
+        type="PointNet2SAMSG",
         in_channels=4,
         num_points=(4096, 512, (256, 256)),
         radii=((0.2, 0.4, 0.8), (0.4, 0.8, 1.6), (1.6, 3.2, 4.8)),
         num_samples=((32, 32, 64), (32, 32, 64), (32, 32, 32)),
-        sa_channels=(((16, 16, 32), (16, 16, 32), (32, 32, 64)),
-                     ((64, 64, 128), (64, 64, 128), (64, 96, 128)),
-                     ((128, 128, 256), (128, 192, 256), (128, 256, 256))),
+        sa_channels=(
+            ((16, 16, 32), (16, 16, 32), (32, 32, 64)),
+            ((64, 64, 128), (64, 64, 128), (64, 96, 128)),
+            ((128, 128, 256), (128, 192, 256), (128, 256, 256)),
+        ),
         aggregation_channels=(64, 128, 256),
-        fps_mods=(('D-FPS'), ('FS'), ('F-FPS', 'D-FPS')),
+        fps_mods=(("D-FPS"), ("FS"), ("F-FPS", "D-FPS")),
         fps_sample_range_lists=((-1), (-1), (512, -1)),
-        norm_cfg=dict(type='BN2d', eps=1e-3, momentum=0.1),
+        norm_cfg=dict(type="BN2d", eps=1e-3, momentum=0.1),
         sa_cfg=dict(
-            type='PointSAModuleMSG',
-            pool_mod='max',
-            use_xyz=True,
-            normalize_xyz=False)),
+            type="PointSAModuleMSG", pool_mod="max", use_xyz=True, normalize_xyz=False
+        ),
+    ),
     bbox_head=dict(
-        type='SSD3DHead',
+        type="SSD3DHead",
         in_channels=256,
         vote_module_cfg=dict(
             in_channels=256,
             num_points=256,
             gt_per_seed=1,
-            conv_channels=(128, ),
-            conv_cfg=dict(type='Conv1d'),
-            norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.1),
+            conv_channels=(128,),
+            conv_cfg=dict(type="Conv1d"),
+            norm_cfg=dict(type="BN1d", eps=1e-3, momentum=0.1),
             with_res_feat=False,
-            vote_xyz_range=(3.0, 3.0, 2.0)),
+            vote_xyz_range=(3.0, 3.0, 2.0),
+        ),
         vote_aggregation_cfg=dict(
-            type='PointSAModuleMSG',
+            type="PointSAModuleMSG",
             num_point=256,
             radii=(4.8, 6.4),
             sample_nums=(16, 32),
             mlp_channels=((256, 256, 256, 512), (256, 256, 512, 1024)),
-            norm_cfg=dict(type='BN2d', eps=1e-3, momentum=0.1),
+            norm_cfg=dict(type="BN2d", eps=1e-3, momentum=0.1),
             use_xyz=True,
             normalize_xyz=False,
-            bias=True),
+            bias=True,
+        ),
         pred_layer_cfg=dict(
             in_channels=1536,
             shared_conv_channels=(512, 128),
-            cls_conv_channels=(128, ),
-            reg_conv_channels=(128, ),
-            conv_cfg=dict(type='Conv1d'),
-            norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.1),
-            bias=True),
-        conv_cfg=dict(type='Conv1d'),
-        norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.1),
+            cls_conv_channels=(128,),
+            reg_conv_channels=(128,),
+            conv_cfg=dict(type="Conv1d"),
+            norm_cfg=dict(type="BN1d", eps=1e-3, momentum=0.1),
+            bias=True,
+        ),
+        conv_cfg=dict(type="Conv1d"),
+        norm_cfg=dict(type="BN1d", eps=1e-3, momentum=0.1),
         objectness_loss=dict(
-            type='CrossEntropyLoss',
-            use_sigmoid=True,
-            reduction='sum',
-            loss_weight=1.0),
-        center_loss=dict(
-            type='SmoothL1Loss', reduction='sum', loss_weight=1.0),
-        dir_class_loss=dict(
-            type='CrossEntropyLoss', reduction='sum', loss_weight=1.0),
-        dir_res_loss=dict(
-            type='SmoothL1Loss', reduction='sum', loss_weight=1.0),
-        size_res_loss=dict(
-            type='SmoothL1Loss', reduction='sum', loss_weight=1.0),
-        corner_loss=dict(
-            type='SmoothL1Loss', reduction='sum', loss_weight=1.0),
-        vote_loss=dict(type='SmoothL1Loss', reduction='sum', loss_weight=1.0)),
+            type="CrossEntropyLoss", use_sigmoid=True, reduction="sum", loss_weight=1.0
+        ),
+        center_loss=dict(type="SmoothL1Loss", reduction="sum", loss_weight=1.0),
+        dir_class_loss=dict(type="CrossEntropyLoss", reduction="sum", loss_weight=1.0),
+        dir_res_loss=dict(type="SmoothL1Loss", reduction="sum", loss_weight=1.0),
+        size_res_loss=dict(type="SmoothL1Loss", reduction="sum", loss_weight=1.0),
+        corner_loss=dict(type="SmoothL1Loss", reduction="sum", loss_weight=1.0),
+        vote_loss=dict(type="SmoothL1Loss", reduction="sum", loss_weight=1.0),
+    ),
     # model training and testing settings
-    train_cfg=dict(
-        sample_mod='spec', pos_distance_thr=10.0, expand_dims_length=0.05),
+    train_cfg=dict(sample_mod="spec", pos_distance_thr=10.0, expand_dims_length=0.05),
     test_cfg=dict(
-        nms_cfg=dict(type='nms', iou_thr=0.1),
-        sample_mod='spec',
+        nms_cfg=dict(type="nms", iou_thr=0.1),
+        sample_mod="spec",
         score_thr=0.0,
         per_class_proposal=True,
-        max_output_num=100))
+        max_output_num=100,
+    ),
+)
diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py
index fb9e0a8..c9f706b 100644
--- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py
+++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py
@@ -1,190 +1,209 @@
 # model settings
 model = dict(
-    type='CascadeRCNN',
-    pretrained='torchvision://resnet50',
+    type="CascadeRCNN",
+    pretrained="torchvision://resnet50",
     backbone=dict(
-        type='ResNet',
+        type="ResNet",
         depth=50,
         num_stages=4,
         out_indices=(0, 1, 2, 3),
         frozen_stages=1,
-        norm_cfg=dict(type='BN', requires_grad=True),
+        norm_cfg=dict(type="BN", requires_grad=True),
         norm_eval=True,
-        style='pytorch'),
+        style="pytorch",
+    ),
     neck=dict(
-        type='FPN',
-        in_channels=[256, 512, 1024, 2048],
-        out_channels=256,
-        num_outs=5),
+        type="FPN", in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5
+    ),
     rpn_head=dict(
-        type='RPNHead',
+        type="RPNHead",
         in_channels=256,
         feat_channels=256,
         anchor_generator=dict(
-            type='AnchorGenerator',
+            type="AnchorGenerator",
             scales=[8],
             ratios=[0.5, 1.0, 2.0],
-            strides=[4, 8, 16, 32, 64]),
+            strides=[4, 8, 16, 32, 64],
+        ),
         bbox_coder=dict(
-            type='DeltaXYWHBBoxCoder',
-            target_means=[.0, .0, .0, .0],
-            target_stds=[1.0, 1.0, 1.0, 1.0]),
-        loss_cls=dict(
-            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
-        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
+            type="DeltaXYWHBBoxCoder",
+            target_means=[0.0, 0.0, 0.0, 0.0],
+            target_stds=[1.0, 1.0, 1.0, 1.0],
+        ),
+        loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0),
+        loss_bbox=dict(type="SmoothL1Loss", beta=1.0 / 9.0, loss_weight=1.0),
+    ),
     roi_head=dict(
-        type='CascadeRoIHead',
+        type="CascadeRoIHead",
         num_stages=3,
         stage_loss_weights=[1, 0.5, 0.25],
         bbox_roi_extractor=dict(
-            type='SingleRoIExtractor',
-            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
+            type="SingleRoIExtractor",
+            roi_layer=dict(type="RoIAlign", output_size=7, sampling_ratio=0),
             out_channels=256,
-            featmap_strides=[4, 8, 16, 32]),
+            featmap_strides=[4, 8, 16, 32],
+        ),
         bbox_head=[
             dict(
-                type='Shared2FCBBoxHead',
+                type="Shared2FCBBoxHead",
                 in_channels=256,
                 fc_out_channels=1024,
                 roi_feat_size=7,
                 num_classes=80,
                 bbox_coder=dict(
-                    type='DeltaXYWHBBoxCoder',
-                    target_means=[0., 0., 0., 0.],
-                    target_stds=[0.1, 0.1, 0.2, 0.2]),
+                    type="DeltaXYWHBBoxCoder",
+                    target_means=[0.0, 0.0, 0.0, 0.0],
+                    target_stds=[0.1, 0.1, 0.2, 0.2],
+                ),
                 reg_class_agnostic=True,
                 loss_cls=dict(
-                    type='CrossEntropyLoss',
-                    use_sigmoid=False,
-                    loss_weight=1.0),
-                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
-                               loss_weight=1.0)),
+                    type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
+                ),
+                loss_bbox=dict(type="SmoothL1Loss", beta=1.0, loss_weight=1.0),
+            ),
             dict(
-                type='Shared2FCBBoxHead',
+                type="Shared2FCBBoxHead",
                 in_channels=256,
                 fc_out_channels=1024,
                 roi_feat_size=7,
                 num_classes=80,
                 bbox_coder=dict(
-                    type='DeltaXYWHBBoxCoder',
-                    target_means=[0., 0., 0., 0.],
-                    target_stds=[0.05, 0.05, 0.1, 0.1]),
+                    type="DeltaXYWHBBoxCoder",
+                    target_means=[0.0, 0.0, 0.0, 0.0],
+                    target_stds=[0.05, 0.05, 0.1, 0.1],
+                ),
                 reg_class_agnostic=True,
                 loss_cls=dict(
-                    type='CrossEntropyLoss',
-                    use_sigmoid=False,
-                    loss_weight=1.0),
-                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
-                               loss_weight=1.0)),
+                    type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
+                ),
+                loss_bbox=dict(type="SmoothL1Loss", beta=1.0, loss_weight=1.0),
+            ),
             dict(
-                type='Shared2FCBBoxHead',
+                type="Shared2FCBBoxHead",
                 in_channels=256,
                 fc_out_channels=1024,
                 roi_feat_size=7,
                 num_classes=80,
                 bbox_coder=dict(
-                    type='DeltaXYWHBBoxCoder',
-                    target_means=[0., 0., 0., 0.],
-                    target_stds=[0.033, 0.033, 0.067, 0.067]),
+                    type="DeltaXYWHBBoxCoder",
+                    target_means=[0.0, 0.0, 0.0, 0.0],
+                    target_stds=[0.033, 0.033, 0.067, 0.067],
+                ),
                 reg_class_agnostic=True,
                 loss_cls=dict(
-                    type='CrossEntropyLoss',
-                    use_sigmoid=False,
-                    loss_weight=1.0),
-                loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
+                    type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0
+                ),
+                loss_bbox=dict(type="SmoothL1Loss", beta=1.0, loss_weight=1.0),
+            ),
         ],
         mask_roi_extractor=dict(
-            type='SingleRoIExtractor',
-            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
+            type="SingleRoIExtractor",
+            roi_layer=dict(type="RoIAlign", output_size=14, sampling_ratio=0),
             out_channels=256,
-            featmap_strides=[4, 8, 16, 32]),
+            featmap_strides=[4, 8, 16, 32],
+        ),
         mask_head=dict(
-            type='FCNMaskHead',
+            type="FCNMaskHead",
             num_convs=4,
             in_channels=256,
             conv_out_channels=256,
             num_classes=80,
-            loss_mask=dict(
-                type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
+            loss_mask=dict(type="CrossEntropyLoss", use_mask=True, loss_weight=1.0),
+        ),
+    ),
     # model training and testing settings
     train_cfg=dict(
         rpn=dict(
             assigner=dict(
-                type='MaxIoUAssigner',
+                type="MaxIoUAssigner",
                 pos_iou_thr=0.7,
                 neg_iou_thr=0.3,
                 min_pos_iou=0.3,
                 match_low_quality=True,
-                ignore_iof_thr=-1),
+                ignore_iof_thr=-1,
+            ),
             sampler=dict(
-                type='RandomSampler',
+                type="RandomSampler",
                 num=256,
                 pos_fraction=0.5,
                 neg_pos_ub=-1,
-                add_gt_as_proposals=False),
+                add_gt_as_proposals=False,
+            ),
             allowed_border=0,
             pos_weight=-1,
-            debug=False),
+            debug=False,
+        ),
         rpn_proposal=dict(
             nms_across_levels=False,
             nms_pre=2000,
             nms_post=2000,
             max_num=2000,
             nms_thr=0.7,
-            min_bbox_size=0),
+            min_bbox_size=0,
+        ),
         rcnn=[
             dict(
                 assigner=dict(
-                    type='MaxIoUAssigner',
+                    type="MaxIoUAssigner",
                     pos_iou_thr=0.5,
                     neg_iou_thr=0.5,
                     min_pos_iou=0.5,
                     match_low_quality=False,
-                    ignore_iof_thr=-1),
+                    ignore_iof_thr=-1,
+                ),
                 sampler=dict(
-                    type='RandomSampler',
+                    type="RandomSampler",
                     num=512,
                     pos_fraction=0.25,
                     neg_pos_ub=-1,
-                    add_gt_as_proposals=True),
+                    add_gt_as_proposals=True,
+                ),
                 mask_size=28,
                 pos_weight=-1,
-                debug=False),
+                debug=False,
+            ),
             dict(
                 assigner=dict(
-                    type='MaxIoUAssigner',
+                    type="MaxIoUAssigner",
                     pos_iou_thr=0.6,
                     neg_iou_thr=0.6,
                     min_pos_iou=0.6,
                     match_low_quality=False,
-                    ignore_iof_thr=-1),
+                    ignore_iof_thr=-1,
+                ),
                 sampler=dict(
-                    type='RandomSampler',
+                    type="RandomSampler",
                     num=512,
                     pos_fraction=0.25,
                     neg_pos_ub=-1,
-                    add_gt_as_proposals=True),
+                    add_gt_as_proposals=True,
+                ),
                 mask_size=28,
                 pos_weight=-1,
-                debug=False),
+                debug=False,
+            ),
             dict(
                 assigner=dict(
-                    type='MaxIoUAssigner',
+                    type="MaxIoUAssigner",
                     pos_iou_thr=0.7,
                     neg_iou_thr=0.7,
                     min_pos_iou=0.7,
                     match_low_quality=False,
-                    ignore_iof_thr=-1),
+                    ignore_iof_thr=-1,
+                ),
                 sampler=dict(
-                    type='RandomSampler',
+                    type="RandomSampler",
                     num=512,
                     pos_fraction=0.25,
                     neg_pos_ub=-1,
-                    add_gt_as_proposals=True),
+                    add_gt_as_proposals=True,
+                ),
                 mask_size=28,
                 pos_weight=-1,
-                debug=False)
-        ]),
+                debug=False,
+            ),
+        ],
+    ),
     test_cfg=dict(
         rpn=dict(
             nms_across_levels=False,
@@ -192,9 +211,13 @@
             nms_post=1000,
             max_num=1000,
             nms_thr=0.7,
-            min_bbox_size=0),
+            min_bbox_size=0,
+        ),
         rcnn=dict(
             score_thr=0.05,
-            nms=dict(type='nms', iou_threshold=0.5),
+            nms=dict(type="nms", iou_threshold=0.5),
             max_per_img=100,
-            mask_thr_binary=0.5)))
+            mask_thr_binary=0.5,
+        ),
+    ),
+)
diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/centerpoint_01voxel_second_secfpn_nus.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/centerpoint_01voxel_second_secfpn_nus.py
index efdce59..7986e07 100644
--- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/centerpoint_01voxel_second_secfpn_nus.py
+++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/centerpoint_01voxel_second_secfpn_nus.py
@@ -1,62 +1,67 @@
 voxel_size = [0.1, 0.1, 0.2]
 model = dict(
-    type='CenterPoint',
+    type="CenterPoint",
     pts_voxel_layer=dict(
-        max_num_points=10, voxel_size=voxel_size, max_voxels=(90000, 120000)),
-    pts_voxel_encoder=dict(type='HardSimpleVFE', num_features=5),
+        max_num_points=10, voxel_size=voxel_size, max_voxels=(90000, 120000)
+    ),
+    pts_voxel_encoder=dict(type="HardSimpleVFE", num_features=5),
     pts_middle_encoder=dict(
-        type='SparseEncoder',
+        type="SparseEncoder",
         in_channels=5,
         sparse_shape=[41, 1024, 1024],
         output_channels=128,
-        order=('conv', 'norm', 'act'),
-        encoder_channels=((16, 16, 32), (32, 32, 64), (64, 64, 128), (128,
-                                                                      128)),
+        order=("conv", "norm", "act"),
+        encoder_channels=((16, 16, 32), (32, 32, 64), (64, 64, 128), (128, 128)),
         encoder_paddings=((0, 0, 1), (0, 0, 1), (0, 0, [0, 1, 1]), (0, 0)),
-        block_type='basicblock'),
+        block_type="basicblock",
+    ),
     pts_backbone=dict(
-        type='SECOND',
+        type="SECOND",
         in_channels=256,
         out_channels=[128, 256],
         layer_nums=[5, 5],
         layer_strides=[1, 2],
-        norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),
-        conv_cfg=dict(type='Conv2d', bias=False)),
+        norm_cfg=dict(type="BN", eps=1e-3, momentum=0.01),
+        conv_cfg=dict(type="Conv2d", bias=False),
+    ),
     pts_neck=dict(
-        type='SECONDFPN',
+        type="SECONDFPN",
         in_channels=[128, 256],
         out_channels=[256, 256],
         upsample_strides=[1, 2],
-        norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),
-        upsample_cfg=dict(type='deconv', bias=False),
-        use_conv_for_no_stride=True),
+        norm_cfg=dict(type="BN", eps=1e-3, momentum=0.01),
+        upsample_cfg=dict(type="deconv", bias=False),
+        use_conv_for_no_stride=True,
+    ),
     pts_bbox_head=dict(
-        type='CenterHead',
+        type="CenterHead",
         in_channels=sum([256, 256]),
         tasks=[
-            dict(num_class=1, class_names=['car']),
-            dict(num_class=2, class_names=['truck', 'construction_vehicle']),
-            dict(num_class=2, class_names=['bus', 'trailer']),
-            dict(num_class=1, class_names=['barrier']),
-            dict(num_class=2, class_names=['motorcycle', 'bicycle']),
-            dict(num_class=2, class_names=['pedestrian', 'traffic_cone']),
+            dict(num_class=1, class_names=["car"]),
+            dict(num_class=2, class_names=["truck", "construction_vehicle"]),
+            dict(num_class=2, class_names=["bus", "trailer"]),
+            dict(num_class=1, class_names=["barrier"]),
+            dict(num_class=2, class_names=["motorcycle", "bicycle"]),
+            dict(num_class=2, class_names=["pedestrian", "traffic_cone"]),
         ],
         common_heads=dict(
-            reg=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2), vel=(2, 2)),
+            reg=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2), vel=(2, 2)
+        ),
         share_conv_channel=64,
         bbox_coder=dict(
-            type='CenterPointBBoxCoder',
+            type="CenterPointBBoxCoder",
             post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0],
             max_num=500,
             score_threshold=0.1,
             out_size_factor=8,
             voxel_size=voxel_size[:2],
-            code_size=9),
-        separate_head=dict(
-            type='SeparateHead', init_bias=-2.19, final_kernel=3),
-        loss_cls=dict(type='GaussianFocalLoss', reduction='mean'),
-        loss_bbox=dict(type='L1Loss', reduction='mean', loss_weight=0.25),
-        norm_bbox=True),
+            code_size=9,
+        ),
+        separate_head=dict(type="SeparateHead", init_bias=-2.19, final_kernel=3),
+        loss_cls=dict(type="GaussianFocalLoss", reduction="mean"),
+        loss_bbox=dict(type="L1Loss", reduction="mean", loss_weight=0.25),
+        norm_bbox=True,
+    ),
     # model training and testing settings
     train_cfg=dict(
         pts=dict(
@@ -67,7 +72,9 @@
             gaussian_overlap=0.1,
             max_objs=500,
             min_radius=2,
-            code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2])),
+            code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2],
+        )
+    ),
     test_cfg=dict(
         pts=dict(
             post_center_limit_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0],
@@ -77,7 +84,10 @@
             score_threshold=0.1,
             out_size_factor=8,
             voxel_size=voxel_size[:2],
-            nms_type='rotate',
+            nms_type="rotate",
             pre_max_size=1000,
             post_max_size=83,
-            nms_thr=0.2)))
+            nms_thr=0.2,
+        )
+    ),
+)
diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/centerpoint_02pillar_second_secfpn_nus.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/centerpoint_02pillar_second_secfpn_nus.py
index 311d763..1eca10d 100644
--- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/centerpoint_02pillar_second_secfpn_nus.py
+++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/centerpoint_02pillar_second_secfpn_nus.py
@@ -1,61 +1,68 @@
 voxel_size = [0.2, 0.2, 8]
 model = dict(
-    type='CenterPoint',
+    type="CenterPoint",
     pts_voxel_layer=dict(
-        max_num_points=20, voxel_size=voxel_size, max_voxels=(30000, 40000)),
+        max_num_points=20, voxel_size=voxel_size, max_voxels=(30000, 40000)
+    ),
     pts_voxel_encoder=dict(
-        type='PillarFeatureNet',
+        type="PillarFeatureNet",
         in_channels=5,
         feat_channels=[64],
         with_distance=False,
         voxel_size=(0.2, 0.2, 8),
-        norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01),
-        legacy=False),
+        norm_cfg=dict(type="BN1d", eps=1e-3, momentum=0.01),
+        legacy=False,
+    ),
     pts_middle_encoder=dict(
-        type='PointPillarsScatter', in_channels=64, output_shape=(512, 512)),
+        type="PointPillarsScatter", in_channels=64, output_shape=(512, 512)
+    ),
     pts_backbone=dict(
-        type='SECOND',
+        type="SECOND",
         in_channels=64,
         out_channels=[64, 128, 256],
         layer_nums=[3, 5, 5],
         layer_strides=[2, 2, 2],
-        norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),
-        conv_cfg=dict(type='Conv2d', bias=False)),
+        norm_cfg=dict(type="BN", eps=1e-3, momentum=0.01),
+        conv_cfg=dict(type="Conv2d", bias=False),
+    ),
     pts_neck=dict(
-        type='SECONDFPN',
+        type="SECONDFPN",
         in_channels=[64, 128, 256],
         out_channels=[128, 128, 128],
         upsample_strides=[0.5, 1, 2],
-        norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),
-        upsample_cfg=dict(type='deconv', bias=False),
-        use_conv_for_no_stride=True),
+        norm_cfg=dict(type="BN", eps=1e-3, momentum=0.01),
+        upsample_cfg=dict(type="deconv", bias=False),
+        use_conv_for_no_stride=True,
+    ),
     pts_bbox_head=dict(
-        type='CenterHead',
+        type="CenterHead",
         in_channels=sum([128, 128, 128]),
         tasks=[
-            dict(num_class=1, class_names=['car']),
-            dict(num_class=2, class_names=['truck', 'construction_vehicle']),
-            dict(num_class=2, class_names=['bus', 'trailer']),
-            dict(num_class=1, class_names=['barrier']),
-            dict(num_class=2, class_names=['motorcycle', 'bicycle']),
-            dict(num_class=2, class_names=['pedestrian', 'traffic_cone']),
+            dict(num_class=1, class_names=["car"]),
+            dict(num_class=2, class_names=["truck", "construction_vehicle"]),
+            dict(num_class=2, class_names=["bus", "trailer"]),
+            dict(num_class=1, class_names=["barrier"]),
+            dict(num_class=2, class_names=["motorcycle", "bicycle"]),
+            dict(num_class=2, class_names=["pedestrian", "traffic_cone"]),
         ],
         common_heads=dict(
-            reg=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2), vel=(2, 2)),
+            reg=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2), vel=(2, 2)
+        ),
         share_conv_channel=64,
         bbox_coder=dict(
-            type='CenterPointBBoxCoder',
+            type="CenterPointBBoxCoder",
             post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0],
             max_num=500,
             score_threshold=0.1,
             out_size_factor=4,
             voxel_size=voxel_size[:2],
-            code_size=9),
-        separate_head=dict(
-            type='SeparateHead', init_bias=-2.19, final_kernel=3),
-        loss_cls=dict(type='GaussianFocalLoss', reduction='mean'),
-        loss_bbox=dict(type='L1Loss', reduction='mean', loss_weight=0.25),
-        norm_bbox=True),
+            code_size=9,
+        ),
+        separate_head=dict(type="SeparateHead", init_bias=-2.19, final_kernel=3),
+        loss_cls=dict(type="GaussianFocalLoss", reduction="mean"),
+        loss_bbox=dict(type="L1Loss", reduction="mean", loss_weight=0.25),
+        norm_bbox=True,
+    ),
     # model training and testing settings
     train_cfg=dict(
         pts=dict(
@@ -66,7 +73,9 @@
             gaussian_overlap=0.1,
             max_objs=500,
             min_radius=2,
-            code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2])),
+            code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2],
+        )
+    ),
     test_cfg=dict(
         pts=dict(
             post_center_limit_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0],
@@ -77,7 +86,10 @@
             pc_range=[-51.2, -51.2],
             out_size_factor=4,
             voxel_size=voxel_size[:2],
-            nms_type='rotate',
+            nms_type="rotate",
             pre_max_size=1000,
             post_max_size=83,
-            nms_thr=0.2)))
+            nms_thr=0.2,
+        )
+    ),
+)
diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/fcos3d.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/fcos3d.py
index 92ea907..d5eb874 100644
--- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/fcos3d.py
+++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/fcos3d.py
@@ -1,25 +1,27 @@
 model = dict(
-    type='FCOSMono3D',
-    pretrained='open-mmlab://detectron2/resnet101_caffe',
+    type="FCOSMono3D",
+    pretrained="open-mmlab://detectron2/resnet101_caffe",
     backbone=dict(
-        type='ResNet',
+        type="ResNet",
         depth=101,
         num_stages=4,
         out_indices=(0, 1, 2, 3),
         frozen_stages=1,
-        norm_cfg=dict(type='BN', requires_grad=False),
+        norm_cfg=dict(type="BN", requires_grad=False),
         norm_eval=True,
-        style='caffe'),
+        style="caffe",
+    ),
     neck=dict(
-        type='FPN',
+        type="FPN",
         in_channels=[256, 512, 1024, 2048],
         out_channels=256,
         start_level=1,
-        add_extra_convs='on_output',
+        add_extra_convs="on_output",
         num_outs=5,
-        relu_before_extra_convs=True),
+        relu_before_extra_convs=True,
+    ),
     bbox_head=dict(
-        type='FCOSMono3DHead',
+        type="FCOSMono3DHead",
         num_classes=10,
         in_channels=256,
         stacked_convs=2,
@@ -31,39 +33,37 @@
         dir_offset=0.7854,  # pi/4
         strides=[8, 16, 32, 64, 128],
         group_reg_dims=(2, 1, 3, 1, 2),  # offset, depth, size, rot, velo
-        cls_branch=(256, ),
+        cls_branch=(256,),
         reg_branch=(
-            (256, ),  # offset
-            (256, ),  # depth
-            (256, ),  # size
-            (256, ),  # rot
-            ()  # velo
+            (256,),  # offset
+            (256,),  # depth
+            (256,),  # size
+            (256,),  # rot
+            (),  # velo
         ),
-        dir_branch=(256, ),
-        attr_branch=(256, ),
+        dir_branch=(256,),
+        attr_branch=(256,),
         loss_cls=dict(
-            type='FocalLoss',
-            use_sigmoid=True,
-            gamma=2.0,
-            alpha=0.25,
-            loss_weight=1.0),
-        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
-        loss_dir=dict(
-            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
-        loss_attr=dict(
-            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+            type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0
+        ),
+        loss_bbox=dict(type="SmoothL1Loss", beta=1.0 / 9.0, loss_weight=1.0),
+        loss_dir=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0),
+        loss_attr=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0),
         loss_centerness=dict(
-            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
+            type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0
+        ),
         norm_on_bbox=True,
         centerness_on_reg=True,
         center_sampling=True,
         conv_bias=True,
-        dcn_on_last_conv=True),
+        dcn_on_last_conv=True,
+    ),
     train_cfg=dict(
         allowed_border=0,
         code_weight=[1.0, 1.0, 0.2, 1.0, 1.0, 1.0, 1.0, 0.05, 0.05],
         pos_weight=-1,
-        debug=False),
+        debug=False,
+    ),
     test_cfg=dict(
         use_rotate_nms=True,
         nms_across_levels=False,
@@ -71,4 +71,6 @@
         nms_thr=0.8,
         score_thr=0.05,
         min_bbox_size=0,
-        max_per_img=200))
+        max_per_img=200,
+    ),
+)
diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/groupfree3d.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/groupfree3d.py
index 077d049..d89c549 100644
--- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/groupfree3d.py
+++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/groupfree3d.py
@@ -1,71 +1,65 @@
 model = dict(
-    type='GroupFree3DNet',
+    type="GroupFree3DNet",
     backbone=dict(
-        type='PointNet2SASSG',
+        type="PointNet2SASSG",
         in_channels=3,
         num_points=(2048, 1024, 512, 256),
         radius=(0.2, 0.4, 0.8, 1.2),
         num_samples=(64, 32, 16, 16),
-        sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256),
-                     (128, 128, 256)),
+        sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256), (128, 128, 256)),
         fp_channels=((256, 256), (256, 288)),
-        norm_cfg=dict(type='BN2d'),
+        norm_cfg=dict(type="BN2d"),
         sa_cfg=dict(
-            type='PointSAModule',
-            pool_mod='max',
-            use_xyz=True,
-            normalize_xyz=True)),
+            type="PointSAModule", pool_mod="max", use_xyz=True, normalize_xyz=True
+        ),
+    ),
     bbox_head=dict(
-        type='GroupFree3DHead',
+        type="GroupFree3DHead",
         in_channels=288,
         num_decoder_layers=6,
         num_proposal=256,
         transformerlayers=dict(
-            type='BaseTransformerLayer',
+            type="BaseTransformerLayer",
             attn_cfgs=dict(
-                type='GroupFree3DMHA',
+                type="GroupFree3DMHA",
                 embed_dims=288,
                 num_heads=8,
                 attn_drop=0.1,
-                dropout_layer=dict(type='Dropout', drop_prob=0.1)),
+                dropout_layer=dict(type="Dropout", drop_prob=0.1),
+            ),
             ffn_cfgs=dict(
                 embed_dims=288,
                 feedforward_channels=2048,
                 ffn_drop=0.1,
-                act_cfg=dict(type='ReLU', inplace=True)),
-            operation_order=('self_attn', 'norm', 'cross_attn', 'norm', 'ffn',
-                             'norm')),
+                act_cfg=dict(type="ReLU", inplace=True),
+            ),
+            operation_order=("self_attn", "norm", "cross_attn", "norm", "ffn", "norm"),
+        ),
         pred_layer_cfg=dict(
-            in_channels=288, shared_conv_channels=(288, 288), bias=True),
+            in_channels=288, shared_conv_channels=(288, 288), bias=True
+        ),
         sampling_objectness_loss=dict(
-            type='FocalLoss',
-            use_sigmoid=True,
-            gamma=2.0,
-            alpha=0.25,
-            loss_weight=8.0),
+            type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=8.0
+        ),
         objectness_loss=dict(
-            type='FocalLoss',
-            use_sigmoid=True,
-            gamma=2.0,
-            alpha=0.25,
-            loss_weight=1.0),
-        center_loss=dict(
-            type='SmoothL1Loss', reduction='sum', loss_weight=10.0),
-        dir_class_loss=dict(
-            type='CrossEntropyLoss', reduction='sum', loss_weight=1.0),
-        dir_res_loss=dict(
-            type='SmoothL1Loss', reduction='sum', loss_weight=10.0),
-        size_class_loss=dict(
-            type='CrossEntropyLoss', reduction='sum', loss_weight=1.0),
+            type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0
+        ),
+        center_loss=dict(type="SmoothL1Loss", reduction="sum", loss_weight=10.0),
+        dir_class_loss=dict(type="CrossEntropyLoss", reduction="sum", loss_weight=1.0),
+        dir_res_loss=dict(type="SmoothL1Loss", reduction="sum", loss_weight=10.0),
+        size_class_loss=dict(type="CrossEntropyLoss", reduction="sum", loss_weight=1.0),
         size_res_loss=dict(
-            type='SmoothL1Loss', beta=1.0, reduction='sum', loss_weight=10.0),
-        semantic_loss=dict(
-            type='CrossEntropyLoss', reduction='sum', loss_weight=1.0)),
+            type="SmoothL1Loss", beta=1.0, reduction="sum", loss_weight=10.0
+        ),
+        semantic_loss=dict(type="CrossEntropyLoss", reduction="sum", loss_weight=1.0),
+    ),
     # model training and testing settings
-    train_cfg=dict(sample_mod='kps'),
+    train_cfg=dict(sample_mod="kps"),
     test_cfg=dict(
-        sample_mod='kps',
+        sample_mod="kps",
         nms_thr=0.25,
         score_thr=0.0,
per_class_proposal=True, - prediction_stages='last')) + prediction_stages="last", + ), +) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/h3dnet.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/h3dnet.py index 7605667..9873a6a 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/h3dnet.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/h3dnet.py @@ -1,8 +1,8 @@ primitive_z_cfg = dict( - type='PrimitiveHead', + type="PrimitiveHead", num_dims=2, num_classes=18, - primitive_mode='z', + primitive_mode="z", upper_thresh=100.0, surface_thresh=0.5, vote_module_cfg=dict( @@ -10,57 +10,61 @@ vote_per_seed=1, gt_per_seed=1, conv_channels=(256, 256), - conv_cfg=dict(type='Conv1d'), - norm_cfg=dict(type='BN1d'), + conv_cfg=dict(type="Conv1d"), + norm_cfg=dict(type="BN1d"), norm_feats=True, vote_loss=dict( - type='ChamferDistance', - mode='l1', - reduction='none', - loss_dst_weight=10.0)), + type="ChamferDistance", mode="l1", reduction="none", loss_dst_weight=10.0 + ), + ), vote_aggregation_cfg=dict( - type='PointSAModule', + type="PointSAModule", num_point=1024, radius=0.3, num_sample=16, mlp_channels=[256, 128, 128, 128], use_xyz=True, - normalize_xyz=True), + normalize_xyz=True, + ), feat_channels=(128, 128), - conv_cfg=dict(type='Conv1d'), - norm_cfg=dict(type='BN1d'), + conv_cfg=dict(type="Conv1d"), + norm_cfg=dict(type="BN1d"), objectness_loss=dict( - type='CrossEntropyLoss', + type="CrossEntropyLoss", class_weight=[0.4, 0.6], - reduction='mean', - loss_weight=30.0), + reduction="mean", + loss_weight=30.0, + ), center_loss=dict( - type='ChamferDistance', - mode='l1', - reduction='sum', + type="ChamferDistance", + mode="l1", + reduction="sum", loss_src_weight=0.5, - loss_dst_weight=0.5), + loss_dst_weight=0.5, + ), semantic_reg_loss=dict( - type='ChamferDistance', - mode='l1', - reduction='sum', + type="ChamferDistance", + mode="l1", + reduction="sum", loss_src_weight=0.5, - loss_dst_weight=0.5), - semantic_cls_loss=dict( - type='CrossEntropyLoss', reduction='sum', loss_weight=1.0), + loss_dst_weight=0.5, + ), + semantic_cls_loss=dict(type="CrossEntropyLoss", reduction="sum", loss_weight=1.0), train_cfg=dict( dist_thresh=0.2, var_thresh=1e-2, lower_thresh=1e-6, num_point=100, num_point_line=10, - line_thresh=0.2)) + line_thresh=0.2, + ), +) primitive_xy_cfg = dict( - type='PrimitiveHead', + type="PrimitiveHead", num_dims=1, num_classes=18, - primitive_mode='xy', + primitive_mode="xy", upper_thresh=100.0, surface_thresh=0.5, vote_module_cfg=dict( @@ -68,57 +72,61 @@ vote_per_seed=1, gt_per_seed=1, conv_channels=(256, 256), - conv_cfg=dict(type='Conv1d'), - norm_cfg=dict(type='BN1d'), + conv_cfg=dict(type="Conv1d"), + norm_cfg=dict(type="BN1d"), norm_feats=True, vote_loss=dict( - type='ChamferDistance', - mode='l1', - reduction='none', - loss_dst_weight=10.0)), + type="ChamferDistance", mode="l1", reduction="none", loss_dst_weight=10.0 + ), + ), vote_aggregation_cfg=dict( - type='PointSAModule', + type="PointSAModule", num_point=1024, radius=0.3, num_sample=16, mlp_channels=[256, 128, 128, 128], use_xyz=True, - normalize_xyz=True), + normalize_xyz=True, + ), feat_channels=(128, 128), - conv_cfg=dict(type='Conv1d'), - norm_cfg=dict(type='BN1d'), + conv_cfg=dict(type="Conv1d"), + norm_cfg=dict(type="BN1d"), objectness_loss=dict( - type='CrossEntropyLoss', + type="CrossEntropyLoss", class_weight=[0.4, 0.6], - reduction='mean', - loss_weight=30.0), + reduction="mean", + loss_weight=30.0, + ), center_loss=dict( - 
type='ChamferDistance', - mode='l1', - reduction='sum', + type="ChamferDistance", + mode="l1", + reduction="sum", loss_src_weight=0.5, - loss_dst_weight=0.5), + loss_dst_weight=0.5, + ), semantic_reg_loss=dict( - type='ChamferDistance', - mode='l1', - reduction='sum', + type="ChamferDistance", + mode="l1", + reduction="sum", loss_src_weight=0.5, - loss_dst_weight=0.5), - semantic_cls_loss=dict( - type='CrossEntropyLoss', reduction='sum', loss_weight=1.0), + loss_dst_weight=0.5, + ), + semantic_cls_loss=dict(type="CrossEntropyLoss", reduction="sum", loss_weight=1.0), train_cfg=dict( dist_thresh=0.2, var_thresh=1e-2, lower_thresh=1e-6, num_point=100, num_point_line=10, - line_thresh=0.2)) + line_thresh=0.2, + ), +) primitive_line_cfg = dict( - type='PrimitiveHead', + type="PrimitiveHead", num_dims=0, num_classes=18, - primitive_mode='line', + primitive_mode="line", upper_thresh=100.0, surface_thresh=0.5, vote_module_cfg=dict( @@ -126,216 +134,239 @@ vote_per_seed=1, gt_per_seed=1, conv_channels=(256, 256), - conv_cfg=dict(type='Conv1d'), - norm_cfg=dict(type='BN1d'), + conv_cfg=dict(type="Conv1d"), + norm_cfg=dict(type="BN1d"), norm_feats=True, vote_loss=dict( - type='ChamferDistance', - mode='l1', - reduction='none', - loss_dst_weight=10.0)), + type="ChamferDistance", mode="l1", reduction="none", loss_dst_weight=10.0 + ), + ), vote_aggregation_cfg=dict( - type='PointSAModule', + type="PointSAModule", num_point=1024, radius=0.3, num_sample=16, mlp_channels=[256, 128, 128, 128], use_xyz=True, - normalize_xyz=True), + normalize_xyz=True, + ), feat_channels=(128, 128), - conv_cfg=dict(type='Conv1d'), - norm_cfg=dict(type='BN1d'), + conv_cfg=dict(type="Conv1d"), + norm_cfg=dict(type="BN1d"), objectness_loss=dict( - type='CrossEntropyLoss', + type="CrossEntropyLoss", class_weight=[0.4, 0.6], - reduction='mean', - loss_weight=30.0), + reduction="mean", + loss_weight=30.0, + ), center_loss=dict( - type='ChamferDistance', - mode='l1', - reduction='sum', + type="ChamferDistance", + mode="l1", + reduction="sum", loss_src_weight=1.0, - loss_dst_weight=1.0), + loss_dst_weight=1.0, + ), semantic_reg_loss=dict( - type='ChamferDistance', - mode='l1', - reduction='sum', + type="ChamferDistance", + mode="l1", + reduction="sum", loss_src_weight=1.0, - loss_dst_weight=1.0), - semantic_cls_loss=dict( - type='CrossEntropyLoss', reduction='sum', loss_weight=2.0), + loss_dst_weight=1.0, + ), + semantic_cls_loss=dict(type="CrossEntropyLoss", reduction="sum", loss_weight=2.0), train_cfg=dict( dist_thresh=0.2, var_thresh=1e-2, lower_thresh=1e-6, num_point=100, num_point_line=10, - line_thresh=0.2)) + line_thresh=0.2, + ), +) model = dict( - type='H3DNet', + type="H3DNet", backbone=dict( - type='MultiBackbone', + type="MultiBackbone", num_streams=4, - suffixes=['net0', 'net1', 'net2', 'net3'], - conv_cfg=dict(type='Conv1d'), - norm_cfg=dict(type='BN1d', eps=1e-5, momentum=0.01), - act_cfg=dict(type='ReLU'), + suffixes=["net0", "net1", "net2", "net3"], + conv_cfg=dict(type="Conv1d"), + norm_cfg=dict(type="BN1d", eps=1e-5, momentum=0.01), + act_cfg=dict(type="ReLU"), backbones=dict( - type='PointNet2SASSG', + type="PointNet2SASSG", in_channels=4, num_points=(2048, 1024, 512, 256), radius=(0.2, 0.4, 0.8, 1.2), num_samples=(64, 32, 16, 16), - sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256), - (128, 128, 256)), + sa_channels=( + (64, 64, 128), + (128, 128, 256), + (128, 128, 256), + (128, 128, 256), + ), fp_channels=((256, 256), (256, 256)), - norm_cfg=dict(type='BN2d'), + norm_cfg=dict(type="BN2d"), 
sa_cfg=dict( - type='PointSAModule', - pool_mod='max', - use_xyz=True, - normalize_xyz=True))), + type="PointSAModule", pool_mod="max", use_xyz=True, normalize_xyz=True + ), + ), + ), rpn_head=dict( - type='VoteHead', + type="VoteHead", vote_module_cfg=dict( in_channels=256, vote_per_seed=1, gt_per_seed=3, conv_channels=(256, 256), - conv_cfg=dict(type='Conv1d'), - norm_cfg=dict(type='BN1d'), + conv_cfg=dict(type="Conv1d"), + norm_cfg=dict(type="BN1d"), norm_feats=True, vote_loss=dict( - type='ChamferDistance', - mode='l1', - reduction='none', - loss_dst_weight=10.0)), + type="ChamferDistance", + mode="l1", + reduction="none", + loss_dst_weight=10.0, + ), + ), vote_aggregation_cfg=dict( - type='PointSAModule', + type="PointSAModule", num_point=256, radius=0.3, num_sample=16, mlp_channels=[256, 128, 128, 128], use_xyz=True, - normalize_xyz=True), + normalize_xyz=True, + ), pred_layer_cfg=dict( - in_channels=128, shared_conv_channels=(128, 128), bias=True), - conv_cfg=dict(type='Conv1d'), - norm_cfg=dict(type='BN1d'), + in_channels=128, shared_conv_channels=(128, 128), bias=True + ), + conv_cfg=dict(type="Conv1d"), + norm_cfg=dict(type="BN1d"), objectness_loss=dict( - type='CrossEntropyLoss', + type="CrossEntropyLoss", class_weight=[0.2, 0.8], - reduction='sum', - loss_weight=5.0), + reduction="sum", + loss_weight=5.0, + ), center_loss=dict( - type='ChamferDistance', - mode='l2', - reduction='sum', + type="ChamferDistance", + mode="l2", + reduction="sum", loss_src_weight=10.0, - loss_dst_weight=10.0), - dir_class_loss=dict( - type='CrossEntropyLoss', reduction='sum', loss_weight=1.0), - dir_res_loss=dict( - type='SmoothL1Loss', reduction='sum', loss_weight=10.0), - size_class_loss=dict( - type='CrossEntropyLoss', reduction='sum', loss_weight=1.0), - size_res_loss=dict( - type='SmoothL1Loss', reduction='sum', loss_weight=10.0), - semantic_loss=dict( - type='CrossEntropyLoss', reduction='sum', loss_weight=1.0)), + loss_dst_weight=10.0, + ), + dir_class_loss=dict(type="CrossEntropyLoss", reduction="sum", loss_weight=1.0), + dir_res_loss=dict(type="SmoothL1Loss", reduction="sum", loss_weight=10.0), + size_class_loss=dict(type="CrossEntropyLoss", reduction="sum", loss_weight=1.0), + size_res_loss=dict(type="SmoothL1Loss", reduction="sum", loss_weight=10.0), + semantic_loss=dict(type="CrossEntropyLoss", reduction="sum", loss_weight=1.0), + ), roi_head=dict( - type='H3DRoIHead', + type="H3DRoIHead", primitive_list=[primitive_z_cfg, primitive_xy_cfg, primitive_line_cfg], bbox_head=dict( - type='H3DBboxHead', + type="H3DBboxHead", gt_per_seed=3, num_proposal=256, suface_matching_cfg=dict( - type='PointSAModule', + type="PointSAModule", num_point=256 * 6, radius=0.5, num_sample=32, mlp_channels=[128 + 6, 128, 64, 32], use_xyz=True, - normalize_xyz=True), + normalize_xyz=True, + ), line_matching_cfg=dict( - type='PointSAModule', + type="PointSAModule", num_point=256 * 12, radius=0.5, num_sample=32, mlp_channels=[128 + 12, 128, 64, 32], use_xyz=True, - normalize_xyz=True), + normalize_xyz=True, + ), feat_channels=(128, 128), primitive_refine_channels=[128, 128, 128], upper_thresh=100.0, surface_thresh=0.5, line_thresh=0.5, - conv_cfg=dict(type='Conv1d'), - norm_cfg=dict(type='BN1d'), + conv_cfg=dict(type="Conv1d"), + norm_cfg=dict(type="BN1d"), objectness_loss=dict( - type='CrossEntropyLoss', + type="CrossEntropyLoss", class_weight=[0.2, 0.8], - reduction='sum', - loss_weight=5.0), + reduction="sum", + loss_weight=5.0, + ), center_loss=dict( - type='ChamferDistance', - mode='l2', - reduction='sum', + 
type="ChamferDistance", + mode="l2", + reduction="sum", loss_src_weight=10.0, - loss_dst_weight=10.0), + loss_dst_weight=10.0, + ), dir_class_loss=dict( - type='CrossEntropyLoss', reduction='sum', loss_weight=0.1), - dir_res_loss=dict( - type='SmoothL1Loss', reduction='sum', loss_weight=10.0), + type="CrossEntropyLoss", reduction="sum", loss_weight=0.1 + ), + dir_res_loss=dict(type="SmoothL1Loss", reduction="sum", loss_weight=10.0), size_class_loss=dict( - type='CrossEntropyLoss', reduction='sum', loss_weight=0.1), - size_res_loss=dict( - type='SmoothL1Loss', reduction='sum', loss_weight=10.0), + type="CrossEntropyLoss", reduction="sum", loss_weight=0.1 + ), + size_res_loss=dict(type="SmoothL1Loss", reduction="sum", loss_weight=10.0), semantic_loss=dict( - type='CrossEntropyLoss', reduction='sum', loss_weight=0.1), + type="CrossEntropyLoss", reduction="sum", loss_weight=0.1 + ), cues_objectness_loss=dict( - type='CrossEntropyLoss', + type="CrossEntropyLoss", class_weight=[0.3, 0.7], - reduction='mean', - loss_weight=5.0), + reduction="mean", + loss_weight=5.0, + ), cues_semantic_loss=dict( - type='CrossEntropyLoss', + type="CrossEntropyLoss", class_weight=[0.3, 0.7], - reduction='mean', - loss_weight=5.0), + reduction="mean", + loss_weight=5.0, + ), proposal_objectness_loss=dict( - type='CrossEntropyLoss', + type="CrossEntropyLoss", class_weight=[0.2, 0.8], - reduction='none', - loss_weight=5.0), + reduction="none", + loss_weight=5.0, + ), primitive_center_loss=dict( - type='MSELoss', reduction='none', loss_weight=1.0))), + type="MSELoss", reduction="none", loss_weight=1.0 + ), + ), + ), # model training and testing settings train_cfg=dict( - rpn=dict( - pos_distance_thr=0.3, neg_distance_thr=0.6, sample_mod='vote'), + rpn=dict(pos_distance_thr=0.3, neg_distance_thr=0.6, sample_mod="vote"), rpn_proposal=dict(use_nms=False), rcnn=dict( pos_distance_thr=0.3, neg_distance_thr=0.6, - sample_mod='vote', + sample_mod="vote", far_threshold=0.6, near_threshold=0.3, mask_surface_threshold=0.3, label_surface_threshold=0.3, mask_line_threshold=0.3, - label_line_threshold=0.3)), + label_line_threshold=0.3, + ), + ), test_cfg=dict( rpn=dict( - sample_mod='seed', + sample_mod="seed", nms_thr=0.25, score_thr=0.05, per_class_proposal=True, - use_nms=False), + use_nms=False, + ), rcnn=dict( - sample_mod='seed', - nms_thr=0.25, - score_thr=0.05, - per_class_proposal=True))) + sample_mod="seed", nms_thr=0.25, score_thr=0.05, per_class_proposal=True + ), + ), +) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/hv_pointpillars_fpn_lyft.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/hv_pointpillars_fpn_lyft.py index 87c7fe0..49ebbea 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/hv_pointpillars_fpn_lyft.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/hv_pointpillars_fpn_lyft.py @@ -1,4 +1,4 @@ -_base_ = './hv_pointpillars_fpn_nus.py' +_base_ = "./hv_pointpillars_fpn_nus.py" # model settings (based on nuScenes model settings) # Voxel size for voxel encoder @@ -9,14 +9,19 @@ pts_voxel_layer=dict( max_num_points=20, point_cloud_range=[-80, -80, -5, 80, 80, 3], - max_voxels=(60000, 60000)), + max_voxels=(60000, 60000), + ), pts_voxel_encoder=dict( - feat_channels=[64], point_cloud_range=[-80, -80, -5, 80, 80, 3]), + feat_channels=[64], point_cloud_range=[-80, -80, -5, 80, 80, 3] + ), pts_middle_encoder=dict(output_shape=[640, 640]), pts_bbox_head=dict( num_classes=9, anchor_generator=dict( - ranges=[[-80, -80, -1.8, 80, 80, 
-1.8]], custom_values=[]), - bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder', code_size=7)), + ranges=[[-80, -80, -1.8, 80, 80, -1.8]], custom_values=[] + ), + bbox_coder=dict(type="DeltaXYZWLHRBBoxCoder", code_size=7), + ), # model training settings (based on nuScenes model settings) - train_cfg=dict(pts=dict(code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]))) + train_cfg=dict(pts=dict(code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])), +) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/hv_pointpillars_fpn_nus.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/hv_pointpillars_fpn_nus.py index e153f6c..0523cd0 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/hv_pointpillars_fpn_nus.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/hv_pointpillars_fpn_nus.py @@ -5,14 +5,15 @@ # keys in the config. voxel_size = [0.25, 0.25, 8] model = dict( - type='MVXFasterRCNN', + type="MVXFasterRCNN", pts_voxel_layer=dict( max_num_points=64, point_cloud_range=[-50, -50, -5, 50, 50, 3], voxel_size=voxel_size, - max_voxels=(30000, 40000)), + max_voxels=(30000, 40000), + ), pts_voxel_encoder=dict( - type='HardVFE', + type="HardVFE", in_channels=4, feat_channels=[64, 64], with_distance=False, @@ -20,71 +21,76 @@ with_cluster_center=True, with_voxel_center=True, point_cloud_range=[-50, -50, -5, 50, 50, 3], - norm_cfg=dict(type='naiveSyncBN1d', eps=1e-3, momentum=0.01)), + norm_cfg=dict(type="naiveSyncBN1d", eps=1e-3, momentum=0.01), + ), pts_middle_encoder=dict( - type='PointPillarsScatter', in_channels=64, output_shape=[400, 400]), + type="PointPillarsScatter", in_channels=64, output_shape=[400, 400] + ), pts_backbone=dict( - type='SECOND', + type="SECOND", in_channels=64, - norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + norm_cfg=dict(type="naiveSyncBN2d", eps=1e-3, momentum=0.01), layer_nums=[3, 5, 5], layer_strides=[2, 2, 2], - out_channels=[64, 128, 256]), + out_channels=[64, 128, 256], + ), pts_neck=dict( - type='FPN', - norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), - act_cfg=dict(type='ReLU'), + type="FPN", + norm_cfg=dict(type="naiveSyncBN2d", eps=1e-3, momentum=0.01), + act_cfg=dict(type="ReLU"), in_channels=[64, 128, 256], out_channels=256, start_level=0, - num_outs=3), + num_outs=3, + ), pts_bbox_head=dict( - type='Anchor3DHead', + type="Anchor3DHead", num_classes=10, in_channels=256, feat_channels=256, use_direction_classifier=True, anchor_generator=dict( - type='AlignedAnchor3DRangeGenerator', + type="AlignedAnchor3DRangeGenerator", ranges=[[-50, -50, -1.8, 50, 50, -1.8]], scales=[1, 2, 4], sizes=[ - [0.8660, 2.5981, 1.], # 1.5/sqrt(3) - [0.5774, 1.7321, 1.], # 1/sqrt(3) - [1., 1., 1.], + [0.8660, 2.5981, 1.0], # 1.5/sqrt(3) + [0.5774, 1.7321, 1.0], # 1/sqrt(3) + [1.0, 1.0, 1.0], [0.4, 0.4, 1], ], custom_values=[0, 0], rotations=[0, 1.57], - reshape_out=True), + reshape_out=True, + ), assigner_per_size=False, diff_rad_by_sin=True, dir_offset=0.7854, # pi/4 dir_limit_offset=0, - bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder', code_size=9), + bbox_coder=dict(type="DeltaXYZWLHRBBoxCoder", code_size=9), loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), - loss_dir=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2)), + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0 + ), + loss_bbox=dict(type="SmoothL1Loss", beta=1.0 / 9.0, 
loss_weight=1.0), + loss_dir=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=0.2), + ), # model training and testing settings train_cfg=dict( pts=dict( assigner=dict( - type='MaxIoUAssigner', - iou_calculator=dict(type='BboxOverlapsNearest3D'), + type="MaxIoUAssigner", + iou_calculator=dict(type="BboxOverlapsNearest3D"), pos_iou_thr=0.6, neg_iou_thr=0.3, min_pos_iou=0.3, - ignore_iof_thr=-1), + ignore_iof_thr=-1, + ), allowed_border=0, code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2], pos_weight=-1, - debug=False)), + debug=False, + ) + ), test_cfg=dict( pts=dict( use_rotate_nms=True, @@ -93,4 +99,7 @@ nms_thr=0.2, score_thr=0.05, min_bbox_size=0, - max_num=500))) + max_num=500, + ) + ), +) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/hv_pointpillars_fpn_range100_lyft.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/hv_pointpillars_fpn_range100_lyft.py index 9cd200f..c44318b 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/hv_pointpillars_fpn_range100_lyft.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/hv_pointpillars_fpn_range100_lyft.py @@ -1,4 +1,4 @@ -_base_ = './hv_pointpillars_fpn_nus.py' +_base_ = "./hv_pointpillars_fpn_nus.py" # model settings (based on nuScenes model settings) # Voxel size for voxel encoder @@ -9,14 +9,19 @@ pts_voxel_layer=dict( max_num_points=20, point_cloud_range=[-100, -100, -5, 100, 100, 3], - max_voxels=(60000, 60000)), + max_voxels=(60000, 60000), + ), pts_voxel_encoder=dict( - feat_channels=[64], point_cloud_range=[-100, -100, -5, 100, 100, 3]), + feat_channels=[64], point_cloud_range=[-100, -100, -5, 100, 100, 3] + ), pts_middle_encoder=dict(output_shape=[800, 800]), pts_bbox_head=dict( num_classes=9, anchor_generator=dict( - ranges=[[-100, -100, -1.8, 100, 100, -1.8]], custom_values=[]), - bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder', code_size=7)), + ranges=[[-100, -100, -1.8, 100, 100, -1.8]], custom_values=[] + ), + bbox_coder=dict(type="DeltaXYZWLHRBBoxCoder", code_size=7), + ), # model training settings (based on nuScenes model settings) - train_cfg=dict(pts=dict(code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]))) + train_cfg=dict(pts=dict(code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])), +) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/hv_pointpillars_secfpn_kitti.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/hv_pointpillars_secfpn_kitti.py index 85076d0..13672a9 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/hv_pointpillars_secfpn_kitti.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/hv_pointpillars_secfpn_kitti.py @@ -1,41 +1,45 @@ voxel_size = [0.16, 0.16, 4] model = dict( - type='VoxelNet', + type="VoxelNet", voxel_layer=dict( max_num_points=32, # max_points_per_voxel point_cloud_range=[0, -39.68, -3, 69.12, 39.68, 1], voxel_size=voxel_size, - max_voxels=(16000, 40000) # (training, testing) max_voxels + max_voxels=(16000, 40000), # (training, testing) max_voxels ), voxel_encoder=dict( - type='PillarFeatureNet', + type="PillarFeatureNet", in_channels=4, feat_channels=[64], with_distance=False, voxel_size=voxel_size, - point_cloud_range=[0, -39.68, -3, 69.12, 39.68, 1]), + point_cloud_range=[0, -39.68, -3, 69.12, 39.68, 1], + ), middle_encoder=dict( - type='PointPillarsScatter', in_channels=64, output_shape=[496, 432]), + type="PointPillarsScatter", in_channels=64, output_shape=[496, 432] + ), backbone=dict( - type='SECOND', + 
type="SECOND", in_channels=64, layer_nums=[3, 5, 5], layer_strides=[2, 2, 2], - out_channels=[64, 128, 256]), + out_channels=[64, 128, 256], + ), neck=dict( - type='SECONDFPN', + type="SECONDFPN", in_channels=[64, 128, 256], upsample_strides=[1, 2, 4], - out_channels=[128, 128, 128]), + out_channels=[128, 128, 128], + ), bbox_head=dict( - type='Anchor3DHead', + type="Anchor3DHead", num_classes=3, in_channels=384, feat_channels=384, use_direction_classifier=True, anchor_generator=dict( - type='Anchor3DRangeGenerator', + type="Anchor3DRangeGenerator", ranges=[ [0, -39.68, -0.6, 70.4, 39.68, -0.6], [0, -39.68, -0.6, 70.4, 39.68, -0.6], @@ -43,46 +47,48 @@ ], sizes=[[0.6, 0.8, 1.73], [0.6, 1.76, 1.73], [1.6, 3.9, 1.56]], rotations=[0, 1.57], - reshape_out=False), + reshape_out=False, + ), diff_rad_by_sin=True, - bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + bbox_coder=dict(type="DeltaXYZWLHRBBoxCoder"), loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), - loss_dir=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2)), + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0 + ), + loss_bbox=dict(type="SmoothL1Loss", beta=1.0 / 9.0, loss_weight=2.0), + loss_dir=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=0.2), + ), # model training and testing settings train_cfg=dict( assigner=[ dict( # for Pedestrian - type='MaxIoUAssigner', - iou_calculator=dict(type='BboxOverlapsNearest3D'), + type="MaxIoUAssigner", + iou_calculator=dict(type="BboxOverlapsNearest3D"), pos_iou_thr=0.5, neg_iou_thr=0.35, min_pos_iou=0.35, - ignore_iof_thr=-1), + ignore_iof_thr=-1, + ), dict( # for Cyclist - type='MaxIoUAssigner', - iou_calculator=dict(type='BboxOverlapsNearest3D'), + type="MaxIoUAssigner", + iou_calculator=dict(type="BboxOverlapsNearest3D"), pos_iou_thr=0.5, neg_iou_thr=0.35, min_pos_iou=0.35, - ignore_iof_thr=-1), + ignore_iof_thr=-1, + ), dict( # for Car - type='MaxIoUAssigner', - iou_calculator=dict(type='BboxOverlapsNearest3D'), + type="MaxIoUAssigner", + iou_calculator=dict(type="BboxOverlapsNearest3D"), pos_iou_thr=0.6, neg_iou_thr=0.45, min_pos_iou=0.45, - ignore_iof_thr=-1), + ignore_iof_thr=-1, + ), ], allowed_border=0, pos_weight=-1, - debug=False), + debug=False, + ), test_cfg=dict( use_rotate_nms=True, nms_across_levels=False, @@ -90,4 +96,6 @@ score_thr=0.1, min_bbox_size=0, nms_pre=100, - max_num=50)) + max_num=50, + ), +) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/hv_pointpillars_secfpn_waymo.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/hv_pointpillars_secfpn_waymo.py index 14873ea..79328b5 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/hv_pointpillars_secfpn_waymo.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/hv_pointpillars_secfpn_waymo.py @@ -5,14 +5,15 @@ # keys in the config. 
voxel_size = [0.32, 0.32, 6] model = dict( - type='MVXFasterRCNN', + type="MVXFasterRCNN", pts_voxel_layer=dict( max_num_points=20, point_cloud_range=[-74.88, -74.88, -2, 74.88, 74.88, 4], voxel_size=voxel_size, - max_voxels=(32000, 32000)), + max_voxels=(32000, 32000), + ), pts_voxel_encoder=dict( - type='HardVFE', + type="HardVFE", in_channels=5, feat_channels=[64], with_distance=False, @@ -20,83 +21,92 @@ with_cluster_center=True, with_voxel_center=True, point_cloud_range=[-74.88, -74.88, -2, 74.88, 74.88, 4], - norm_cfg=dict(type='naiveSyncBN1d', eps=1e-3, momentum=0.01)), + norm_cfg=dict(type="naiveSyncBN1d", eps=1e-3, momentum=0.01), + ), pts_middle_encoder=dict( - type='PointPillarsScatter', in_channels=64, output_shape=[468, 468]), + type="PointPillarsScatter", in_channels=64, output_shape=[468, 468] + ), pts_backbone=dict( - type='SECOND', + type="SECOND", in_channels=64, - norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + norm_cfg=dict(type="naiveSyncBN2d", eps=1e-3, momentum=0.01), layer_nums=[3, 5, 5], layer_strides=[1, 2, 2], - out_channels=[64, 128, 256]), + out_channels=[64, 128, 256], + ), pts_neck=dict( - type='SECONDFPN', - norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + type="SECONDFPN", + norm_cfg=dict(type="naiveSyncBN2d", eps=1e-3, momentum=0.01), in_channels=[64, 128, 256], upsample_strides=[1, 2, 4], - out_channels=[128, 128, 128]), + out_channels=[128, 128, 128], + ), pts_bbox_head=dict( - type='Anchor3DHead', + type="Anchor3DHead", num_classes=3, in_channels=384, feat_channels=384, use_direction_classifier=True, anchor_generator=dict( - type='AlignedAnchor3DRangeGenerator', - ranges=[[-74.88, -74.88, -0.0345, 74.88, 74.88, -0.0345], - [-74.88, -74.88, -0.1188, 74.88, 74.88, -0.1188], - [-74.88, -74.88, 0, 74.88, 74.88, 0]], + type="AlignedAnchor3DRangeGenerator", + ranges=[ + [-74.88, -74.88, -0.0345, 74.88, 74.88, -0.0345], + [-74.88, -74.88, -0.1188, 74.88, 74.88, -0.1188], + [-74.88, -74.88, 0, 74.88, 74.88, 0], + ], sizes=[ [2.08, 4.73, 1.77], # car [0.84, 1.81, 1.77], # cyclist - [0.84, 0.91, 1.74] # pedestrian + [0.84, 0.91, 1.74], # pedestrian ], rotations=[0, 1.57], - reshape_out=False), + reshape_out=False, + ), diff_rad_by_sin=True, dir_offset=0.7854, # pi/4 dir_limit_offset=0, - bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder', code_size=7), + bbox_coder=dict(type="DeltaXYZWLHRBBoxCoder", code_size=7), loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), - loss_dir=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2)), + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0 + ), + loss_bbox=dict(type="SmoothL1Loss", beta=1.0 / 9.0, loss_weight=1.0), + loss_dir=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=0.2), + ), # model training and testing settings train_cfg=dict( pts=dict( assigner=[ dict( # car - type='MaxIoUAssigner', - iou_calculator=dict(type='BboxOverlapsNearest3D'), + type="MaxIoUAssigner", + iou_calculator=dict(type="BboxOverlapsNearest3D"), pos_iou_thr=0.55, neg_iou_thr=0.4, min_pos_iou=0.4, - ignore_iof_thr=-1), + ignore_iof_thr=-1, + ), dict( # cyclist - type='MaxIoUAssigner', - iou_calculator=dict(type='BboxOverlapsNearest3D'), + type="MaxIoUAssigner", + iou_calculator=dict(type="BboxOverlapsNearest3D"), pos_iou_thr=0.5, neg_iou_thr=0.3, min_pos_iou=0.3, - ignore_iof_thr=-1), + ignore_iof_thr=-1, + ), dict( # pedestrian - 
type='MaxIoUAssigner', - iou_calculator=dict(type='BboxOverlapsNearest3D'), + type="MaxIoUAssigner", + iou_calculator=dict(type="BboxOverlapsNearest3D"), pos_iou_thr=0.5, neg_iou_thr=0.3, min_pos_iou=0.3, - ignore_iof_thr=-1), + ignore_iof_thr=-1, + ), ], allowed_border=0, code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], pos_weight=-1, - debug=False)), + debug=False, + ) + ), test_cfg=dict( pts=dict( use_rotate_nms=True, @@ -105,4 +115,7 @@ nms_thr=0.25, score_thr=0.1, min_bbox_size=0, - max_num=500))) + max_num=500, + ) + ), +) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/hv_second_secfpn_kitti.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/hv_second_secfpn_kitti.py index 6bf18ab..f3ad847 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/hv_second_secfpn_kitti.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/hv_second_secfpn_kitti.py @@ -1,37 +1,41 @@ voxel_size = [0.05, 0.05, 0.1] model = dict( - type='VoxelNet', + type="VoxelNet", voxel_layer=dict( max_num_points=5, point_cloud_range=[0, -40, -3, 70.4, 40, 1], voxel_size=voxel_size, - max_voxels=(16000, 40000)), - voxel_encoder=dict(type='HardSimpleVFE'), + max_voxels=(16000, 40000), + ), + voxel_encoder=dict(type="HardSimpleVFE"), middle_encoder=dict( - type='SparseEncoder', + type="SparseEncoder", in_channels=4, sparse_shape=[41, 1600, 1408], - order=('conv', 'norm', 'act')), + order=("conv", "norm", "act"), + ), backbone=dict( - type='SECOND', + type="SECOND", in_channels=256, layer_nums=[5, 5], layer_strides=[1, 2], - out_channels=[128, 256]), + out_channels=[128, 256], + ), neck=dict( - type='SECONDFPN', + type="SECONDFPN", in_channels=[128, 256], upsample_strides=[1, 2], - out_channels=[256, 256]), + out_channels=[256, 256], + ), bbox_head=dict( - type='Anchor3DHead', + type="Anchor3DHead", num_classes=3, in_channels=512, feat_channels=512, use_direction_classifier=True, anchor_generator=dict( - type='Anchor3DRangeGenerator', + type="Anchor3DRangeGenerator", ranges=[ [0, -40.0, -0.6, 70.4, 40.0, -0.6], [0, -40.0, -0.6, 70.4, 40.0, -0.6], @@ -39,46 +43,48 @@ ], sizes=[[0.6, 0.8, 1.73], [0.6, 1.76, 1.73], [1.6, 3.9, 1.56]], rotations=[0, 1.57], - reshape_out=False), + reshape_out=False, + ), diff_rad_by_sin=True, - bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + bbox_coder=dict(type="DeltaXYZWLHRBBoxCoder"), loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), - loss_dir=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2)), + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0 + ), + loss_bbox=dict(type="SmoothL1Loss", beta=1.0 / 9.0, loss_weight=2.0), + loss_dir=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=0.2), + ), # model training and testing settings train_cfg=dict( assigner=[ dict( # for Pedestrian - type='MaxIoUAssigner', - iou_calculator=dict(type='BboxOverlapsNearest3D'), + type="MaxIoUAssigner", + iou_calculator=dict(type="BboxOverlapsNearest3D"), pos_iou_thr=0.35, neg_iou_thr=0.2, min_pos_iou=0.2, - ignore_iof_thr=-1), + ignore_iof_thr=-1, + ), dict( # for Cyclist - type='MaxIoUAssigner', - iou_calculator=dict(type='BboxOverlapsNearest3D'), + type="MaxIoUAssigner", + iou_calculator=dict(type="BboxOverlapsNearest3D"), pos_iou_thr=0.35, neg_iou_thr=0.2, min_pos_iou=0.2, - ignore_iof_thr=-1), + ignore_iof_thr=-1, + ), dict( # for Car - type='MaxIoUAssigner', 
- iou_calculator=dict(type='BboxOverlapsNearest3D'), + type="MaxIoUAssigner", + iou_calculator=dict(type="BboxOverlapsNearest3D"), pos_iou_thr=0.6, neg_iou_thr=0.45, min_pos_iou=0.45, - ignore_iof_thr=-1), + ignore_iof_thr=-1, + ), ], allowed_border=0, pos_weight=-1, - debug=False), + debug=False, + ), test_cfg=dict( use_rotate_nms=True, nms_across_levels=False, @@ -86,4 +92,6 @@ score_thr=0.1, min_bbox_size=0, nms_pre=100, - max_num=50)) + max_num=50, + ), +) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/hv_second_secfpn_waymo.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/hv_second_secfpn_waymo.py index eb9bd3a..196eeca 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/hv_second_secfpn_waymo.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/hv_second_secfpn_waymo.py @@ -5,91 +5,99 @@ # keys in the config. voxel_size = [0.08, 0.08, 0.1] model = dict( - type='VoxelNet', + type="VoxelNet", voxel_layer=dict( max_num_points=10, point_cloud_range=[-76.8, -51.2, -2, 76.8, 51.2, 4], voxel_size=voxel_size, - max_voxels=(80000, 90000)), - voxel_encoder=dict(type='HardSimpleVFE', num_features=5), + max_voxels=(80000, 90000), + ), + voxel_encoder=dict(type="HardSimpleVFE", num_features=5), middle_encoder=dict( - type='SparseEncoder', + type="SparseEncoder", in_channels=5, sparse_shape=[61, 1280, 1920], - order=('conv', 'norm', 'act')), + order=("conv", "norm", "act"), + ), backbone=dict( - type='SECOND', + type="SECOND", in_channels=384, - norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + norm_cfg=dict(type="naiveSyncBN2d", eps=1e-3, momentum=0.01), layer_nums=[5, 5], layer_strides=[1, 2], - out_channels=[128, 256]), + out_channels=[128, 256], + ), neck=dict( - type='SECONDFPN', - norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01), + type="SECONDFPN", + norm_cfg=dict(type="naiveSyncBN2d", eps=1e-3, momentum=0.01), in_channels=[128, 256], upsample_strides=[1, 2], - out_channels=[256, 256]), + out_channels=[256, 256], + ), bbox_head=dict( - type='Anchor3DHead', + type="Anchor3DHead", num_classes=3, in_channels=512, feat_channels=512, use_direction_classifier=True, anchor_generator=dict( - type='AlignedAnchor3DRangeGenerator', - ranges=[[-76.8, -51.2, -0.0345, 76.8, 51.2, -0.0345], - [-76.8, -51.2, 0, 76.8, 51.2, 0], - [-76.8, -51.2, -0.1188, 76.8, 51.2, -0.1188]], + type="AlignedAnchor3DRangeGenerator", + ranges=[ + [-76.8, -51.2, -0.0345, 76.8, 51.2, -0.0345], + [-76.8, -51.2, 0, 76.8, 51.2, 0], + [-76.8, -51.2, -0.1188, 76.8, 51.2, -0.1188], + ], sizes=[ [2.08, 4.73, 1.77], # car [0.84, 0.91, 1.74], # pedestrian - [0.84, 1.81, 1.77] # cyclist + [0.84, 1.81, 1.77], # cyclist ], rotations=[0, 1.57], - reshape_out=False), + reshape_out=False, + ), diff_rad_by_sin=True, dir_offset=0.7854, # pi/4 dir_limit_offset=0, - bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder', code_size=7), + bbox_coder=dict(type="DeltaXYZWLHRBBoxCoder", code_size=7), loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), - loss_dir=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2)), + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0 + ), + loss_bbox=dict(type="SmoothL1Loss", beta=1.0 / 9.0, loss_weight=1.0), + loss_dir=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=0.2), + ), # model training and testing settings train_cfg=dict( assigner=[ dict( 
# car - type='MaxIoUAssigner', - iou_calculator=dict(type='BboxOverlapsNearest3D'), + type="MaxIoUAssigner", + iou_calculator=dict(type="BboxOverlapsNearest3D"), pos_iou_thr=0.55, neg_iou_thr=0.4, min_pos_iou=0.4, - ignore_iof_thr=-1), + ignore_iof_thr=-1, + ), dict( # pedestrian - type='MaxIoUAssigner', - iou_calculator=dict(type='BboxOverlapsNearest3D'), + type="MaxIoUAssigner", + iou_calculator=dict(type="BboxOverlapsNearest3D"), pos_iou_thr=0.5, neg_iou_thr=0.3, min_pos_iou=0.3, - ignore_iof_thr=-1), + ignore_iof_thr=-1, + ), dict( # cyclist - type='MaxIoUAssigner', - iou_calculator=dict(type='BboxOverlapsNearest3D'), + type="MaxIoUAssigner", + iou_calculator=dict(type="BboxOverlapsNearest3D"), pos_iou_thr=0.5, neg_iou_thr=0.3, min_pos_iou=0.3, - ignore_iof_thr=-1) + ignore_iof_thr=-1, + ), ], allowed_border=0, code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], pos_weight=-1, - debug=False), + debug=False, + ), test_cfg=dict( use_rotate_nms=True, nms_across_levels=False, @@ -97,4 +105,6 @@ nms_thr=0.25, score_thr=0.1, min_bbox_size=0, - max_num=500)) + max_num=500, + ), +) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/imvotenet_image.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/imvotenet_image.py index 981f8bc..aeaeb81 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/imvotenet_image.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/imvotenet_image.py @@ -1,108 +1,121 @@ model = dict( - type='ImVoteNet', + type="ImVoteNet", img_backbone=dict( - type='ResNet', + type="ResNet", depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), + norm_cfg=dict(type="BN", requires_grad=False), norm_eval=True, - style='caffe'), + style="caffe", + ), img_neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5), + type="FPN", in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5 + ), img_rpn_head=dict( - type='RPNHead', + type="RPNHead", in_channels=256, feat_channels=256, anchor_generator=dict( - type='AnchorGenerator', + type="AnchorGenerator", scales=[8], ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), + strides=[4, 8, 16, 32, 64], + ), bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + type="DeltaXYWHBBoxCoder", + target_means=[0.0, 0.0, 0.0, 0.0], + target_stds=[1.0, 1.0, 1.0, 1.0], + ), + loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type="L1Loss", loss_weight=1.0), + ), img_roi_head=dict( - type='StandardRoIHead', + type="StandardRoIHead", bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + type="SingleRoIExtractor", + roi_layer=dict(type="RoIAlign", output_size=7, sampling_ratio=0), out_channels=256, - featmap_strides=[4, 8, 16, 32]), + featmap_strides=[4, 8, 16, 32], + ), bbox_head=dict( - type='Shared2FCBBoxHead', + type="Shared2FCBBoxHead", in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=10, bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), + type="DeltaXYWHBBoxCoder", + target_means=[0.0, 0.0, 0.0, 0.0], + target_stds=[0.1, 0.1, 0.2, 0.2], + ), reg_class_agnostic=False, - loss_cls=dict( - 
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0))), - + loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type="L1Loss", loss_weight=1.0), + ), + ), # model training and testing settings train_cfg=dict( img_rpn=dict( assigner=dict( - type='MaxIoUAssigner', + type="MaxIoUAssigner", pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3, match_low_quality=True, - ignore_iof_thr=-1), + ignore_iof_thr=-1, + ), sampler=dict( - type='RandomSampler', + type="RandomSampler", num=256, pos_fraction=0.5, neg_pos_ub=-1, - add_gt_as_proposals=False), + add_gt_as_proposals=False, + ), allowed_border=-1, pos_weight=-1, - debug=False), + debug=False, + ), img_rpn_proposal=dict( nms_across_levels=False, nms_pre=2000, nms_post=1000, max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), + nms=dict(type="nms", iou_threshold=0.7), + min_bbox_size=0, + ), img_rcnn=dict( assigner=dict( - type='MaxIoUAssigner', + type="MaxIoUAssigner", pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0.5, match_low_quality=False, - ignore_iof_thr=-1), + ignore_iof_thr=-1, + ), sampler=dict( - type='RandomSampler', + type="RandomSampler", num=512, pos_fraction=0.25, neg_pos_ub=-1, - add_gt_as_proposals=True), + add_gt_as_proposals=True, + ), pos_weight=-1, - debug=False)), + debug=False, + ), + ), test_cfg=dict( img_rpn=dict( nms_across_levels=False, nms_pre=1000, nms_post=1000, max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), + nms=dict(type="nms", iou_threshold=0.7), + min_bbox_size=0, + ), img_rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100))) + score_thr=0.05, nms=dict(type="nms", iou_threshold=0.5), max_per_img=100 + ), + ), +) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/mask_rcnn_r50_fpn.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/mask_rcnn_r50_fpn.py index c5d5e32..ff34b3d 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/mask_rcnn_r50_fpn.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/mask_rcnn_r50_fpn.py @@ -1,114 +1,127 @@ # model settings model = dict( - type='MaskRCNN', - pretrained='torchvision://resnet50', + type="MaskRCNN", + pretrained="torchvision://resnet50", backbone=dict( - type='ResNet', + type="ResNet", depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), + norm_cfg=dict(type="BN", requires_grad=True), norm_eval=True, - style='pytorch'), + style="pytorch", + ), neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5), + type="FPN", in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5 + ), rpn_head=dict( - type='RPNHead', + type="RPNHead", in_channels=256, feat_channels=256, anchor_generator=dict( - type='AnchorGenerator', + type="AnchorGenerator", scales=[8], ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), + strides=[4, 8, 16, 32, 64], + ), bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + type="DeltaXYWHBBoxCoder", + target_means=[0.0, 0.0, 0.0, 0.0], + target_stds=[1.0, 1.0, 1.0, 1.0], + ), + loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict(type="L1Loss", 
loss_weight=1.0), + ), roi_head=dict( - type='StandardRoIHead', + type="StandardRoIHead", bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + type="SingleRoIExtractor", + roi_layer=dict(type="RoIAlign", output_size=7, sampling_ratio=0), out_channels=256, - featmap_strides=[4, 8, 16, 32]), + featmap_strides=[4, 8, 16, 32], + ), bbox_head=dict( - type='Shared2FCBBoxHead', + type="Shared2FCBBoxHead", in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), + type="DeltaXYWHBBoxCoder", + target_means=[0.0, 0.0, 0.0, 0.0], + target_stds=[0.1, 0.1, 0.2, 0.2], + ), reg_class_agnostic=False, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0), + loss_bbox=dict(type="L1Loss", loss_weight=1.0), + ), mask_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), + type="SingleRoIExtractor", + roi_layer=dict(type="RoIAlign", output_size=14, sampling_ratio=0), out_channels=256, - featmap_strides=[4, 8, 16, 32]), + featmap_strides=[4, 8, 16, 32], + ), mask_head=dict( - type='FCNMaskHead', + type="FCNMaskHead", num_convs=4, in_channels=256, conv_out_channels=256, num_classes=80, - loss_mask=dict( - type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), + loss_mask=dict(type="CrossEntropyLoss", use_mask=True, loss_weight=1.0), + ), + ), # model training and testing settings train_cfg=dict( rpn=dict( assigner=dict( - type='MaxIoUAssigner', + type="MaxIoUAssigner", pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3, match_low_quality=True, - ignore_iof_thr=-1), + ignore_iof_thr=-1, + ), sampler=dict( - type='RandomSampler', + type="RandomSampler", num=256, pos_fraction=0.5, neg_pos_ub=-1, - add_gt_as_proposals=False), + add_gt_as_proposals=False, + ), allowed_border=-1, pos_weight=-1, - debug=False), + debug=False, + ), rpn_proposal=dict( nms_across_levels=False, nms_pre=2000, nms_post=1000, max_num=1000, nms_thr=0.7, - min_bbox_size=0), + min_bbox_size=0, + ), rcnn=dict( assigner=dict( - type='MaxIoUAssigner', + type="MaxIoUAssigner", pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0.5, match_low_quality=True, - ignore_iof_thr=-1), + ignore_iof_thr=-1, + ), sampler=dict( - type='RandomSampler', + type="RandomSampler", num=512, pos_fraction=0.25, neg_pos_ub=-1, - add_gt_as_proposals=True), + add_gt_as_proposals=True, + ), mask_size=28, pos_weight=-1, - debug=False)), + debug=False, + ), + ), test_cfg=dict( rpn=dict( nms_across_levels=False, @@ -116,9 +129,13 @@ nms_post=1000, max_num=1000, nms_thr=0.7, - min_bbox_size=0), + min_bbox_size=0, + ), rcnn=dict( score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), + nms=dict(type="nms", iou_threshold=0.5), max_per_img=100, - mask_thr_binary=0.5))) + mask_thr_binary=0.5, + ), + ), +) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/paconv_cuda_ssg.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/paconv_cuda_ssg.py index f513bd4..0236235 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/paconv_cuda_ssg.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/paconv_cuda_ssg.py @@ -1,7 +1,9 @@ -_base_ = './paconv_ssg.py' +_base_ = "./paconv_ssg.py" model = dict( 
backbone=dict( sa_cfg=dict( - type='PAConvCUDASAModule', - scorenet_cfg=dict(mlp_channels=[8, 16, 16])))) + type="PAConvCUDASAModule", scorenet_cfg=dict(mlp_channels=[8, 16, 16]) + ) + ) +) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/paconv_ssg.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/paconv_ssg.py index 1d4f1ed..a89298f 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/paconv_ssg.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/paconv_ssg.py @@ -1,49 +1,59 @@ # model settings model = dict( - type='EncoderDecoder3D', + type="EncoderDecoder3D", backbone=dict( - type='PointNet2SASSG', + type="PointNet2SASSG", in_channels=9, # [xyz, rgb, normalized_xyz] num_points=(1024, 256, 64, 16), radius=(None, None, None, None), # use kNN instead of ball query num_samples=(32, 32, 32, 32), - sa_channels=((32, 32, 64), (64, 64, 128), (128, 128, 256), (256, 256, - 512)), + sa_channels=((32, 32, 64), (64, 64, 128), (128, 128, 256), (256, 256, 512)), fp_channels=(), - norm_cfg=dict(type='BN2d', momentum=0.1), + norm_cfg=dict(type="BN2d", momentum=0.1), sa_cfg=dict( - type='PAConvSAModule', - pool_mod='max', + type="PAConvSAModule", + pool_mod="max", use_xyz=True, normalize_xyz=False, paconv_num_kernels=[16, 16, 16], - paconv_kernel_input='w_neighbor', - scorenet_input='w_neighbor_dist', + paconv_kernel_input="w_neighbor", + scorenet_input="w_neighbor_dist", scorenet_cfg=dict( mlp_channels=[16, 16, 16], - score_norm='softmax', + score_norm="softmax", temp_factor=1.0, - last_bn=False))), + last_bn=False, + ), + ), + ), decode_head=dict( - type='PAConvHead', + type="PAConvHead", # PAConv model's decoder takes skip connections from backbone # different from PointNet++, it also concats input features in the last # level of decoder, leading to `128 + 6` as the channel number - fp_channels=((768, 256, 256), (384, 256, 256), (320, 256, 128), - (128 + 6, 128, 128, 128)), + fp_channels=( + (768, 256, 256), + (384, 256, 256), + (320, 256, 128), + (128 + 6, 128, 128, 128), + ), channels=128, dropout_ratio=0.5, - conv_cfg=dict(type='Conv1d'), - norm_cfg=dict(type='BN1d'), - act_cfg=dict(type='ReLU'), + conv_cfg=dict(type="Conv1d"), + norm_cfg=dict(type="BN1d"), + act_cfg=dict(type="ReLU"), loss_decode=dict( - type='CrossEntropyLoss', + type="CrossEntropyLoss", use_sigmoid=False, class_weight=None, # should be modified with dataset - loss_weight=1.0)), + loss_weight=1.0, + ), + ), # correlation loss to regularize PAConv's kernel weights loss_regularization=dict( - type='PAConvRegularizationLoss', reduction='sum', loss_weight=10.0), + type="PAConvRegularizationLoss", reduction="sum", loss_weight=10.0 + ), # model training and testing settings train_cfg=dict(), - test_cfg=dict(mode='slide')) + test_cfg=dict(mode="slide"), +) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/parta2.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/parta2.py index 6c5ae9a..6778ed1 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/parta2.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/parta2.py @@ -3,91 +3,93 @@ point_cloud_range = [0, -40, -3, 70.4, 40, 1] model = dict( - type='PartA2', + type="PartA2", voxel_layer=dict( max_num_points=5, # max_points_per_voxel point_cloud_range=point_cloud_range, voxel_size=voxel_size, - max_voxels=(16000, 40000) # (training, testing) max_voxels + max_voxels=(16000, 40000), # (training, testing) max_voxels ), -
voxel_encoder=dict(type='HardSimpleVFE'), + voxel_encoder=dict(type="HardSimpleVFE"), middle_encoder=dict( - type='SparseUNet', + type="SparseUNet", in_channels=4, sparse_shape=[41, 1600, 1408], - order=('conv', 'norm', 'act')), + order=("conv", "norm", "act"), + ), backbone=dict( - type='SECOND', + type="SECOND", in_channels=256, layer_nums=[5, 5], layer_strides=[1, 2], - out_channels=[128, 256]), + out_channels=[128, 256], + ), neck=dict( - type='SECONDFPN', + type="SECONDFPN", in_channels=[128, 256], upsample_strides=[1, 2], - out_channels=[256, 256]), + out_channels=[256, 256], + ), rpn_head=dict( - type='PartA2RPNHead', + type="PartA2RPNHead", num_classes=3, in_channels=512, feat_channels=512, use_direction_classifier=True, anchor_generator=dict( - type='Anchor3DRangeGenerator', - ranges=[[0, -40.0, -0.6, 70.4, 40.0, -0.6], - [0, -40.0, -0.6, 70.4, 40.0, -0.6], - [0, -40.0, -1.78, 70.4, 40.0, -1.78]], + type="Anchor3DRangeGenerator", + ranges=[ + [0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -0.6, 70.4, 40.0, -0.6], + [0, -40.0, -1.78, 70.4, 40.0, -1.78], + ], sizes=[[0.6, 0.8, 1.73], [0.6, 1.76, 1.73], [1.6, 3.9, 1.56]], rotations=[0, 1.57], - reshape_out=False), + reshape_out=False, + ), diff_rad_by_sin=True, assigner_per_size=True, assign_per_class=True, - bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + bbox_coder=dict(type="DeltaXYZWLHRBBoxCoder"), loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0), - loss_dir=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2)), + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0 + ), + loss_bbox=dict(type="SmoothL1Loss", beta=1.0 / 9.0, loss_weight=2.0), + loss_dir=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=0.2), + ), roi_head=dict( - type='PartAggregationROIHead', + type="PartAggregationROIHead", num_classes=3, semantic_head=dict( - type='PointwiseSemanticHead', + type="PointwiseSemanticHead", in_channels=16, extra_width=0.2, seg_score_thr=0.3, num_classes=3, loss_seg=dict( - type='FocalLoss', + type="FocalLoss", use_sigmoid=True, - reduction='sum', + reduction="sum", gamma=2.0, alpha=0.25, - loss_weight=1.0), - loss_part=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), + loss_weight=1.0, + ), + loss_part=dict(type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0), + ), seg_roi_extractor=dict( - type='Single3DRoIAwareExtractor', + type="Single3DRoIAwareExtractor", roi_layer=dict( - type='RoIAwarePool3d', - out_size=14, - max_pts_per_voxel=128, - mode='max')), + type="RoIAwarePool3d", out_size=14, max_pts_per_voxel=128, mode="max" + ), + ), part_roi_extractor=dict( - type='Single3DRoIAwareExtractor', + type="Single3DRoIAwareExtractor", roi_layer=dict( - type='RoIAwarePool3d', - out_size=14, - max_pts_per_voxel=128, - mode='avg')), + type="RoIAwarePool3d", out_size=14, max_pts_per_voxel=128, mode="avg" + ), + ), bbox_head=dict( - type='PartA2BboxHead', + type="PartA2BboxHead", num_classes=3, seg_in_channels=16, part_in_channels=4, @@ -95,7 +97,7 @@ part_conv_channels=[64, 64], merge_conv_channels=[128, 128], down_conv_channels=[128, 256], - bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'), + bbox_coder=dict(type="DeltaXYZWLHRBBoxCoder"), shared_fc_channels=[256, 512, 512, 512], cls_channels=[256, 256], reg_channels=[256, 256], @@ -103,89 +105,98 @@ roi_feat_size=14, with_corner_loss=True, loss_bbox=dict( - type='SmoothL1Loss', - beta=1.0 / 
9.0, - reduction='sum', - loss_weight=1.0), + type="SmoothL1Loss", beta=1.0 / 9.0, reduction="sum", loss_weight=1.0 + ), loss_cls=dict( - type='CrossEntropyLoss', + type="CrossEntropyLoss", use_sigmoid=True, - reduction='sum', - loss_weight=1.0))), + reduction="sum", + loss_weight=1.0, + ), + ), + ), # model training and testing settings train_cfg=dict( rpn=dict( assigner=[ dict( # for Pedestrian - type='MaxIoUAssigner', - iou_calculator=dict(type='BboxOverlapsNearest3D'), + type="MaxIoUAssigner", + iou_calculator=dict(type="BboxOverlapsNearest3D"), pos_iou_thr=0.5, neg_iou_thr=0.35, min_pos_iou=0.35, - ignore_iof_thr=-1), + ignore_iof_thr=-1, + ), dict( # for Cyclist - type='MaxIoUAssigner', - iou_calculator=dict(type='BboxOverlapsNearest3D'), + type="MaxIoUAssigner", + iou_calculator=dict(type="BboxOverlapsNearest3D"), pos_iou_thr=0.5, neg_iou_thr=0.35, min_pos_iou=0.35, - ignore_iof_thr=-1), + ignore_iof_thr=-1, + ), dict( # for Car - type='MaxIoUAssigner', - iou_calculator=dict(type='BboxOverlapsNearest3D'), + type="MaxIoUAssigner", + iou_calculator=dict(type="BboxOverlapsNearest3D"), pos_iou_thr=0.6, neg_iou_thr=0.45, min_pos_iou=0.45, - ignore_iof_thr=-1) + ignore_iof_thr=-1, + ), ], allowed_border=0, pos_weight=-1, - debug=False), + debug=False, + ), rpn_proposal=dict( nms_pre=9000, nms_post=512, max_num=512, nms_thr=0.8, score_thr=0, - use_rotate_nms=False), + use_rotate_nms=False, + ), rcnn=dict( assigner=[ dict( # for Pedestrian - type='MaxIoUAssigner', - iou_calculator=dict( - type='BboxOverlaps3D', coordinate='lidar'), + type="MaxIoUAssigner", + iou_calculator=dict(type="BboxOverlaps3D", coordinate="lidar"), pos_iou_thr=0.55, neg_iou_thr=0.55, min_pos_iou=0.55, - ignore_iof_thr=-1), + ignore_iof_thr=-1, + ), dict( # for Cyclist - type='MaxIoUAssigner', - iou_calculator=dict( - type='BboxOverlaps3D', coordinate='lidar'), + type="MaxIoUAssigner", + iou_calculator=dict(type="BboxOverlaps3D", coordinate="lidar"), pos_iou_thr=0.55, neg_iou_thr=0.55, min_pos_iou=0.55, - ignore_iof_thr=-1), + ignore_iof_thr=-1, + ), dict( # for Car - type='MaxIoUAssigner', - iou_calculator=dict( - type='BboxOverlaps3D', coordinate='lidar'), + type="MaxIoUAssigner", + iou_calculator=dict(type="BboxOverlaps3D", coordinate="lidar"), pos_iou_thr=0.55, neg_iou_thr=0.55, min_pos_iou=0.55, - ignore_iof_thr=-1) + ignore_iof_thr=-1, + ), ], sampler=dict( - type='IoUNegPiecewiseSampler', + type="IoUNegPiecewiseSampler", num=128, pos_fraction=0.55, neg_piece_fractions=[0.8, 0.2], neg_iou_piece_thrs=[0.55, 0.1], neg_pos_ub=-1, add_gt_as_proposals=False, - return_iou=True), + return_iou=True, + ), cls_pos_thr=0.75, - cls_neg_thr=0.25)), + cls_neg_thr=0.25, + ), + ), test_cfg=dict( rpn=dict( nms_pre=1024, @@ -193,9 +204,8 @@ max_num=100, nms_thr=0.7, score_thr=0, - use_rotate_nms=True), - rcnn=dict( use_rotate_nms=True, - use_raw_score=True, - nms_thr=0.01, - score_thr=0.1))) + ), + rcnn=dict(use_rotate_nms=True, use_raw_score=True, nms_thr=0.01, score_thr=0.1), + ), +) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/pointnet2_msg.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/pointnet2_msg.py index 222ab88..5f7228b 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/pointnet2_msg.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/pointnet2_msg.py @@ -1,28 +1,35 @@ -_base_ = './pointnet2_ssg.py' +_base_ = "./pointnet2_ssg.py" # model settings model = dict( backbone=dict( _delete_=True, - type='PointNet2SAMSG', + 
type="PointNet2SAMSG", in_channels=6, # [xyz, rgb], should be modified with dataset num_points=(1024, 256, 64, 16), radii=((0.05, 0.1), (0.1, 0.2), (0.2, 0.4), (0.4, 0.8)), num_samples=((16, 32), (16, 32), (16, 32), (16, 32)), - sa_channels=(((16, 16, 32), (32, 32, 64)), ((64, 64, 128), (64, 96, - 128)), - ((128, 196, 256), (128, 196, 256)), ((256, 256, 512), - (256, 384, 512))), + sa_channels=( + ((16, 16, 32), (32, 32, 64)), + ((64, 64, 128), (64, 96, 128)), + ((128, 196, 256), (128, 196, 256)), + ((256, 256, 512), (256, 384, 512)), + ), aggregation_channels=(None, None, None, None), - fps_mods=(('D-FPS'), ('D-FPS'), ('D-FPS'), ('D-FPS')), + fps_mods=(("D-FPS"), ("D-FPS"), ("D-FPS"), ("D-FPS")), fps_sample_range_lists=((-1), (-1), (-1), (-1)), dilated_group=(False, False, False, False), out_indices=(0, 1, 2, 3), sa_cfg=dict( - type='PointSAModuleMSG', - pool_mod='max', - use_xyz=True, - normalize_xyz=False)), + type="PointSAModuleMSG", pool_mod="max", use_xyz=True, normalize_xyz=False + ), + ), decode_head=dict( - fp_channels=((1536, 256, 256), (512, 256, 256), (352, 256, 128), - (128, 128, 128, 128)))) + fp_channels=( + (1536, 256, 256), + (512, 256, 256), + (352, 256, 128), + (128, 128, 128, 128), + ) + ), +) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/pointnet2_ssg.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/pointnet2_ssg.py index 58b4c24..9b9527c 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/pointnet2_ssg.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/pointnet2_ssg.py @@ -1,35 +1,40 @@ # model settings model = dict( - type='EncoderDecoder3D', + type="EncoderDecoder3D", backbone=dict( - type='PointNet2SASSG', + type="PointNet2SASSG", in_channels=6, # [xyz, rgb], should be modified with dataset num_points=(1024, 256, 64, 16), radius=(0.1, 0.2, 0.4, 0.8), num_samples=(32, 32, 32, 32), - sa_channels=((32, 32, 64), (64, 64, 128), (128, 128, 256), (256, 256, - 512)), + sa_channels=((32, 32, 64), (64, 64, 128), (128, 128, 256), (256, 256, 512)), fp_channels=(), - norm_cfg=dict(type='BN2d'), + norm_cfg=dict(type="BN2d"), sa_cfg=dict( - type='PointSAModule', - pool_mod='max', - use_xyz=True, - normalize_xyz=False)), + type="PointSAModule", pool_mod="max", use_xyz=True, normalize_xyz=False + ), + ), decode_head=dict( - type='PointNet2Head', - fp_channels=((768, 256, 256), (384, 256, 256), (320, 256, 128), - (128, 128, 128, 128)), + type="PointNet2Head", + fp_channels=( + (768, 256, 256), + (384, 256, 256), + (320, 256, 128), + (128, 128, 128, 128), + ), channels=128, dropout_ratio=0.5, - conv_cfg=dict(type='Conv1d'), - norm_cfg=dict(type='BN1d'), - act_cfg=dict(type='ReLU'), + conv_cfg=dict(type="Conv1d"), + norm_cfg=dict(type="BN1d"), + act_cfg=dict(type="ReLU"), loss_decode=dict( - type='CrossEntropyLoss', + type="CrossEntropyLoss", use_sigmoid=False, class_weight=None, # should be modified with dataset - loss_weight=1.0)), + loss_weight=1.0, + ), + ), # model training and testing settings train_cfg=dict(), - test_cfg=dict(mode='slide')) + test_cfg=dict(mode="slide"), +) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/votenet.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/votenet.py index 129339d..1b13d97 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/votenet.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/models/votenet.py @@ -1,73 +1,73 @@ model = dict( - type='VoteNet', + type="VoteNet", backbone=dict( 
- type='PointNet2SASSG', + type="PointNet2SASSG", in_channels=4, num_points=(2048, 1024, 512, 256), radius=(0.2, 0.4, 0.8, 1.2), num_samples=(64, 32, 16, 16), - sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256), - (128, 128, 256)), + sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256), (128, 128, 256)), fp_channels=((256, 256), (256, 256)), - norm_cfg=dict(type='BN2d'), + norm_cfg=dict(type="BN2d"), sa_cfg=dict( - type='PointSAModule', - pool_mod='max', - use_xyz=True, - normalize_xyz=True)), + type="PointSAModule", pool_mod="max", use_xyz=True, normalize_xyz=True + ), + ), bbox_head=dict( - type='VoteHead', + type="VoteHead", vote_module_cfg=dict( in_channels=256, vote_per_seed=1, gt_per_seed=3, conv_channels=(256, 256), - conv_cfg=dict(type='Conv1d'), - norm_cfg=dict(type='BN1d'), + conv_cfg=dict(type="Conv1d"), + norm_cfg=dict(type="BN1d"), norm_feats=True, vote_loss=dict( - type='ChamferDistance', - mode='l1', - reduction='none', - loss_dst_weight=10.0)), + type="ChamferDistance", + mode="l1", + reduction="none", + loss_dst_weight=10.0, + ), + ), vote_aggregation_cfg=dict( - type='PointSAModule', + type="PointSAModule", num_point=256, radius=0.3, num_sample=16, mlp_channels=[256, 128, 128, 128], use_xyz=True, - normalize_xyz=True), + normalize_xyz=True, + ), pred_layer_cfg=dict( - in_channels=128, shared_conv_channels=(128, 128), bias=True), - conv_cfg=dict(type='Conv1d'), - norm_cfg=dict(type='BN1d'), + in_channels=128, shared_conv_channels=(128, 128), bias=True + ), + conv_cfg=dict(type="Conv1d"), + norm_cfg=dict(type="BN1d"), objectness_loss=dict( - type='CrossEntropyLoss', + type="CrossEntropyLoss", class_weight=[0.2, 0.8], - reduction='sum', - loss_weight=5.0), + reduction="sum", + loss_weight=5.0, + ), center_loss=dict( - type='ChamferDistance', - mode='l2', - reduction='sum', + type="ChamferDistance", + mode="l2", + reduction="sum", loss_src_weight=10.0, - loss_dst_weight=10.0), - dir_class_loss=dict( - type='CrossEntropyLoss', reduction='sum', loss_weight=1.0), - dir_res_loss=dict( - type='SmoothL1Loss', reduction='sum', loss_weight=10.0), - size_class_loss=dict( - type='CrossEntropyLoss', reduction='sum', loss_weight=1.0), + loss_dst_weight=10.0, + ), + dir_class_loss=dict(type="CrossEntropyLoss", reduction="sum", loss_weight=1.0), + dir_res_loss=dict(type="SmoothL1Loss", reduction="sum", loss_weight=10.0), + size_class_loss=dict(type="CrossEntropyLoss", reduction="sum", loss_weight=1.0), size_res_loss=dict( - type='SmoothL1Loss', reduction='sum', loss_weight=10.0 / 3.0), - semantic_loss=dict( - type='CrossEntropyLoss', reduction='sum', loss_weight=1.0)), + type="SmoothL1Loss", reduction="sum", loss_weight=10.0 / 3.0 + ), + semantic_loss=dict(type="CrossEntropyLoss", reduction="sum", loss_weight=1.0), + ), # model training and testing settings - train_cfg=dict( - pos_distance_thr=0.3, neg_distance_thr=0.6, sample_mod='vote'), + train_cfg=dict(pos_distance_thr=0.3, neg_distance_thr=0.6, sample_mod="vote"), test_cfg=dict( - sample_mod='seed', - nms_thr=0.25, - score_thr=0.05, - per_class_proposal=True)) + sample_mod="seed", nms_thr=0.25, score_thr=0.05, per_class_proposal=True + ), +) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/cosine.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/cosine.py index 69cb7df..111094a 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/cosine.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/cosine.py @@ -2,19 +2,21 @@ # 
optimizer lr = 0.003 # max learning rate optimizer = dict( - type='AdamW', + type="AdamW", lr=lr, betas=(0.95, 0.99), # the momentum is changed during training - weight_decay=0.001) + weight_decay=0.001, +) optimizer_config = dict(grad_clip=dict(max_norm=10, norm_type=2)) lr_config = dict( - policy='CosineAnnealing', - warmup='linear', + policy="CosineAnnealing", + warmup="linear", warmup_iters=1000, warmup_ratio=1.0 / 10, - min_lr_ratio=1e-5) + min_lr_ratio=1e-5, +) momentum_config = None -runner = dict(type='EpochBasedRunner', max_epochs=40) +runner = dict(type="EpochBasedRunner", max_epochs=40) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/cyclic_20e.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/cyclic_20e.py index 704740e..1911d41 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/cyclic_20e.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/cyclic_20e.py @@ -4,21 +4,15 @@ # use a default schedule. # optimizer # This schedule is mainly used by models on nuScenes dataset -optimizer = dict(type='AdamW', lr=1e-4, weight_decay=0.01) +optimizer = dict(type="AdamW", lr=1e-4, weight_decay=0.01) # max_norm=10 is better for SECOND optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) lr_config = dict( - policy='cyclic', - target_ratio=(10, 1e-4), - cyclic_times=1, - step_ratio_up=0.4, + policy="cyclic", target_ratio=(10, 1e-4), cyclic_times=1, step_ratio_up=0.4, ) momentum_config = dict( - policy='cyclic', - target_ratio=(0.85 / 0.95, 1), - cyclic_times=1, - step_ratio_up=0.4, + policy="cyclic", target_ratio=(0.85 / 0.95, 1), cyclic_times=1, step_ratio_up=0.4, ) # runtime settings -runner = dict(type='EpochBasedRunner', max_epochs=20) +runner = dict(type="EpochBasedRunner", max_epochs=20) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/cyclic_40e.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/cyclic_40e.py index 4a711ac..9a79821 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/cyclic_40e.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/cyclic_40e.py @@ -6,7 +6,7 @@ lr = 0.0018 # The optimizer follows the setting in SECOND.Pytorch, but here we use # the official AdamW optimizer implemented by PyTorch.
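# ---------------------------------------------------------------------------
# [Editor's aside - illustration only, not part of the patch]
# The cyclic schedules in cyclic_20e.py / cyclic_40e.py follow SECOND.Pytorch:
# with target_ratio=(10, 1e-4) and step_ratio_up=0.4, the learning rate climbs
# from the base lr to 10x lr over the first 40% of training, then decays
# toward lr * 1e-4. The sketch below is a piecewise-linear approximation of
# that behaviour (mmcv's cyclic updater actually anneals between phases, so
# the exact curve differs); the function name and shape are ours.
def cyclic_lr(base_lr, step, total_steps, target_ratio=(10, 1e-4), step_ratio_up=0.4):
    """Approximate lr at `step` for a one-cycle cyclic schedule (illustrative)."""
    up_steps = int(total_steps * step_ratio_up)
    if step < up_steps:  # ramp up: base_lr -> base_lr * target_ratio[0]
        t = step / max(up_steps, 1)
        return base_lr * (1 + t * (target_ratio[0] - 1))
    # ramp down: base_lr * target_ratio[0] -> base_lr * target_ratio[1]
    t = (step - up_steps) / max(total_steps - up_steps, 1)
    return base_lr * (target_ratio[0] + t * (target_ratio[1] - target_ratio[0]))

# e.g. with lr = 0.0018: cyclic_lr(0.0018, 40, 100) ~= 0.018 (the peak), and
# cyclic_lr(0.0018, 100, 100) ~= 1.8e-7 (the floor).
# ---------------------------------------------------------------------------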
-optimizer = dict(type='AdamW', lr=lr, betas=(0.95, 0.99), weight_decay=0.01) +optimizer = dict(type="AdamW", lr=lr, betas=(0.95, 0.99), weight_decay=0.01) optimizer_config = dict(grad_clip=dict(max_norm=10, norm_type=2)) # We use cyclic learning rate and momentum schedule following SECOND.Pytorch # https://github.com/traveller59/second.pytorch/blob/3aba19c9688274f75ebb5e576f65cfe54773c021/torchplus/train/learning_schedules_fastai.py#L69 # noqa @@ -14,18 +14,12 @@ # https://github.com/open-mmlab/mmcv/blob/f48241a65aebfe07db122e9db320c31b685dc674/mmcv/runner/hooks/lr_updater.py#L327 # noqa # https://github.com/open-mmlab/mmcv/blob/f48241a65aebfe07db122e9db320c31b685dc674/mmcv/runner/hooks/momentum_updater.py#L130 # noqa lr_config = dict( - policy='cyclic', - target_ratio=(10, 1e-4), - cyclic_times=1, - step_ratio_up=0.4, + policy="cyclic", target_ratio=(10, 1e-4), cyclic_times=1, step_ratio_up=0.4, ) momentum_config = dict( - policy='cyclic', - target_ratio=(0.85 / 0.95, 1), - cyclic_times=1, - step_ratio_up=0.4, + policy="cyclic", target_ratio=(0.85 / 0.95, 1), cyclic_times=1, step_ratio_up=0.4, ) # Although the max_epochs is 40, this schedule is usually used with # RepeatDataset with repeat ratio N, thus the actual max epoch # number could be Nx40 -runner = dict(type='EpochBasedRunner', max_epochs=40) +runner = dict(type="EpochBasedRunner", max_epochs=40) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/mmdet_schedule_1x.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/mmdet_schedule_1x.py index 13b3783..4a405bc 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/mmdet_schedule_1x.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/mmdet_schedule_1x.py @@ -1,11 +1,8 @@ # optimizer -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) +optimizer = dict(type="SGD", lr=0.02, momentum=0.9, weight_decay=0.0001) optimizer_config = dict(grad_clip=None) # learning policy lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - step=[8, 11]) -runner = dict(type='EpochBasedRunner', max_epochs=12) + policy="step", warmup="linear", warmup_iters=500, warmup_ratio=0.001, step=[8, 11] +) +runner = dict(type="EpochBasedRunner", max_epochs=12) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/schedule_2x.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/schedule_2x.py index afde799..cf2d44a 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/schedule_2x.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/schedule_2x.py @@ -1,14 +1,15 @@ # optimizer # This schedule is mainly used by models on nuScenes dataset -optimizer = dict(type='AdamW', lr=0.001, weight_decay=0.01) +optimizer = dict(type="AdamW", lr=0.001, weight_decay=0.01) # max_norm=10 is better for SECOND optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) lr_config = dict( - policy='step', - warmup='linear', + policy="step", + warmup="linear", warmup_iters=1000, warmup_ratio=1.0 / 1000, - step=[20, 23]) + step=[20, 23], +) momentum_config = None # runtime settings -runner = dict(type='EpochBasedRunner', max_epochs=24) +runner = dict(type="EpochBasedRunner", max_epochs=24) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/schedule_3x.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/schedule_3x.py index 115cd26..b83581d 100644 ---
a/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/schedule_3x.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/schedule_3x.py @@ -2,8 +2,8 @@ # This schedule is mainly used by models on indoor datasets, # e.g., VoteNet on SUNRGBD and ScanNet lr = 0.008 # max learning rate -optimizer = dict(type='AdamW', lr=lr, weight_decay=0.01) +optimizer = dict(type="AdamW", lr=lr, weight_decay=0.01) optimizer_config = dict(grad_clip=dict(max_norm=10, norm_type=2)) -lr_config = dict(policy='step', warmup=None, step=[24, 32]) +lr_config = dict(policy="step", warmup=None, step=[24, 32]) # runtime settings -runner = dict(type='EpochBasedRunner', max_epochs=36) +runner = dict(type="EpochBasedRunner", max_epochs=36) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/seg_cosine_150e.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/seg_cosine_150e.py index 04b44e5..48ca032 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/seg_cosine_150e.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/seg_cosine_150e.py @@ -1,9 +1,9 @@ # optimizer # This schedule is mainly used on the S3DIS dataset for the segmentation task -optimizer = dict(type='SGD', lr=0.2, weight_decay=0.0001, momentum=0.9) +optimizer = dict(type="SGD", lr=0.2, weight_decay=0.0001, momentum=0.9) optimizer_config = dict(grad_clip=None) -lr_config = dict(policy='CosineAnnealing', warmup=None, min_lr=0.002) +lr_config = dict(policy="CosineAnnealing", warmup=None, min_lr=0.002) momentum_config = None # runtime settings -runner = dict(type='EpochBasedRunner', max_epochs=150) +runner = dict(type="EpochBasedRunner", max_epochs=150) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/seg_cosine_200e.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/seg_cosine_200e.py index 6a49484..a8ec751 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/seg_cosine_200e.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/seg_cosine_200e.py @@ -1,9 +1,9 @@ # optimizer # This schedule is mainly used on the ScanNet dataset for the segmentation task -optimizer = dict(type='Adam', lr=0.001, weight_decay=0.01) +optimizer = dict(type="Adam", lr=0.001, weight_decay=0.01) optimizer_config = dict(grad_clip=None) -lr_config = dict(policy='CosineAnnealing', warmup=None, min_lr=1e-5) +lr_config = dict(policy="CosineAnnealing", warmup=None, min_lr=1e-5) momentum_config = None # runtime settings -runner = dict(type='EpochBasedRunner', max_epochs=200) +runner = dict(type="EpochBasedRunner", max_epochs=200) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/seg_cosine_50e.py b/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/seg_cosine_50e.py index 975a8f9..46d17d3 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/seg_cosine_50e.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/_base_/schedules/seg_cosine_50e.py @@ -1,9 +1,9 @@ # optimizer # This schedule is mainly used on the S3DIS dataset for the segmentation task -optimizer = dict(type='Adam', lr=0.001, weight_decay=0.001) +optimizer = dict(type="Adam", lr=0.001, weight_decay=0.001) optimizer_config = dict(grad_clip=None) -lr_config = dict(policy='CosineAnnealing', warmup=None, min_lr=1e-5) +lr_config = dict(policy="CosineAnnealing", warmup=None, min_lr=1e-5) momentum_config = None # runtime settings -runner = dict(type='EpochBasedRunner', max_epochs=50) +runner =
dict(type="EpochBasedRunner", max_epochs=50) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/bevformer/bev_base_occ.py b/Chapter08-FinalProject/OccNet/projects/configs/bevformer/bev_base_occ.py index f4e5bdf..076a573 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/bevformer/bev_base_occ.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/bevformer/bev_base_occ.py @@ -1,13 +1,10 @@ # base model ResNet101 # occupancy_size = 0.5 -_base_ = [ - '../datasets/custom_nus-3d.py', - '../_base_/default_runtime.py' -] +_base_ = ["../datasets/custom_nus-3d.py", "../_base_/default_runtime.py"] # plugin = True -plugin_dir = 'projects/mmdet3d_plugin/' +plugin_dir = "projects/mmdet3d_plugin/" # If point cloud range is changed, the models should also change their point # cloud range accordingly @@ -15,61 +12,69 @@ voxel_size = [0.2, 0.2, 8] occupancy_size = [0.5, 0.5, 0.5] -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +img_norm_cfg = dict(mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) # For nuScenes we usually do 10-class detection class_names = [ - 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', - 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' + "car", + "truck", + "construction_vehicle", + "bus", + "trailer", + "barrier", + "motorcycle", + "bicycle", + "pedestrian", + "traffic_cone", ] input_modality = dict( - use_lidar=False, - use_camera=True, - use_radar=False, - use_map=False, - use_external=True) + use_lidar=False, use_camera=True, use_radar=False, use_map=False, use_external=True +) _dim_ = 256 _occupancy_dim_ = 128 -_pos_dim_ = _dim_//2 -_ffn_dim_ = _dim_*2 +_pos_dim_ = _dim_ // 2 +_ffn_dim_ = _dim_ * 2 _num_levels_ = 4 bev_h_ = 200 bev_w_ = 200 -queue_length = 4 # each sequence contains `queue_length` frames. +queue_length = 4 # each sequence contains `queue_length` frames. 
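# ---------------------------------------------------------------------------
# [Editor's aside - illustration only, not part of the patch]
# `queue_length = 4` means each training sample is a short clip: the current
# frame plus its history, which BEVFormer uses to warm up its temporal BEV
# features. A minimal sketch of the indexing follows; the real
# CustomNuScenesDataset may pick history frames differently (e.g. with random
# sampling), so this helper is hypothetical:
def clip_indices(i, queue_length=4):
    """Indices of the frames bundled with sample i (clamped at scene start)."""
    start = max(0, i - (queue_length - 1))
    return list(range(start, i + 1))

# e.g. clip_indices(10) -> [7, 8, 9, 10]; clip_indices(1) -> [0, 1]
# ---------------------------------------------------------------------------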
use_occ_gts = True only_occ = True only_det = False model = dict( - type='BEVFormer', + type="BEVFormer", use_grid_mask=True, video_test_mode=True, use_occ_gts=use_occ_gts, only_occ=only_occ, only_det=only_det, img_backbone=dict( - type='ResNet', + type="ResNet", depth=101, num_stages=4, out_indices=(1, 2, 3), frozen_stages=1, - norm_cfg=dict(type='BN2d', requires_grad=False), + norm_cfg=dict(type="BN2d", requires_grad=False), norm_eval=True, - style='caffe', - dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), # original DCNv2 will print log when perform load_state_dict - stage_with_dcn=(False, False, True, True)), + style="caffe", + dcn=dict( + type="DCNv2", deform_groups=1, fallback_on_stride=False + ), # the original DCNv2 prints a log message when load_state_dict is called + stage_with_dcn=(False, False, True, True), + ), img_neck=dict( - type='FPN', - in_channels=[512, 1024, 2048], + type="FPN", + in_channels=[512, 1024, 2048], out_channels=_dim_, start_level=0, - add_extra_convs='on_output', - num_outs=_num_levels_, # can be decreased to save memory for cross-attention - relu_before_extra_convs=True), + add_extra_convs="on_output", + num_outs=_num_levels_, # can be decreased to save memory for cross-attention + relu_before_extra_convs=True, + ), pts_bbox_head=dict( - type='BEVFormerOccupancyHead', + type="BEVFormerOccupancyHead", bev_h=bev_h_, bev_w=bev_w_, num_query=900, @@ -85,121 +90,141 @@ only_occ=only_occ, only_det=only_det, transformer=dict( - type='PerceptionTransformer', + type="PerceptionTransformer", rotate_prev_bev=True, use_shift=True, use_can_bus=True, embed_dims=_dim_, encoder=dict( - type='BEVFormerEncoder', + type="BEVFormerEncoder", num_layers=6, pc_range=point_cloud_range, num_points_in_pillar=4, return_intermediate=False, transformerlayers=dict( - type='BEVFormerLayer', + type="BEVFormerLayer", attn_cfgs=[ dict( - type='TemporalSelfAttention', + type="TemporalSelfAttention", embed_dims=_dim_, num_points=4, # default is 4 - num_levels=1), + num_levels=1, + ), dict( - type='SpatialCrossAttention', + type="SpatialCrossAttention", pc_range=point_cloud_range, deformable_attention=dict( - type='MSDeformableAttention3D', + type="MSDeformableAttention3D", embed_dims=_dim_, num_points=8, - num_levels=_num_levels_), + num_levels=_num_levels_, + ), embed_dims=_dim_, - ) + ), ], ffn_cfgs=dict( - type='FFN', - embed_dims=_dim_, - feedforward_channels=_ffn_dim_, - num_fcs=2, - ffn_drop=0.1, - act_cfg=dict(type='ReLU', inplace=True), + type="FFN", + embed_dims=_dim_, + feedforward_channels=_ffn_dim_, + num_fcs=2, + ffn_drop=0.1, + act_cfg=dict(type="ReLU", inplace=True), ), # add ffn_cfgs when _dim_ != 256 feedforward_channels=_ffn_dim_, ffn_dropout=0.1, - operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm'))), + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), ), + ), bbox_coder=dict( - type='NMSFreeCoder', + type="NMSFreeCoder", post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], pc_range=point_cloud_range, max_num=300, voxel_size=voxel_size, - num_classes=10), + num_classes=10, + ), positional_encoding=dict( - type='LearnedPositionalEncoding', + type="LearnedPositionalEncoding", num_feats=_pos_dim_, row_num_embed=bev_h_, col_num_embed=bev_w_, - ), + ), loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=2.0), - loss_bbox=dict(type='L1Loss', loss_weight=0.25), - loss_iou=dict(type='GIoULoss', loss_weight=0.0), + type="FocalLoss", use_sigmoid=True, 
gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type="L1Loss", loss_weight=0.25), + loss_iou=dict(type="GIoULoss", loss_weight=0.0), loss_occupancy=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0)), + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0 + ), + ), # model training and testing settings - train_cfg=dict(pts=dict( - grid_size=[512, 512, 1], - voxel_size=voxel_size, - point_cloud_range=point_cloud_range, - out_size_factor=4, - assigner=dict( - type='HungarianAssigner3D', - cls_cost=dict(type='FocalLossCost', weight=2.0), - reg_cost=dict(type='BBox3DL1Cost', weight=0.25), - iou_cost=dict(type='IoUCost', weight=0.0), # Fake cost. This is just to make it compatible with DETR head. - pc_range=point_cloud_range)))) + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type="HungarianAssigner3D", + cls_cost=dict(type="FocalLossCost", weight=2.0), + reg_cost=dict(type="BBox3DL1Cost", weight=0.25), + iou_cost=dict( + type="IoUCost", weight=0.0 + ), # Fake cost. This is just to make it compatible with DETR head. + pc_range=point_cloud_range, + ), + ) + ), +) -dataset_type = 'CustomNuScenesDataset' -data_root = 'data/nuscenes/' -file_client_args = dict(backend='disk') +dataset_type = "CustomNuScenesDataset" +data_root = "data/nuscenes/" +file_client_args = dict(backend="disk") train_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='PhotoMetricDistortionMultiViewImage'), - dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), - dict(type='LoadOccupancyGT'), - dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), - dict(type='ObjectNameFilter', classes=class_names), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - dict(type='PadMultiViewImage', size_divisor=32), - dict(type='CustomDefaultFormatBundle3D', class_names=class_names), # use custom - dict(type='CustomCollect3D', keys=['gt_bboxes_3d', 'gt_labels_3d', 'img', 'occ_gts']) + dict(type="LoadMultiViewImageFromFiles", to_float32=True), + dict(type="PhotoMetricDistortionMultiViewImage"), + dict( + type="LoadAnnotations3D", + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False, + ), + dict(type="LoadOccupancyGT"), + dict(type="ObjectRangeFilter", point_cloud_range=point_cloud_range), + dict(type="ObjectNameFilter", classes=class_names), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="PadMultiViewImage", size_divisor=32), + dict(type="CustomDefaultFormatBundle3D", class_names=class_names), # use custom + dict( + type="CustomCollect3D", keys=["gt_bboxes_3d", "gt_labels_3d", "img", "occ_gts"] + ), ] test_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - dict(type='PadMultiViewImage', size_divisor=32), + dict(type="LoadMultiViewImageFromFiles", to_float32=True), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="PadMultiViewImage", size_divisor=32), dict( - type='MultiScaleFlipAug3D', + type="MultiScaleFlipAug3D", img_scale=(1600, 900), pts_scale_ratio=1, flip=False, transforms=[ dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='CustomCollect3D', keys=['img']) - ]) + type="DefaultFormatBundle3D", class_names=class_names, with_label=False + ), + dict(type="CustomCollect3D", 
keys=["img"]), + ], + ), ] data = dict( @@ -209,7 +234,7 @@ type=dataset_type, use_occ_gts=use_occ_gts, data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_train_occ_gt.pkl', + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_train_occ_gt.pkl", pipeline=train_pipeline, classes=class_names, modality=input_modality, @@ -219,50 +244,56 @@ queue_length=queue_length, # we use box_type_3d='LiDAR' in kitti and nuscenes dataset # and box_type_3d='Depth' in sunrgbd and scannet dataset. - box_type_3d='LiDAR'), - val=dict(type=dataset_type, - data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl', - pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality, samples_per_gpu=1), - test=dict(type=dataset_type, - data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl', - pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality), - shuffler_sampler=dict(type='DistributedGroupSampler'), - nonshuffler_sampler=dict(type='DistributedSampler') + box_type_3d="LiDAR", + ), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl", + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + ), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl", + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + ), + shuffler_sampler=dict(type="DistributedGroupSampler"), + nonshuffler_sampler=dict(type="DistributedSampler"), ) optimizer = dict( - type='AdamW', + type="AdamW", lr=2e-4, - paramwise_cfg=dict( - custom_keys={ - 'img_backbone': dict(lr_mult=0.1), - }), - weight_decay=0.01) + paramwise_cfg=dict(custom_keys={"img_backbone": dict(lr_mult=0.1)}), + weight_decay=0.01, +) optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict( - policy='CosineAnnealing', - warmup='linear', + policy="CosineAnnealing", + warmup="linear", warmup_iters=500, warmup_ratio=1.0 / 3, - min_lr_ratio=1e-3) + min_lr_ratio=1e-3, +) total_epochs = 24 evaluation = dict(interval=1, pipeline=test_pipeline) -runner = dict(type='EpochBasedRunner', max_epochs=total_epochs) -load_from = 'ckpts/r101_dcn_fcos3d_pretrain.pth' +runner = dict(type="EpochBasedRunner", max_epochs=total_epochs) +load_from = "ckpts/r101_dcn_fcos3d_pretrain.pth" log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook'), - dict(type='TensorboardLoggerHook') - ]) + interval=50, hooks=[dict(type="TextLoggerHook"), dict(type="TensorboardLoggerHook")] +) checkpoint_config = dict(interval=1) -find_unused_parameters = False \ No newline at end of file +find_unused_parameters = False diff --git a/Chapter08-FinalProject/OccNet/projects/configs/bevformer/bev_base_occ_intern_s.py b/Chapter08-FinalProject/OccNet/projects/configs/bevformer/bev_base_occ_intern_s.py index d29b03f..1d36fb7 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/bevformer/bev_base_occ_intern_s.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/bevformer/bev_base_occ_intern_s.py @@ -1,13 +1,10 @@ # base model ResNet101 # occupancy_size = 0.5 -_base_ = [ - '../datasets/custom_nus-3d.py', - '../_base_/default_runtime.py' -] +_base_ = ["../datasets/custom_nus-3d.py", 
"../_base_/default_runtime.py"] # plugin = True -plugin_dir = 'projects/mmdet3d_plugin/' +plugin_dir = "projects/mmdet3d_plugin/" # If point cloud range is changed, the models should also change their point # cloud range accordingly @@ -16,34 +13,40 @@ occupancy_size = [0.5, 0.5, 0.5] img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True +) # For nuScenes we usually do 10-class detection class_names = [ - 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', - 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' + "car", + "truck", + "construction_vehicle", + "bus", + "trailer", + "barrier", + "motorcycle", + "bicycle", + "pedestrian", + "traffic_cone", ] input_modality = dict( - use_lidar=False, - use_camera=True, - use_radar=False, - use_map=False, - use_external=True) + use_lidar=False, use_camera=True, use_radar=False, use_map=False, use_external=True +) _dim_ = 256 _occupancy_dim_ = 128 -_pos_dim_ = _dim_//2 -_ffn_dim_ = _dim_*2 +_pos_dim_ = _dim_ // 2 +_ffn_dim_ = _dim_ * 2 _num_levels_ = 4 bev_h_ = 200 bev_w_ = 200 -queue_length = 4 # each sequence contains `queue_length` frames. +queue_length = 4 # each sequence contains `queue_length` frames. use_occ_gts = True only_occ = True only_det = False -pretrained = 'ckpts/mask_rcnn_internimage_s_fpn_3x_coco.pth' +pretrained = "ckpts/mask_rcnn_internimage_s_fpn_3x_coco.pth" model = dict( - type='BEVFormer', + type="BEVFormer", use_grid_mask=True, video_test_mode=True, use_occ_gts=use_occ_gts, @@ -51,30 +54,32 @@ only_det=only_det, img_backbone=dict( _delete_=True, - type='InternImage', - core_op='DCNv3', + type="InternImage", + core_op="DCNv3", channels=80, depths=[4, 4, 21, 4], groups=[5, 10, 20, 40], - mlp_ratio=4., + mlp_ratio=4.0, drop_path_rate=0.3, - norm_layer='LN', + norm_layer="LN", layer_scale=1.0, offset_scale=1.0, post_norm=True, with_cp=True, out_indices=(1, 2, 3), - init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + init_cfg=dict(type="Pretrained", checkpoint=pretrained), + ), img_neck=dict( - type='FPN', + type="FPN", in_channels=[160, 320, 640], out_channels=_dim_, start_level=0, - add_extra_convs='on_output', + add_extra_convs="on_output", num_outs=_num_levels_, - relu_before_extra_convs=True), + relu_before_extra_convs=True, + ), pts_bbox_head=dict( - type='BEVFormerOccupancyHead', + type="BEVFormerOccupancyHead", bev_h=bev_h_, bev_w=bev_w_, num_query=900, @@ -90,121 +95,141 @@ only_occ=only_occ, only_det=only_det, transformer=dict( - type='PerceptionTransformer', + type="PerceptionTransformer", rotate_prev_bev=True, use_shift=True, use_can_bus=True, embed_dims=_dim_, encoder=dict( - type='BEVFormerEncoder', + type="BEVFormerEncoder", num_layers=6, pc_range=point_cloud_range, num_points_in_pillar=4, return_intermediate=False, transformerlayers=dict( - type='BEVFormerLayer', + type="BEVFormerLayer", attn_cfgs=[ dict( - type='TemporalSelfAttention', + type="TemporalSelfAttention", embed_dims=_dim_, num_points=4, # default is 4 - num_levels=1), + num_levels=1, + ), dict( - type='SpatialCrossAttention', + type="SpatialCrossAttention", pc_range=point_cloud_range, deformable_attention=dict( - type='MSDeformableAttention3D', + type="MSDeformableAttention3D", embed_dims=_dim_, num_points=8, - num_levels=_num_levels_), + num_levels=_num_levels_, + ), embed_dims=_dim_, - ) + ), ], ffn_cfgs=dict( - type='FFN', - embed_dims=_dim_, - feedforward_channels=_ffn_dim_, - num_fcs=2, - ffn_drop=0.1, 
- act_cfg=dict(type='ReLU', inplace=True), + type="FFN", + embed_dims=_dim_, + feedforward_channels=_ffn_dim_, + num_fcs=2, + ffn_drop=0.1, + act_cfg=dict(type="ReLU", inplace=True), ), # add ffn_cfgs when _dim_ != 256 feedforward_channels=_ffn_dim_, ffn_dropout=0.1, - operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm'))), + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), ), + ), bbox_coder=dict( - type='NMSFreeCoder', + type="NMSFreeCoder", post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], pc_range=point_cloud_range, max_num=300, voxel_size=voxel_size, - num_classes=10), + num_classes=10, + ), positional_encoding=dict( - type='LearnedPositionalEncoding', + type="LearnedPositionalEncoding", num_feats=_pos_dim_, row_num_embed=bev_h_, col_num_embed=bev_w_, - ), + ), loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=2.0), - loss_bbox=dict(type='L1Loss', loss_weight=0.25), - loss_iou=dict(type='GIoULoss', loss_weight=0.0), + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type="L1Loss", loss_weight=0.25), + loss_iou=dict(type="GIoULoss", loss_weight=0.0), loss_occupancy=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0)), + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0 + ), + ), # model training and testing settings - train_cfg=dict(pts=dict( - grid_size=[512, 512, 1], - voxel_size=voxel_size, - point_cloud_range=point_cloud_range, - out_size_factor=4, - assigner=dict( - type='HungarianAssigner3D', - cls_cost=dict(type='FocalLossCost', weight=2.0), - reg_cost=dict(type='BBox3DL1Cost', weight=0.25), - iou_cost=dict(type='IoUCost', weight=0.0), # Fake cost. This is just to make it compatible with DETR head. - pc_range=point_cloud_range)))) + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type="HungarianAssigner3D", + cls_cost=dict(type="FocalLossCost", weight=2.0), + reg_cost=dict(type="BBox3DL1Cost", weight=0.25), + iou_cost=dict( + type="IoUCost", weight=0.0 + ), # Fake cost. This is just to make it compatible with DETR head. 
+ pc_range=point_cloud_range, + ), + ) + ), +) -dataset_type = 'CustomNuScenesDataset' -data_root = 'data/nuscenes/' -file_client_args = dict(backend='disk') +dataset_type = "CustomNuScenesDataset" +data_root = "data/nuscenes/" +file_client_args = dict(backend="disk") train_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='PhotoMetricDistortionMultiViewImage'), - dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), - dict(type='LoadOccupancyGT'), - dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), - dict(type='ObjectNameFilter', classes=class_names), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - dict(type='PadMultiViewImage', size_divisor=32), - dict(type='CustomDefaultFormatBundle3D', class_names=class_names), # use custom - dict(type='CustomCollect3D', keys=['gt_bboxes_3d', 'gt_labels_3d', 'img', 'occ_gts']) + dict(type="LoadMultiViewImageFromFiles", to_float32=True), + dict(type="PhotoMetricDistortionMultiViewImage"), + dict( + type="LoadAnnotations3D", + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False, + ), + dict(type="LoadOccupancyGT"), + dict(type="ObjectRangeFilter", point_cloud_range=point_cloud_range), + dict(type="ObjectNameFilter", classes=class_names), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="PadMultiViewImage", size_divisor=32), + dict(type="CustomDefaultFormatBundle3D", class_names=class_names), # use custom + dict( + type="CustomCollect3D", keys=["gt_bboxes_3d", "gt_labels_3d", "img", "occ_gts"] + ), ] test_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - dict(type='PadMultiViewImage', size_divisor=32), + dict(type="LoadMultiViewImageFromFiles", to_float32=True), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="PadMultiViewImage", size_divisor=32), dict( - type='MultiScaleFlipAug3D', + type="MultiScaleFlipAug3D", img_scale=(1600, 900), pts_scale_ratio=1, flip=False, transforms=[ dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='CustomCollect3D', keys=['img']) - ]) + type="DefaultFormatBundle3D", class_names=class_names, with_label=False + ), + dict(type="CustomCollect3D", keys=["img"]), + ], + ), ] data = dict( @@ -214,7 +239,7 @@ type=dataset_type, use_occ_gts=use_occ_gts, data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_train_occ_gt.pkl', + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_train_occ_gt.pkl", pipeline=train_pipeline, classes=class_names, modality=input_modality, @@ -224,49 +249,56 @@ queue_length=queue_length, # we use box_type_3d='LiDAR' in kitti and nuscenes dataset # and box_type_3d='Depth' in sunrgbd and scannet dataset. 
- box_type_3d='LiDAR'), - val=dict(type=dataset_type, - data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl', - pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality, samples_per_gpu=1), - test=dict(type=dataset_type, - data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl', - pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality), - shuffler_sampler=dict(type='DistributedGroupSampler'), - nonshuffler_sampler=dict(type='DistributedSampler') + box_type_3d="LiDAR", + ), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl", + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + ), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl", + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + ), + shuffler_sampler=dict(type="DistributedGroupSampler"), + nonshuffler_sampler=dict(type="DistributedSampler"), ) optimizer = dict( - type='AdamW', + type="AdamW", lr=2e-4, weight_decay=0.05, - constructor='CustomLayerDecayOptimizerConstructor', - paramwise_cfg=dict( - num_layers=33, layer_decay_rate=1.0, - depths=[4, 4, 21, 4])) + constructor="CustomLayerDecayOptimizerConstructor", + paramwise_cfg=dict(num_layers=33, layer_decay_rate=1.0, depths=[4, 4, 21, 4]), +) optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict( - policy='CosineAnnealing', - warmup='linear', + policy="CosineAnnealing", + warmup="linear", warmup_iters=500, warmup_ratio=1.0 / 3, - min_lr_ratio=1e-3) + min_lr_ratio=1e-3, +) total_epochs = 24 evaluation = dict(interval=1, pipeline=test_pipeline) -runner = dict(type='EpochBasedRunner', max_epochs=total_epochs) +runner = dict(type="EpochBasedRunner", max_epochs=total_epochs) log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook'), - dict(type='TensorboardLoggerHook') - ]) + interval=50, hooks=[dict(type="TextLoggerHook"), dict(type="TensorboardLoggerHook")] +) checkpoint_config = dict(interval=1) -find_unused_parameters = False \ No newline at end of file +find_unused_parameters = False diff --git a/Chapter08-FinalProject/OccNet/projects/configs/bevformer/bev_tiny_det.py b/Chapter08-FinalProject/OccNet/projects/configs/bevformer/bev_tiny_det.py index e64b144..c8a40a1 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/bevformer/bev_tiny_det.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/bevformer/bev_tiny_det.py @@ -1,13 +1,10 @@ # tiny model ResNet50 # occupancy_size = 0.5 -_base_ = [ - '../datasets/custom_nus-3d.py', - '../_base_/default_runtime.py' -] +_base_ = ["../datasets/custom_nus-3d.py", "../_base_/default_runtime.py"] # plugin = True -plugin_dir = 'projects/mmdet3d_plugin/' +plugin_dir = "projects/mmdet3d_plugin/" # If point cloud range is changed, the models should also change their point # cloud range accordingly @@ -16,55 +13,62 @@ occupancy_size = [0.5, 0.5, 0.5] - img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True +) # For nuScenes we usually do 10-class detection class_names = [ - 'car', 'truck', 'construction_vehicle', 
'bus', 'trailer', 'barrier', - 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' + "car", + "truck", + "construction_vehicle", + "bus", + "trailer", + "barrier", + "motorcycle", + "bicycle", + "pedestrian", + "traffic_cone", ] input_modality = dict( - use_lidar=False, - use_camera=True, - use_radar=False, - use_map=False, - use_external=True) + use_lidar=False, use_camera=True, use_radar=False, use_map=False, use_external=True +) _dim_ = 256 -_pos_dim_ = _dim_//2 -_ffn_dim_ = _dim_*2 +_pos_dim_ = _dim_ // 2 +_ffn_dim_ = _dim_ * 2 _num_levels_ = 1 bev_h_ = 200 bev_w_ = 200 -queue_length = 3 # each sequence contains `queue_length` frames. +queue_length = 3 # each sequence contains `queue_length` frames. model = dict( - type='BEVFormer', + type="BEVFormer", use_grid_mask=True, video_test_mode=True, - pretrained=dict(img='torchvision://resnet50'), + pretrained=dict(img="torchvision://resnet50"), img_backbone=dict( - type='ResNet', + type="ResNet", depth=50, num_stages=4, out_indices=(3,), frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), + norm_cfg=dict(type="BN", requires_grad=False), norm_eval=True, - style='pytorch'), + style="pytorch", + ), img_neck=dict( - type='FPN', + type="FPN", in_channels=[2048], out_channels=_dim_, start_level=0, - add_extra_convs='on_output', + add_extra_convs="on_output", num_outs=_num_levels_, - relu_before_extra_convs=True), + relu_before_extra_convs=True, + ), pts_bbox_head=dict( - type='BEVFormerHead', + type="BEVFormerHead", bev_h=bev_h_, bev_w=bev_w_, num_query=900, @@ -74,130 +78,158 @@ with_box_refine=True, as_two_stage=False, transformer=dict( - type='PerceptionTransformer', + type="PerceptionTransformer", rotate_prev_bev=True, use_shift=True, use_can_bus=True, embed_dims=_dim_, encoder=dict( - type='BEVFormerEncoder', + type="BEVFormerEncoder", num_layers=3, pc_range=point_cloud_range, num_points_in_pillar=4, return_intermediate=False, transformerlayers=dict( - type='BEVFormerLayer', + type="BEVFormerLayer", attn_cfgs=[ dict( - type='TemporalSelfAttention', - embed_dims=_dim_, - num_levels=1), + type="TemporalSelfAttention", embed_dims=_dim_, num_levels=1 + ), dict( - type='SpatialCrossAttention', + type="SpatialCrossAttention", pc_range=point_cloud_range, deformable_attention=dict( - type='MSDeformableAttention3D', + type="MSDeformableAttention3D", embed_dims=_dim_, num_points=8, - num_levels=_num_levels_), + num_levels=_num_levels_, + ), embed_dims=_dim_, - ) + ), ], feedforward_channels=_ffn_dim_, ffn_dropout=0.1, - operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm'))), + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), + ), decoder=dict( - type='DetectionTransformerDecoder', + type="DetectionTransformerDecoder", num_layers=6, return_intermediate=True, transformerlayers=dict( - type='DetrTransformerDecoderLayer', + type="DetrTransformerDecoderLayer", attn_cfgs=[ dict( - type='MultiheadAttention', + type="MultiheadAttention", embed_dims=_dim_, num_heads=8, - dropout=0.1), - dict( - type='CustomMSDeformableAttention', + dropout=0.1, + ), + dict( + type="CustomMSDeformableAttention", embed_dims=_dim_, - num_levels=1), + num_levels=1, + ), ], - feedforward_channels=_ffn_dim_, ffn_dropout=0.1, - operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm')))), + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), + ), + ), bbox_coder=dict( - type='NMSFreeCoder', + type="NMSFreeCoder", 
post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], pc_range=point_cloud_range, max_num=300, voxel_size=voxel_size, - num_classes=10), + num_classes=10, + ), positional_encoding=dict( - type='LearnedPositionalEncoding', + type="LearnedPositionalEncoding", num_feats=_pos_dim_, row_num_embed=bev_h_, col_num_embed=bev_w_, - ), + ), loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=2.0), - loss_bbox=dict(type='L1Loss', loss_weight=0.25), - loss_iou=dict(type='GIoULoss', loss_weight=0.0)), + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type="L1Loss", loss_weight=0.25), + loss_iou=dict(type="GIoULoss", loss_weight=0.0), + ), # model training and testing settings - train_cfg=dict(pts=dict( - grid_size=[512, 512, 1], - voxel_size=voxel_size, - point_cloud_range=point_cloud_range, - out_size_factor=4, - assigner=dict( - type='HungarianAssigner3D', - cls_cost=dict(type='FocalLossCost', weight=2.0), - reg_cost=dict(type='BBox3DL1Cost', weight=0.25), - iou_cost=dict(type='IoUCost', weight=0.0), # Fake cost. This is just to make it compatible with DETR head. - pc_range=point_cloud_range)))) + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type="HungarianAssigner3D", + cls_cost=dict(type="FocalLossCost", weight=2.0), + reg_cost=dict(type="BBox3DL1Cost", weight=0.25), + iou_cost=dict( + type="IoUCost", weight=0.0 + ), # Fake cost. This is just to make it compatible with DETR head. + pc_range=point_cloud_range, + ), + ) + ), +) -dataset_type = 'CustomNuScenesDataset' -data_root = 'data/nuscenes/' -file_client_args = dict(backend='disk') +dataset_type = "CustomNuScenesDataset" +data_root = "data/nuscenes/" +file_client_args = dict(backend="disk") train_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='PhotoMetricDistortionMultiViewImage'), - dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), - dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), - dict(type='ObjectNameFilter', classes=class_names), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), - dict(type='PadMultiViewImage', size_divisor=32), - dict(type='DefaultFormatBundle3D', class_names=class_names), - dict(type='CustomCollect3D', keys=['gt_bboxes_3d', 'gt_labels_3d', 'img']) + dict(type="LoadMultiViewImageFromFiles", to_float32=True), + dict(type="PhotoMetricDistortionMultiViewImage"), + dict( + type="LoadAnnotations3D", + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False, + ), + dict(type="ObjectRangeFilter", point_cloud_range=point_cloud_range), + dict(type="ObjectNameFilter", classes=class_names), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="RandomScaleImageMultiViewImage", scales=[0.5]), + dict(type="PadMultiViewImage", size_divisor=32), + dict(type="DefaultFormatBundle3D", class_names=class_names), + dict(type="CustomCollect3D", keys=["gt_bboxes_3d", "gt_labels_3d", "img"]), ] test_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - + dict(type="LoadMultiViewImageFromFiles", to_float32=True), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), dict( - type='MultiScaleFlipAug3D', + type="MultiScaleFlipAug3D", img_scale=(1600, 900), 
pts_scale_ratio=1, flip=False, transforms=[ - dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), - dict(type='PadMultiViewImage', size_divisor=32), + dict(type="RandomScaleImageMultiViewImage", scales=[0.5]), + dict(type="PadMultiViewImage", size_divisor=32), dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='CustomCollect3D', keys=['img']) - ]) + type="DefaultFormatBundle3D", class_names=class_names, with_label=False + ), + dict(type="CustomCollect3D", keys=["img"]), + ], + ), ] data = dict( @@ -206,7 +238,7 @@ train=dict( type=dataset_type, data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_train_occ_gt.pkl', + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_train_occ_gt.pkl", pipeline=train_pipeline, classes=class_names, modality=input_modality, @@ -216,48 +248,54 @@ queue_length=queue_length, # we use box_type_3d='LiDAR' in kitti and nuscenes dataset # and box_type_3d='Depth' in sunrgbd and scannet dataset. - box_type_3d='LiDAR'), - val=dict(type=dataset_type, - data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl', - pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality, samples_per_gpu=1), - test=dict(type=dataset_type, - data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl', - pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality), - shuffler_sampler=dict(type='DistributedGroupSampler'), - nonshuffler_sampler=dict(type='DistributedSampler') + box_type_3d="LiDAR", + ), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl", + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + ), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl", + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + ), + shuffler_sampler=dict(type="DistributedGroupSampler"), + nonshuffler_sampler=dict(type="DistributedSampler"), ) optimizer = dict( - type='AdamW', + type="AdamW", lr=2e-4, - paramwise_cfg=dict( - custom_keys={ - 'img_backbone': dict(lr_mult=0.1), - }), - weight_decay=0.01) + paramwise_cfg=dict(custom_keys={"img_backbone": dict(lr_mult=0.1)}), + weight_decay=0.01, +) optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict( - policy='CosineAnnealing', - warmup='linear', + policy="CosineAnnealing", + warmup="linear", warmup_iters=500, warmup_ratio=1.0 / 3, - min_lr_ratio=1e-3) + min_lr_ratio=1e-3, +) total_epochs = 24 evaluation = dict(interval=1, pipeline=test_pipeline) -runner = dict(type='EpochBasedRunner', max_epochs=total_epochs) +runner = dict(type="EpochBasedRunner", max_epochs=total_epochs) log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook'), - dict(type='TensorboardLoggerHook') - ]) + interval=50, hooks=[dict(type="TextLoggerHook"), dict(type="TensorboardLoggerHook")] +) checkpoint_config = dict(interval=1) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/bevformer/bev_tiny_det_occ.py b/Chapter08-FinalProject/OccNet/projects/configs/bevformer/bev_tiny_det_occ.py index 28df9bb..c67ca30 100644 --- 
a/Chapter08-FinalProject/OccNet/projects/configs/bevformer/bev_tiny_det_occ.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/bevformer/bev_tiny_det_occ.py @@ -1,13 +1,10 @@ # tiny model ResNet50 # occupancy_size = 0.5 -_base_ = [ - '../datasets/custom_nus-3d.py', - '../_base_/default_runtime.py' -] +_base_ = ["../datasets/custom_nus-3d.py", "../_base_/default_runtime.py"] # plugin = True -plugin_dir = 'projects/mmdet3d_plugin/' +plugin_dir = "projects/mmdet3d_plugin/" # If point cloud range is changed, the models should also change their point # cloud range accordingly @@ -16,59 +13,66 @@ occupancy_size = [0.5, 0.5, 0.5] - img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True +) # For nuScenes we usually do 10-class detection class_names = [ - 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', - 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' + "car", + "truck", + "construction_vehicle", + "bus", + "trailer", + "barrier", + "motorcycle", + "bicycle", + "pedestrian", + "traffic_cone", ] input_modality = dict( - use_lidar=False, - use_camera=True, - use_radar=False, - use_map=False, - use_external=True) + use_lidar=False, use_camera=True, use_radar=False, use_map=False, use_external=True +) _dim_ = 256 _occupancy_dim_ = 128 -_pos_dim_ = _dim_//2 -_ffn_dim_ = _dim_*2 +_pos_dim_ = _dim_ // 2 +_ffn_dim_ = _dim_ * 2 _num_levels_ = 1 bev_h_ = 200 bev_w_ = 200 -queue_length = 3 # each sequence contains `queue_length` frames. +queue_length = 3 # each sequence contains `queue_length` frames. model = dict( - type='BEVFormer', + type="BEVFormer", use_grid_mask=True, video_test_mode=True, use_occ_gts=True, only_occ=False, only_det=False, - pretrained=dict(img='torchvision://resnet50'), + pretrained=dict(img="torchvision://resnet50"), img_backbone=dict( - type='ResNet', + type="ResNet", depth=50, num_stages=4, out_indices=(3,), frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), + norm_cfg=dict(type="BN", requires_grad=False), norm_eval=True, - style='pytorch'), + style="pytorch", + ), img_neck=dict( - type='FPN', + type="FPN", in_channels=[2048], out_channels=_dim_, start_level=0, - add_extra_convs='on_output', + add_extra_convs="on_output", num_outs=_num_levels_, - relu_before_extra_convs=True), + relu_before_extra_convs=True, + ), pts_bbox_head=dict( - type='BEVFormerOccupancyHead', + type="BEVFormerOccupancyHead", bev_h=bev_h_, bev_w=bev_w_, num_query=900, @@ -82,137 +86,164 @@ occ_dims=_occupancy_dim_, occupancy_classes=16, transformer=dict( - type='PerceptionTransformer', + type="PerceptionTransformer", rotate_prev_bev=True, use_shift=True, use_can_bus=True, embed_dims=_dim_, encoder=dict( - type='BEVFormerEncoder', + type="BEVFormerEncoder", num_layers=3, pc_range=point_cloud_range, num_points_in_pillar=4, return_intermediate=False, transformerlayers=dict( - type='BEVFormerLayer', + type="BEVFormerLayer", attn_cfgs=[ dict( - type='TemporalSelfAttention', - embed_dims=_dim_, - num_levels=1), + type="TemporalSelfAttention", embed_dims=_dim_, num_levels=1 + ), dict( - type='SpatialCrossAttention', + type="SpatialCrossAttention", pc_range=point_cloud_range, deformable_attention=dict( - type='MSDeformableAttention3D', + type="MSDeformableAttention3D", embed_dims=_dim_, num_points=8, - num_levels=_num_levels_), + num_levels=_num_levels_, + ), embed_dims=_dim_, - ) + ), ], feedforward_channels=_ffn_dim_, ffn_dropout=0.1, - 
operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm'))), + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), + ), decoder=dict( - type='DetectionTransformerDecoder', + type="DetectionTransformerDecoder", num_layers=6, return_intermediate=True, transformerlayers=dict( - type='DetrTransformerDecoderLayer', + type="DetrTransformerDecoderLayer", attn_cfgs=[ dict( - type='MultiheadAttention', + type="MultiheadAttention", embed_dims=_dim_, num_heads=8, - dropout=0.1), - dict( - type='CustomMSDeformableAttention', + dropout=0.1, + ), + dict( + type="CustomMSDeformableAttention", embed_dims=_dim_, - num_levels=1), + num_levels=1, + ), ], - feedforward_channels=_ffn_dim_, ffn_dropout=0.1, - operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm')))), + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), + ), + ), bbox_coder=dict( - type='NMSFreeCoder', + type="NMSFreeCoder", post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], pc_range=point_cloud_range, max_num=300, voxel_size=voxel_size, - num_classes=10), + num_classes=10, + ), positional_encoding=dict( - type='LearnedPositionalEncoding', + type="LearnedPositionalEncoding", num_feats=_pos_dim_, row_num_embed=bev_h_, col_num_embed=bev_w_, - ), + ), loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=2.0), - loss_bbox=dict(type='L1Loss', loss_weight=0.25), - loss_iou=dict(type='GIoULoss', loss_weight=0.0), + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type="L1Loss", loss_weight=0.25), + loss_iou=dict(type="GIoULoss", loss_weight=0.0), loss_occupancy=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0)), + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0 + ), + ), # model training and testing settings - train_cfg=dict(pts=dict( - grid_size=[512, 512, 1], - voxel_size=voxel_size, - point_cloud_range=point_cloud_range, - out_size_factor=4, - assigner=dict( - type='HungarianAssigner3D', - cls_cost=dict(type='FocalLossCost', weight=2.0), - reg_cost=dict(type='BBox3DL1Cost', weight=0.25), - iou_cost=dict(type='IoUCost', weight=0.0), # Fake cost. This is just to make it compatible with DETR head. - pc_range=point_cloud_range)))) + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type="HungarianAssigner3D", + cls_cost=dict(type="FocalLossCost", weight=2.0), + reg_cost=dict(type="BBox3DL1Cost", weight=0.25), + iou_cost=dict( + type="IoUCost", weight=0.0 + ), # Fake cost. This is just to make it compatible with DETR head. 
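The `HungarianAssigner3D` block just above pairs each DETR-style query with at most one ground-truth box by minimizing a weighted sum of the configured costs; the zero-weight `IoUCost` exists, as the comment says, only to keep the head's interface intact. A minimal sketch of that bipartite-matching step, using SciPy rather than the actual mmdet3d assigner (the function name and toy inputs are illustrative):

import numpy as np
from scipy.optimize import linear_sum_assignment

def hungarian_assign(cls_cost, reg_cost, iou_cost,
                     w_cls=2.0, w_reg=0.25, w_iou=0.0):
    # Each *_cost is a (num_queries, num_gts) matrix; the weights mirror
    # cls_cost/reg_cost/iou_cost above. w_iou=0.0 reproduces the "fake cost".
    cost = w_cls * cls_cost + w_reg * reg_cost + w_iou * iou_cost
    query_idx, gt_idx = linear_sum_assignment(cost)  # one-to-one matching
    return query_idx, gt_idx

rng = np.random.default_rng(0)
# toy example: 4 queries, 2 ground-truth boxes -> 2 matched pairs
print(hungarian_assign(rng.random((4, 2)), rng.random((4, 2)), np.zeros((4, 2))))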
+ pc_range=point_cloud_range, + ), + ) + ), +) -dataset_type = 'CustomNuScenesDataset' -data_root = 'data/nuscenes/' -file_client_args = dict(backend='disk') +dataset_type = "CustomNuScenesDataset" +data_root = "data/nuscenes/" +file_client_args = dict(backend="disk") train_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='PhotoMetricDistortionMultiViewImage'), - dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), - dict(type='LoadOccupancyGT'), - dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), - dict(type='ObjectNameFilter', classes=class_names), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), - dict(type='PadMultiViewImage', size_divisor=32), - dict(type='CustomDefaultFormatBundle3D', class_names=class_names), - dict(type='CustomCollect3D', keys=['gt_bboxes_3d', 'gt_labels_3d', 'img', 'occ_gts']) + dict(type="LoadMultiViewImageFromFiles", to_float32=True), + dict(type="PhotoMetricDistortionMultiViewImage"), + dict( + type="LoadAnnotations3D", + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False, + ), + dict(type="LoadOccupancyGT"), + dict(type="ObjectRangeFilter", point_cloud_range=point_cloud_range), + dict(type="ObjectNameFilter", classes=class_names), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="RandomScaleImageMultiViewImage", scales=[0.5]), + dict(type="PadMultiViewImage", size_divisor=32), + dict(type="CustomDefaultFormatBundle3D", class_names=class_names), + dict( + type="CustomCollect3D", keys=["gt_bboxes_3d", "gt_labels_3d", "img", "occ_gts"] + ), ] test_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - + dict(type="LoadMultiViewImageFromFiles", to_float32=True), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), dict( - type='MultiScaleFlipAug3D', + type="MultiScaleFlipAug3D", img_scale=(1600, 900), pts_scale_ratio=1, flip=False, transforms=[ - dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), - dict(type='PadMultiViewImage', size_divisor=32), + dict(type="RandomScaleImageMultiViewImage", scales=[0.5]), + dict(type="PadMultiViewImage", size_divisor=32), dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='CustomCollect3D', keys=['img']) - ]) + type="DefaultFormatBundle3D", class_names=class_names, with_label=False + ), + dict(type="CustomCollect3D", keys=["img"]), + ], + ), ] data = dict( @@ -221,7 +252,7 @@ train=dict( type=dataset_type, data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_train_occ_gt.pkl', + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_train_occ_gt.pkl", pipeline=train_pipeline, classes=class_names, modality=input_modality, @@ -231,49 +262,55 @@ queue_length=queue_length, # we use box_type_3d='LiDAR' in kitti and nuscenes dataset # and box_type_3d='Depth' in sunrgbd and scannet dataset. 
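Each `dict(type=...)` entry in `train_pipeline`/`test_pipeline` above names a transform class that is looked up in a registry, instantiated with the remaining keys, and applied in order to a shared results dict. A self-contained sketch of that pattern (the registry and both toy transforms are stand-ins, not the real mmcv classes):

TRANSFORMS = {}

def register(cls):
    TRANSFORMS[cls.__name__] = cls
    return cls

def build(cfg):
    cfg = dict(cfg)  # don't mutate the config entry
    return TRANSFORMS[cfg.pop("type")](**cfg)

@register
class AddOne:
    # toy stand-in for a parameter-free step such as LoadOccupancyGT
    def __call__(self, results):
        results["x"] += 1
        return results

@register
class Scale:
    # toy stand-in for RandomScaleImageMultiViewImage(scales=[0.5])
    def __init__(self, factor):
        self.factor = factor
    def __call__(self, results):
        results["x"] *= self.factor
        return results

pipeline = [build(c) for c in [dict(type="AddOne"), dict(type="Scale", factor=0.5)]]
results = {"x": 3}
for transform in pipeline:
    results = transform(results)
assert results["x"] == 2.0  # (3 + 1) * 0.5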
- box_type_3d='LiDAR'), - val=dict(type=dataset_type, - data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl', - pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality, samples_per_gpu=1), - test=dict(type=dataset_type, - data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl', - pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality), - shuffler_sampler=dict(type='DistributedGroupSampler'), - nonshuffler_sampler=dict(type='DistributedSampler') + box_type_3d="LiDAR", + ), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl", + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + ), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl", + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + ), + shuffler_sampler=dict(type="DistributedGroupSampler"), + nonshuffler_sampler=dict(type="DistributedSampler"), ) optimizer = dict( - type='AdamW', + type="AdamW", lr=2e-4, - paramwise_cfg=dict( - custom_keys={ - 'img_backbone': dict(lr_mult=0.1), - }), - weight_decay=0.01) + paramwise_cfg=dict(custom_keys={"img_backbone": dict(lr_mult=0.1)}), + weight_decay=0.01, +) optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict( - policy='CosineAnnealing', - warmup='linear', + policy="CosineAnnealing", + warmup="linear", warmup_iters=500, warmup_ratio=1.0 / 3, - min_lr_ratio=1e-3) + min_lr_ratio=1e-3, +) total_epochs = 24 evaluation = dict(interval=1, pipeline=test_pipeline) -runner = dict(type='EpochBasedRunner', max_epochs=total_epochs) +runner = dict(type="EpochBasedRunner", max_epochs=total_epochs) log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook'), - dict(type='TensorboardLoggerHook') - ]) + interval=50, hooks=[dict(type="TextLoggerHook"), dict(type="TensorboardLoggerHook")] +) checkpoint_config = dict(interval=1) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/bevformer/bev_tiny_det_occ_flow.py b/Chapter08-FinalProject/OccNet/projects/configs/bevformer/bev_tiny_det_occ_flow.py index 077b5af..5bbe664 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/bevformer/bev_tiny_det_occ_flow.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/bevformer/bev_tiny_det_occ_flow.py @@ -1,13 +1,10 @@ # tiny model ResNet50 # occupancy_size = 0.5 -_base_ = [ - '../datasets/custom_nus-3d.py', - '../_base_/default_runtime.py' -] +_base_ = ["../datasets/custom_nus-3d.py", "../_base_/default_runtime.py"] # plugin = True -plugin_dir = 'projects/mmdet3d_plugin/' +plugin_dir = "projects/mmdet3d_plugin/" # If point cloud range is changed, the models should also change their point # cloud range accordingly @@ -16,57 +13,64 @@ occupancy_size = [0.5, 0.5, 0.5] - img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True +) # For nuScenes we usually do 10-class detection class_names = [ - 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', - 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' + "car", + "truck", + "construction_vehicle", + "bus", + "trailer", + 
"barrier", + "motorcycle", + "bicycle", + "pedestrian", + "traffic_cone", ] input_modality = dict( - use_lidar=True, - use_camera=True, - use_radar=False, - use_map=False, - use_external=True) + use_lidar=True, use_camera=True, use_radar=False, use_map=False, use_external=True +) _dim_ = 256 _occupancy_dim_ = 32 -_pos_dim_ = _dim_//2 -_ffn_dim_ = _dim_*2 +_pos_dim_ = _dim_ // 2 +_ffn_dim_ = _dim_ * 2 _num_levels_ = 1 bev_h_ = 200 bev_w_ = 200 -queue_length = 3 # each sequence contains `queue_length` frames. +queue_length = 3 # each sequence contains `queue_length` frames. model = dict( - type='BEVFormer', + type="BEVFormer", use_grid_mask=True, video_test_mode=True, use_occ_gts=True, - pretrained=dict(img='torchvision://resnet50'), + pretrained=dict(img="torchvision://resnet50"), img_backbone=dict( - type='ResNet', + type="ResNet", depth=50, num_stages=4, out_indices=(3,), frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), + norm_cfg=dict(type="BN", requires_grad=False), norm_eval=True, - style='pytorch'), + style="pytorch", + ), img_neck=dict( - type='FPN', + type="FPN", in_channels=[2048], out_channels=_dim_, start_level=0, - add_extra_convs='on_output', + add_extra_convs="on_output", num_outs=_num_levels_, - relu_before_extra_convs=True), + relu_before_extra_convs=True, + ), pts_bbox_head=dict( - type='BEVFormerOccupancyHead', + type="BEVFormerOccupancyHead", bev_h=bev_h_, bev_w=bev_w_, num_query=900, @@ -80,143 +84,181 @@ occ_dims=_occupancy_dim_, occupancy_classes=16, transformer=dict( - type='PerceptionTransformer', + type="PerceptionTransformer", rotate_prev_bev=True, use_shift=True, use_can_bus=True, embed_dims=_dim_, encoder=dict( - type='BEVFormerEncoder', + type="BEVFormerEncoder", num_layers=3, pc_range=point_cloud_range, num_points_in_pillar=4, return_intermediate=False, transformerlayers=dict( - type='BEVFormerLayer', + type="BEVFormerLayer", attn_cfgs=[ dict( - type='TemporalSelfAttention', - embed_dims=_dim_, - num_levels=1), + type="TemporalSelfAttention", embed_dims=_dim_, num_levels=1 + ), dict( - type='SpatialCrossAttention', + type="SpatialCrossAttention", pc_range=point_cloud_range, deformable_attention=dict( - type='MSDeformableAttention3D', + type="MSDeformableAttention3D", embed_dims=_dim_, num_points=8, - num_levels=_num_levels_), + num_levels=_num_levels_, + ), embed_dims=_dim_, - ) + ), ], feedforward_channels=_ffn_dim_, ffn_dropout=0.1, - operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm'))), + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), + ), decoder=dict( - type='DetectionTransformerDecoder', + type="DetectionTransformerDecoder", num_layers=6, return_intermediate=True, transformerlayers=dict( - type='DetrTransformerDecoderLayer', + type="DetrTransformerDecoderLayer", attn_cfgs=[ dict( - type='MultiheadAttention', + type="MultiheadAttention", embed_dims=_dim_, num_heads=8, - dropout=0.1), - dict( - type='CustomMSDeformableAttention', + dropout=0.1, + ), + dict( + type="CustomMSDeformableAttention", embed_dims=_dim_, - num_levels=1), + num_levels=1, + ), ], - feedforward_channels=_ffn_dim_, ffn_dropout=0.1, - operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm')))), + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), + ), + ), bbox_coder=dict( - type='NMSFreeCoder', + type="NMSFreeCoder", post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], pc_range=point_cloud_range, max_num=300, 
voxel_size=voxel_size, - num_classes=10), + num_classes=10, + ), positional_encoding=dict( - type='LearnedPositionalEncoding', + type="LearnedPositionalEncoding", num_feats=_pos_dim_, row_num_embed=bev_h_, col_num_embed=bev_w_, - ), + ), loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=2.0), - loss_bbox=dict(type='L1Loss', loss_weight=0.25), - loss_iou=dict(type='GIoULoss', loss_weight=0.0), - loss_flow=dict(type='L1Loss', loss_weight=0.25), # occupancy flow + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type="L1Loss", loss_weight=0.25), + loss_iou=dict(type="GIoULoss", loss_weight=0.0), + loss_flow=dict(type="L1Loss", loss_weight=0.25), # occupancy flow loss_occupancy=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0)), + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0 + ), + ), # model training and testing settings - train_cfg=dict(pts=dict( - grid_size=[512, 512, 1], - voxel_size=voxel_size, - point_cloud_range=point_cloud_range, - out_size_factor=4, - assigner=dict( - type='HungarianAssigner3D', - cls_cost=dict(type='FocalLossCost', weight=2.0), - reg_cost=dict(type='BBox3DL1Cost', weight=0.25), - iou_cost=dict(type='IoUCost', weight=0.0), # Fake cost. This is just to make it compatible with DETR head. - pc_range=point_cloud_range)))) + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type="HungarianAssigner3D", + cls_cost=dict(type="FocalLossCost", weight=2.0), + reg_cost=dict(type="BBox3DL1Cost", weight=0.25), + iou_cost=dict( + type="IoUCost", weight=0.0 + ), # Fake cost. This is just to make it compatible with DETR head. 
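`loss_occupancy` uses a sigmoid focal loss over the 16 occupancy classes, and this flow variant adds an L1 `loss_flow` term; the total is a weighted sum using the `loss_weight` values above. A hedged sketch of that combination in PyTorch, using a standard focal-loss formulation rather than mmdet's exact kernel (shapes are toy values):

import torch
import torch.nn.functional as F

def sigmoid_focal_loss(logits, targets, gamma=2.0, alpha=0.25):
    # standard sigmoid focal loss (matches use_sigmoid=True, gamma, alpha
    # in the config); not the exact mmdet implementation
    ce = F.binary_cross_entropy_with_logits(logits, targets, reduction="none")
    p = logits.sigmoid()
    p_t = p * targets + (1 - p) * (1 - targets)
    alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
    return (alpha_t * (1.0 - p_t) ** gamma * ce).mean()

# toy shapes: 8 voxels, 16 occupancy classes; 2-D flow per voxel
occ_logits = torch.randn(8, 16)
occ_targets = torch.randint(0, 2, (8, 16)).float()
flow_pred, flow_gt = torch.randn(8, 2), torch.randn(8, 2)

total = (1.0 * sigmoid_focal_loss(occ_logits, occ_targets)  # loss_occupancy weight
         + 0.25 * F.l1_loss(flow_pred, flow_gt))            # loss_flow weight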
+ pc_range=point_cloud_range, + ), + ) + ), +) -dataset_type = 'CustomNuScenesDataset' -data_root = 'data/nuscenes/' -file_client_args = dict(backend='disk') +dataset_type = "CustomNuScenesDataset" +data_root = "data/nuscenes/" +file_client_args = dict(backend="disk") train_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='PhotoMetricDistortionMultiViewImage'), - dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=3, use_dim=3, - file_client_args=file_client_args), - dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), - dict(type='LoadOccupancyGT'), - dict(type='LoadFlowGT'), # occupancy flow - dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), - dict(type='ObjectNameFilter', classes=class_names), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), - dict(type='PadMultiViewImage', size_divisor=32), - dict(type='CustomDefaultFormatBundle3D', class_names=class_names), - dict(type='CustomCollect3D', keys=['gt_bboxes_3d', 'gt_labels_3d', 'img', 'points', 'occ_gts', 'flow_gts']) + dict(type="LoadMultiViewImageFromFiles", to_float32=True), + dict(type="PhotoMetricDistortionMultiViewImage"), + dict( + type="LoadPointsFromFile", + coord_type="LIDAR", + load_dim=3, + use_dim=3, + file_client_args=file_client_args, + ), + dict( + type="LoadAnnotations3D", + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False, + ), + dict(type="LoadOccupancyGT"), + dict(type="LoadFlowGT"), # occupancy flow + dict(type="ObjectRangeFilter", point_cloud_range=point_cloud_range), + dict(type="ObjectNameFilter", classes=class_names), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="RandomScaleImageMultiViewImage", scales=[0.5]), + dict(type="PadMultiViewImage", size_divisor=32), + dict(type="CustomDefaultFormatBundle3D", class_names=class_names), + dict( + type="CustomCollect3D", + keys=["gt_bboxes_3d", "gt_labels_3d", "img", "points", "occ_gts", "flow_gts"], + ), ] test_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=3, use_dim=3, - file_client_args=file_client_args), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - + dict(type="LoadMultiViewImageFromFiles", to_float32=True), + dict( + type="LoadPointsFromFile", + coord_type="LIDAR", + load_dim=3, + use_dim=3, + file_client_args=file_client_args, + ), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), dict( - type='MultiScaleFlipAug3D', + type="MultiScaleFlipAug3D", img_scale=(1600, 900), pts_scale_ratio=1, flip=False, transforms=[ - dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), - dict(type='PadMultiViewImage', size_divisor=32), + dict(type="RandomScaleImageMultiViewImage", scales=[0.5]), + dict(type="PadMultiViewImage", size_divisor=32), dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='CustomCollect3D', keys=['img']) - ]) + type="DefaultFormatBundle3D", class_names=class_names, with_label=False + ), + dict(type="CustomCollect3D", keys=["img"]), + ], + ), ] data = dict( @@ -225,7 +267,7 @@ train=dict( type=dataset_type, data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_train_occ_gt.pkl', + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_train_occ_gt.pkl", pipeline=train_pipeline, classes=class_names, modality=input_modality, @@ -235,48 +277,54 @@ 
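The optimizer settings that follow use `paramwise_cfg` with `custom_keys={"img_backbone": dict(lr_mult=0.1)}`, i.e. backbone weights train at one tenth of the base learning rate. In plain PyTorch terms this is just two parameter groups; a minimal sketch (the two-module model is illustrative):

import torch

# two-module stand-in for a full BEVFormer; only the grouping logic matters
model = torch.nn.ModuleDict(
    {"img_backbone": torch.nn.Linear(4, 4), "head": torch.nn.Linear(4, 4)}
)
base_lr = 2e-4
param_groups = [
    {"params": model["img_backbone"].parameters(), "lr": base_lr * 0.1},  # lr_mult=0.1
    {"params": model["head"].parameters()},  # inherits base_lr
]
optimizer = torch.optim.AdamW(param_groups, lr=base_lr, weight_decay=0.01)
print([g["lr"] for g in optimizer.param_groups])  # [2e-05, 0.0002]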
queue_length=queue_length, # we use box_type_3d='LiDAR' in kitti and nuscenes dataset # and box_type_3d='Depth' in sunrgbd and scannet dataset. - box_type_3d='LiDAR'), - val=dict(type=dataset_type, - data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl', - pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality, samples_per_gpu=1), - test=dict(type=dataset_type, - data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl', - pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality), - shuffler_sampler=dict(type='DistributedGroupSampler'), - nonshuffler_sampler=dict(type='DistributedSampler') + box_type_3d="LiDAR", + ), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl", + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + ), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl", + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + ), + shuffler_sampler=dict(type="DistributedGroupSampler"), + nonshuffler_sampler=dict(type="DistributedSampler"), ) optimizer = dict( - type='AdamW', + type="AdamW", lr=2e-4, - paramwise_cfg=dict( - custom_keys={ - 'img_backbone': dict(lr_mult=0.1), - }), - weight_decay=0.01) + paramwise_cfg=dict(custom_keys={"img_backbone": dict(lr_mult=0.1)}), + weight_decay=0.01, +) optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict( - policy='CosineAnnealing', - warmup='linear', + policy="CosineAnnealing", + warmup="linear", warmup_iters=500, warmup_ratio=1.0 / 3, - min_lr_ratio=1e-3) + min_lr_ratio=1e-3, +) total_epochs = 24 evaluation = dict(interval=1, pipeline=test_pipeline) -runner = dict(type='EpochBasedRunner', max_epochs=total_epochs) +runner = dict(type="EpochBasedRunner", max_epochs=total_epochs) log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook'), - dict(type='TensorboardLoggerHook') - ]) + interval=50, hooks=[dict(type="TextLoggerHook"), dict(type="TensorboardLoggerHook")] +) checkpoint_config = dict(interval=1) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/bevformer/bev_tiny_occ.py b/Chapter08-FinalProject/OccNet/projects/configs/bevformer/bev_tiny_occ.py index d73ef7d..ac726a0 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/bevformer/bev_tiny_occ.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/bevformer/bev_tiny_occ.py @@ -1,13 +1,10 @@ # tiny model ResNet50 # occupancy_size = 0.5 -_base_ = [ - '../datasets/custom_nus-3d.py', - '../_base_/default_runtime.py' -] +_base_ = ["../datasets/custom_nus-3d.py", "../_base_/default_runtime.py"] # plugin = True -plugin_dir = 'projects/mmdet3d_plugin/' +plugin_dir = "projects/mmdet3d_plugin/" # If point cloud range is changed, the models should also change their point # cloud range accordingly @@ -16,59 +13,66 @@ occupancy_size = [0.5, 0.5, 0.5] - img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True +) # For nuScenes we usually do 10-class detection class_names = [ - 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', - 'motorcycle', 
'bicycle', 'pedestrian', 'traffic_cone' + "car", + "truck", + "construction_vehicle", + "bus", + "trailer", + "barrier", + "motorcycle", + "bicycle", + "pedestrian", + "traffic_cone", ] input_modality = dict( - use_lidar=False, - use_camera=True, - use_radar=False, - use_map=False, - use_external=True) + use_lidar=False, use_camera=True, use_radar=False, use_map=False, use_external=True +) _dim_ = 256 -_occupancy_dim_ = 128 -_pos_dim_ = _dim_//2 -_ffn_dim_ = _dim_*2 +_occupancy_dim_ = 128 +_pos_dim_ = _dim_ // 2 +_ffn_dim_ = _dim_ * 2 _num_levels_ = 1 bev_h_ = 200 bev_w_ = 200 -queue_length = 3 # each sequence contains `queue_length` frames. +queue_length = 3 # each sequence contains `queue_length` frames. model = dict( - type='BEVFormer', + type="BEVFormer", use_grid_mask=True, video_test_mode=True, use_occ_gts=True, only_occ=True, only_det=False, - pretrained=dict(img='torchvision://resnet50'), + pretrained=dict(img="torchvision://resnet50"), img_backbone=dict( - type='ResNet', + type="ResNet", depth=50, num_stages=4, out_indices=(3,), frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), + norm_cfg=dict(type="BN", requires_grad=False), norm_eval=True, - style='pytorch'), + style="pytorch", + ), img_neck=dict( - type='FPN', + type="FPN", in_channels=[2048], out_channels=_dim_, start_level=0, - add_extra_convs='on_output', + add_extra_convs="on_output", num_outs=_num_levels_, - relu_before_extra_convs=True), + relu_before_extra_convs=True, + ), pts_bbox_head=dict( - type='BEVFormerOccupancyHead', + type="BEVFormerOccupancyHead", bev_h=bev_h_, bev_w=bev_w_, num_query=900, @@ -83,115 +87,133 @@ occupancy_classes=16, only_occ=True, transformer=dict( - type='PerceptionTransformer', + type="PerceptionTransformer", rotate_prev_bev=True, use_shift=True, use_can_bus=True, embed_dims=_dim_, encoder=dict( - type='BEVFormerEncoder', + type="BEVFormerEncoder", num_layers=3, pc_range=point_cloud_range, num_points_in_pillar=4, return_intermediate=False, transformerlayers=dict( - type='BEVFormerLayer', + type="BEVFormerLayer", attn_cfgs=[ dict( - type='TemporalSelfAttention', - embed_dims=_dim_, - num_levels=1), + type="TemporalSelfAttention", embed_dims=_dim_, num_levels=1 + ), dict( - type='SpatialCrossAttention', + type="SpatialCrossAttention", pc_range=point_cloud_range, deformable_attention=dict( - type='MSDeformableAttention3D', + type="MSDeformableAttention3D", embed_dims=_dim_, num_points=8, - num_levels=_num_levels_), + num_levels=_num_levels_, + ), embed_dims=_dim_, - ) + ), ], feedforward_channels=_ffn_dim_, ffn_dropout=0.1, - operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm')))), + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), + ), + ), bbox_coder=dict( - type='NMSFreeCoder', + type="NMSFreeCoder", post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], pc_range=point_cloud_range, max_num=300, voxel_size=voxel_size, - num_classes=10), + num_classes=10, + ), positional_encoding=dict( - type='LearnedPositionalEncoding', + type="LearnedPositionalEncoding", num_feats=_pos_dim_, row_num_embed=bev_h_, col_num_embed=bev_w_, - ), + ), loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=2.0), - loss_bbox=dict(type='L1Loss', loss_weight=0.25), - loss_iou=dict(type='GIoULoss', loss_weight=0.0), + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type="L1Loss", loss_weight=0.25), + loss_iou=dict(type="GIoULoss", 
loss_weight=0.0), loss_occupancy=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0)), + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0 + ), + ), # model training and testing settings - train_cfg=dict(pts=dict( - grid_size=[512, 512, 1], - voxel_size=voxel_size, - point_cloud_range=point_cloud_range, - out_size_factor=4, - assigner=dict( - type='HungarianAssigner3D', - cls_cost=dict(type='FocalLossCost', weight=2.0), - reg_cost=dict(type='BBox3DL1Cost', weight=0.25), - iou_cost=dict(type='IoUCost', weight=0.0), # Fake cost. This is just to make it compatible with DETR head. - pc_range=point_cloud_range)))) + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type="HungarianAssigner3D", + cls_cost=dict(type="FocalLossCost", weight=2.0), + reg_cost=dict(type="BBox3DL1Cost", weight=0.25), + iou_cost=dict( + type="IoUCost", weight=0.0 + ), # Fake cost. This is just to make it compatible with DETR head. + pc_range=point_cloud_range, + ), + ) + ), +) -dataset_type = 'CustomNuScenesDataset' -data_root = 'data/nuscenes/' -file_client_args = dict(backend='disk') +dataset_type = "CustomNuScenesDataset" +data_root = "data/nuscenes/" +file_client_args = dict(backend="disk") train_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='PhotoMetricDistortionMultiViewImage'), - dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), - dict(type='LoadOccupancyGT'), - dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), - dict(type='ObjectNameFilter', classes=class_names), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), - dict(type='PadMultiViewImage', size_divisor=32), - dict(type='CustomDefaultFormatBundle3D', class_names=class_names), - dict(type='CustomCollect3D', keys=['gt_bboxes_3d', 'gt_labels_3d', 'img', 'occ_gts']) + dict(type="LoadMultiViewImageFromFiles", to_float32=True), + dict(type="PhotoMetricDistortionMultiViewImage"), + dict( + type="LoadAnnotations3D", + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False, + ), + dict(type="LoadOccupancyGT"), + dict(type="ObjectRangeFilter", point_cloud_range=point_cloud_range), + dict(type="ObjectNameFilter", classes=class_names), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="RandomScaleImageMultiViewImage", scales=[0.5]), + dict(type="PadMultiViewImage", size_divisor=32), + dict(type="CustomDefaultFormatBundle3D", class_names=class_names), + dict( + type="CustomCollect3D", keys=["gt_bboxes_3d", "gt_labels_3d", "img", "occ_gts"] + ), ] test_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - + dict(type="LoadMultiViewImageFromFiles", to_float32=True), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), dict( - type='MultiScaleFlipAug3D', + type="MultiScaleFlipAug3D", img_scale=(1600, 900), pts_scale_ratio=1, flip=False, transforms=[ - dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), - dict(type='PadMultiViewImage', size_divisor=32), + dict(type="RandomScaleImageMultiViewImage", scales=[0.5]), + dict(type="PadMultiViewImage", size_divisor=32), dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='CustomCollect3D', keys=['img']) - ]) + 
type="DefaultFormatBundle3D", class_names=class_names, with_label=False + ), + dict(type="CustomCollect3D", keys=["img"]), + ], + ), ] data = dict( @@ -199,10 +221,10 @@ workers_per_gpu=4, train=dict( type=dataset_type, - use_occ_gts=True, + use_occ_gts=True, load_occ_lidarseg=True, data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_train_occ_gt.pkl', + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_train_occ_gt.pkl", pipeline=train_pipeline, classes=class_names, modality=input_modality, @@ -210,51 +232,57 @@ use_valid_flag=True, bev_size=(bev_h_, bev_w_), queue_length=queue_length, - box_type_3d='LiDAR'), - val=dict(type=dataset_type, - load_occ_lidarseg=True, - data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl', - pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality, samples_per_gpu=1), - test=dict(type=dataset_type, - load_occ_lidarseg=True, - data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl', - pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality), - shuffler_sampler=dict(type='DistributedGroupSampler'), - nonshuffler_sampler=dict(type='DistributedSampler') + box_type_3d="LiDAR", + ), + val=dict( + type=dataset_type, + load_occ_lidarseg=True, + data_root=data_root, + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl", + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + ), + test=dict( + type=dataset_type, + load_occ_lidarseg=True, + data_root=data_root, + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl", + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + ), + shuffler_sampler=dict(type="DistributedGroupSampler"), + nonshuffler_sampler=dict(type="DistributedSampler"), ) optimizer = dict( - type='AdamW', + type="AdamW", lr=2e-4, - paramwise_cfg=dict( - custom_keys={ - 'img_backbone': dict(lr_mult=0.1), - }), - weight_decay=0.01) + paramwise_cfg=dict(custom_keys={"img_backbone": dict(lr_mult=0.1)}), + weight_decay=0.01, +) optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict( - policy='CosineAnnealing', - warmup='linear', + policy="CosineAnnealing", + warmup="linear", warmup_iters=500, warmup_ratio=1.0 / 3, - min_lr_ratio=1e-3) + min_lr_ratio=1e-3, +) total_epochs = 24 evaluation = dict(interval=1, pipeline=test_pipeline) -runner = dict(type='EpochBasedRunner', max_epochs=total_epochs) +runner = dict(type="EpochBasedRunner", max_epochs=total_epochs) log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook'), - dict(type='TensorboardLoggerHook') - ]) + interval=50, hooks=[dict(type="TextLoggerHook"), dict(type="TensorboardLoggerHook")] +) checkpoint_config = dict(interval=1) -find_unused_parameters = False \ No newline at end of file +find_unused_parameters = False diff --git a/Chapter08-FinalProject/OccNet/projects/configs/bevformer/bev_tiny_occ_intern_s.py b/Chapter08-FinalProject/OccNet/projects/configs/bevformer/bev_tiny_occ_intern_s.py index 0c0ab61..d4ec837 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/bevformer/bev_tiny_occ_intern_s.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/bevformer/bev_tiny_occ_intern_s.py @@ -1,13 +1,10 @@ # tiny model ResNet50 # occupancy_size = 0.5 -_base_ = [ - 
'../datasets/custom_nus-3d.py', - '../_base_/default_runtime.py' -] +_base_ = ["../datasets/custom_nus-3d.py", "../_base_/default_runtime.py"] # plugin = True -plugin_dir = 'projects/mmdet3d_plugin/' +plugin_dir = "projects/mmdet3d_plugin/" # If point cloud range is changed, the models should also change their point # cloud range accordingly @@ -17,33 +14,39 @@ img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True +) # For nuScenes we usually do 10-class detection class_names = [ - 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', - 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' + "car", + "truck", + "construction_vehicle", + "bus", + "trailer", + "barrier", + "motorcycle", + "bicycle", + "pedestrian", + "traffic_cone", ] input_modality = dict( - use_lidar=False, - use_camera=True, - use_radar=False, - use_map=False, - use_external=True) + use_lidar=False, use_camera=True, use_radar=False, use_map=False, use_external=True +) _dim_ = 256 -_occupancy_dim_ = 128 -_pos_dim_ = _dim_//2 -_ffn_dim_ = _dim_*2 +_occupancy_dim_ = 128 +_pos_dim_ = _dim_ // 2 +_ffn_dim_ = _dim_ * 2 _num_levels_ = 1 bev_h_ = 200 bev_w_ = 200 -queue_length = 3 # each sequence contains `queue_length` frames. +queue_length = 3 # each sequence contains `queue_length` frames. -pretrained = 'ckpts/mask_rcnn_internimage_s_fpn_3x_coco.pth' +pretrained = "ckpts/mask_rcnn_internimage_s_fpn_3x_coco.pth" model = dict( - type='BEVFormer', + type="BEVFormer", use_grid_mask=True, video_test_mode=True, use_occ_gts=True, @@ -51,30 +54,32 @@ only_det=False, img_backbone=dict( _delete_=True, - type='InternImage', - core_op='DCNv3', + type="InternImage", + core_op="DCNv3", channels=80, depths=[4, 4, 21, 4], groups=[5, 10, 20, 40], - mlp_ratio=4., + mlp_ratio=4.0, drop_path_rate=0.3, - norm_layer='LN', + norm_layer="LN", layer_scale=1.0, offset_scale=1.0, post_norm=True, with_cp=False, out_indices=(3,), - init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + init_cfg=dict(type="Pretrained", checkpoint=pretrained), + ), img_neck=dict( - type='FPN', + type="FPN", in_channels=[640], out_channels=_dim_, start_level=0, - add_extra_convs='on_output', + add_extra_convs="on_output", num_outs=_num_levels_, - relu_before_extra_convs=True), + relu_before_extra_convs=True, + ), pts_bbox_head=dict( - type='BEVFormerOccupancyHead', + type="BEVFormerOccupancyHead", bev_h=bev_h_, bev_w=bev_w_, num_query=900, @@ -89,115 +94,133 @@ occupancy_classes=16, only_occ=True, transformer=dict( - type='PerceptionTransformer', + type="PerceptionTransformer", rotate_prev_bev=True, use_shift=True, use_can_bus=True, embed_dims=_dim_, encoder=dict( - type='BEVFormerEncoder', + type="BEVFormerEncoder", num_layers=3, pc_range=point_cloud_range, num_points_in_pillar=4, return_intermediate=False, transformerlayers=dict( - type='BEVFormerLayer', + type="BEVFormerLayer", attn_cfgs=[ dict( - type='TemporalSelfAttention', - embed_dims=_dim_, - num_levels=1), + type="TemporalSelfAttention", embed_dims=_dim_, num_levels=1 + ), dict( - type='SpatialCrossAttention', + type="SpatialCrossAttention", pc_range=point_cloud_range, deformable_attention=dict( - type='MSDeformableAttention3D', + type="MSDeformableAttention3D", embed_dims=_dim_, num_points=8, - num_levels=_num_levels_), + num_levels=_num_levels_, + ), embed_dims=_dim_, - ) + ), ], feedforward_channels=_ffn_dim_, ffn_dropout=0.1, - operation_order=('self_attn', 'norm', 
'cross_attn', 'norm', - 'ffn', 'norm')))), + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), + ), + ), bbox_coder=dict( - type='NMSFreeCoder', + type="NMSFreeCoder", post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], pc_range=point_cloud_range, max_num=300, voxel_size=voxel_size, - num_classes=10), + num_classes=10, + ), positional_encoding=dict( - type='LearnedPositionalEncoding', + type="LearnedPositionalEncoding", num_feats=_pos_dim_, row_num_embed=bev_h_, col_num_embed=bev_w_, - ), + ), loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=2.0), - loss_bbox=dict(type='L1Loss', loss_weight=0.25), - loss_iou=dict(type='GIoULoss', loss_weight=0.0), + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type="L1Loss", loss_weight=0.25), + loss_iou=dict(type="GIoULoss", loss_weight=0.0), loss_occupancy=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0)), + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0 + ), + ), # model training and testing settings - train_cfg=dict(pts=dict( - grid_size=[512, 512, 1], - voxel_size=voxel_size, - point_cloud_range=point_cloud_range, - out_size_factor=4, - assigner=dict( - type='HungarianAssigner3D', - cls_cost=dict(type='FocalLossCost', weight=2.0), - reg_cost=dict(type='BBox3DL1Cost', weight=0.25), - iou_cost=dict(type='IoUCost', weight=0.0), # Fake cost. This is just to make it compatible with DETR head. - pc_range=point_cloud_range)))) + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type="HungarianAssigner3D", + cls_cost=dict(type="FocalLossCost", weight=2.0), + reg_cost=dict(type="BBox3DL1Cost", weight=0.25), + iou_cost=dict( + type="IoUCost", weight=0.0 + ), # Fake cost. This is just to make it compatible with DETR head. 
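All of these configs share the same learning-rate policy: a linear warmup from `base_lr * warmup_ratio` over the first 500 iterations, then cosine annealing down to `base_lr * min_lr_ratio`. A plain re-derivation of that curve (not mmcv's `LrUpdaterHook`, just the arithmetic it implements):

import math

def lr_at(step, total_steps, base_lr=2e-4, warmup_iters=500,
          warmup_ratio=1.0 / 3, min_lr_ratio=1e-3):
    if step < warmup_iters:
        # linear ramp from base_lr * warmup_ratio up to base_lr
        frac = step / warmup_iters
        return base_lr * (warmup_ratio + (1.0 - warmup_ratio) * frac)
    # cosine annealing from base_lr down to base_lr * min_lr_ratio
    t = (step - warmup_iters) / max(total_steps - warmup_iters, 1)
    min_lr = base_lr * min_lr_ratio
    return min_lr + 0.5 * (base_lr - min_lr) * (1.0 + math.cos(math.pi * t))

for s in (0, 500, 5000, 10000):
    print(s, lr_at(s, total_steps=10000))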
+ pc_range=point_cloud_range, + ), + ) + ), +) -dataset_type = 'CustomNuScenesDataset' -data_root = 'data/nuscenes/' -file_client_args = dict(backend='disk') +dataset_type = "CustomNuScenesDataset" +data_root = "data/nuscenes/" +file_client_args = dict(backend="disk") train_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='PhotoMetricDistortionMultiViewImage'), - dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), - dict(type='LoadOccupancyGT'), - dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), - dict(type='ObjectNameFilter', classes=class_names), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), - dict(type='PadMultiViewImage', size_divisor=32), - dict(type='CustomDefaultFormatBundle3D', class_names=class_names), - dict(type='CustomCollect3D', keys=['gt_bboxes_3d', 'gt_labels_3d', 'img', 'occ_gts']) + dict(type="LoadMultiViewImageFromFiles", to_float32=True), + dict(type="PhotoMetricDistortionMultiViewImage"), + dict( + type="LoadAnnotations3D", + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False, + ), + dict(type="LoadOccupancyGT"), + dict(type="ObjectRangeFilter", point_cloud_range=point_cloud_range), + dict(type="ObjectNameFilter", classes=class_names), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="RandomScaleImageMultiViewImage", scales=[0.5]), + dict(type="PadMultiViewImage", size_divisor=32), + dict(type="CustomDefaultFormatBundle3D", class_names=class_names), + dict( + type="CustomCollect3D", keys=["gt_bboxes_3d", "gt_labels_3d", "img", "occ_gts"] + ), ] test_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - + dict(type="LoadMultiViewImageFromFiles", to_float32=True), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), dict( - type='MultiScaleFlipAug3D', + type="MultiScaleFlipAug3D", img_scale=(1600, 900), pts_scale_ratio=1, flip=False, transforms=[ - dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), - dict(type='PadMultiViewImage', size_divisor=32), + dict(type="RandomScaleImageMultiViewImage", scales=[0.5]), + dict(type="PadMultiViewImage", size_divisor=32), dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='CustomCollect3D', keys=['img']) - ]) + type="DefaultFormatBundle3D", class_names=class_names, with_label=False + ), + dict(type="CustomCollect3D", keys=["img"]), + ], + ), ] data = dict( @@ -205,10 +228,10 @@ workers_per_gpu=4, train=dict( type=dataset_type, - use_occ_gts=True, + use_occ_gts=True, load_occ_lidarseg=True, data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_train_occ_gt.pkl', + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_train_occ_gt.pkl", pipeline=train_pipeline, classes=class_names, modality=input_modality, @@ -216,51 +239,57 @@ use_valid_flag=True, bev_size=(bev_h_, bev_w_), queue_length=queue_length, - box_type_3d='LiDAR'), - val=dict(type=dataset_type, - load_occ_lidarseg=True, - data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl', - pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality, samples_per_gpu=1), - test=dict(type=dataset_type, - load_occ_lidarseg=True, - data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl', - 
pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality), - shuffler_sampler=dict(type='DistributedGroupSampler'), - nonshuffler_sampler=dict(type='DistributedSampler') + box_type_3d="LiDAR", + ), + val=dict( + type=dataset_type, + load_occ_lidarseg=True, + data_root=data_root, + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl", + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + ), + test=dict( + type=dataset_type, + load_occ_lidarseg=True, + data_root=data_root, + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl", + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + ), + shuffler_sampler=dict(type="DistributedGroupSampler"), + nonshuffler_sampler=dict(type="DistributedSampler"), ) optimizer = dict( - type='AdamW', + type="AdamW", lr=2e-4, - paramwise_cfg=dict( - custom_keys={ - 'img_backbone': dict(lr_mult=0.1), - }), - weight_decay=0.01) + paramwise_cfg=dict(custom_keys={"img_backbone": dict(lr_mult=0.1)}), + weight_decay=0.01, +) optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict( - policy='CosineAnnealing', - warmup='linear', + policy="CosineAnnealing", + warmup="linear", warmup_iters=500, warmup_ratio=1.0 / 3, - min_lr_ratio=1e-3) + min_lr_ratio=1e-3, +) total_epochs = 24 evaluation = dict(interval=1, pipeline=test_pipeline) -runner = dict(type='EpochBasedRunner', max_epochs=total_epochs) +runner = dict(type="EpochBasedRunner", max_epochs=total_epochs) log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook'), - dict(type='TensorboardLoggerHook') - ]) + interval=50, hooks=[dict(type="TextLoggerHook"), dict(type="TensorboardLoggerHook")] +) checkpoint_config = dict(interval=1) -find_unused_parameters = False \ No newline at end of file +find_unused_parameters = False diff --git a/Chapter08-FinalProject/OccNet/projects/configs/datasets/custom_lyft-3d.py b/Chapter08-FinalProject/OccNet/projects/configs/datasets/custom_lyft-3d.py index 5a95d89..ec1130b 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/datasets/custom_lyft-3d.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/datasets/custom_lyft-3d.py @@ -3,20 +3,24 @@ point_cloud_range = [-80, -80, -5, 80, 80, 3] # For Lyft we usually do 9-class detection class_names = [ - 'car', 'truck', 'bus', 'emergency_vehicle', 'other_vehicle', 'motorcycle', - 'bicycle', 'pedestrian', 'animal' + "car", + "truck", + "bus", + "emergency_vehicle", + "other_vehicle", + "motorcycle", + "bicycle", + "pedestrian", + "animal", ] -dataset_type = 'CustomLyftDataset' -data_root = 'data/lyft/' +dataset_type = "CustomLyftDataset" +data_root = "data/lyft/" # Input modality for Lyft dataset, this is consistent with the submission # format which requires the information in input_modality. input_modality = dict( - use_lidar=True, - use_camera=False, - use_radar=False, - use_map=False, - use_external=True) -file_client_args = dict(backend='disk') + use_lidar=True, use_camera=False, use_radar=False, use_map=False, use_external=True +) +file_client_args = dict(backend="disk") # Uncomment the following if use ceph or other file clients. # See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient # for more details. 
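`file_client_args = dict(backend="disk")` means `LoadPointsFromFile` reads point clouds straight from local disk; for nuScenes/Lyft these are flat float32 `.bin` buffers. A rough sketch of the disk path, assuming that file layout (this is not the mmdet3d loader itself):

import numpy as np

def load_points(path, load_dim=5, use_dim=5):
    # assumption: a flat float32 buffer of N * load_dim values, as in
    # nuScenes/Lyft .bin files; keep the first use_dim columns
    # (x, y, z, intensity, ring index or timestamp)
    points = np.fromfile(path, dtype=np.float32).reshape(-1, load_dim)
    return points[:, :use_dim]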
@@ -28,78 +32,82 @@ # })) train_pipeline = [ dict( - type='LoadPointsFromFile', - coord_type='LIDAR', + type="LoadPointsFromFile", + coord_type="LIDAR", load_dim=5, use_dim=5, - file_client_args=file_client_args), + file_client_args=file_client_args, + ), dict( - type='LoadPointsFromMultiSweeps', + type="LoadPointsFromMultiSweeps", sweeps_num=10, - file_client_args=file_client_args), - dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + file_client_args=file_client_args, + ), + dict(type="LoadAnnotations3D", with_bbox_3d=True, with_label_3d=True), dict( - type='GlobalRotScaleTrans', + type="GlobalRotScaleTrans", rot_range=[-0.3925, 0.3925], scale_ratio_range=[0.95, 1.05], - translation_std=[0, 0, 0]), - dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), - dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), - dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), - dict(type='PointShuffle'), - dict(type='DefaultFormatBundle3D', class_names=class_names), - dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) + translation_std=[0, 0, 0], + ), + dict(type="RandomFlip3D", flip_ratio_bev_horizontal=0.5), + dict(type="PointsRangeFilter", point_cloud_range=point_cloud_range), + dict(type="ObjectRangeFilter", point_cloud_range=point_cloud_range), + dict(type="PointShuffle"), + dict(type="DefaultFormatBundle3D", class_names=class_names), + dict(type="Collect3D", keys=["points", "gt_bboxes_3d", "gt_labels_3d"]), ] test_pipeline = [ dict( - type='LoadPointsFromFile', - coord_type='LIDAR', + type="LoadPointsFromFile", + coord_type="LIDAR", load_dim=5, use_dim=5, - file_client_args=file_client_args), + file_client_args=file_client_args, + ), dict( - type='LoadPointsFromMultiSweeps', + type="LoadPointsFromMultiSweeps", sweeps_num=10, - file_client_args=file_client_args), + file_client_args=file_client_args, + ), dict( - type='MultiScaleFlipAug3D', + type="MultiScaleFlipAug3D", img_scale=(1333, 800), pts_scale_ratio=1, flip=False, transforms=[ dict( - type='GlobalRotScaleTrans', + type="GlobalRotScaleTrans", rot_range=[0, 0], - scale_ratio_range=[1., 1.], - translation_std=[0, 0, 0]), - dict(type='RandomFlip3D'), + scale_ratio_range=[1.0, 1.0], + translation_std=[0, 0, 0], + ), + dict(type="RandomFlip3D"), + dict(type="PointsRangeFilter", point_cloud_range=point_cloud_range), dict( - type='PointsRangeFilter', point_cloud_range=point_cloud_range), - dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='Collect3D', keys=['points']) - ]) + type="DefaultFormatBundle3D", class_names=class_names, with_label=False + ), + dict(type="Collect3D", keys=["points"]), + ], + ), ] # construct a pipeline for data and gt loading in show function # please keep its loading function consistent with test_pipeline (e.g. 
client) eval_pipeline = [ dict( - type='LoadPointsFromFile', - coord_type='LIDAR', + type="LoadPointsFromFile", + coord_type="LIDAR", load_dim=5, use_dim=5, - file_client_args=file_client_args), + file_client_args=file_client_args, + ), dict( - type='LoadPointsFromMultiSweeps', + type="LoadPointsFromMultiSweeps", sweeps_num=10, - file_client_args=file_client_args), - dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='Collect3D', keys=['points']) + file_client_args=file_client_args, + ), + dict(type="DefaultFormatBundle3D", class_names=class_names, with_label=False), + dict(type="Collect3D", keys=["points"]), ] data = dict( @@ -108,29 +116,33 @@ train=dict( type=dataset_type, data_root=data_root, - ann_file=data_root + 'lyft_infos_train.pkl', + ann_file=data_root + "lyft_infos_train.pkl", pipeline=train_pipeline, classes=class_names, modality=input_modality, - test_mode=False), + test_mode=False, + ), val=dict( type=dataset_type, data_root=data_root, - ann_file=data_root + 'lyft_infos_val.pkl', + ann_file=data_root + "lyft_infos_val.pkl", pipeline=test_pipeline, classes=class_names, modality=input_modality, - test_mode=True), + test_mode=True, + ), test=dict( type=dataset_type, data_root=data_root, - ann_file=data_root + 'lyft_infos_val.pkl', + ann_file=data_root + "lyft_infos_val.pkl", pipeline=test_pipeline, classes=class_names, modality=input_modality, - test_mode=True)) + test_mode=True, + ), +) # For Lyft dataset, we usually evaluate the model at the end of training. # Since the models are trained by 24 epochs by default, we set evaluation # interval to be 24. Please change the interval accordingly if you do not # use a default schedule. -evaluation = dict(interval=24, pipeline=eval_pipeline) \ No newline at end of file +evaluation = dict(interval=24, pipeline=eval_pipeline) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/datasets/custom_nus-3d.py b/Chapter08-FinalProject/OccNet/projects/configs/datasets/custom_nus-3d.py index af81f9b..d515b2d 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/datasets/custom_nus-3d.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/datasets/custom_nus-3d.py @@ -3,20 +3,25 @@ point_cloud_range = [-50, -50, -5, 50, 50, 3] # For nuScenes we usually do 10-class detection class_names = [ - 'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle', - 'motorcycle', 'pedestrian', 'traffic_cone', 'barrier' + "car", + "truck", + "trailer", + "bus", + "construction_vehicle", + "bicycle", + "motorcycle", + "pedestrian", + "traffic_cone", + "barrier", ] -dataset_type = 'NuScenesDataset_eval_modified' -data_root = 'data/nuscenes/' +dataset_type = "NuScenesDataset_eval_modified" +data_root = "data/nuscenes/" # Input modality for nuScenes dataset, this is consistent with the submission # format which requires the information in input_modality. input_modality = dict( - use_lidar=True, - use_camera=False, - use_radar=False, - use_map=False, - use_external=False) -file_client_args = dict(backend='disk') + use_lidar=True, use_camera=False, use_radar=False, use_map=False, use_external=False +) +file_client_args = dict(backend="disk") # Uncomment the following if use ceph or other file clients. # See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient # for more details. 
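`PointsRangeFilter` and `ObjectRangeFilter` above both crop to `point_cloud_range = [x_min, y_min, z_min, x_max, y_max, z_max]`. The core operation is a per-point box test; a minimal NumPy sketch (the function name is illustrative):

import numpy as np

def points_range_filter(points, pcr):
    # pcr = [x_min, y_min, z_min, x_max, y_max, z_max]
    lo, hi = np.asarray(pcr[:3]), np.asarray(pcr[3:])
    mask = np.all((points[:, :3] >= lo) & (points[:, :3] < hi), axis=1)
    return points[mask]

pts = np.array([[0.0, 0.0, 0.0, 0.5], [60.0, 0.0, 0.0, 0.5]], dtype=np.float32)
print(points_range_filter(pts, [-50, -50, -5, 50, 50, 3]).shape)  # (1, 4)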
@@ -28,79 +33,83 @@ # })) train_pipeline = [ dict( - type='LoadPointsFromFile', - coord_type='LIDAR', + type="LoadPointsFromFile", + coord_type="LIDAR", load_dim=5, use_dim=5, - file_client_args=file_client_args), + file_client_args=file_client_args, + ), dict( - type='LoadPointsFromMultiSweeps', + type="LoadPointsFromMultiSweeps", sweeps_num=10, - file_client_args=file_client_args), - dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), + file_client_args=file_client_args, + ), + dict(type="LoadAnnotations3D", with_bbox_3d=True, with_label_3d=True), dict( - type='GlobalRotScaleTrans', + type="GlobalRotScaleTrans", rot_range=[-0.3925, 0.3925], scale_ratio_range=[0.95, 1.05], - translation_std=[0, 0, 0]), - dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), - dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), - dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), - dict(type='ObjectNameFilter', classes=class_names), - dict(type='PointShuffle'), - dict(type='DefaultFormatBundle3D', class_names=class_names), - dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']) + translation_std=[0, 0, 0], + ), + dict(type="RandomFlip3D", flip_ratio_bev_horizontal=0.5), + dict(type="PointsRangeFilter", point_cloud_range=point_cloud_range), + dict(type="ObjectRangeFilter", point_cloud_range=point_cloud_range), + dict(type="ObjectNameFilter", classes=class_names), + dict(type="PointShuffle"), + dict(type="DefaultFormatBundle3D", class_names=class_names), + dict(type="Collect3D", keys=["points", "gt_bboxes_3d", "gt_labels_3d"]), ] test_pipeline = [ dict( - type='LoadPointsFromFile', - coord_type='LIDAR', + type="LoadPointsFromFile", + coord_type="LIDAR", load_dim=5, use_dim=5, - file_client_args=file_client_args), + file_client_args=file_client_args, + ), dict( - type='LoadPointsFromMultiSweeps', + type="LoadPointsFromMultiSweeps", sweeps_num=10, - file_client_args=file_client_args), + file_client_args=file_client_args, + ), dict( - type='MultiScaleFlipAug3D', + type="MultiScaleFlipAug3D", img_scale=(1333, 800), pts_scale_ratio=1, flip=False, transforms=[ dict( - type='GlobalRotScaleTrans', + type="GlobalRotScaleTrans", rot_range=[0, 0], - scale_ratio_range=[1., 1.], - translation_std=[0, 0, 0]), - dict(type='RandomFlip3D'), + scale_ratio_range=[1.0, 1.0], + translation_std=[0, 0, 0], + ), + dict(type="RandomFlip3D"), + dict(type="PointsRangeFilter", point_cloud_range=point_cloud_range), dict( - type='PointsRangeFilter', point_cloud_range=point_cloud_range), - dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='Collect3D', keys=['points']) - ]) + type="DefaultFormatBundle3D", class_names=class_names, with_label=False + ), + dict(type="Collect3D", keys=["points"]), + ], + ), ] # construct a pipeline for data and gt loading in show function # please keep its loading function consistent with test_pipeline (e.g. 
client) eval_pipeline = [ dict( - type='LoadPointsFromFile', - coord_type='LIDAR', + type="LoadPointsFromFile", + coord_type="LIDAR", load_dim=5, use_dim=5, - file_client_args=file_client_args), + file_client_args=file_client_args, + ), dict( - type='LoadPointsFromMultiSweeps', + type="LoadPointsFromMultiSweeps", sweeps_num=10, - file_client_args=file_client_args), - dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='Collect3D', keys=['points']) + file_client_args=file_client_args, + ), + dict(type="DefaultFormatBundle3D", class_names=class_names, with_label=False), + dict(type="Collect3D", keys=["points"]), ] data = dict( @@ -109,31 +118,35 @@ train=dict( type=dataset_type, data_root=data_root, - ann_file=data_root + 'nuscenes_infos_train.pkl', + ann_file=data_root + "nuscenes_infos_train.pkl", pipeline=train_pipeline, classes=class_names, modality=input_modality, test_mode=False, # we use box_type_3d='LiDAR' in kitti and nuscenes dataset # and box_type_3d='Depth' in sunrgbd and scannet dataset. - box_type_3d='LiDAR'), + box_type_3d="LiDAR", + ), val=dict( type=dataset_type, - ann_file=data_root + 'nuscenes_infos_val.pkl', + ann_file=data_root + "nuscenes_infos_val.pkl", pipeline=test_pipeline, classes=class_names, modality=input_modality, test_mode=True, - box_type_3d='LiDAR'), + box_type_3d="LiDAR", + ), test=dict( type=dataset_type, data_root=data_root, - ann_file=data_root + 'nuscenes_infos_val.pkl', + ann_file=data_root + "nuscenes_infos_val.pkl", pipeline=test_pipeline, classes=class_names, modality=input_modality, test_mode=True, - box_type_3d='LiDAR')) + box_type_3d="LiDAR", + ), +) # For nuScenes dataset, we usually evaluate the model at the end of training. # Since the models are trained by 24 epochs by default, we set evaluation # interval to be 24. Please change the interval accordingly if you do not diff --git a/Chapter08-FinalProject/OccNet/projects/configs/datasets/custom_waymo-3d.py b/Chapter08-FinalProject/OccNet/projects/configs/datasets/custom_waymo-3d.py index 4100e13..5303507 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/datasets/custom_waymo-3d.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/datasets/custom_waymo-3d.py @@ -1,67 +1,73 @@ # dataset settings # D5 in the config name means the whole dataset is divided into 5 folds # We only use one fold for efficient experiments -dataset_type = 'CustomWaymoDataset' -data_root = 'data/waymo/kitti_format/' -file_client_args = dict(backend='disk') +dataset_type = "CustomWaymoDataset" +data_root = "data/waymo/kitti_format/" +file_client_args = dict(backend="disk") # Uncomment the following if use ceph or other file clients. # See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient # for more details. 
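These dataset files are not run directly; the model configs pull them in through `_base_ = ["../datasets/custom_nus-3d.py", ...]` and override keys on top, with `_delete_=True` (as in the InternImage backbone earlier) discarding the inherited value instead of merging into it. A rough sketch of that merge rule on plain dicts (real mmcv configs add file loading and more edge cases):

def merge(base, child):
    # child keys win; dicts merge recursively unless the child dict carries
    # _delete_=True, which drops the inherited value first
    out = dict(base)
    for key, value in child.items():
        if isinstance(value, dict) and isinstance(out.get(key), dict):
            value = dict(value)
            if value.pop("_delete_", False):
                out[key] = value
            else:
                out[key] = merge(out[key], value)
        else:
            out[key] = value
    return out

base = dict(data=dict(samples_per_gpu=1, workers_per_gpu=4))
child = dict(data=dict(samples_per_gpu=2))
assert merge(base, child)["data"] == dict(samples_per_gpu=2, workers_per_gpu=4)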
# file_client_args = dict( # backend='petrel', path_mapping=dict(data='s3://waymo_data/')) -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -class_names = ['Car', 'Pedestrian', 'Cyclist'] +img_norm_cfg = dict(mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +class_names = ["Car", "Pedestrian", "Cyclist"] point_cloud_range = [-74.88, -74.88, -2, 74.88, 74.88, 4] input_modality = dict(use_lidar=False, use_camera=True) db_sampler = dict( data_root=data_root, - info_path=data_root + 'waymo_dbinfos_train.pkl', + info_path=data_root + "waymo_dbinfos_train.pkl", rate=1.0, prepare=dict( filter_by_difficulty=[-1], - filter_by_min_points=dict(Car=5, Pedestrian=10, Cyclist=10)), + filter_by_min_points=dict(Car=5, Pedestrian=10, Cyclist=10), + ), classes=class_names, sample_groups=dict(Car=15, Pedestrian=10, Cyclist=10), points_loader=dict( - type='LoadPointsFromFile', - coord_type='LIDAR', + type="LoadPointsFromFile", + coord_type="LIDAR", load_dim=5, use_dim=[0, 1, 2, 3, 4], - file_client_args=file_client_args)) - + file_client_args=file_client_args, + ), +) train_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='PhotoMetricDistortionMultiViewImage'), - dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), - dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), - dict(type='ObjectNameFilter', classes=class_names), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - dict(type='PadMultiViewImage', size_divisor=32), - dict(type='DefaultFormatBundle3D', class_names=class_names), - dict(type='CustomCollect3D', keys=['gt_bboxes_3d', 'gt_labels_3d', 'img']) + dict(type="LoadMultiViewImageFromFiles", to_float32=True), + dict(type="PhotoMetricDistortionMultiViewImage"), + dict( + type="LoadAnnotations3D", + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False, + ), + dict(type="ObjectRangeFilter", point_cloud_range=point_cloud_range), + dict(type="ObjectNameFilter", classes=class_names), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="PadMultiViewImage", size_divisor=32), + dict(type="DefaultFormatBundle3D", class_names=class_names), + dict(type="CustomCollect3D", keys=["gt_bboxes_3d", "gt_labels_3d", "img"]), ] test_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - dict(type='PadMultiViewImage', size_divisor=32), + dict(type="LoadMultiViewImageFromFiles", to_float32=True), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="PadMultiViewImage", size_divisor=32), dict( - type='MultiScaleFlipAug3D', + type="MultiScaleFlipAug3D", img_scale=(1920, 1280), pts_scale_ratio=1, flip=False, transforms=[ dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='CustomCollect3D', keys=['img']) - ]) + type="DefaultFormatBundle3D", class_names=class_names, with_label=False + ), + dict(type="CustomCollect3D", keys=["img"]), + ], + ), ] @@ -72,41 +78,46 @@ samples_per_gpu=2, workers_per_gpu=4, train=dict( - type='RepeatDataset', + type="RepeatDataset", times=2, dataset=dict( type=dataset_type, data_root=data_root, - ann_file=data_root + 'waymo_infos_train.pkl', - split='training', + ann_file=data_root + "waymo_infos_train.pkl", + split="training", pipeline=train_pipeline, modality=input_modality, classes=class_names, test_mode=False, # we use box_type_3d='LiDAR' in kitti and nuscenes dataset # 
and box_type_3d='Depth' in sunrgbd and scannet dataset. - box_type_3d='LiDAR', + box_type_3d="LiDAR", # load one frame every five frames - load_interval=5)), + load_interval=5, + ), + ), val=dict( type=dataset_type, data_root=data_root, - ann_file=data_root + 'waymo_infos_val.pkl', - split='training', + ann_file=data_root + "waymo_infos_val.pkl", + split="training", pipeline=test_pipeline, modality=input_modality, classes=class_names, test_mode=True, - box_type_3d='LiDAR'), + box_type_3d="LiDAR", + ), test=dict( type=dataset_type, data_root=data_root, - ann_file=data_root + 'waymo_infos_val.pkl', - split='training', + ann_file=data_root + "waymo_infos_val.pkl", + split="training", pipeline=test_pipeline, modality=input_modality, classes=class_names, test_mode=True, - box_type_3d='LiDAR')) + box_type_3d="LiDAR", + ), +) -evaluation = dict(interval=24, pipeline=test_pipeline) \ No newline at end of file +evaluation = dict(interval=24, pipeline=test_pipeline) diff --git a/Chapter08-FinalProject/OccNet/projects/configs/hybrid/hybrid_base_occ.py b/Chapter08-FinalProject/OccNet/projects/configs/hybrid/hybrid_base_occ.py index 30aad74..b582d31 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/hybrid/hybrid_base_occ.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/hybrid/hybrid_base_occ.py @@ -1,13 +1,10 @@ # base model ResNet101 # occupancy_size = 0.5 -_base_ = [ - '../datasets/custom_nus-3d.py', - '../_base_/default_runtime.py' -] +_base_ = ["../datasets/custom_nus-3d.py", "../_base_/default_runtime.py"] # plugin = True -plugin_dir = 'projects/mmdet3d_plugin/' +plugin_dir = "projects/mmdet3d_plugin/" # If point cloud range is changed, the models should also change their point # cloud range accordingly @@ -15,48 +12,52 @@ voxel_size = [0.2, 0.2, 8] occupancy_size = [0.5, 0.5, 0.5] -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +img_norm_cfg = dict(mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) # For nuScenes we usually do 10-class detection class_names = [ - 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', - 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' + "car", + "truck", + "construction_vehicle", + "bus", + "trailer", + "barrier", + "motorcycle", + "bicycle", + "pedestrian", + "traffic_cone", ] input_modality = dict( - use_lidar=False, - use_camera=True, - use_radar=False, - use_map=False, - use_external=True) + use_lidar=False, use_camera=True, use_radar=False, use_map=False, use_external=True +) _dim_ = 256 _occupancy_dim_ = 128 -_pos_dim_ = _dim_//2 -_ffn_dim_ = _dim_*2 +_pos_dim_ = _dim_ // 2 +_ffn_dim_ = _dim_ * 2 _num_levels_ = 3 bev_h_ = 200 bev_w_ = 200 bev_z_ = 16 -queue_length = 4 # each sequence contains `queue_length` frames. +queue_length = 4 # each sequence contains `queue_length` frames. 
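# Why `_pos_dim_` is `_dim_ // 2`: a learned 2D positional encoding of this
# kind concatenates a row embedding and a column embedding of `num_feats`
# channels each, so the encoding comes out with `2 * num_feats == _dim_`
# channels and can be added directly to the 256-d BEV queries. A minimal
# PyTorch sketch of that bookkeeping (illustrative only, not the actual
# mmdet module):
#
#   import torch
#   import torch.nn as nn
#
#   def learned_pos_encoding(h, w, num_feats=128):
#       row = nn.Embedding(h, num_feats)(torch.arange(h))  # (h, num_feats)
#       col = nn.Embedding(w, num_feats)(torch.arange(w))  # (w, num_feats)
#       grid_row = row[:, None, :].expand(h, w, num_feats)
#       grid_col = col[None, :, :].expand(h, w, num_feats)
#       # concatenate on the channel axis -> (2 * num_feats, h, w)
#       return torch.cat([grid_row, grid_col], dim=-1).permute(2, 0, 1)
#
#   assert learned_pos_encoding(200, 200).shape == (256, 200, 200)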
use_occ_gts = True only_occ = True only_det = False load_occ_lidarseg = True # info of voxel block encoder -voxel_encoder1_dim = _dim_//2 -voxel_encoder2_dim = _dim_//2 -voxel_encoder3_dim = _dim_//4 -voxel_encoder4_dim = _dim_//4 +voxel_encoder1_dim = _dim_ // 2 +voxel_encoder2_dim = _dim_ // 2 +voxel_encoder3_dim = _dim_ // 4 +voxel_encoder4_dim = _dim_ // 4 bev_z1 = 2 bev_z2 = 4 bev_z3 = 8 bev_z4 = 16 -_pos_dim_1 = voxel_encoder1_dim//2 -_pos_dim_2 = voxel_encoder2_dim//2 -_pos_dim_3 = voxel_encoder3_dim//2 -_pos_dim_4 = voxel_encoder4_dim//2 +_pos_dim_1 = voxel_encoder1_dim // 2 +_pos_dim_2 = voxel_encoder2_dim // 2 +_pos_dim_3 = voxel_encoder3_dim // 2 +_pos_dim_4 = voxel_encoder4_dim // 2 last_voxel_dims = voxel_encoder4_dim @@ -64,33 +65,37 @@ box_query_dims = _dim_ model = dict( - type='HybridFormer', + type="HybridFormer", use_grid_mask=True, video_test_mode=True, use_occ_gts=use_occ_gts, only_occ=only_occ, only_det=only_det, img_backbone=dict( - type='ResNet', + type="ResNet", depth=101, num_stages=4, out_indices=(1, 2, 3), frozen_stages=1, - norm_cfg=dict(type='BN2d', requires_grad=False), + norm_cfg=dict(type="BN2d", requires_grad=False), norm_eval=True, - style='caffe', - dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), # original DCNv2 will print log when perform load_state_dict - stage_with_dcn=(False, False, True, True)), + style="caffe", + dcn=dict( + type="DCNv2", deform_groups=1, fallback_on_stride=False + ), # original DCNv2 will print a log when performing load_state_dict + stage_with_dcn=(False, False, True, True), + ), img_neck=dict( - type='FPN', - in_channels=[512, 1024, 2048], + type="FPN", + in_channels=[512, 1024, 2048], out_channels=_dim_, start_level=0, - add_extra_convs='on_output', - num_outs=_num_levels_, # can be decreased to save memory for cross-attention - relu_before_extra_convs=True), + add_extra_convs="on_output", + num_outs=_num_levels_, # can be decreased to save memory for cross-attention + relu_before_extra_convs=True, + ), pts_bbox_head=dict( - type='HybridFormerOccupancyHead', + type="HybridFormerOccupancyHead", bev_h=bev_h_, bev_w=bev_w_, bev_z=bev_z_, @@ -109,179 +114,221 @@ last_voxel_dims=last_voxel_dims, box_query_dims=box_query_dims, # detection head transformer=dict( - type='HybridPerceptionTransformer', + type="HybridPerceptionTransformer", rotate_prev_bev=True, use_shift=True, use_can_bus=True, embed_dims=_dim_, decoder_on_bev=decoder_on_bev, - encoder_embed_dims=[_dim_, voxel_encoder1_dim, voxel_encoder2_dim, voxel_encoder3_dim, voxel_encoder4_dim], # the dim of cascaded voxel encoder - feature_map_z=[1, bev_z1, bev_z2, bev_z3, bev_z4], # the height of cascaded voxel encoder + encoder_embed_dims=[ + _dim_, + voxel_encoder1_dim, + voxel_encoder2_dim, + voxel_encoder3_dim, + voxel_encoder4_dim, + ], # the dim of cascaded voxel encoder + feature_map_z=[ + 1, + bev_z1, + bev_z2, + bev_z3, + bev_z4, + ], # the height of cascaded voxel encoder pos_dims=[_pos_dim_, _pos_dim_1, _pos_dim_2, _pos_dim_3, _pos_dim_4], position=dict( bev=dict( - type='LearnedPositionalEncoding', + type="LearnedPositionalEncoding", num_feats=_pos_dim_, row_num_embed=bev_h_, col_num_embed=bev_w_, - ), + ), voxel=dict( - type='VoxelLearnedPositionalEncoding', + type="VoxelLearnedPositionalEncoding", num_feats=_pos_dim_1, row_num_embed=bev_h_, col_num_embed=bev_w_, z_num_embed=bev_z1, - ) - ), + ), + ), encoder=dict( bev=dict( # the bev encoder - type='BEVFormerEncoder', + type="BEVFormerEncoder", num_layers=1, pc_range=point_cloud_range,
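# The `num_points_in_pillar` argument on the next line controls how many 3D
# reference points each BEV query samples along z inside its pillar before
# they are projected into the camera views for SpatialCrossAttention; with
# the usual 8 m nuScenes z-extent, 4 points is roughly one sample every 2 m
# (a hedged reading of the BEVFormer-style encoder).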
num_points_in_pillar=4, return_intermediate=False, transformerlayers=dict( - type='BEVFormerLayer', + type="BEVFormerLayer", attn_cfgs=[ dict( - type='TemporalSelfAttention', + type="TemporalSelfAttention", embed_dims=_dim_, - num_points=4, - num_levels=1), + num_points=4, + num_levels=1, + ), dict( - type='SpatialCrossAttention', + type="SpatialCrossAttention", pc_range=point_cloud_range, deformable_attention=dict( - type='MSDeformableAttention3D', + type="MSDeformableAttention3D", embed_dims=_dim_, num_points=8, - num_levels=_num_levels_), + num_levels=_num_levels_, + ), embed_dims=_dim_, - ) + ), ], ffn_cfgs=dict( - type='FFN', - embed_dims=_dim_, - feedforward_channels=_ffn_dim_, - num_fcs=2, - ffn_drop=0.1, - act_cfg=dict(type='ReLU', inplace=True), + type="FFN", + embed_dims=_dim_, + feedforward_channels=_ffn_dim_, + num_fcs=2, + ffn_drop=0.1, + act_cfg=dict(type="ReLU", inplace=True), ), feedforward_channels=_ffn_dim_, ffn_dropout=0.1, - operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm'))), + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), + ), voxel=dict( # the config of first cascaded voxel encoder - type='VoxelFormerEncoder', + type="VoxelFormerEncoder", num_layers=1, pc_range=point_cloud_range, - num_points_in_voxel=4, + num_points_in_voxel=4, return_intermediate=False, transformerlayers=dict( - type='VoxelFormerLayer', + type="VoxelFormerLayer", attn_cfgs=[ dict( - type='VoxelTemporalSelfAttention', + type="VoxelTemporalSelfAttention", embed_dims=voxel_encoder1_dim, - num_points=4, - num_levels=1), + num_points=4, + num_levels=1, + ), dict( - type='SpatialCrossAttention', + type="SpatialCrossAttention", pc_range=point_cloud_range, deformable_attention=dict( - type='MSDeformableAttention3D', + type="MSDeformableAttention3D", embed_dims=voxel_encoder1_dim, num_points=8, - num_levels=_num_levels_), + num_levels=_num_levels_, + ), embed_dims=voxel_encoder1_dim, - ) + ), ], ffn_cfgs=dict( - type='FFN', + type="FFN", embed_dims=voxel_encoder1_dim, - feedforward_channels=voxel_encoder1_dim*2, + feedforward_channels=voxel_encoder1_dim * 2, num_fcs=2, ffn_drop=0.1, - act_cfg=dict(type='ReLU', inplace=True), - ), - feedforward_channels=voxel_encoder1_dim*2, + act_cfg=dict(type="ReLU", inplace=True), + ), + feedforward_channels=voxel_encoder1_dim * 2, ffn_dropout=0.1, - operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm'))), - ), + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), + ), ), + ), bbox_coder=dict( - type='NMSFreeCoder', + type="NMSFreeCoder", post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], pc_range=point_cloud_range, max_num=300, voxel_size=voxel_size, - num_classes=10), + num_classes=10, + ), positional_encoding=dict( - type='LearnedPositionalEncoding', + type="LearnedPositionalEncoding", num_feats=_pos_dim_, row_num_embed=bev_h_, col_num_embed=bev_w_, - ), + ), loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=2.0), - loss_bbox=dict(type='L1Loss', loss_weight=0.25), - loss_iou=dict(type='GIoULoss', loss_weight=0.0), + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type="L1Loss", loss_weight=0.25), + loss_iou=dict(type="GIoULoss", loss_weight=0.0), loss_occupancy=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0)), + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, 
loss_weight=1.0 + ), + ), # model training and testing settings - train_cfg=dict(pts=dict( - grid_size=[512, 512, 1], - voxel_size=voxel_size, - point_cloud_range=point_cloud_range, - out_size_factor=4, - assigner=dict( - type='HungarianAssigner3D', - cls_cost=dict(type='FocalLossCost', weight=2.0), - reg_cost=dict(type='BBox3DL1Cost', weight=0.25), - iou_cost=dict(type='IoUCost', weight=0.0), # Fake cost. This is just to make it compatible with DETR head. - pc_range=point_cloud_range)))) + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type="HungarianAssigner3D", + cls_cost=dict(type="FocalLossCost", weight=2.0), + reg_cost=dict(type="BBox3DL1Cost", weight=0.25), + iou_cost=dict( + type="IoUCost", weight=0.0 + ), # Fake cost. This is just to make it compatible with DETR head. + pc_range=point_cloud_range, + ), + ) + ), +) -dataset_type = 'CustomNuScenesDataset' -data_root = 'data/nuscenes/' +dataset_type = "CustomNuScenesDataset" +data_root = "data/nuscenes/" # file_client_args = dict(backend='petrel') train_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='PhotoMetricDistortionMultiViewImage'), - dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), - dict(type='LoadOccupancyGT'), - dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), - dict(type='ObjectNameFilter', classes=class_names), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - dict(type='PadMultiViewImage', size_divisor=32), - dict(type='CustomDefaultFormatBundle3D', class_names=class_names), # use custom - dict(type='CustomCollect3D', keys=['gt_bboxes_3d', 'gt_labels_3d', 'img', 'occ_gts']) + dict(type="LoadMultiViewImageFromFiles", to_float32=True), + dict(type="PhotoMetricDistortionMultiViewImage"), + dict( + type="LoadAnnotations3D", + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False, + ), + dict(type="LoadOccupancyGT"), + dict(type="ObjectRangeFilter", point_cloud_range=point_cloud_range), + dict(type="ObjectNameFilter", classes=class_names), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="PadMultiViewImage", size_divisor=32), + dict(type="CustomDefaultFormatBundle3D", class_names=class_names), # use custom + dict( + type="CustomCollect3D", keys=["gt_bboxes_3d", "gt_labels_3d", "img", "occ_gts"] + ), ] test_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - dict(type='PadMultiViewImage', size_divisor=32), + dict(type="LoadMultiViewImageFromFiles", to_float32=True), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="PadMultiViewImage", size_divisor=32), dict( - type='MultiScaleFlipAug3D', + type="MultiScaleFlipAug3D", img_scale=(1600, 900), pts_scale_ratio=1, flip=False, transforms=[ dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='CustomCollect3D', keys=['img']) - ]) + type="DefaultFormatBundle3D", class_names=class_names, with_label=False + ), + dict(type="CustomCollect3D", keys=["img"]), + ], + ), ] data = dict( @@ -290,10 +337,10 @@ train=dict( type=dataset_type, use_occ_gts=use_occ_gts, - load_occ_lidarseg=load_occ_lidarseg, # 10 foreground + 6 background + load_occ_lidarseg=load_occ_lidarseg, # 10 foreground + 6 background data_root=data_root, - ceph_dir='nuscenes:s3://openmmlab/datasets/detection3d/nuscenes', - 
ann_file='data/nuscenes_occupancy/nuscenes_infos_temporal_train_occ_gt.pkl', + ceph_dir="nuscenes:s3://openmmlab/datasets/detection3d/nuscenes", + ann_file="data/nuscenes_occupancy/nuscenes_infos_temporal_train_occ_gt.pkl", pipeline=train_pipeline, classes=class_names, modality=input_modality, @@ -303,54 +350,60 @@ queue_length=queue_length, # we use box_type_3d='LiDAR' in kitti and nuscenes dataset # and box_type_3d='Depth' in sunrgbd and scannet dataset. - box_type_3d='LiDAR'), - val=dict(type=dataset_type, - load_occ_lidarseg=load_occ_lidarseg, # 10 foreground + 6 background - data_root=data_root, - ceph_dir='nuscenes:s3://openmmlab/datasets/detection3d/nuscenes', - ann_file='data/nuscenes_occupancy/nuscenes_infos_temporal_val_occ_gt.pkl', - pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality, samples_per_gpu=1), - test=dict(type=dataset_type, - load_occ_lidarseg=load_occ_lidarseg, # 10 foreground + 6 background - data_root=data_root, - ceph_dir='nuscenes:s3://openmmlab/datasets/detection3d/nuscenes', - ann_file='data/nuscenes_occupancy/nuscenes_infos_temporal_val_occ_gt.pkl', - pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality), - shuffler_sampler=dict(type='DistributedGroupSampler'), - nonshuffler_sampler=dict(type='DistributedSampler') + box_type_3d="LiDAR", + ), + val=dict( + type=dataset_type, + load_occ_lidarseg=load_occ_lidarseg, # 10 foreground + 6 background + data_root=data_root, + ceph_dir="nuscenes:s3://openmmlab/datasets/detection3d/nuscenes", + ann_file="data/nuscenes_occupancy/nuscenes_infos_temporal_val_occ_gt.pkl", + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + ), + test=dict( + type=dataset_type, + load_occ_lidarseg=load_occ_lidarseg, # 10 foreground + 6 background + data_root=data_root, + ceph_dir="nuscenes:s3://openmmlab/datasets/detection3d/nuscenes", + ann_file="data/nuscenes_occupancy/nuscenes_infos_temporal_val_occ_gt.pkl", + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + ), + shuffler_sampler=dict(type="DistributedGroupSampler"), + nonshuffler_sampler=dict(type="DistributedSampler"), ) optimizer = dict( - type='AdamW', + type="AdamW", lr=2e-4, - paramwise_cfg=dict( - custom_keys={ - 'img_backbone': dict(lr_mult=0.1), - }), - weight_decay=0.01) + paramwise_cfg=dict(custom_keys={"img_backbone": dict(lr_mult=0.1)}), + weight_decay=0.01, +) optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict( - policy='CosineAnnealing', - warmup='linear', + policy="CosineAnnealing", + warmup="linear", warmup_iters=500, warmup_ratio=1.0 / 3, - min_lr_ratio=1e-3) + min_lr_ratio=1e-3, +) total_epochs = 24 evaluation = dict(interval=1, pipeline=test_pipeline) -runner = dict(type='EpochBasedRunner', max_epochs=total_epochs) -load_from = 'ckpts/r101_dcn_fcos3d_pretrain.pth' +runner = dict(type="EpochBasedRunner", max_epochs=total_epochs) +load_from = "ckpts/r101_dcn_fcos3d_pretrain.pth" log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook'), - dict(type='TensorboardLoggerHook') - ]) + interval=50, hooks=[dict(type="TextLoggerHook"), dict(type="TensorboardLoggerHook")] +) checkpoint_config = dict(interval=1) -find_unused_parameters = True \ No newline at end of file +find_unused_parameters = True diff --git a/Chapter08-FinalProject/OccNet/projects/configs/hybrid/hybrid_tiny_occ.py 
b/Chapter08-FinalProject/OccNet/projects/configs/hybrid/hybrid_tiny_occ.py index e6ad491..2506b40 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/hybrid/hybrid_tiny_occ.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/hybrid/hybrid_tiny_occ.py @@ -1,13 +1,10 @@ # tiny model ResNet50 # occupancy_size = 0.5 -_base_ = [ - '../datasets/custom_nus-3d.py', - '../_base_/default_runtime.py' -] +_base_ = ["../datasets/custom_nus-3d.py", "../_base_/default_runtime.py"] # plugin = True -plugin_dir = 'projects/mmdet3d_plugin/' +plugin_dir = "projects/mmdet3d_plugin/" # If point cloud range is changed, the models should also change their point # cloud range accordingly @@ -16,47 +13,53 @@ occupancy_size = [0.5, 0.5, 0.5] img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True +) # For nuScenes we usually do 10-class detection class_names = [ - 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', - 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' + "car", + "truck", + "construction_vehicle", + "bus", + "trailer", + "barrier", + "motorcycle", + "bicycle", + "pedestrian", + "traffic_cone", ] input_modality = dict( - use_lidar=False, - use_camera=True, - use_radar=False, - use_map=False, - use_external=True) + use_lidar=False, use_camera=True, use_radar=False, use_map=False, use_external=True +) _dim_ = 256 _occupancy_dim_ = 128 -_pos_dim_ = _dim_//2 -_ffn_dim_ = _dim_*2 +_pos_dim_ = _dim_ // 2 +_ffn_dim_ = _dim_ * 2 _num_levels_ = 1 bev_h_ = 200 bev_w_ = 200 bev_z_ = 16 -queue_length = 3 # each sequence contains `queue_length` frames. +queue_length = 3 # each sequence contains `queue_length` frames. use_occ_gts = True only_occ = True only_det = False # info of voxel block encoder -voxel_encoder1_dim = _dim_//2 -voxel_encoder2_dim = _dim_//2 -voxel_encoder3_dim = _dim_//4 -voxel_encoder4_dim = _dim_//4 +voxel_encoder1_dim = _dim_ // 2 +voxel_encoder2_dim = _dim_ // 2 +voxel_encoder3_dim = _dim_ // 4 +voxel_encoder4_dim = _dim_ // 4 bev_z1 = 2 bev_z2 = 4 bev_z3 = 8 bev_z4 = 16 -_pos_dim_1 = voxel_encoder1_dim//2 -_pos_dim_2 = voxel_encoder2_dim//2 -_pos_dim_3 = voxel_encoder3_dim//2 -_pos_dim_4 = voxel_encoder4_dim//2 +_pos_dim_1 = voxel_encoder1_dim // 2 +_pos_dim_2 = voxel_encoder2_dim // 2 +_pos_dim_3 = voxel_encoder3_dim // 2 +_pos_dim_4 = voxel_encoder4_dim // 2 last_voxel_dims = voxel_encoder4_dim @@ -67,32 +70,34 @@ # box_query_dims = voxel_encoder4_dim model = dict( - type='HybridFormer', + type="HybridFormer", use_grid_mask=True, video_test_mode=True, use_occ_gts=use_occ_gts, only_occ=only_occ, only_det=only_det, - pretrained=dict(img='torchvision://resnet50'), + pretrained=dict(img="torchvision://resnet50"), img_backbone=dict( - type='ResNet', + type="ResNet", depth=50, num_stages=4, out_indices=(3,), frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), + norm_cfg=dict(type="BN", requires_grad=False), norm_eval=True, - style='pytorch'), + style="pytorch", + ), img_neck=dict( - type='FPN', + type="FPN", in_channels=[2048], out_channels=_dim_, start_level=0, - add_extra_convs='on_output', + add_extra_convs="on_output", num_outs=_num_levels_, - relu_before_extra_convs=True), + relu_before_extra_convs=True, + ), pts_bbox_head=dict( - type='HybridFormerOccupancyHead', + type="HybridFormerOccupancyHead", bev_h=bev_h_, bev_w=bev_w_, bev_z=bev_z_, @@ -111,183 +116,224 @@ last_voxel_dims=last_voxel_dims, box_query_dims=box_query_dims, # 
detection head transformer=dict( - type='HybridPerceptionTransformer', + type="HybridPerceptionTransformer", rotate_prev_bev=True, use_shift=True, use_can_bus=True, embed_dims=_dim_, decoder_on_bev=decoder_on_bev, - encoder_embed_dims=[_dim_, voxel_encoder1_dim, voxel_encoder2_dim, voxel_encoder3_dim, voxel_encoder4_dim], # the dim of cascaded voxel encoder - feature_map_z=[1, bev_z1, bev_z2, bev_z3, bev_z4], # the height of cascaded voxel encoder + encoder_embed_dims=[ + _dim_, + voxel_encoder1_dim, + voxel_encoder2_dim, + voxel_encoder3_dim, + voxel_encoder4_dim, + ], # the dim of cascaded voxel encoder + feature_map_z=[ + 1, + bev_z1, + bev_z2, + bev_z3, + bev_z4, + ], # the height of cascaded voxel encoder pos_dims=[_pos_dim_, _pos_dim_1, _pos_dim_2, _pos_dim_3, _pos_dim_4], position=dict( bev=dict( - type='LearnedPositionalEncoding', + type="LearnedPositionalEncoding", num_feats=_pos_dim_, row_num_embed=bev_h_, col_num_embed=bev_w_, - ), + ), voxel=dict( - type='VoxelLearnedPositionalEncoding', + type="VoxelLearnedPositionalEncoding", num_feats=_pos_dim_1, row_num_embed=bev_h_, col_num_embed=bev_w_, z_num_embed=bev_z1, - ) - ), + ), + ), encoder=dict( bev=dict( # the bev encoder - type='BEVFormerEncoder', + type="BEVFormerEncoder", num_layers=1, pc_range=point_cloud_range, num_points_in_pillar=4, return_intermediate=False, transformerlayers=dict( - type='BEVFormerLayer', + type="BEVFormerLayer", attn_cfgs=[ dict( - type='TemporalSelfAttention', + type="TemporalSelfAttention", embed_dims=_dim_, - num_points=4, - num_levels=1), + num_points=4, + num_levels=1, + ), dict( - type='SpatialCrossAttention', + type="SpatialCrossAttention", pc_range=point_cloud_range, deformable_attention=dict( - type='MSDeformableAttention3D', + type="MSDeformableAttention3D", embed_dims=_dim_, num_points=8, - num_levels=_num_levels_), + num_levels=_num_levels_, + ), embed_dims=_dim_, - ) + ), ], ffn_cfgs=dict( - type='FFN', - embed_dims=_dim_, - feedforward_channels=_ffn_dim_, - num_fcs=2, - ffn_drop=0.1, - act_cfg=dict(type='ReLU', inplace=True), + type="FFN", + embed_dims=_dim_, + feedforward_channels=_ffn_dim_, + num_fcs=2, + ffn_drop=0.1, + act_cfg=dict(type="ReLU", inplace=True), ), feedforward_channels=_ffn_dim_, ffn_dropout=0.1, - operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm'))), + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), + ), voxel=dict( # the config of first cascaded voxel encoder - type='VoxelFormerEncoder', + type="VoxelFormerEncoder", num_layers=1, pc_range=point_cloud_range, - num_points_in_voxel=4, + num_points_in_voxel=4, return_intermediate=False, transformerlayers=dict( - type='VoxelFormerLayer', + type="VoxelFormerLayer", attn_cfgs=[ dict( - type='VoxelTemporalSelfAttention', + type="VoxelTemporalSelfAttention", embed_dims=voxel_encoder1_dim, - num_points=4, - num_levels=1), + num_points=4, + num_levels=1, + ), dict( - type='SpatialCrossAttention', + type="SpatialCrossAttention", pc_range=point_cloud_range, deformable_attention=dict( - type='MSDeformableAttention3D', + type="MSDeformableAttention3D", embed_dims=voxel_encoder1_dim, num_points=8, - num_levels=_num_levels_), + num_levels=_num_levels_, + ), embed_dims=voxel_encoder1_dim, - ) + ), ], ffn_cfgs=dict( - type='FFN', + type="FFN", embed_dims=voxel_encoder1_dim, - feedforward_channels=voxel_encoder1_dim*2, + feedforward_channels=voxel_encoder1_dim * 2, num_fcs=2, ffn_drop=0.1, - act_cfg=dict(type='ReLU', inplace=True), - ), - 
feedforward_channels=voxel_encoder1_dim*2, + act_cfg=dict(type="ReLU", inplace=True), + ), + feedforward_channels=voxel_encoder1_dim * 2, ffn_dropout=0.1, - operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm'))), - ), + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), + ), ), + ), bbox_coder=dict( - type='NMSFreeCoder', + type="NMSFreeCoder", post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], pc_range=point_cloud_range, max_num=300, voxel_size=voxel_size, - num_classes=10), + num_classes=10, + ), positional_encoding=dict( - type='LearnedPositionalEncoding', + type="LearnedPositionalEncoding", num_feats=_pos_dim_, row_num_embed=bev_h_, col_num_embed=bev_w_, - ), + ), loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=2.0), - loss_bbox=dict(type='L1Loss', loss_weight=0.25), - loss_iou=dict(type='GIoULoss', loss_weight=0.0), + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type="L1Loss", loss_weight=0.25), + loss_iou=dict(type="GIoULoss", loss_weight=0.0), loss_occupancy=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0)), + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0 + ), + ), # model training and testing settings - train_cfg=dict(pts=dict( - grid_size=[512, 512, 1], - voxel_size=voxel_size, - point_cloud_range=point_cloud_range, - out_size_factor=4, - assigner=dict( - type='HungarianAssigner3D', - cls_cost=dict(type='FocalLossCost', weight=2.0), - reg_cost=dict(type='BBox3DL1Cost', weight=0.25), - iou_cost=dict(type='IoUCost', weight=0.0), # Fake cost. This is just to make it compatible with DETR head. - pc_range=point_cloud_range)))) + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type="HungarianAssigner3D", + cls_cost=dict(type="FocalLossCost", weight=2.0), + reg_cost=dict(type="BBox3DL1Cost", weight=0.25), + iou_cost=dict( + type="IoUCost", weight=0.0 + ), # Fake cost. This is just to make it compatible with DETR head. 
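# Schematically, this HungarianAssigner3D builds a (num_queries x num_gts)
# cost matrix from the weighted classification and L1 box costs above (the
# zero-weight IoU term is inert) and solves a one-to-one matching; unmatched
# queries are supervised as background, which is what lets the head skip NMS.
# A sketch under those assumptions (names illustrative, scipy-based):
#
#   from scipy.optimize import linear_sum_assignment
#   cost = 2.0 * cls_cost + 0.25 * reg_cost  # per-pair costs, (300, num_gts)
#   query_idx, gt_idx = linear_sum_assignment(cost)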
+ pc_range=point_cloud_range, + ), + ) + ), +) -dataset_type = 'CustomNuScenesDataset' -data_root = 'data/nuscenes/' -file_client_args = dict(backend='disk') +dataset_type = "CustomNuScenesDataset" +data_root = "data/nuscenes/" +file_client_args = dict(backend="disk") train_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='PhotoMetricDistortionMultiViewImage'), - dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), - dict(type='LoadOccupancyGT'), - dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), - dict(type='ObjectNameFilter', classes=class_names), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), - dict(type='PadMultiViewImage', size_divisor=32), - dict(type='CustomDefaultFormatBundle3D', class_names=class_names), - dict(type='CustomCollect3D', keys=['gt_bboxes_3d', 'gt_labels_3d', 'img', 'occ_gts']) + dict(type="LoadMultiViewImageFromFiles", to_float32=True), + dict(type="PhotoMetricDistortionMultiViewImage"), + dict( + type="LoadAnnotations3D", + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False, + ), + dict(type="LoadOccupancyGT"), + dict(type="ObjectRangeFilter", point_cloud_range=point_cloud_range), + dict(type="ObjectNameFilter", classes=class_names), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="RandomScaleImageMultiViewImage", scales=[0.5]), + dict(type="PadMultiViewImage", size_divisor=32), + dict(type="CustomDefaultFormatBundle3D", class_names=class_names), + dict( + type="CustomCollect3D", keys=["gt_bboxes_3d", "gt_labels_3d", "img", "occ_gts"] + ), ] test_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - + dict(type="LoadMultiViewImageFromFiles", to_float32=True), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), dict( - type='MultiScaleFlipAug3D', + type="MultiScaleFlipAug3D", img_scale=(1600, 900), pts_scale_ratio=1, flip=False, transforms=[ - dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), - dict(type='PadMultiViewImage', size_divisor=32), + dict(type="RandomScaleImageMultiViewImage", scales=[0.5]), + dict(type="PadMultiViewImage", size_divisor=32), dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='CustomCollect3D', keys=['img']) - ]) + type="DefaultFormatBundle3D", class_names=class_names, with_label=False + ), + dict(type="CustomCollect3D", keys=["img"]), + ], + ), ] data = dict( @@ -295,9 +341,9 @@ workers_per_gpu=4, train=dict( type=dataset_type, - use_occ_gts=use_occ_gts, # 10 foreground + 6 background + use_occ_gts=use_occ_gts, # 10 foreground + 6 background data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_train_occ_gt.pkl', + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_train_occ_gt.pkl", pipeline=train_pipeline, classes=class_names, modality=input_modality, @@ -307,49 +353,55 @@ queue_length=queue_length, # we use box_type_3d='LiDAR' in kitti and nuscenes dataset # and box_type_3d='Depth' in sunrgbd and scannet dataset. 
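# Coordinate recap, per the mmdet3d convention docs: 'LiDAR' boxes live in an
# x-forward / y-left / z-up frame with bottom-center origins, whereas 'Depth'
# boxes use the x-right / y-forward / z-up frame of RGB-D indoor scans.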
- box_type_3d='LiDAR'), - val=dict(type=dataset_type, - data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl', - pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality, samples_per_gpu=1), - test=dict(type=dataset_type, - data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl', - pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality), - shuffler_sampler=dict(type='DistributedGroupSampler'), - nonshuffler_sampler=dict(type='DistributedSampler') + box_type_3d="LiDAR", + ), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl", + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + ), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl", + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + ), + shuffler_sampler=dict(type="DistributedGroupSampler"), + nonshuffler_sampler=dict(type="DistributedSampler"), ) optimizer = dict( - type='AdamW', + type="AdamW", lr=2e-4, - paramwise_cfg=dict( - custom_keys={ - 'img_backbone': dict(lr_mult=0.1), - }), - weight_decay=0.01) + paramwise_cfg=dict(custom_keys={"img_backbone": dict(lr_mult=0.1)}), + weight_decay=0.01, +) optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict( - policy='CosineAnnealing', - warmup='linear', + policy="CosineAnnealing", + warmup="linear", warmup_iters=500, warmup_ratio=1.0 / 3, - min_lr_ratio=1e-3) + min_lr_ratio=1e-3, +) total_epochs = 24 evaluation = dict(interval=1, pipeline=test_pipeline) -runner = dict(type='EpochBasedRunner', max_epochs=total_epochs) +runner = dict(type="EpochBasedRunner", max_epochs=total_epochs) log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook'), - dict(type='TensorboardLoggerHook') - ]) + interval=50, hooks=[dict(type="TextLoggerHook"), dict(type="TensorboardLoggerHook")] +) checkpoint_config = dict(interval=1) -find_unused_parameters = False \ No newline at end of file +find_unused_parameters = False diff --git a/Chapter08-FinalProject/OccNet/projects/configs/hybrid/hybrid_tiny_occ_intern_s.py b/Chapter08-FinalProject/OccNet/projects/configs/hybrid/hybrid_tiny_occ_intern_s.py index 72c3e99..4cd9953 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/hybrid/hybrid_tiny_occ_intern_s.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/hybrid/hybrid_tiny_occ_intern_s.py @@ -1,13 +1,10 @@ # tiny model ResNet50 # occupancy_size = 0.5 -_base_ = [ - '../datasets/custom_nus-3d.py', - '../_base_/default_runtime.py' -] +_base_ = ["../datasets/custom_nus-3d.py", "../_base_/default_runtime.py"] # plugin = True -plugin_dir = 'projects/mmdet3d_plugin/' +plugin_dir = "projects/mmdet3d_plugin/" # If point cloud range is changed, the models should also change their point # cloud range accordingly @@ -16,47 +13,53 @@ occupancy_size = [0.5, 0.5, 0.5] img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True +) # For nuScenes we usually do 10-class detection class_names = [ - 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', - 'motorcycle', 'bicycle', 
'pedestrian', 'traffic_cone' + "car", + "truck", + "construction_vehicle", + "bus", + "trailer", + "barrier", + "motorcycle", + "bicycle", + "pedestrian", + "traffic_cone", ] input_modality = dict( - use_lidar=False, - use_camera=True, - use_radar=False, - use_map=False, - use_external=True) + use_lidar=False, use_camera=True, use_radar=False, use_map=False, use_external=True +) _dim_ = 256 _occupancy_dim_ = 128 -_pos_dim_ = _dim_//2 -_ffn_dim_ = _dim_*2 +_pos_dim_ = _dim_ // 2 +_ffn_dim_ = _dim_ * 2 _num_levels_ = 1 bev_h_ = 200 bev_w_ = 200 bev_z_ = 16 -queue_length = 3 # each sequence contains `queue_length` frames. +queue_length = 3 # each sequence contains `queue_length` frames. use_occ_gts = True only_occ = True only_det = False # info of voxel block encoder -voxel_encoder1_dim = _dim_//2 -voxel_encoder2_dim = _dim_//2 -voxel_encoder3_dim = _dim_//4 -voxel_encoder4_dim = _dim_//4 +voxel_encoder1_dim = _dim_ // 2 +voxel_encoder2_dim = _dim_ // 2 +voxel_encoder3_dim = _dim_ // 4 +voxel_encoder4_dim = _dim_ // 4 bev_z1 = 2 bev_z2 = 4 bev_z3 = 8 bev_z4 = 16 -_pos_dim_1 = voxel_encoder1_dim//2 -_pos_dim_2 = voxel_encoder2_dim//2 -_pos_dim_3 = voxel_encoder3_dim//2 -_pos_dim_4 = voxel_encoder4_dim//2 +_pos_dim_1 = voxel_encoder1_dim // 2 +_pos_dim_2 = voxel_encoder2_dim // 2 +_pos_dim_3 = voxel_encoder3_dim // 2 +_pos_dim_4 = voxel_encoder4_dim // 2 last_voxel_dims = voxel_encoder4_dim @@ -66,9 +69,9 @@ # decoder_on_bev = False # box_query_dims = voxel_encoder4_dim -pretrained = 'ckpts/mask_rcnn_internimage_s_fpn_3x_coco.pth' +pretrained = "ckpts/mask_rcnn_internimage_s_fpn_3x_coco.pth" model = dict( - type='HybridFormer', + type="HybridFormer", use_grid_mask=True, video_test_mode=True, use_occ_gts=use_occ_gts, @@ -76,30 +79,32 @@ only_det=only_det, img_backbone=dict( _delete_=True, - type='InternImage', - core_op='DCNv3', + type="InternImage", + core_op="DCNv3", channels=80, depths=[4, 4, 21, 4], groups=[5, 10, 20, 40], - mlp_ratio=4., + mlp_ratio=4.0, drop_path_rate=0.3, - norm_layer='LN', + norm_layer="LN", layer_scale=1.0, offset_scale=1.0, post_norm=True, with_cp=True, out_indices=(3,), - init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + init_cfg=dict(type="Pretrained", checkpoint=pretrained), + ), img_neck=dict( - type='FPN', + type="FPN", in_channels=[640], out_channels=_dim_, start_level=0, - add_extra_convs='on_output', + add_extra_convs="on_output", num_outs=_num_levels_, - relu_before_extra_convs=True), + relu_before_extra_convs=True, + ), pts_bbox_head=dict( - type='HybridFormerOccupancyHead', + type="HybridFormerOccupancyHead", bev_h=bev_h_, bev_w=bev_w_, bev_z=bev_z_, @@ -118,183 +123,224 @@ last_voxel_dims=last_voxel_dims, box_query_dims=box_query_dims, # detection head transformer=dict( - type='HybridPerceptionTransformer', + type="HybridPerceptionTransformer", rotate_prev_bev=True, use_shift=True, use_can_bus=True, embed_dims=_dim_, decoder_on_bev=decoder_on_bev, - encoder_embed_dims=[_dim_, voxel_encoder1_dim, voxel_encoder2_dim, voxel_encoder3_dim, voxel_encoder4_dim], # the dim of cascaded voxel encoder - feature_map_z=[1, bev_z1, bev_z2, bev_z3, bev_z4], # the height of cascaded voxel encoder + encoder_embed_dims=[ + _dim_, + voxel_encoder1_dim, + voxel_encoder2_dim, + voxel_encoder3_dim, + voxel_encoder4_dim, + ], # the dim of cascaded voxel encoder + feature_map_z=[ + 1, + bev_z1, + bev_z2, + bev_z3, + bev_z4, + ], # the height of cascaded voxel encoder pos_dims=[_pos_dim_, _pos_dim_1, _pos_dim_2, _pos_dim_3, _pos_dim_4], position=dict( bev=dict( - 
type='LearnedPositionalEncoding', + type="LearnedPositionalEncoding", num_feats=_pos_dim_, row_num_embed=bev_h_, col_num_embed=bev_w_, - ), + ), voxel=dict( - type='VoxelLearnedPositionalEncoding', + type="VoxelLearnedPositionalEncoding", num_feats=_pos_dim_1, row_num_embed=bev_h_, col_num_embed=bev_w_, z_num_embed=bev_z1, - ) - ), + ), + ), encoder=dict( bev=dict( # the bev encoder - type='BEVFormerEncoder', + type="BEVFormerEncoder", num_layers=1, pc_range=point_cloud_range, num_points_in_pillar=4, return_intermediate=False, transformerlayers=dict( - type='BEVFormerLayer', + type="BEVFormerLayer", attn_cfgs=[ dict( - type='TemporalSelfAttention', + type="TemporalSelfAttention", embed_dims=_dim_, - num_points=4, - num_levels=1), + num_points=4, + num_levels=1, + ), dict( - type='SpatialCrossAttention', + type="SpatialCrossAttention", pc_range=point_cloud_range, deformable_attention=dict( - type='MSDeformableAttention3D', + type="MSDeformableAttention3D", embed_dims=_dim_, num_points=8, - num_levels=_num_levels_), + num_levels=_num_levels_, + ), embed_dims=_dim_, - ) + ), ], ffn_cfgs=dict( - type='FFN', - embed_dims=_dim_, - feedforward_channels=_ffn_dim_, - num_fcs=2, - ffn_drop=0.1, - act_cfg=dict(type='ReLU', inplace=True), + type="FFN", + embed_dims=_dim_, + feedforward_channels=_ffn_dim_, + num_fcs=2, + ffn_drop=0.1, + act_cfg=dict(type="ReLU", inplace=True), ), feedforward_channels=_ffn_dim_, ffn_dropout=0.1, - operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm'))), + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), + ), voxel=dict( # the config of first cascaded voxel encoder - type='VoxelFormerEncoder', + type="VoxelFormerEncoder", num_layers=1, pc_range=point_cloud_range, - num_points_in_voxel=4, + num_points_in_voxel=4, return_intermediate=False, transformerlayers=dict( - type='VoxelFormerLayer', + type="VoxelFormerLayer", attn_cfgs=[ dict( - type='VoxelTemporalSelfAttention', + type="VoxelTemporalSelfAttention", embed_dims=voxel_encoder1_dim, - num_points=4, - num_levels=1), + num_points=4, + num_levels=1, + ), dict( - type='SpatialCrossAttention', + type="SpatialCrossAttention", pc_range=point_cloud_range, deformable_attention=dict( - type='MSDeformableAttention3D', + type="MSDeformableAttention3D", embed_dims=voxel_encoder1_dim, num_points=8, - num_levels=_num_levels_), + num_levels=_num_levels_, + ), embed_dims=voxel_encoder1_dim, - ) + ), ], ffn_cfgs=dict( - type='FFN', + type="FFN", embed_dims=voxel_encoder1_dim, - feedforward_channels=voxel_encoder1_dim*2, + feedforward_channels=voxel_encoder1_dim * 2, num_fcs=2, ffn_drop=0.1, - act_cfg=dict(type='ReLU', inplace=True), - ), - feedforward_channels=voxel_encoder1_dim*2, + act_cfg=dict(type="ReLU", inplace=True), + ), + feedforward_channels=voxel_encoder1_dim * 2, ffn_dropout=0.1, - operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm'))), - ), + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), + ), ), + ), bbox_coder=dict( - type='NMSFreeCoder', + type="NMSFreeCoder", post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], pc_range=point_cloud_range, max_num=300, voxel_size=voxel_size, - num_classes=10), + num_classes=10, + ), positional_encoding=dict( - type='LearnedPositionalEncoding', + type="LearnedPositionalEncoding", num_feats=_pos_dim_, row_num_embed=bev_h_, col_num_embed=bev_w_, - ), + ), loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - 
loss_weight=2.0), - loss_bbox=dict(type='L1Loss', loss_weight=0.25), - loss_iou=dict(type='GIoULoss', loss_weight=0.0), + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type="L1Loss", loss_weight=0.25), + loss_iou=dict(type="GIoULoss", loss_weight=0.0), loss_occupancy=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0)), + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0 + ), + ), # model training and testing settings - train_cfg=dict(pts=dict( - grid_size=[512, 512, 1], - voxel_size=voxel_size, - point_cloud_range=point_cloud_range, - out_size_factor=4, - assigner=dict( - type='HungarianAssigner3D', - cls_cost=dict(type='FocalLossCost', weight=2.0), - reg_cost=dict(type='BBox3DL1Cost', weight=0.25), - iou_cost=dict(type='IoUCost', weight=0.0), # Fake cost. This is just to make it compatible with DETR head. - pc_range=point_cloud_range)))) + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type="HungarianAssigner3D", + cls_cost=dict(type="FocalLossCost", weight=2.0), + reg_cost=dict(type="BBox3DL1Cost", weight=0.25), + iou_cost=dict( + type="IoUCost", weight=0.0 + ), # Fake cost. This is just to make it compatible with DETR head. + pc_range=point_cloud_range, + ), + ) + ), +) -dataset_type = 'CustomNuScenesDataset' -data_root = 'data/nuscenes/' -file_client_args = dict(backend='disk') +dataset_type = "CustomNuScenesDataset" +data_root = "data/nuscenes/" +file_client_args = dict(backend="disk") train_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='PhotoMetricDistortionMultiViewImage'), - dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), - dict(type='LoadOccupancyGT'), - dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), - dict(type='ObjectNameFilter', classes=class_names), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), - dict(type='PadMultiViewImage', size_divisor=32), - dict(type='CustomDefaultFormatBundle3D', class_names=class_names), - dict(type='CustomCollect3D', keys=['gt_bboxes_3d', 'gt_labels_3d', 'img', 'occ_gts']) + dict(type="LoadMultiViewImageFromFiles", to_float32=True), + dict(type="PhotoMetricDistortionMultiViewImage"), + dict( + type="LoadAnnotations3D", + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False, + ), + dict(type="LoadOccupancyGT"), + dict(type="ObjectRangeFilter", point_cloud_range=point_cloud_range), + dict(type="ObjectNameFilter", classes=class_names), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="RandomScaleImageMultiViewImage", scales=[0.5]), + dict(type="PadMultiViewImage", size_divisor=32), + dict(type="CustomDefaultFormatBundle3D", class_names=class_names), + dict( + type="CustomCollect3D", keys=["gt_bboxes_3d", "gt_labels_3d", "img", "occ_gts"] + ), ] test_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - + dict(type="LoadMultiViewImageFromFiles", to_float32=True), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), dict( - type='MultiScaleFlipAug3D', + type="MultiScaleFlipAug3D", img_scale=(1600, 900), pts_scale_ratio=1, flip=False, transforms=[ - dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), - dict(type='PadMultiViewImage', 
size_divisor=32), + dict(type="RandomScaleImageMultiViewImage", scales=[0.5]), + dict(type="PadMultiViewImage", size_divisor=32), dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='CustomCollect3D', keys=['img']) - ]) + type="DefaultFormatBundle3D", class_names=class_names, with_label=False + ), + dict(type="CustomCollect3D", keys=["img"]), + ], + ), ] data = dict( @@ -302,9 +348,9 @@ workers_per_gpu=4, train=dict( type=dataset_type, - use_occ_gts=use_occ_gts, # 10 foreground + 6 background + use_occ_gts=use_occ_gts, # 10 foreground + 6 background data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_train_occ_gt.pkl', + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_train_occ_gt.pkl", pipeline=train_pipeline, classes=class_names, modality=input_modality, @@ -314,49 +360,55 @@ queue_length=queue_length, # we use box_type_3d='LiDAR' in kitti and nuscenes dataset # and box_type_3d='Depth' in sunrgbd and scannet dataset. - box_type_3d='LiDAR'), - val=dict(type=dataset_type, - data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl', - pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality, samples_per_gpu=1), - test=dict(type=dataset_type, - data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl', - pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality), - shuffler_sampler=dict(type='DistributedGroupSampler'), - nonshuffler_sampler=dict(type='DistributedSampler') + box_type_3d="LiDAR", + ), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl", + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + ), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl", + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + ), + shuffler_sampler=dict(type="DistributedGroupSampler"), + nonshuffler_sampler=dict(type="DistributedSampler"), ) optimizer = dict( - type='AdamW', + type="AdamW", lr=2e-4, - paramwise_cfg=dict( - custom_keys={ - 'img_backbone': dict(lr_mult=0.1), - }), - weight_decay=0.01) + paramwise_cfg=dict(custom_keys={"img_backbone": dict(lr_mult=0.1)}), + weight_decay=0.01, +) optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict( - policy='CosineAnnealing', - warmup='linear', + policy="CosineAnnealing", + warmup="linear", warmup_iters=500, warmup_ratio=1.0 / 3, - min_lr_ratio=1e-3) + min_lr_ratio=1e-3, +) total_epochs = 24 evaluation = dict(interval=1, pipeline=test_pipeline) -runner = dict(type='EpochBasedRunner', max_epochs=total_epochs) +runner = dict(type="EpochBasedRunner", max_epochs=total_epochs) log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook'), - dict(type='TensorboardLoggerHook') - ]) + interval=50, hooks=[dict(type="TextLoggerHook"), dict(type="TensorboardLoggerHook")] +) checkpoint_config = dict(interval=1) -find_unused_parameters = False \ No newline at end of file +find_unused_parameters = False diff --git a/Chapter08-FinalProject/OccNet/projects/configs/voxelformer/voxel_base_occ.py b/Chapter08-FinalProject/OccNet/projects/configs/voxelformer/voxel_base_occ.py index 
775d7fd..055d69d 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/voxelformer/voxel_base_occ.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/voxelformer/voxel_base_occ.py @@ -1,12 +1,9 @@ # base model ResNet101 # occupancy_size = 0.5 -_base_ = [ - '../datasets/custom_nus-3d.py', - '../_base_/default_runtime.py' -] +_base_ = ["../datasets/custom_nus-3d.py", "../_base_/default_runtime.py"] # plugin = True -plugin_dir = 'projects/mmdet3d_plugin/' +plugin_dir = "projects/mmdet3d_plugin/" # If point cloud range is changed, the models should also change their point # cloud range accordingly @@ -14,62 +11,70 @@ voxel_size = [0.2, 0.2, 8] occupancy_size = [0.5, 0.5, 0.5] -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) +img_norm_cfg = dict(mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) # For nuScenes we usually do 10-class detection class_names = [ - 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', - 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' + "car", + "truck", + "construction_vehicle", + "bus", + "trailer", + "barrier", + "motorcycle", + "bicycle", + "pedestrian", + "traffic_cone", ] input_modality = dict( - use_lidar=False, - use_camera=True, - use_radar=False, - use_map=False, - use_external=True) + use_lidar=False, use_camera=True, use_radar=False, use_map=False, use_external=True +) _dim_ = 256 _occupancy_dim_ = 128 -_pos_dim_ = _dim_//2 -_ffn_dim_ = _dim_*2 -_num_levels_ = 3 # set 4 is OOM +_pos_dim_ = _dim_ // 2 +_ffn_dim_ = _dim_ * 2 +_num_levels_ = 3 # setting it to 4 causes OOM bev_h_ = 200 bev_w_ = 200 bev_z_ = 4 -queue_length = 4 # each sequence contains `queue_length` frames. +queue_length = 4 # each sequence contains `queue_length` frames. use_occ_gts = True only_occ = True only_det = False model = dict( - type='VoxelFormer', + type="VoxelFormer", use_grid_mask=True, video_test_mode=True, use_occ_gts=use_occ_gts, only_occ=only_occ, only_det=only_det, img_backbone=dict( - type='ResNet', + type="ResNet", depth=101, num_stages=4, out_indices=(1, 2, 3), frozen_stages=1, - norm_cfg=dict(type='BN2d', requires_grad=False), + norm_cfg=dict(type="BN2d", requires_grad=False), norm_eval=True, - style='caffe', - dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False), # original DCNv2 will print log when perform load_state_dict - stage_with_dcn=(False, False, True, True)), + style="caffe", + dcn=dict( + type="DCNv2", deform_groups=1, fallback_on_stride=False + ), # original DCNv2 will print a log when performing load_state_dict + stage_with_dcn=(False, False, True, True), + ), img_neck=dict( - type='FPN', - in_channels=[512, 1024, 2048], + type="FPN", + in_channels=[512, 1024, 2048], out_channels=_dim_, start_level=0, - add_extra_convs='on_output', - num_outs=_num_levels_, # can be decreased to save memory for cross-attention - relu_before_extra_convs=True), + add_extra_convs="on_output", + num_outs=_num_levels_, # can be decreased to save memory for cross-attention + relu_before_extra_convs=True, + ), pts_bbox_head=dict( - type='VoxelFormerOccupancyHead', + type="VoxelFormerOccupancyHead", bev_h=bev_h_, bev_w=bev_w_, bev_z=bev_z_, @@ -86,122 +91,142 @@ only_occ=only_occ, only_det=only_det, transformer=dict( - type='VoxelPerceptionTransformer', + type="VoxelPerceptionTransformer", rotate_prev_bev=True, use_shift=True, use_can_bus=True, embed_dims=_dim_, encoder=dict( - type='VoxelFormerEncoder', + type="VoxelFormerEncoder", num_layers=6, pc_range=point_cloud_range, - num_points_in_voxel=4,
# TODO + num_points_in_voxel=4, # TODO return_intermediate=False, transformerlayers=dict( - type='VoxelFormerLayer', + type="VoxelFormerLayer", attn_cfgs=[ dict( - type='VoxelTemporalSelfAttention', + type="VoxelTemporalSelfAttention", embed_dims=_dim_, num_points=4, # default is 4 - num_levels=1), + num_levels=1, + ), dict( - type='SpatialCrossAttention', + type="SpatialCrossAttention", pc_range=point_cloud_range, deformable_attention=dict( - type='MSDeformableAttention3D', + type="MSDeformableAttention3D", embed_dims=_dim_, num_points=8, - num_levels=_num_levels_), + num_levels=_num_levels_, + ), embed_dims=_dim_, - ) + ), ], ffn_cfgs=dict( - type='FFN', - embed_dims=_dim_, - feedforward_channels=_ffn_dim_, - num_fcs=2, - ffn_drop=0.1, - act_cfg=dict(type='ReLU', inplace=True), + type="FFN", + embed_dims=_dim_, + feedforward_channels=_ffn_dim_, + num_fcs=2, + ffn_drop=0.1, + act_cfg=dict(type="ReLU", inplace=True), ), # add ffn_cfgs when _dim_ != 256 feedforward_channels=_ffn_dim_, ffn_dropout=0.1, - operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm'))), + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), ), + ), bbox_coder=dict( - type='NMSFreeCoder', + type="NMSFreeCoder", post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], pc_range=point_cloud_range, max_num=300, voxel_size=voxel_size, - num_classes=10), + num_classes=10, + ), positional_encoding=dict( - type='VoxelLearnedPositionalEncoding', + type="VoxelLearnedPositionalEncoding", num_feats=_pos_dim_, row_num_embed=bev_h_, col_num_embed=bev_w_, z_num_embed=bev_z_, - ), + ), loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=2.0), - loss_bbox=dict(type='L1Loss', loss_weight=0.25), - loss_iou=dict(type='GIoULoss', loss_weight=0.0), + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type="L1Loss", loss_weight=0.25), + loss_iou=dict(type="GIoULoss", loss_weight=0.0), loss_occupancy=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0)), + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0 + ), + ), # model training and testing settings - train_cfg=dict(pts=dict( - grid_size=[512, 512, 1], - voxel_size=voxel_size, - point_cloud_range=point_cloud_range, - out_size_factor=4, - assigner=dict( - type='HungarianAssigner3D', - cls_cost=dict(type='FocalLossCost', weight=2.0), - reg_cost=dict(type='BBox3DL1Cost', weight=0.25), - iou_cost=dict(type='IoUCost', weight=0.0), # Fake cost. This is just to make it compatible with DETR head. - pc_range=point_cloud_range)))) + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type="HungarianAssigner3D", + cls_cost=dict(type="FocalLossCost", weight=2.0), + reg_cost=dict(type="BBox3DL1Cost", weight=0.25), + iou_cost=dict( + type="IoUCost", weight=0.0 + ), # Fake cost. This is just to make it compatible with DETR head. 
+ pc_range=point_cloud_range, + ), + ) + ), +) -dataset_type = 'CustomNuScenesDataset' -data_root = 'data/nuscenes/' -file_client_args = dict(backend='disk') +dataset_type = "CustomNuScenesDataset" +data_root = "data/nuscenes/" +file_client_args = dict(backend="disk") train_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='PhotoMetricDistortionMultiViewImage'), - dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), - dict(type='LoadOccupancyGT'), - dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), - dict(type='ObjectNameFilter', classes=class_names), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - dict(type='PadMultiViewImage', size_divisor=32), - dict(type='CustomDefaultFormatBundle3D', class_names=class_names), # use custom - dict(type='CustomCollect3D', keys=['gt_bboxes_3d', 'gt_labels_3d', 'img', 'occ_gts']) + dict(type="LoadMultiViewImageFromFiles", to_float32=True), + dict(type="PhotoMetricDistortionMultiViewImage"), + dict( + type="LoadAnnotations3D", + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False, + ), + dict(type="LoadOccupancyGT"), + dict(type="ObjectRangeFilter", point_cloud_range=point_cloud_range), + dict(type="ObjectNameFilter", classes=class_names), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="PadMultiViewImage", size_divisor=32), + dict(type="CustomDefaultFormatBundle3D", class_names=class_names), # use custom + dict( + type="CustomCollect3D", keys=["gt_bboxes_3d", "gt_labels_3d", "img", "occ_gts"] + ), ] test_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - dict(type='PadMultiViewImage', size_divisor=32), + dict(type="LoadMultiViewImageFromFiles", to_float32=True), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="PadMultiViewImage", size_divisor=32), dict( - type='MultiScaleFlipAug3D', + type="MultiScaleFlipAug3D", img_scale=(1600, 900), pts_scale_ratio=1, flip=False, transforms=[ dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='CustomCollect3D', keys=['img']) - ]) + type="DefaultFormatBundle3D", class_names=class_names, with_label=False + ), + dict(type="CustomCollect3D", keys=["img"]), + ], + ), ] data = dict( @@ -209,9 +234,9 @@ workers_per_gpu=4, train=dict( type=dataset_type, - use_occ_gts=use_occ_gts, # 10 foreground + 6 background + use_occ_gts=use_occ_gts, # 10 foreground + 6 background data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_train_occ_gt.pkl', + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_train_occ_gt.pkl", pipeline=train_pipeline, classes=class_names, modality=input_modality, @@ -221,50 +246,56 @@ queue_length=queue_length, # we use box_type_3d='LiDAR' in kitti and nuscenes dataset # and box_type_3d='Depth' in sunrgbd and scannet dataset. 
- box_type_3d='LiDAR'), - val=dict(type=dataset_type, - data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl', - pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality, samples_per_gpu=1), - test=dict(type=dataset_type, - data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl', - pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality), - shuffler_sampler=dict(type='DistributedGroupSampler'), - nonshuffler_sampler=dict(type='DistributedSampler') + box_type_3d="LiDAR", + ), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl", + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + ), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl", + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + ), + shuffler_sampler=dict(type="DistributedGroupSampler"), + nonshuffler_sampler=dict(type="DistributedSampler"), ) optimizer = dict( - type='AdamW', + type="AdamW", lr=2e-4, - paramwise_cfg=dict( - custom_keys={ - 'img_backbone': dict(lr_mult=0.1), - }), - weight_decay=0.01) + paramwise_cfg=dict(custom_keys={"img_backbone": dict(lr_mult=0.1)}), + weight_decay=0.01, +) optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict( - policy='CosineAnnealing', - warmup='linear', + policy="CosineAnnealing", + warmup="linear", warmup_iters=500, warmup_ratio=1.0 / 3, - min_lr_ratio=1e-3) + min_lr_ratio=1e-3, +) total_epochs = 24 evaluation = dict(interval=1, pipeline=test_pipeline) -runner = dict(type='EpochBasedRunner', max_epochs=total_epochs) -load_from = 'ckpts/r101_dcn_fcos3d_pretrain.pth' +runner = dict(type="EpochBasedRunner", max_epochs=total_epochs) +load_from = "ckpts/r101_dcn_fcos3d_pretrain.pth" log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook'), - dict(type='TensorboardLoggerHook') - ]) + interval=50, hooks=[dict(type="TextLoggerHook"), dict(type="TensorboardLoggerHook")] +) checkpoint_config = dict(interval=1) -find_unused_parameters = False \ No newline at end of file +find_unused_parameters = False diff --git a/Chapter08-FinalProject/OccNet/projects/configs/voxelformer/voxel_tiny_occ.py b/Chapter08-FinalProject/OccNet/projects/configs/voxelformer/voxel_tiny_occ.py index 44e4d8a..816ce3d 100644 --- a/Chapter08-FinalProject/OccNet/projects/configs/voxelformer/voxel_tiny_occ.py +++ b/Chapter08-FinalProject/OccNet/projects/configs/voxelformer/voxel_tiny_occ.py @@ -1,13 +1,10 @@ # tiny model ResNet50 # occupancy_size = 0.5 -_base_ = [ - '../datasets/custom_nus-3d.py', - '../_base_/default_runtime.py' -] +_base_ = ["../datasets/custom_nus-3d.py", "../_base_/default_runtime.py"] # plugin = True -plugin_dir = 'projects/mmdet3d_plugin/' +plugin_dir = "projects/mmdet3d_plugin/" # If point cloud range is changed, the models should also change their point # cloud range accordingly @@ -16,61 +13,69 @@ occupancy_size = [0.5, 0.5, 0.5] img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True +) # For nuScenes we usually do 10-class detection class_names = [ - 'car', 'truck', 
'construction_vehicle', 'bus', 'trailer', 'barrier', - 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone' + "car", + "truck", + "construction_vehicle", + "bus", + "trailer", + "barrier", + "motorcycle", + "bicycle", + "pedestrian", + "traffic_cone", ] input_modality = dict( - use_lidar=False, - use_camera=True, - use_radar=False, - use_map=False, - use_external=True) + use_lidar=False, use_camera=True, use_radar=False, use_map=False, use_external=True +) _dim_ = 256 _occupancy_dim_ = 128 -_pos_dim_ = _dim_//2 -_ffn_dim_ = _dim_*2 +_pos_dim_ = _dim_ // 2 +_ffn_dim_ = _dim_ * 2 _num_levels_ = 1 bev_h_ = 200 bev_w_ = 200 bev_z_ = 4 -queue_length = 3 # each sequence contains `queue_length` frames. +queue_length = 3 # each sequence contains `queue_length` frames. use_occ_gts = True only_occ = True only_det = False model = dict( - type='VoxelFormer', + type="VoxelFormer", use_grid_mask=True, video_test_mode=True, use_occ_gts=use_occ_gts, only_occ=only_occ, only_det=only_det, - pretrained=dict(img='torchvision://resnet50'), + pretrained=dict(img="torchvision://resnet50"), img_backbone=dict( - type='ResNet', + type="ResNet", depth=50, num_stages=4, out_indices=(3,), frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False), + norm_cfg=dict(type="BN", requires_grad=False), norm_eval=True, - style='pytorch'), + style="pytorch", + ), img_neck=dict( - type='FPN', + type="FPN", in_channels=[2048], out_channels=_dim_, start_level=0, - add_extra_convs='on_output', + add_extra_convs="on_output", num_outs=_num_levels_, - relu_before_extra_convs=True), + relu_before_extra_convs=True, + ), pts_bbox_head=dict( - type='VoxelFormerOccupancyHead', + type="VoxelFormerOccupancyHead", bev_h=bev_h_, bev_w=bev_w_, bev_z=bev_z_, @@ -87,126 +92,145 @@ only_occ=only_occ, only_det=only_det, transformer=dict( - type='VoxelPerceptionTransformer', + type="VoxelPerceptionTransformer", rotate_prev_bev=True, use_shift=True, use_can_bus=True, embed_dims=_dim_, encoder=dict( - type='VoxelFormerEncoder', + type="VoxelFormerEncoder", num_layers=3, pc_range=point_cloud_range, num_points_in_voxel=4, return_intermediate=False, transformerlayers=dict( - type='VoxelFormerLayer', + type="VoxelFormerLayer", attn_cfgs=[ dict( - type='VoxelTemporalSelfAttention', + type="VoxelTemporalSelfAttention", embed_dims=_dim_, num_points=4, # default is 4 - num_levels=1), + num_levels=1, + ), dict( - type='SpatialCrossAttention', + type="SpatialCrossAttention", pc_range=point_cloud_range, deformable_attention=dict( - type='MSDeformableAttention3D', + type="MSDeformableAttention3D", embed_dims=_dim_, num_points=8, - num_levels=_num_levels_), + num_levels=_num_levels_, + ), embed_dims=_dim_, - ) + ), ], ffn_cfgs=dict( - type='FFN', - embed_dims=_dim_, - feedforward_channels=_ffn_dim_, - num_fcs=2, - ffn_drop=0.1, - act_cfg=dict(type='ReLU', inplace=True), + type="FFN", + embed_dims=_dim_, + feedforward_channels=_ffn_dim_, + num_fcs=2, + ffn_drop=0.1, + act_cfg=dict(type="ReLU", inplace=True), ), # add ffn_cfgs when _dim_ != 256 feedforward_channels=_ffn_dim_, ffn_dropout=0.1, - operation_order=('self_attn', 'norm', 'cross_attn', 'norm', - 'ffn', 'norm'))), + operation_order=( + "self_attn", + "norm", + "cross_attn", + "norm", + "ffn", + "norm", + ), + ), ), + ), bbox_coder=dict( - type='NMSFreeCoder', + type="NMSFreeCoder", post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], pc_range=point_cloud_range, max_num=300, voxel_size=voxel_size, - num_classes=10), + num_classes=10, + ), positional_encoding=dict( - 
type='VoxelLearnedPositionalEncoding', + type="VoxelLearnedPositionalEncoding", num_feats=_pos_dim_, row_num_embed=bev_h_, col_num_embed=bev_w_, z_num_embed=bev_z_, - ), + ), loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=2.0), - loss_bbox=dict(type='L1Loss', loss_weight=0.25), - loss_iou=dict(type='GIoULoss', loss_weight=0.0), + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0 + ), + loss_bbox=dict(type="L1Loss", loss_weight=0.25), + loss_iou=dict(type="GIoULoss", loss_weight=0.0), loss_occupancy=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0)), + type="FocalLoss", use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0 + ), + ), # model training and testing settings - train_cfg=dict(pts=dict( - grid_size=[512, 512, 1], - voxel_size=voxel_size, - point_cloud_range=point_cloud_range, - out_size_factor=4, - assigner=dict( - type='HungarianAssigner3D', - cls_cost=dict(type='FocalLossCost', weight=2.0), - reg_cost=dict(type='BBox3DL1Cost', weight=0.25), - iou_cost=dict(type='IoUCost', weight=0.0), # Fake cost. This is just to make it compatible with DETR head. - pc_range=point_cloud_range)))) + train_cfg=dict( + pts=dict( + grid_size=[512, 512, 1], + voxel_size=voxel_size, + point_cloud_range=point_cloud_range, + out_size_factor=4, + assigner=dict( + type="HungarianAssigner3D", + cls_cost=dict(type="FocalLossCost", weight=2.0), + reg_cost=dict(type="BBox3DL1Cost", weight=0.25), + iou_cost=dict( + type="IoUCost", weight=0.0 + ), # Fake cost. This is just to make it compatible with DETR head. + pc_range=point_cloud_range, + ), + ) + ), +) -dataset_type = 'CustomNuScenesDataset' -data_root = 'data/nuscenes/' -file_client_args = dict(backend='disk') +dataset_type = "CustomNuScenesDataset" +data_root = "data/nuscenes/" +file_client_args = dict(backend="disk") train_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='PhotoMetricDistortionMultiViewImage'), - dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True, with_attr_label=False), - dict(type='LoadOccupancyGT'), - dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), - dict(type='ObjectNameFilter', classes=class_names), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), - dict(type='PadMultiViewImage', size_divisor=32), - dict(type='CustomDefaultFormatBundle3D', class_names=class_names), - dict(type='CustomCollect3D', keys=['gt_bboxes_3d', 'gt_labels_3d', 'img', 'occ_gts']) + dict(type="LoadMultiViewImageFromFiles", to_float32=True), + dict(type="PhotoMetricDistortionMultiViewImage"), + dict( + type="LoadAnnotations3D", + with_bbox_3d=True, + with_label_3d=True, + with_attr_label=False, + ), + dict(type="LoadOccupancyGT"), + dict(type="ObjectRangeFilter", point_cloud_range=point_cloud_range), + dict(type="ObjectNameFilter", classes=class_names), + dict(type="NormalizeMultiviewImage", **img_norm_cfg), + dict(type="RandomScaleImageMultiViewImage", scales=[0.5]), + dict(type="PadMultiViewImage", size_divisor=32), + dict(type="CustomDefaultFormatBundle3D", class_names=class_names), + dict( + type="CustomCollect3D", keys=["gt_bboxes_3d", "gt_labels_3d", "img", "occ_gts"] + ), ] test_pipeline = [ - dict(type='LoadMultiViewImageFromFiles', to_float32=True), - dict(type='NormalizeMultiviewImage', **img_norm_cfg), - + dict(type="LoadMultiViewImageFromFiles", to_float32=True), + 
dict(type="NormalizeMultiviewImage", **img_norm_cfg), dict( - type='MultiScaleFlipAug3D', + type="MultiScaleFlipAug3D", img_scale=(1600, 900), pts_scale_ratio=1, flip=False, transforms=[ - dict(type='RandomScaleImageMultiViewImage', scales=[0.5]), - dict(type='PadMultiViewImage', size_divisor=32), + dict(type="RandomScaleImageMultiViewImage", scales=[0.5]), + dict(type="PadMultiViewImage", size_divisor=32), dict( - type='DefaultFormatBundle3D', - class_names=class_names, - with_label=False), - dict(type='CustomCollect3D', keys=['img']) - ]) + type="DefaultFormatBundle3D", class_names=class_names, with_label=False + ), + dict(type="CustomCollect3D", keys=["img"]), + ], + ), ] data = dict( @@ -214,9 +238,9 @@ workers_per_gpu=4, train=dict( type=dataset_type, - use_occ_gts=use_occ_gts, # 10 foreground + 6 background + use_occ_gts=use_occ_gts, # 10 foreground + 6 background data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_train_occ_gt.pkl', + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_train_occ_gt.pkl", pipeline=train_pipeline, classes=class_names, modality=input_modality, @@ -226,48 +250,54 @@ queue_length=queue_length, # we use box_type_3d='LiDAR' in kitti and nuscenes dataset # and box_type_3d='Depth' in sunrgbd and scannet dataset. - box_type_3d='LiDAR'), - val=dict(type=dataset_type, - data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl', - pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality, samples_per_gpu=1), - test=dict(type=dataset_type, - data_root=data_root, - ann_file='data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl', - pipeline=test_pipeline, bev_size=(bev_h_, bev_w_), - classes=class_names, modality=input_modality), - shuffler_sampler=dict(type='DistributedGroupSampler'), - nonshuffler_sampler=dict(type='DistributedSampler') + box_type_3d="LiDAR", + ), + val=dict( + type=dataset_type, + data_root=data_root, + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl", + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + samples_per_gpu=1, + ), + test=dict( + type=dataset_type, + data_root=data_root, + ann_file="data/occ_gt_release_v1_0/nuscenes_infos_temporal_val_occ_gt.pkl", + pipeline=test_pipeline, + bev_size=(bev_h_, bev_w_), + classes=class_names, + modality=input_modality, + ), + shuffler_sampler=dict(type="DistributedGroupSampler"), + nonshuffler_sampler=dict(type="DistributedSampler"), ) optimizer = dict( - type='AdamW', + type="AdamW", lr=2e-4, - paramwise_cfg=dict( - custom_keys={ - 'img_backbone': dict(lr_mult=0.1), - }), - weight_decay=0.01) + paramwise_cfg=dict(custom_keys={"img_backbone": dict(lr_mult=0.1)}), + weight_decay=0.01, +) optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict( - policy='CosineAnnealing', - warmup='linear', + policy="CosineAnnealing", + warmup="linear", warmup_iters=500, warmup_ratio=1.0 / 3, - min_lr_ratio=1e-3) + min_lr_ratio=1e-3, +) total_epochs = 24 evaluation = dict(interval=1, pipeline=test_pipeline) -runner = dict(type='EpochBasedRunner', max_epochs=total_epochs) +runner = dict(type="EpochBasedRunner", max_epochs=total_epochs) log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook'), - dict(type='TensorboardLoggerHook') - ]) + interval=50, hooks=[dict(type="TextLoggerHook"), dict(type="TensorboardLoggerHook")] +) checkpoint_config = 
dict(interval=1)
-find_unused_parameters = False
\ No newline at end of file
+find_unused_parameters = False
diff --git a/Chapter09-Appendix/Awesome-BEV-Perception-Multi-Cameras.md b/Chapter09-Appendix/Awesome-BEV-Perception-Multi-Cameras.md
index be718ec..3a08cf0 100644
--- a/Chapter09-Appendix/Awesome-BEV-Perception-Multi-Cameras.md
+++ b/Chapter09-Appendix/Awesome-BEV-Perception-Multi-Cameras.md
@@ -32,6 +32,7 @@
 + BEVDet: High-Performance Multi-Camera 3D Object Detection in Bird-Eye-View [[paper](https://arxiv.org/pdf/2112.11790.pdf)] [[Github](https://github.com/HuangJunJie2017/BEVDet)]
 + BEVDet4D: Exploit Temporal Cues in Multi-camera 3D Object Detection [[paper](https://arxiv.org/abs/2203.17054)]
 + PETRv2: A Unified Framework for 3D Perception from Multi-Camera Images [[paper](https://arxiv.org/abs/2206.01256)][[Github](https://github.com/megvii-research/PETR)]
++ Sparse4D [[paper]()][[Github](https://github.com/HorizonRobotics/Sparse4D)][[video](https://www.bilibili.com/video/BV1nw411b7bB)]
 + M2BEV: Multi-Camera Joint 3D Detection and Segmentation with Unified Birds-Eye View Representation [[paper](https://arxiv.org/abs/2204.05088)]
 + BEVerse: Unified Perception and Prediction in Birds-Eye-View for Vision-Centric Autonomous Driving [[paper](https://arxiv.org/abs/2205.09743v1)] [[Github](https://github.com/zhangyp15/BEVerse)]
 + PolarDETR: Polar Parametrization for Vision-based Surround-View 3D Detection[[paper](https://arxiv.org/abs/2206.10965)] [[Github](https://github.com/hustvl/PolarDETR)]
@@ -70,6 +71,8 @@
 #### Fast BEV
 + Fast-BEV: A Fast and Strong Bird’s-Eye View Perception Baseline [[paper](https://arxiv.org/abs/2301.12511)] [[Github](https://github.com/Sense-GVT/Fast-BEV)]
 + MatrixVT: Efficient Multi-Camera to BEV Transformation for 3D Perception [[paper](https://arxiv.org/abs/2211.10593)][[Github](https://github.com/Megvii-BaseDetection/BEVDepth)]
++ WidthFormer [[Github](https://github.com/ChenhongyiYang/WidthFormer)]
++ StreamPETR [[paper](https://arxiv.org/abs/2303.11926)][[Github](https://github.com/exiawsh/StreamPETR)]
 
 #### HD Map Construction
 + (ICRA 2022) HDMapNet: An Online HD Map Construction and Evaluation Framework [[paper](https://tsinghua-mars-lab.github.io/HDMapNet/)] [[Github](https://github.com/Tsinghua-MARS-Lab/HDMapNet)]
diff --git a/dataset/README.md b/dataset/README.md
new file mode 100644
index 0000000..05eb831
--- /dev/null
+++ b/dataset/README.md
@@ -0,0 +1,38 @@
+
+
+## Datasets
+
+### Autonomous driving datasets
+
+- nuScenes
+- KITTI
+- Lyft
+- Waymo
+
+### 3D Occupancy Prediction datasets
+
+- Occ3D
+- OpenOccupancy
+- SurroundOcc
+
+## Data auto-labeling
+
+- auto-labeling
+
+## How to make your own datasets
+
+- data-mining
+- CLIP2Scene
\ No newline at end of file
diff --git a/src/imgs/nuscenes-struct.png b/src/imgs/nuscenes-struct.png
new file mode 100644
index 0000000000000000000000000000000000000000..92a61eb3b350415322a4bd4121b09695cc823d26
GIT binary patch
literal 87251
[binary image data omitted]
z)cUh|OBU==xyJqeX6<6I!!u0+2n1+UNfhA}M|Y2>Bt~=#cn^-J-&r|zbRi1A?hdqZ zO#9@tjbY%Lp1ngE1$7W|58%V>Yiy(MmuDxY~%$04Zk^N#r#K2z-`;w@AuOtwky_3dT}ie zHzj!U)g2~mEqFz=ocA?rUB386*tO>iyh7$HfX%DW$rA2t6vdQ?;_6-Nj5|{z?~_d9 zdI)DCxfJ#v%Jcq;N1)v)H#^2-sPd zuD_7xlYYwo^*x2UA5o~^>%RcZ#5-@vPnug7QGr}-Z;UGvet&Zl@T+nZ_~k>v)z)t2 zDrf+_eGYjN(_vq<-lSB4x(f-b-bu=Q9#;0%joyUiYow5Tv>e?e0|LP2&3zI$R7Wiz zW=j3~ARtwLc-lhYRk+33!P@_1BFq{1{>pl~;MxB(7vXEM_=-N!c5oLUG4J3I^F+m5 zxWMt$smVUMr*&$vHrFTn*LqxIk}!>Dg?)OSX*W-Ts-9!C})4v)=*8JYzFgI>~uE6X`a!2z9Ahfnd@n4!8SYu=W78%l4nl4iah z3o=$$xj3CrQI;r0u4`Y~VjnE_V>O4gQVPCl17QFF4B#G3B3y2$D9y-Q>}L(fquabA z-|oasFagtL{!}~)_xQkB;KX6~%r&>=1(1|-Ad(KoVM$7D2`$YcQZbxF+Z$$w-G5;i z+Cl>ZyRJ!c_Ea5!Cx~amg{op8X3#Y0Ujx*~fn93HbZpʫG&(w$4+rt&KNPX%|O zI%0E(>J6!W=aZslqa3Lb;IbW)gbNMDDC0u8u)rC+kRkuKjs4wSc&-UPateQ_*07Z2 za9yEnhN`J9cxUXqobRHA!$O`lZ)MPJRo=Mv_6vQb^WNo)IMpHoaHb#34PAD{!yI-e zN>3@d9qh^n-F{ObmqHWL`4zi$1+}>gEoF8(6-B<+C+CMj5a_eMSyRD*E&9aQ>%7NE^1fvW9v}9sr*R?$+NkPm~!tmmZ-ShS9V8htu0B zUne-tPGW*F_a*{f(@juP$Hq8tR309b)E5wRj_V%>;EP#zkPmj{u7^ro!1QdmF!q zTQth^Wtp-;?NQMaIa2KIoX|*r=wHZ4I@jJSw@z}W#Y(lhhhiSuDYDa9qnZ@<$VLf% ztU}~L9)`AN*6PMQ`%#4wJIM=Nrjl*7S~lGg7Xf<&3}p zUq}iL7o6>Tb~+YnaR0^M=^icodP0F6;Kz$`H!?-G(txCnxgCQ5oY{E{u7r!n59j+} zHKH);p^@b8tY<)E?$=+5h*ft8Fev~ z>YcT2VFXC$2`j|~C(Kn{(KU;>j;21-kpY9fk!7rzjp6-Ogy(9JnJ62jXH3UJ>(6nL|%l0o_u3Co?B)O-g1$Gw7i z`gOc3$Uj)@u7rPdujPBdXDZmDWXA{ly%vsr{6!fvk)(xl-xXHReW)Q$1l?Mc3eFZ? z8|2jC-=2H6l-#o-mqM|{X`_8H60fOev6QvW@Xmu_o=#^M?W6EoBS z5|))!j~lT0g&XAG;)N6xu&B-kRTkj+38Jj7^YRZ85^NRfo6IiT2m8D!NJODnTL$kRznrU=-2sR`L0Y++IxDO2UmKdW1}cQvIh5I`*#j zR;6i0cWmrO{avfe2-twDgR&fjwZWsFwng+UdF$EY*dC zYxCA$$|(?c7cXCG*gnS%XA^luVNOB4q&WU%TS>Plg9y@9-h;9BB~qCI*O2mFkTXru z-1J`0*=Fc%c}_>xM7_5Z__U=VH>oH73%4aN>%C1xi=Q;NTysNJ_|%Gj(A7Ir7oYU& z#(j%3A1M$3a>T%PLC`aE(gyVcIF}TKPy3paH3>+>ih9GzGBAWhJgV&U=fR4SZ8=&p zUcKJ{JNo&yI=2|xtWGFEVqk$M=)NV~n-t@682y1Qwy0&_y)bGPraIjIZcH6aEs$b6)OQ8@< zki4dwBdVIG5yE$EDX)Qn4;oLF3V||{RhLN}ch1@lN@p#2?snP4#L-paM!<4S^!yzw zU6b02+m6Eue#`m~%n(lJ@opKS(w&d`7hm4{Jatx)_2LNb`tWAep3st4cJW%EFC;vl z5Z+c%njar%eV*2@^;EC5`pU_*Ii2S3sLNh?KW{~8fsHPTHBhWVQhlI8;srr)^zB3D zP=Z>+y(I#!@SOF8hC0p=Ds=&ExdyU8@W3pJBcEz0H>+g~s@)i4{`!&GuelL=O09Ju zFoSANz~w0c=iIoNs{8$s0Q>B8O1f$pZAn}ph94m}s3Y!CP-)Y<`8DcbXdVG4j%uKE z8VqE}Gp%m-2yX`tYlYbwPN*x1u@pwC2tY9EVlH+X&>=ht>0yj{Uzj~lq8)`e`6=Fn zE@~9+^KrT?UWolpz-he$xDWpqhjtL7F7PU7W}>!lTx@#7cw5PDJS; zX=AgVj_=Lyzh1_MV;S$_oYg=l2_dFatuj|K2bz`Ou<+xkHQN}K(~wj?#K(ZsmXan> zUC~q#8as4R+AQ?i(}hAFFF6^{eD-5s+f6fuVD!WR1h^siq_#{ja~|HbkiKs@Qt%lE*WjchuJrTPbyZ^XH_s@_ zTR1WkKC3GOSy7;$2spfGP}D_!M+xrbb{1q# zS|4?aWkqODXIkl6Mu-;ALxsRRY)`nYVeZk)hygRB$pLxHke!7-BPm@>L+qt}3>7oO zqqazf*RRX6O?VCpzXpH4Jm1=M^LdfJ02}|!p*9&dJI`pUvMD-Ryh5(%R9P0Mp2^_C znF+1-Pw_pU32qsr>4SvBMAKiYZxzL+TsrsjHCbFlPEm>6kXCugqDuZt7pBb`A~S6J zr)g^$8sz2E>D948h6}Rq=GAZD3T5pk9Mo|tP&O9W-bhuUd&l?R*T+gM(v+Vv2L9T= z&c+M_g5~GO-UA6g-3c^DqrI=#^WPMg8*IAauIaI>l;{IQFk&w%sEikkG!N63$i zcEZTeT+zihhf(V2$vJzWasyq&Oe!*+2T1#CeTk~?(t=^cG;CssDeatA`l9in%eFAl zP$pxXRvotjeTYOT?NmA)d&8fDL?RwLWdrDr!WI=M$xA; zU>@_zuhB4vszIJJ!!*<}dT2|Quxuxi|5x<68^w`jYhrrWfGZ`h2P@2vn!wK3tv;~8 z|90Ue@!^9xYl~qh+~9NxTe3q^U!nq?hJz$M<;}>KEsbCOWsQjigA_4T(p)kkVcCh; zuWd?V-ZT8a*Qh03=j}NIo99n!REe}aBe?x~>rH31%*93=RYh6tYE#r%+5jQ;+b8ki z{KOF-`j7}qb&MMNadRUxKr($%^gz1*`)5-sKlb<=wuk2iUr?F5!KdmL!mpyXkZj{4 z{V}85`@diG9i#>4nHbY@^L9S*u?t@R>38M4HIk?@xKKsOS>^XesCnYM6B^R=hGYEL z6-znU)Alxjo}qHGvBj^ve=^hJuMO%T*YAE*N(|g8Rej1VTzJ#9Etq#16K1<|RqKF^ z-!QsC1`@6#We>#*88oX6mlJMie-}wBlTZe^p@j{_=Fp|gKDzu?#>WQrkAHorGd8ChDX=9e?F20CK}kMu=g@1Hf1jY zT=HBOOT>wAr>#Qx(Y}xf9CJDD$r_i>vZ=&D{fKF9Pjln3a~5T%EAgLDN$P*5KN6+b6M;981?HcKMAq#43QCR 
zy#IUwmhoJDVcjUm-pzwDi!)o*i%*r7eLxSIMEN;>i^+pU&Bc#ncG^6ad8J4XtfkNA zC-bVU>(5pf2JL-)nOgr29;LgJid4L1Yo0C_U~W}Y^Sq}jrTNxXKY4MF8Jn9?YUtk+ zfhEt(z*^KI=j?xGr>E@|Pb@{PLUm~lHoZ1S+}fJntL(ngS4H+x1(&Z^Z7tv@lIJe! zpkYCat-LA_5STo%le4R=EHnE3U@dJg3=*f&+~ibWX`s0IOqedPEpAti-NTXXs7ezzdeumFusJ zBfc76nU9Gt?W*PASF?54Zh6^^Nx5Bhb7qniCTeHflhpgw(Eas!>0X)g{pEJt8UP=E z+1)2izPRg0t%cTmg*EAI3Gm@DS^l-LPxtg00W~B3LXl>g?ZTwuv(!Dvrd`15sHHkO z-ppCo6xalLUY_5-qYh|y%6~=UAMXhk6}ZJIt#0Hx*(j>TLoj1s?xUDPW3Kfr?5pIU` z0;jq8HPEf)1f$r0qbp`}F#rjQ`HvNP_JdHS{SJ)Nz#O(Zd1n8Us_(bSXgVm&K2 zjJQ%#axGAh))zU^8)#n<6MI!LFtCGDT*Ti(6BW31nsAAO79T@WRdArHEFs}oF>RqU z#L+Y}X9a+JR~b@6-{BrDQlQ2>PL@c{L!V#JIFLlw#HW-$J*^ke`jd#(*~b%bt@Kfk zy#{ZbdALz-o-3*tlATuY?EdF{Wx=rQSZr(>STz0E%OM!GowkNt7!h}?Hfg~-OqKei zpF@f&%&KI4(@z^h=`E?>Sg_Zo&b#2t;;qfh?!@wi^Jz~|%oev}CAT%uGRQIPHoTiDo*Z`PNR;{~yAv{3~q zi@L0^IpU{hXOsKsX=d0iTaj$0je8y7rG( z{jjE=BiHMHyabkCxiS?`VkS}Wu9QreErqx7U}eqnG~WkJG}OcJlH=wX{jDhmOFas- ze7tSBOm~Ug)F>2BK4)JMXusYZh80&~a`jK`*AjMrnH#lx?!PEseqtVbON=c$o*72f ztR?~WuZ_q=Zu;yN3X`Dt)3qnn0ZN?Bv(reX@mo{J&ssJn-m_I}rP|)F*QaPCbNP%cf7p8@|6(k?aGg-XXy8P>5!(pO6#k#)mMUdJryR%n;?hvL0(G2TpOyNGAJwN zxs(Hiyp@WHb-4xN7C(x86g;G44*LaWC?v<`Y72}9gC{brjaslacWETe>=tZzloI7= z4i_jn(}-S^&0$qPGiskLthmJ0t;Akz<-N0O=6(>GI}?5@=#6~kviC@Uxc86tsE)#K zr#G*T#0p23ssCQWi`TO5-QH_YrKkH`x9z`C&rYrmX`qHoHE5!WO(zr%o`!coS=A3 zv9=?ppsUTr=2@Ta%_n7BkNg5TH>DOtwZl|-Uuf|s!A=U;%nnz!D;D$4 zhUNl5Zf=rbJ1m>i;_IuXXdTWzAsIQ-=A~TvONP<%%gXV`wP!yt!}#NAUgJT1JrCp}ItbI~g&A{61D}R355I5cHbUGC#p)^<=d6DYwW;QNxRJ4nvbPk}XI88mT<9IIZm7awkE1e`S>D~b zn;qe4>v1k4CUh*1w~6nA)RT0u+D6ijxV%0qrOlM%%*=C~DemVr@1Nox zv=OnC8H)*-K*w)f-4WL$SOSO#b;z3(pTsJ6?iRyLYnl$$HS(uHF0CU`7f5#cHpvoH zrAF(d2(ha#tB)9dR1)31dM(UWS5-oTsq{jGFN*I{n;<=~sjOg|6Y3|$XvGJirYC#i z%H?G21J_m;F+zylB7z%*bz{7xL(V<5-w!Byth}H?HKAR~cJdork!HM8UsCAXMGbQ9 zC7T{o*mdWA8lkF9n|{YXGrzzBu9cR7hhXFsXuSvL+6xQJb5nR=zrsa%gecVc9;m3H zzqCfKvdY8`3|?}Bx8PEA6xkXdIgz(`)5#Rl2Id!ZFL1t`6}pdl#=5sjya&7P{(EIL@?p3AaXmiH zj?*uj!?{wB$JWNWIk4&wjGdH~kCrwcUj)&yHC!a#>1-{+E0&?Qwt+`E(boqx9 z7y$&nm{6<=+ggwll`BlH;u)`=P18Zo+VjRn%B6may&L;`w=Gw6pEXAnwcB$x7e1Ii zE7(dEGbi4|#49JmR(pI#HcAt%my5kiL+Pc{7_YbAcR0AG^D9=#*vkmTuFf>|_*sG4 zR(9OOTg-IGDiMba*!0}V&4&<-e_``sG#N|bT@^j_I}`luP&ok>)%s@7FVH#VDmD!1 zDs_3?Y*trKT7j*ZEJeN^Z0hDz^)_D)Y8wi9^ZVr@bVVE*D7SB%d2D!>2)RjBx%JaW z;{D!xoSNv_LXr1|YB68jDhC9aHaJetG=%hQ-|M!YzS7kxx*KaBXMC6^7{m=HqG{!> z2_WX@^Jdeq=xC++7B4Gx-^A9^B}GBGlTV)zgb|C)VYB)PK|5Wlm4kxG;Yh?%u)h8a zEFBRCD$}95!x5;Lj2xmVQR+T>k)vBVwPhp7xUOARj%cVUz2^1EKlPl{9MRC*_x*ZI zgO@5DN5`!5BU&q&+OK#NL?|h%qJhu z;}Qb>ebs-c-oJ*fq{W}!?5u8VjuG*;Q>8vFN93b^gt%b*6-r$r;(I#!4An&I>o(c^ z{x*spbCEU*}*q}9b!y#`yd+q7Qj?h+T@#98Om zOU(Ov3NdR?YAXW`RIh{fmXQDT?fF}Cni<8$1{FWKXz_DAVrzq}q0ga$5X>>V?CTkk z-XLr7>d9lW5X`yUXAAC7gcgCN(qFJscWz%5n(6mIL zYI$F^`lREuC0vu=S*Jn55!(KqTI;edhjAEm!%pMAU_p3M&=Y~q^OL}OYFhC&-Z}51z^jGU?`t+ zS7u4r(1ekHcHcLX59a@cSCD8m3mS~m@3uP=gz$)@3+e} z30V6OT!VM{+zW$voO8)~gZ!bzD3O`b@vc{SoP8$D`7X7jq_*hEGtu2NvSV5Kj~9Ds z#B6FC)=C%S&07J6&v0YrYCqbsbK>quif799k!3 z=}oo&q>e5^2i8;o{mUZxlZ-PeD4UX&KUg0`kz~)hzWPG2$q7TD|<4)0JtzmCo>87QInM81_ zh=y~B+6oeHs1`FQBFd==?eEC5`347A=uB(cT>0i_yd}3cDH$1;Xf8r4AUGp!a zynlpe+{Fq{CG}wVx{@#0h&;udMF;IdOu%^Sjxuah%RIyzr`s{6Ppo7bLgkT^OUE;tN%sj z_Tr0W>TP$Su)YW9;jo|Hbm*MJHz7D@6V$%A2TCyWVNQU?(`RvQe5iCMqya}+%?sFt z4jaqvq%eIZyGicAQOV+TRt|m z5s4A+#Bc~Ez=~|R`W_CEiluvI*m4!Aj~2A1dz1G(U;J*YxOmZffBP!{T_tnLqKFkX zF|Wl5Oro@D&shk@#=A#w)Z6OevW8$L_ghw&jxuec>aKBwTJ*u()Qy|MfzNc%V&X#k zCpL^wV;a(y{&y!PW{;MlfLi%wb%-!WXp_xhsfCiCj38njPH)|a6lg~-hucU?sKhR+eMm2RmL=m!apr_*`XS8V~MsQU&H>3rACa1yrVI+*#`3P!CxT~^~i z^<9BNRT)2m4a=^@5;f4U%c%B5`fF-P-L1+-K6LaMTD&?F;cCONvrpmdp~lq0GI)Iy 
z!s`Tj%0?N#@M)r}Oj1syO6K#FAuu$R&ew+-4}n~L!HL2gAE(TjC4p6h>V;rZ&B!Xr zPV~`2DoXm|RBhm4$W=l3i4PvPhNcu0BbR1(o#%6cKIOmYy9k7GbE$@v<*_9MyFY$o z{ySm&dAC0$00oYvnzTUIu!Cc0pw@emo&>I3ng1m4jgtH=99DNf-%*kMyLm~cr5Uye zg4E&K>_-Cg@8mH?@? zTDIceUE_^=2FvnAZ+^-!Z=5k!o9U9CSLdzkw6V~c`}XtLh8Jm?B;p=_NAd=9vN$t^ zP7$K|KAq-mV}@PfGNWpI<9Z(?(~6;5XufsvjfHhBSNlCW9Kkp3Sr>|nM@PV4+Mn%47-WlpaIM-0_QvWESH5CA5V{d zj4n;swWRb)NeNAu@b^P(EKr_!#wt`C#S!jTgwU69`CyG+%Y6?- zZrzspt*kGGQ|XOZX#h!tLm5T_hbd)w^V~8ww~C6vU4gQ{2br8~-GVnTY_`lo+~%uh zcC9r~-Y1^(r}gIWUPiBaFX5ng8=fp|QlHMGk}5(SPM0A3yGv_&lij1nC;3WW*FAT1+mE)8RlGQI!}g_n^y)Wdn>U|d_2tf3GaH?O;>Vcj>#R!J zVfN2qcKI5G_#OEu4Yb}p?0Ew*n2|}rR0{YLriHS~tFBbmUz-YGsnk{)>J0Lfynh7_ zvlFfmq)^W~NdbKnQvLfWrGc{`WfM}kJ72!uDJr#cb6!SD&0Z)OS6IJjuM7^E9u&?~ zavs+s*rH?Xm{M>exdhCu+5yGr$!eI1rI5F0xtC`l1j8BV5=8ZMOYNcMZuEX%TN}8I z{MQmAMfCfYyA-J9O4XvyPH62F@5p)01tSu+xF z^HN&-Jx-P~IBXxCil6aUTxuWR{E2YJ2qLPw+KSi2CK>ELGTImF{+vyecoLNkhOI0I7TNp^lH+4!2xVNXD~`BG)nlHu5v;<;_Z7}uHYKFiYm z@YHW@MNA037HS1{#=t4c>#Gl(p1>2pI&)laGr9MuV+39@5(N}4HAyDSf<4``A941P8r6yl>hNSG~YO&pB zaGy7_n|D(3$*h06peJ`&XoJ%$u3!u}##qvlz}8^d{Z2gu`CfI|wG>MiR5RVdWnSse zWEyJHMfF|@O(2P#8q;UvSjmBiM2FCaf;%Py&+It2jIB^TH4!$4nk9m5gHxNdASx8x zKgPL#UdqLK-j=e7Md@^cqAaZN{*++b^s57QI83}EWiN&doL8u4K||HTvdwd2iU)hI z*LeM^j>3X-CfWEoS0!xCUcOvBXJJ0tE3vcxW5tfc; zxBv7+g@u9>5D@+hJ6537HsB6+SeWViV4%F}|7;w`47-*a<#@v^W9hp7Qy-0R+q#(kYfb zAX1ZXLr4CWvQ^2o*@tIt-^n;D6_%ZWOnYxaqLM1;l&bl;%o5@LBlWZq51Y$%k9o z=y#tSmr$f(U=HPnTwsL>vOCY}-Ay~R8UTx65jc@G^91tJodLUfNt7I zqw87zu&m5GYoB1fBJ)S$n8{f~LvYObyUmMsIE7#Q?D1XQ9F+|(2Y)b<311@-pA$lQP2LId#IonWzk&d{%=0qQb~RD3oy|i3fI2$pIrp@J#CG* z(yiW!%id4>*Ms*byFs#Sa8$eOmDsGNIF#G*)k9vX8qvCI`nI;~dF$iz~~g${L0cjK+)M z56j=-m?=$X(mPL-JU>;Gp&aa?u|GT$oO`eUex^IDvay~jXUiy$#|H*7nTM@4H8ELh zw3>SPU2`>*Gk+5}={K<{K?oU{-*p_VBr}itw2t{B!KBrVoEUPCktdz$eog((UdvfL ztJ;Z=F6O2N;vUkp)Gc|x@me;%sQ6WhJ2OMi{d?Me#c6czP04Tf^|ZZI;!qUP1D3DDS_;khd9TTJjAIUJB^+Ul@X`?QAMWboJ>R{GF`( zYJ3{#hE~=I+NGh+x?Q)W5d7J^O*xO_%0I{6yn9tF_)LKM>h5kJG1vsezlhM z(sX!RG5-kj;r7l;e+P=U1zWZ93H2|Smg#gy+EmCs^cp@8W)2>=f~5amW7;nK(U}>& z$x>ym$?JhlP#X6&;VtXz()z6&y1Uc3;M7;K=Y(2&>%Ct$ssY))5l!C68nBQTo3c+7xGBGkn^J%DNMr1 zkJ+Jx7x{{WOQ$XPe)s{tDaM%xg~MH=s*e&lTSt)F5sI4l3mm9TKj8~8 zfY5<;Tl%WiHHh2cP#QTgez=`<Vt4-e0#wK2N*f!~)w9k*w9#JFiZ}Hj zFQI?S{x;!%kBk=2Xu|-0h%-eoAynjW7`x%Sdr+@v(gRZ2kr5LD?IV%R&~?X3EsV&K z_%aH*qY6micO3kLbzWg5r>77^h-JBRu*@=(9vqeZsTP=-wD{v$zp5hs_IxEAG^;FA z&dxtd58kbdw^#hfxvzjWjJ(X1Wk4()y$?LJgTx-rjhZtorTl=0q%zVvL>jWwphdYm z;4V`RlKEDcf*tV6Ol3@*zY9rva8&lET0=_E$~l#FZ`!}T9i;&ULGJda)f+g#yLXUn z{{MvW6`&1&+x^XOBqaP8`;s6_=T-Zn6Sg5RU97SSOR;N2ay#La(;kTZ$4pQA3d z^t9Bc#J=l59GPF?FC)En*sz8#hQt@bTxbXNMo5A&VjRHzx?6^=ZzI$9-)^x&xM zPqiuxpp`zd&x1mLdmF+_YL=Mrnwi`Nyt@)9xbScGcQFFmz7mK0Ip8o8vv&lj7ap*r zBWvbCOvo`n+fmuyYHtFe=o)*0Bz7TpYR<8glEj>X&W9zGk@VoGjPH-lavZc$NS<1v z`>(gkq-MhfZXhY@0NH=0$=`0B0JOaduh8BWuK4rx_ z{I3JIwgO42>-=kjj%#yezFhq1*HDGBZ5PdLrC*!LeOh&|P!)%!2I(0VPj;GORIWgO zf4fz|`6FCOP2%azho0N_oWA;t4gb?vV3!sG*iVOhaO))XSw7Z}E{`=x>MYceW#`vb zl7yGa#NlSH!Q78w^`d&+ibT|CdE}&GI#LNxszu zLh&4+KIkcXFf;#(t29YkeqH|a)>Ja_8w>fREx1HBw#O2YaI@{;A|k zBrSLBl|uRg9f<-|h&cmGVRXo85>t+;PR^+}2vEF1)8e&!A&)P!{MC2PP%sM^$kR-V zj^6W&UPZ7tvy)#THI(}P$!pe71&9l&d##xJEq_Q0fo_mg--#NUqqpRIS!#q>qM1uc z{Rtaag;Zf0*h$=38m3v+`_~;YAnPCbr+z7%C6SXs?&z(a#ZTc+YUX5EOK*${C3Ke5 zj58U|7>bcm!%XH{NRTU=CS9Q*2U!WWQaC?cQ zB)aAQyw$Bc>KYS*58)xPmjMwZL~#O{6-8ovZgRu7e@efQw7T65`K4e*V(J*{o|SXtpPjUQ(*T47QkzybZc-_QQ9EeCF0nKh2QX zp8S-;$fl~66^K)LRKC3VYw6yu^@!o_`HR#GTT_KJws1#Z_4GQZ=kjjfGl>$3H7lDT zkh8kpegF2~jUU)ZbAclX+nHM}{SQWht!rm`w_DO)bejLco9NgZW z6yn#j1Da&ghU(?!h|yMnW$5ZtY#?uaP_mKkTFLTm&|1U7qu`sv>;2D`zkbWO5R|q0 
z*(Xr?$EN4hNZR%t@A;;4#c|8KD~k>eW*zMTb#j|yC#sa*><;fa$@c~S{uJIZ)A53- zT(mydWv*Ijws7I%R?5cuu65>eUE%U3!_%8*9ZKD7V*QzgZNbRQ-Qyc9Hb~!H+vOkG zy!`Xtii^*|TFAnT=iieB=1@%*cQ_0;TECL+bTypw!_Eoik&>?N%KO}(j7NR@KcBl1 zv3IG6erCH?X^`vvA|`!gp`vb&C*9+6!MuWvbs$>q#ewz6<`hHL(Vqs(idMh2cZ-=i zM|b=D>S{ngE{}j-TT*J`3}=3=&4;^~?AO*Uw5JhY?z+g-|7S4v-c$uvz9@R$%b}WK zl_|G5wW(3J@S=yqRt*ZFT$KKg$?6pv@(33)wx4~27urhoNu=t2syNT%ZE$q>S7(9w z=Psa9{rLey98k?FN7@xQx+>B*QreAeD;;u0F8?>n^1porrUp27Vk44$;A(E%Q>t#_ zq&B*Q{Cl+Z*Yx^RBk7OcC~%$-FL<*)Xl-@_NwoPliAlPQR)eha^_t0iN>}FROpQOY z`?LT!4A5?j5Np?-ZvC%C0?^lB&qwA-Uitd*Zx6{~R4^53j%Ia2n<13tZ*$k%+Q)}y zcpIGr{(A>Y1wv>ABN~c}ZhkcwwI+3vL=w;cDeJrA;p*CGBM~J=O~MdibcsHCjTVF` ziRhgnLX<>A3t=#bHhQmlBSeed%ODIUNc7%Ei#B=*_lUgT_uYH_;rBboIJ3{$dp~sf0bieO>_a@Ud@|DL$GIX?zw-%k>=dC65$j^vCfk&2g}y#_#=)@D;MW5VC$@ZthG zl+9EN5b$!I;cfBszlogA#o~RPCtdAtkh-`e9y0_e=>4Zlga9my$U#~+{LjN&CI(`e zCyx0L{HGDb%9*#}7A_u7(!A$B+hOA)bAJyTe{+}t*Pnel_!4t4mCM^(KVq~uVrZtb zlyOueaXhK{WQu^-$ySieXWMNsq^>EGcjJ9Q6*P!2Wx7n^?)3g}Yz;9|96h(pJ{1fWYECM z`=bxFxZ@qWofd-%k6Ghto>c7=)LT;rUd8cmz}dNXo^?XSB77!9l|Md_+#jK)89-0) znmzkC@b*!7(eijUok>XLr-#CZwW(8p{x-_N$l&QmOl5%**N@8D0;TQBY^D6MO5iuD z@{5vvdu8Jnr8WB@2YV}%(W*a=fl86@4%!Y5O5?WL4&TOA9Ky>T?Ek5pb5L_|piKc( zxvGnfYY9DhJ6>-&!SWC~S;*c>+3tQm@Cc{%E|_Z*&&4PkYsVkVE(v#`grkDiWar~B z$5D2dk>@eu=xJE@L_03Kb_=I~^4%OEKQnePmKf%U($0737)Yr)&+=U2$k3xnKhcp+ zQmA`g>0-Nyd0SM9F<2-PNK){6UhZ<(6!MmA-Nb*Lehd}(JMy~2M9TE3;#R~7>zh75 ztLbyWz5NG1O5Y#s30Az?fA+H~J5u;Wceli5nhtHu>Go<`LIA3#v!S|M=m8D#d5dYh z4XqN}$(Gm)EzV0aD%M@@e&++gQpFh8%e59@kQ2?TH-FFuejg_E*8purOl@=6?WMH} zRjYsW$|R_IB;#uo97S!bDluC<9*0 ztW^kk+m}b_bMM1t9W0Q~yC!ZUXy3d8Pai{a9pRX5RojaQgZOXz}AM2VypQBA4 z+xU(ARPoYmgWq%Eg77pQ5euKpR7pJwf65B3m+Sm+2?!vBz?~f)?$zBAF=NCOq#cki_f7l3`4`WF#-`60VRn~Ex`|p0>LXq{$Ry~oud(ki z+d(20BTI|*NMwYwTDyM470HGJ}{^GW~%?i%?gbYr`U_^AVhAZ3p>2!6@ zXMNIYrP7gt!Ose5&=4=~&ZocN_&CX*9&YY_kWl4LDNY44HHWcX&R#8Fz~gewMsKQS zZHL-Too+%z&%%pSQ7A@FC`Hdr_j)^$a9(-!Lfm$k9DIidIXlCTldIobKbP}2;-IRfD5&vz18>#zwL|zG^4@aiu9kO(GMPcQ$^vI>XGQ(#7mn=L zWTCRyt&O@v*&!yEEWOMiEd{<=3Z>jEHEex=ca4-a9g#3*CpJWZ$#^~W0WdCuoD~_x zBY{LOtD>IcB3zmuQ*3GQH(!zO39N9NcI4~5O1e?rZPVQz2|C0wjBX63l@&<92gFX@ z&JB1NCHWgMq&+j0ZFT;zL?i`$6pvm=JxJwj@9+)HVECl63bM%`fDV!v5xp|0(;b28s z&P2dvYb4&^CnC9%&4DYj3!%7eZ1p^PI ztmCa^RxPGR-Z{+uAcDZ77UMTyCrL;o--k3~SBd^LsBQBBc9XTZb#1a9O&~_Ua99TNeda8l^9YeJ@fnP^ifH#x`C7Y z%|JQ4j=|ayrSICQ=NQQquai)I+Zh*)AbW}(TQRHKO?^@JV5#A9s+b=VxwC=p z3F5r2UjmiFYHnye9y|D$rf?F}81Wyz*ju@o3q?`wJhe}lg{SB*8SKxV8Np`MCH2!%hSG@jk4`)*K*fK7y9h6zwP>!c-^DK=_b4rYItp8t`*aE84H9+ z%p|(GI6ps|-hcUeM%d+4HW@8{PFLZQqC$OtoLS0h_xo=2_H8nz{K!P>a=K3QRwnK& zM7o|SbTJQezLhMO(eW=@@snZcyMo&4EXQk;Gxa3!fRj3cRVFDxV<0Fn-7vS=$aog- z^=tayfeznAY3X2-Z=eJS-Eil9gdV?NsaKJn)iQy}*nivSz3pLgG4{x#;o=)zxbMVB zjF3s`eI=qrFqi8l402W!=2B82a-tEn z1RncrSH#h3(Ft#r_!SrAf5n@I>f3VSbFeufo3^csj!R=w?rYE`D|7D(^~ygn8U||l zbD64v^1F|)xZsl>J=Dtd)6Wla){-*3D1M-z@Fyd>NZKzksy303Jb&Mesh-h8$CaD5 z&aIHu59Z6CFh6E=MOa1;8aIdt+Uv*1Ye)%8`ldf=tf66|z*NPQ`8HPMzUR5UzlbAx zjP90r{bNCRo(d$HAz3uFvvP9#e?c0cEPdxXPI6;hjFHtt>UdofL7;UX73*-Xl2`*O zJPR0JLAe@$c`Ux>o^Ul&s0k$qFE1`ioLpeDTW-HexW_%O z32a(@U)bZr?_hUJqXb!Xr#{?x*#@UcatWLfg1C508QkOt;`1*+E3!oT@qOV~&z>Ci zmxb8|MJg2#S)rnRK%5tB04i#&Pp$7>#=AbDtE4wU3PL`%&T9nfgPW7UK`bzRbQT{! 
zX!3$*sbJ~4%9<%sHHe2Ci3z%lQ>Q=P3b9+@yAhvCPvG~Z%9RzIED~byH$VX>ghhZ9MpBxBpCKAu_I@@>U75 zr;MU|W?~C{9nA{#hz;M`pNT->mqTfbFjt|KwX{_mO+__q#atK=H=cDHw(l;c=@BaR zMx_Pata_7X-WSf;`mn)czJtHqIxzS?0Z_4Wy$H4+<6(|RSt{ua5(}oZ%oP#=N|ulG z6+c50M5^a?9(h(XKT48g=vfH^?u8cs0p%`56!i8ZH1e-rGKtaoNnA+^C3KVL2kB!W zZfO)C4)PZcCK)*Hg50R7VkL)3Tu~(A_hwO&b7z0Bd7>m~=|E*TE!h(a_|0x}O`@Yx zhtyQSSpFg=aqq7RfgGQX75qAk(I`EzgmeS#HOo;K!!|mk`%Io610zN>kOn9WBGp+X*!YvY=5-5x~U1?;iGfWR0Fswty1tm)ikE?w;)R}N9Mr<<57F|Q@o+m6k1 za6b296+ADp6u(KHItG%24DN^7HVtJ6nQFns z;ydu!uXN#e$t=2(PZ=v*JfI0n_gL`#e-tw>aFLyqdh#d!13xW}BJsmW$CtmLve6gt zoA0v*ta|*_s2ddaD65Vbm;t3P1ECSkWY<*Zd#@6MeBsoF$Q1BU!<-RS6setDLt0~``%lbovAuB(MJU&> z)AfVqz5Dru*qU0IXQxwlQJI~opD9uy!}4}{{cq+B>$~TRVF{_RT3>f1l_jgyd5S|Z z4SCo%w4@6so)t5!)FOjjts(};xydQv(L)6OQrcupF}CL|SLmaHe$DVk<(L0b46rMB zua~#`>eEE&plKap!T2d;7}M3s@#+psSrkkilVHT`RF)FIiNjO|i( z<9^#zZeb`fg0#XpVd|~PCyihI$GR-|9aA}SWUiXDLG*U(oqvW~3SGxvC?sowPX_^? zaSHhI=L%LyEXa4wylsQ0pIY2v&RR6Qg362{p8Im|#PZ|P8zG|lwI>zVt?rCBuC*>a zBX6Sp7FZje!P9TDqeB6T+J55(_2-89IdT)8I06RXA7PnD&OXvbW^%-qsq)*VyMbE4 zvkYV#ZOu*}f2Ep#jUtJ&*1&m46|lVyvxx{aCnE586hU6UIWiLg+d!*>Q%ycoGt3xVXxoM7N|>TD)F*WXmg=nz8>jegWd~>FHE~hjLAR#C!C~k=ks2LKnUG>=*d`vMH5|pRkf2k8sQ5IZlK1* zB?yrdNKdFaZQeNknbi#;Mu-SU957$vf?6UW!1@pM>7pRIE16bPQ{0? z*ysD*+la27izd^wF|OhJQ*NdNj$W2d`df&=b6!sDIAi=}q!zKaBk zqRV(WY42eLWC1Qy9fOyFHdJIM3ZW~?Wr4ML3K_jiGX$*4@m0a@&Xq1iL4C0A!5-v~ z;AkVJ#S)}<$MH;$lB}%E{BhX9-Cx7w>UzNLh)x3u`6O(;>jCCsu5IKm7?%vhZh4y0 z=JS0E&}DUj+@2#{H(p3`RRlIGAwncR_AU74?Lhg8F-jk)@H{sQZ6QCV=0lq>@iMQR zp3H>c5&wX&tH<3-pj#dys{nX&YqUJ}t07ovkxEig3nuUze`P_>r$s9zKdx1MM>iu5 zYS}aPUc~4(sPk`UrA%CFGf}M0AdSOr~=wHDMXG*#qM!NL)uHkPs z(&g%+8%u7={D-mPOyHkf(fCSL0TYTFsHD;PW+NtM@Z9L!8%{+P{^ps=PE#>|`tI~N zW1^4(t$5_ApuKYkqC)0o>Hz}}F8j`W2)81>zsv~9Dyg*)(>GZBD07PVwax{Z#l;HA z3nW&g_(-0u2_9>0Ipvl=zXiWx6IFql9J4EjMj-W~3SQt}Ll^jFepu9u5mXaX1jLUH z{h~}ytj&W0vgH5CF1$&_72O1{Cg%>^ zf?fTe1Bs(6S8g#sQj~kfKXN>LUoCC5CiKg~K0gtGy_ljbe*gms2Qcfldf+q(*n2lS zjVDAu1U>$MfltulsBaoN)}Xw?)^vzpd~i#+AsjZYwO3W5@dAlK(?3SvUkOu9};m=o~}bnI|`;K(wyzvO)@tMNoRYPeja zMNS5$Xa8=fu$9>Ym$z183!S?Ia%0{&Hl=Js~xcuhl!djq%d)(PzX z>Mit2Gm;1a8Ioq0F(5+tV(zne9YGV~?}D0Hd*vfAFpxtECPcuIY)*wgv*g|SL3RJu7ncgEg9Ft$sR<9&qE9=`D3QvQ*{U#^M;; zZCN_Bq`s}K;lfX{@O^TK&HJT_2I)l` zydsbZ4Sh)`?S!Dk?SWwqx@>XH0wUFA4RDD5s$FoDH<$i!KFiw9Q1c*i*c|Hrh{&(8odnXWGQ9EFTkiN)^TzUqi(t z#_UuQ>dYxxyZtMD&;|We3Hy!`WW6i9`_XjNAXw2ZlLKlK&j#yRP3E-_0q~ZtB~xru z;YHBQ0!-oM&cP#UOfj15e10FxU7%mPffwWXw}cTY==K!g^4M-nMh~h&XGN)NmgVY( zrBUVpkTrXVrFSslc8OHXlCWseD)*3+OxTW=GqVp+Xf|S3)xT3r!9@Jht?4$eFl~&X zyyYWs1oDPql7ehpTfY96s4s2$VHX!3qstF|u z6q}Y`C{2QT@^6hlGx}T=)V)5LX1XM;Pq*gi^4;P4Occ=dB$hB`{A(aBX?AHL_^yGZ zQA<6VU(;mmwHgQNtH^s*3md-FK|yQvv*P22Hnjl9)6h+-o-~r=yYiH_Bm(IrqNju8?x7PfuuiUGBmVJR#5Hru> z#MLBe0=?IVaa?Wx=zg6e9jiu5%DEU3PT^roK4X-AwoiVldPRx4o)qP0%Izf2o7oT= ze@;ShR%>7T)M!pTrfbh4Mj%wGvqB;Za`JTV;qdB7PP#d=@;f(64ObjQJhtXpwqdzj zbR8#BavK?ipYXSN# z!-B&Z9gw%~G+Ek)o|J0vqK_(FTD&F1DevG=J1_Ry4x*25xv%z1HR5{Gy`jzmm9qd}B2{jeWE|#a$aVF7*i1$g zTYX2st?;!TbWosqvm`kTKSXtppG3_&Z~k@K5ZQ;*@{x_%s>&l`1=cA{uhp{;`sl7y zBxw7jKYsXbVQ((v^J8v*+f8{Y;bkY%q|k=jvQujeAY_@hehY1YuA<>LSEWZ$pTY&} z%Pc1uwaE${#(ZHw>h^zOX~PR|!G!1OtA!$344rcO+f$PreTr+Goc-G!kA`+(`(i|) zxeDCDQFq7-PD!&Zuc>YP=r0w7Gbz{!bGVeGICI-Gds%fE+hu+x-Dq2`j53gp^OrR> z`soh@*#yitwJTjeW7?m7t~r8b)55}~NNr-j2~Wn>H0#{GCX!Af z2P4c+!EJDI|GH0pIz~$BaVKvy`9X5noVFRqN945}U{!Rga4(#iI%+A+I{L=;`JE_2 z()gcocnqo_9BUPvfy$!XRUpKUsSHlLu)?B}U+y|d1)A(M{wlAiI;DtZew}rzWM(Ut zmM&zBIqUi;eS&#nQ+4(}gG{~_?mrUWdl%U~!tpfhg>#kA*Dsc!DpyB0{MJ5eN1!MX zznf_UXNsM^5Sv+xF#p<_>X6r8 zkKg%1v)P!B7T9NWF@{oJ4R~iPPz~wi?wSzu-mb=P?OBS*zYu>1M*oMfR=<#*n@#S+ 
zTSR)6aG225Ww$exaNLA&ib%D#AXoCX;81~T>_h1Ynz#|yvM|dTBRc(Xt6U*hhtK#5 zb^z^Pm*-|&I|Vcs7|`;N-KQ)xb6UYKFPa#-D|%g{%)uSXD zU~2KAR~4ef6YM2+xCi1UH;52f-Kix5_oQ#BRt^6DoSvMZujOkwHCseQrT<K$1pYT$*x}KLpS097qgy)v?AN z)L{6$+R&~i!2U-B&IVH1jErk3bfxlgH8ujfRgSUR{JtC%GuQsD{W_%fj-B3f{y;9-r}4j;U-SdAhdj_TUrsw7Vmw((!SmY*GEdi*X6wyo#Ng`BmwRMzgH{ zW#R!I>%u4^zsX)HlBM0Y?O+43M1+v;yeot>mCUBb$ORDYl_Z$8> zUR?R*$!DK!Vb_gLuR7<>xv~P(A zMGK?{X@Dj52DA@L2=zHF>UHIG;f-vb<@ojMX?)kz$%Nnoz2&v`BcwT)P}=mb+QXU> zD61N$dptqK^|R8v4hhTwIbscLfpTi7fi%(a>V$x>OSkd>_NS%lMsfns2&HbRtv=DO5!3Q!gSmmS0lOZvu ziN#4-gUNo_oxkWxIy>idKhPueo)m)u`;yY{y7OG-PO%Sx+NtlIHNmb)Qrrb?ui8It zapmsWanNm?^P6qr=dn)Ypv{tt$?g=k3Wbgbc7<ZDKVQ1-oKvR+7GLv{)&@$fhiu zrp|%{P)IJfB>YYg>Rx}&?l!#{gCHwdF-HGV@g^H{QIavwx$ggcu&hI8=!zxAi;*<( zRR-}tKaZaKO#M?pJrs_@##NbCqJgf46C6};HgvBoF^n=)f1$adh?ZMF64d@o&Wcmpv_%~3#LqKzroF8>NZjpP*#+fZ-Fk65ag<~ zTqY$PugT$PAa{DnSaZxi3P*8 zb=i|;$Qca5Bl9NnX>Lb&e&nMMJbL%+tX?&;C4E#j?}ystGqT=4E-#Y0Ru}6sm2z3O zLWIg%;a`LJF(KGUq7GlJ9LiP1EtMK{&DdB9rcnO;CIiAD-`q&Q56Er$?~wX#-vt5E ze}xM7|0vSXLLro3as&kOc zvx@RnAj~A8F?H7p*DV6S{!Ez*I-$GuBJ%C+qp+)UIFysCMmBZlf5lq-l(wULRyA5(X>?Ad=Py}fHknnpK-eQ z_g4(d97y_kd5(APJsy4iI}-LW(E#D7g=PPEx1+92@>|{dJYlUbl60BC+7SxlT`t== zoFCG&TK)KLC+TkuJCUedo}Wm_d8hUM%?U)f+<3Rvhtq+37QWh32p%Wg*=qecZxP-K zdoSkoNa?WN)j{^**b0m8M&`>rdsdd5bcb z7|}biTYcQndv0^s6x{S*AskTPfdn=K4vy7E<>v#7pY{fP&i(}|%^biQycM`9m48tw z^d9SBkC(?WN`fA8tyyk8}ytUi8=`8nWJsqnU+io6_C zITwm^?fsS`mwzR@6n-o6vh_4RfbVPNc=q&P2q6Io%nXB4MgK?nCo4&G;UNo9GWHtR zUY|g^T?*^{@{|Cv+p{L0Gxz^A0Jsw|_ct;`;qKGV-8iwPi33%b=|2&8ndWK@{x##3 zKJg1__MhAS^GNtX_<($M@WNW=&g*OJ{wvT8u|4xn_41NnsjC`ri?g{8$9)y#_-QwF z6J(bHK81gI>9K&HxO?Aq*0^By*YRP|X4|jq;iY_S#-dl=?}iCq<~K&x1BYf)zdmdp zLsE1ug`wmUSm3k8t4`mJ{zzZzD~7#?2wQ8(_h!XkGR^=(NZ6yA3Ih>Q|Vqm0fa_ZPu2xRrHgRKtN8bP{;J=Y0sjRD%6Ik0J?~k`UV^k+ zLmEZ3S^h5GeR#Tse0)BAZr;%pd0m27X|uFr=kT@pIvFXixakirJL$z5-tIvc%G9>Q zFSwLH$3U5Rj2hpuT{bD?}%9XU~^I(l$y zyh;7~*`ixljdxM6+cdVWvA!|b+q#5jzRqdK#LUzTUmi~)fH)i2O7d+U zpD|o+;{&azJAbBje(B`Fa@ez|dloRgJd!0Wsr$R(gWaiq(+r@6)$M!RCX8PTk7aNd z4KBiC;NLP~gq5vse!o!CE`Em||M#)V3}2z$9DeR?TYRjQ(QS223=pg~4lmi|LzG>8 zoHEvSCaaE$BE7AdrTa(pnDba3{5u0{Xk+i5;^DO_6y8`??dZ?jTBySG?KYwGD A&Hw-a literal 0 HcmV?d00001 diff --git a/viz/README.md b/viz/README.md new file mode 100644 index 0000000..81cc838 --- /dev/null +++ b/viz/README.md @@ -0,0 +1,27 @@ + + +### Viz & Sim + +- streetscape.gl + - https://www.youtube.com/watch?v=irS9H0fU-ig + - https://github.com/prodramp/DeepWorks/tree/main/selfdrivingtech/streetscape.gl_demo +- apollo-DreamView + - https://github.com/ApolloAuto/apollo/tree/master/modules/dreamview +- Carla + - https://www.bilibili.com/video/BV1eN4y1Z7Zy +- lgsvl https://github.com/lgsvl/simulator +- Foxglove Studio https://github.com/foxglove/studio + - https://github.com/foxglove/nuscenes2mcap \ No newline at end of file diff --git a/viz/carla_apollo_bridge b/viz/carla_apollo_bridge new file mode 160000 index 0000000..a54a4b1 --- /dev/null +++ b/viz/carla_apollo_bridge @@ -0,0 +1 @@ +Subproject commit a54a4b10f6c0aa0c27b0b39f45a40d039f5fdbd0 diff --git a/viz/selfdrivingtech/README.md b/viz/selfdrivingtech/README.md new file mode 100644 index 0000000..aecfce5 --- /dev/null +++ b/viz/selfdrivingtech/README.md @@ -0,0 +1,51 @@ +# Various Self-Driving Technologies # + + + + + + + +
🔥  Tutorial #1: 🔥  What does self-driving car see? You too can see as an autonomous ai engineer..
+
+
+[![What does a self-driving car see? You too can see as an autonomous AI engineer](https://img.youtube.com/vi/AdjLjQ77XQo/0.jpg)](https://www.youtube.com/watch?v=AdjLjQ77XQo)
+
+ + + + + + + + +
🔥  Tutorial #2: 🔥  Transform open-source self-driving cars data to analyze and visualize locally
+
+
+[![Transform open-source self-driving car data to analyze and visualize locally](https://img.youtube.com/vi/irS9H0fU-ig/0.jpg)](https://www.youtube.com/watch?v=irS9H0fU-ig)
+
+
+
+## Aurora (owner of Uber's self-driving tech) ##
+- [xviz](https://github.com/aurora-opensource/xviz)
+- [streetscape.gl](https://github.com/aurora-opensource/streetscape.gl)
+
+
+## Self-Driving Open-source Resources ##
+- [NuScenes Development Kit](https://github.com/nutonomy/nuscenes-devkit)
+- [Waymo Open Dataset Repo](https://github.com/waymo-research/waymo-open-dataset/)
+
+
+## Self-driving dataset providers ##
+- [Kitti](http://www.cvlibs.net/datasets/kitti/)
+- [NuScenes](https://www.nuscenes.org/nuscenes#data-collection)
+- [Waymo Open Dataset](https://waymo.com/open/)
+- [Level 5 Dataset (Lyft)](https://level-5.global/data/)
+- [Pandaset from Scale.ai](https://scale.com/open-datasets/pandaset)
+
+
+## Articles and Resources ##
+- https://analyticsindiamag.com/top-10-popular-datasets-for-autonomous-driving-projects/
+
diff --git a/viz/selfdrivingtech/streetscape.gl_demo/README.md b/viz/selfdrivingtech/streetscape.gl_demo/README.md
new file mode 100644
index 0000000..5938e03
--- /dev/null
+++ b/viz/selfdrivingtech/streetscape.gl_demo/README.md
@@ -0,0 +1,54 @@
+# My work on the streetscape.gl project #
+
+
+## Project: Visualize XVIZ-protocol-transformed data with streetscape.gl ##
+
+### Step 1: Familiarize yourself with the streetscape.gl code and example
+- streetscape.gl source: https://github.com/aurora-opensource/streetscape.gl
+- Get the get-started sample from the streetscape.gl project
+  - https://github.com/aurora-opensource/streetscape.gl/tree/master/examples/get-started
+- Review the above code
+- Note: this code is designed to read the KITTI dataset directly from the web
+- You don't need to download any dataset locally on your machine to make this sample work
+- How to run
+  - yarn or npm install
+  - Create a .env file in the root of this project and add your Mapbox token as below:
+    MapboxAccessToken=pk.eyJ1IjoiZm9kZ2Fib3JtYXRoIiwiYSI6ImNrZmY3Nzc2bjBiemkyeG8zdGNzcXgzMGIifQ.J0dZhMiuZTPVexL8nrpS6Q
+  - {npm | yarn} start
+  - Visit http://localhost:8080
+
+
+### Step 2: Get XVIZ-protocol data
+- Download XVIZ-transformed data for the KITTI dataset
+  - $ wget https://raw.githubusercontent.com/uber/xviz-data/master/kitti/2011_09_26_drive_0005_sync/0-frame.json
+  - $ wget https://raw.githubusercontent.com/uber/xviz-data/master/kitti/2011_09_26_drive_0005_sync/{1..155}-frame.glb
+  - Note: you should have one 0-frame.json and 155 {1..155}-frame.glb files, 156 files in total
+- Download XVIZ-transformed data for the NuScenes v0.1 dataset
+  - $ wget https://raw.githubusercontent.com/uber/xviz-data/master/nutonomy/scene-0006/0-frame.json
+  - $ wget https://raw.githubusercontent.com/uber/xviz-data/master/nutonomy/scene-0006/{1..390}-frame.glb
+  - Note: you should have one 0-frame.json and 390 {1..390}-frame.glb files, 391 files in total
+
+
+### Step 3: Update the existing streetscape.gl example to React 17 and Chakra UI with my code
+- Please download the get-started folder from the link below (it's in the same folder as this README)
+  - https://github.com/prodramp/DeepWorks/tree/main/selfdrivingtech/streetscape.gl_demo/get-started
+- Add both the KITTI and NuScenes data you downloaded in Step 2
+  - Create a folder named 'kitti/2011_09_26' in the project root
+  - Copy all the KITTI files (0-frame.json and 1-frame.glb to 155-frame.glb) into the 2011_09_26 sub-folder
+  - Create the folder/sub-folder 'nuscenes/v0.1' in the project root
+  - Copy all the NuScenes files (0-frame.json and 1-frame.glb to 390-frame.glb) into the v0.1 sub-folder
+- Your project tree should look like the one below:
+  - index.html
+  - kitti
+    - 2011_09_26 (this folder should contain 156 files in total)
+  - nuscenes
+    - v0.1 (this folder should contain 391 files in total)
+  - node_modules
+  - src
+  - package.json
+  - .env
+    MapboxAccessToken=pk.eyJ1IjoiZm9kZ2Fib3JtYXRoIiwiYSI6ImNrZmY3Nzc2bjBiemkyeG8zdGNzcXgzMGIifQ.J0dZhMiuZTPVexL8nrpS6Q
+- How to run the example:
+  - npm install
+  - npm start
+  - Visit http://localhost:8080
diff --git a/viz/selfdrivingtech/streetscape.gl_demo/get-started/README.md b/viz/selfdrivingtech/streetscape.gl_demo/get-started/README.md
new file mode 100644
index 0000000..a45539e
--- /dev/null
+++ b/viz/selfdrivingtech/streetscape.gl_demo/get-started/README.md
@@ -0,0 +1,10 @@
+# streetscape.gl Starter Kit
+
+This is a minimal example that uses the components from streetscape.gl to display an XVIZ log.
+
+[Instructions for running this application](../../docs/get-started/starter-kit.md)
+
+Note: This sample code was updated by @avkashchauhan.
+
+Please follow the directions in the link below to run this code:
+  - https://github.com/prodramp/DeepWorks/tree/main/selfdrivingtech/streetscape.gl_demo
diff --git a/viz/selfdrivingtech/streetscape.gl_demo/get-started/index.html b/viz/selfdrivingtech/streetscape.gl_demo/get-started/index.html
new file mode 100644
index 0000000..8a19e7c
--- /dev/null
+++ b/viz/selfdrivingtech/streetscape.gl_demo/get-started/index.html
@@ -0,0 +1,26 @@
+
+
+
+  streetscape.gl quick start
+
+
+
+

+ + + + + diff --git a/viz/selfdrivingtech/streetscape.gl_demo/get-started/package.json b/viz/selfdrivingtech/streetscape.gl_demo/get-started/package.json new file mode 100644 index 0000000..cc90b73 --- /dev/null +++ b/viz/selfdrivingtech/streetscape.gl_demo/get-started/package.json @@ -0,0 +1,40 @@ +{ + "name": "streetscape.gl-quick-start", + "description": "A template app of streetscape.gl", + "version": "0.1.0", + "scripts": { + "start-local": "webpack-dev-server --env.local --progress --hot --open", + "start-streaming-local": "webpack-dev-server --env.local --env.stream --progress --hot --open", + "start-live-local": "webpack-dev-server --env.local --env.live --progress --hot --open", + "start": "webpack-dev-server --progress --hot --open", + "start-streaming": "webpack-dev-server --env.stream --progress --hot --open", + "start-live": "webpack-dev-server --env.live --progress --hot --open" + }, + "dependencies": { + "@chakra-ui/react": "1.7.4", + "@emotion/react": "^11.7.1", + "@emotion/styled": "^11.6.0", + "@testing-library/jest-dom": "^5.16.1", + "@testing-library/react": "^10.4.9", + "@testing-library/user-event": "^12.8.3", + "framer-motion": "^4.1.17", + "react": "^17.0.2", + "react-dom": "^17.0.2", + "react-router": "^5.2.1", + "react-router-dom": "^5.2.1", + "react-scripts": "5.0.0", + "streetscape.gl": "1.0.11" + }, + "devDependencies": { + "@babel/cli": "^7.0.0", + "@babel/core": "^7.0.0", + "@babel/plugin-proposal-class-properties": "^7.0.0", + "@babel/preset-env": "^7.0.0", + "@babel/preset-react": "^7.0.0", + "babel-loader": "^8.0.0", + "source-map-loader": "^0.2.3", + "webpack": "^4.20.0", + "webpack-cli": "^3.1.2", + "webpack-dev-server": "^3.1.1" + } +} diff --git a/viz/selfdrivingtech/streetscape.gl_demo/get-started/src/app.js b/viz/selfdrivingtech/streetscape.gl_demo/get-started/src/app.js new file mode 100644 index 0000000..4c73ef1 --- /dev/null +++ b/viz/selfdrivingtech/streetscape.gl_demo/get-started/src/app.js @@ -0,0 +1,220 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
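+
+// Overview of this demo component (summary inferred from the code below):
+// it builds two XVIZ logs -- KITTI and NuScenes -- from the loaders
+// exported by ./log-from-file, connects both in componentDidMount, and
+// uses Chakra UI Select/Tabs controls to switch the displayed dataset and
+// the Mapbox style (MAP_STYLE['light'] or MAP_STYLE['dark']).
+// PlaybackControl plus the meter, turn-signal and traffic-light widgets
+// make up the playback HUD. Note that only ./log-from-file exports
+// {kittiData, nuscenesData}; log-from-stream and log-from-live export a
+// single default loader, so the dual-log UI presumably only works in the
+// static-file configuration.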
+ +/* global document, console */ +/* eslint-disable no-console, no-unused-vars, no-undef */ +import React, {PureComponent} from 'react'; +import {render} from 'react-dom'; +import { + ChakraProvider, + Box, + HStack, Select, Tabs, TabList, TabPanels, Tab, TabPanel, + Text, + Link, + VStack, + Code, + Grid, + theme, +} from '@chakra-ui/react'; + +import {setXVIZConfig, getXVIZConfig} from '@xviz/parser'; +import { + LogViewer, + PlaybackControl, + StreamSettingsPanel, + MeterWidget, + TrafficLightWidget, + TurnSignalWidget, + XVIZPanel, + VIEW_MODE +} from 'streetscape.gl'; +import {Form} from '@streetscape.gl/monochrome'; + +import {XVIZ_CONFIG, APP_SETTINGS, MAPBOX_TOKEN, MAP_STYLE, XVIZ_STYLE, CAR} from './constants'; + +setXVIZConfig(XVIZ_CONFIG); + +const TIMEFORMAT_SCALE = getXVIZConfig().TIMESTAMP_FORMAT === 'seconds' ? 1000 : 1; + +// __IS_STREAMING__ and __IS_LIVE__ are defined in webpack.config.js +const exampleLog = require(__IS_STREAMING__ + ? './log-from-stream' + : __IS_LIVE__ + ? './log-from-live' + : './log-from-file'); + +class Example extends PureComponent { + state = { + dataChoice: 'kitti', + logK: exampleLog.kittiData, + logN: exampleLog.nuscenesData, + styleValue:'light', + mapStyle: MAP_STYLE['light'], + settings: { + viewMode: 'PERSPECTIVE', + showTooltip: false + } + }; + + handleMapChange = (e) => { + this.setState({ + mapStyle:MAP_STYLE[e.target.value], + styleValue:e.target.value + }) + } + + handleChange = (e) => { + this.setState({ + dataChoice:e.target.value + }) + }; + + componentDidMount() { + this.state.logK.on('error', console.error).connect(); + this.state.logN.on('error', console.error).connect(); + } + + _onSettingsChange = changedSettings => { + this.setState({ + settings: {...this.state.settings, ...changedSettings} + }); + }; + + render() { + const {logK, logN, settings} = this.state; + + return ( + + + + + + + Dataset View + Settings + + + + + + + + + + + +
+ + + + + Charts (Metrics) + Streams (Objects) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + (x > 6 ? 'FAST' : '')} + min={0} + max={20} + /> + + + + + new Date(x * TIMEFORMAT_SCALE).toUTCString()} + /> + + + + + + ); + } +} + +render(, document.getElementById('app')); diff --git a/viz/selfdrivingtech/streetscape.gl_demo/get-started/src/constants.js b/viz/selfdrivingtech/streetscape.gl_demo/get-started/src/constants.js new file mode 100644 index 0000000..7e8c72f --- /dev/null +++ b/viz/selfdrivingtech/streetscape.gl_demo/get-started/src/constants.js @@ -0,0 +1,64 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. +import {CarMesh} from 'streetscape.gl'; + +/* eslint-disable camelcase */ +// MapboxAccessToken +//export const MAPBOX_TOKEN = process.env.MapboxAccessToken; // eslint-disable-line +export const MAPBOX_TOKEN = 'pk.eyJ1IjoiZm9kZ2Fib3JtYXRoIiwiYSI6ImNrZmY3Nzc2bjBiemkyeG8zdGNzcXgzMGIifQ.J0dZhMiuZTPVexL8nrpS6Q' + +export const MAP_STYLE = { + 'light': 'mapbox://styles/mapbox/light-v9', + 'dark': 'mapbox://styles/mapbox/dark-v10' +} ; +// export const MAP_STYLE = 'mapbox://styles/mapbox/dark-v10'; +// export const MAP_STYLE = 'mapbox://styles/mapbox/streets-v11'; +// export const MAP_STYLE = 'mapbox://styles/mapbox/satellite-streets-v11'; + +export const XVIZ_CONFIG = { + PLAYBACK_FRAME_RATE: 10, + ALLOW_MISSING_PRIMARY_POSE: true +}; + +export const CAR = CarMesh.sedan({ + origin: [1.08, -0.32, 0], + length: 4.3, + width: 2.2, + height: 1.5, + color: [160, 160, 160] +}); + +export const APP_SETTINGS = { + viewMode: { + type: 'select', + title: 'View Mode', + data: {TOP_DOWN: 'Top Down', PERSPECTIVE: 'Perspective', DRIVER: 'Driver'} + }, + showTooltip: { + type: 'toggle', + title: 'Show Tooltip' + } +}; + +export const XVIZ_STYLE = { + '/tracklets/objects': [{name: 'selected', style: {fill_color: '#ff8000aa'}}], + '/tracklets/label': [{style: {fill_color: '#00008B'}}], + '/lidar/points': [{style: {point_color_mode: 'ELEVATION'}}] +}; diff --git a/viz/selfdrivingtech/streetscape.gl_demo/get-started/src/log-from-file.js b/viz/selfdrivingtech/streetscape.gl_demo/get-started/src/log-from-file.js new file mode 100644 index 0000000..ac2b310 --- /dev/null +++ b/viz/selfdrivingtech/streetscape.gl_demo/get-started/src/log-from-file.js @@ -0,0 +1,84 @@ +// Copyright (c) 2019 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +import {XVIZFileLoader} from 'streetscape.gl'; +import { resolve } from 'path'; + +const KITTI_DATA_DIR = resolve(__dirname, "./kitti"); + +const NUSCENE_DATA_DIR = resolve(__dirname, "./nuscenes"); + + +const nuscenesData = new XVIZFileLoader( + { + timingsFilePath: + `${NUSCENE_DATA_DIR}/v0.1/0-frame.json`, + getFilePath: index => + `${NUSCENE_DATA_DIR}/v0.1/${index +1}-frame.glb`, + worker: true, + maxConcurrency: 4 + } + ); + +const kittiData = new XVIZFileLoader( + { + timingsFilePath: + `${KITTI_DATA_DIR}/2011_09_26/0-frame.json`, + getFilePath: index => + `${KITTI_DATA_DIR}/2011_09_26/${index + + 1}-frame.glb`, + worker: true, + maxConcurrency: 4 + } +); + +export {nuscenesData, kittiData}; + +// export default new XVIZFileLoader( +// { +// timingsFilePath: +// `${NUSCENE_DATA_DIR}/v0.1/0-frame.json`, +// getFilePath: index => +// `${NUSCENE_DATA_DIR}/v0.1/${index +1}-frame.glb`, +// worker: true, +// maxConcurrency: 4 +// } +// ); + + +// export default new XVIZFileLoader({ +// timingsFilePath: +// `${KITTI_DATA_DIR}/2011_09_26/2011_09_26_drive_0005_sync/0-frame.json`, +// getFilePath: index => +// `${KITTI_DATA_DIR}/2011_09_26/2011_09_26_drive_0005_sync/${index + +// 1}-frame.glb`, +// worker: true, +// maxConcurrency: 4 +// }); + +// export default new XVIZFileLoader({ +// timingsFilePath: +// 'https://raw.githubusercontent.com/uber/xviz-data/master/kitti/2011_09_26_drive_0005_sync/0-frame.json', +// getFilePath: index => +// `https://raw.githubusercontent.com/uber/xviz-data/master/kitti/2011_09_26_drive_0005_sync/${index + +// 1}-frame.glb`, +// worker: true, +// maxConcurrency: 4 +// }); diff --git a/viz/selfdrivingtech/streetscape.gl_demo/get-started/src/log-from-live.js b/viz/selfdrivingtech/streetscape.gl_demo/get-started/src/log-from-live.js new file mode 100644 index 0000000..c1a9beb --- /dev/null +++ b/viz/selfdrivingtech/streetscape.gl_demo/get-started/src/log-from-live.js @@ -0,0 +1,32 @@ +// Copyright (c) 2019 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +import {XVIZLiveLoader} from 'streetscape.gl'; + +export default new XVIZLiveLoader({ + logGuid: 'mock', + bufferLength: 10, + serverConfig: { + defaultLogLength: 30, + serverUrl: 'ws://localhost:8081' + }, + worker: true, + maxConcurrency: 4 +}); diff --git a/viz/selfdrivingtech/streetscape.gl_demo/get-started/src/log-from-stream.js b/viz/selfdrivingtech/streetscape.gl_demo/get-started/src/log-from-stream.js new file mode 100644 index 0000000..ed8892f --- /dev/null +++ b/viz/selfdrivingtech/streetscape.gl_demo/get-started/src/log-from-stream.js @@ -0,0 +1,37 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
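+
+// How this loader differs from log-from-file: it streams XVIZ data over a
+// WebSocket instead of reading static frame files. The page's query-string
+// parameters are spread into the options first, so the explicit settings
+// below (logGuid, serverConfig, worker, maxConcurrency) take precedence
+// over same-named URL parameters. It expects an XVIZ server listening at
+// ws://localhost:8081, with window.location.pathname appended to the URL.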
+ +/* global URLSearchParams, window */ + +import {XVIZStreamLoader} from 'streetscape.gl'; + +const params = new URLSearchParams(window.location.search); + +export default new XVIZStreamLoader({ + ...Object.fromEntries(params.entries()), + logGuid: 'mock', + // bufferLength: 15, + serverConfig: { + defaultLogLength: 30, + serverUrl: `ws://localhost:8081${window.location.pathname}` + }, + worker: true, + maxConcurrency: 4 +}); diff --git a/viz/selfdrivingtech/streetscape.gl_demo/get-started/webpack.config.js b/viz/selfdrivingtech/streetscape.gl_demo/get-started/webpack.config.js new file mode 100644 index 0000000..f4849de --- /dev/null +++ b/viz/selfdrivingtech/streetscape.gl_demo/get-started/webpack.config.js @@ -0,0 +1,76 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +/* eslint-disable no-process-env */ +const {resolve} = require('path'); +const webpack = require('webpack'); + +const BABEL_CONFIG = { + presets: ['@babel/preset-env', '@babel/preset-react'], + plugins: ['@babel/proposal-class-properties'] +}; + +const CONFIG = { + mode: 'development', + devServer: { + historyApiFallback: true + }, + entry: { + app: resolve('./src/app.js') + }, + devtool: 'source-map', + output: { + path: resolve('./dist'), + filename: 'bundle.js' + }, + module: { + noParse: /(mapbox-gl)\.js$/, + rules: [ + { + // Compile ES2015 using bable + test: /\.js$/, + exclude: /node_modules/, + loader: 'babel-loader', + options: BABEL_CONFIG + } + ] + }, + plugins: [ + new webpack.HotModuleReplacementPlugin(), + new webpack.EnvironmentPlugin(['MapboxAccessToken']) + ] +}; + +module.exports = (env = {}) => { + let config = Object.assign({}, CONFIG); + + // This switch between streaming and static file loading + config.plugins = config.plugins.concat([ + new webpack.DefinePlugin({__IS_STREAMING__: JSON.stringify(Boolean(env.stream))}), + new webpack.DefinePlugin({__IS_LIVE__: JSON.stringify(Boolean(env.live))}) + ]); + + if (env.local) { + // This line enables bundling against src in this repo rather than installed module + config = require('../webpack.config.local')(config)(env); + } + + return config; +}; diff --git a/viz/streetscape.gl b/viz/streetscape.gl new file mode 160000 index 0000000..befae13 --- /dev/null +++ b/viz/streetscape.gl @@ -0,0 +1 @@ +Subproject commit befae1354ca8605c9f6cb1229b494858a8690e4f