diff --git a/.readthedocs.yml b/.readthedocs.yml index 49178bb133..6cfbf5d310 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -6,5 +6,4 @@ python: version: 3.7 install: - requirements: requirements/docs.txt - - requirements: requirements/runtime.txt - requirements: requirements/readthedocs.txt diff --git a/docs/en/_static/image/mmdet3d-logo.png b/docs/en/_static/image/mmdet3d-logo.png new file mode 100644 index 0000000000..f4076bd162 Binary files /dev/null and b/docs/en/_static/image/mmdet3d-logo.png differ diff --git a/docs/en/advanced_guides/index.rst b/docs/en/advanced_guides/index.rst index 9b6514961d..1faa4c57b3 100644 --- a/docs/en/advanced_guides/index.rst +++ b/docs/en/advanced_guides/index.rst @@ -1,18 +1,26 @@ +Datasets +************** + .. toctree:: :maxdepth: 1 - :caption: Datasets datasets/index.rst + +Supported Tasks +************** + .. toctree:: - :maxdepth: 1 - :caption: Supported Tasks + :maxdepth: 2 supported_tasks/index.rst +Customization +************** + .. toctree:: - :maxdepth: 1 + :maxdepth: 2 customize_dataset.md customize_models.md diff --git a/docs/en/api.rst b/docs/en/api.rst index 00153b9151..74b14c54f9 100644 --- a/docs/en/api.rst +++ b/docs/en/api.rst @@ -1,73 +1,76 @@ -mmdet3d.core +mmdet3d.apis -------------- +.. automodule:: mmdet3d.apis + :members: -anchor +mmdet3d.datasets +-------------- + +datasets ^^^^^^^^^^ -.. automodule:: mmdet3d.core.anchor +.. automodule:: mmdet3d.datasets :members: -bbox -^^^^^^^^^^ -.. automodule:: mmdet3d.core.bbox + +transforms +^^^^^^^^^^^^ +.. automodule:: mmdet3d.datasets.transforms :members: -evaluation +mmdet3d.engine +-------------- + +schedulers ^^^^^^^^^^ -.. automodule:: mmdet3d.core.evaluation +.. automodule:: mmdet3d.engine.schedulers :members: -visualizer -^^^^^^^^^^^^^^^ -.. automodule:: mmdet3d.core.visualizer - :members: +mmdet3d.evaluation +-------------- -voxel -^^^^^^^^^^^^^^^ -.. automodule:: mmdet3d.core.voxel +functional +^^^^^^^^^^ +.. 
automodule:: mmdet3d.evaluation.functional :members: -post_processing -^^^^^^^^^^^^^^^ -.. automodule:: mmdet3d.core.post_processing +metrics +^^^^^^^^^^ +.. automodule:: mmdet3d.evaluation.metrics :members: -mmdet3d.datasets ----------------- - -.. automodule:: mmdet3d.datasets - :members: mmdet3d.models -------------- -detectors -^^^^^^^^^^ -.. automodule:: mmdet3d.models.detectors - :members: - backbones ^^^^^^^^^^ .. automodule:: mmdet3d.models.backbones :members: -necks +data_preprocessors ^^^^^^^^^^ -.. automodule:: mmdet3d.models.necks +.. automodule:: mmdet3d.models.data_preprocessors :members: +decode_heads +^^^^^^^^^^^^ +.. automodule:: mmdet3d.models.decode_heads + :members: + + dense_heads ^^^^^^^^^^^^ .. automodule:: mmdet3d.models.dense_heads :members: -roi_heads +detectors ^^^^^^^^^^ -.. automodule:: mmdet3d.models.roi_heads +.. automodule:: mmdet3d.models.detectors :members: -fusion_layers -^^^^^^^^^^^^^ -.. automodule:: mmdet3d.models.fusion_layers +layers +^^^^^^^^^^ +.. automodule:: mmdet3d.models.layers :members: losses @@ -76,11 +79,74 @@ losses :members: middle_encoders -^^^^^^^^^^^^^^^ +^^^^^^^^^^^^ .. automodule:: mmdet3d.models.middle_encoders :members: -model_utils -^^^^^^^^^^^^^ -.. automodule:: mmdet3d.models.model_utils +necks +^^^^^^^^^^^^ +.. automodule:: mmdet3d.models.necks + :members: + +roi_heads +^^^^^^^^^^ +.. automodule:: mmdet3d.models.roi_heads + :members: + +segmentors +^^^^^^^^^^ +.. automodule:: mmdet3d.models.segmentors + :members: + +task_modules +^^^^^^^^^^ +.. automodule:: mmdet3d.models.task_modules + :members: + +test_time_augs +^^^^^^^^^^ +.. automodule:: mmdet3d.models.test_time_augs + :members: + +utils +^^^^^^^^^^ +.. automodule:: mmdet3d.models.utils + :members: + +voxel_encoders +^^^^^^^^^^ +.. automodule:: mmdet3d.models.voxel_encoders + :members: + +mmdet3d.structures +-------------- + +structures +^^^^^^^^^^ +.. automodule:: mmdet3d.structures + :members: + +bbox_3d +^^^^^^^^^^ +.. 
automodule:: mmdet3d.structures.bbox_3d + :members: + +ops +^^^^^^^^^^ +.. automodule:: mmdet3d.structures.ops + :members: + +points +^^^^^^^^^^ +.. automodule:: mmdet3d.structures.points + :members: + +mmdet3d.utils +-------------- +.. automodule:: mmdet3d.utils + :members: + +mmdet3d.visualization +-------------- +.. automodule:: mmdet3d.visualization :members: diff --git a/docs/en/index.rst b/docs/en/index.rst index 24ee00ee76..24017966c2 100644 --- a/docs/en/index.rst +++ b/docs/en/index.rst @@ -9,7 +9,7 @@ Welcome to MMDetection3D's documentation! getting_started.md .. toctree:: - :maxdepth: 2 + :maxdepth: 3 :caption: User Guides user_guides/index.rst @@ -40,7 +40,7 @@ Welcome to MMDetection3D's documentation! .. toctree:: - :maxdepth: 2 + :maxdepth: 1 :caption: Notes notes/index.rst diff --git a/docs/en/notes/changelog.md b/docs/en/notes/changelog.md index 748aa94d44..3ee06c004a 100644 --- a/docs/en/notes/changelog.md +++ b/docs/en/notes/changelog.md @@ -1,4 +1,4 @@ -## Changelog +# Changelog ### v1.0.0rc3 (8/6/2022) diff --git a/docs/en/notes/compatibility.md b/docs/en/notes/compatibility.md index b5376dffd1..385778355f 100644 --- a/docs/en/notes/compatibility.md +++ b/docs/en/notes/compatibility.md @@ -1,3 +1,5 @@ +# Compatibility + ## v1.0.0rc1 ### Operators Migration diff --git a/docs/en/notes/index.rst b/docs/en/notes/index.rst index f3063056c5..d43ae735c6 100644 --- a/docs/en/notes/index.rst +++ b/docs/en/notes/index.rst @@ -1,5 +1,5 @@ .. 
toctree:: - :maxdepth: 3 + :maxdepth: 1 benchmarks.md changelog.md diff --git a/docs/en/user_guides/1_exist_data_model.md b/docs/en/user_guides/1_exist_data_model.md index 8696dba6e2..810dc4bdc2 100644 --- a/docs/en/user_guides/1_exist_data_model.md +++ b/docs/en/user_guides/1_exist_data_model.md @@ -1,4 +1,4 @@ -# 1: Inference and train with existing models and standard datasets +# Inference and train ## Inference with existing models diff --git a/docs/en/user_guides/2_new_data_model.md b/docs/en/user_guides/2_new_data_model.md deleted file mode 100644 index 309fc6b64c..0000000000 --- a/docs/en/user_guides/2_new_data_model.md +++ /dev/null @@ -1,104 +0,0 @@ -# 2: Train with customized datasets - -In this note, you will know how to train and test predefined models with customized datasets. We use the Waymo dataset as an example to describe the whole process. - -The basic steps are as below: - -1. Prepare the customized dataset -2. Prepare a config -3. Train, test, inference models on the customized dataset. - -## Prepare the customized dataset - -There are three ways to support a new dataset in MMDetection3D: - -1. reorganize the dataset into existing format. -2. reorganize the dataset into a middle format. -3. implement a new dataset. - -Usually we recommend to use the first two methods which are usually easier than the third. - -In this note, we give an example for converting the data into KITTI format. - -**Note**: We take Waymo as the example here considering its format is totally different from other existing formats. For other datasets using similar methods to organize data, like Lyft compared to nuScenes, it would be easier to directly implement the new data converter (for the second approach above) instead of converting it to another format (for the first approach above). 
- -### KITTI dataset format - -Firstly, the raw data for 3D object detection from KITTI are typically organized as follows, where `ImageSets` contains split files indicating which files belong to training/validation/testing set, `calib` contains calibration information files, `image_2` and `velodyne` include image data and point cloud data, and `label_2` includes label files for 3D detection. - -``` -mmdetection3d -├── mmdet3d -├── tools -├── configs -├── data -│ ├── kitti -│ │ ├── ImageSets -│ │ ├── testing -│ │ │ ├── calib -│ │ │ ├── image_2 -│ │ │ ├── velodyne -│ │ ├── training -│ │ │ ├── calib -│ │ │ ├── image_2 -│ │ │ ├── label_2 -│ │ │ ├── velodyne -``` - -Specific annotation format is described in the official object development [kit](https://s3.eu-central-1.amazonaws.com/avg-kitti/devkit_object.zip). For example, it consists of the following labels: - -``` -#Values Name Description ----------------------------------------------------------------------------- - 1 type Describes the type of object: 'Car', 'Van', 'Truck', - 'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram', - 'Misc' or 'DontCare' - 1 truncated Float from 0 (non-truncated) to 1 (truncated), where - truncated refers to the object leaving image boundaries - 1 occluded Integer (0,1,2,3) indicating occlusion state: - 0 = fully visible, 1 = partly occluded - 2 = largely occluded, 3 = unknown - 1 alpha Observation angle of object, ranging [-pi..pi] - 4 bbox 2D bounding box of object in the image (0-based index): - contains left, top, right, bottom pixel coordinates - 3 dimensions 3D object dimensions: height, width, length (in meters) - 3 location 3D object location x,y,z in camera coordinates (in meters) - 1 rotation_y Rotation ry around Y-axis in camera coordinates [-pi..pi] - 1 score Only for results: Float, indicating confidence in - detection, needed for p/r curves, higher is better. -``` - -Assume we use the Waymo dataset. 
-After downloading the data, we need to implement a function to convert both the input data and annotation format into the KITTI style. Then we can implement WaymoDataset inherited from KittiDataset to load the data and perform training and evaluation. - -Specifically, we implement a waymo [converter](https://github.com/open-mmlab/mmdetection3d/blob/master/tools/dataset_converters/waymo_converter.py) to convert Waymo data into KITTI format and a waymo dataset [class](https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/datasets/waymo_dataset.py) to process it. Because we preprocess the raw data and reorganize it like KITTI, the dataset class could be implemented more easily by inheriting from KittiDataset. The last thing needed to be noted is the evaluation protocol you would like to use. Because Waymo has its own evaluation approach, we further incorporate it into our dataset class. Afterwards, users can successfully convert the data format and use `WaymoDataset` to train and evaluate the model. - -For more details about the intermediate results of preprocessing of Waymo dataset, please refer to its [tutorial](https://mmdetection3d.readthedocs.io/en/latest/datasets/waymo_det.html). - -## Prepare a config - -The second step is to prepare configs such that the dataset could be successfully loaded. In addition, adjusting hyperparameters is usually necessary to obtain decent performance in 3D detection. 
- -Suppose we would like to train PointPillars on Waymo to achieve 3D detection for 3 classes, vehicle, cyclist and pedestrian, we need to prepare dataset config like [this](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/_base_/datasets/waymoD5-3d-3class.py), model config like [this](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/_base_/models/hv_pointpillars_secfpn_waymo.py) and combine them like [this](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class.py), compared to KITTI [dataset config](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/_base_/datasets/kitti-3d-3class.py), [model config](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/_base_/models/hv_pointpillars_secfpn_kitti.py) and [overall](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/pointpillars/hv_pointpillars_secfpn_6x8_160e_kitti-3d-3class.py). - -## Train a new model - -To train a model with the new config, you can simply run - -```shell -python tools/train.py configs/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class.py -``` - -For more detailed usages, please refer to the [Case 1](https://mmdetection3d.readthedocs.io/en/latest/1_exist_data_model.html). - -## Test and inference - -To test the trained model, you can simply run - -```shell -python tools/test.py configs/pointpillars/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class.py work_dirs/hv_pointpillars_secfpn_sbn_2x16_2x_waymoD5-3d-3class/latest.pth --eval waymo -``` - -**Note**: To use Waymo evaluation protocol, you need to follow the [tutorial](https://mmdetection3d.readthedocs.io/en/latest/datasets/waymo_det.html) and prepare files related to metrics computation as official instructions. - -For more detailed usages for test and inference, please refer to the [Case 1](https://mmdetection3d.readthedocs.io/en/latest/1_exist_data_model.html). 
diff --git a/docs/en/user_guides/backends_support.md b/docs/en/user_guides/backends_support.md index 5304ccd6f4..b4fafd4e21 100644 --- a/docs/en/user_guides/backends_support.md +++ b/docs/en/user_guides/backends_support.md @@ -1,4 +1,4 @@ -# Tutorial 7: Backends Support +# Backends Support We support different file client backends: Disk, Ceph and LMDB, etc. Here is an example of how to modify configs for Ceph-based data loading and saving. diff --git a/docs/en/user_guides/config.md b/docs/en/user_guides/config.md index d0be37af5f..55e693365f 100644 --- a/docs/en/user_guides/config.md +++ b/docs/en/user_guides/config.md @@ -1,4 +1,4 @@ -# Tutorial 1: Learn about Configs +# Learn about Configs We incorporate modular and inheritance design into our config system, which is convenient to conduct various experiments. If you wish to inspect the config file, you may run `python tools/misc/print_config.py /PATH/TO/CONFIG` to see the complete config. diff --git a/docs/en/user_guides/coord_sys_tutorial.md b/docs/en/user_guides/coord_sys_tutorial.md index 02acee02fe..08d646e59f 100644 --- a/docs/en/user_guides/coord_sys_tutorial.md +++ b/docs/en/user_guides/coord_sys_tutorial.md @@ -1,4 +1,4 @@ -# Tutorial 6: Coordinate System +# Coordinate System ## Overview diff --git a/docs/en/user_guides/data_pipeline.md b/docs/en/user_guides/data_pipeline.md index 60dc18728f..5f8cd227b5 100644 --- a/docs/en/user_guides/data_pipeline.md +++ b/docs/en/user_guides/data_pipeline.md @@ -1,4 +1,4 @@ -# Tutorial 3: Customize Data Pipelines +# Customize Data Pipelines ## Design of Data pipelines diff --git a/docs/en/user_guides/index.rst b/docs/en/user_guides/index.rst index ab868d1934..7c5b53e1da 100644 --- a/docs/en/user_guides/index.rst +++ b/docs/en/user_guides/index.rst @@ -2,7 +2,6 @@ :maxdepth: 3 1_exist_data_model.md - 2_new_data_model.md backends_support.md config.md coord_sys_tutorial.md diff --git a/docs/en/user_guides/model_deployment.md b/docs/en/user_guides/model_deployment.md index 
0feaff79f1..4192ddb963 100644 --- a/docs/en/user_guides/model_deployment.md +++ b/docs/en/user_guides/model_deployment.md @@ -1,4 +1,4 @@ -# Tutorial 8: MMDetection3D model deployment +# Model deployment To meet the speed requirement of the model in practical use, usually, we deploy the trained model to inference backends. [MMDeploy](https://github.com/open-mmlab/mmdeploy) is OpenMMLab model deployment framework. Now MMDeploy has supported MMDetection3D model deployment, and you can deploy the trained model to inference backends by MMDeploy. diff --git a/docs/mv.py b/docs/mv.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/docs/zh_cn/_static/image/mmdet3d-logo.png b/docs/zh_cn/_static/image/mmdet3d-logo.png new file mode 100644 index 0000000000..f4076bd162 Binary files /dev/null and b/docs/zh_cn/_static/image/mmdet3d-logo.png differ diff --git a/docs/zh_cn/advanced_guides/index.rst b/docs/zh_cn/advanced_guides/index.rst index 9b6514961d..1faa4c57b3 100644 --- a/docs/zh_cn/advanced_guides/index.rst +++ b/docs/zh_cn/advanced_guides/index.rst @@ -1,18 +1,26 @@ +Datasets +************** + .. toctree:: :maxdepth: 1 - :caption: Datasets datasets/index.rst + +Supported Tasks +************** + .. toctree:: - :maxdepth: 1 - :caption: Supported Tasks + :maxdepth: 2 supported_tasks/index.rst +Customization +************** + .. toctree:: - :maxdepth: 1 + :maxdepth: 2 customize_dataset.md customize_models.md diff --git a/docs/zh_cn/api.rst b/docs/zh_cn/api.rst index 00153b9151..74b14c54f9 100644 --- a/docs/zh_cn/api.rst +++ b/docs/zh_cn/api.rst @@ -1,73 +1,76 @@ -mmdet3d.core +mmdet3d.apis -------------- +.. automodule:: mmdet3d.apis + :members: -anchor +mmdet3d.datasets +-------------- + +datasets ^^^^^^^^^^ -.. automodule:: mmdet3d.core.anchor +.. automodule:: mmdet3d.datasets :members: -bbox -^^^^^^^^^^ -.. automodule:: mmdet3d.core.bbox + +transforms +^^^^^^^^^^^^ +.. 
automodule:: mmdet3d.datasets.transforms :members: -evaluation +mmdet3d.engine +-------------- + +schedulers ^^^^^^^^^^ -.. automodule:: mmdet3d.core.evaluation +.. automodule:: mmdet3d.engine.schedulers :members: -visualizer -^^^^^^^^^^^^^^^ -.. automodule:: mmdet3d.core.visualizer - :members: +mmdet3d.evaluation +-------------- -voxel -^^^^^^^^^^^^^^^ -.. automodule:: mmdet3d.core.voxel +functional +^^^^^^^^^^ +.. automodule:: mmdet3d.evaluation.functional :members: -post_processing -^^^^^^^^^^^^^^^ -.. automodule:: mmdet3d.core.post_processing +metrics +^^^^^^^^^^ +.. automodule:: mmdet3d.evaluation.metrics :members: -mmdet3d.datasets ----------------- - -.. automodule:: mmdet3d.datasets - :members: mmdet3d.models -------------- -detectors -^^^^^^^^^^ -.. automodule:: mmdet3d.models.detectors - :members: - backbones ^^^^^^^^^^ .. automodule:: mmdet3d.models.backbones :members: -necks +data_preprocessors ^^^^^^^^^^ -.. automodule:: mmdet3d.models.necks +.. automodule:: mmdet3d.models.data_preprocessors :members: +decode_heads +^^^^^^^^^^^^ +.. automodule:: mmdet3d.models.decode_heads + :members: + + dense_heads ^^^^^^^^^^^^ .. automodule:: mmdet3d.models.dense_heads :members: -roi_heads +detectors ^^^^^^^^^^ -.. automodule:: mmdet3d.models.roi_heads +.. automodule:: mmdet3d.models.detectors :members: -fusion_layers -^^^^^^^^^^^^^ -.. automodule:: mmdet3d.models.fusion_layers +layers +^^^^^^^^^^ +.. automodule:: mmdet3d.models.layers :members: losses @@ -76,11 +79,74 @@ losses :members: middle_encoders -^^^^^^^^^^^^^^^ +^^^^^^^^^^^^ .. automodule:: mmdet3d.models.middle_encoders :members: -model_utils -^^^^^^^^^^^^^ -.. automodule:: mmdet3d.models.model_utils +necks +^^^^^^^^^^^^ +.. automodule:: mmdet3d.models.necks + :members: + +roi_heads +^^^^^^^^^^ +.. automodule:: mmdet3d.models.roi_heads + :members: + +segmentors +^^^^^^^^^^ +.. automodule:: mmdet3d.models.segmentors + :members: + +task_modules +^^^^^^^^^^ +.. 
automodule:: mmdet3d.models.task_modules + :members: + +test_time_augs +^^^^^^^^^^ +.. automodule:: mmdet3d.models.test_time_augs + :members: + +utils +^^^^^^^^^^ +.. automodule:: mmdet3d.models.utils + :members: + +voxel_encoders +^^^^^^^^^^ +.. automodule:: mmdet3d.models.voxel_encoders + :members: + +mmdet3d.structures +-------------- + +structures +^^^^^^^^^^ +.. automodule:: mmdet3d.structures + :members: + +bbox_3d +^^^^^^^^^^ +.. automodule:: mmdet3d.structures.bbox_3d + :members: + +ops +^^^^^^^^^^ +.. automodule:: mmdet3d.structures.ops + :members: + +points +^^^^^^^^^^ +.. automodule:: mmdet3d.structures.points + :members: + +mmdet3d.utils +-------------- +.. automodule:: mmdet3d.utils + :members: + +mmdet3d.visualization +-------------- +.. automodule:: mmdet3d.visualization :members: diff --git a/requirements/docs.txt b/requirements/docs.txt index a31b7716bb..5adbb4d475 100644 --- a/requirements/docs.txt +++ b/requirements/docs.txt @@ -1,5 +1,5 @@ docutils==0.16.0 -m2r +m2r==0.2.1 mistune==0.8.4 myst-parser -e git+https://github.com/open-mmlab/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme