From aa95f295f16ffa2d188435d54708e30ef081eb7c Mon Sep 17 00:00:00 2001
From: Danryanh7
Date: Fri, 19 Jan 2024 01:42:52 +0000
Subject: [PATCH 01/70] base node for openpcdet

---
 .../lidar-object-det/__init__.py              |  0
 .../lidar-object-det/openpcdet.py             |  6 +++++
 .../lidar-object-det/package.xml              | 18 +++++++++++++
 .../resource/lidar-object-det                 |  0
 .../lidar-object-det/setup.cfg                |  4 +++
 .../lidar-object-det/setup.py                 | 26 +++++++++++++++++++
 .../lidar-object-det/test/test_copyright.py   | 25 ++++++++++++++++++
 .../lidar-object-det/test/test_flake8.py      | 25 ++++++++++++++++++
 .../lidar-object-det/test/test_pep257.py      | 23 ++++++++++++++++
 9 files changed, 127 insertions(+)
 create mode 100644 src/perception/lidar_object_detection/lidar-object-det/lidar-object-det/__init__.py
 create mode 100644 src/perception/lidar_object_detection/lidar-object-det/lidar-object-det/openpcdet.py
 create mode 100644 src/perception/lidar_object_detection/lidar-object-det/package.xml
 create mode 100644 src/perception/lidar_object_detection/lidar-object-det/resource/lidar-object-det
 create mode 100644 src/perception/lidar_object_detection/lidar-object-det/setup.cfg
 create mode 100644 src/perception/lidar_object_detection/lidar-object-det/setup.py
 create mode 100644 src/perception/lidar_object_detection/lidar-object-det/test/test_copyright.py
 create mode 100644 src/perception/lidar_object_detection/lidar-object-det/test/test_flake8.py
 create mode 100644 src/perception/lidar_object_detection/lidar-object-det/test/test_pep257.py

diff --git a/src/perception/lidar_object_detection/lidar-object-det/lidar-object-det/__init__.py b/src/perception/lidar_object_detection/lidar-object-det/lidar-object-det/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/perception/lidar_object_detection/lidar-object-det/lidar-object-det/openpcdet.py b/src/perception/lidar_object_detection/lidar-object-det/lidar-object-det/openpcdet.py
new file mode 100644
index 00000000..3a433920
--- /dev/null
+++ b/src/perception/lidar_object_detection/lidar-object-det/lidar-object-det/openpcdet.py
@@ -0,0 +1,6 @@
+def main():
+    print('Hi from lidar-object-det. lol!')
+
+
+if __name__ == '__main__':
+    main()
diff --git a/src/perception/lidar_object_detection/lidar-object-det/package.xml b/src/perception/lidar_object_detection/lidar-object-det/package.xml
new file mode 100644
index 00000000..a7092bce
--- /dev/null
+++ b/src/perception/lidar_object_detection/lidar-object-det/package.xml
@@ -0,0 +1,18 @@
+<?xml version="1.0"?>
+<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
+<package format="3">
+  <name>lidar-object-det</name>
+  <version>0.0.0</version>
+  <description>TODO: Package description</description>
+  <maintainer email="bolty@todo.todo">bolty</maintainer>
+  <license>TODO: License declaration</license>
+
+  <test_depend>ament_copyright</test_depend>
+  <test_depend>ament_flake8</test_depend>
+  <test_depend>ament_pep257</test_depend>
+  <test_depend>python3-pytest</test_depend>
+
+  <export>
+    <build_type>ament_python</build_type>
+  </export>
+</package>
diff --git a/src/perception/lidar_object_detection/lidar-object-det/resource/lidar-object-det b/src/perception/lidar_object_detection/lidar-object-det/resource/lidar-object-det
new file mode 100644
index 00000000..e69de29b
diff --git a/src/perception/lidar_object_detection/lidar-object-det/setup.cfg b/src/perception/lidar_object_detection/lidar-object-det/setup.cfg
new file mode 100644
index 00000000..5808ed9e
--- /dev/null
+++ b/src/perception/lidar_object_detection/lidar-object-det/setup.cfg
@@ -0,0 +1,4 @@
+[develop]
+script_dir=$base/lib/lidar-object-det
+[install]
+install_scripts=$base/lib/lidar-object-det
diff --git a/src/perception/lidar_object_detection/lidar-object-det/setup.py b/src/perception/lidar_object_detection/lidar-object-det/setup.py
new file mode 100644
index 00000000..23215bbf
--- /dev/null
+++ b/src/perception/lidar_object_detection/lidar-object-det/setup.py
@@ -0,0 +1,26 @@
+from setuptools import find_packages, setup
+
+package_name = 'lidar-object-det'
+
+setup(
+    name=package_name,
+    version='0.0.0',
+    packages=find_packages(exclude=['test']),
+    data_files=[
+        ('share/ament_index/resource_index/packages',
+            ['resource/' + package_name]),
+        ('share/' + package_name, ['package.xml']),
+    ],
+    install_requires=['setuptools'],
+    zip_safe=True,
+    maintainer='bolty',
+    maintainer_email='bolty@todo.todo',
+    description='TODO: Package description',
+    license='TODO: License declaration',
+    tests_require=['pytest'],
+    entry_points={
+        'console_scripts': [
+            'openpcdet = lidar-object-det.openpcdet:main'
+        ],
+    },
+)
diff --git a/src/perception/lidar_object_detection/lidar-object-det/test/test_copyright.py b/src/perception/lidar_object_detection/lidar-object-det/test/test_copyright.py
new file mode 100644
index 00000000..97a39196
--- /dev/null
+++ b/src/perception/lidar_object_detection/lidar-object-det/test/test_copyright.py
@@ -0,0 +1,25 @@
+# Copyright 2015 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_copyright.main import main
+import pytest
+
+
+# Remove the `skip` decorator once the source file(s) have a copyright header
+@pytest.mark.skip(reason='No copyright header has been placed in the generated source file.')
+@pytest.mark.copyright
+@pytest.mark.linter
+def test_copyright():
+    rc = main(argv=['.', 'test'])
+    assert rc == 0, 'Found errors'
diff --git a/src/perception/lidar_object_detection/lidar-object-det/test/test_flake8.py b/src/perception/lidar_object_detection/lidar-object-det/test/test_flake8.py
new file mode 100644
index 00000000..27ee1078
--- /dev/null
+++ b/src/perception/lidar_object_detection/lidar-object-det/test/test_flake8.py
@@ -0,0 +1,25 @@
+# Copyright 2017 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ament_flake8.main import main_with_errors
+import pytest
+
+
+@pytest.mark.flake8
+@pytest.mark.linter
+def test_flake8():
+    rc, errors = main_with_errors(argv=[])
+    assert rc == 0, \
+        'Found %d code style errors / warnings:\n' % len(errors) + \
+        '\n'.join(errors)
diff --git a/src/perception/lidar_object_detection/lidar-object-det/test/test_pep257.py b/src/perception/lidar_object_detection/lidar-object-det/test/test_pep257.py
new file mode 100644
index 00000000..b234a384
--- /dev/null
+++ b/src/perception/lidar_object_detection/lidar-object-det/test/test_pep257.py
@@ -0,0 +1,23 @@
+# Copyright 2015 Open Source Robotics Foundation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +from ament_pep257.main import main +import pytest + + +@pytest.mark.linter +@pytest.mark.pep257 +def test_pep257(): + rc = main(argv=['.', 'test']) + assert rc == 0, 'Found code style errors / warnings' From ff4a955b14fc62087922b75b2d8aa9638b802ba2 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Fri, 19 Jan 2024 01:44:26 +0000 Subject: [PATCH 02/70] copied dockerfile for openpcdet --- ...idar_object_detection_openpcdet.Dockerfile | 55 +++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile diff --git a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile new file mode 100644 index 00000000..6deba0bc --- /dev/null +++ b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile @@ -0,0 +1,55 @@ +ARG BASE_IMAGE=ghcr.io/watonomous/wato_monorepo/base:humble-ubuntu22.04 + +################################ Source ################################ +FROM ${BASE_IMAGE} as source + +WORKDIR ${AMENT_WS}/src + +# Copy in source code +COPY src/perception/lidar_object_detection lidar_object_detection +COPY src/wato_msgs/sample_msgs sample_msgs + +# Scan for rosdeps +RUN apt-get -qq update && rosdep update && \ + rosdep install --from-paths . --ignore-src -r -s \ + | grep 'apt-get install' \ + | awk '{print $3}' \ + | sort > /tmp/colcon_install_list + +################################# Dependencies ################################ +FROM ${BASE_IMAGE} as dependencies + +# Install Rosdep requirements +COPY --from=source /tmp/colcon_install_list /tmp/colcon_install_list +RUN apt-fast install -qq -y --no-install-recommends $(cat /tmp/colcon_install_list) + +# Copy in source code from source stage +WORKDIR ${AMENT_WS} +COPY --from=source ${AMENT_WS}/src src + +# Dependency Cleanup +WORKDIR / +RUN apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ + rm -rf /root/* /root/.ros /tmp/* /var/lib/apt/lists/* /usr/share/doc/* + +################################ Build ################################ +FROM dependencies as build + +# Build ROS2 packages +WORKDIR ${AMENT_WS} +RUN . /opt/ros/$ROS_DISTRO/setup.sh && \ + colcon build \ + --cmake-args -DCMAKE_BUILD_TYPE=Release + +# Entrypoint will run before any CMD on launch. 
Sources ~/opt//setup.bash and ~/ament_ws/install/setup.bash +COPY docker/wato_ros_entrypoint.sh ${AMENT_WS}/wato_ros_entrypoint.sh +ENTRYPOINT ["./wato_ros_entrypoint.sh"] + +################################ Prod ################################ +FROM build as deploy + +# Source Cleanup and Security Setup +RUN chown -R $USER:$USER ${AMENT_WS} +RUN rm -rf src/* + +USER ${USER} From 4455065ca4474bf3159604959e885a6d727362bb Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Sat, 20 Jan 2024 05:57:32 +0000 Subject: [PATCH 03/70] working package in ros --- ...idar_object_detection_openpcdet.Dockerfile | 10 +++--- .../lidar-object-det/__init__.py | 0 .../lidar-object-det/openpcdet.py | 6 ---- .../resource/lidar-object-det | 0 .../lidar-object-det/setup.cfg | 4 --- .../lidar-object-det/setup.py | 26 -------------- .../lidar-object-det/test/test_copyright.py | 25 ------------- .../lidar-object-det/test/test_flake8.py | 25 ------------- .../lidar-object-det/test/test_pep257.py | 23 ------------ .../lidar_det/CMakeLists.txt | 33 +++++++++++++++++ .../package.xml | 15 ++++---- .../lidar_det/src/lidar_det.cpp | 35 +++++++++++++++++++ 12 files changed, 82 insertions(+), 120 deletions(-) delete mode 100644 src/perception/lidar_object_detection/lidar-object-det/lidar-object-det/__init__.py delete mode 100644 src/perception/lidar_object_detection/lidar-object-det/lidar-object-det/openpcdet.py delete mode 100644 src/perception/lidar_object_detection/lidar-object-det/resource/lidar-object-det delete mode 100644 src/perception/lidar_object_detection/lidar-object-det/setup.cfg delete mode 100644 src/perception/lidar_object_detection/lidar-object-det/setup.py delete mode 100644 src/perception/lidar_object_detection/lidar-object-det/test/test_copyright.py delete mode 100644 src/perception/lidar_object_detection/lidar-object-det/test/test_flake8.py delete mode 100644 src/perception/lidar_object_detection/lidar-object-det/test/test_pep257.py create mode 100644 src/perception/lidar_object_detection/lidar_det/CMakeLists.txt rename src/perception/lidar_object_detection/{lidar-object-det => lidar_det}/package.xml (59%) create mode 100644 src/perception/lidar_object_detection/lidar_det/src/lidar_det.cpp diff --git a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile index 6deba0bc..291364ee 100644 --- a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile +++ b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile @@ -46,10 +46,10 @@ COPY docker/wato_ros_entrypoint.sh ${AMENT_WS}/wato_ros_entrypoint.sh ENTRYPOINT ["./wato_ros_entrypoint.sh"] ################################ Prod ################################ -FROM build as deploy +# FROM build as deploy -# Source Cleanup and Security Setup -RUN chown -R $USER:$USER ${AMENT_WS} -RUN rm -rf src/* +# # Source Cleanup and Security Setup +# RUN chown -R $USER:$USER ${AMENT_WS} +# RUN rm -rf src/* -USER ${USER} +# USER ${USER} diff --git a/src/perception/lidar_object_detection/lidar-object-det/lidar-object-det/__init__.py b/src/perception/lidar_object_detection/lidar-object-det/lidar-object-det/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/perception/lidar_object_detection/lidar-object-det/lidar-object-det/openpcdet.py b/src/perception/lidar_object_detection/lidar-object-det/lidar-object-det/openpcdet.py deleted file mode 100644 index 3a433920..00000000 --- 
a/src/perception/lidar_object_detection/lidar-object-det/lidar-object-det/openpcdet.py +++ /dev/null @@ -1,6 +0,0 @@ -def main(): - print('Hi from lidar-object-det. lol!') - - -if __name__ == '__main__': - main() diff --git a/src/perception/lidar_object_detection/lidar-object-det/resource/lidar-object-det b/src/perception/lidar_object_detection/lidar-object-det/resource/lidar-object-det deleted file mode 100644 index e69de29b..00000000 diff --git a/src/perception/lidar_object_detection/lidar-object-det/setup.cfg b/src/perception/lidar_object_detection/lidar-object-det/setup.cfg deleted file mode 100644 index 5808ed9e..00000000 --- a/src/perception/lidar_object_detection/lidar-object-det/setup.cfg +++ /dev/null @@ -1,4 +0,0 @@ -[develop] -script_dir=$base/lib/lidar-object-det -[install] -install_scripts=$base/lib/lidar-object-det diff --git a/src/perception/lidar_object_detection/lidar-object-det/setup.py b/src/perception/lidar_object_detection/lidar-object-det/setup.py deleted file mode 100644 index 23215bbf..00000000 --- a/src/perception/lidar_object_detection/lidar-object-det/setup.py +++ /dev/null @@ -1,26 +0,0 @@ -from setuptools import find_packages, setup - -package_name = 'lidar-object-det' - -setup( - name=package_name, - version='0.0.0', - packages=find_packages(exclude=['test']), - data_files=[ - ('share/ament_index/resource_index/packages', - ['resource/' + package_name]), - ('share/' + package_name, ['package.xml']), - ], - install_requires=['setuptools'], - zip_safe=True, - maintainer='bolty', - maintainer_email='bolty@todo.todo', - description='TODO: Package description', - license='TODO: License declaration', - tests_require=['pytest'], - entry_points={ - 'console_scripts': [ - 'openpcdet = lidar-object-det.openpcdet:main' - ], - }, -) diff --git a/src/perception/lidar_object_detection/lidar-object-det/test/test_copyright.py b/src/perception/lidar_object_detection/lidar-object-det/test/test_copyright.py deleted file mode 100644 index 97a39196..00000000 --- a/src/perception/lidar_object_detection/lidar-object-det/test/test_copyright.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2015 Open Source Robotics Foundation, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from ament_copyright.main import main -import pytest - - -# Remove the `skip` decorator once the source file(s) have a copyright header -@pytest.mark.skip(reason='No copyright header has been placed in the generated source file.') -@pytest.mark.copyright -@pytest.mark.linter -def test_copyright(): - rc = main(argv=['.', 'test']) - assert rc == 0, 'Found errors' diff --git a/src/perception/lidar_object_detection/lidar-object-det/test/test_flake8.py b/src/perception/lidar_object_detection/lidar-object-det/test/test_flake8.py deleted file mode 100644 index 27ee1078..00000000 --- a/src/perception/lidar_object_detection/lidar-object-det/test/test_flake8.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2017 Open Source Robotics Foundation, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from ament_flake8.main import main_with_errors -import pytest - - -@pytest.mark.flake8 -@pytest.mark.linter -def test_flake8(): - rc, errors = main_with_errors(argv=[]) - assert rc == 0, \ - 'Found %d code style errors / warnings:\n' % len(errors) + \ - '\n'.join(errors) diff --git a/src/perception/lidar_object_detection/lidar-object-det/test/test_pep257.py b/src/perception/lidar_object_detection/lidar-object-det/test/test_pep257.py deleted file mode 100644 index b234a384..00000000 --- a/src/perception/lidar_object_detection/lidar-object-det/test/test_pep257.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2015 Open Source Robotics Foundation, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from ament_pep257.main import main -import pytest - - -@pytest.mark.linter -@pytest.mark.pep257 -def test_pep257(): - rc = main(argv=['.', 'test']) - assert rc == 0, 'Found code style errors / warnings' diff --git a/src/perception/lidar_object_detection/lidar_det/CMakeLists.txt b/src/perception/lidar_object_detection/lidar_det/CMakeLists.txt new file mode 100644 index 00000000..507bfafb --- /dev/null +++ b/src/perception/lidar_object_detection/lidar_det/CMakeLists.txt @@ -0,0 +1,33 @@ +cmake_minimum_required(VERSION 3.8) +project(lidar_det) + +if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + add_compile_options(-Wall -Wextra -Wpedantic) +endif() + +# find dependencies +find_package(ament_cmake REQUIRED) +find_package(rclcpp REQUIRED) +find_package(std_msgs REQUIRED) + +if(BUILD_TESTING) + find_package(ament_lint_auto REQUIRED) + # the following line skips the linter which checks for copyrights + # comment the line when a copyright and license is added to all source files + set(ament_cmake_copyright_FOUND TRUE) + # the following line skips cpplint (only works in a git repo) + # comment the line when this package is in a git repo and when + # a copyright and license is added to all source files + set(ament_cmake_cpplint_FOUND TRUE) + ament_lint_auto_find_test_dependencies() +endif() + +add_executable(lidar_det src/lidar_det.cpp) +ament_target_dependencies(lidar_det rclcpp std_msgs) +install(TARGETS + lidar_det + DESTINATION lib/${PROJECT_NAME} +) + + +ament_package() diff --git a/src/perception/lidar_object_detection/lidar-object-det/package.xml b/src/perception/lidar_object_detection/lidar_det/package.xml similarity index 59% rename from src/perception/lidar_object_detection/lidar-object-det/package.xml rename to 
src/perception/lidar_object_detection/lidar_det/package.xml index a7092bce..959555ab 100644 --- a/src/perception/lidar_object_detection/lidar-object-det/package.xml +++ b/src/perception/lidar_object_detection/lidar_det/package.xml @@ -1,18 +1,21 @@ - lidar-object-det + lidar_det 0.0.0 TODO: Package description bolty TODO: License declaration - ament_copyright - ament_flake8 - ament_pep257 - python3-pytest + ament_cmake + + rclcpp + std_msgs + + ament_lint_auto + ament_lint_common - ament_python + ament_cmake diff --git a/src/perception/lidar_object_detection/lidar_det/src/lidar_det.cpp b/src/perception/lidar_object_detection/lidar_det/src/lidar_det.cpp new file mode 100644 index 00000000..62e663eb --- /dev/null +++ b/src/perception/lidar_object_detection/lidar_det/src/lidar_det.cpp @@ -0,0 +1,35 @@ +#include "rclcpp/rclcpp.hpp" +#include "std_msgs/msg/string.hpp" +#include + +using namespace std::chrono_literals; + +class TestNode : public rclcpp::Node +{ +public: + TestNode() : Node("test_node") + { + publisher_ = this->create_publisher("chatter", 10); + timer_ = this->create_wall_timer(1s, std::bind(&TestNode::timer_callback, this)); + } + +private: + void timer_callback() + { + auto message = std_msgs::msg::String(); + message.data = "Hello, ROS 2 world! " + std::to_string(count_++); + RCLCPP_INFO(this->get_logger(), "Publishing: '%s'", message.data.c_str()); + publisher_->publish(message); + } + rclcpp::TimerBase::SharedPtr timer_; + rclcpp::Publisher::SharedPtr publisher_; + size_t count_ = 0; +}; + +int main(int argc, char *argv[]) +{ + rclcpp::init(argc, argv); + rclcpp::spin(std::make_shared()); + rclcpp::shutdown(); + return 0; +} From 1af45db27711fbe3644e5a4c10e7b860030532b3 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Sat, 20 Jan 2024 05:59:24 +0000 Subject: [PATCH 04/70] ig we dont need these --- .../lidar_object_detection/CMakeLists.txt | 26 ------------------- .../lidar_object_detection/package.xml | 18 ------------- 2 files changed, 44 deletions(-) delete mode 100644 src/perception/lidar_object_detection/CMakeLists.txt delete mode 100644 src/perception/lidar_object_detection/package.xml diff --git a/src/perception/lidar_object_detection/CMakeLists.txt b/src/perception/lidar_object_detection/CMakeLists.txt deleted file mode 100644 index e5e82c44..00000000 --- a/src/perception/lidar_object_detection/CMakeLists.txt +++ /dev/null @@ -1,26 +0,0 @@ -cmake_minimum_required(VERSION 3.8) -project(lidar_object_detection) - -if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") - add_compile_options(-Wall -Wextra -Wpedantic) -endif() - -# find dependencies -find_package(ament_cmake REQUIRED) -# uncomment the following section in order to fill in -# further dependencies manually. 
-# find_package( REQUIRED) - -if(BUILD_TESTING) - find_package(ament_lint_auto REQUIRED) - # the following line skips the linter which checks for copyrights - # comment the line when a copyright and license is added to all source files - set(ament_cmake_copyright_FOUND TRUE) - # the following line skips cpplint (only works in a git repo) - # comment the line when this package is in a git repo and when - # a copyright and license is added to all source files - set(ament_cmake_cpplint_FOUND TRUE) - ament_lint_auto_find_test_dependencies() -endif() - -ament_package() diff --git a/src/perception/lidar_object_detection/package.xml b/src/perception/lidar_object_detection/package.xml deleted file mode 100644 index 83c42a2c..00000000 --- a/src/perception/lidar_object_detection/package.xml +++ /dev/null @@ -1,18 +0,0 @@ - - - - lidar_object_detection - 0.0.0 - TODO: Package description - bolty - TODO: License declaration - - ament_cmake - - ament_lint_auto - ament_lint_common - - - ament_cmake - - From 47b71633b51e6f506a3ee397ddc84200289859cc Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Sat, 20 Jan 2024 06:12:35 +0000 Subject: [PATCH 05/70] added python package if I want to use that --- .../lidar_det_py/lidar_det_py/__init__.py | 0 .../lidar_det_py/lidar_det_py/test_node.py | 34 +++++++++++++++++++ .../lidar_det_py/package.xml | 21 ++++++++++++ .../lidar_det_py/resource/lidar_det_py | 0 .../lidar_det_py/setup.cfg | 4 +++ .../lidar_det_py/setup.py | 26 ++++++++++++++ 6 files changed, 85 insertions(+) create mode 100644 src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/__init__.py create mode 100644 src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/test_node.py create mode 100644 src/perception/lidar_object_detection/lidar_det_py/package.xml create mode 100644 src/perception/lidar_object_detection/lidar_det_py/resource/lidar_det_py create mode 100644 src/perception/lidar_object_detection/lidar_det_py/setup.cfg create mode 100644 src/perception/lidar_object_detection/lidar_det_py/setup.py diff --git a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/__init__.py b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/test_node.py b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/test_node.py new file mode 100644 index 00000000..934bd9f1 --- /dev/null +++ b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/test_node.py @@ -0,0 +1,34 @@ +import rclpy +from rclpy.node import Node +from std_msgs.msg import String +from time import sleep + +class TestNode(Node): + + def __init__(self): + super().__init__('test_node') + self.publisher_ = self.create_publisher(String, 'chatter', 10) + self.timer = self.create_timer(1.0, self.timer_callback) + self.count = 0 + + def timer_callback(self): + msg = String() + msg.data = 'Hello ROS 2 world! 
%d' % self.count + self.publisher_.publish(msg) + self.get_logger().info('Publishing: "%s"' % msg.data) + self.count += 1 + +def main(): + rclpy.init() + test_node = TestNode() + + try: + rclpy.spin(test_node) + except KeyboardInterrupt: + pass + + test_node.destroy_node() + rclpy.shutdown() + +if __name__ == '__main__': + main() diff --git a/src/perception/lidar_object_detection/lidar_det_py/package.xml b/src/perception/lidar_object_detection/lidar_det_py/package.xml new file mode 100644 index 00000000..26b37b62 --- /dev/null +++ b/src/perception/lidar_object_detection/lidar_det_py/package.xml @@ -0,0 +1,21 @@ + + + + lidar_det_py + 0.0.0 + TODO: Package description + bolty + TODO: License declaration + + rclpy + std_msgs + + ament_copyright + ament_flake8 + ament_pep257 + python3-pytest + + + ament_python + + diff --git a/src/perception/lidar_object_detection/lidar_det_py/resource/lidar_det_py b/src/perception/lidar_object_detection/lidar_det_py/resource/lidar_det_py new file mode 100644 index 00000000..e69de29b diff --git a/src/perception/lidar_object_detection/lidar_det_py/setup.cfg b/src/perception/lidar_object_detection/lidar_det_py/setup.cfg new file mode 100644 index 00000000..b74c15be --- /dev/null +++ b/src/perception/lidar_object_detection/lidar_det_py/setup.cfg @@ -0,0 +1,4 @@ +[develop] +script_dir=$base/lib/lidar_det_py +[install] +install_scripts=$base/lib/lidar_det_py diff --git a/src/perception/lidar_object_detection/lidar_det_py/setup.py b/src/perception/lidar_object_detection/lidar_det_py/setup.py new file mode 100644 index 00000000..f9eca843 --- /dev/null +++ b/src/perception/lidar_object_detection/lidar_det_py/setup.py @@ -0,0 +1,26 @@ +from setuptools import find_packages, setup + +package_name = 'lidar_det_py' + +setup( + name=package_name, + version='0.0.0', + packages=find_packages(exclude=['test']), + data_files=[ + ('share/ament_index/resource_index/packages', + ['resource/' + package_name]), + ('share/' + package_name, ['package.xml']), + ], + install_requires=['setuptools'], + zip_safe=True, + maintainer='bolty', + maintainer_email='bolty@todo.todo', + description='TODO: Package description', + license='TODO: License declaration', + tests_require=['pytest'], + entry_points={ + 'console_scripts': [ + 'test_node = lidar_det_py.test_node:main' + ], + }, +) From a42c5d62391b6c283f0d40c2bc643769d269774d Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Tue, 23 Jan 2024 05:05:36 +0000 Subject: [PATCH 06/70] adding ros node --- ...idar_object_detection_openpcdet.Dockerfile | 21 +- .../lidar_det/CMakeLists.txt | 33 --- .../lidar_det/package.xml | 21 -- .../lidar_det/src/lidar_det.cpp | 35 --- .../lidar_det_py/launch/3d_object_detector.py | 13 + .../lidar_det_py/launch/config.yaml | 15 + .../lidar_det_py/lidar_det_py/inference.py | 258 ++++++++++++++++++ .../lidar_det_py/utils/draw_3d.py | 222 +++++++++++++++ .../lidar_det_py/utils/global_def | 30 ++ .../lidar_det_py/package.xml | 5 + .../lidar_det_py/setup.py | 5 +- 11 files changed, 556 insertions(+), 102 deletions(-) delete mode 100644 src/perception/lidar_object_detection/lidar_det/CMakeLists.txt delete mode 100644 src/perception/lidar_object_detection/lidar_det/package.xml delete mode 100644 src/perception/lidar_object_detection/lidar_det/src/lidar_det.cpp create mode 100644 src/perception/lidar_object_detection/lidar_det_py/launch/3d_object_detector.py create mode 100644 src/perception/lidar_object_detection/lidar_det_py/launch/config.yaml create mode 100644 
src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py create mode 100644 src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/utils/draw_3d.py create mode 100644 src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/utils/global_def diff --git a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile index 291364ee..2211c4e2 100644 --- a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile +++ b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile @@ -1,50 +1,47 @@ ARG BASE_IMAGE=ghcr.io/watonomous/wato_monorepo/base:humble-ubuntu22.04 - ################################ Source ################################ FROM ${BASE_IMAGE} as source - WORKDIR ${AMENT_WS}/src - # Copy in source code COPY src/perception/lidar_object_detection lidar_object_detection COPY src/wato_msgs/sample_msgs sample_msgs - # Scan for rosdeps RUN apt-get -qq update && rosdep update && \ rosdep install --from-paths . --ignore-src -r -s \ | grep 'apt-get install' \ | awk '{print $3}' \ | sort > /tmp/colcon_install_list - ################################# Dependencies ################################ FROM ${BASE_IMAGE} as dependencies - # Install Rosdep requirements COPY --from=source /tmp/colcon_install_list /tmp/colcon_install_list RUN apt-fast install -qq -y --no-install-recommends $(cat /tmp/colcon_install_list) - # Copy in source code from source stage WORKDIR ${AMENT_WS} COPY --from=source ${AMENT_WS}/src src - # Dependency Cleanup WORKDIR / RUN apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ rm -rf /root/* /root/.ros /tmp/* /var/lib/apt/lists/* /usr/share/doc/* - +################################ INSTALL OpenPCDet ################################ +RUN apt update && apt install -y python3-pip +RUN pip3 install torch==1.13.1+cu116 torchvision==0.14.1+cu116 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116 +RUN pip3 install spconv-cu116 +RUN apt update && apt install -y python3-setuptools +WORKDIR /home/bolty/ +RUN git clone https://github.com/Kin-Zhang/OpenPCDet.git +RUN cd OpenPCDet && pip3 install -r requirements.txt +RUN pip3 install pyquaternion numpy==1.23 pillow==8.4 mayavi open3d ################################ Build ################################ FROM dependencies as build - # Build ROS2 packages WORKDIR ${AMENT_WS} RUN . /opt/ros/$ROS_DISTRO/setup.sh && \ colcon build \ --cmake-args -DCMAKE_BUILD_TYPE=Release - # Entrypoint will run before any CMD on launch. 
Sources ~/opt//setup.bash and ~/ament_ws/install/setup.bash COPY docker/wato_ros_entrypoint.sh ${AMENT_WS}/wato_ros_entrypoint.sh ENTRYPOINT ["./wato_ros_entrypoint.sh"] - ################################ Prod ################################ # FROM build as deploy diff --git a/src/perception/lidar_object_detection/lidar_det/CMakeLists.txt b/src/perception/lidar_object_detection/lidar_det/CMakeLists.txt deleted file mode 100644 index 507bfafb..00000000 --- a/src/perception/lidar_object_detection/lidar_det/CMakeLists.txt +++ /dev/null @@ -1,33 +0,0 @@ -cmake_minimum_required(VERSION 3.8) -project(lidar_det) - -if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") - add_compile_options(-Wall -Wextra -Wpedantic) -endif() - -# find dependencies -find_package(ament_cmake REQUIRED) -find_package(rclcpp REQUIRED) -find_package(std_msgs REQUIRED) - -if(BUILD_TESTING) - find_package(ament_lint_auto REQUIRED) - # the following line skips the linter which checks for copyrights - # comment the line when a copyright and license is added to all source files - set(ament_cmake_copyright_FOUND TRUE) - # the following line skips cpplint (only works in a git repo) - # comment the line when this package is in a git repo and when - # a copyright and license is added to all source files - set(ament_cmake_cpplint_FOUND TRUE) - ament_lint_auto_find_test_dependencies() -endif() - -add_executable(lidar_det src/lidar_det.cpp) -ament_target_dependencies(lidar_det rclcpp std_msgs) -install(TARGETS - lidar_det - DESTINATION lib/${PROJECT_NAME} -) - - -ament_package() diff --git a/src/perception/lidar_object_detection/lidar_det/package.xml b/src/perception/lidar_object_detection/lidar_det/package.xml deleted file mode 100644 index 959555ab..00000000 --- a/src/perception/lidar_object_detection/lidar_det/package.xml +++ /dev/null @@ -1,21 +0,0 @@ - - - - lidar_det - 0.0.0 - TODO: Package description - bolty - TODO: License declaration - - ament_cmake - - rclcpp - std_msgs - - ament_lint_auto - ament_lint_common - - - ament_cmake - - diff --git a/src/perception/lidar_object_detection/lidar_det/src/lidar_det.cpp b/src/perception/lidar_object_detection/lidar_det/src/lidar_det.cpp deleted file mode 100644 index 62e663eb..00000000 --- a/src/perception/lidar_object_detection/lidar_det/src/lidar_det.cpp +++ /dev/null @@ -1,35 +0,0 @@ -#include "rclcpp/rclcpp.hpp" -#include "std_msgs/msg/string.hpp" -#include - -using namespace std::chrono_literals; - -class TestNode : public rclcpp::Node -{ -public: - TestNode() : Node("test_node") - { - publisher_ = this->create_publisher("chatter", 10); - timer_ = this->create_wall_timer(1s, std::bind(&TestNode::timer_callback, this)); - } - -private: - void timer_callback() - { - auto message = std_msgs::msg::String(); - message.data = "Hello, ROS 2 world! 
" + std::to_string(count_++); - RCLCPP_INFO(this->get_logger(), "Publishing: '%s'", message.data.c_str()); - publisher_->publish(message); - } - rclcpp::TimerBase::SharedPtr timer_; - rclcpp::Publisher::SharedPtr publisher_; - size_t count_ = 0; -}; - -int main(int argc, char *argv[]) -{ - rclcpp::init(argc, argv); - rclcpp::spin(std::make_shared()); - rclcpp::shutdown(); - return 0; -} diff --git a/src/perception/lidar_object_detection/lidar_det_py/launch/3d_object_detector.py b/src/perception/lidar_object_detection/lidar_det_py/launch/3d_object_detector.py new file mode 100644 index 00000000..05984a36 --- /dev/null +++ b/src/perception/lidar_object_detection/lidar_det_py/launch/3d_object_detector.py @@ -0,0 +1,13 @@ +#!/usr/bin/env python3 +from launch import LaunchDescription +from launch_ros.actions import Node + +def generate_launch_description(): + return LaunchDescription([ + Node( + package='lidar_det_py', + executable='inference', + name='inference', + output='screen', + ), + ]) diff --git a/src/perception/lidar_object_detection/lidar_det_py/launch/config.yaml b/src/perception/lidar_object_detection/lidar_det_py/launch/config.yaml new file mode 100644 index 00000000..d74146d7 --- /dev/null +++ b/src/perception/lidar_object_detection/lidar_det_py/launch/config.yaml @@ -0,0 +1,15 @@ +# which go to the model you want to use +cfg_root: "/home/bolty/workspace/OpenPCDet/tools/cfgs/kitti_models/pv_rcnn.yaml" + +# which go to the .pth +model_path: "/home/kin/workspace/OpenPCDet/tools/pv_rcnn_8369.pth" + +# pointcloud_topic in the bag if you are not sure +# rosbag info xxx.bag check the topic name +pointcloud_topic: "/velodyne_points" # default is from kitti2bag: /velodyne_points + +# only the predict probablity over this threshold will be draw a box +threshold: 0.8 + +move_lidar_center: 20 +viz_rate: 1.0 \ No newline at end of file diff --git a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py new file mode 100644 index 00000000..141a6911 --- /dev/null +++ b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py @@ -0,0 +1,258 @@ +#! /usr/bin/env python3.8 +""" +Created on Thu Aug 6 11:27:43 2020 + +@author: Javier del Egido Sierra and Carlos Gómez-Huélamo + +=== + +Modified on 23 Dec 2022 +@author: Kin ZHANG (https://kin-zhang.github.io/) + +Part of codes also refers: https://github.com/kwea123/ROS_notes +""" + +# General use imports +import os +import time +import glob +from pathlib import Path + +# ROS imports +import rospy +import ros_numpy +from sensor_msgs.msg import PointCloud2, PointField +from visualization_msgs.msg import MarkerArray, Marker + +# Math and geometry imports +import math +import numpy as np +import torch + +# OpenPCDet imports +from pcdet.datasets import DatasetTemplate +from pcdet.models import build_network, load_data_to_gpu +from pcdet.config import cfg, cfg_from_yaml_file +from pcdet.utils import box_utils, calibration_kitti, common_utils, object3d_kitti + +# Kin's utils +from utils.draw_3d import Draw3DBox +from utils.global_def import * +from utils import * + +import yaml +import os +BASE_DIR = os.path.abspath(os.path.join( os.path.dirname( __file__ ), '..' 
)) +with open(f"{BASE_DIR}/launch/config.yaml", 'r') as f: + try: + para_cfg = yaml.safe_load(f, Loader=yaml.FullLoader) + except: + para_cfg = yaml.safe_load(f) + +cfg_root = para_cfg["cfg_root"] +model_path = para_cfg["model_path"] +move_lidar_center = para_cfg["move_lidar_center"] +threshold = para_cfg["threshold"] +pointcloud_topic = para_cfg["pointcloud_topic"] +RATE_VIZ = para_cfg["viz_rate"] +inference_time_list = [] + + +def xyz_array_to_pointcloud2(points_sum, stamp=None, frame_id=None): + """ + Create a sensor_msgs.PointCloud2 from an array of points. + """ + msg = PointCloud2() + if stamp: + msg.header.stamp = stamp + if frame_id: + msg.header.frame_id = frame_id + msg.height = 1 + msg.width = points_sum.shape[0] + msg.fields = [ + PointField('x', 0, PointField.FLOAT32, 1), + PointField('y', 4, PointField.FLOAT32, 1), + PointField('z', 8, PointField.FLOAT32, 1) + # PointField('i', 12, PointField.FLOAT32, 1) + ] + msg.is_bigendian = False + msg.point_step = 12 + msg.row_step = points_sum.shape[0] + msg.is_dense = int(np.isfinite(points_sum).all()) + msg.data = np.asarray(points_sum, np.float32).tostring() + return msg + +def rslidar_callback(msg): + select_boxs, select_types = [],[] + if proc_1.no_frame_id: + proc_1.set_viz_frame_id(msg.header.frame_id) + print(f"{bc.OKGREEN} setting marker frame id to lidar: {msg.header.frame_id} {bc.ENDC}") + proc_1.no_frame_id = False + + frame = msg.header.seq # frame id -> not timestamp + msg_cloud = ros_numpy.point_cloud2.pointcloud2_to_array(msg) + np_p = get_xyz_points(msg_cloud, True) + scores, dt_box_lidar, types, pred_dict = proc_1.run(np_p, frame) + for i, score in enumerate(scores): + if score>threshold: + select_boxs.append(dt_box_lidar[i]) + select_types.append(pred_dict['name'][i]) + if(len(select_boxs)>0): + proc_1.pub_rviz.publish_3dbox(np.array(select_boxs), -1, pred_dict['name']) + print_str = f"Frame id: {frame}. 
Prediction results: \n" + for i in range(len(pred_dict['name'])): + print_str += f"Type: {pred_dict['name'][i]:.3s} Prob: {scores[i]:.2f}\n" + print(print_str) + else: + print(f"\n{bc.FAIL} No confident prediction in this time stamp {bc.ENDC}\n") + print(f" -------------------------------------------------------------- ") + +class DemoDataset(DatasetTemplate): + def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None, ext='.bin'): + """ + Args: + root_path: + dataset_cfg: + class_names: + training: + logger: + """ + super().__init__( + dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger + ) + self.root_path = root_path + self.ext = ext + data_file_list = glob.glob(str(root_path / f'*{self.ext}')) if self.root_path.is_dir() else [self.root_path] + + data_file_list.sort() + self.sample_file_list = data_file_list + + def __len__(self): + return len(self.sample_file_list) + + def __getitem__(self, index): + if self.ext == '.bin': + points = np.fromfile(self.sample_file_list[index], dtype=np.float32).reshape(-1, 4) + elif self.ext == '.npy': + points = np.load(self.sample_file_list[index]) + else: + raise NotImplementedError + + input_dict = { + 'points': points, + 'frame_id': index, + } + + data_dict = self.prepare_data(data_dict=input_dict) + return data_dict + +class Processor_ROS: + def __init__(self, config_path, model_path): + self.points = None + self.config_path = config_path + self.model_path = model_path + self.device = None + self.net = None + self.voxel_generator = None + self.inputs = None + self.pub_rviz = None + self.no_frame_id = True + self.rate = RATE_VIZ + + def set_pub_rviz(self, box3d_pub, marker_frame_id = 'velodyne'): + self.pub_rviz = Draw3DBox(box3d_pub, marker_frame_id, self.rate) + + def set_viz_frame_id(self, marker_frame_id): + self.pub_rviz.set_frame_id(marker_frame_id) + + def initialize(self): + self.read_config() + + def read_config(self): + config_path = self.config_path + cfg_from_yaml_file(self.config_path, cfg) + self.logger = common_utils.create_logger() + self.demo_dataset = DemoDataset( + dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False, + root_path=Path("/home/kin/workspace/OpenPCDet/tools/000002.bin"), + ext='.bin') + + self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + self.net = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=self.demo_dataset) + print("Model path: ", self.model_path) + self.net.load_params_from_file(filename=self.model_path, logger=self.logger, to_cpu=True) + self.net = self.net.to(self.device).eval() + + def get_template_prediction(self, num_samples): + ret_dict = { + 'name': np.zeros(num_samples), 'truncated': np.zeros(num_samples), + 'occluded': np.zeros(num_samples), 'alpha': np.zeros(num_samples), + 'bbox': np.zeros([num_samples, 4]), 'dimensions': np.zeros([num_samples, 3]), + 'location': np.zeros([num_samples, 3]), 'rotation_y': np.zeros(num_samples), + 'score': np.zeros(num_samples), 'boxes_lidar': np.zeros([num_samples, 7]) + } + return ret_dict + + def run(self, points, frame): + t_t = time.time() + num_features = 4 # X,Y,Z,intensity + self.points = points.reshape([-1, num_features]) + + timestamps = np.empty((len(self.points),1)) + timestamps[:] = frame + + # self.points = np.append(self.points, timestamps, axis=1) + self.points[:,0] += move_lidar_center + + input_dict = { + 'points': self.points, + 'frame_id': frame, + } + + data_dict = 
self.demo_dataset.prepare_data(data_dict=input_dict) + data_dict = self.demo_dataset.collate_batch([data_dict]) + load_data_to_gpu(data_dict) + + torch.cuda.synchronize() + t = time.time() + + pred_dicts, _ = self.net.forward(data_dict) + + torch.cuda.synchronize() + inference_time = time.time() - t + inference_time_list.append(inference_time) + + boxes_lidar = pred_dicts[0]["pred_boxes"].detach().cpu().numpy() + scores = pred_dicts[0]["pred_scores"].detach().cpu().numpy() + types = pred_dicts[0]["pred_labels"].detach().cpu().numpy() + + pred_boxes = np.copy(boxes_lidar) + pred_dict = self.get_template_prediction(scores.shape[0]) + + pred_dict['name'] = np.array(cfg.CLASS_NAMES)[types - 1] + pred_dict['score'] = scores + pred_dict['boxes_lidar'] = pred_boxes + + return scores, boxes_lidar, types, pred_dict + +if __name__ == "__main__": + no_frame_id = False + proc_1 = Processor_ROS(cfg_root, model_path) + print(f"\n{bc.OKCYAN}Config path: {bc.BOLD}{cfg_root}{bc.ENDC}") + print(f"{bc.OKCYAN}Model path: {bc.BOLD}{model_path}{bc.ENDC}") + + proc_1.initialize() + rospy.init_node('inference') + sub_lidar_topic = [pointcloud_topic] + + cfg_from_yaml_file(cfg_root, cfg) + + sub_ = rospy.Subscriber(sub_lidar_topic[0], PointCloud2, rslidar_callback, queue_size=1, buff_size=2**24) + pub_rviz = rospy.Publisher('detect_3dbox',MarkerArray, queue_size=10) + proc_1.set_pub_rviz(pub_rviz) + print(f"{bc.HEADER} ====================== {bc.ENDC}") + print(" ===> [+] PCDet ros_node has started. Try to Run the rosbag file") + print(f"{bc.HEADER} ====================== {bc.ENDC}") + + rospy.spin() \ No newline at end of file diff --git a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/utils/draw_3d.py b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/utils/draw_3d.py new file mode 100644 index 00000000..c5fbaefd --- /dev/null +++ b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/utils/draw_3d.py @@ -0,0 +1,222 @@ +#! /usr/bin/env python3.8 +""" +Created on 23 Dec 2022 + +@author: Kin ZHANG (https://kin-zhang.github.io/) +% Copyright (C) 2022, RPL, KTH Royal Institute of Technology + +Part of codes also refers: +1. https://github.com/kwea123/ROS_notes +2. 
https://github.com/seaside2mm/sharecode + +""" + +import rospy +from visualization_msgs.msg import Marker, MarkerArray +from geometry_msgs.msg import Point +import numpy as np +from .global_def import * + +""" +This file is from OpenPCDet visualize_utils.py +Check their repo: https://github.com/open-mmlab/OpenPCDet +""" +import numpy as np +import torch + +box_colormap = [ + [1, 1, 1], + [0, 1, 0], + [0, 1, 1], + [1, 1, 0], +] + + +def check_numpy_to_torch(x): + if isinstance(x, np.ndarray): + return torch.from_numpy(x).float(), True + return x, False + + +def rotate_points_along_z(points, angle): + """ + Args: + points: (B, N, 3 + C) + angle: (B), angle along z-axis, angle increases x ==> y + Returns: + + """ + points, is_numpy = check_numpy_to_torch(points) + angle, _ = check_numpy_to_torch(angle) + + cosa = torch.cos(angle) + sina = torch.sin(angle) + zeros = angle.new_zeros(points.shape[0]) + ones = angle.new_ones(points.shape[0]) + rot_matrix = torch.stack(( + cosa, sina, zeros, + -sina, cosa, zeros, + zeros, zeros, ones + ), dim=1).view(-1, 3, 3).float() + points_rot = torch.matmul(points[:, :, 0:3], rot_matrix) + points_rot = torch.cat((points_rot, points[:, :, 3:]), dim=-1) + return points_rot.numpy() if is_numpy else points_rot + + +def boxes_to_corners_3d(boxes3d): + """ + 7 -------- 4 + /| /| + 6 -------- 5 . + | | | | + . 3 -------- 0 + |/ |/ + 2 -------- 1 + Args: + boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center + + Returns: + """ + boxes3d, is_numpy = check_numpy_to_torch(boxes3d) + + template = boxes3d.new_tensor(( + [1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1], + [1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, 1], + )) / 2 + + corners3d = boxes3d[:, None, 3:6].repeat(1, 8, 1) * template[None, :, :] + corners3d = rotate_points_along_z(corners3d.view(-1, 8, 3), boxes3d[:, 6]).view(-1, 8, 3) + corners3d += boxes3d[:, None, 0:3] + + return corners3d.numpy() if is_numpy else corners3d + +class Draw3DBox: + def __init__(self, box3d_pub, marker_frame_id = 'velodyne', rate=10): + self.frame_id = marker_frame_id + self.lifetime = 1.0/rate + self.box3d_pub = box3d_pub + + def set_frame_id(self, marker_frame_id): + self.frame_id = marker_frame_id + + def publish_3dbox(self, dt_box_lidar, track_ids, types=None, publish_id=True, move_lidar_center=20, publish_type=True): + """ + Publish 3d boxes in velodyne coordinate, with color specified by object_types + If object_types is None, set all color to cyan + corners_3d_velos : list of (8, 4) 3d corners + """ + # (N, 8, 3) + # -move_lidar_center + dt_box_lidar[:,0] = dt_box_lidar[:,0]-move_lidar_center + corners_3d_velos = boxes_to_corners_3d(dt_box_lidar) + + marker_array = MarkerArray() + + for i, corners_3d_velo in enumerate(corners_3d_velos): + # 3d box + marker = Marker() + marker.header.frame_id = self.frame_id + marker.header.stamp = rospy.Time.now() + marker.id = i + marker.action = Marker.ADD + marker.lifetime = rospy.Duration(self.lifetime) + marker.type = Marker.LINE_LIST + + b, g, r = DETECTION_COLOR_MAP[types[i]] + if types is None: + marker.color.r = 0.0 + marker.color.g = 1.0 + marker.color.b = 1.0 + else: + marker.color.r = r/255.0 + marker.color.g = g/255.0 + marker.color.b = b/255.0 + marker.color.a = 1.0 + marker.scale.x = 0.1 + + marker.points = [] + for l in LINES: + p1 = corners_3d_velo[l[0]] + marker.points.append(Point(p1[0], p1[1], p1[2])) + p2 = corners_3d_velo[l[1]] + marker.points.append(Point(p2[0], p2[1], p2[2])) + marker_array.markers.append(marker) + + # add track id + if 
publish_id and track_ids != -1: + track_id = track_ids[i] + text_marker = Marker() + text_marker.header.frame_id = self.frame_id + text_marker.header.stamp = rospy.Time.now() + + text_marker.id = track_id + 1000 + text_marker.action = Marker.ADD + text_marker.lifetime = rospy.Duration(self.lifetime) + text_marker.type = Marker.TEXT_VIEW_FACING + + p4 = corners_3d_velo[4] # upper front left corner + + text_marker.pose.position.x = p4[0] + text_marker.pose.position.y = p4[1] + text_marker.pose.position.z = p4[2] + 0.5 + + text_marker.text = str(track_id) + + text_marker.scale.x = 1 + text_marker.scale.y = 1 + text_marker.scale.z = 1 + + if types is None: + text_marker.color.r = 0.0 + text_marker.color.g = 1.0 + text_marker.color.b = 1.0 + else: + b, g, r = DETECTION_COLOR_MAP[types[i]] + text_marker.color.r = r/255.0 + text_marker.color.g = g/255.0 + text_marker.color.b = b/255.0 + text_marker.color.a = 1.0 + marker_array.markers.append(text_marker) + + if publish_type: + text_marker = Marker() + text_marker.header.frame_id = self.frame_id + text_marker.header.stamp = rospy.Time.now() + + text_marker.id = i + 1000 + text_marker.action = Marker.ADD + text_marker.lifetime = rospy.Duration(self.lifetime) + text_marker.type = Marker.TEXT_VIEW_FACING + + bc = (corners_3d_velo[6] + corners_3d_velo[7] + corners_3d_velo[2] + corners_3d_velo[3])/4 # back center + + text_marker.pose.position.x = bc[0] + text_marker.pose.position.y = bc[1] + text_marker.pose.position.z = bc[2] + 1.5 + + text_marker.text = types[i] + text_marker.scale.x = 1 + text_marker.scale.y = 1 + text_marker.scale.z = 1 + b, g, r = DETECTION_COLOR_MAP[types[i]] + text_marker.color.r = r/255.0 + text_marker.color.g = g/255.0 + text_marker.color.b = b/255.0 + text_marker.color.a = 1.0 + marker_array.markers.append(text_marker) + + self.box3d_pub.publish(marker_array) + + def compute_3d_box_cam2(self, h, w, l, x, y, z, yaw): + """ + Return : 3xn in cam2 coordinate + """ + R = np.array([[np.cos(yaw), 0, np.sin(yaw)], [0, 1, 0], [-np.sin(yaw), 0, np.cos(yaw)]]) + x_corners = [l/2,l/2,-l/2,-l/2,l/2,l/2,-l/2,-l/2] + y_corners = [0,0,0,0,-h,-h,-h,-h] + z_corners = [w/2,-w/2,-w/2,w/2,w/2,-w/2,-w/2,w/2] + corners_3d_cam2 = np.dot(R, np.vstack([x_corners,y_corners,z_corners])) + corners_3d_cam2[0,:] += x + corners_3d_cam2[1,:] += y + corners_3d_cam2[2,:] += z + return corners_3d_cam2 \ No newline at end of file diff --git a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/utils/global_def b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/utils/global_def new file mode 100644 index 00000000..d318a6ac --- /dev/null +++ b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/utils/global_def @@ -0,0 +1,30 @@ +#! 
/usr/bin/env python3.8 +""" +Created on 23 Dec 2022 + +@author: Kin ZHANG (https://kin-zhang.github.io/) +% Copyright (C) 2022, RPL, KTH Royal Institute of Technology + +Part of codes also refers: https://github.com/kwea123/ROS_notes +""" + +DETECTION_COLOR_MAP = {'Car': (255,255,0), + 'Pedestrian': (0, 226, 255), + 'Cyclist': (141, 40, 255)} # color for detection, in format bgr + +# connect vertic +LINES = [[0, 1], [1, 2], [2, 3], [3, 0]] # lower face +LINES+= [[4, 5], [5, 6], [6, 7], [7, 4]] # upper face +LINES+= [[4, 0], [5, 1], [6, 2], [7, 3]] # connect lower face and upper face +LINES+= [[4, 1], [5, 0]] # front face and draw x + +class bc: + HEADER = '\033[95m' + OKBLUE = '\033[94m' + OKCYAN = '\033[96m' + OKGREEN = '\033[92m' + WARNING = '\033[93m' + FAIL = '\033[91m' + ENDC = '\033[0m' + BOLD = '\033[1m' + UNDERLINE = '\033[4m' \ No newline at end of file diff --git a/src/perception/lidar_object_detection/lidar_det_py/package.xml b/src/perception/lidar_object_detection/lidar_det_py/package.xml index 26b37b62..f9da549c 100644 --- a/src/perception/lidar_object_detection/lidar_det_py/package.xml +++ b/src/perception/lidar_object_detection/lidar_det_py/package.xml @@ -9,6 +9,11 @@ rclpy std_msgs + rospy + sensor_msgs + nav_msgs + geometry_msgs + tf ament_copyright ament_flake8 diff --git a/src/perception/lidar_object_detection/lidar_det_py/setup.py b/src/perception/lidar_object_detection/lidar_det_py/setup.py index f9eca843..305cea43 100644 --- a/src/perception/lidar_object_detection/lidar_det_py/setup.py +++ b/src/perception/lidar_object_detection/lidar_det_py/setup.py @@ -1,4 +1,5 @@ from setuptools import find_packages, setup +from glob import glob package_name = 'lidar_det_py' @@ -10,6 +11,7 @@ ('share/ament_index/resource_index/packages', ['resource/' + package_name]), ('share/' + package_name, ['package.xml']), + ('share/' + package_name + '/launch', glob('launch/*.py')), ], install_requires=['setuptools'], zip_safe=True, @@ -20,7 +22,8 @@ tests_require=['pytest'], entry_points={ 'console_scripts': [ - 'test_node = lidar_det_py.test_node:main' + 'test_node = lidar_det_py.test_node:main', + 'inference = lidar_det_py.inference:main' ], }, ) From afddd09541d2c77fbbefdc7cd4e94fd0c3b2ba75 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Thu, 25 Jan 2024 03:37:32 +0000 Subject: [PATCH 07/70] made node and WIP on dockerfile --- ...idar_object_detection_openpcdet.Dockerfile | 85 +++++- .../lidar_det_py/lidar_det_py/inference.py | 272 +++++------------- 2 files changed, 149 insertions(+), 208 deletions(-) diff --git a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile index 2211c4e2..c76f493b 100644 --- a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile +++ b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile @@ -24,14 +24,87 @@ WORKDIR / RUN apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ rm -rf /root/* /root/.ros /tmp/* /var/lib/apt/lists/* /usr/share/doc/* ################################ INSTALL OpenPCDet ################################ -RUN apt update && apt install -y python3-pip +# Set environment variables +ENV NVENCODE_CFLAGS "-I/usr/local/cuda/include" +ENV CV_VERSION=4.2.0 +ENV DEBIAN_FRONTEND=noninteractive +# Get all dependencies +RUN apt-get update && apt-get install -y \ + git zip unzip libssl-dev libcairo2-dev lsb-release libgoogle-glog-dev libgflags-dev 
libatlas-base-dev libeigen3-dev software-properties-common \ + build-essential cmake pkg-config libapr1-dev autoconf automake libtool curl libc6 libboost-all-dev debconf libomp5 libstdc++6 \ + libqt5core5a libqt5xml5 libqt5gui5 libqt5widgets5 libqt5concurrent5 libqt5opengl5 libcap2 libusb-1.0-0 libatk-adaptor neovim +RUN apt-get update && apt-get install -y \ + python3-pip \ + python3-dev +RUN apt-get update && apt-get install -y \ + pcl-tools \ + python3-pcl \ + xvfb \ + x11-utils +RUN rm -rf /var/lib/apt/lists/* +# OpenCV with CUDA support +WORKDIR /opencv +RUN git clone https://github.com/opencv/opencv.git -b $CV_VERSION &&\ + git clone https://github.com/opencv/opencv_contrib.git -b $CV_VERSION +# While using OpenCV 4.2.0 we have to apply some fixes to ensure that CUDA is fully supported, thanks @https://github.com/gismo07 for this fix +RUN mkdir opencvfix && cd opencvfix &&\ + git clone https://github.com/opencv/opencv.git -b 4.5.2 &&\ + cd opencv/cmake &&\ + cp -r FindCUDA /opencv/opencv/cmake/ &&\ + cp FindCUDA.cmake /opencv/opencv/cmake/ &&\ + cp FindCUDNN.cmake /opencv/opencv/cmake/ &&\ + cp OpenCVDetectCUDA.cmake /opencv/opencv/cmake/ +WORKDIR /opencv/opencv/build +RUN cmake -D CMAKE_BUILD_TYPE=RELEASE \ +-D CMAKE_INSTALL_PREFIX=/usr/local \ +-D OPENCV_GENERATE_PKGCONFIG=ON \ +-D BUILD_EXAMPLES=OFF \ +-D INSTALL_PYTHON_EXAMPLES=OFF \ +-D INSTALL_C_EXAMPLES=OFF \ +-D PYTHON_EXECUTABLE=$(which python2) \ +-D PYTHON3_EXECUTABLE=$(which python3) \ +-D PYTHON3_INCLUDE_DIR=$(python3 -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())") \ +-D PYTHON3_PACKAGES_PATH=$(python3 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") \ +-D BUILD_opencv_python2=ON \ +-D BUILD_opencv_python3=ON \ +-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules/ \ +-D WITH_GSTREAMER=ON \ +-D WITH_CUDA=ON \ +-D ENABLE_PRECOMPILED_HEADERS=OFF \ +.. 
&&\ +make -j$(nproc) &&\ +make install &&\ +ldconfig &&\ +rm -rf /opencv +WORKDIR / +ENV OpenCV_DIR=/usr/share/OpenCV +# PyTorch for CUDA 11.6 RUN pip3 install torch==1.13.1+cu116 torchvision==0.14.1+cu116 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116 +ENV TORCH_CUDA_ARCH_LIST="3.5;5.0;6.0;6.1;7.0;7.5;8.0;8.6+PTX" +# OpenPCDet +RUN pip3 install numpy==1.23.0 llvmlite numba tensorboardX easydict pyyaml scikit-image tqdm SharedArray open3d mayavi av2 pyquaternion RUN pip3 install spconv-cu116 -RUN apt update && apt install -y python3-setuptools -WORKDIR /home/bolty/ -RUN git clone https://github.com/Kin-Zhang/OpenPCDet.git -RUN cd OpenPCDet && pip3 install -r requirements.txt -RUN pip3 install pyquaternion numpy==1.23 pillow==8.4 mayavi open3d +RUN pip3 install kornia==0.6.8 +RUN pip3 install nuscenes-devkit==1.0.5 +# Get extra kitti data +WORKDIR / +RUN git clone https://github.com/NVIDIA-AI-IOT/CUDA-PointPillars.git +# Change based on if you want to retain changes to the OpenPCDet repo +RUN git clone https://github.com/open-mmlab/OpenPCDet.git +WORKDIR /OpenPCDet +# Set up xvfb (X Virtual FrameBuffer) +RUN echo '#!/bin/bash\nXvfb :99 -screen 0 1280x1024x24 &\nsleep 3\nexec "$@"' > /usr/local/bin/start-xvfb.sh \ + && chmod +x /usr/local/bin/start-xvfb.sh +# Set the environment variable for DISPLAY +ENV DISPLAY=:99 +RUN python3 setup.py develop +WORKDIR / +ENV NVIDIA_VISIBLE_DEVICES="all" \ + OpenCV_DIR=/usr/share/OpenCV \ + NVIDIA_DRIVER_CAPABILITIES="video,compute,utility,graphics" \ + LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/lib:/usr/lib:/usr/local/lib \ + QT_GRAPHICSSYSTEM="native" +CMD ["/usr/local/bin/start-xvfb.sh"] ################################ Build ################################ FROM dependencies as build # Build ROS2 packages diff --git a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py index 141a6911..8e42c78a 100644 --- a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py +++ b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py @@ -1,111 +1,78 @@ -#! 
/usr/bin/env python3.8 -""" -Created on Thu Aug 6 11:27:43 2020 - -@author: Javier del Egido Sierra and Carlos Gómez-Huélamo +import argparse +import glob +from pathlib import Path +import rclpy +from rclpy.node import Node +from std_msgs.msg import Header +from sensor_msgs.msg import PointCloud2 +import numpy as np +import torch +import sys +sys.path.append('/home/bolty/OpenPCDet') +from pcdet.config import cfg, cfg_from_yaml_file +from pcdet.datasets import DatasetTemplate +from pcdet.models import build_network, load_data_to_gpu +from pcdet.utils import common_utils +import sensor_msgs_py.point_cloud2 as pc2 -=== +from visual_utils import open3d_vis_utils as V +OPEN3D_FLAG = True -Modified on 23 Dec 2022 -@author: Kin ZHANG (https://kin-zhang.github.io/) -Part of codes also refers: https://github.com/kwea123/ROS_notes -""" +class LidarDemoNode(Node): + def __init__(self): + super().__init__('lidar_demo_node') + self.publisher_ = self.create_publisher(PointCloud2, 'lidar_data', 10) -# General use imports -import os -import time -import glob -from pathlib import Path + args, cfg = self.parse_config() + self.logger = common_utils.create_logger() + self.logger.info('-----------------Quick Demo of OpenPCDet-------------------------') -# ROS imports -import rospy -import ros_numpy -from sensor_msgs.msg import PointCloud2, PointField -from visualization_msgs.msg import MarkerArray, Marker + self.demo_dataset = DemoDataset( + dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False, + root_path=Path(args.data_path), ext=args.ext, logger=self.logger + ) -# Math and geometry imports -import math -import numpy as np -import torch + self.model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=self.demo_dataset) + self.model.load_params_from_file(filename=args.ckpt, logger=self.logger, to_cpu=True) + self.model.cuda() + self.model.eval() -# OpenPCDet imports -from pcdet.datasets import DatasetTemplate -from pcdet.models import build_network, load_data_to_gpu -from pcdet.config import cfg, cfg_from_yaml_file -from pcdet.utils import box_utils, calibration_kitti, common_utils, object3d_kitti + self.process_data() -# Kin's utils -from utils.draw_3d import Draw3DBox -from utils.global_def import * -from utils import * + def process_data(self): + with torch.no_grad(): + for idx, data_dict in enumerate(self.demo_dataset): + data_dict = self.demo_dataset.collate_batch([data_dict]) + load_data_to_gpu(data_dict) + pred_dicts, _ = self.model.forward(data_dict) -import yaml -import os -BASE_DIR = os.path.abspath(os.path.join( os.path.dirname( __file__ ), '..' 
)) -with open(f"{BASE_DIR}/launch/config.yaml", 'r') as f: - try: - para_cfg = yaml.safe_load(f, Loader=yaml.FullLoader) - except: - para_cfg = yaml.safe_load(f) + # Convert and publish data + header = self.make_header() + point_cloud_msg = pc2.create_cloud_xyz32(header, data_dict['points'][:, 1:4]) # Assuming first 3 columns are x, y, z + self.publisher_.publish(point_cloud_msg) -cfg_root = para_cfg["cfg_root"] -model_path = para_cfg["model_path"] -move_lidar_center = para_cfg["move_lidar_center"] -threshold = para_cfg["threshold"] -pointcloud_topic = para_cfg["pointcloud_topic"] -RATE_VIZ = para_cfg["viz_rate"] -inference_time_list = [] + self.logger.info('Demo done.') + def parse_config(self): + parser = argparse.ArgumentParser(description='arg parser') + parser.add_argument('--cfg_file', type=str, default='cfgs/nuscenes_models/cbgs_second_multihead.yaml', + help='specify the config for demo') + parser.add_argument('--data_path', type=str, default='demo_data', + help='specify the point cloud data file or directory') + parser.add_argument('--ckpt', type=str, default=None, help='specify the pretrained model') + parser.add_argument('--ext', type=str, default='.bin', help='specify the extension of your point cloud data file') -def xyz_array_to_pointcloud2(points_sum, stamp=None, frame_id=None): - """ - Create a sensor_msgs.PointCloud2 from an array of points. - """ - msg = PointCloud2() - if stamp: - msg.header.stamp = stamp - if frame_id: - msg.header.frame_id = frame_id - msg.height = 1 - msg.width = points_sum.shape[0] - msg.fields = [ - PointField('x', 0, PointField.FLOAT32, 1), - PointField('y', 4, PointField.FLOAT32, 1), - PointField('z', 8, PointField.FLOAT32, 1) - # PointField('i', 12, PointField.FLOAT32, 1) - ] - msg.is_bigendian = False - msg.point_step = 12 - msg.row_step = points_sum.shape[0] - msg.is_dense = int(np.isfinite(points_sum).all()) - msg.data = np.asarray(points_sum, np.float32).tostring() - return msg + args, unknown = parser.parse_known_args() + cfg_from_yaml_file(args.cfg_file, cfg) + return args, cfg -def rslidar_callback(msg): - select_boxs, select_types = [],[] - if proc_1.no_frame_id: - proc_1.set_viz_frame_id(msg.header.frame_id) - print(f"{bc.OKGREEN} setting marker frame id to lidar: {msg.header.frame_id} {bc.ENDC}") - proc_1.no_frame_id = False + def make_header(self): + header = Header() + header.stamp = self.get_clock().now().to_msg() + header.frame_id = 'lidar_frame' + return header - frame = msg.header.seq # frame id -> not timestamp - msg_cloud = ros_numpy.point_cloud2.pointcloud2_to_array(msg) - np_p = get_xyz_points(msg_cloud, True) - scores, dt_box_lidar, types, pred_dict = proc_1.run(np_p, frame) - for i, score in enumerate(scores): - if score>threshold: - select_boxs.append(dt_box_lidar[i]) - select_types.append(pred_dict['name'][i]) - if(len(select_boxs)>0): - proc_1.pub_rviz.publish_3dbox(np.array(select_boxs), -1, pred_dict['name']) - print_str = f"Frame id: {frame}. 
Prediction results: \n" - for i in range(len(pred_dict['name'])): - print_str += f"Type: {pred_dict['name'][i]:.3s} Prob: {scores[i]:.2f}\n" - print(print_str) - else: - print(f"\n{bc.FAIL} No confident prediction in this time stamp {bc.ENDC}\n") - print(f" -------------------------------------------------------------- ") class DemoDataset(DatasetTemplate): def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None, ext='.bin'): @@ -132,7 +99,7 @@ def __len__(self): def __getitem__(self, index): if self.ext == '.bin': - points = np.fromfile(self.sample_file_list[index], dtype=np.float32).reshape(-1, 4) + points = np.fromfile(self.sample_file_list[index], dtype=np.float32).reshape(-1, 5) elif self.ext == '.npy': points = np.load(self.sample_file_list[index]) else: @@ -146,113 +113,14 @@ def __getitem__(self, index): data_dict = self.prepare_data(data_dict=input_dict) return data_dict -class Processor_ROS: - def __init__(self, config_path, model_path): - self.points = None - self.config_path = config_path - self.model_path = model_path - self.device = None - self.net = None - self.voxel_generator = None - self.inputs = None - self.pub_rviz = None - self.no_frame_id = True - self.rate = RATE_VIZ - - def set_pub_rviz(self, box3d_pub, marker_frame_id = 'velodyne'): - self.pub_rviz = Draw3DBox(box3d_pub, marker_frame_id, self.rate) - - def set_viz_frame_id(self, marker_frame_id): - self.pub_rviz.set_frame_id(marker_frame_id) - - def initialize(self): - self.read_config() - - def read_config(self): - config_path = self.config_path - cfg_from_yaml_file(self.config_path, cfg) - self.logger = common_utils.create_logger() - self.demo_dataset = DemoDataset( - dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False, - root_path=Path("/home/kin/workspace/OpenPCDet/tools/000002.bin"), - ext='.bin') - - self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - - self.net = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=self.demo_dataset) - print("Model path: ", self.model_path) - self.net.load_params_from_file(filename=self.model_path, logger=self.logger, to_cpu=True) - self.net = self.net.to(self.device).eval() - - def get_template_prediction(self, num_samples): - ret_dict = { - 'name': np.zeros(num_samples), 'truncated': np.zeros(num_samples), - 'occluded': np.zeros(num_samples), 'alpha': np.zeros(num_samples), - 'bbox': np.zeros([num_samples, 4]), 'dimensions': np.zeros([num_samples, 3]), - 'location': np.zeros([num_samples, 3]), 'rotation_y': np.zeros(num_samples), - 'score': np.zeros(num_samples), 'boxes_lidar': np.zeros([num_samples, 7]) - } - return ret_dict - - def run(self, points, frame): - t_t = time.time() - num_features = 4 # X,Y,Z,intensity - self.points = points.reshape([-1, num_features]) - - timestamps = np.empty((len(self.points),1)) - timestamps[:] = frame - - # self.points = np.append(self.points, timestamps, axis=1) - self.points[:,0] += move_lidar_center - - input_dict = { - 'points': self.points, - 'frame_id': frame, - } - - data_dict = self.demo_dataset.prepare_data(data_dict=input_dict) - data_dict = self.demo_dataset.collate_batch([data_dict]) - load_data_to_gpu(data_dict) - - torch.cuda.synchronize() - t = time.time() - - pred_dicts, _ = self.net.forward(data_dict) - - torch.cuda.synchronize() - inference_time = time.time() - t - inference_time_list.append(inference_time) - - boxes_lidar = pred_dicts[0]["pred_boxes"].detach().cpu().numpy() - scores = 
pred_dicts[0]["pred_scores"].detach().cpu().numpy() - types = pred_dicts[0]["pred_labels"].detach().cpu().numpy() - - pred_boxes = np.copy(boxes_lidar) - pred_dict = self.get_template_prediction(scores.shape[0]) - - pred_dict['name'] = np.array(cfg.CLASS_NAMES)[types - 1] - pred_dict['score'] = scores - pred_dict['boxes_lidar'] = pred_boxes - - return scores, boxes_lidar, types, pred_dict - -if __name__ == "__main__": - no_frame_id = False - proc_1 = Processor_ROS(cfg_root, model_path) - print(f"\n{bc.OKCYAN}Config path: {bc.BOLD}{cfg_root}{bc.ENDC}") - print(f"{bc.OKCYAN}Model path: {bc.BOLD}{model_path}{bc.ENDC}") - proc_1.initialize() - rospy.init_node('inference') - sub_lidar_topic = [pointcloud_topic] +def main(args=None): + rclpy.init(args=args) + node = LidarDemoNode() + rclpy.spin(node) + node.destroy_node() + rclpy.shutdown() - cfg_from_yaml_file(cfg_root, cfg) - - sub_ = rospy.Subscriber(sub_lidar_topic[0], PointCloud2, rslidar_callback, queue_size=1, buff_size=2**24) - pub_rviz = rospy.Publisher('detect_3dbox',MarkerArray, queue_size=10) - proc_1.set_pub_rviz(pub_rviz) - print(f"{bc.HEADER} ====================== {bc.ENDC}") - print(" ===> [+] PCDet ros_node has started. Try to Run the rosbag file") - print(f"{bc.HEADER} ====================== {bc.ENDC}") - rospy.spin() \ No newline at end of file +if __name__ == '__main__': + main() \ No newline at end of file From 9a072d94ef5d0f109ca199f60b61cbf07b8cf1b1 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Fri, 2 Feb 2024 01:58:47 +0000 Subject: [PATCH 08/70] builds with cuda and I can run the setup script :) --- ...idar_object_detection_openpcdet.Dockerfile | 93 +++---------------- 1 file changed, 12 insertions(+), 81 deletions(-) diff --git a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile index c76f493b..61902cb2 100644 --- a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile +++ b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile @@ -1,4 +1,4 @@ -ARG BASE_IMAGE=ghcr.io/watonomous/wato_monorepo/base:humble-ubuntu22.04 +ARG BASE_IMAGE=ghcr.io/watonomous/wato_monorepo/base:cuda11.7-humble-ubuntu22.04-devel ################################ Source ################################ FROM ${BASE_IMAGE} as source WORKDIR ${AMENT_WS}/src @@ -11,6 +11,10 @@ RUN apt-get -qq update && rosdep update && \ | grep 'apt-get install' \ | awk '{print $3}' \ | sort > /tmp/colcon_install_list +# Set environment variables +ENV CUDA_HOME /usr/local/cuda +ENV LD_LIBRARY_PATH /usr/local/cuda/lib64:${LD_LIBRARY_PATH} +ENV PATH /usr/local/cuda/bin:${PATH} ################################# Dependencies ################################ FROM ${BASE_IMAGE} as dependencies # Install Rosdep requirements @@ -24,87 +28,14 @@ WORKDIR / RUN apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ rm -rf /root/* /root/.ros /tmp/* /var/lib/apt/lists/* /usr/share/doc/* ################################ INSTALL OpenPCDet ################################ -# Set environment variables -ENV NVENCODE_CFLAGS "-I/usr/local/cuda/include" -ENV CV_VERSION=4.2.0 -ENV DEBIAN_FRONTEND=noninteractive -# Get all dependencies -RUN apt-get update && apt-get install -y \ - git zip unzip libssl-dev libcairo2-dev lsb-release libgoogle-glog-dev libgflags-dev libatlas-base-dev libeigen3-dev software-properties-common \ - build-essential cmake pkg-config libapr1-dev autoconf automake 
libtool curl libc6 libboost-all-dev debconf libomp5 libstdc++6 \ - libqt5core5a libqt5xml5 libqt5gui5 libqt5widgets5 libqt5concurrent5 libqt5opengl5 libcap2 libusb-1.0-0 libatk-adaptor neovim -RUN apt-get update && apt-get install -y \ - python3-pip \ - python3-dev -RUN apt-get update && apt-get install -y \ - pcl-tools \ - python3-pcl \ - xvfb \ - x11-utils -RUN rm -rf /var/lib/apt/lists/* -# OpenCV with CUDA support -WORKDIR /opencv -RUN git clone https://github.com/opencv/opencv.git -b $CV_VERSION &&\ - git clone https://github.com/opencv/opencv_contrib.git -b $CV_VERSION -# While using OpenCV 4.2.0 we have to apply some fixes to ensure that CUDA is fully supported, thanks @https://github.com/gismo07 for this fix -RUN mkdir opencvfix && cd opencvfix &&\ - git clone https://github.com/opencv/opencv.git -b 4.5.2 &&\ - cd opencv/cmake &&\ - cp -r FindCUDA /opencv/opencv/cmake/ &&\ - cp FindCUDA.cmake /opencv/opencv/cmake/ &&\ - cp FindCUDNN.cmake /opencv/opencv/cmake/ &&\ - cp OpenCVDetectCUDA.cmake /opencv/opencv/cmake/ -WORKDIR /opencv/opencv/build -RUN cmake -D CMAKE_BUILD_TYPE=RELEASE \ --D CMAKE_INSTALL_PREFIX=/usr/local \ --D OPENCV_GENERATE_PKGCONFIG=ON \ --D BUILD_EXAMPLES=OFF \ --D INSTALL_PYTHON_EXAMPLES=OFF \ --D INSTALL_C_EXAMPLES=OFF \ --D PYTHON_EXECUTABLE=$(which python2) \ --D PYTHON3_EXECUTABLE=$(which python3) \ --D PYTHON3_INCLUDE_DIR=$(python3 -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())") \ --D PYTHON3_PACKAGES_PATH=$(python3 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") \ --D BUILD_opencv_python2=ON \ --D BUILD_opencv_python3=ON \ --D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules/ \ --D WITH_GSTREAMER=ON \ --D WITH_CUDA=ON \ --D ENABLE_PRECOMPILED_HEADERS=OFF \ -.. 
&&\ -make -j$(nproc) &&\ -make install &&\ -ldconfig &&\ -rm -rf /opencv -WORKDIR / -ENV OpenCV_DIR=/usr/share/OpenCV -# PyTorch for CUDA 11.6 +RUN apt update && apt install -y python3-pip RUN pip3 install torch==1.13.1+cu116 torchvision==0.14.1+cu116 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116 -ENV TORCH_CUDA_ARCH_LIST="3.5;5.0;6.0;6.1;7.0;7.5;8.0;8.6+PTX" -# OpenPCDet -RUN pip3 install numpy==1.23.0 llvmlite numba tensorboardX easydict pyyaml scikit-image tqdm SharedArray open3d mayavi av2 pyquaternion -RUN pip3 install spconv-cu116 -RUN pip3 install kornia==0.6.8 -RUN pip3 install nuscenes-devkit==1.0.5 -# Get extra kitti data -WORKDIR / -RUN git clone https://github.com/NVIDIA-AI-IOT/CUDA-PointPillars.git -# Change based on if you want to retain changes to the OpenPCDet repo -RUN git clone https://github.com/open-mmlab/OpenPCDet.git -WORKDIR /OpenPCDet -# Set up xvfb (X Virtual FrameBuffer) -RUN echo '#!/bin/bash\nXvfb :99 -screen 0 1280x1024x24 &\nsleep 3\nexec "$@"' > /usr/local/bin/start-xvfb.sh \ - && chmod +x /usr/local/bin/start-xvfb.sh -# Set the environment variable for DISPLAY -ENV DISPLAY=:99 -RUN python3 setup.py develop -WORKDIR / -ENV NVIDIA_VISIBLE_DEVICES="all" \ - OpenCV_DIR=/usr/share/OpenCV \ - NVIDIA_DRIVER_CAPABILITIES="video,compute,utility,graphics" \ - LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/lib:/usr/lib:/usr/local/lib \ - QT_GRAPHICSSYSTEM="native" -CMD ["/usr/local/bin/start-xvfb.sh"] +RUN pip3 install spconv-cu113 +RUN apt update && apt install -y python3-setuptools +WORKDIR /home/bolty +RUN git clone https://github.com/Kin-Zhang/OpenPCDet.git +RUN cd OpenPCDet && pip3 install -r requirements.txt +RUN pip3 install pyquaternion numpy==1.23 pillow==8.4 mayavi open3d ################################ Build ################################ FROM dependencies as build # Build ROS2 packages From 00eec7b42b343b9b15f7c4c11256e7a175a75323 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Sun, 4 Feb 2024 05:54:31 +0000 Subject: [PATCH 09/70] Adding OpenCV --- ...idar_object_detection_openpcdet.Dockerfile | 39 +++++++++++++++++++ .../lidar_det_py/lidar_det_py/inference.py | 2 +- 2 files changed, 40 insertions(+), 1 deletion(-) diff --git a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile index 61902cb2..38f4801b 100644 --- a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile +++ b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile @@ -27,6 +27,45 @@ COPY --from=source ${AMENT_WS}/src src WORKDIR / RUN apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ rm -rf /root/* /root/.ros /tmp/* /var/lib/apt/lists/* /usr/share/doc/* +################################# INSTALL OpenCV with CUDA Support ################ +RUN apt-get update && apt-get install -y \ + git zip unzip libssl-dev libcairo2-dev lsb-release libgoogle-glog-dev libgflags-dev libatlas-base-dev libeigen3-dev software-properties-common \ + build-essential cmake pkg-config libapr1-dev autoconf automake libtool curl libc6 libboost-all-dev debconf libomp5 libstdc++6 \ + libqt5core5a libqt5xml5 libqt5gui5 libqt5widgets5 libqt5concurrent5 libqt5opengl5 libcap2 libusb-1.0-0 libatk-adaptor neovim +WORKDIR /opt +RUN git clone https://github.com/opencv/opencv.git && \ + cd opencv && \ + git checkout 4.5.5 + +RUN git clone https://github.com/opencv/opencv_contrib.git && \ + cd opencv_contrib 
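The simplified image above pins torch 1.13.1+cu116 next to a cu113 build of spconv, so a quick import check inside the built container is worth running before compiling OpenPCDet. A short sketch, not part of any commit here:

    # Run inside the container to confirm the pinned CUDA wheels load and see the GPU.
    import torch
    import spconv.pytorch  # spconv v2 import path; fails at import time if the wheel is broken

    print('torch', torch.__version__, 'cuda', torch.version.cuda,
          'gpu visible:', torch.cuda.is_available())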
&& \ + git checkout 4.5.5 +WORKDIR /opt/opencv/build +RUN cmake -D CMAKE_BUILD_TYPE=RELEASE \ +-D CMAKE_INSTALL_PREFIX=/usr/local \ +-D OPENCV_GENERATE_PKGCONFIG=ON \ +-D BUILD_EXAMPLES=OFF \ +-D INSTALL_PYTHON_EXAMPLES=OFF \ +-D INSTALL_C_EXAMPLES=OFF \ +-D PYTHON_EXECUTABLE=$(which python2) \ +-D PYTHON3_EXECUTABLE=$(which python3) \ +-D PYTHON3_INCLUDE_DIR=$(python3 -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())") \ +-D PYTHON3_PACKAGES_PATH=$(python3 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") \ +-D BUILD_opencv_python2=ON \ +-D BUILD_opencv_python3=ON \ +-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules/ \ +-D WITH_GSTREAMER=ON \ +-D WITH_CUDA=ON \ +-D ENABLE_PRECOMPILED_HEADERS=OFF \ +.. &&\ +make -j$(nproc) &&\ +make install &&\ +ldconfig &&\ +rm -rf /opencv + +WORKDIR / +ENV OpenCV_DIR=/usr/share/OpenCV +WORKDIR / ################################ INSTALL OpenPCDet ################################ RUN apt update && apt install -y python3-pip RUN pip3 install torch==1.13.1+cu116 torchvision==0.14.1+cu116 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116 diff --git a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py index 8e42c78a..6992168d 100644 --- a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py +++ b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py @@ -13,7 +13,7 @@ from pcdet.datasets import DatasetTemplate from pcdet.models import build_network, load_data_to_gpu from pcdet.utils import common_utils -import sensor_msgs_py.point_cloud2 as pc2 +import sensor_msgs.point_cloud2 as pc2 from visual_utils import open3d_vis_utils as V OPEN3D_FLAG = True From 3fad1f7cea68be028d9c65f1c84150826ade3ff8 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Mon, 5 Feb 2024 01:12:12 +0000 Subject: [PATCH 10/70] ROS node runs and makes a single inference on a .bin! --- ...idar_object_detection_openpcdet.Dockerfile | 117 +++++++----------- .../lidar_det_py/lidar_det_py/inference.py | 45 +++++-- 2 files changed, 82 insertions(+), 80 deletions(-) diff --git a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile index 38f4801b..927af4a4 100644 --- a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile +++ b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile @@ -1,95 +1,68 @@ ARG BASE_IMAGE=ghcr.io/watonomous/wato_monorepo/base:cuda11.7-humble-ubuntu22.04-devel -################################ Source ################################ -FROM ${BASE_IMAGE} as source -WORKDIR ${AMENT_WS}/src -# Copy in source code -COPY src/perception/lidar_object_detection lidar_object_detection -COPY src/wato_msgs/sample_msgs sample_msgs -# Scan for rosdeps -RUN apt-get -qq update && rosdep update && \ - rosdep install --from-paths . 
--ignore-src -r -s \ - | grep 'apt-get install' \ - | awk '{print $3}' \ - | sort > /tmp/colcon_install_list -# Set environment variables -ENV CUDA_HOME /usr/local/cuda -ENV LD_LIBRARY_PATH /usr/local/cuda/lib64:${LD_LIBRARY_PATH} -ENV PATH /usr/local/cuda/bin:${PATH} -################################# Dependencies ################################ FROM ${BASE_IMAGE} as dependencies -# Install Rosdep requirements -COPY --from=source /tmp/colcon_install_list /tmp/colcon_install_list -RUN apt-fast install -qq -y --no-install-recommends $(cat /tmp/colcon_install_list) -# Copy in source code from source stage -WORKDIR ${AMENT_WS} -COPY --from=source ${AMENT_WS}/src src -# Dependency Cleanup -WORKDIR / -RUN apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ - rm -rf /root/* /root/.ros /tmp/* /var/lib/apt/lists/* /usr/share/doc/* -################################# INSTALL OpenCV with CUDA Support ################ +################################# Dependencies ################################ RUN apt-get update && apt-get install -y \ git zip unzip libssl-dev libcairo2-dev lsb-release libgoogle-glog-dev libgflags-dev libatlas-base-dev libeigen3-dev software-properties-common \ build-essential cmake pkg-config libapr1-dev autoconf automake libtool curl libc6 libboost-all-dev debconf libomp5 libstdc++6 \ - libqt5core5a libqt5xml5 libqt5gui5 libqt5widgets5 libqt5concurrent5 libqt5opengl5 libcap2 libusb-1.0-0 libatk-adaptor neovim + libqt5core5a libqt5xml5 libqt5gui5 libqt5widgets5 libqt5concurrent5 libqt5opengl5 libcap2 libusb-1.0-0 libatk-adaptor neovim \ + python3-pip python3-setuptools \ + && apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ + rm -rf /var/lib/apt/lists/* /root/* /root/.ros /tmp/* /usr/share/doc/* +################################# INSTALL OpenCV with CUDA Support ############# WORKDIR /opt RUN git clone https://github.com/opencv/opencv.git && \ - cd opencv && \ - git checkout 4.5.5 - + cd opencv && git checkout 4.5.5 RUN git clone https://github.com/opencv/opencv_contrib.git && \ - cd opencv_contrib && \ - git checkout 4.5.5 + cd opencv_contrib && git checkout 4.5.5 WORKDIR /opt/opencv/build RUN cmake -D CMAKE_BUILD_TYPE=RELEASE \ --D CMAKE_INSTALL_PREFIX=/usr/local \ --D OPENCV_GENERATE_PKGCONFIG=ON \ --D BUILD_EXAMPLES=OFF \ --D INSTALL_PYTHON_EXAMPLES=OFF \ --D INSTALL_C_EXAMPLES=OFF \ --D PYTHON_EXECUTABLE=$(which python2) \ --D PYTHON3_EXECUTABLE=$(which python3) \ --D PYTHON3_INCLUDE_DIR=$(python3 -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())") \ --D PYTHON3_PACKAGES_PATH=$(python3 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") \ --D BUILD_opencv_python2=ON \ --D BUILD_opencv_python3=ON \ --D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules/ \ --D WITH_GSTREAMER=ON \ --D WITH_CUDA=ON \ --D ENABLE_PRECOMPILED_HEADERS=OFF \ -.. 
&&\ -make -j$(nproc) &&\ -make install &&\ -ldconfig &&\ -rm -rf /opencv - -WORKDIR / + -D CMAKE_INSTALL_PREFIX=/usr/local \ + -D OPENCV_GENERATE_PKGCONFIG=ON \ + -D BUILD_EXAMPLES=OFF \ + -D INSTALL_PYTHON_EXAMPLES=OFF \ + -D INSTALL_C_EXAMPLES=OFF \ + -D PYTHON_EXECUTABLE=$(which python3) \ + -D PYTHON3_INCLUDE_DIR=$(python3 -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())") \ + -D PYTHON3_PACKAGES_PATH=$(python3 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") \ + -D BUILD_opencv_python3=ON \ + -D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules/ \ + -D WITH_GSTREAMER=ON \ + -D WITH_CUDA=ON \ + -D ENABLE_PRECOMPILED_HEADERS=OFF \ + .. && make -j$(nproc) && make install && ldconfig +RUN rm -rf /opt/opencv /opt/opencv_contrib +# Set environment variables +ENV CUDA_HOME /usr/local/cuda +ENV LD_LIBRARY_PATH /usr/local/cuda/lib64:${LD_LIBRARY_PATH} +ENV PATH /usr/local/cuda/bin:${PATH} ENV OpenCV_DIR=/usr/share/OpenCV -WORKDIR / -################################ INSTALL OpenPCDet ################################ -RUN apt update && apt install -y python3-pip +# Install Python dependencies RUN pip3 install torch==1.13.1+cu116 torchvision==0.14.1+cu116 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116 -RUN pip3 install spconv-cu113 -RUN apt update && apt install -y python3-setuptools +RUN pip3 install spconv-cu113 pyquaternion numpy==1.23 pillow==8.4 mayavi open3d av2 +WORKDIR /home/bolty +# RUN git clone https://github.com/WATonomous/OpenPCDet.git +COPY /OpenPCDet /home/bolty/OpenPCDet WORKDIR /home/bolty -RUN git clone https://github.com/Kin-Zhang/OpenPCDet.git RUN cd OpenPCDet && pip3 install -r requirements.txt -RUN pip3 install pyquaternion numpy==1.23 pillow==8.4 mayavi open3d -################################ Build ################################ +RUN pip3 install kornia==0.6.8 +RUN pip3 install nuscenes-devkit==1.0.5 +################################ Source ####################################### FROM dependencies as build -# Build ROS2 packages +WORKDIR ${AMENT_WS}/src +COPY wato_monorepo/src/perception/lidar_object_detection lidar_object_detection +COPY wato_monorepo/src/wato_msgs/sample_msgs sample_msgs +RUN apt-get update && rosdep update && \ + rosdep install --from-paths . --ignore-src -r -y WORKDIR ${AMENT_WS} -RUN . /opt/ros/$ROS_DISTRO/setup.sh && \ - colcon build \ - --cmake-args -DCMAKE_BUILD_TYPE=Release -# Entrypoint will run before any CMD on launch. Sources ~/opt//setup.bash and ~/ament_ws/install/setup.bash -COPY docker/wato_ros_entrypoint.sh ${AMENT_WS}/wato_ros_entrypoint.sh +RUN . 
/opt/ros/$ROS_DISTRO/setup.sh && colcon build --cmake-args -DCMAKE_BUILD_TYPE=Release +# Entrypoint setup +COPY wato_monorepo/docker/wato_ros_entrypoint.sh ${AMENT_WS}/wato_ros_entrypoint.sh ENTRYPOINT ["./wato_ros_entrypoint.sh"] -################################ Prod ################################ +################################ Prod ######################################### # FROM build as deploy # # Source Cleanup and Security Setup # RUN chown -R $USER:$USER ${AMENT_WS} # RUN rm -rf src/* -# USER ${USER} +# USER ${USER} \ No newline at end of file diff --git a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py index 6992168d..33621c78 100644 --- a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py +++ b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py @@ -4,7 +4,7 @@ import rclpy from rclpy.node import Node from std_msgs.msg import Header -from sensor_msgs.msg import PointCloud2 +from sensor_msgs.msg import PointCloud2, PointField import numpy as np import torch import sys @@ -13,9 +13,7 @@ from pcdet.datasets import DatasetTemplate from pcdet.models import build_network, load_data_to_gpu from pcdet.utils import common_utils -import sensor_msgs.point_cloud2 as pc2 -from visual_utils import open3d_vis_utils as V OPEN3D_FLAG = True @@ -49,18 +47,19 @@ def process_data(self): # Convert and publish data header = self.make_header() - point_cloud_msg = pc2.create_cloud_xyz32(header, data_dict['points'][:, 1:4]) # Assuming first 3 columns are x, y, z + points = data_dict['points'][:, 1:4] # Extract the relevant columns (x, y, z) + point_cloud_msg = self.create_cloud_xyz32(header, points) # Assuming first 3 columns are x, y, z self.publisher_.publish(point_cloud_msg) self.logger.info('Demo done.') def parse_config(self): parser = argparse.ArgumentParser(description='arg parser') - parser.add_argument('--cfg_file', type=str, default='cfgs/nuscenes_models/cbgs_second_multihead.yaml', + parser.add_argument('--cfg_file', type=str, default='/home/bolty/OpenPCDet/tools/cfgs/kitti_models/pv_rcnn.yaml', help='specify the config for demo') - parser.add_argument('--data_path', type=str, default='demo_data', + parser.add_argument('--data_path', type=str, default='/home/bolty/data/velodyne/data/0000000001.bin', help='specify the point cloud data file or directory') - parser.add_argument('--ckpt', type=str, default=None, help='specify the pretrained model') + parser.add_argument('--ckpt', type=str, default="/home/bolty/OpenPCDet/models/pv_rcnn8369.pth", help='specify the pretrained model') parser.add_argument('--ext', type=str, default='.bin', help='specify the extension of your point cloud data file') args, unknown = parser.parse_known_args() @@ -72,6 +71,36 @@ def make_header(self): header.stamp = self.get_clock().now().to_msg() header.frame_id = 'lidar_frame' return header + + def create_cloud_xyz32(self, header, points): + """ + Create a sensor_msgs/PointCloud2 message from an array of points. + + :param header: std_msgs/Header, the header of the message. + :param points: numpy.ndarray, an Nx3 array of xyz points. + :return: sensor_msgs/PointCloud2, the constructed PointCloud2 message. 
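The helper being added here packs the float32 fields by hand; ROS 2 Humble also ships sensor_msgs_py, whose create_cloud_xyz32 builds the same message. A minimal sketch, assuming sensor_msgs_py is available in the image and the same Nx3 torch tensor used by the node above:

    from sensor_msgs_py import point_cloud2 as pc2

    def to_cloud_msg(header, points):
        # points: Nx3 torch tensor -> sensor_msgs/PointCloud2 with float32 x, y, z fields
        return pc2.create_cloud_xyz32(header, points.cpu().numpy())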
+ """ + # Create fields for the PointCloud2 message + fields = [ + PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1), + PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1), + PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1) + ] + points_np = points.cpu().numpy() + # Create a PointCloud2 message + cloud = PointCloud2() + cloud.header = header + cloud.height = 1 # Unstructured point cloud + cloud.width = points_np.shape[0] + cloud.fields = fields + cloud.is_bigendian = False # Assuming little endian + cloud.point_step = 12 # FLOAT32 (4 bytes) * 3 (x, y, z) + cloud.row_step = cloud.point_step * cloud.width + cloud.is_dense = bool(np.isfinite(points_np).all()) + cloud.data = np.asarray(points_np, np.float32).tobytes() + + return cloud + class DemoDataset(DatasetTemplate): @@ -99,7 +128,7 @@ def __len__(self): def __getitem__(self, index): if self.ext == '.bin': - points = np.fromfile(self.sample_file_list[index], dtype=np.float32).reshape(-1, 5) + points = np.fromfile(self.sample_file_list[index], dtype=np.float32).reshape(-1, 4) #change back to 5 later elif self.ext == '.npy': points = np.load(self.sample_file_list[index]) else: From cab5d528043ccba00b6c57d5388510be828941ca Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Tue, 6 Feb 2024 03:19:00 +0000 Subject: [PATCH 11/70] Foxglove integration and bounding boxes --- ...idar_object_detection_openpcdet.Dockerfile | 3 ++- .../lidar_det_py/lidar_det_py/inference.py | 27 +++++++++++++++++++ .../lidar_det_py/package.xml | 1 + 3 files changed, 30 insertions(+), 1 deletion(-) diff --git a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile index 927af4a4..18487a31 100644 --- a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile +++ b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile @@ -7,7 +7,7 @@ RUN apt-get update && apt-get install -y \ libqt5core5a libqt5xml5 libqt5gui5 libqt5widgets5 libqt5concurrent5 libqt5opengl5 libcap2 libusb-1.0-0 libatk-adaptor neovim \ python3-pip python3-setuptools \ && apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ - rm -rf /var/lib/apt/lists/* /root/* /root/.ros /tmp/* /usr/share/doc/* + rm -rf /var/lib/apt/lists/* /root/* /root/.ros /tmp/* /usr/share/doc ################################# INSTALL OpenCV with CUDA Support ############# WORKDIR /opt RUN git clone https://github.com/opencv/opencv.git && \ @@ -53,6 +53,7 @@ COPY wato_monorepo/src/perception/lidar_object_detection lidar_object_detection COPY wato_monorepo/src/wato_msgs/sample_msgs sample_msgs RUN apt-get update && rosdep update && \ rosdep install --from-paths . --ignore-src -r -y +RUN apt install -y ros-${ROS_DISTRO}-foxglove-bridge WORKDIR ${AMENT_WS} RUN . 
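The reshape change above (and its "change back to 5 later" comment) tracks the point layout of the dataset being read: KITTI velodyne .bin files store four float32 values per point, while nuScenes .bin files store five. A one-line sketch of each for reference; the file paths are placeholders:

    import numpy as np

    kitti_pts = np.fromfile('000001.bin', dtype=np.float32).reshape(-1, 4)    # x, y, z, intensity
    nuscenes_pts = np.fromfile('sweep.bin', dtype=np.float32).reshape(-1, 5)  # x, y, z, intensity, ring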
/opt/ros/$ROS_DISTRO/setup.sh && colcon build --cmake-args -DCMAKE_BUILD_TYPE=Release # Entrypoint setup diff --git a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py index 33621c78..dd544077 100644 --- a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py +++ b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py @@ -13,6 +13,7 @@ from pcdet.datasets import DatasetTemplate from pcdet.models import build_network, load_data_to_gpu from pcdet.utils import common_utils +from visualization_msgs.msg import Marker, MarkerArray OPEN3D_FLAG = True @@ -21,6 +22,7 @@ class LidarDemoNode(Node): def __init__(self): super().__init__('lidar_demo_node') self.publisher_ = self.create_publisher(PointCloud2, 'lidar_data', 10) + self.bbox_publisher = self.create_publisher(MarkerArray, '/bounding_boxes', 10) args, cfg = self.parse_config() self.logger = common_utils.create_logger() @@ -38,6 +40,30 @@ def __init__(self): self.process_data() + def publish_bounding_boxes(self, pred_dicts, frame_id): + marker_array = MarkerArray() + for idx, box in enumerate(pred_dicts[0]['pred_boxes']): + marker = Marker() + marker.header.frame_id = frame_id + marker.header.stamp = self.get_clock().now().to_msg() + marker.id = idx + marker.type = Marker.CUBE + marker.action = Marker.ADD + marker.pose.position.x = float(box[0]) + marker.pose.position.y = float(box[1]) + marker.pose.position.z = float(box[2]) + marker.pose.orientation.w = 1.0 # Assuming no rotation; modify as needed + marker.scale.x = float(box[3]) # Size in X direction + marker.scale.y = float(box[4]) # Size in Y direction + marker.scale.z = float(box[5]) # Size in Z direction + marker.color.a = 0.8 # Alpha + marker.color.r = 1.0 # Red + marker.color.g = 0.0 # Green + marker.color.b = 0.0 # Blue + marker_array.markers.append(marker) + + self.bbox_publisher.publish(marker_array) + def process_data(self): with torch.no_grad(): for idx, data_dict in enumerate(self.demo_dataset): @@ -50,6 +76,7 @@ def process_data(self): points = data_dict['points'][:, 1:4] # Extract the relevant columns (x, y, z) point_cloud_msg = self.create_cloud_xyz32(header, points) # Assuming first 3 columns are x, y, z self.publisher_.publish(point_cloud_msg) + self.publish_bounding_boxes(pred_dicts, "lidar_frame") # Use appropriate frame_id self.logger.info('Demo done.') diff --git a/src/perception/lidar_object_detection/lidar_det_py/package.xml b/src/perception/lidar_object_detection/lidar_det_py/package.xml index f9da549c..0700ea0c 100644 --- a/src/perception/lidar_object_detection/lidar_det_py/package.xml +++ b/src/perception/lidar_object_detection/lidar_det_py/package.xml @@ -14,6 +14,7 @@ nav_msgs geometry_msgs tf + visualization_msgs ament_copyright ament_flake8 From 9832d195b3162ecf56d0552f7ff61b87e16e40bc Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Tue, 6 Feb 2024 06:02:14 +0000 Subject: [PATCH 12/70] aligned lidar data with bounding boxes --- .../lidar_det_py/lidar_det_py/inference.py | 24 +++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py index dd544077..52574cae 100644 --- a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py +++ b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py @@ -73,7 
+73,7 @@ def process_data(self): # Convert and publish data header = self.make_header() - points = data_dict['points'][:, 1:4] # Extract the relevant columns (x, y, z) + points = data_dict['points'][:, :3] # Extract the relevant columns (x, y, z) point_cloud_msg = self.create_cloud_xyz32(header, points) # Assuming first 3 columns are x, y, z self.publisher_.publish(point_cloud_msg) self.publish_bounding_boxes(pred_dicts, "lidar_frame") # Use appropriate frame_id @@ -113,18 +113,34 @@ def create_cloud_xyz32(self, header, points): PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1), PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1) ] + + y_angle = np.deg2rad(90) + z_angle = np.deg2rad(-90) + rotation_matrix_y = np.array([ + [np.cos(y_angle), 0, np.sin(y_angle)], + [0, 1, 0 ], + [-np.sin(y_angle), 0, np.cos(y_angle)] + ]) + rotation_matrix_z = np.array([ + [np.cos(z_angle), -np.sin(z_angle), 0], + [np.sin(z_angle), np.cos(z_angle), 0], + [0, 0, 1] + ]) + # Apply transformation points_np = points.cpu().numpy() + points_np_y = np.dot(points_np, rotation_matrix_y) + points_transformed = np.dot(points_np_y, rotation_matrix_z.T) # Create a PointCloud2 message cloud = PointCloud2() cloud.header = header cloud.height = 1 # Unstructured point cloud - cloud.width = points_np.shape[0] + cloud.width = points_transformed.shape[0] cloud.fields = fields cloud.is_bigendian = False # Assuming little endian cloud.point_step = 12 # FLOAT32 (4 bytes) * 3 (x, y, z) cloud.row_step = cloud.point_step * cloud.width - cloud.is_dense = bool(np.isfinite(points_np).all()) - cloud.data = np.asarray(points_np, np.float32).tobytes() + cloud.is_dense = bool(np.isfinite(points_transformed).all()) + cloud.data = np.asarray(points_transformed, np.float32).tobytes() return cloud From c0aa3e81d3133a5153a6369a28c71c40c54f50ca Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Fri, 9 Feb 2024 00:22:16 +0000 Subject: [PATCH 13/70] changing to work with wato fork of openpcdet --- .../lidar_object_detection_openpcdet.Dockerfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile index 18487a31..e14c962e 100644 --- a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile +++ b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile @@ -8,7 +8,7 @@ RUN apt-get update && apt-get install -y \ python3-pip python3-setuptools \ && apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ rm -rf /var/lib/apt/lists/* /root/* /root/.ros /tmp/* /usr/share/doc -################################# INSTALL OpenCV with CUDA Support ############# +################################ INSTALL OpenCV with CUDA Support ############## WORKDIR /opt RUN git clone https://github.com/opencv/opencv.git && \ cd opencv && git checkout 4.5.5 @@ -40,8 +40,8 @@ ENV OpenCV_DIR=/usr/share/OpenCV RUN pip3 install torch==1.13.1+cu116 torchvision==0.14.1+cu116 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116 RUN pip3 install spconv-cu113 pyquaternion numpy==1.23 pillow==8.4 mayavi open3d av2 WORKDIR /home/bolty -# RUN git clone https://github.com/WATonomous/OpenPCDet.git -COPY /OpenPCDet /home/bolty/OpenPCDet +RUN git clone https://github.com/WATonomous/OpenPCDet.git +# COPY /OpenPCDet /home/bolty/OpenPCDet WORKDIR /home/bolty RUN cd OpenPCDet && 
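The cube markers published in the previous commit keep an identity orientation even though OpenPCDet boxes carry a heading angle, so rotated detections will not line up with the cloud. A sketch of filling in the orientation, assuming the usual OpenPCDet (x, y, z, dx, dy, dz, heading) box layout:

    import math

    def yaw_to_quaternion(yaw):
        # Rotation about +Z only; returns (x, y, z, w).
        return 0.0, 0.0, math.sin(yaw / 2.0), math.cos(yaw / 2.0)

    # Inside publish_bounding_boxes, per predicted box:
    # qx, qy, qz, qw = yaw_to_quaternion(float(box[6]))
    # marker.pose.orientation.z = qz
    # marker.pose.orientation.w = qw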
pip3 install -r requirements.txt RUN pip3 install kornia==0.6.8 From 424af34587ee892188fe9ecf7fb922359318962c Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Fri, 19 Jan 2024 01:42:52 +0000 Subject: [PATCH 14/70] base node for openpcdet --- .../lidar-object-det/__init__.py | 0 .../lidar-object-det/openpcdet.py | 6 +++++ .../lidar-object-det/package.xml | 18 +++++++++++++ .../resource/lidar-object-det | 0 .../lidar-object-det/setup.cfg | 4 +++ .../lidar-object-det/setup.py | 26 +++++++++++++++++++ .../lidar-object-det/test/test_copyright.py | 25 ++++++++++++++++++ .../lidar-object-det/test/test_flake8.py | 25 ++++++++++++++++++ .../lidar-object-det/test/test_pep257.py | 23 ++++++++++++++++ 9 files changed, 127 insertions(+) create mode 100644 src/perception/lidar_object_detection/lidar-object-det/lidar-object-det/__init__.py create mode 100644 src/perception/lidar_object_detection/lidar-object-det/lidar-object-det/openpcdet.py create mode 100644 src/perception/lidar_object_detection/lidar-object-det/package.xml create mode 100644 src/perception/lidar_object_detection/lidar-object-det/resource/lidar-object-det create mode 100644 src/perception/lidar_object_detection/lidar-object-det/setup.cfg create mode 100644 src/perception/lidar_object_detection/lidar-object-det/setup.py create mode 100644 src/perception/lidar_object_detection/lidar-object-det/test/test_copyright.py create mode 100644 src/perception/lidar_object_detection/lidar-object-det/test/test_flake8.py create mode 100644 src/perception/lidar_object_detection/lidar-object-det/test/test_pep257.py diff --git a/src/perception/lidar_object_detection/lidar-object-det/lidar-object-det/__init__.py b/src/perception/lidar_object_detection/lidar-object-det/lidar-object-det/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/perception/lidar_object_detection/lidar-object-det/lidar-object-det/openpcdet.py b/src/perception/lidar_object_detection/lidar-object-det/lidar-object-det/openpcdet.py new file mode 100644 index 00000000..3a433920 --- /dev/null +++ b/src/perception/lidar_object_detection/lidar-object-det/lidar-object-det/openpcdet.py @@ -0,0 +1,6 @@ +def main(): + print('Hi from lidar-object-det. 
lol!') + + +if __name__ == '__main__': + main() diff --git a/src/perception/lidar_object_detection/lidar-object-det/package.xml b/src/perception/lidar_object_detection/lidar-object-det/package.xml new file mode 100644 index 00000000..a7092bce --- /dev/null +++ b/src/perception/lidar_object_detection/lidar-object-det/package.xml @@ -0,0 +1,18 @@ + + + + lidar-object-det + 0.0.0 + TODO: Package description + bolty + TODO: License declaration + + ament_copyright + ament_flake8 + ament_pep257 + python3-pytest + + + ament_python + + diff --git a/src/perception/lidar_object_detection/lidar-object-det/resource/lidar-object-det b/src/perception/lidar_object_detection/lidar-object-det/resource/lidar-object-det new file mode 100644 index 00000000..e69de29b diff --git a/src/perception/lidar_object_detection/lidar-object-det/setup.cfg b/src/perception/lidar_object_detection/lidar-object-det/setup.cfg new file mode 100644 index 00000000..5808ed9e --- /dev/null +++ b/src/perception/lidar_object_detection/lidar-object-det/setup.cfg @@ -0,0 +1,4 @@ +[develop] +script_dir=$base/lib/lidar-object-det +[install] +install_scripts=$base/lib/lidar-object-det diff --git a/src/perception/lidar_object_detection/lidar-object-det/setup.py b/src/perception/lidar_object_detection/lidar-object-det/setup.py new file mode 100644 index 00000000..23215bbf --- /dev/null +++ b/src/perception/lidar_object_detection/lidar-object-det/setup.py @@ -0,0 +1,26 @@ +from setuptools import find_packages, setup + +package_name = 'lidar-object-det' + +setup( + name=package_name, + version='0.0.0', + packages=find_packages(exclude=['test']), + data_files=[ + ('share/ament_index/resource_index/packages', + ['resource/' + package_name]), + ('share/' + package_name, ['package.xml']), + ], + install_requires=['setuptools'], + zip_safe=True, + maintainer='bolty', + maintainer_email='bolty@todo.todo', + description='TODO: Package description', + license='TODO: License declaration', + tests_require=['pytest'], + entry_points={ + 'console_scripts': [ + 'openpcdet = lidar-object-det.openpcdet:main' + ], + }, +) diff --git a/src/perception/lidar_object_detection/lidar-object-det/test/test_copyright.py b/src/perception/lidar_object_detection/lidar-object-det/test/test_copyright.py new file mode 100644 index 00000000..97a39196 --- /dev/null +++ b/src/perception/lidar_object_detection/lidar-object-det/test/test_copyright.py @@ -0,0 +1,25 @@ +# Copyright 2015 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from ament_copyright.main import main +import pytest + + +# Remove the `skip` decorator once the source file(s) have a copyright header +@pytest.mark.skip(reason='No copyright header has been placed in the generated source file.') +@pytest.mark.copyright +@pytest.mark.linter +def test_copyright(): + rc = main(argv=['.', 'test']) + assert rc == 0, 'Found errors' diff --git a/src/perception/lidar_object_detection/lidar-object-det/test/test_flake8.py b/src/perception/lidar_object_detection/lidar-object-det/test/test_flake8.py new file mode 100644 index 00000000..27ee1078 --- /dev/null +++ b/src/perception/lidar_object_detection/lidar-object-det/test/test_flake8.py @@ -0,0 +1,25 @@ +# Copyright 2017 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ament_flake8.main import main_with_errors +import pytest + + +@pytest.mark.flake8 +@pytest.mark.linter +def test_flake8(): + rc, errors = main_with_errors(argv=[]) + assert rc == 0, \ + 'Found %d code style errors / warnings:\n' % len(errors) + \ + '\n'.join(errors) diff --git a/src/perception/lidar_object_detection/lidar-object-det/test/test_pep257.py b/src/perception/lidar_object_detection/lidar-object-det/test/test_pep257.py new file mode 100644 index 00000000..b234a384 --- /dev/null +++ b/src/perception/lidar_object_detection/lidar-object-det/test/test_pep257.py @@ -0,0 +1,23 @@ +# Copyright 2015 Open Source Robotics Foundation, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from ament_pep257.main import main +import pytest + + +@pytest.mark.linter +@pytest.mark.pep257 +def test_pep257(): + rc = main(argv=['.', 'test']) + assert rc == 0, 'Found code style errors / warnings' From 46441ba742872f051cfd8609374549de4fd17533 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Fri, 19 Jan 2024 01:44:26 +0000 Subject: [PATCH 15/70] copied dockerfile for openpcdet --- ...idar_object_detection_openpcdet.Dockerfile | 55 +++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile diff --git a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile new file mode 100644 index 00000000..6deba0bc --- /dev/null +++ b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile @@ -0,0 +1,55 @@ +ARG BASE_IMAGE=ghcr.io/watonomous/wato_monorepo/base:humble-ubuntu22.04 + +################################ Source ################################ +FROM ${BASE_IMAGE} as source + +WORKDIR ${AMENT_WS}/src + +# Copy in source code +COPY src/perception/lidar_object_detection lidar_object_detection +COPY src/wato_msgs/sample_msgs sample_msgs + +# Scan for rosdeps +RUN apt-get -qq update && rosdep update && \ + rosdep install --from-paths . --ignore-src -r -s \ + | grep 'apt-get install' \ + | awk '{print $3}' \ + | sort > /tmp/colcon_install_list + +################################# Dependencies ################################ +FROM ${BASE_IMAGE} as dependencies + +# Install Rosdep requirements +COPY --from=source /tmp/colcon_install_list /tmp/colcon_install_list +RUN apt-fast install -qq -y --no-install-recommends $(cat /tmp/colcon_install_list) + +# Copy in source code from source stage +WORKDIR ${AMENT_WS} +COPY --from=source ${AMENT_WS}/src src + +# Dependency Cleanup +WORKDIR / +RUN apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ + rm -rf /root/* /root/.ros /tmp/* /var/lib/apt/lists/* /usr/share/doc/* + +################################ Build ################################ +FROM dependencies as build + +# Build ROS2 packages +WORKDIR ${AMENT_WS} +RUN . /opt/ros/$ROS_DISTRO/setup.sh && \ + colcon build \ + --cmake-args -DCMAKE_BUILD_TYPE=Release + +# Entrypoint will run before any CMD on launch. 
Sources ~/opt//setup.bash and ~/ament_ws/install/setup.bash +COPY docker/wato_ros_entrypoint.sh ${AMENT_WS}/wato_ros_entrypoint.sh +ENTRYPOINT ["./wato_ros_entrypoint.sh"] + +################################ Prod ################################ +FROM build as deploy + +# Source Cleanup and Security Setup +RUN chown -R $USER:$USER ${AMENT_WS} +RUN rm -rf src/* + +USER ${USER} From e9661323b974ca081e441dddddbc23360cddd433 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Sat, 20 Jan 2024 05:57:32 +0000 Subject: [PATCH 16/70] working package in ros --- ...idar_object_detection_openpcdet.Dockerfile | 10 +++--- .../lidar-object-det/__init__.py | 0 .../lidar-object-det/openpcdet.py | 6 ---- .../resource/lidar-object-det | 0 .../lidar-object-det/setup.cfg | 4 --- .../lidar-object-det/setup.py | 26 -------------- .../lidar-object-det/test/test_copyright.py | 25 ------------- .../lidar-object-det/test/test_flake8.py | 25 ------------- .../lidar-object-det/test/test_pep257.py | 23 ------------ .../lidar_det/CMakeLists.txt | 33 +++++++++++++++++ .../package.xml | 15 ++++---- .../lidar_det/src/lidar_det.cpp | 35 +++++++++++++++++++ 12 files changed, 82 insertions(+), 120 deletions(-) delete mode 100644 src/perception/lidar_object_detection/lidar-object-det/lidar-object-det/__init__.py delete mode 100644 src/perception/lidar_object_detection/lidar-object-det/lidar-object-det/openpcdet.py delete mode 100644 src/perception/lidar_object_detection/lidar-object-det/resource/lidar-object-det delete mode 100644 src/perception/lidar_object_detection/lidar-object-det/setup.cfg delete mode 100644 src/perception/lidar_object_detection/lidar-object-det/setup.py delete mode 100644 src/perception/lidar_object_detection/lidar-object-det/test/test_copyright.py delete mode 100644 src/perception/lidar_object_detection/lidar-object-det/test/test_flake8.py delete mode 100644 src/perception/lidar_object_detection/lidar-object-det/test/test_pep257.py create mode 100644 src/perception/lidar_object_detection/lidar_det/CMakeLists.txt rename src/perception/lidar_object_detection/{lidar-object-det => lidar_det}/package.xml (59%) create mode 100644 src/perception/lidar_object_detection/lidar_det/src/lidar_det.cpp diff --git a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile index 6deba0bc..291364ee 100644 --- a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile +++ b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile @@ -46,10 +46,10 @@ COPY docker/wato_ros_entrypoint.sh ${AMENT_WS}/wato_ros_entrypoint.sh ENTRYPOINT ["./wato_ros_entrypoint.sh"] ################################ Prod ################################ -FROM build as deploy +# FROM build as deploy -# Source Cleanup and Security Setup -RUN chown -R $USER:$USER ${AMENT_WS} -RUN rm -rf src/* +# # Source Cleanup and Security Setup +# RUN chown -R $USER:$USER ${AMENT_WS} +# RUN rm -rf src/* -USER ${USER} +# USER ${USER} diff --git a/src/perception/lidar_object_detection/lidar-object-det/lidar-object-det/__init__.py b/src/perception/lidar_object_detection/lidar-object-det/lidar-object-det/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/perception/lidar_object_detection/lidar-object-det/lidar-object-det/openpcdet.py b/src/perception/lidar_object_detection/lidar-object-det/lidar-object-det/openpcdet.py deleted file mode 100644 index 3a433920..00000000 --- 
a/src/perception/lidar_object_detection/lidar-object-det/lidar-object-det/openpcdet.py +++ /dev/null @@ -1,6 +0,0 @@ -def main(): - print('Hi from lidar-object-det. lol!') - - -if __name__ == '__main__': - main() diff --git a/src/perception/lidar_object_detection/lidar-object-det/resource/lidar-object-det b/src/perception/lidar_object_detection/lidar-object-det/resource/lidar-object-det deleted file mode 100644 index e69de29b..00000000 diff --git a/src/perception/lidar_object_detection/lidar-object-det/setup.cfg b/src/perception/lidar_object_detection/lidar-object-det/setup.cfg deleted file mode 100644 index 5808ed9e..00000000 --- a/src/perception/lidar_object_detection/lidar-object-det/setup.cfg +++ /dev/null @@ -1,4 +0,0 @@ -[develop] -script_dir=$base/lib/lidar-object-det -[install] -install_scripts=$base/lib/lidar-object-det diff --git a/src/perception/lidar_object_detection/lidar-object-det/setup.py b/src/perception/lidar_object_detection/lidar-object-det/setup.py deleted file mode 100644 index 23215bbf..00000000 --- a/src/perception/lidar_object_detection/lidar-object-det/setup.py +++ /dev/null @@ -1,26 +0,0 @@ -from setuptools import find_packages, setup - -package_name = 'lidar-object-det' - -setup( - name=package_name, - version='0.0.0', - packages=find_packages(exclude=['test']), - data_files=[ - ('share/ament_index/resource_index/packages', - ['resource/' + package_name]), - ('share/' + package_name, ['package.xml']), - ], - install_requires=['setuptools'], - zip_safe=True, - maintainer='bolty', - maintainer_email='bolty@todo.todo', - description='TODO: Package description', - license='TODO: License declaration', - tests_require=['pytest'], - entry_points={ - 'console_scripts': [ - 'openpcdet = lidar-object-det.openpcdet:main' - ], - }, -) diff --git a/src/perception/lidar_object_detection/lidar-object-det/test/test_copyright.py b/src/perception/lidar_object_detection/lidar-object-det/test/test_copyright.py deleted file mode 100644 index 97a39196..00000000 --- a/src/perception/lidar_object_detection/lidar-object-det/test/test_copyright.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2015 Open Source Robotics Foundation, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from ament_copyright.main import main -import pytest - - -# Remove the `skip` decorator once the source file(s) have a copyright header -@pytest.mark.skip(reason='No copyright header has been placed in the generated source file.') -@pytest.mark.copyright -@pytest.mark.linter -def test_copyright(): - rc = main(argv=['.', 'test']) - assert rc == 0, 'Found errors' diff --git a/src/perception/lidar_object_detection/lidar-object-det/test/test_flake8.py b/src/perception/lidar_object_detection/lidar-object-det/test/test_flake8.py deleted file mode 100644 index 27ee1078..00000000 --- a/src/perception/lidar_object_detection/lidar-object-det/test/test_flake8.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2017 Open Source Robotics Foundation, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from ament_flake8.main import main_with_errors -import pytest - - -@pytest.mark.flake8 -@pytest.mark.linter -def test_flake8(): - rc, errors = main_with_errors(argv=[]) - assert rc == 0, \ - 'Found %d code style errors / warnings:\n' % len(errors) + \ - '\n'.join(errors) diff --git a/src/perception/lidar_object_detection/lidar-object-det/test/test_pep257.py b/src/perception/lidar_object_detection/lidar-object-det/test/test_pep257.py deleted file mode 100644 index b234a384..00000000 --- a/src/perception/lidar_object_detection/lidar-object-det/test/test_pep257.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2015 Open Source Robotics Foundation, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from ament_pep257.main import main -import pytest - - -@pytest.mark.linter -@pytest.mark.pep257 -def test_pep257(): - rc = main(argv=['.', 'test']) - assert rc == 0, 'Found code style errors / warnings' diff --git a/src/perception/lidar_object_detection/lidar_det/CMakeLists.txt b/src/perception/lidar_object_detection/lidar_det/CMakeLists.txt new file mode 100644 index 00000000..507bfafb --- /dev/null +++ b/src/perception/lidar_object_detection/lidar_det/CMakeLists.txt @@ -0,0 +1,33 @@ +cmake_minimum_required(VERSION 3.8) +project(lidar_det) + +if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + add_compile_options(-Wall -Wextra -Wpedantic) +endif() + +# find dependencies +find_package(ament_cmake REQUIRED) +find_package(rclcpp REQUIRED) +find_package(std_msgs REQUIRED) + +if(BUILD_TESTING) + find_package(ament_lint_auto REQUIRED) + # the following line skips the linter which checks for copyrights + # comment the line when a copyright and license is added to all source files + set(ament_cmake_copyright_FOUND TRUE) + # the following line skips cpplint (only works in a git repo) + # comment the line when this package is in a git repo and when + # a copyright and license is added to all source files + set(ament_cmake_cpplint_FOUND TRUE) + ament_lint_auto_find_test_dependencies() +endif() + +add_executable(lidar_det src/lidar_det.cpp) +ament_target_dependencies(lidar_det rclcpp std_msgs) +install(TARGETS + lidar_det + DESTINATION lib/${PROJECT_NAME} +) + + +ament_package() diff --git a/src/perception/lidar_object_detection/lidar-object-det/package.xml b/src/perception/lidar_object_detection/lidar_det/package.xml similarity index 59% rename from src/perception/lidar_object_detection/lidar-object-det/package.xml rename to 
src/perception/lidar_object_detection/lidar_det/package.xml
index a7092bce..959555ab 100644
--- a/src/perception/lidar_object_detection/lidar-object-det/package.xml
+++ b/src/perception/lidar_object_detection/lidar_det/package.xml
@@ -1,18 +1,21 @@
 <?xml version="1.0"?>
 <?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
 <package format="3">
-  <name>lidar-object-det</name>
+  <name>lidar_det</name>
   <version>0.0.0</version>
   <description>TODO: Package description</description>
   <maintainer email="bolty@todo.todo">bolty</maintainer>
   <license>TODO: License declaration</license>
 
-  <test_depend>ament_copyright</test_depend>
-  <test_depend>ament_flake8</test_depend>
-  <test_depend>ament_pep257</test_depend>
-  <test_depend>python3-pytest</test_depend>
+  <buildtool_depend>ament_cmake</buildtool_depend>
+
+  <depend>rclcpp</depend>
+  <depend>std_msgs</depend>
+
+  <test_depend>ament_lint_auto</test_depend>
+  <test_depend>ament_lint_common</test_depend>
 
   <export>
-    <build_type>ament_python</build_type>
+    <build_type>ament_cmake</build_type>
   </export>
 </package>
diff --git a/src/perception/lidar_object_detection/lidar_det/src/lidar_det.cpp b/src/perception/lidar_object_detection/lidar_det/src/lidar_det.cpp
new file mode 100644
index 00000000..62e663eb
--- /dev/null
+++ b/src/perception/lidar_object_detection/lidar_det/src/lidar_det.cpp
@@ -0,0 +1,35 @@
+#include "rclcpp/rclcpp.hpp"
+#include "std_msgs/msg/string.hpp"
+#include <chrono>
+
+using namespace std::chrono_literals;
+
+class TestNode : public rclcpp::Node
+{
+public:
+  TestNode() : Node("test_node")
+  {
+    publisher_ = this->create_publisher<std_msgs::msg::String>("chatter", 10);
+    timer_ = this->create_wall_timer(1s, std::bind(&TestNode::timer_callback, this));
+  }
+
+private:
+  void timer_callback()
+  {
+    auto message = std_msgs::msg::String();
+    message.data = "Hello, ROS 2 world! " + std::to_string(count_++);
+    RCLCPP_INFO(this->get_logger(), "Publishing: '%s'", message.data.c_str());
+    publisher_->publish(message);
+  }
+  rclcpp::TimerBase::SharedPtr timer_;
+  rclcpp::Publisher<std_msgs::msg::String>::SharedPtr publisher_;
+  size_t count_ = 0;
+};
+
+int main(int argc, char *argv[])
+{
+  rclcpp::init(argc, argv);
+  rclcpp::spin(std::make_shared<TestNode>());
+  rclcpp::shutdown();
+  return 0;
+}

From 257407ee7004b4ff22a7953bb32645bab4e7e31e Mon Sep 17 00:00:00 2001
From: Danryanh7
Date: Sat, 20 Jan 2024 05:59:24 +0000
Subject: [PATCH 17/70] ig we dont need these

---
 .../lidar_object_detection/CMakeLists.txt | 14 --------------
 .../lidar_object_detection/package.xml    | 16 ----------------
 2 files changed, 30 deletions(-)
 delete mode 100644 src/perception/lidar_object_detection/CMakeLists.txt
 delete mode 100644 src/perception/lidar_object_detection/package.xml

diff --git a/src/perception/lidar_object_detection/CMakeLists.txt b/src/perception/lidar_object_detection/CMakeLists.txt
deleted file mode 100644
index 3a5b0750..00000000
--- a/src/perception/lidar_object_detection/CMakeLists.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-cmake_minimum_required(VERSION 3.8)
-project(lidar_object_detection)
-
-if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
-  add_compile_options(-Wall -Wextra -Wpedantic)
-endif()
-
-# find dependencies
-find_package(ament_cmake REQUIRED)
-# uncomment the following section in order to fill in
-# further dependencies manually.
-# find_package( REQUIRED) - -ament_package() diff --git a/src/perception/lidar_object_detection/package.xml b/src/perception/lidar_object_detection/package.xml deleted file mode 100644 index 5dc0a5df..00000000 --- a/src/perception/lidar_object_detection/package.xml +++ /dev/null @@ -1,16 +0,0 @@ - - - - lidar_object_detection - 0.0.0 - TODO: Package description - bolty - TODO: License declaration - - ament_cmake - - - - ament_cmake - - From f1c47c0ebc783d1f5b8fe58b8bca7eaad996f5b4 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Sat, 20 Jan 2024 06:12:35 +0000 Subject: [PATCH 18/70] added python package if I want to use that --- .../lidar_det_py/lidar_det_py/__init__.py | 0 .../lidar_det_py/lidar_det_py/test_node.py | 34 +++++++++++++++++++ .../lidar_det_py/package.xml | 21 ++++++++++++ .../lidar_det_py/resource/lidar_det_py | 0 .../lidar_det_py/setup.cfg | 4 +++ .../lidar_det_py/setup.py | 26 ++++++++++++++ 6 files changed, 85 insertions(+) create mode 100644 src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/__init__.py create mode 100644 src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/test_node.py create mode 100644 src/perception/lidar_object_detection/lidar_det_py/package.xml create mode 100644 src/perception/lidar_object_detection/lidar_det_py/resource/lidar_det_py create mode 100644 src/perception/lidar_object_detection/lidar_det_py/setup.cfg create mode 100644 src/perception/lidar_object_detection/lidar_det_py/setup.py diff --git a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/__init__.py b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/test_node.py b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/test_node.py new file mode 100644 index 00000000..934bd9f1 --- /dev/null +++ b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/test_node.py @@ -0,0 +1,34 @@ +import rclpy +from rclpy.node import Node +from std_msgs.msg import String +from time import sleep + +class TestNode(Node): + + def __init__(self): + super().__init__('test_node') + self.publisher_ = self.create_publisher(String, 'chatter', 10) + self.timer = self.create_timer(1.0, self.timer_callback) + self.count = 0 + + def timer_callback(self): + msg = String() + msg.data = 'Hello ROS 2 world! 
%d' % self.count
+        self.publisher_.publish(msg)
+        self.get_logger().info('Publishing: "%s"' % msg.data)
+        self.count += 1
+
+def main():
+    rclpy.init()
+    test_node = TestNode()
+
+    try:
+        rclpy.spin(test_node)
+    except KeyboardInterrupt:
+        pass
+
+    test_node.destroy_node()
+    rclpy.shutdown()
+
+if __name__ == '__main__':
+    main()
diff --git a/src/perception/lidar_object_detection/lidar_det_py/package.xml b/src/perception/lidar_object_detection/lidar_det_py/package.xml
new file mode 100644
index 00000000..26b37b62
--- /dev/null
+++ b/src/perception/lidar_object_detection/lidar_det_py/package.xml
@@ -0,0 +1,21 @@
+<?xml version="1.0"?>
+<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
+<package format="3">
+  <name>lidar_det_py</name>
+  <version>0.0.0</version>
+  <description>TODO: Package description</description>
+  <maintainer email="bolty@todo.todo">bolty</maintainer>
+  <license>TODO: License declaration</license>
+
+  <depend>rclpy</depend>
+  <depend>std_msgs</depend>
+
+  <test_depend>ament_copyright</test_depend>
+  <test_depend>ament_flake8</test_depend>
+  <test_depend>ament_pep257</test_depend>
+  <test_depend>python3-pytest</test_depend>
+
+  <export>
+    <build_type>ament_python</build_type>
+  </export>
+</package>
diff --git a/src/perception/lidar_object_detection/lidar_det_py/resource/lidar_det_py b/src/perception/lidar_object_detection/lidar_det_py/resource/lidar_det_py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/perception/lidar_object_detection/lidar_det_py/setup.cfg b/src/perception/lidar_object_detection/lidar_det_py/setup.cfg
new file mode 100644
index 00000000..b74c15be
--- /dev/null
+++ b/src/perception/lidar_object_detection/lidar_det_py/setup.cfg
@@ -0,0 +1,4 @@
+[develop]
+script_dir=$base/lib/lidar_det_py
+[install]
+install_scripts=$base/lib/lidar_det_py
diff --git a/src/perception/lidar_object_detection/lidar_det_py/setup.py b/src/perception/lidar_object_detection/lidar_det_py/setup.py
new file mode 100644
index 00000000..f9eca843
--- /dev/null
+++ b/src/perception/lidar_object_detection/lidar_det_py/setup.py
@@ -0,0 +1,26 @@
+from setuptools import find_packages, setup
+
+package_name = 'lidar_det_py'
+
+setup(
+    name=package_name,
+    version='0.0.0',
+    packages=find_packages(exclude=['test']),
+    data_files=[
+        ('share/ament_index/resource_index/packages',
+            ['resource/' + package_name]),
+        ('share/' + package_name, ['package.xml']),
+    ],
+    install_requires=['setuptools'],
+    zip_safe=True,
+    maintainer='bolty',
+    maintainer_email='bolty@todo.todo',
+    description='TODO: Package description',
+    license='TODO: License declaration',
+    tests_require=['pytest'],
+    entry_points={
+        'console_scripts': [
+            'test_node = lidar_det_py.test_node:main'
+        ],
+    },
+)

From db7c63850052f1aec1343f57d71a6d4871f2e8dd Mon Sep 17 00:00:00 2001
From: Danryanh7
Date: Tue, 23 Jan 2024 05:05:36 +0000
Subject: [PATCH 19/70] adding ros node

---
 ...idar_object_detection_openpcdet.Dockerfile |  21 +-
 .../lidar_det/CMakeLists.txt                  |  33 ---
 .../lidar_det/package.xml                     |  21 --
 .../lidar_det/src/lidar_det.cpp               |  35 ---
 .../lidar_det_py/launch/3d_object_detector.py |  13 +
 .../lidar_det_py/launch/config.yaml           |  15 +
 .../lidar_det_py/lidar_det_py/inference.py    | 258 ++++++++++++++++++
 .../lidar_det_py/utils/draw_3d.py             | 222 +++++++++++++++
 .../lidar_det_py/utils/global_def             |  30 ++
 .../lidar_det_py/package.xml                  |   5 +
 .../lidar_det_py/setup.py                     |   5 +-
 11 files changed, 556 insertions(+), 102 deletions(-)
 delete mode 100644 src/perception/lidar_object_detection/lidar_det/CMakeLists.txt
 delete mode 100644 src/perception/lidar_object_detection/lidar_det/package.xml
 delete mode 100644 src/perception/lidar_object_detection/lidar_det/src/lidar_det.cpp
 create mode 100644 src/perception/lidar_object_detection/lidar_det_py/launch/3d_object_detector.py
 create mode 100644 src/perception/lidar_object_detection/lidar_det_py/launch/config.yaml
 create mode 100644
src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py create mode 100644 src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/utils/draw_3d.py create mode 100644 src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/utils/global_def diff --git a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile index 291364ee..2211c4e2 100644 --- a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile +++ b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile @@ -1,50 +1,47 @@ ARG BASE_IMAGE=ghcr.io/watonomous/wato_monorepo/base:humble-ubuntu22.04 - ################################ Source ################################ FROM ${BASE_IMAGE} as source - WORKDIR ${AMENT_WS}/src - # Copy in source code COPY src/perception/lidar_object_detection lidar_object_detection COPY src/wato_msgs/sample_msgs sample_msgs - # Scan for rosdeps RUN apt-get -qq update && rosdep update && \ rosdep install --from-paths . --ignore-src -r -s \ | grep 'apt-get install' \ | awk '{print $3}' \ | sort > /tmp/colcon_install_list - ################################# Dependencies ################################ FROM ${BASE_IMAGE} as dependencies - # Install Rosdep requirements COPY --from=source /tmp/colcon_install_list /tmp/colcon_install_list RUN apt-fast install -qq -y --no-install-recommends $(cat /tmp/colcon_install_list) - # Copy in source code from source stage WORKDIR ${AMENT_WS} COPY --from=source ${AMENT_WS}/src src - # Dependency Cleanup WORKDIR / RUN apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ rm -rf /root/* /root/.ros /tmp/* /var/lib/apt/lists/* /usr/share/doc/* - +################################ INSTALL OpenPCDet ################################ +RUN apt update && apt install -y python3-pip +RUN pip3 install torch==1.13.1+cu116 torchvision==0.14.1+cu116 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116 +RUN pip3 install spconv-cu116 +RUN apt update && apt install -y python3-setuptools +WORKDIR /home/bolty/ +RUN git clone https://github.com/Kin-Zhang/OpenPCDet.git +RUN cd OpenPCDet && pip3 install -r requirements.txt +RUN pip3 install pyquaternion numpy==1.23 pillow==8.4 mayavi open3d ################################ Build ################################ FROM dependencies as build - # Build ROS2 packages WORKDIR ${AMENT_WS} RUN . /opt/ros/$ROS_DISTRO/setup.sh && \ colcon build \ --cmake-args -DCMAKE_BUILD_TYPE=Release - # Entrypoint will run before any CMD on launch. 
Sources ~/opt//setup.bash and ~/ament_ws/install/setup.bash COPY docker/wato_ros_entrypoint.sh ${AMENT_WS}/wato_ros_entrypoint.sh ENTRYPOINT ["./wato_ros_entrypoint.sh"] - ################################ Prod ################################ # FROM build as deploy diff --git a/src/perception/lidar_object_detection/lidar_det/CMakeLists.txt b/src/perception/lidar_object_detection/lidar_det/CMakeLists.txt deleted file mode 100644 index 507bfafb..00000000 --- a/src/perception/lidar_object_detection/lidar_det/CMakeLists.txt +++ /dev/null @@ -1,33 +0,0 @@ -cmake_minimum_required(VERSION 3.8) -project(lidar_det) - -if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") - add_compile_options(-Wall -Wextra -Wpedantic) -endif() - -# find dependencies -find_package(ament_cmake REQUIRED) -find_package(rclcpp REQUIRED) -find_package(std_msgs REQUIRED) - -if(BUILD_TESTING) - find_package(ament_lint_auto REQUIRED) - # the following line skips the linter which checks for copyrights - # comment the line when a copyright and license is added to all source files - set(ament_cmake_copyright_FOUND TRUE) - # the following line skips cpplint (only works in a git repo) - # comment the line when this package is in a git repo and when - # a copyright and license is added to all source files - set(ament_cmake_cpplint_FOUND TRUE) - ament_lint_auto_find_test_dependencies() -endif() - -add_executable(lidar_det src/lidar_det.cpp) -ament_target_dependencies(lidar_det rclcpp std_msgs) -install(TARGETS - lidar_det - DESTINATION lib/${PROJECT_NAME} -) - - -ament_package() diff --git a/src/perception/lidar_object_detection/lidar_det/package.xml b/src/perception/lidar_object_detection/lidar_det/package.xml deleted file mode 100644 index 959555ab..00000000 --- a/src/perception/lidar_object_detection/lidar_det/package.xml +++ /dev/null @@ -1,21 +0,0 @@ - - - - lidar_det - 0.0.0 - TODO: Package description - bolty - TODO: License declaration - - ament_cmake - - rclcpp - std_msgs - - ament_lint_auto - ament_lint_common - - - ament_cmake - - diff --git a/src/perception/lidar_object_detection/lidar_det/src/lidar_det.cpp b/src/perception/lidar_object_detection/lidar_det/src/lidar_det.cpp deleted file mode 100644 index 62e663eb..00000000 --- a/src/perception/lidar_object_detection/lidar_det/src/lidar_det.cpp +++ /dev/null @@ -1,35 +0,0 @@ -#include "rclcpp/rclcpp.hpp" -#include "std_msgs/msg/string.hpp" -#include - -using namespace std::chrono_literals; - -class TestNode : public rclcpp::Node -{ -public: - TestNode() : Node("test_node") - { - publisher_ = this->create_publisher("chatter", 10); - timer_ = this->create_wall_timer(1s, std::bind(&TestNode::timer_callback, this)); - } - -private: - void timer_callback() - { - auto message = std_msgs::msg::String(); - message.data = "Hello, ROS 2 world! 
" + std::to_string(count_++); - RCLCPP_INFO(this->get_logger(), "Publishing: '%s'", message.data.c_str()); - publisher_->publish(message); - } - rclcpp::TimerBase::SharedPtr timer_; - rclcpp::Publisher::SharedPtr publisher_; - size_t count_ = 0; -}; - -int main(int argc, char *argv[]) -{ - rclcpp::init(argc, argv); - rclcpp::spin(std::make_shared()); - rclcpp::shutdown(); - return 0; -} diff --git a/src/perception/lidar_object_detection/lidar_det_py/launch/3d_object_detector.py b/src/perception/lidar_object_detection/lidar_det_py/launch/3d_object_detector.py new file mode 100644 index 00000000..05984a36 --- /dev/null +++ b/src/perception/lidar_object_detection/lidar_det_py/launch/3d_object_detector.py @@ -0,0 +1,13 @@ +#!/usr/bin/env python3 +from launch import LaunchDescription +from launch_ros.actions import Node + +def generate_launch_description(): + return LaunchDescription([ + Node( + package='lidar_det_py', + executable='inference', + name='inference', + output='screen', + ), + ]) diff --git a/src/perception/lidar_object_detection/lidar_det_py/launch/config.yaml b/src/perception/lidar_object_detection/lidar_det_py/launch/config.yaml new file mode 100644 index 00000000..d74146d7 --- /dev/null +++ b/src/perception/lidar_object_detection/lidar_det_py/launch/config.yaml @@ -0,0 +1,15 @@ +# which go to the model you want to use +cfg_root: "/home/bolty/workspace/OpenPCDet/tools/cfgs/kitti_models/pv_rcnn.yaml" + +# which go to the .pth +model_path: "/home/kin/workspace/OpenPCDet/tools/pv_rcnn_8369.pth" + +# pointcloud_topic in the bag if you are not sure +# rosbag info xxx.bag check the topic name +pointcloud_topic: "/velodyne_points" # default is from kitti2bag: /velodyne_points + +# only the predict probablity over this threshold will be draw a box +threshold: 0.8 + +move_lidar_center: 20 +viz_rate: 1.0 \ No newline at end of file diff --git a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py new file mode 100644 index 00000000..141a6911 --- /dev/null +++ b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py @@ -0,0 +1,258 @@ +#! /usr/bin/env python3.8 +""" +Created on Thu Aug 6 11:27:43 2020 + +@author: Javier del Egido Sierra and Carlos Gómez-Huélamo + +=== + +Modified on 23 Dec 2022 +@author: Kin ZHANG (https://kin-zhang.github.io/) + +Part of codes also refers: https://github.com/kwea123/ROS_notes +""" + +# General use imports +import os +import time +import glob +from pathlib import Path + +# ROS imports +import rospy +import ros_numpy +from sensor_msgs.msg import PointCloud2, PointField +from visualization_msgs.msg import MarkerArray, Marker + +# Math and geometry imports +import math +import numpy as np +import torch + +# OpenPCDet imports +from pcdet.datasets import DatasetTemplate +from pcdet.models import build_network, load_data_to_gpu +from pcdet.config import cfg, cfg_from_yaml_file +from pcdet.utils import box_utils, calibration_kitti, common_utils, object3d_kitti + +# Kin's utils +from utils.draw_3d import Draw3DBox +from utils.global_def import * +from utils import * + +import yaml +import os +BASE_DIR = os.path.abspath(os.path.join( os.path.dirname( __file__ ), '..' 
)) +with open(f"{BASE_DIR}/launch/config.yaml", 'r') as f: + try: + para_cfg = yaml.safe_load(f, Loader=yaml.FullLoader) + except: + para_cfg = yaml.safe_load(f) + +cfg_root = para_cfg["cfg_root"] +model_path = para_cfg["model_path"] +move_lidar_center = para_cfg["move_lidar_center"] +threshold = para_cfg["threshold"] +pointcloud_topic = para_cfg["pointcloud_topic"] +RATE_VIZ = para_cfg["viz_rate"] +inference_time_list = [] + + +def xyz_array_to_pointcloud2(points_sum, stamp=None, frame_id=None): + """ + Create a sensor_msgs.PointCloud2 from an array of points. + """ + msg = PointCloud2() + if stamp: + msg.header.stamp = stamp + if frame_id: + msg.header.frame_id = frame_id + msg.height = 1 + msg.width = points_sum.shape[0] + msg.fields = [ + PointField('x', 0, PointField.FLOAT32, 1), + PointField('y', 4, PointField.FLOAT32, 1), + PointField('z', 8, PointField.FLOAT32, 1) + # PointField('i', 12, PointField.FLOAT32, 1) + ] + msg.is_bigendian = False + msg.point_step = 12 + msg.row_step = points_sum.shape[0] + msg.is_dense = int(np.isfinite(points_sum).all()) + msg.data = np.asarray(points_sum, np.float32).tostring() + return msg + +def rslidar_callback(msg): + select_boxs, select_types = [],[] + if proc_1.no_frame_id: + proc_1.set_viz_frame_id(msg.header.frame_id) + print(f"{bc.OKGREEN} setting marker frame id to lidar: {msg.header.frame_id} {bc.ENDC}") + proc_1.no_frame_id = False + + frame = msg.header.seq # frame id -> not timestamp + msg_cloud = ros_numpy.point_cloud2.pointcloud2_to_array(msg) + np_p = get_xyz_points(msg_cloud, True) + scores, dt_box_lidar, types, pred_dict = proc_1.run(np_p, frame) + for i, score in enumerate(scores): + if score>threshold: + select_boxs.append(dt_box_lidar[i]) + select_types.append(pred_dict['name'][i]) + if(len(select_boxs)>0): + proc_1.pub_rviz.publish_3dbox(np.array(select_boxs), -1, pred_dict['name']) + print_str = f"Frame id: {frame}. 
Prediction results: \n" + for i in range(len(pred_dict['name'])): + print_str += f"Type: {pred_dict['name'][i]:.3s} Prob: {scores[i]:.2f}\n" + print(print_str) + else: + print(f"\n{bc.FAIL} No confident prediction in this time stamp {bc.ENDC}\n") + print(f" -------------------------------------------------------------- ") + +class DemoDataset(DatasetTemplate): + def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None, ext='.bin'): + """ + Args: + root_path: + dataset_cfg: + class_names: + training: + logger: + """ + super().__init__( + dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger + ) + self.root_path = root_path + self.ext = ext + data_file_list = glob.glob(str(root_path / f'*{self.ext}')) if self.root_path.is_dir() else [self.root_path] + + data_file_list.sort() + self.sample_file_list = data_file_list + + def __len__(self): + return len(self.sample_file_list) + + def __getitem__(self, index): + if self.ext == '.bin': + points = np.fromfile(self.sample_file_list[index], dtype=np.float32).reshape(-1, 4) + elif self.ext == '.npy': + points = np.load(self.sample_file_list[index]) + else: + raise NotImplementedError + + input_dict = { + 'points': points, + 'frame_id': index, + } + + data_dict = self.prepare_data(data_dict=input_dict) + return data_dict + +class Processor_ROS: + def __init__(self, config_path, model_path): + self.points = None + self.config_path = config_path + self.model_path = model_path + self.device = None + self.net = None + self.voxel_generator = None + self.inputs = None + self.pub_rviz = None + self.no_frame_id = True + self.rate = RATE_VIZ + + def set_pub_rviz(self, box3d_pub, marker_frame_id = 'velodyne'): + self.pub_rviz = Draw3DBox(box3d_pub, marker_frame_id, self.rate) + + def set_viz_frame_id(self, marker_frame_id): + self.pub_rviz.set_frame_id(marker_frame_id) + + def initialize(self): + self.read_config() + + def read_config(self): + config_path = self.config_path + cfg_from_yaml_file(self.config_path, cfg) + self.logger = common_utils.create_logger() + self.demo_dataset = DemoDataset( + dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False, + root_path=Path("/home/kin/workspace/OpenPCDet/tools/000002.bin"), + ext='.bin') + + self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + self.net = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=self.demo_dataset) + print("Model path: ", self.model_path) + self.net.load_params_from_file(filename=self.model_path, logger=self.logger, to_cpu=True) + self.net = self.net.to(self.device).eval() + + def get_template_prediction(self, num_samples): + ret_dict = { + 'name': np.zeros(num_samples), 'truncated': np.zeros(num_samples), + 'occluded': np.zeros(num_samples), 'alpha': np.zeros(num_samples), + 'bbox': np.zeros([num_samples, 4]), 'dimensions': np.zeros([num_samples, 3]), + 'location': np.zeros([num_samples, 3]), 'rotation_y': np.zeros(num_samples), + 'score': np.zeros(num_samples), 'boxes_lidar': np.zeros([num_samples, 7]) + } + return ret_dict + + def run(self, points, frame): + t_t = time.time() + num_features = 4 # X,Y,Z,intensity + self.points = points.reshape([-1, num_features]) + + timestamps = np.empty((len(self.points),1)) + timestamps[:] = frame + + # self.points = np.append(self.points, timestamps, axis=1) + self.points[:,0] += move_lidar_center + + input_dict = { + 'points': self.points, + 'frame_id': frame, + } + + data_dict = 
self.demo_dataset.prepare_data(data_dict=input_dict) + data_dict = self.demo_dataset.collate_batch([data_dict]) + load_data_to_gpu(data_dict) + + torch.cuda.synchronize() + t = time.time() + + pred_dicts, _ = self.net.forward(data_dict) + + torch.cuda.synchronize() + inference_time = time.time() - t + inference_time_list.append(inference_time) + + boxes_lidar = pred_dicts[0]["pred_boxes"].detach().cpu().numpy() + scores = pred_dicts[0]["pred_scores"].detach().cpu().numpy() + types = pred_dicts[0]["pred_labels"].detach().cpu().numpy() + + pred_boxes = np.copy(boxes_lidar) + pred_dict = self.get_template_prediction(scores.shape[0]) + + pred_dict['name'] = np.array(cfg.CLASS_NAMES)[types - 1] + pred_dict['score'] = scores + pred_dict['boxes_lidar'] = pred_boxes + + return scores, boxes_lidar, types, pred_dict + +if __name__ == "__main__": + no_frame_id = False + proc_1 = Processor_ROS(cfg_root, model_path) + print(f"\n{bc.OKCYAN}Config path: {bc.BOLD}{cfg_root}{bc.ENDC}") + print(f"{bc.OKCYAN}Model path: {bc.BOLD}{model_path}{bc.ENDC}") + + proc_1.initialize() + rospy.init_node('inference') + sub_lidar_topic = [pointcloud_topic] + + cfg_from_yaml_file(cfg_root, cfg) + + sub_ = rospy.Subscriber(sub_lidar_topic[0], PointCloud2, rslidar_callback, queue_size=1, buff_size=2**24) + pub_rviz = rospy.Publisher('detect_3dbox',MarkerArray, queue_size=10) + proc_1.set_pub_rviz(pub_rviz) + print(f"{bc.HEADER} ====================== {bc.ENDC}") + print(" ===> [+] PCDet ros_node has started. Try to Run the rosbag file") + print(f"{bc.HEADER} ====================== {bc.ENDC}") + + rospy.spin() \ No newline at end of file diff --git a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/utils/draw_3d.py b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/utils/draw_3d.py new file mode 100644 index 00000000..c5fbaefd --- /dev/null +++ b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/utils/draw_3d.py @@ -0,0 +1,222 @@ +#! /usr/bin/env python3.8 +""" +Created on 23 Dec 2022 + +@author: Kin ZHANG (https://kin-zhang.github.io/) +% Copyright (C) 2022, RPL, KTH Royal Institute of Technology + +Part of codes also refers: +1. https://github.com/kwea123/ROS_notes +2. 
https://github.com/seaside2mm/sharecode + +""" + +import rospy +from visualization_msgs.msg import Marker, MarkerArray +from geometry_msgs.msg import Point +import numpy as np +from .global_def import * + +""" +This file is from OpenPCDet visualize_utils.py +Check their repo: https://github.com/open-mmlab/OpenPCDet +""" +import numpy as np +import torch + +box_colormap = [ + [1, 1, 1], + [0, 1, 0], + [0, 1, 1], + [1, 1, 0], +] + + +def check_numpy_to_torch(x): + if isinstance(x, np.ndarray): + return torch.from_numpy(x).float(), True + return x, False + + +def rotate_points_along_z(points, angle): + """ + Args: + points: (B, N, 3 + C) + angle: (B), angle along z-axis, angle increases x ==> y + Returns: + + """ + points, is_numpy = check_numpy_to_torch(points) + angle, _ = check_numpy_to_torch(angle) + + cosa = torch.cos(angle) + sina = torch.sin(angle) + zeros = angle.new_zeros(points.shape[0]) + ones = angle.new_ones(points.shape[0]) + rot_matrix = torch.stack(( + cosa, sina, zeros, + -sina, cosa, zeros, + zeros, zeros, ones + ), dim=1).view(-1, 3, 3).float() + points_rot = torch.matmul(points[:, :, 0:3], rot_matrix) + points_rot = torch.cat((points_rot, points[:, :, 3:]), dim=-1) + return points_rot.numpy() if is_numpy else points_rot + + +def boxes_to_corners_3d(boxes3d): + """ + 7 -------- 4 + /| /| + 6 -------- 5 . + | | | | + . 3 -------- 0 + |/ |/ + 2 -------- 1 + Args: + boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center + + Returns: + """ + boxes3d, is_numpy = check_numpy_to_torch(boxes3d) + + template = boxes3d.new_tensor(( + [1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1], + [1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, 1], + )) / 2 + + corners3d = boxes3d[:, None, 3:6].repeat(1, 8, 1) * template[None, :, :] + corners3d = rotate_points_along_z(corners3d.view(-1, 8, 3), boxes3d[:, 6]).view(-1, 8, 3) + corners3d += boxes3d[:, None, 0:3] + + return corners3d.numpy() if is_numpy else corners3d + +class Draw3DBox: + def __init__(self, box3d_pub, marker_frame_id = 'velodyne', rate=10): + self.frame_id = marker_frame_id + self.lifetime = 1.0/rate + self.box3d_pub = box3d_pub + + def set_frame_id(self, marker_frame_id): + self.frame_id = marker_frame_id + + def publish_3dbox(self, dt_box_lidar, track_ids, types=None, publish_id=True, move_lidar_center=20, publish_type=True): + """ + Publish 3d boxes in velodyne coordinate, with color specified by object_types + If object_types is None, set all color to cyan + corners_3d_velos : list of (8, 4) 3d corners + """ + # (N, 8, 3) + # -move_lidar_center + dt_box_lidar[:,0] = dt_box_lidar[:,0]-move_lidar_center + corners_3d_velos = boxes_to_corners_3d(dt_box_lidar) + + marker_array = MarkerArray() + + for i, corners_3d_velo in enumerate(corners_3d_velos): + # 3d box + marker = Marker() + marker.header.frame_id = self.frame_id + marker.header.stamp = rospy.Time.now() + marker.id = i + marker.action = Marker.ADD + marker.lifetime = rospy.Duration(self.lifetime) + marker.type = Marker.LINE_LIST + + b, g, r = DETECTION_COLOR_MAP[types[i]] + if types is None: + marker.color.r = 0.0 + marker.color.g = 1.0 + marker.color.b = 1.0 + else: + marker.color.r = r/255.0 + marker.color.g = g/255.0 + marker.color.b = b/255.0 + marker.color.a = 1.0 + marker.scale.x = 0.1 + + marker.points = [] + for l in LINES: + p1 = corners_3d_velo[l[0]] + marker.points.append(Point(p1[0], p1[1], p1[2])) + p2 = corners_3d_velo[l[1]] + marker.points.append(Point(p2[0], p2[1], p2[2])) + marker_array.markers.append(marker) + + # add track id + if 
publish_id and track_ids != -1: + track_id = track_ids[i] + text_marker = Marker() + text_marker.header.frame_id = self.frame_id + text_marker.header.stamp = rospy.Time.now() + + text_marker.id = track_id + 1000 + text_marker.action = Marker.ADD + text_marker.lifetime = rospy.Duration(self.lifetime) + text_marker.type = Marker.TEXT_VIEW_FACING + + p4 = corners_3d_velo[4] # upper front left corner + + text_marker.pose.position.x = p4[0] + text_marker.pose.position.y = p4[1] + text_marker.pose.position.z = p4[2] + 0.5 + + text_marker.text = str(track_id) + + text_marker.scale.x = 1 + text_marker.scale.y = 1 + text_marker.scale.z = 1 + + if types is None: + text_marker.color.r = 0.0 + text_marker.color.g = 1.0 + text_marker.color.b = 1.0 + else: + b, g, r = DETECTION_COLOR_MAP[types[i]] + text_marker.color.r = r/255.0 + text_marker.color.g = g/255.0 + text_marker.color.b = b/255.0 + text_marker.color.a = 1.0 + marker_array.markers.append(text_marker) + + if publish_type: + text_marker = Marker() + text_marker.header.frame_id = self.frame_id + text_marker.header.stamp = rospy.Time.now() + + text_marker.id = i + 1000 + text_marker.action = Marker.ADD + text_marker.lifetime = rospy.Duration(self.lifetime) + text_marker.type = Marker.TEXT_VIEW_FACING + + bc = (corners_3d_velo[6] + corners_3d_velo[7] + corners_3d_velo[2] + corners_3d_velo[3])/4 # back center + + text_marker.pose.position.x = bc[0] + text_marker.pose.position.y = bc[1] + text_marker.pose.position.z = bc[2] + 1.5 + + text_marker.text = types[i] + text_marker.scale.x = 1 + text_marker.scale.y = 1 + text_marker.scale.z = 1 + b, g, r = DETECTION_COLOR_MAP[types[i]] + text_marker.color.r = r/255.0 + text_marker.color.g = g/255.0 + text_marker.color.b = b/255.0 + text_marker.color.a = 1.0 + marker_array.markers.append(text_marker) + + self.box3d_pub.publish(marker_array) + + def compute_3d_box_cam2(self, h, w, l, x, y, z, yaw): + """ + Return : 3xn in cam2 coordinate + """ + R = np.array([[np.cos(yaw), 0, np.sin(yaw)], [0, 1, 0], [-np.sin(yaw), 0, np.cos(yaw)]]) + x_corners = [l/2,l/2,-l/2,-l/2,l/2,l/2,-l/2,-l/2] + y_corners = [0,0,0,0,-h,-h,-h,-h] + z_corners = [w/2,-w/2,-w/2,w/2,w/2,-w/2,-w/2,w/2] + corners_3d_cam2 = np.dot(R, np.vstack([x_corners,y_corners,z_corners])) + corners_3d_cam2[0,:] += x + corners_3d_cam2[1,:] += y + corners_3d_cam2[2,:] += z + return corners_3d_cam2 \ No newline at end of file diff --git a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/utils/global_def b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/utils/global_def new file mode 100644 index 00000000..d318a6ac --- /dev/null +++ b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/utils/global_def @@ -0,0 +1,30 @@ +#! 
/usr/bin/env python3.8 +""" +Created on 23 Dec 2022 + +@author: Kin ZHANG (https://kin-zhang.github.io/) +% Copyright (C) 2022, RPL, KTH Royal Institute of Technology + +Part of codes also refers: https://github.com/kwea123/ROS_notes +""" + +DETECTION_COLOR_MAP = {'Car': (255,255,0), + 'Pedestrian': (0, 226, 255), + 'Cyclist': (141, 40, 255)} # color for detection, in format bgr + +# connect vertic +LINES = [[0, 1], [1, 2], [2, 3], [3, 0]] # lower face +LINES+= [[4, 5], [5, 6], [6, 7], [7, 4]] # upper face +LINES+= [[4, 0], [5, 1], [6, 2], [7, 3]] # connect lower face and upper face +LINES+= [[4, 1], [5, 0]] # front face and draw x + +class bc: + HEADER = '\033[95m' + OKBLUE = '\033[94m' + OKCYAN = '\033[96m' + OKGREEN = '\033[92m' + WARNING = '\033[93m' + FAIL = '\033[91m' + ENDC = '\033[0m' + BOLD = '\033[1m' + UNDERLINE = '\033[4m' \ No newline at end of file diff --git a/src/perception/lidar_object_detection/lidar_det_py/package.xml b/src/perception/lidar_object_detection/lidar_det_py/package.xml index 26b37b62..f9da549c 100644 --- a/src/perception/lidar_object_detection/lidar_det_py/package.xml +++ b/src/perception/lidar_object_detection/lidar_det_py/package.xml @@ -9,6 +9,11 @@ rclpy std_msgs + rospy + sensor_msgs + nav_msgs + geometry_msgs + tf ament_copyright ament_flake8 diff --git a/src/perception/lidar_object_detection/lidar_det_py/setup.py b/src/perception/lidar_object_detection/lidar_det_py/setup.py index f9eca843..305cea43 100644 --- a/src/perception/lidar_object_detection/lidar_det_py/setup.py +++ b/src/perception/lidar_object_detection/lidar_det_py/setup.py @@ -1,4 +1,5 @@ from setuptools import find_packages, setup +from glob import glob package_name = 'lidar_det_py' @@ -10,6 +11,7 @@ ('share/ament_index/resource_index/packages', ['resource/' + package_name]), ('share/' + package_name, ['package.xml']), + ('share/' + package_name + '/launch', glob('launch/*.py')), ], install_requires=['setuptools'], zip_safe=True, @@ -20,7 +22,8 @@ tests_require=['pytest'], entry_points={ 'console_scripts': [ - 'test_node = lidar_det_py.test_node:main' + 'test_node = lidar_det_py.test_node:main', + 'inference = lidar_det_py.inference:main' ], }, ) From cec4f6d0b52e6a6b3912d1e97b049d7a97ac60fe Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Thu, 25 Jan 2024 03:37:32 +0000 Subject: [PATCH 20/70] made node and WIP on dockerfile --- ...idar_object_detection_openpcdet.Dockerfile | 85 +++++- .../lidar_det_py/lidar_det_py/inference.py | 272 +++++------------- 2 files changed, 149 insertions(+), 208 deletions(-) diff --git a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile index 2211c4e2..c76f493b 100644 --- a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile +++ b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile @@ -24,14 +24,87 @@ WORKDIR / RUN apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ rm -rf /root/* /root/.ros /tmp/* /var/lib/apt/lists/* /usr/share/doc/* ################################ INSTALL OpenPCDet ################################ -RUN apt update && apt install -y python3-pip +# Set environment variables +ENV NVENCODE_CFLAGS "-I/usr/local/cuda/include" +ENV CV_VERSION=4.2.0 +ENV DEBIAN_FRONTEND=noninteractive +# Get all dependencies +RUN apt-get update && apt-get install -y \ + git zip unzip libssl-dev libcairo2-dev lsb-release libgoogle-glog-dev libgflags-dev 
libatlas-base-dev libeigen3-dev software-properties-common \ + build-essential cmake pkg-config libapr1-dev autoconf automake libtool curl libc6 libboost-all-dev debconf libomp5 libstdc++6 \ + libqt5core5a libqt5xml5 libqt5gui5 libqt5widgets5 libqt5concurrent5 libqt5opengl5 libcap2 libusb-1.0-0 libatk-adaptor neovim +RUN apt-get update && apt-get install -y \ + python3-pip \ + python3-dev +RUN apt-get update && apt-get install -y \ + pcl-tools \ + python3-pcl \ + xvfb \ + x11-utils +RUN rm -rf /var/lib/apt/lists/* +# OpenCV with CUDA support +WORKDIR /opencv +RUN git clone https://github.com/opencv/opencv.git -b $CV_VERSION &&\ + git clone https://github.com/opencv/opencv_contrib.git -b $CV_VERSION +# While using OpenCV 4.2.0 we have to apply some fixes to ensure that CUDA is fully supported, thanks @https://github.com/gismo07 for this fix +RUN mkdir opencvfix && cd opencvfix &&\ + git clone https://github.com/opencv/opencv.git -b 4.5.2 &&\ + cd opencv/cmake &&\ + cp -r FindCUDA /opencv/opencv/cmake/ &&\ + cp FindCUDA.cmake /opencv/opencv/cmake/ &&\ + cp FindCUDNN.cmake /opencv/opencv/cmake/ &&\ + cp OpenCVDetectCUDA.cmake /opencv/opencv/cmake/ +WORKDIR /opencv/opencv/build +RUN cmake -D CMAKE_BUILD_TYPE=RELEASE \ +-D CMAKE_INSTALL_PREFIX=/usr/local \ +-D OPENCV_GENERATE_PKGCONFIG=ON \ +-D BUILD_EXAMPLES=OFF \ +-D INSTALL_PYTHON_EXAMPLES=OFF \ +-D INSTALL_C_EXAMPLES=OFF \ +-D PYTHON_EXECUTABLE=$(which python2) \ +-D PYTHON3_EXECUTABLE=$(which python3) \ +-D PYTHON3_INCLUDE_DIR=$(python3 -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())") \ +-D PYTHON3_PACKAGES_PATH=$(python3 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") \ +-D BUILD_opencv_python2=ON \ +-D BUILD_opencv_python3=ON \ +-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules/ \ +-D WITH_GSTREAMER=ON \ +-D WITH_CUDA=ON \ +-D ENABLE_PRECOMPILED_HEADERS=OFF \ +.. 
&&\ +make -j$(nproc) &&\ +make install &&\ +ldconfig &&\ +rm -rf /opencv +WORKDIR / +ENV OpenCV_DIR=/usr/share/OpenCV +# PyTorch for CUDA 11.6 RUN pip3 install torch==1.13.1+cu116 torchvision==0.14.1+cu116 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116 +ENV TORCH_CUDA_ARCH_LIST="3.5;5.0;6.0;6.1;7.0;7.5;8.0;8.6+PTX" +# OpenPCDet +RUN pip3 install numpy==1.23.0 llvmlite numba tensorboardX easydict pyyaml scikit-image tqdm SharedArray open3d mayavi av2 pyquaternion RUN pip3 install spconv-cu116 -RUN apt update && apt install -y python3-setuptools -WORKDIR /home/bolty/ -RUN git clone https://github.com/Kin-Zhang/OpenPCDet.git -RUN cd OpenPCDet && pip3 install -r requirements.txt -RUN pip3 install pyquaternion numpy==1.23 pillow==8.4 mayavi open3d +RUN pip3 install kornia==0.6.8 +RUN pip3 install nuscenes-devkit==1.0.5 +# Get extra kitti data +WORKDIR / +RUN git clone https://github.com/NVIDIA-AI-IOT/CUDA-PointPillars.git +# Change based on if you want to retain changes to the OpenPCDet repo +RUN git clone https://github.com/open-mmlab/OpenPCDet.git +WORKDIR /OpenPCDet +# Set up xvfb (X Virtual FrameBuffer) +RUN echo '#!/bin/bash\nXvfb :99 -screen 0 1280x1024x24 &\nsleep 3\nexec "$@"' > /usr/local/bin/start-xvfb.sh \ + && chmod +x /usr/local/bin/start-xvfb.sh +# Set the environment variable for DISPLAY +ENV DISPLAY=:99 +RUN python3 setup.py develop +WORKDIR / +ENV NVIDIA_VISIBLE_DEVICES="all" \ + OpenCV_DIR=/usr/share/OpenCV \ + NVIDIA_DRIVER_CAPABILITIES="video,compute,utility,graphics" \ + LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/lib:/usr/lib:/usr/local/lib \ + QT_GRAPHICSSYSTEM="native" +CMD ["/usr/local/bin/start-xvfb.sh"] ################################ Build ################################ FROM dependencies as build # Build ROS2 packages diff --git a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py index 141a6911..8e42c78a 100644 --- a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py +++ b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py @@ -1,111 +1,78 @@ -#! 
/usr/bin/env python3.8 -""" -Created on Thu Aug 6 11:27:43 2020 - -@author: Javier del Egido Sierra and Carlos Gómez-Huélamo +import argparse +import glob +from pathlib import Path +import rclpy +from rclpy.node import Node +from std_msgs.msg import Header +from sensor_msgs.msg import PointCloud2 +import numpy as np +import torch +import sys +sys.path.append('/home/bolty/OpenPCDet') +from pcdet.config import cfg, cfg_from_yaml_file +from pcdet.datasets import DatasetTemplate +from pcdet.models import build_network, load_data_to_gpu +from pcdet.utils import common_utils +import sensor_msgs_py.point_cloud2 as pc2 -=== +from visual_utils import open3d_vis_utils as V +OPEN3D_FLAG = True -Modified on 23 Dec 2022 -@author: Kin ZHANG (https://kin-zhang.github.io/) -Part of codes also refers: https://github.com/kwea123/ROS_notes -""" +class LidarDemoNode(Node): + def __init__(self): + super().__init__('lidar_demo_node') + self.publisher_ = self.create_publisher(PointCloud2, 'lidar_data', 10) -# General use imports -import os -import time -import glob -from pathlib import Path + args, cfg = self.parse_config() + self.logger = common_utils.create_logger() + self.logger.info('-----------------Quick Demo of OpenPCDet-------------------------') -# ROS imports -import rospy -import ros_numpy -from sensor_msgs.msg import PointCloud2, PointField -from visualization_msgs.msg import MarkerArray, Marker + self.demo_dataset = DemoDataset( + dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False, + root_path=Path(args.data_path), ext=args.ext, logger=self.logger + ) -# Math and geometry imports -import math -import numpy as np -import torch + self.model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=self.demo_dataset) + self.model.load_params_from_file(filename=args.ckpt, logger=self.logger, to_cpu=True) + self.model.cuda() + self.model.eval() -# OpenPCDet imports -from pcdet.datasets import DatasetTemplate -from pcdet.models import build_network, load_data_to_gpu -from pcdet.config import cfg, cfg_from_yaml_file -from pcdet.utils import box_utils, calibration_kitti, common_utils, object3d_kitti + self.process_data() -# Kin's utils -from utils.draw_3d import Draw3DBox -from utils.global_def import * -from utils import * + def process_data(self): + with torch.no_grad(): + for idx, data_dict in enumerate(self.demo_dataset): + data_dict = self.demo_dataset.collate_batch([data_dict]) + load_data_to_gpu(data_dict) + pred_dicts, _ = self.model.forward(data_dict) -import yaml -import os -BASE_DIR = os.path.abspath(os.path.join( os.path.dirname( __file__ ), '..' 
)) -with open(f"{BASE_DIR}/launch/config.yaml", 'r') as f: - try: - para_cfg = yaml.safe_load(f, Loader=yaml.FullLoader) - except: - para_cfg = yaml.safe_load(f) + # Convert and publish data + header = self.make_header() + point_cloud_msg = pc2.create_cloud_xyz32(header, data_dict['points'][:, 1:4]) # Assuming first 3 columns are x, y, z + self.publisher_.publish(point_cloud_msg) -cfg_root = para_cfg["cfg_root"] -model_path = para_cfg["model_path"] -move_lidar_center = para_cfg["move_lidar_center"] -threshold = para_cfg["threshold"] -pointcloud_topic = para_cfg["pointcloud_topic"] -RATE_VIZ = para_cfg["viz_rate"] -inference_time_list = [] + self.logger.info('Demo done.') + def parse_config(self): + parser = argparse.ArgumentParser(description='arg parser') + parser.add_argument('--cfg_file', type=str, default='cfgs/nuscenes_models/cbgs_second_multihead.yaml', + help='specify the config for demo') + parser.add_argument('--data_path', type=str, default='demo_data', + help='specify the point cloud data file or directory') + parser.add_argument('--ckpt', type=str, default=None, help='specify the pretrained model') + parser.add_argument('--ext', type=str, default='.bin', help='specify the extension of your point cloud data file') -def xyz_array_to_pointcloud2(points_sum, stamp=None, frame_id=None): - """ - Create a sensor_msgs.PointCloud2 from an array of points. - """ - msg = PointCloud2() - if stamp: - msg.header.stamp = stamp - if frame_id: - msg.header.frame_id = frame_id - msg.height = 1 - msg.width = points_sum.shape[0] - msg.fields = [ - PointField('x', 0, PointField.FLOAT32, 1), - PointField('y', 4, PointField.FLOAT32, 1), - PointField('z', 8, PointField.FLOAT32, 1) - # PointField('i', 12, PointField.FLOAT32, 1) - ] - msg.is_bigendian = False - msg.point_step = 12 - msg.row_step = points_sum.shape[0] - msg.is_dense = int(np.isfinite(points_sum).all()) - msg.data = np.asarray(points_sum, np.float32).tostring() - return msg + args, unknown = parser.parse_known_args() + cfg_from_yaml_file(args.cfg_file, cfg) + return args, cfg -def rslidar_callback(msg): - select_boxs, select_types = [],[] - if proc_1.no_frame_id: - proc_1.set_viz_frame_id(msg.header.frame_id) - print(f"{bc.OKGREEN} setting marker frame id to lidar: {msg.header.frame_id} {bc.ENDC}") - proc_1.no_frame_id = False + def make_header(self): + header = Header() + header.stamp = self.get_clock().now().to_msg() + header.frame_id = 'lidar_frame' + return header - frame = msg.header.seq # frame id -> not timestamp - msg_cloud = ros_numpy.point_cloud2.pointcloud2_to_array(msg) - np_p = get_xyz_points(msg_cloud, True) - scores, dt_box_lidar, types, pred_dict = proc_1.run(np_p, frame) - for i, score in enumerate(scores): - if score>threshold: - select_boxs.append(dt_box_lidar[i]) - select_types.append(pred_dict['name'][i]) - if(len(select_boxs)>0): - proc_1.pub_rviz.publish_3dbox(np.array(select_boxs), -1, pred_dict['name']) - print_str = f"Frame id: {frame}. 
Prediction results: \n" - for i in range(len(pred_dict['name'])): - print_str += f"Type: {pred_dict['name'][i]:.3s} Prob: {scores[i]:.2f}\n" - print(print_str) - else: - print(f"\n{bc.FAIL} No confident prediction in this time stamp {bc.ENDC}\n") - print(f" -------------------------------------------------------------- ") class DemoDataset(DatasetTemplate): def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None, ext='.bin'): @@ -132,7 +99,7 @@ def __len__(self): def __getitem__(self, index): if self.ext == '.bin': - points = np.fromfile(self.sample_file_list[index], dtype=np.float32).reshape(-1, 4) + points = np.fromfile(self.sample_file_list[index], dtype=np.float32).reshape(-1, 5) elif self.ext == '.npy': points = np.load(self.sample_file_list[index]) else: @@ -146,113 +113,14 @@ def __getitem__(self, index): data_dict = self.prepare_data(data_dict=input_dict) return data_dict -class Processor_ROS: - def __init__(self, config_path, model_path): - self.points = None - self.config_path = config_path - self.model_path = model_path - self.device = None - self.net = None - self.voxel_generator = None - self.inputs = None - self.pub_rviz = None - self.no_frame_id = True - self.rate = RATE_VIZ - - def set_pub_rviz(self, box3d_pub, marker_frame_id = 'velodyne'): - self.pub_rviz = Draw3DBox(box3d_pub, marker_frame_id, self.rate) - - def set_viz_frame_id(self, marker_frame_id): - self.pub_rviz.set_frame_id(marker_frame_id) - - def initialize(self): - self.read_config() - - def read_config(self): - config_path = self.config_path - cfg_from_yaml_file(self.config_path, cfg) - self.logger = common_utils.create_logger() - self.demo_dataset = DemoDataset( - dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False, - root_path=Path("/home/kin/workspace/OpenPCDet/tools/000002.bin"), - ext='.bin') - - self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - - self.net = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=self.demo_dataset) - print("Model path: ", self.model_path) - self.net.load_params_from_file(filename=self.model_path, logger=self.logger, to_cpu=True) - self.net = self.net.to(self.device).eval() - - def get_template_prediction(self, num_samples): - ret_dict = { - 'name': np.zeros(num_samples), 'truncated': np.zeros(num_samples), - 'occluded': np.zeros(num_samples), 'alpha': np.zeros(num_samples), - 'bbox': np.zeros([num_samples, 4]), 'dimensions': np.zeros([num_samples, 3]), - 'location': np.zeros([num_samples, 3]), 'rotation_y': np.zeros(num_samples), - 'score': np.zeros(num_samples), 'boxes_lidar': np.zeros([num_samples, 7]) - } - return ret_dict - - def run(self, points, frame): - t_t = time.time() - num_features = 4 # X,Y,Z,intensity - self.points = points.reshape([-1, num_features]) - - timestamps = np.empty((len(self.points),1)) - timestamps[:] = frame - - # self.points = np.append(self.points, timestamps, axis=1) - self.points[:,0] += move_lidar_center - - input_dict = { - 'points': self.points, - 'frame_id': frame, - } - - data_dict = self.demo_dataset.prepare_data(data_dict=input_dict) - data_dict = self.demo_dataset.collate_batch([data_dict]) - load_data_to_gpu(data_dict) - - torch.cuda.synchronize() - t = time.time() - - pred_dicts, _ = self.net.forward(data_dict) - - torch.cuda.synchronize() - inference_time = time.time() - t - inference_time_list.append(inference_time) - - boxes_lidar = pred_dicts[0]["pred_boxes"].detach().cpu().numpy() - scores = 
pred_dicts[0]["pred_scores"].detach().cpu().numpy() - types = pred_dicts[0]["pred_labels"].detach().cpu().numpy() - - pred_boxes = np.copy(boxes_lidar) - pred_dict = self.get_template_prediction(scores.shape[0]) - - pred_dict['name'] = np.array(cfg.CLASS_NAMES)[types - 1] - pred_dict['score'] = scores - pred_dict['boxes_lidar'] = pred_boxes - - return scores, boxes_lidar, types, pred_dict - -if __name__ == "__main__": - no_frame_id = False - proc_1 = Processor_ROS(cfg_root, model_path) - print(f"\n{bc.OKCYAN}Config path: {bc.BOLD}{cfg_root}{bc.ENDC}") - print(f"{bc.OKCYAN}Model path: {bc.BOLD}{model_path}{bc.ENDC}") - proc_1.initialize() - rospy.init_node('inference') - sub_lidar_topic = [pointcloud_topic] +def main(args=None): + rclpy.init(args=args) + node = LidarDemoNode() + rclpy.spin(node) + node.destroy_node() + rclpy.shutdown() - cfg_from_yaml_file(cfg_root, cfg) - - sub_ = rospy.Subscriber(sub_lidar_topic[0], PointCloud2, rslidar_callback, queue_size=1, buff_size=2**24) - pub_rviz = rospy.Publisher('detect_3dbox',MarkerArray, queue_size=10) - proc_1.set_pub_rviz(pub_rviz) - print(f"{bc.HEADER} ====================== {bc.ENDC}") - print(" ===> [+] PCDet ros_node has started. Try to Run the rosbag file") - print(f"{bc.HEADER} ====================== {bc.ENDC}") - rospy.spin() \ No newline at end of file +if __name__ == '__main__': + main() \ No newline at end of file From 1a7eedf07c7283667cdf793ff73fba54117e6617 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Fri, 2 Feb 2024 01:58:47 +0000 Subject: [PATCH 21/70] builds with cuda and I can run the setup script :) --- ...idar_object_detection_openpcdet.Dockerfile | 93 +++---------------- 1 file changed, 12 insertions(+), 81 deletions(-) diff --git a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile index c76f493b..61902cb2 100644 --- a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile +++ b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile @@ -1,4 +1,4 @@ -ARG BASE_IMAGE=ghcr.io/watonomous/wato_monorepo/base:humble-ubuntu22.04 +ARG BASE_IMAGE=ghcr.io/watonomous/wato_monorepo/base:cuda11.7-humble-ubuntu22.04-devel ################################ Source ################################ FROM ${BASE_IMAGE} as source WORKDIR ${AMENT_WS}/src @@ -11,6 +11,10 @@ RUN apt-get -qq update && rosdep update && \ | grep 'apt-get install' \ | awk '{print $3}' \ | sort > /tmp/colcon_install_list +# Set environment variables +ENV CUDA_HOME /usr/local/cuda +ENV LD_LIBRARY_PATH /usr/local/cuda/lib64:${LD_LIBRARY_PATH} +ENV PATH /usr/local/cuda/bin:${PATH} ################################# Dependencies ################################ FROM ${BASE_IMAGE} as dependencies # Install Rosdep requirements @@ -24,87 +28,14 @@ WORKDIR / RUN apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ rm -rf /root/* /root/.ros /tmp/* /var/lib/apt/lists/* /usr/share/doc/* ################################ INSTALL OpenPCDet ################################ -# Set environment variables -ENV NVENCODE_CFLAGS "-I/usr/local/cuda/include" -ENV CV_VERSION=4.2.0 -ENV DEBIAN_FRONTEND=noninteractive -# Get all dependencies -RUN apt-get update && apt-get install -y \ - git zip unzip libssl-dev libcairo2-dev lsb-release libgoogle-glog-dev libgflags-dev libatlas-base-dev libeigen3-dev software-properties-common \ - build-essential cmake pkg-config libapr1-dev autoconf automake 
libtool curl libc6 libboost-all-dev debconf libomp5 libstdc++6 \ - libqt5core5a libqt5xml5 libqt5gui5 libqt5widgets5 libqt5concurrent5 libqt5opengl5 libcap2 libusb-1.0-0 libatk-adaptor neovim -RUN apt-get update && apt-get install -y \ - python3-pip \ - python3-dev -RUN apt-get update && apt-get install -y \ - pcl-tools \ - python3-pcl \ - xvfb \ - x11-utils -RUN rm -rf /var/lib/apt/lists/* -# OpenCV with CUDA support -WORKDIR /opencv -RUN git clone https://github.com/opencv/opencv.git -b $CV_VERSION &&\ - git clone https://github.com/opencv/opencv_contrib.git -b $CV_VERSION -# While using OpenCV 4.2.0 we have to apply some fixes to ensure that CUDA is fully supported, thanks @https://github.com/gismo07 for this fix -RUN mkdir opencvfix && cd opencvfix &&\ - git clone https://github.com/opencv/opencv.git -b 4.5.2 &&\ - cd opencv/cmake &&\ - cp -r FindCUDA /opencv/opencv/cmake/ &&\ - cp FindCUDA.cmake /opencv/opencv/cmake/ &&\ - cp FindCUDNN.cmake /opencv/opencv/cmake/ &&\ - cp OpenCVDetectCUDA.cmake /opencv/opencv/cmake/ -WORKDIR /opencv/opencv/build -RUN cmake -D CMAKE_BUILD_TYPE=RELEASE \ --D CMAKE_INSTALL_PREFIX=/usr/local \ --D OPENCV_GENERATE_PKGCONFIG=ON \ --D BUILD_EXAMPLES=OFF \ --D INSTALL_PYTHON_EXAMPLES=OFF \ --D INSTALL_C_EXAMPLES=OFF \ --D PYTHON_EXECUTABLE=$(which python2) \ --D PYTHON3_EXECUTABLE=$(which python3) \ --D PYTHON3_INCLUDE_DIR=$(python3 -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())") \ --D PYTHON3_PACKAGES_PATH=$(python3 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") \ --D BUILD_opencv_python2=ON \ --D BUILD_opencv_python3=ON \ --D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules/ \ --D WITH_GSTREAMER=ON \ --D WITH_CUDA=ON \ --D ENABLE_PRECOMPILED_HEADERS=OFF \ -.. 
&&\ -make -j$(nproc) &&\ -make install &&\ -ldconfig &&\ -rm -rf /opencv -WORKDIR / -ENV OpenCV_DIR=/usr/share/OpenCV -# PyTorch for CUDA 11.6 +RUN apt update && apt install -y python3-pip RUN pip3 install torch==1.13.1+cu116 torchvision==0.14.1+cu116 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116 -ENV TORCH_CUDA_ARCH_LIST="3.5;5.0;6.0;6.1;7.0;7.5;8.0;8.6+PTX" -# OpenPCDet -RUN pip3 install numpy==1.23.0 llvmlite numba tensorboardX easydict pyyaml scikit-image tqdm SharedArray open3d mayavi av2 pyquaternion -RUN pip3 install spconv-cu116 -RUN pip3 install kornia==0.6.8 -RUN pip3 install nuscenes-devkit==1.0.5 -# Get extra kitti data -WORKDIR / -RUN git clone https://github.com/NVIDIA-AI-IOT/CUDA-PointPillars.git -# Change based on if you want to retain changes to the OpenPCDet repo -RUN git clone https://github.com/open-mmlab/OpenPCDet.git -WORKDIR /OpenPCDet -# Set up xvfb (X Virtual FrameBuffer) -RUN echo '#!/bin/bash\nXvfb :99 -screen 0 1280x1024x24 &\nsleep 3\nexec "$@"' > /usr/local/bin/start-xvfb.sh \ - && chmod +x /usr/local/bin/start-xvfb.sh -# Set the environment variable for DISPLAY -ENV DISPLAY=:99 -RUN python3 setup.py develop -WORKDIR / -ENV NVIDIA_VISIBLE_DEVICES="all" \ - OpenCV_DIR=/usr/share/OpenCV \ - NVIDIA_DRIVER_CAPABILITIES="video,compute,utility,graphics" \ - LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/lib:/usr/lib:/usr/local/lib \ - QT_GRAPHICSSYSTEM="native" -CMD ["/usr/local/bin/start-xvfb.sh"] +RUN pip3 install spconv-cu113 +RUN apt update && apt install -y python3-setuptools +WORKDIR /home/bolty +RUN git clone https://github.com/Kin-Zhang/OpenPCDet.git +RUN cd OpenPCDet && pip3 install -r requirements.txt +RUN pip3 install pyquaternion numpy==1.23 pillow==8.4 mayavi open3d ################################ Build ################################ FROM dependencies as build # Build ROS2 packages From 0346da7ba8cceefe3c2d0e2ac6ccfbed9d4e88e2 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Sun, 4 Feb 2024 05:54:31 +0000 Subject: [PATCH 22/70] Adding OpenCV --- ...idar_object_detection_openpcdet.Dockerfile | 39 +++++++++++++++++++ .../lidar_det_py/lidar_det_py/inference.py | 2 +- 2 files changed, 40 insertions(+), 1 deletion(-) diff --git a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile index 61902cb2..38f4801b 100644 --- a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile +++ b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile @@ -27,6 +27,45 @@ COPY --from=source ${AMENT_WS}/src src WORKDIR / RUN apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ rm -rf /root/* /root/.ros /tmp/* /var/lib/apt/lists/* /usr/share/doc/* +################################# INSTALL OpenCV with CUDA Support ################ +RUN apt-get update && apt-get install -y \ + git zip unzip libssl-dev libcairo2-dev lsb-release libgoogle-glog-dev libgflags-dev libatlas-base-dev libeigen3-dev software-properties-common \ + build-essential cmake pkg-config libapr1-dev autoconf automake libtool curl libc6 libboost-all-dev debconf libomp5 libstdc++6 \ + libqt5core5a libqt5xml5 libqt5gui5 libqt5widgets5 libqt5concurrent5 libqt5opengl5 libcap2 libusb-1.0-0 libatk-adaptor neovim +WORKDIR /opt +RUN git clone https://github.com/opencv/opencv.git && \ + cd opencv && \ + git checkout 4.5.5 + +RUN git clone https://github.com/opencv/opencv_contrib.git && \ + cd opencv_contrib 
&& \ + git checkout 4.5.5 +WORKDIR /opt/opencv/build +RUN cmake -D CMAKE_BUILD_TYPE=RELEASE \ +-D CMAKE_INSTALL_PREFIX=/usr/local \ +-D OPENCV_GENERATE_PKGCONFIG=ON \ +-D BUILD_EXAMPLES=OFF \ +-D INSTALL_PYTHON_EXAMPLES=OFF \ +-D INSTALL_C_EXAMPLES=OFF \ +-D PYTHON_EXECUTABLE=$(which python2) \ +-D PYTHON3_EXECUTABLE=$(which python3) \ +-D PYTHON3_INCLUDE_DIR=$(python3 -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())") \ +-D PYTHON3_PACKAGES_PATH=$(python3 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") \ +-D BUILD_opencv_python2=ON \ +-D BUILD_opencv_python3=ON \ +-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules/ \ +-D WITH_GSTREAMER=ON \ +-D WITH_CUDA=ON \ +-D ENABLE_PRECOMPILED_HEADERS=OFF \ +.. &&\ +make -j$(nproc) &&\ +make install &&\ +ldconfig &&\ +rm -rf /opencv + +WORKDIR / +ENV OpenCV_DIR=/usr/share/OpenCV +WORKDIR / ################################ INSTALL OpenPCDet ################################ RUN apt update && apt install -y python3-pip RUN pip3 install torch==1.13.1+cu116 torchvision==0.14.1+cu116 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116 diff --git a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py index 8e42c78a..6992168d 100644 --- a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py +++ b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py @@ -13,7 +13,7 @@ from pcdet.datasets import DatasetTemplate from pcdet.models import build_network, load_data_to_gpu from pcdet.utils import common_utils -import sensor_msgs_py.point_cloud2 as pc2 +import sensor_msgs.point_cloud2 as pc2 from visual_utils import open3d_vis_utils as V OPEN3D_FLAG = True From 31648651af8a90ab27030b33bb4595d7db0d133c Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Mon, 5 Feb 2024 01:12:12 +0000 Subject: [PATCH 23/70] ROS node runs and makes a single inference on a .bin! --- ...idar_object_detection_openpcdet.Dockerfile | 117 +++++++----------- .../lidar_det_py/lidar_det_py/inference.py | 45 +++++-- 2 files changed, 82 insertions(+), 80 deletions(-) diff --git a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile index 38f4801b..927af4a4 100644 --- a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile +++ b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile @@ -1,95 +1,68 @@ ARG BASE_IMAGE=ghcr.io/watonomous/wato_monorepo/base:cuda11.7-humble-ubuntu22.04-devel -################################ Source ################################ -FROM ${BASE_IMAGE} as source -WORKDIR ${AMENT_WS}/src -# Copy in source code -COPY src/perception/lidar_object_detection lidar_object_detection -COPY src/wato_msgs/sample_msgs sample_msgs -# Scan for rosdeps -RUN apt-get -qq update && rosdep update && \ - rosdep install --from-paths . 
--ignore-src -r -s \ - | grep 'apt-get install' \ - | awk '{print $3}' \ - | sort > /tmp/colcon_install_list -# Set environment variables -ENV CUDA_HOME /usr/local/cuda -ENV LD_LIBRARY_PATH /usr/local/cuda/lib64:${LD_LIBRARY_PATH} -ENV PATH /usr/local/cuda/bin:${PATH} -################################# Dependencies ################################ FROM ${BASE_IMAGE} as dependencies -# Install Rosdep requirements -COPY --from=source /tmp/colcon_install_list /tmp/colcon_install_list -RUN apt-fast install -qq -y --no-install-recommends $(cat /tmp/colcon_install_list) -# Copy in source code from source stage -WORKDIR ${AMENT_WS} -COPY --from=source ${AMENT_WS}/src src -# Dependency Cleanup -WORKDIR / -RUN apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ - rm -rf /root/* /root/.ros /tmp/* /var/lib/apt/lists/* /usr/share/doc/* -################################# INSTALL OpenCV with CUDA Support ################ +################################# Dependencies ################################ RUN apt-get update && apt-get install -y \ git zip unzip libssl-dev libcairo2-dev lsb-release libgoogle-glog-dev libgflags-dev libatlas-base-dev libeigen3-dev software-properties-common \ build-essential cmake pkg-config libapr1-dev autoconf automake libtool curl libc6 libboost-all-dev debconf libomp5 libstdc++6 \ - libqt5core5a libqt5xml5 libqt5gui5 libqt5widgets5 libqt5concurrent5 libqt5opengl5 libcap2 libusb-1.0-0 libatk-adaptor neovim + libqt5core5a libqt5xml5 libqt5gui5 libqt5widgets5 libqt5concurrent5 libqt5opengl5 libcap2 libusb-1.0-0 libatk-adaptor neovim \ + python3-pip python3-setuptools \ + && apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ + rm -rf /var/lib/apt/lists/* /root/* /root/.ros /tmp/* /usr/share/doc/* +################################# INSTALL OpenCV with CUDA Support ############# WORKDIR /opt RUN git clone https://github.com/opencv/opencv.git && \ - cd opencv && \ - git checkout 4.5.5 - + cd opencv && git checkout 4.5.5 RUN git clone https://github.com/opencv/opencv_contrib.git && \ - cd opencv_contrib && \ - git checkout 4.5.5 + cd opencv_contrib && git checkout 4.5.5 WORKDIR /opt/opencv/build RUN cmake -D CMAKE_BUILD_TYPE=RELEASE \ --D CMAKE_INSTALL_PREFIX=/usr/local \ --D OPENCV_GENERATE_PKGCONFIG=ON \ --D BUILD_EXAMPLES=OFF \ --D INSTALL_PYTHON_EXAMPLES=OFF \ --D INSTALL_C_EXAMPLES=OFF \ --D PYTHON_EXECUTABLE=$(which python2) \ --D PYTHON3_EXECUTABLE=$(which python3) \ --D PYTHON3_INCLUDE_DIR=$(python3 -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())") \ --D PYTHON3_PACKAGES_PATH=$(python3 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") \ --D BUILD_opencv_python2=ON \ --D BUILD_opencv_python3=ON \ --D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules/ \ --D WITH_GSTREAMER=ON \ --D WITH_CUDA=ON \ --D ENABLE_PRECOMPILED_HEADERS=OFF \ -.. 
&&\ -make -j$(nproc) &&\ -make install &&\ -ldconfig &&\ -rm -rf /opencv - -WORKDIR / + -D CMAKE_INSTALL_PREFIX=/usr/local \ + -D OPENCV_GENERATE_PKGCONFIG=ON \ + -D BUILD_EXAMPLES=OFF \ + -D INSTALL_PYTHON_EXAMPLES=OFF \ + -D INSTALL_C_EXAMPLES=OFF \ + -D PYTHON_EXECUTABLE=$(which python3) \ + -D PYTHON3_INCLUDE_DIR=$(python3 -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())") \ + -D PYTHON3_PACKAGES_PATH=$(python3 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") \ + -D BUILD_opencv_python3=ON \ + -D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules/ \ + -D WITH_GSTREAMER=ON \ + -D WITH_CUDA=ON \ + -D ENABLE_PRECOMPILED_HEADERS=OFF \ + .. && make -j$(nproc) && make install && ldconfig +RUN rm -rf /opt/opencv /opt/opencv_contrib +# Set environment variables +ENV CUDA_HOME /usr/local/cuda +ENV LD_LIBRARY_PATH /usr/local/cuda/lib64:${LD_LIBRARY_PATH} +ENV PATH /usr/local/cuda/bin:${PATH} ENV OpenCV_DIR=/usr/share/OpenCV -WORKDIR / -################################ INSTALL OpenPCDet ################################ -RUN apt update && apt install -y python3-pip +# Install Python dependencies RUN pip3 install torch==1.13.1+cu116 torchvision==0.14.1+cu116 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116 -RUN pip3 install spconv-cu113 -RUN apt update && apt install -y python3-setuptools +RUN pip3 install spconv-cu113 pyquaternion numpy==1.23 pillow==8.4 mayavi open3d av2 +WORKDIR /home/bolty +# RUN git clone https://github.com/WATonomous/OpenPCDet.git +COPY /OpenPCDet /home/bolty/OpenPCDet WORKDIR /home/bolty -RUN git clone https://github.com/Kin-Zhang/OpenPCDet.git RUN cd OpenPCDet && pip3 install -r requirements.txt -RUN pip3 install pyquaternion numpy==1.23 pillow==8.4 mayavi open3d -################################ Build ################################ +RUN pip3 install kornia==0.6.8 +RUN pip3 install nuscenes-devkit==1.0.5 +################################ Source ####################################### FROM dependencies as build -# Build ROS2 packages +WORKDIR ${AMENT_WS}/src +COPY wato_monorepo/src/perception/lidar_object_detection lidar_object_detection +COPY wato_monorepo/src/wato_msgs/sample_msgs sample_msgs +RUN apt-get update && rosdep update && \ + rosdep install --from-paths . --ignore-src -r -y WORKDIR ${AMENT_WS} -RUN . /opt/ros/$ROS_DISTRO/setup.sh && \ - colcon build \ - --cmake-args -DCMAKE_BUILD_TYPE=Release -# Entrypoint will run before any CMD on launch. Sources ~/opt//setup.bash and ~/ament_ws/install/setup.bash -COPY docker/wato_ros_entrypoint.sh ${AMENT_WS}/wato_ros_entrypoint.sh +RUN . 
/opt/ros/$ROS_DISTRO/setup.sh && colcon build --cmake-args -DCMAKE_BUILD_TYPE=Release +# Entrypoint setup +COPY wato_monorepo/docker/wato_ros_entrypoint.sh ${AMENT_WS}/wato_ros_entrypoint.sh ENTRYPOINT ["./wato_ros_entrypoint.sh"] -################################ Prod ################################ +################################ Prod ######################################### # FROM build as deploy # # Source Cleanup and Security Setup # RUN chown -R $USER:$USER ${AMENT_WS} # RUN rm -rf src/* -# USER ${USER} +# USER ${USER} \ No newline at end of file diff --git a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py index 6992168d..33621c78 100644 --- a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py +++ b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py @@ -4,7 +4,7 @@ import rclpy from rclpy.node import Node from std_msgs.msg import Header -from sensor_msgs.msg import PointCloud2 +from sensor_msgs.msg import PointCloud2, PointField import numpy as np import torch import sys @@ -13,9 +13,7 @@ from pcdet.datasets import DatasetTemplate from pcdet.models import build_network, load_data_to_gpu from pcdet.utils import common_utils -import sensor_msgs.point_cloud2 as pc2 -from visual_utils import open3d_vis_utils as V OPEN3D_FLAG = True @@ -49,18 +47,19 @@ def process_data(self): # Convert and publish data header = self.make_header() - point_cloud_msg = pc2.create_cloud_xyz32(header, data_dict['points'][:, 1:4]) # Assuming first 3 columns are x, y, z + points = data_dict['points'][:, 1:4] # Extract the relevant columns (x, y, z) + point_cloud_msg = self.create_cloud_xyz32(header, points) # Assuming first 3 columns are x, y, z self.publisher_.publish(point_cloud_msg) self.logger.info('Demo done.') def parse_config(self): parser = argparse.ArgumentParser(description='arg parser') - parser.add_argument('--cfg_file', type=str, default='cfgs/nuscenes_models/cbgs_second_multihead.yaml', + parser.add_argument('--cfg_file', type=str, default='/home/bolty/OpenPCDet/tools/cfgs/kitti_models/pv_rcnn.yaml', help='specify the config for demo') - parser.add_argument('--data_path', type=str, default='demo_data', + parser.add_argument('--data_path', type=str, default='/home/bolty/data/velodyne/data/0000000001.bin', help='specify the point cloud data file or directory') - parser.add_argument('--ckpt', type=str, default=None, help='specify the pretrained model') + parser.add_argument('--ckpt', type=str, default="/home/bolty/OpenPCDet/models/pv_rcnn8369.pth", help='specify the pretrained model') parser.add_argument('--ext', type=str, default='.bin', help='specify the extension of your point cloud data file') args, unknown = parser.parse_known_args() @@ -72,6 +71,36 @@ def make_header(self): header.stamp = self.get_clock().now().to_msg() header.frame_id = 'lidar_frame' return header + + def create_cloud_xyz32(self, header, points): + """ + Create a sensor_msgs/PointCloud2 message from an array of points. + + :param header: std_msgs/Header, the header of the message. + :param points: numpy.ndarray, an Nx3 array of xyz points. + :return: sensor_msgs/PointCloud2, the constructed PointCloud2 message. 
+ """ + # Create fields for the PointCloud2 message + fields = [ + PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1), + PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1), + PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1) + ] + points_np = points.cpu().numpy() + # Create a PointCloud2 message + cloud = PointCloud2() + cloud.header = header + cloud.height = 1 # Unstructured point cloud + cloud.width = points_np.shape[0] + cloud.fields = fields + cloud.is_bigendian = False # Assuming little endian + cloud.point_step = 12 # FLOAT32 (4 bytes) * 3 (x, y, z) + cloud.row_step = cloud.point_step * cloud.width + cloud.is_dense = bool(np.isfinite(points_np).all()) + cloud.data = np.asarray(points_np, np.float32).tobytes() + + return cloud + class DemoDataset(DatasetTemplate): @@ -99,7 +128,7 @@ def __len__(self): def __getitem__(self, index): if self.ext == '.bin': - points = np.fromfile(self.sample_file_list[index], dtype=np.float32).reshape(-1, 5) + points = np.fromfile(self.sample_file_list[index], dtype=np.float32).reshape(-1, 4) #change back to 5 later elif self.ext == '.npy': points = np.load(self.sample_file_list[index]) else: From 6ac0f6d609af6c341f572c2d9e29689d3476cbf8 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Tue, 6 Feb 2024 03:19:00 +0000 Subject: [PATCH 24/70] Foxglove integration and bounding boxes --- ...idar_object_detection_openpcdet.Dockerfile | 3 ++- .../lidar_det_py/lidar_det_py/inference.py | 27 +++++++++++++++++++ .../lidar_det_py/package.xml | 1 + 3 files changed, 30 insertions(+), 1 deletion(-) diff --git a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile index 927af4a4..18487a31 100644 --- a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile +++ b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile @@ -7,7 +7,7 @@ RUN apt-get update && apt-get install -y \ libqt5core5a libqt5xml5 libqt5gui5 libqt5widgets5 libqt5concurrent5 libqt5opengl5 libcap2 libusb-1.0-0 libatk-adaptor neovim \ python3-pip python3-setuptools \ && apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ - rm -rf /var/lib/apt/lists/* /root/* /root/.ros /tmp/* /usr/share/doc/* + rm -rf /var/lib/apt/lists/* /root/* /root/.ros /tmp/* /usr/share/doc ################################# INSTALL OpenCV with CUDA Support ############# WORKDIR /opt RUN git clone https://github.com/opencv/opencv.git && \ @@ -53,6 +53,7 @@ COPY wato_monorepo/src/perception/lidar_object_detection lidar_object_detection COPY wato_monorepo/src/wato_msgs/sample_msgs sample_msgs RUN apt-get update && rosdep update && \ rosdep install --from-paths . --ignore-src -r -y +RUN apt install -y ros-${ROS_DISTRO}-foxglove-bridge WORKDIR ${AMENT_WS} RUN . 
/opt/ros/$ROS_DISTRO/setup.sh && colcon build --cmake-args -DCMAKE_BUILD_TYPE=Release # Entrypoint setup diff --git a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py index 33621c78..dd544077 100644 --- a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py +++ b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py @@ -13,6 +13,7 @@ from pcdet.datasets import DatasetTemplate from pcdet.models import build_network, load_data_to_gpu from pcdet.utils import common_utils +from visualization_msgs.msg import Marker, MarkerArray OPEN3D_FLAG = True @@ -21,6 +22,7 @@ class LidarDemoNode(Node): def __init__(self): super().__init__('lidar_demo_node') self.publisher_ = self.create_publisher(PointCloud2, 'lidar_data', 10) + self.bbox_publisher = self.create_publisher(MarkerArray, '/bounding_boxes', 10) args, cfg = self.parse_config() self.logger = common_utils.create_logger() @@ -38,6 +40,30 @@ def __init__(self): self.process_data() + def publish_bounding_boxes(self, pred_dicts, frame_id): + marker_array = MarkerArray() + for idx, box in enumerate(pred_dicts[0]['pred_boxes']): + marker = Marker() + marker.header.frame_id = frame_id + marker.header.stamp = self.get_clock().now().to_msg() + marker.id = idx + marker.type = Marker.CUBE + marker.action = Marker.ADD + marker.pose.position.x = float(box[0]) + marker.pose.position.y = float(box[1]) + marker.pose.position.z = float(box[2]) + marker.pose.orientation.w = 1.0 # Assuming no rotation; modify as needed + marker.scale.x = float(box[3]) # Size in X direction + marker.scale.y = float(box[4]) # Size in Y direction + marker.scale.z = float(box[5]) # Size in Z direction + marker.color.a = 0.8 # Alpha + marker.color.r = 1.0 # Red + marker.color.g = 0.0 # Green + marker.color.b = 0.0 # Blue + marker_array.markers.append(marker) + + self.bbox_publisher.publish(marker_array) + def process_data(self): with torch.no_grad(): for idx, data_dict in enumerate(self.demo_dataset): @@ -50,6 +76,7 @@ def process_data(self): points = data_dict['points'][:, 1:4] # Extract the relevant columns (x, y, z) point_cloud_msg = self.create_cloud_xyz32(header, points) # Assuming first 3 columns are x, y, z self.publisher_.publish(point_cloud_msg) + self.publish_bounding_boxes(pred_dicts, "lidar_frame") # Use appropriate frame_id self.logger.info('Demo done.') diff --git a/src/perception/lidar_object_detection/lidar_det_py/package.xml b/src/perception/lidar_object_detection/lidar_det_py/package.xml index f9da549c..0700ea0c 100644 --- a/src/perception/lidar_object_detection/lidar_det_py/package.xml +++ b/src/perception/lidar_object_detection/lidar_det_py/package.xml @@ -14,6 +14,7 @@ nav_msgs geometry_msgs tf + visualization_msgs ament_copyright ament_flake8 From 58f0e193b537dd66d77ad73bcc0613c7df70bd8f Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Tue, 6 Feb 2024 06:02:14 +0000 Subject: [PATCH 25/70] aligned lidar data with bounding boxes --- .../lidar_det_py/lidar_det_py/inference.py | 24 +++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py index dd544077..52574cae 100644 --- a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py +++ b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py @@ -73,7 
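The markers above are published with an identity orientation. If the seventh element of each predicted box is a heading angle, which is the usual OpenPCDet (x, y, z, dx, dy, dz, yaw) layout but should be confirmed for the chosen config, the orientation can be filled in with a small helper; a sketch:

    import math
    from geometry_msgs.msg import Quaternion

    def yaw_to_quaternion(yaw: float) -> Quaternion:
        """Quaternion for a rotation of `yaw` radians about the +Z axis."""
        q = Quaternion()
        q.z = math.sin(yaw / 2.0)
        q.w = math.cos(yaw / 2.0)
        return q

    # Inside publish_bounding_boxes, assuming box[6] is the heading angle:
    #   marker.pose.orientation = yaw_to_quaternion(float(box[6]))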
+73,7 @@ def process_data(self): # Convert and publish data header = self.make_header() - points = data_dict['points'][:, 1:4] # Extract the relevant columns (x, y, z) + points = data_dict['points'][:, :3] # Extract the relevant columns (x, y, z) point_cloud_msg = self.create_cloud_xyz32(header, points) # Assuming first 3 columns are x, y, z self.publisher_.publish(point_cloud_msg) self.publish_bounding_boxes(pred_dicts, "lidar_frame") # Use appropriate frame_id @@ -113,18 +113,34 @@ def create_cloud_xyz32(self, header, points): PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1), PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1) ] + + y_angle = np.deg2rad(90) + z_angle = np.deg2rad(-90) + rotation_matrix_y = np.array([ + [np.cos(y_angle), 0, np.sin(y_angle)], + [0, 1, 0 ], + [-np.sin(y_angle), 0, np.cos(y_angle)] + ]) + rotation_matrix_z = np.array([ + [np.cos(z_angle), -np.sin(z_angle), 0], + [np.sin(z_angle), np.cos(z_angle), 0], + [0, 0, 1] + ]) + # Apply transformation points_np = points.cpu().numpy() + points_np_y = np.dot(points_np, rotation_matrix_y) + points_transformed = np.dot(points_np_y, rotation_matrix_z.T) # Create a PointCloud2 message cloud = PointCloud2() cloud.header = header cloud.height = 1 # Unstructured point cloud - cloud.width = points_np.shape[0] + cloud.width = points_transformed.shape[0] cloud.fields = fields cloud.is_bigendian = False # Assuming little endian cloud.point_step = 12 # FLOAT32 (4 bytes) * 3 (x, y, z) cloud.row_step = cloud.point_step * cloud.width - cloud.is_dense = bool(np.isfinite(points_np).all()) - cloud.data = np.asarray(points_np, np.float32).tobytes() + cloud.is_dense = bool(np.isfinite(points_transformed).all()) + cloud.data = np.asarray(points_transformed, np.float32).tobytes() return cloud From 2480fd4c9792b0d9fb2f5a50dd35fe7bc76d4bc1 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Fri, 9 Feb 2024 00:22:16 +0000 Subject: [PATCH 26/70] changing to work with wato fork of openpcdet --- .../lidar_object_detection_openpcdet.Dockerfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile index 18487a31..e14c962e 100644 --- a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile +++ b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile @@ -8,7 +8,7 @@ RUN apt-get update && apt-get install -y \ python3-pip python3-setuptools \ && apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ rm -rf /var/lib/apt/lists/* /root/* /root/.ros /tmp/* /usr/share/doc -################################# INSTALL OpenCV with CUDA Support ############# +################################ INSTALL OpenCV with CUDA Support ############## WORKDIR /opt RUN git clone https://github.com/opencv/opencv.git && \ cd opencv && git checkout 4.5.5 @@ -40,8 +40,8 @@ ENV OpenCV_DIR=/usr/share/OpenCV RUN pip3 install torch==1.13.1+cu116 torchvision==0.14.1+cu116 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116 RUN pip3 install spconv-cu113 pyquaternion numpy==1.23 pillow==8.4 mayavi open3d av2 WORKDIR /home/bolty -# RUN git clone https://github.com/WATonomous/OpenPCDet.git -COPY /OpenPCDet /home/bolty/OpenPCDet +RUN git clone https://github.com/WATonomous/OpenPCDet.git +# COPY /OpenPCDet /home/bolty/OpenPCDet WORKDIR /home/bolty RUN cd OpenPCDet && 
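The two rotations above are applied in separate matmuls, one with the matrix as-is and one transposed, which makes the row-vector versus column-vector convention easy to get wrong. If the intent is "rotate 90 degrees about Y, then -90 degrees about Z", the two steps can be composed once and applied consistently; the sketch below assumes the column-vector convention, and checking the sign and order against the Foxglove view is left as an assumption to verify:

    import numpy as np

    def make_rotation(y_deg: float = 90.0, z_deg: float = -90.0) -> np.ndarray:
        """Combined rotation R = Rz @ Ry for column vectors."""
        y, z = np.deg2rad(y_deg), np.deg2rad(z_deg)
        rot_y = np.array([[np.cos(y), 0.0, np.sin(y)],
                          [0.0, 1.0, 0.0],
                          [-np.sin(y), 0.0, np.cos(y)]])
        rot_z = np.array([[np.cos(z), -np.sin(z), 0.0],
                          [np.sin(z), np.cos(z), 0.0],
                          [0.0, 0.0, 1.0]])
        return rot_z @ rot_y

    # Row-vector form, applied once to an Nx3 array of points:
    #   transformed = points @ make_rotation().T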
pip3 install -r requirements.txt RUN pip3 install kornia==0.6.8 From 41e1296b591345eaba65b80c2d3fa37c1592d2b0 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Fri, 9 Feb 2024 16:57:48 +0000 Subject: [PATCH 27/70] updated --- .../lidar_object_detection_openpcdet.Dockerfile | 2 ++ .../lidar_det_py/lidar_det_py/inference.py | 8 ++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile index e14c962e..4fac13c6 100644 --- a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile +++ b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile @@ -38,6 +38,8 @@ ENV PATH /usr/local/cuda/bin:${PATH} ENV OpenCV_DIR=/usr/share/OpenCV # Install Python dependencies RUN pip3 install torch==1.13.1+cu116 torchvision==0.14.1+cu116 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116 +RUN pip3 install torch-scatter -f https://data.pyg.org/whl/torch-1.13.1+cu116.html +ENV TORCH_CUDA_ARCH_LIST="3.5;5.0;6.0;6.1;7.0;7.5;8.0;8.6+PTX" RUN pip3 install spconv-cu113 pyquaternion numpy==1.23 pillow==8.4 mayavi open3d av2 WORKDIR /home/bolty RUN git clone https://github.com/WATonomous/OpenPCDet.git diff --git a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py index 52574cae..a7874d05 100644 --- a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py +++ b/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py @@ -82,11 +82,11 @@ def process_data(self): def parse_config(self): parser = argparse.ArgumentParser(description='arg parser') - parser.add_argument('--cfg_file', type=str, default='/home/bolty/OpenPCDet/tools/cfgs/kitti_models/pv_rcnn.yaml', + parser.add_argument('--cfg_file', type=str, default='/home/bolty/OpenPCDet/tools/cfgs/nuscenes_models/cbgs_voxel0075_voxelnext.yaml', help='specify the config for demo') - parser.add_argument('--data_path', type=str, default='/home/bolty/data/velodyne/data/0000000001.bin', + parser.add_argument('--data_path', type=str, default='/home/bolty/data/n015-2018-11-21-19-38-26+0800__LIDAR_TOP__1542801000947820.pcd.bin', help='specify the point cloud data file or directory') - parser.add_argument('--ckpt', type=str, default="/home/bolty/OpenPCDet/models/pv_rcnn8369.pth", help='specify the pretrained model') + parser.add_argument('--ckpt', type=str, default="/home/bolty/OpenPCDet/models/voxelnext_nuscenes_kernel1.pth", help='specify the pretrained model') parser.add_argument('--ext', type=str, default='.bin', help='specify the extension of your point cloud data file') args, unknown = parser.parse_known_args() @@ -171,7 +171,7 @@ def __len__(self): def __getitem__(self, index): if self.ext == '.bin': - points = np.fromfile(self.sample_file_list[index], dtype=np.float32).reshape(-1, 4) #change back to 5 later + points = np.fromfile(self.sample_file_list[index], dtype=np.float32).reshape(-1, 5) elif self.ext == '.npy': points = np.load(self.sample_file_list[index]) else: From 3071625bcf68acafd0feb781d21b5ab6365615ad Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Fri, 9 Feb 2024 18:51:59 +0000 Subject: [PATCH 28/70] we're changing this to copy for now since we want to clone OpenPCDet and download the required model --- .../lidar_object_detection_openpcdet.Dockerfile | 4 ++-- 1 
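Reshaping back to (-1, 5) matches the nuScenes sweep format that the new default --data_path points at: five float32 values per point, conventionally x, y, z, intensity and a ring index. A standalone sanity check of such a file (the path is simply the default from the diff) might look like:

    import numpy as np

    points = np.fromfile(
        "/home/bolty/data/n015-2018-11-21-19-38-26+0800__LIDAR_TOP__1542801000947820.pcd.bin",
        dtype=np.float32,
    ).reshape(-1, 5)
    print(points.shape)
    print("x/y/z min:", points[:, :3].min(axis=0), "max:", points[:, :3].max(axis=0))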
file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile index 4fac13c6..b8b8c2aa 100644 --- a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile +++ b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile @@ -42,8 +42,8 @@ RUN pip3 install torch-scatter -f https://data.pyg.org/whl/torch-1.13.1+cu116.ht ENV TORCH_CUDA_ARCH_LIST="3.5;5.0;6.0;6.1;7.0;7.5;8.0;8.6+PTX" RUN pip3 install spconv-cu113 pyquaternion numpy==1.23 pillow==8.4 mayavi open3d av2 WORKDIR /home/bolty -RUN git clone https://github.com/WATonomous/OpenPCDet.git -# COPY /OpenPCDet /home/bolty/OpenPCDet +# RUN git clone https://github.com/WATonomous/OpenPCDet.git +COPY /OpenPCDet /home/bolty/OpenPCDet WORKDIR /home/bolty RUN cd OpenPCDet && pip3 install -r requirements.txt RUN pip3 install kornia==0.6.8 From 9f177c6acdcd8c67e951ba2e7a899ea1ca674a4f Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Tue, 13 Feb 2024 02:23:05 +0000 Subject: [PATCH 29/70] renaming package --- .../lidar_object_detection.Dockerfile | 112 ++++++------ ...idar_object_detection_openpcdet.Dockerfile | 71 -------- modules/docker-compose.perception.yaml | 162 +++++++++--------- .../lidar_det_py/setup.cfg | 4 - .../launch/3d_object_detector.py | 2 +- .../launch/config.yaml | 0 .../lidar_object_detection}/__init__.py | 0 .../lidar_object_detection}/inference.py | 0 .../lidar_object_detection}/test_node.py | 0 .../lidar_object_detection}/utils/draw_3d.py | 0 .../lidar_object_detection}/utils/global_def | 0 .../package.xml | 2 +- .../resource/lidar_object_detection} | 0 .../lidar_object_detection/setup.cfg | 4 + .../setup.py | 6 +- watod-config.sh | 2 + 16 files changed, 156 insertions(+), 209 deletions(-) delete mode 100644 docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile delete mode 100644 src/perception/lidar_object_detection/lidar_det_py/setup.cfg rename src/perception/lidar_object_detection/{lidar_det_py => lidar_object_detection}/launch/3d_object_detector.py (86%) rename src/perception/lidar_object_detection/{lidar_det_py => lidar_object_detection}/launch/config.yaml (100%) rename src/perception/lidar_object_detection/{lidar_det_py/lidar_det_py => lidar_object_detection/lidar_object_detection}/__init__.py (100%) rename src/perception/lidar_object_detection/{lidar_det_py/lidar_det_py => lidar_object_detection/lidar_object_detection}/inference.py (100%) rename src/perception/lidar_object_detection/{lidar_det_py/lidar_det_py => lidar_object_detection/lidar_object_detection}/test_node.py (100%) rename src/perception/lidar_object_detection/{lidar_det_py/lidar_det_py => lidar_object_detection/lidar_object_detection}/utils/draw_3d.py (100%) rename src/perception/lidar_object_detection/{lidar_det_py/lidar_det_py => lidar_object_detection/lidar_object_detection}/utils/global_def (100%) rename src/perception/lidar_object_detection/{lidar_det_py => lidar_object_detection}/package.xml (95%) rename src/perception/lidar_object_detection/{lidar_det_py/resource/lidar_det_py => lidar_object_detection/resource/lidar_object_detection} (100%) create mode 100644 src/perception/lidar_object_detection/lidar_object_detection/setup.cfg rename src/perception/lidar_object_detection/{lidar_det_py => lidar_object_detection}/setup.py (80%) diff --git 
a/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile b/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile index 6deba0bc..52d7da16 100644 --- a/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile +++ b/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile @@ -1,55 +1,71 @@ -ARG BASE_IMAGE=ghcr.io/watonomous/wato_monorepo/base:humble-ubuntu22.04 - -################################ Source ################################ -FROM ${BASE_IMAGE} as source - -WORKDIR ${AMENT_WS}/src - -# Copy in source code -COPY src/perception/lidar_object_detection lidar_object_detection -COPY src/wato_msgs/sample_msgs sample_msgs - -# Scan for rosdeps -RUN apt-get -qq update && rosdep update && \ - rosdep install --from-paths . --ignore-src -r -s \ - | grep 'apt-get install' \ - | awk '{print $3}' \ - | sort > /tmp/colcon_install_list - -################################# Dependencies ################################ +ARG BASE_IMAGE=ghcr.io/watonomous/wato_monorepo/base:cuda11.7-humble-ubuntu22.04-devel FROM ${BASE_IMAGE} as dependencies - -# Install Rosdep requirements -COPY --from=source /tmp/colcon_install_list /tmp/colcon_install_list -RUN apt-fast install -qq -y --no-install-recommends $(cat /tmp/colcon_install_list) - -# Copy in source code from source stage -WORKDIR ${AMENT_WS} -COPY --from=source ${AMENT_WS}/src src - -# Dependency Cleanup -WORKDIR / -RUN apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ - rm -rf /root/* /root/.ros /tmp/* /var/lib/apt/lists/* /usr/share/doc/* - -################################ Build ################################ +################################# Dependencies ################################ +RUN apt-get update && apt-get install -y \ + git zip unzip libssl-dev libcairo2-dev lsb-release libgoogle-glog-dev libgflags-dev libatlas-base-dev libeigen3-dev software-properties-common \ + build-essential cmake pkg-config libapr1-dev autoconf automake libtool curl libc6 libboost-all-dev debconf libomp5 libstdc++6 \ + libqt5core5a libqt5xml5 libqt5gui5 libqt5widgets5 libqt5concurrent5 libqt5opengl5 libcap2 libusb-1.0-0 libatk-adaptor neovim \ + python3-pip python3-setuptools \ + && apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ + rm -rf /var/lib/apt/lists/* /root/* /root/.ros /tmp/* /usr/share/doc +################################ INSTALL OpenCV with CUDA Support ############## +WORKDIR /opt +RUN git clone https://github.com/opencv/opencv.git && \ + cd opencv && git checkout 4.5.5 +RUN git clone https://github.com/opencv/opencv_contrib.git && \ + cd opencv_contrib && git checkout 4.5.5 +WORKDIR /opt/opencv/build +RUN cmake -D CMAKE_BUILD_TYPE=RELEASE \ + -D CMAKE_INSTALL_PREFIX=/usr/local \ + -D OPENCV_GENERATE_PKGCONFIG=ON \ + -D BUILD_EXAMPLES=OFF \ + -D INSTALL_PYTHON_EXAMPLES=OFF \ + -D INSTALL_C_EXAMPLES=OFF \ + -D PYTHON_EXECUTABLE=$(which python3) \ + -D PYTHON3_INCLUDE_DIR=$(python3 -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())") \ + -D PYTHON3_PACKAGES_PATH=$(python3 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") \ + -D BUILD_opencv_python3=ON \ + -D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules/ \ + -D WITH_GSTREAMER=ON \ + -D WITH_CUDA=ON \ + -D ENABLE_PRECOMPILED_HEADERS=OFF \ + .. 
&& make -j$(nproc) && make install && ldconfig +RUN rm -rf /opt/opencv /opt/opencv_contrib +# Set environment variables +ENV CUDA_HOME /usr/local/cuda +ENV LD_LIBRARY_PATH /usr/local/cuda/lib64:${LD_LIBRARY_PATH} +ENV PATH /usr/local/cuda/bin:${PATH} +ENV OpenCV_DIR=/usr/share/OpenCV +# Install Python dependencies +RUN pip3 install torch==1.13.1+cu116 torchvision==0.14.1+cu116 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116 +RUN pip3 install torch-scatter -f https://data.pyg.org/whl/torch-1.13.1+cu116.html +ENV TORCH_CUDA_ARCH_LIST="3.5;5.0;6.0;6.1;7.0;7.5;8.0;8.6+PTX" +RUN pip3 install spconv-cu113 pyquaternion numpy==1.23 pillow==8.4 mayavi open3d av2 +WORKDIR /home/bolty +# RUN git clone https://github.com/WATonomous/OpenPCDet.git +COPY /OpenPCDet /home/bolty/OpenPCDet +WORKDIR /home/bolty +RUN cd OpenPCDet && pip3 install -r requirements.txt +RUN pip3 install kornia==0.6.8 +RUN pip3 install nuscenes-devkit==1.0.5 +################################ Source ####################################### FROM dependencies as build - -# Build ROS2 packages +WORKDIR ${AMENT_WS}/src +COPY wato_monorepo/src/perception/lidar_object_detection lidar_object_detection +COPY wato_monorepo/src/wato_msgs/sample_msgs sample_msgs +RUN apt-get update && rosdep update && \ + rosdep install --from-paths . --ignore-src -r -y +RUN apt install -y ros-${ROS_DISTRO}-foxglove-bridge WORKDIR ${AMENT_WS} -RUN . /opt/ros/$ROS_DISTRO/setup.sh && \ - colcon build \ - --cmake-args -DCMAKE_BUILD_TYPE=Release - -# Entrypoint will run before any CMD on launch. Sources ~/opt//setup.bash and ~/ament_ws/install/setup.bash -COPY docker/wato_ros_entrypoint.sh ${AMENT_WS}/wato_ros_entrypoint.sh +RUN . /opt/ros/$ROS_DISTRO/setup.sh && colcon build --cmake-args -DCMAKE_BUILD_TYPE=Release +# Entrypoint setup +COPY wato_monorepo/docker/wato_ros_entrypoint.sh ${AMENT_WS}/wato_ros_entrypoint.sh ENTRYPOINT ["./wato_ros_entrypoint.sh"] - -################################ Prod ################################ -FROM build as deploy +################################ Prod ######################################### +# FROM build as deploy # Source Cleanup and Security Setup -RUN chown -R $USER:$USER ${AMENT_WS} -RUN rm -rf src/* +# RUN chown -R $USER:$USER ${AMENT_WS} +# RUN rm -rf src/* -USER ${USER} +# USER ${USER} \ No newline at end of file diff --git a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile b/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile deleted file mode 100644 index b8b8c2aa..00000000 --- a/docker/perception/lidar_object_detection/lidar_object_detection_openpcdet.Dockerfile +++ /dev/null @@ -1,71 +0,0 @@ -ARG BASE_IMAGE=ghcr.io/watonomous/wato_monorepo/base:cuda11.7-humble-ubuntu22.04-devel -FROM ${BASE_IMAGE} as dependencies -################################# Dependencies ################################ -RUN apt-get update && apt-get install -y \ - git zip unzip libssl-dev libcairo2-dev lsb-release libgoogle-glog-dev libgflags-dev libatlas-base-dev libeigen3-dev software-properties-common \ - build-essential cmake pkg-config libapr1-dev autoconf automake libtool curl libc6 libboost-all-dev debconf libomp5 libstdc++6 \ - libqt5core5a libqt5xml5 libqt5gui5 libqt5widgets5 libqt5concurrent5 libqt5opengl5 libcap2 libusb-1.0-0 libatk-adaptor neovim \ - python3-pip python3-setuptools \ - && apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ - rm -rf /var/lib/apt/lists/* /root/* /root/.ros /tmp/* /usr/share/doc 
-################################ INSTALL OpenCV with CUDA Support ############## -WORKDIR /opt -RUN git clone https://github.com/opencv/opencv.git && \ - cd opencv && git checkout 4.5.5 -RUN git clone https://github.com/opencv/opencv_contrib.git && \ - cd opencv_contrib && git checkout 4.5.5 -WORKDIR /opt/opencv/build -RUN cmake -D CMAKE_BUILD_TYPE=RELEASE \ - -D CMAKE_INSTALL_PREFIX=/usr/local \ - -D OPENCV_GENERATE_PKGCONFIG=ON \ - -D BUILD_EXAMPLES=OFF \ - -D INSTALL_PYTHON_EXAMPLES=OFF \ - -D INSTALL_C_EXAMPLES=OFF \ - -D PYTHON_EXECUTABLE=$(which python3) \ - -D PYTHON3_INCLUDE_DIR=$(python3 -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())") \ - -D PYTHON3_PACKAGES_PATH=$(python3 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") \ - -D BUILD_opencv_python3=ON \ - -D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules/ \ - -D WITH_GSTREAMER=ON \ - -D WITH_CUDA=ON \ - -D ENABLE_PRECOMPILED_HEADERS=OFF \ - .. && make -j$(nproc) && make install && ldconfig -RUN rm -rf /opt/opencv /opt/opencv_contrib -# Set environment variables -ENV CUDA_HOME /usr/local/cuda -ENV LD_LIBRARY_PATH /usr/local/cuda/lib64:${LD_LIBRARY_PATH} -ENV PATH /usr/local/cuda/bin:${PATH} -ENV OpenCV_DIR=/usr/share/OpenCV -# Install Python dependencies -RUN pip3 install torch==1.13.1+cu116 torchvision==0.14.1+cu116 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116 -RUN pip3 install torch-scatter -f https://data.pyg.org/whl/torch-1.13.1+cu116.html -ENV TORCH_CUDA_ARCH_LIST="3.5;5.0;6.0;6.1;7.0;7.5;8.0;8.6+PTX" -RUN pip3 install spconv-cu113 pyquaternion numpy==1.23 pillow==8.4 mayavi open3d av2 -WORKDIR /home/bolty -# RUN git clone https://github.com/WATonomous/OpenPCDet.git -COPY /OpenPCDet /home/bolty/OpenPCDet -WORKDIR /home/bolty -RUN cd OpenPCDet && pip3 install -r requirements.txt -RUN pip3 install kornia==0.6.8 -RUN pip3 install nuscenes-devkit==1.0.5 -################################ Source ####################################### -FROM dependencies as build -WORKDIR ${AMENT_WS}/src -COPY wato_monorepo/src/perception/lidar_object_detection lidar_object_detection -COPY wato_monorepo/src/wato_msgs/sample_msgs sample_msgs -RUN apt-get update && rosdep update && \ - rosdep install --from-paths . --ignore-src -r -y -RUN apt install -y ros-${ROS_DISTRO}-foxglove-bridge -WORKDIR ${AMENT_WS} -RUN . /opt/ros/$ROS_DISTRO/setup.sh && colcon build --cmake-args -DCMAKE_BUILD_TYPE=Release -# Entrypoint setup -COPY wato_monorepo/docker/wato_ros_entrypoint.sh ${AMENT_WS}/wato_ros_entrypoint.sh -ENTRYPOINT ["./wato_ros_entrypoint.sh"] -################################ Prod ######################################### -# FROM build as deploy - -# # Source Cleanup and Security Setup -# RUN chown -R $USER:$USER ${AMENT_WS} -# RUN rm -rf src/* - -# USER ${USER} \ No newline at end of file diff --git a/modules/docker-compose.perception.yaml b/modules/docker-compose.perception.yaml index fd6ce9b3..9cef780b 100644 --- a/modules/docker-compose.perception.yaml +++ b/modules/docker-compose.perception.yaml @@ -1,98 +1,98 @@ version: "3.8" services: - radar_object_detection: - build: - context: .. 
- dockerfile: docker/perception/radar_object_detection/radar_object_detection.Dockerfile - cache_from: - - "${PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE:?}:${TAG}" - - "${PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE:?}:main" - target: deploy - image: "${PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE:?}:${TAG}" - command: /bin/bash -c "ros2 launch radar_object_detection radar_object_detection.launch.py" + # radar_object_detection: + # build: + # context: .. + # dockerfile: docker/perception/radar_object_detection/radar_object_detection.Dockerfile + # cache_from: + # - "${PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE:?}:${TAG}" + # - "${PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE:?}:main" + # target: deploy + # image: "${PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE:?}:${TAG}" + # command: /bin/bash -c "ros2 launch radar_object_detection radar_object_detection.launch.py" - camera_object_detection: - build: - context: .. - dockerfile: docker/perception/camera_object_detection/camera_object_detection.Dockerfile - cache_from: - - "${PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE:?}:${TAG}" - - "${PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE:?}:main" - target: deploy - image: "${PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE:?}:${TAG}" - deploy: - resources: - reservations: - devices: - - driver: nvidia - count: 1 - capabilities: [ gpu ] - command: /bin/bash -c "ros2 launch camera_object_detection nuscenes_launch.py" - volumes: - - /mnt/wato-drive2/perception_models/yolov8s.pt:/perception_models/yolov8s.pt + # camera_object_detection: + # build: + # context: .. + # dockerfile: docker/perception/camera_object_detection/camera_object_detection.Dockerfile + # cache_from: + # - "${PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE:?}:${TAG}" + # - "${PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE:?}:main" + # target: deploy + # image: "${PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE:?}:${TAG}" + # deploy: + # resources: + # reservations: + # devices: + # - driver: nvidia + # count: 1 + # capabilities: [ gpu ] + # command: /bin/bash -c "ros2 launch camera_object_detection nuscenes_launch.py" + # volumes: + # - /mnt/wato-drive2/perception_models/yolov8s.pt:/perception_models/yolov8s.pt lidar_object_detection: build: - context: .. + context: ../ dockerfile: docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile cache_from: - "${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE}:${TAG}" - "${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE}:main" - target: deploy + # target: deploy image: "${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch lidar_object_detection lidar_object_detection.launch.py" - traffic_light_detection: - build: - context: .. - dockerfile: docker/perception/traffic_light_detection/traffic_light_detection.Dockerfile - cache_from: - - "${PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE}:${TAG}" - - "${PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE}:main" - target: deploy - image: "${PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE}:${TAG}" - command: /bin/bash -c "ros2 launch traffic_light_detection traffic_light_detection.launch.py" + # traffic_light_detection: + # build: + # context: .. + # dockerfile: docker/perception/traffic_light_detection/traffic_light_detection.Dockerfile + # cache_from: + # - "${PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE}:${TAG}" + # - "${PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE}:main" + # target: deploy + # image: "${PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE}:${TAG}" + # command: /bin/bash -c "ros2 launch traffic_light_detection traffic_light_detection.launch.py" - traffic_sign_detection: - build: - context: .. 
- dockerfile: docker/perception/traffic_sign_detection/traffic_sign_detection.Dockerfile - cache_from: - - "${PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE}:${TAG}" - - "${PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE}:main" - target: deploy - image: "${PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE}:${TAG}" - command: /bin/bash -c "ros2 launch traffic_sign_detection traffic_sign_detection.launch.py" + # traffic_sign_detection: + # build: + # context: .. + # dockerfile: docker/perception/traffic_sign_detection/traffic_sign_detection.Dockerfile + # cache_from: + # - "${PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE}:${TAG}" + # - "${PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE}:main" + # target: deploy + # image: "${PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE}:${TAG}" + # command: /bin/bash -c "ros2 launch traffic_sign_detection traffic_sign_detection.launch.py" - semantic_segmentation: - build: - context: .. - dockerfile: docker/perception/semantic_segmentation/semantic_segmentation.Dockerfile - cache_from: - - "${PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE}:${TAG}" - - "${PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE}:main" - target: deploy - image: "${PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE}:${TAG}" - command: /bin/bash -c "ros2 launch semantic_segmentation semantic_segmentation.launch.py" + # semantic_segmentation: + # build: + # context: .. + # dockerfile: docker/perception/semantic_segmentation/semantic_segmentation.Dockerfile + # cache_from: + # - "${PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE}:${TAG}" + # - "${PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE}:main" + # target: deploy + # image: "${PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE}:${TAG}" + # command: /bin/bash -c "ros2 launch semantic_segmentation semantic_segmentation.launch.py" - lane_detection: - build: - context: .. - dockerfile: docker/perception/lane_detection/lane_detection.Dockerfile - cache_from: - - "${PERCEPTION_LANE_DETECTION_IMAGE}:${TAG}" - - "${PERCEPTION_LANE_DETECTION_IMAGE}:main" - target: deploy - image: "${PERCEPTION_LANE_DETECTION_IMAGE}:${TAG}" - command: /bin/bash -c "ros2 launch lane_detection lane_detection.launch.py" + # lane_detection: + # build: + # context: .. + # dockerfile: docker/perception/lane_detection/lane_detection.Dockerfile + # cache_from: + # - "${PERCEPTION_LANE_DETECTION_IMAGE}:${TAG}" + # - "${PERCEPTION_LANE_DETECTION_IMAGE}:main" + # target: deploy + # image: "${PERCEPTION_LANE_DETECTION_IMAGE}:${TAG}" + # command: /bin/bash -c "ros2 launch lane_detection lane_detection.launch.py" - tracking: - build: - context: .. - dockerfile: docker/perception/tracking/tracking.Dockerfile - cache_from: - - "${PERCEPTION_TRACKING_IMAGE}:${TAG}" - - "${PERCEPTION_TRACKING_IMAGE}:main" - target: deploy - image: "${PERCEPTION_TRACKING_IMAGE}:${TAG}" - command: /bin/bash -c "ros2 launch tracking tracking.launch.py" + # tracking: + # build: + # context: .. 
+ # dockerfile: docker/perception/tracking/tracking.Dockerfile + # cache_from: + # - "${PERCEPTION_TRACKING_IMAGE}:${TAG}" + # - "${PERCEPTION_TRACKING_IMAGE}:main" + # target: deploy + # image: "${PERCEPTION_TRACKING_IMAGE}:${TAG}" + # command: /bin/bash -c "ros2 launch tracking tracking.launch.py" diff --git a/src/perception/lidar_object_detection/lidar_det_py/setup.cfg b/src/perception/lidar_object_detection/lidar_det_py/setup.cfg deleted file mode 100644 index b74c15be..00000000 --- a/src/perception/lidar_object_detection/lidar_det_py/setup.cfg +++ /dev/null @@ -1,4 +0,0 @@ -[develop] -script_dir=$base/lib/lidar_det_py -[install] -install_scripts=$base/lib/lidar_det_py diff --git a/src/perception/lidar_object_detection/lidar_det_py/launch/3d_object_detector.py b/src/perception/lidar_object_detection/lidar_object_detection/launch/3d_object_detector.py similarity index 86% rename from src/perception/lidar_object_detection/lidar_det_py/launch/3d_object_detector.py rename to src/perception/lidar_object_detection/lidar_object_detection/launch/3d_object_detector.py index 05984a36..95786c82 100644 --- a/src/perception/lidar_object_detection/lidar_det_py/launch/3d_object_detector.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/launch/3d_object_detector.py @@ -5,7 +5,7 @@ def generate_launch_description(): return LaunchDescription([ Node( - package='lidar_det_py', + package='lidar_object_detection', executable='inference', name='inference', output='screen', diff --git a/src/perception/lidar_object_detection/lidar_det_py/launch/config.yaml b/src/perception/lidar_object_detection/lidar_object_detection/launch/config.yaml similarity index 100% rename from src/perception/lidar_object_detection/lidar_det_py/launch/config.yaml rename to src/perception/lidar_object_detection/lidar_object_detection/launch/config.yaml diff --git a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/__init__.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/__init__.py similarity index 100% rename from src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/__init__.py rename to src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/__init__.py diff --git a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/inference.py similarity index 100% rename from src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/inference.py rename to src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/inference.py diff --git a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/test_node.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/test_node.py similarity index 100% rename from src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/test_node.py rename to src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/test_node.py diff --git a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/utils/draw_3d.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/utils/draw_3d.py similarity index 100% rename from src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/utils/draw_3d.py rename to src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/utils/draw_3d.py diff --git 
a/src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/utils/global_def b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/utils/global_def similarity index 100% rename from src/perception/lidar_object_detection/lidar_det_py/lidar_det_py/utils/global_def rename to src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/utils/global_def diff --git a/src/perception/lidar_object_detection/lidar_det_py/package.xml b/src/perception/lidar_object_detection/lidar_object_detection/package.xml similarity index 95% rename from src/perception/lidar_object_detection/lidar_det_py/package.xml rename to src/perception/lidar_object_detection/lidar_object_detection/package.xml index 0700ea0c..9ea65b8d 100644 --- a/src/perception/lidar_object_detection/lidar_det_py/package.xml +++ b/src/perception/lidar_object_detection/lidar_object_detection/package.xml @@ -1,7 +1,7 @@ - lidar_det_py + lidar_object_detection 0.0.0 TODO: Package description bolty diff --git a/src/perception/lidar_object_detection/lidar_det_py/resource/lidar_det_py b/src/perception/lidar_object_detection/lidar_object_detection/resource/lidar_object_detection similarity index 100% rename from src/perception/lidar_object_detection/lidar_det_py/resource/lidar_det_py rename to src/perception/lidar_object_detection/lidar_object_detection/resource/lidar_object_detection diff --git a/src/perception/lidar_object_detection/lidar_object_detection/setup.cfg b/src/perception/lidar_object_detection/lidar_object_detection/setup.cfg new file mode 100644 index 00000000..4cbdef16 --- /dev/null +++ b/src/perception/lidar_object_detection/lidar_object_detection/setup.cfg @@ -0,0 +1,4 @@ +[develop] +script_dir=$base/lib/lidar_object_detection +[install] +install_scripts=$base/lib/lidar_object_detection diff --git a/src/perception/lidar_object_detection/lidar_det_py/setup.py b/src/perception/lidar_object_detection/lidar_object_detection/setup.py similarity index 80% rename from src/perception/lidar_object_detection/lidar_det_py/setup.py rename to src/perception/lidar_object_detection/lidar_object_detection/setup.py index 305cea43..c6b99e1f 100644 --- a/src/perception/lidar_object_detection/lidar_det_py/setup.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/setup.py @@ -1,7 +1,7 @@ from setuptools import find_packages, setup from glob import glob -package_name = 'lidar_det_py' +package_name = 'lidar_object_detection' setup( name=package_name, @@ -22,8 +22,8 @@ tests_require=['pytest'], entry_points={ 'console_scripts': [ - 'test_node = lidar_det_py.test_node:main', - 'inference = lidar_det_py.inference:main' + 'test_node = lidar_object_detection.test_node:main', + 'inference = lidar_object_detection.inference:main' ], }, ) diff --git a/watod-config.sh b/watod-config.sh index 637c1624..416fc8dc 100755 --- a/watod-config.sh +++ b/watod-config.sh @@ -13,10 +13,12 @@ ## - samples : starts sample ROS2 pubsub nodes # ACTIVE_MODULES="" +ACTIVE_MODULES="perception" ############################## OPTIONAL CONFIGURATIONS ############################## ## Name to append to docker containers. DEFAULT = "" # COMPOSE_PROJECT_NAME="" +COMPOSE_PROJECT_NAME="danielrhuynh" ## Tag to use. Images are formatted as : with forward slashes replaced with dashes. 
## DEFAULT = "" From 6d08576e7a069bc4ae1b3e0c6f986bb138ff37c9 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Tue, 13 Feb 2024 02:23:59 +0000 Subject: [PATCH 30/70] WIP --- .../lidar_object_detection.Dockerfile | 6 +++--- modules/docker-compose.perception.yaml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile b/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile index 52d7da16..1f728d43 100644 --- a/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile +++ b/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile @@ -62,10 +62,10 @@ RUN . /opt/ros/$ROS_DISTRO/setup.sh && colcon build --cmake-args -DCMAKE_BUILD_T COPY wato_monorepo/docker/wato_ros_entrypoint.sh ${AMENT_WS}/wato_ros_entrypoint.sh ENTRYPOINT ["./wato_ros_entrypoint.sh"] ################################ Prod ######################################### -# FROM build as deploy +FROM build as deploy # Source Cleanup and Security Setup -# RUN chown -R $USER:$USER ${AMENT_WS} +RUN chown -R $USER:$USER ${AMENT_WS} # RUN rm -rf src/* -# USER ${USER} \ No newline at end of file +USER ${USER} \ No newline at end of file diff --git a/modules/docker-compose.perception.yaml b/modules/docker-compose.perception.yaml index 9cef780b..e9eba885 100644 --- a/modules/docker-compose.perception.yaml +++ b/modules/docker-compose.perception.yaml @@ -39,7 +39,7 @@ services: cache_from: - "${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE}:${TAG}" - "${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE}:main" - # target: deploy + target: deploy image: "${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch lidar_object_detection lidar_object_detection.launch.py" # traffic_light_detection: From 41254e2936139eaf585485528123d6c2be02df3b Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Tue, 13 Feb 2024 02:57:03 +0000 Subject: [PATCH 31/70] WIP integrating with watod --- .../lidar_object_detection.Dockerfile | 12 +++++++----- modules/docker-compose.perception.yaml | 4 ++-- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile b/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile index 1f728d43..7d6dd454 100644 --- a/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile +++ b/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile @@ -42,24 +42,26 @@ RUN pip3 install torch-scatter -f https://data.pyg.org/whl/torch-1.13.1+cu116.ht ENV TORCH_CUDA_ARCH_LIST="3.5;5.0;6.0;6.1;7.0;7.5;8.0;8.6+PTX" RUN pip3 install spconv-cu113 pyquaternion numpy==1.23 pillow==8.4 mayavi open3d av2 WORKDIR /home/bolty -# RUN git clone https://github.com/WATonomous/OpenPCDet.git -COPY /OpenPCDet /home/bolty/OpenPCDet +RUN git clone https://github.com/WATonomous/OpenPCDet.git +# COPY /OpenPCDet /home/bolty/OpenPCDet WORKDIR /home/bolty RUN cd OpenPCDet && pip3 install -r requirements.txt RUN pip3 install kornia==0.6.8 RUN pip3 install nuscenes-devkit==1.0.5 +WORKDIR /home/bolty/OpenPCDet/ +RUN python3 setup.py develop ################################ Source ####################################### FROM dependencies as build WORKDIR ${AMENT_WS}/src -COPY wato_monorepo/src/perception/lidar_object_detection lidar_object_detection -COPY wato_monorepo/src/wato_msgs/sample_msgs sample_msgs +COPY src/perception/lidar_object_detection lidar_object_detection +COPY src/wato_msgs/sample_msgs 
sample_msgs RUN apt-get update && rosdep update && \ rosdep install --from-paths . --ignore-src -r -y RUN apt install -y ros-${ROS_DISTRO}-foxglove-bridge WORKDIR ${AMENT_WS} RUN . /opt/ros/$ROS_DISTRO/setup.sh && colcon build --cmake-args -DCMAKE_BUILD_TYPE=Release # Entrypoint setup -COPY wato_monorepo/docker/wato_ros_entrypoint.sh ${AMENT_WS}/wato_ros_entrypoint.sh +COPY /docker/wato_ros_entrypoint.sh ${AMENT_WS}/wato_ros_entrypoint.sh ENTRYPOINT ["./wato_ros_entrypoint.sh"] ################################ Prod ######################################### FROM build as deploy diff --git a/modules/docker-compose.perception.yaml b/modules/docker-compose.perception.yaml index e9eba885..bbd8b585 100644 --- a/modules/docker-compose.perception.yaml +++ b/modules/docker-compose.perception.yaml @@ -34,14 +34,14 @@ services: lidar_object_detection: build: - context: ../ + context: .. dockerfile: docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile cache_from: - "${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE}:${TAG}" - "${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE}:main" target: deploy image: "${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE}:${TAG}" - command: /bin/bash -c "ros2 launch lidar_object_detection lidar_object_detection.launch.py" + command: /bin/bash -c "ros2 run lidar_object_detection inference" # traffic_light_detection: # build: # context: .. From 95fd8cc73e175a9b26e8ed533f8674b742555e40 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Tue, 13 Feb 2024 04:43:26 +0000 Subject: [PATCH 32/70] integrated with watod :) --- modules/docker-compose.perception.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/modules/docker-compose.perception.yaml b/modules/docker-compose.perception.yaml index bbd8b585..700584f8 100644 --- a/modules/docker-compose.perception.yaml +++ b/modules/docker-compose.perception.yaml @@ -42,6 +42,9 @@ services: target: deploy image: "${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE}:${TAG}" command: /bin/bash -c "ros2 run lidar_object_detection inference" + volumes: + - /mnt/wato-drive2/perception_models/voxelnext_nuscenes_kernel1.pth:/home/bolty/OpenPCDet/models/voxelnext_nuscenes_kernel1.pth + - /mnt/wato-drive2/nuscenes-1.0-mini/samples/LIDAR_TOP:/home/bolty/data # traffic_light_detection: # build: # context: .. 
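The two volume mounts added above give the container the VoxelNeXt checkpoint at /home/bolty/OpenPCDet/models/voxelnext_nuscenes_kernel1.pth and the nuScenes-mini LIDAR_TOP sweeps under /home/bolty/data. A minimal sketch of what that mounted lidar data looks like to the inference node, assuming the container paths from the compose file above and the five-float-per-point nuScenes sweep layout; the sweep file name is the node's default sample and is purely illustrative, and load_nuscenes_sweep is a sketch helper, not part of the package:

import numpy as np

# Path comes from the compose mount above; this particular sweep is the
# default sample used by the inference node and is only an example.
SWEEP = "/home/bolty/data/n015-2018-11-21-19-38-26+0800__LIDAR_TOP__1542801000947820.pcd.bin"

def load_nuscenes_sweep(path):
    # nuScenes LIDAR_TOP sweeps are flat float32 buffers with 5 values per
    # point (x, y, z, intensity, plus one extra channel), so reshape to (N, 5).
    return np.fromfile(path, dtype=np.float32).reshape(-1, 5)

if __name__ == "__main__":
    points = load_nuscenes_sweep(SWEEP)
    # The OpenPCDet demo-style dataset wrapper consumes a dict with
    # 'points' and 'frame_id' before batching and moving data to the GPU.
    data_dict = {"points": points, "frame_id": 0}
    print(points.shape)  # expected (N, 5)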
From 48c76f13ec6d68820fcdf9c9f23143a01d006dad Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Tue, 13 Feb 2024 22:06:22 +0000 Subject: [PATCH 33/70] now runs on same frame --- .../lidar_object_detection/inference.py | 4 ++-- watod-config.sh | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/inference.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/inference.py index a7874d05..123ca282 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/inference.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/inference.py @@ -76,7 +76,7 @@ def process_data(self): points = data_dict['points'][:, :3] # Extract the relevant columns (x, y, z) point_cloud_msg = self.create_cloud_xyz32(header, points) # Assuming first 3 columns are x, y, z self.publisher_.publish(point_cloud_msg) - self.publish_bounding_boxes(pred_dicts, "lidar_frame") # Use appropriate frame_id + self.publish_bounding_boxes(pred_dicts, "base_link") # Use appropriate frame_id self.logger.info('Demo done.') @@ -96,7 +96,7 @@ def parse_config(self): def make_header(self): header = Header() header.stamp = self.get_clock().now().to_msg() - header.frame_id = 'lidar_frame' + header.frame_id = 'base_link' return header def create_cloud_xyz32(self, header, points): diff --git a/watod-config.sh b/watod-config.sh index 416fc8dc..b0f57521 100755 --- a/watod-config.sh +++ b/watod-config.sh @@ -13,7 +13,7 @@ ## - samples : starts sample ROS2 pubsub nodes # ACTIVE_MODULES="" -ACTIVE_MODULES="perception" +ACTIVE_MODULES="perception infrastructure" ############################## OPTIONAL CONFIGURATIONS ############################## ## Name to append to docker containers. 
DEFAULT = "" From 2ab53057e08484717b24e099bee4a8f76cb41abb Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Wed, 14 Feb 2024 02:11:27 +0000 Subject: [PATCH 34/70] working with ros bags --- modules/docker-compose.perception.yaml | 2 +- .../config/nuscenes_config.yaml | 5 + .../launch/3d_object_detector.py | 13 - .../lidar_object_detection/launch/config.yaml | 15 -- .../launch/nuscenes_launch.py | 27 +++ ...ence.py => lidar_object_detection_node.py} | 119 +++++----- .../lidar_object_detection/test_node.py | 34 --- .../lidar_object_detection/utils/draw_3d.py | 222 ------------------ .../lidar_object_detection/utils/global_def | 30 --- .../lidar_object_detection/setup.py | 7 +- 10 files changed, 100 insertions(+), 374 deletions(-) create mode 100644 src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml delete mode 100644 src/perception/lidar_object_detection/lidar_object_detection/launch/3d_object_detector.py delete mode 100644 src/perception/lidar_object_detection/lidar_object_detection/launch/config.yaml create mode 100644 src/perception/lidar_object_detection/lidar_object_detection/launch/nuscenes_launch.py rename src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/{inference.py => lidar_object_detection_node.py} (69%) delete mode 100644 src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/test_node.py delete mode 100644 src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/utils/draw_3d.py delete mode 100644 src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/utils/global_def diff --git a/modules/docker-compose.perception.yaml b/modules/docker-compose.perception.yaml index 700584f8..4d9a75b1 100644 --- a/modules/docker-compose.perception.yaml +++ b/modules/docker-compose.perception.yaml @@ -41,7 +41,7 @@ services: - "${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE}:main" target: deploy image: "${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE}:${TAG}" - command: /bin/bash -c "ros2 run lidar_object_detection inference" + command: /bin/bash -c "ros2 launch lidar_object_detection nuscenes_launch.py" volumes: - /mnt/wato-drive2/perception_models/voxelnext_nuscenes_kernel1.pth:/home/bolty/OpenPCDet/models/voxelnext_nuscenes_kernel1.pth - /mnt/wato-drive2/nuscenes-1.0-mini/samples/LIDAR_TOP:/home/bolty/data diff --git a/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml b/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml new file mode 100644 index 00000000..7a997ac9 --- /dev/null +++ b/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml @@ -0,0 +1,5 @@ +camera_object_detection_node: + ros__parameters: + camera_topic: /LIDAR_TOP + publish_vis_topic: /test + model_path: /perception_models/voxelnext_nuscenes_kernel1.pth diff --git a/src/perception/lidar_object_detection/lidar_object_detection/launch/3d_object_detector.py b/src/perception/lidar_object_detection/lidar_object_detection/launch/3d_object_detector.py deleted file mode 100644 index 95786c82..00000000 --- a/src/perception/lidar_object_detection/lidar_object_detection/launch/3d_object_detector.py +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env python3 -from launch import LaunchDescription -from launch_ros.actions import Node - -def generate_launch_description(): - return LaunchDescription([ - Node( - package='lidar_object_detection', - executable='inference', - name='inference', - 
output='screen', - ), - ]) diff --git a/src/perception/lidar_object_detection/lidar_object_detection/launch/config.yaml b/src/perception/lidar_object_detection/lidar_object_detection/launch/config.yaml deleted file mode 100644 index d74146d7..00000000 --- a/src/perception/lidar_object_detection/lidar_object_detection/launch/config.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# which go to the model you want to use -cfg_root: "/home/bolty/workspace/OpenPCDet/tools/cfgs/kitti_models/pv_rcnn.yaml" - -# which go to the .pth -model_path: "/home/kin/workspace/OpenPCDet/tools/pv_rcnn_8369.pth" - -# pointcloud_topic in the bag if you are not sure -# rosbag info xxx.bag check the topic name -pointcloud_topic: "/velodyne_points" # default is from kitti2bag: /velodyne_points - -# only the predict probablity over this threshold will be draw a box -threshold: 0.8 - -move_lidar_center: 20 -viz_rate: 1.0 \ No newline at end of file diff --git a/src/perception/lidar_object_detection/lidar_object_detection/launch/nuscenes_launch.py b/src/perception/lidar_object_detection/lidar_object_detection/launch/nuscenes_launch.py new file mode 100644 index 00000000..64cc1ef5 --- /dev/null +++ b/src/perception/lidar_object_detection/lidar_object_detection/launch/nuscenes_launch.py @@ -0,0 +1,27 @@ +from launch import LaunchDescription +from launch_ros.actions import Node +from ament_index_python.packages import get_package_share_directory +import os + + +def generate_launch_description(): + ld = LaunchDescription() + config = os.path.join( + get_package_share_directory('lidar_object_detection'), + 'config', + 'nuscenes_config.yaml' + ) + + # nodes + lidar_object_detection = Node( + package='lidar_object_detection', + executable='lidar_object_detection_node', + name='lidar_object_detection_node', + parameters=[config], + arguments=['--ros-args', '--log-level', 'info'] + ) + + # finalize + ld.add_action(lidar_object_detection) + + return ld diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/inference.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py similarity index 69% rename from src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/inference.py rename to src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py index 123ca282..6a93d39c 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/inference.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py @@ -17,18 +17,28 @@ OPEN3D_FLAG = True - -class LidarDemoNode(Node): +class LidarObjectDetection(Node): def __init__(self): - super().__init__('lidar_demo_node') - self.publisher_ = self.create_publisher(PointCloud2, 'lidar_data', 10) + super().__init__('lidar_object_detection') + self.declare_parameter("model_path", "/perception_models/voxelnext_nuscenes_kernel1.pth") + self.declare_parameter("lidar_topic", "/LIDAR_TOP") + self.model_path = self.get_parameter("model_path").value + self.lidar_data = self.get_parameter("lidar_topic").value + self.bbox_publisher = self.create_publisher(MarkerArray, '/bounding_boxes', 10) + self.subscription = self.create_subscription( + PointCloud2, + '/LIDAR_TOP', + self.point_cloud_callback, + 10) + self.subscription + args, cfg = self.parse_config() self.logger = common_utils.create_logger() - self.logger.info('-----------------Quick 
Demo of OpenPCDet-------------------------') + self.logger.info('-----------------Starting Lidar Object Detection-------------------------') - self.demo_dataset = DemoDataset( + self.demo_dataset = LidarDataset( dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False, root_path=Path(args.data_path), ext=args.ext, logger=self.logger ) @@ -38,7 +48,25 @@ def __init__(self): self.model.cuda() self.model.eval() - self.process_data() + def point_cloud_callback(self, msg): + points = self.pointcloud2_to_xyz_array(msg) + data_dict = { + 'points': points, + 'frame_id': msg.header.frame_id, + } + data_dict = self.demo_dataset.prepare_data(data_dict=data_dict) + data_dict = self.demo_dataset.collate_batch([data_dict]) + load_data_to_gpu(data_dict) + + with torch.no_grad(): + pred_dicts, _ = self.model.forward(data_dict) + + self.publish_bounding_boxes(pred_dicts, msg.header.frame_id) + + def pointcloud2_to_xyz_array(self, cloud_msg): + cloud_array = np.frombuffer(cloud_msg.data, dtype=np.float32) + cloud_array = cloud_array.reshape(cloud_msg.height * cloud_msg.width, 5) + return cloud_array def publish_bounding_boxes(self, pred_dicts, frame_id): marker_array = MarkerArray() @@ -52,34 +80,18 @@ def publish_bounding_boxes(self, pred_dicts, frame_id): marker.pose.position.x = float(box[0]) marker.pose.position.y = float(box[1]) marker.pose.position.z = float(box[2]) - marker.pose.orientation.w = 1.0 # Assuming no rotation; modify as needed - marker.scale.x = float(box[3]) # Size in X direction - marker.scale.y = float(box[4]) # Size in Y direction - marker.scale.z = float(box[5]) # Size in Z direction - marker.color.a = 0.8 # Alpha - marker.color.r = 1.0 # Red - marker.color.g = 0.0 # Green - marker.color.b = 0.0 # Blue + marker.pose.orientation.w = 1.0 + marker.scale.x = float(box[3]) + marker.scale.y = float(box[4]) + marker.scale.z = float(box[5]) + marker.color.a = 0.8 + marker.color.r = 1.0 + marker.color.g = 0.0 + marker.color.b = 0.0 marker_array.markers.append(marker) self.bbox_publisher.publish(marker_array) - def process_data(self): - with torch.no_grad(): - for idx, data_dict in enumerate(self.demo_dataset): - data_dict = self.demo_dataset.collate_batch([data_dict]) - load_data_to_gpu(data_dict) - pred_dicts, _ = self.model.forward(data_dict) - - # Convert and publish data - header = self.make_header() - points = data_dict['points'][:, :3] # Extract the relevant columns (x, y, z) - point_cloud_msg = self.create_cloud_xyz32(header, points) # Assuming first 3 columns are x, y, z - self.publisher_.publish(point_cloud_msg) - self.publish_bounding_boxes(pred_dicts, "base_link") # Use appropriate frame_id - - self.logger.info('Demo done.') - def parse_config(self): parser = argparse.ArgumentParser(description='arg parser') parser.add_argument('--cfg_file', type=str, default='/home/bolty/OpenPCDet/tools/cfgs/nuscenes_models/cbgs_voxel0075_voxelnext.yaml', @@ -89,7 +101,7 @@ def parse_config(self): parser.add_argument('--ckpt', type=str, default="/home/bolty/OpenPCDet/models/voxelnext_nuscenes_kernel1.pth", help='specify the pretrained model') parser.add_argument('--ext', type=str, default='.bin', help='specify the extension of your point cloud data file') - args, unknown = parser.parse_known_args() + args, _ = parser.parse_known_args() cfg_from_yaml_file(args.cfg_file, cfg) return args, cfg @@ -126,27 +138,25 @@ def create_cloud_xyz32(self, header, points): [np.sin(z_angle), np.cos(z_angle), 0], [0, 0, 1] ]) - # Apply transformation + points_np = points.cpu().numpy() points_np_y = 
np.dot(points_np, rotation_matrix_y) points_transformed = np.dot(points_np_y, rotation_matrix_z.T) - # Create a PointCloud2 message + cloud = PointCloud2() cloud.header = header - cloud.height = 1 # Unstructured point cloud + cloud.height = 1 cloud.width = points_transformed.shape[0] cloud.fields = fields - cloud.is_bigendian = False # Assuming little endian - cloud.point_step = 12 # FLOAT32 (4 bytes) * 3 (x, y, z) + cloud.is_bigendian = False + cloud.point_step = 12 cloud.row_step = cloud.point_step * cloud.width cloud.is_dense = bool(np.isfinite(points_transformed).all()) cloud.data = np.asarray(points_transformed, np.float32).tobytes() return cloud - - -class DemoDataset(DatasetTemplate): +class LidarDataset(DatasetTemplate): def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None, ext='.bin'): """ Args: @@ -169,30 +179,29 @@ def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logg def __len__(self): return len(self.sample_file_list) - def __getitem__(self, index): - if self.ext == '.bin': - points = np.fromfile(self.sample_file_list[index], dtype=np.float32).reshape(-1, 5) - elif self.ext == '.npy': - points = np.load(self.sample_file_list[index]) - else: - raise NotImplementedError + # def __getitem__(self, index): + # if self.ext == '.bin': + # points = np.fromfile(self.sample_file_list[index], dtype=np.float32).reshape(-1, 5) + # elif self.ext == '.npy': + # points = np.load(self.sample_file_list[index]) + # else: + # raise NotImplementedError - input_dict = { - 'points': points, - 'frame_id': index, - } + # input_dict = { + # 'points': points, + # 'frame_id': index, + # } - data_dict = self.prepare_data(data_dict=input_dict) - return data_dict + # data_dict = self.prepare_data(data_dict=input_dict) + # return data_dict def main(args=None): rclpy.init(args=args) - node = LidarDemoNode() + node = LidarObjectDetection() rclpy.spin(node) node.destroy_node() rclpy.shutdown() - if __name__ == '__main__': main() \ No newline at end of file diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/test_node.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/test_node.py deleted file mode 100644 index 934bd9f1..00000000 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/test_node.py +++ /dev/null @@ -1,34 +0,0 @@ -import rclpy -from rclpy.node import Node -from std_msgs.msg import String -from time import sleep - -class TestNode(Node): - - def __init__(self): - super().__init__('test_node') - self.publisher_ = self.create_publisher(String, 'chatter', 10) - self.timer = self.create_timer(1.0, self.timer_callback) - self.count = 0 - - def timer_callback(self): - msg = String() - msg.data = 'Hello ROS 2 world! 
%d' % self.count - self.publisher_.publish(msg) - self.get_logger().info('Publishing: "%s"' % msg.data) - self.count += 1 - -def main(): - rclpy.init() - test_node = TestNode() - - try: - rclpy.spin(test_node) - except KeyboardInterrupt: - pass - - test_node.destroy_node() - rclpy.shutdown() - -if __name__ == '__main__': - main() diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/utils/draw_3d.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/utils/draw_3d.py deleted file mode 100644 index c5fbaefd..00000000 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/utils/draw_3d.py +++ /dev/null @@ -1,222 +0,0 @@ -#! /usr/bin/env python3.8 -""" -Created on 23 Dec 2022 - -@author: Kin ZHANG (https://kin-zhang.github.io/) -% Copyright (C) 2022, RPL, KTH Royal Institute of Technology - -Part of codes also refers: -1. https://github.com/kwea123/ROS_notes -2. https://github.com/seaside2mm/sharecode - -""" - -import rospy -from visualization_msgs.msg import Marker, MarkerArray -from geometry_msgs.msg import Point -import numpy as np -from .global_def import * - -""" -This file is from OpenPCDet visualize_utils.py -Check their repo: https://github.com/open-mmlab/OpenPCDet -""" -import numpy as np -import torch - -box_colormap = [ - [1, 1, 1], - [0, 1, 0], - [0, 1, 1], - [1, 1, 0], -] - - -def check_numpy_to_torch(x): - if isinstance(x, np.ndarray): - return torch.from_numpy(x).float(), True - return x, False - - -def rotate_points_along_z(points, angle): - """ - Args: - points: (B, N, 3 + C) - angle: (B), angle along z-axis, angle increases x ==> y - Returns: - - """ - points, is_numpy = check_numpy_to_torch(points) - angle, _ = check_numpy_to_torch(angle) - - cosa = torch.cos(angle) - sina = torch.sin(angle) - zeros = angle.new_zeros(points.shape[0]) - ones = angle.new_ones(points.shape[0]) - rot_matrix = torch.stack(( - cosa, sina, zeros, - -sina, cosa, zeros, - zeros, zeros, ones - ), dim=1).view(-1, 3, 3).float() - points_rot = torch.matmul(points[:, :, 0:3], rot_matrix) - points_rot = torch.cat((points_rot, points[:, :, 3:]), dim=-1) - return points_rot.numpy() if is_numpy else points_rot - - -def boxes_to_corners_3d(boxes3d): - """ - 7 -------- 4 - /| /| - 6 -------- 5 . - | | | | - . 
3 -------- 0 - |/ |/ - 2 -------- 1 - Args: - boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center - - Returns: - """ - boxes3d, is_numpy = check_numpy_to_torch(boxes3d) - - template = boxes3d.new_tensor(( - [1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1], - [1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, 1], - )) / 2 - - corners3d = boxes3d[:, None, 3:6].repeat(1, 8, 1) * template[None, :, :] - corners3d = rotate_points_along_z(corners3d.view(-1, 8, 3), boxes3d[:, 6]).view(-1, 8, 3) - corners3d += boxes3d[:, None, 0:3] - - return corners3d.numpy() if is_numpy else corners3d - -class Draw3DBox: - def __init__(self, box3d_pub, marker_frame_id = 'velodyne', rate=10): - self.frame_id = marker_frame_id - self.lifetime = 1.0/rate - self.box3d_pub = box3d_pub - - def set_frame_id(self, marker_frame_id): - self.frame_id = marker_frame_id - - def publish_3dbox(self, dt_box_lidar, track_ids, types=None, publish_id=True, move_lidar_center=20, publish_type=True): - """ - Publish 3d boxes in velodyne coordinate, with color specified by object_types - If object_types is None, set all color to cyan - corners_3d_velos : list of (8, 4) 3d corners - """ - # (N, 8, 3) - # -move_lidar_center - dt_box_lidar[:,0] = dt_box_lidar[:,0]-move_lidar_center - corners_3d_velos = boxes_to_corners_3d(dt_box_lidar) - - marker_array = MarkerArray() - - for i, corners_3d_velo in enumerate(corners_3d_velos): - # 3d box - marker = Marker() - marker.header.frame_id = self.frame_id - marker.header.stamp = rospy.Time.now() - marker.id = i - marker.action = Marker.ADD - marker.lifetime = rospy.Duration(self.lifetime) - marker.type = Marker.LINE_LIST - - b, g, r = DETECTION_COLOR_MAP[types[i]] - if types is None: - marker.color.r = 0.0 - marker.color.g = 1.0 - marker.color.b = 1.0 - else: - marker.color.r = r/255.0 - marker.color.g = g/255.0 - marker.color.b = b/255.0 - marker.color.a = 1.0 - marker.scale.x = 0.1 - - marker.points = [] - for l in LINES: - p1 = corners_3d_velo[l[0]] - marker.points.append(Point(p1[0], p1[1], p1[2])) - p2 = corners_3d_velo[l[1]] - marker.points.append(Point(p2[0], p2[1], p2[2])) - marker_array.markers.append(marker) - - # add track id - if publish_id and track_ids != -1: - track_id = track_ids[i] - text_marker = Marker() - text_marker.header.frame_id = self.frame_id - text_marker.header.stamp = rospy.Time.now() - - text_marker.id = track_id + 1000 - text_marker.action = Marker.ADD - text_marker.lifetime = rospy.Duration(self.lifetime) - text_marker.type = Marker.TEXT_VIEW_FACING - - p4 = corners_3d_velo[4] # upper front left corner - - text_marker.pose.position.x = p4[0] - text_marker.pose.position.y = p4[1] - text_marker.pose.position.z = p4[2] + 0.5 - - text_marker.text = str(track_id) - - text_marker.scale.x = 1 - text_marker.scale.y = 1 - text_marker.scale.z = 1 - - if types is None: - text_marker.color.r = 0.0 - text_marker.color.g = 1.0 - text_marker.color.b = 1.0 - else: - b, g, r = DETECTION_COLOR_MAP[types[i]] - text_marker.color.r = r/255.0 - text_marker.color.g = g/255.0 - text_marker.color.b = b/255.0 - text_marker.color.a = 1.0 - marker_array.markers.append(text_marker) - - if publish_type: - text_marker = Marker() - text_marker.header.frame_id = self.frame_id - text_marker.header.stamp = rospy.Time.now() - - text_marker.id = i + 1000 - text_marker.action = Marker.ADD - text_marker.lifetime = rospy.Duration(self.lifetime) - text_marker.type = Marker.TEXT_VIEW_FACING - - bc = (corners_3d_velo[6] + corners_3d_velo[7] + corners_3d_velo[2] + corners_3d_velo[3])/4 # 
back center - - text_marker.pose.position.x = bc[0] - text_marker.pose.position.y = bc[1] - text_marker.pose.position.z = bc[2] + 1.5 - - text_marker.text = types[i] - text_marker.scale.x = 1 - text_marker.scale.y = 1 - text_marker.scale.z = 1 - b, g, r = DETECTION_COLOR_MAP[types[i]] - text_marker.color.r = r/255.0 - text_marker.color.g = g/255.0 - text_marker.color.b = b/255.0 - text_marker.color.a = 1.0 - marker_array.markers.append(text_marker) - - self.box3d_pub.publish(marker_array) - - def compute_3d_box_cam2(self, h, w, l, x, y, z, yaw): - """ - Return : 3xn in cam2 coordinate - """ - R = np.array([[np.cos(yaw), 0, np.sin(yaw)], [0, 1, 0], [-np.sin(yaw), 0, np.cos(yaw)]]) - x_corners = [l/2,l/2,-l/2,-l/2,l/2,l/2,-l/2,-l/2] - y_corners = [0,0,0,0,-h,-h,-h,-h] - z_corners = [w/2,-w/2,-w/2,w/2,w/2,-w/2,-w/2,w/2] - corners_3d_cam2 = np.dot(R, np.vstack([x_corners,y_corners,z_corners])) - corners_3d_cam2[0,:] += x - corners_3d_cam2[1,:] += y - corners_3d_cam2[2,:] += z - return corners_3d_cam2 \ No newline at end of file diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/utils/global_def b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/utils/global_def deleted file mode 100644 index d318a6ac..00000000 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/utils/global_def +++ /dev/null @@ -1,30 +0,0 @@ -#! /usr/bin/env python3.8 -""" -Created on 23 Dec 2022 - -@author: Kin ZHANG (https://kin-zhang.github.io/) -% Copyright (C) 2022, RPL, KTH Royal Institute of Technology - -Part of codes also refers: https://github.com/kwea123/ROS_notes -""" - -DETECTION_COLOR_MAP = {'Car': (255,255,0), - 'Pedestrian': (0, 226, 255), - 'Cyclist': (141, 40, 255)} # color for detection, in format bgr - -# connect vertic -LINES = [[0, 1], [1, 2], [2, 3], [3, 0]] # lower face -LINES+= [[4, 5], [5, 6], [6, 7], [7, 4]] # upper face -LINES+= [[4, 0], [5, 1], [6, 2], [7, 3]] # connect lower face and upper face -LINES+= [[4, 1], [5, 0]] # front face and draw x - -class bc: - HEADER = '\033[95m' - OKBLUE = '\033[94m' - OKCYAN = '\033[96m' - OKGREEN = '\033[92m' - WARNING = '\033[93m' - FAIL = '\033[91m' - ENDC = '\033[0m' - BOLD = '\033[1m' - UNDERLINE = '\033[4m' \ No newline at end of file diff --git a/src/perception/lidar_object_detection/lidar_object_detection/setup.py b/src/perception/lidar_object_detection/lidar_object_detection/setup.py index c6b99e1f..efc027d8 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/setup.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/setup.py @@ -15,15 +15,14 @@ ], install_requires=['setuptools'], zip_safe=True, - maintainer='bolty', - maintainer_email='bolty@todo.todo', + maintainer='Dan Huynh', + maintainer_email='danielrhuynh@watonomous.ca', description='TODO: Package description', license='TODO: License declaration', tests_require=['pytest'], entry_points={ 'console_scripts': [ - 'test_node = lidar_object_detection.test_node:main', - 'inference = lidar_object_detection.inference:main' + 'lidar_object_detection_node = lidar_object_detection.lidar_object_detection_node:main' ], }, ) From 0eb740ce088e6da0fe1ea4db696e83545fb27dfe Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Wed, 14 Feb 2024 02:47:10 +0000 Subject: [PATCH 35/70] removing dependancy on static .bin --- .../lidar_object_detection_node.py | 49 ++----------------- 1 file changed, 4 insertions(+), 45 deletions(-) diff --git 
a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py index 6a93d39c..2b7b3c85 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py @@ -1,6 +1,4 @@ import argparse -import glob -from pathlib import Path import rclpy from rclpy.node import Node from std_msgs.msg import Header @@ -36,12 +34,10 @@ def __init__(self): args, cfg = self.parse_config() self.logger = common_utils.create_logger() - self.logger.info('-----------------Starting Lidar Object Detection-------------------------') + self.logger.info('-------------------------Starting Lidar Object Detection-------------------------') self.demo_dataset = LidarDataset( - dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False, - root_path=Path(args.data_path), ext=args.ext, logger=self.logger - ) + dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False, logger=self.logger) self.model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=self.demo_dataset) self.model.load_params_from_file(filename=args.ckpt, logger=self.logger, to_cpu=True) @@ -96,10 +92,7 @@ def parse_config(self): parser = argparse.ArgumentParser(description='arg parser') parser.add_argument('--cfg_file', type=str, default='/home/bolty/OpenPCDet/tools/cfgs/nuscenes_models/cbgs_voxel0075_voxelnext.yaml', help='specify the config for demo') - parser.add_argument('--data_path', type=str, default='/home/bolty/data/n015-2018-11-21-19-38-26+0800__LIDAR_TOP__1542801000947820.pcd.bin', - help='specify the point cloud data file or directory') parser.add_argument('--ckpt', type=str, default="/home/bolty/OpenPCDet/models/voxelnext_nuscenes_kernel1.pth", help='specify the pretrained model') - parser.add_argument('--ext', type=str, default='.bin', help='specify the extension of your point cloud data file') args, _ = parser.parse_known_args() cfg_from_yaml_file(args.cfg_file, cfg) @@ -157,44 +150,10 @@ def create_cloud_xyz32(self, header, points): return cloud class LidarDataset(DatasetTemplate): - def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None, ext='.bin'): - """ - Args: - root_path: - dataset_cfg: - class_names: - training: - logger: - """ + def __init__(self, dataset_cfg, class_names, training=True, logger=None, ext='.bin'): super().__init__( - dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger + dataset_cfg=dataset_cfg, class_names=class_names, training=training, logger=logger ) - self.root_path = root_path - self.ext = ext - data_file_list = glob.glob(str(root_path / f'*{self.ext}')) if self.root_path.is_dir() else [self.root_path] - - data_file_list.sort() - self.sample_file_list = data_file_list - - def __len__(self): - return len(self.sample_file_list) - - # def __getitem__(self, index): - # if self.ext == '.bin': - # points = np.fromfile(self.sample_file_list[index], dtype=np.float32).reshape(-1, 5) - # elif self.ext == '.npy': - # points = np.load(self.sample_file_list[index]) - # else: - # raise NotImplementedError - - # input_dict = { - # 'points': points, - # 'frame_id': index, - # } - - # data_dict = self.prepare_data(data_dict=input_dict) - # return 
data_dict - def main(args=None): rclpy.init(args=args) From 517fba271b740e0848d3a62e1da8882f0840a0ea Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Wed, 14 Feb 2024 02:51:04 +0000 Subject: [PATCH 36/70] refactoring --- .../lidar_object_detection/config/nuscenes_config.yaml | 6 +++--- .../lidar_object_detection_node.py | 10 ++++++---- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml b/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml index 7a997ac9..3613a87c 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml +++ b/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml @@ -1,5 +1,5 @@ camera_object_detection_node: ros__parameters: - camera_topic: /LIDAR_TOP - publish_vis_topic: /test - model_path: /perception_models/voxelnext_nuscenes_kernel1.pth + lidar_topic: /LIDAR_TOP + model_config_path: /home/bolty/OpenPCDet/tools/cfgs/nuscenes_models/cbgs_voxel0075_voxelnext.yaml + model_path: /home/bolty/OpenPCDet/models/voxelnext_nuscenes_kernel1.pth diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py index 2b7b3c85..4627e7c7 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py @@ -18,16 +18,18 @@ class LidarObjectDetection(Node): def __init__(self): super().__init__('lidar_object_detection') - self.declare_parameter("model_path", "/perception_models/voxelnext_nuscenes_kernel1.pth") + self.declare_parameter("model_path", "/home/bolty/OpenPCDet/models/voxelnext_nuscenes_kernel1.pth") + self.declare_parameter("model_config_path", "/home/bolty/OpenPCDet/tools/cfgs/nuscenes_models/cbgs_voxel0075_voxelnext.yaml") self.declare_parameter("lidar_topic", "/LIDAR_TOP") self.model_path = self.get_parameter("model_path").value + self.model_config_path = self.get_parameter("model_config_path").value self.lidar_data = self.get_parameter("lidar_topic").value self.bbox_publisher = self.create_publisher(MarkerArray, '/bounding_boxes', 10) self.subscription = self.create_subscription( PointCloud2, - '/LIDAR_TOP', + self.lidar_data, self.point_cloud_callback, 10) self.subscription @@ -90,9 +92,9 @@ def publish_bounding_boxes(self, pred_dicts, frame_id): def parse_config(self): parser = argparse.ArgumentParser(description='arg parser') - parser.add_argument('--cfg_file', type=str, default='/home/bolty/OpenPCDet/tools/cfgs/nuscenes_models/cbgs_voxel0075_voxelnext.yaml', + parser.add_argument('--cfg_file', type=str, default=self.model_config_path, help='specify the config for demo') - parser.add_argument('--ckpt', type=str, default="/home/bolty/OpenPCDet/models/voxelnext_nuscenes_kernel1.pth", help='specify the pretrained model') + parser.add_argument('--ckpt', type=str, default=self.model_path, help='specify the pretrained model') args, _ = parser.parse_known_args() cfg_from_yaml_file(args.cfg_file, cfg) From 97676f70acc3406a8efe8f4b84c2bf4e3dc70d0e Mon Sep 17 00:00:00 2001 From: Dan Ryan Huynh <89366190+danielrhuynh@users.noreply.github.com> Date: Tue, 13 Feb 2024 22:02:45 -0500 Subject: [PATCH 37/70] Update 
watod-config.sh --- watod-config.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/watod-config.sh b/watod-config.sh index b0f57521..637c1624 100755 --- a/watod-config.sh +++ b/watod-config.sh @@ -13,12 +13,10 @@ ## - samples : starts sample ROS2 pubsub nodes # ACTIVE_MODULES="" -ACTIVE_MODULES="perception infrastructure" ############################## OPTIONAL CONFIGURATIONS ############################## ## Name to append to docker containers. DEFAULT = "" # COMPOSE_PROJECT_NAME="" -COMPOSE_PROJECT_NAME="danielrhuynh" ## Tag to use. Images are formatted as : with forward slashes replaced with dashes. ## DEFAULT = "" From 95ec494b1de149f2f2826e2a0545cdd88ce1c944 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Wed, 14 Feb 2024 03:04:51 +0000 Subject: [PATCH 38/70] re-enabling other services --- modules/docker-compose.perception.yaml | 159 ++++++++++++------------- 1 file changed, 79 insertions(+), 80 deletions(-) diff --git a/modules/docker-compose.perception.yaml b/modules/docker-compose.perception.yaml index 4d9a75b1..544cf7ec 100644 --- a/modules/docker-compose.perception.yaml +++ b/modules/docker-compose.perception.yaml @@ -1,37 +1,36 @@ version: "3.8" services: - # radar_object_detection: - # build: - # context: .. - # dockerfile: docker/perception/radar_object_detection/radar_object_detection.Dockerfile - # cache_from: - # - "${PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE:?}:${TAG}" - # - "${PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE:?}:main" - # target: deploy - # image: "${PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE:?}:${TAG}" - # command: /bin/bash -c "ros2 launch radar_object_detection radar_object_detection.launch.py" - - # camera_object_detection: - # build: - # context: .. - # dockerfile: docker/perception/camera_object_detection/camera_object_detection.Dockerfile - # cache_from: - # - "${PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE:?}:${TAG}" - # - "${PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE:?}:main" - # target: deploy - # image: "${PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE:?}:${TAG}" - # deploy: - # resources: - # reservations: - # devices: - # - driver: nvidia - # count: 1 - # capabilities: [ gpu ] - # command: /bin/bash -c "ros2 launch camera_object_detection nuscenes_launch.py" - # volumes: - # - /mnt/wato-drive2/perception_models/yolov8s.pt:/perception_models/yolov8s.pt + radar_object_detection: + build: + context: .. + dockerfile: docker/perception/radar_object_detection/radar_object_detection.Dockerfile + cache_from: + - "${PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE:?}:${TAG}" + - "${PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE:?}:main" + target: deploy + image: "${PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE:?}:${TAG}" + command: /bin/bash -c "ros2 launch radar_object_detection radar_object_detection.launch.py" + camera_object_detection: + build: + context: .. + dockerfile: docker/perception/camera_object_detection/camera_object_detection.Dockerfile + cache_from: + - "${PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE:?}:${TAG}" + - "${PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE:?}:main" + target: deploy + image: "${PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE:?}:${TAG}" + deploy: + resources: + reservations: + devices: + - driver: nvidia + count: 1 + capabilities: [ gpu ] + command: /bin/bash -c "ros2 launch camera_object_detection nuscenes_launch.py" + volumes: + - /mnt/wato-drive2/perception_models/yolov8s.pt:/perception_models/yolov8s.pt lidar_object_detection: build: context: .. 
@@ -45,57 +44,57 @@ services: volumes: - /mnt/wato-drive2/perception_models/voxelnext_nuscenes_kernel1.pth:/home/bolty/OpenPCDet/models/voxelnext_nuscenes_kernel1.pth - /mnt/wato-drive2/nuscenes-1.0-mini/samples/LIDAR_TOP:/home/bolty/data - # traffic_light_detection: - # build: - # context: .. - # dockerfile: docker/perception/traffic_light_detection/traffic_light_detection.Dockerfile - # cache_from: - # - "${PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE}:${TAG}" - # - "${PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE}:main" - # target: deploy - # image: "${PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE}:${TAG}" - # command: /bin/bash -c "ros2 launch traffic_light_detection traffic_light_detection.launch.py" + traffic_light_detection: + build: + context: .. + dockerfile: docker/perception/traffic_light_detection/traffic_light_detection.Dockerfile + cache_from: + - "${PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE}:${TAG}" + - "${PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE}:main" + target: deploy + image: "${PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE}:${TAG}" + command: /bin/bash -c "ros2 launch traffic_light_detection traffic_light_detection.launch.py" - # traffic_sign_detection: - # build: - # context: .. - # dockerfile: docker/perception/traffic_sign_detection/traffic_sign_detection.Dockerfile - # cache_from: - # - "${PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE}:${TAG}" - # - "${PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE}:main" - # target: deploy - # image: "${PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE}:${TAG}" - # command: /bin/bash -c "ros2 launch traffic_sign_detection traffic_sign_detection.launch.py" + traffic_sign_detection: + build: + context: .. + dockerfile: docker/perception/traffic_sign_detection/traffic_sign_detection.Dockerfile + cache_from: + - "${PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE}:${TAG}" + - "${PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE}:main" + target: deploy + image: "${PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE}:${TAG}" + command: /bin/bash -c "ros2 launch traffic_sign_detection traffic_sign_detection.launch.py" - # semantic_segmentation: - # build: - # context: .. - # dockerfile: docker/perception/semantic_segmentation/semantic_segmentation.Dockerfile - # cache_from: - # - "${PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE}:${TAG}" - # - "${PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE}:main" - # target: deploy - # image: "${PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE}:${TAG}" - # command: /bin/bash -c "ros2 launch semantic_segmentation semantic_segmentation.launch.py" + semantic_segmentation: + build: + context: .. + dockerfile: docker/perception/semantic_segmentation/semantic_segmentation.Dockerfile + cache_from: + - "${PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE}:${TAG}" + - "${PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE}:main" + target: deploy + image: "${PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE}:${TAG}" + command: /bin/bash -c "ros2 launch semantic_segmentation semantic_segmentation.launch.py" - # lane_detection: - # build: - # context: .. - # dockerfile: docker/perception/lane_detection/lane_detection.Dockerfile - # cache_from: - # - "${PERCEPTION_LANE_DETECTION_IMAGE}:${TAG}" - # - "${PERCEPTION_LANE_DETECTION_IMAGE}:main" - # target: deploy - # image: "${PERCEPTION_LANE_DETECTION_IMAGE}:${TAG}" - # command: /bin/bash -c "ros2 launch lane_detection lane_detection.launch.py" + lane_detection: + build: + context: .. 
+ dockerfile: docker/perception/lane_detection/lane_detection.Dockerfile + cache_from: + - "${PERCEPTION_LANE_DETECTION_IMAGE}:${TAG}" + - "${PERCEPTION_LANE_DETECTION_IMAGE}:main" + target: deploy + image: "${PERCEPTION_LANE_DETECTION_IMAGE}:${TAG}" + command: /bin/bash -c "ros2 launch lane_detection lane_detection.launch.py" - # tracking: - # build: - # context: .. - # dockerfile: docker/perception/tracking/tracking.Dockerfile - # cache_from: - # - "${PERCEPTION_TRACKING_IMAGE}:${TAG}" - # - "${PERCEPTION_TRACKING_IMAGE}:main" - # target: deploy - # image: "${PERCEPTION_TRACKING_IMAGE}:${TAG}" - # command: /bin/bash -c "ros2 launch tracking tracking.launch.py" + tracking: + build: + context: .. + dockerfile: docker/perception/tracking/tracking.Dockerfile + cache_from: + - "${PERCEPTION_TRACKING_IMAGE}:${TAG}" + - "${PERCEPTION_TRACKING_IMAGE}:main" + target: deploy + image: "${PERCEPTION_TRACKING_IMAGE}:${TAG}" + command: /bin/bash -c "ros2 launch tracking tracking.launch.py" From 4a0f73a90006f821058cbd90ce3c577ec0e6d385 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Sat, 17 Feb 2024 03:47:16 +0000 Subject: [PATCH 39/70] Working with wato ros bags --- modules/docker-compose.perception.yaml | 2 +- .../config/eve_config.yaml | 5 +++ .../launch/eve_launch.py | 26 ++++++++++++ .../lidar_object_detection_node.py | 41 +++++++++++++++---- 4 files changed, 64 insertions(+), 10 deletions(-) create mode 100644 src/perception/lidar_object_detection/lidar_object_detection/config/eve_config.yaml create mode 100644 src/perception/lidar_object_detection/lidar_object_detection/launch/eve_launch.py diff --git a/modules/docker-compose.perception.yaml b/modules/docker-compose.perception.yaml index 544cf7ec..00726f0b 100644 --- a/modules/docker-compose.perception.yaml +++ b/modules/docker-compose.perception.yaml @@ -40,7 +40,7 @@ services: - "${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE}:main" target: deploy image: "${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE}:${TAG}" - command: /bin/bash -c "ros2 launch lidar_object_detection nuscenes_launch.py" + command: /bin/bash -c "ros2 launch lidar_object_detection eve_launch.py" volumes: - /mnt/wato-drive2/perception_models/voxelnext_nuscenes_kernel1.pth:/home/bolty/OpenPCDet/models/voxelnext_nuscenes_kernel1.pth - /mnt/wato-drive2/nuscenes-1.0-mini/samples/LIDAR_TOP:/home/bolty/data diff --git a/src/perception/lidar_object_detection/lidar_object_detection/config/eve_config.yaml b/src/perception/lidar_object_detection/lidar_object_detection/config/eve_config.yaml new file mode 100644 index 00000000..476dfa1c --- /dev/null +++ b/src/perception/lidar_object_detection/lidar_object_detection/config/eve_config.yaml @@ -0,0 +1,5 @@ +camera_object_detection_node: + ros__parameters: + lidar_topic: /velodyne_points + model_config_path: /home/bolty/OpenPCDet/tools/cfgs/nuscenes_models/cbgs_voxel0075_voxelnext.yaml + model_path: /home/bolty/OpenPCDet/models/voxelnext_nuscenes_kernel1.pth diff --git a/src/perception/lidar_object_detection/lidar_object_detection/launch/eve_launch.py b/src/perception/lidar_object_detection/lidar_object_detection/launch/eve_launch.py new file mode 100644 index 00000000..af515d44 --- /dev/null +++ b/src/perception/lidar_object_detection/lidar_object_detection/launch/eve_launch.py @@ -0,0 +1,26 @@ +from launch import LaunchDescription +from launch_ros.actions import Node +from ament_index_python.packages import get_package_share_directory +import os + +def generate_launch_description(): + ld = LaunchDescription() + config = os.path.join( + 
get_package_share_directory('lidar_object_detection'), + 'config', + 'eve_config.yaml' + ) + + # nodes + lidar_object_detection = Node( + package='lidar_object_detection', + executable='lidar_object_detection_node', + name='lidar_object_detection_node', + parameters=[config], + arguments=['--ros-args', '--log-level', 'info'] + ) + + # finalize + ld.add_action(lidar_object_detection) + + return ld diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py index 4627e7c7..bac2435f 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py @@ -5,6 +5,7 @@ from sensor_msgs.msg import PointCloud2, PointField import numpy as np import torch +import struct import sys sys.path.append('/home/bolty/OpenPCDet') from pcdet.config import cfg, cfg_from_yaml_file @@ -20,18 +21,18 @@ def __init__(self): super().__init__('lidar_object_detection') self.declare_parameter("model_path", "/home/bolty/OpenPCDet/models/voxelnext_nuscenes_kernel1.pth") self.declare_parameter("model_config_path", "/home/bolty/OpenPCDet/tools/cfgs/nuscenes_models/cbgs_voxel0075_voxelnext.yaml") - self.declare_parameter("lidar_topic", "/LIDAR_TOP") + self.declare_parameter("lidar_topic", "/velodyne_points") self.model_path = self.get_parameter("model_path").value self.model_config_path = self.get_parameter("model_config_path").value self.lidar_data = self.get_parameter("lidar_topic").value - self.bbox_publisher = self.create_publisher(MarkerArray, '/bounding_boxes', 10) + self.bbox_publisher = self.create_publisher(MarkerArray, '/BOUNDING_BOXES', 2) self.subscription = self.create_subscription( - PointCloud2, - self.lidar_data, - self.point_cloud_callback, - 10) + PointCloud2, + self.lidar_data, + self.point_cloud_callback, + 10) self.subscription args, cfg = self.parse_config() @@ -61,11 +62,33 @@ def point_cloud_callback(self, msg): self.publish_bounding_boxes(pred_dicts, msg.header.frame_id) - def pointcloud2_to_xyz_array(self, cloud_msg): - cloud_array = np.frombuffer(cloud_msg.data, dtype=np.float32) - cloud_array = cloud_array.reshape(cloud_msg.height * cloud_msg.width, 5) + # Nuscenes data loader (x, y, z, intensity, timestamp) + # def pointcloud2_to_xyz_array(self, cloud_msg): + # cloud_array = np.frombuffer(cloud_msg.data, dtype=np.float32) + # cloud_array = cloud_array.reshape(cloud_msg.height * cloud_msg.width, 5) + # return cloud_array + + def pointcloud2_to_xyz_array(self, cloud_msg: PointCloud2): + if cloud_msg.is_bigendian: + dtype = np.dtype('>f4') + else: + dtype = np.dtype('f4') + + offset_dict = {field.name: field.offset for field in cloud_msg.fields} + offsets = [offset_dict[field] for field in ['x', 'y', 'z', 'intensity']] + + cloud_array = np.empty((cloud_msg.width * cloud_msg.height, 4), dtype=np.float32) + + for i in range(cloud_msg.width * cloud_msg.height): + point_start_byte = i * cloud_msg.point_step + + for j, offset in enumerate(offsets): + data_bytes = cloud_msg.data[point_start_byte + offset:point_start_byte + offset + 4] + cloud_array[i, j] = struct.unpack('f', data_bytes)[0] + return cloud_array + def publish_bounding_boxes(self, pred_dicts, frame_id): marker_array = MarkerArray() for idx, box in 
enumerate(pred_dicts[0]['pred_boxes']): From 17797adbe2df4a2e36835df2e1f264562781c8dc Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Sat, 17 Feb 2024 04:09:04 +0000 Subject: [PATCH 40/70] refactored --- .../lidar_object_detection_node.py | 30 +++++++------------ 1 file changed, 10 insertions(+), 20 deletions(-) diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py index bac2435f..fd0d2e24 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py @@ -62,32 +62,22 @@ def point_cloud_callback(self, msg): self.publish_bounding_boxes(pred_dicts, msg.header.frame_id) - # Nuscenes data loader (x, y, z, intensity, timestamp) - # def pointcloud2_to_xyz_array(self, cloud_msg): - # cloud_array = np.frombuffer(cloud_msg.data, dtype=np.float32) - # cloud_array = cloud_array.reshape(cloud_msg.height * cloud_msg.width, 5) - # return cloud_array + def pointcloud2_to_xyz_array(self, cloud_msg): + dtype = np.dtype('>f4') if cloud_msg.is_bigendian else np.dtype('f4') - def pointcloud2_to_xyz_array(self, cloud_msg: PointCloud2): - if cloud_msg.is_bigendian: - dtype = np.dtype('>f4') - else: - dtype = np.dtype('f4') + field_offsets = {field.name: field.offset for field in cloud_msg.fields} - offset_dict = {field.name: field.offset for field in cloud_msg.fields} - offsets = [offset_dict[field] for field in ['x', 'y', 'z', 'intensity']] + cloud_array = np.empty((cloud_msg.width * cloud_msg.height, 4), dtype=dtype) - cloud_array = np.empty((cloud_msg.width * cloud_msg.height, 4), dtype=np.float32) + all_data = np.frombuffer(cloud_msg.data, dtype=dtype) - for i in range(cloud_msg.width * cloud_msg.height): - point_start_byte = i * cloud_msg.point_step + reshaped_data = all_data.reshape(cloud_msg.height * cloud_msg.width, -1) - for j, offset in enumerate(offsets): - data_bytes = cloud_msg.data[point_start_byte + offset:point_start_byte + offset + 4] - cloud_array[i, j] = struct.unpack('f', data_bytes)[0] - - return cloud_array + for i, field in enumerate(['x', 'y', 'z', 'intensity']): + field_idx = field_offsets[field] // 4 + cloud_array[:, i] = reshaped_data[:, field_idx] + return cloud_array.astype(np.float32) def publish_bounding_boxes(self, pred_dicts, frame_id): marker_array = MarkerArray() From bf996bebf90f5edec2175adeb4fb13dae6e1bae9 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Sun, 18 Feb 2024 18:20:44 +0000 Subject: [PATCH 41/70] changing to kitti model and making data loader more flexible --- modules/docker-compose.perception.yaml | 2 +- .../config/eve_config.yaml | 4 ++-- .../lidar_object_detection_node.py | 24 +++++++------------ watod-config.sh | 2 ++ 4 files changed, 14 insertions(+), 18 deletions(-) diff --git a/modules/docker-compose.perception.yaml b/modules/docker-compose.perception.yaml index 00726f0b..3f601604 100644 --- a/modules/docker-compose.perception.yaml +++ b/modules/docker-compose.perception.yaml @@ -42,7 +42,7 @@ services: image: "${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch lidar_object_detection eve_launch.py" volumes: - - /mnt/wato-drive2/perception_models/voxelnext_nuscenes_kernel1.pth:/home/bolty/OpenPCDet/models/voxelnext_nuscenes_kernel1.pth + - 
/mnt/wato-drive2/perception_models/pv_rcnn_8369.pth:/home/bolty/OpenPCDet/models/pv_rcnn_8369.pth - /mnt/wato-drive2/nuscenes-1.0-mini/samples/LIDAR_TOP:/home/bolty/data traffic_light_detection: build: diff --git a/src/perception/lidar_object_detection/lidar_object_detection/config/eve_config.yaml b/src/perception/lidar_object_detection/lidar_object_detection/config/eve_config.yaml index 476dfa1c..1e2b8d6f 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/config/eve_config.yaml +++ b/src/perception/lidar_object_detection/lidar_object_detection/config/eve_config.yaml @@ -1,5 +1,5 @@ camera_object_detection_node: ros__parameters: lidar_topic: /velodyne_points - model_config_path: /home/bolty/OpenPCDet/tools/cfgs/nuscenes_models/cbgs_voxel0075_voxelnext.yaml - model_path: /home/bolty/OpenPCDet/models/voxelnext_nuscenes_kernel1.pth + model_config_path: /home/bolty/OpenPCDet/tools/cfgs/kitti_models/pv_rcnn.yaml + model_path: /home/bolty/OpenPCDet/models/pv_rcnn_8369.pth diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py index fd0d2e24..99cf7651 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py @@ -19,8 +19,8 @@ class LidarObjectDetection(Node): def __init__(self): super().__init__('lidar_object_detection') - self.declare_parameter("model_path", "/home/bolty/OpenPCDet/models/voxelnext_nuscenes_kernel1.pth") - self.declare_parameter("model_config_path", "/home/bolty/OpenPCDet/tools/cfgs/nuscenes_models/cbgs_voxel0075_voxelnext.yaml") + self.declare_parameter("model_path", "/home/bolty/OpenPCDet/models/pv_rcnn_8369.pth") + self.declare_parameter("model_config_path", "/home/bolty/OpenPCDet/tools/cfgs/kitti_models/pv_rcnn.yaml") self.declare_parameter("lidar_topic", "/velodyne_points") self.model_path = self.get_parameter("model_path").value self.model_config_path = self.get_parameter("model_config_path").value @@ -63,21 +63,15 @@ def point_cloud_callback(self, msg): self.publish_bounding_boxes(pred_dicts, msg.header.frame_id) def pointcloud2_to_xyz_array(self, cloud_msg): - dtype = np.dtype('>f4') if cloud_msg.is_bigendian else np.dtype('f4') + num_points = cloud_msg.width * cloud_msg.height + cloud_array = np.frombuffer(cloud_msg.data, dtype=np.float32) + num_fields = cloud_msg.point_step // 4 + cloud_array = cloud_array.reshape(num_points, num_fields) - field_offsets = {field.name: field.offset for field in cloud_msg.fields} + if cloud_array.shape[1] > 4: + cloud_array = cloud_array[:, :4] - cloud_array = np.empty((cloud_msg.width * cloud_msg.height, 4), dtype=dtype) - - all_data = np.frombuffer(cloud_msg.data, dtype=dtype) - - reshaped_data = all_data.reshape(cloud_msg.height * cloud_msg.width, -1) - - for i, field in enumerate(['x', 'y', 'z', 'intensity']): - field_idx = field_offsets[field] // 4 - cloud_array[:, i] = reshaped_data[:, field_idx] - - return cloud_array.astype(np.float32) + return cloud_array def publish_bounding_boxes(self, pred_dicts, frame_id): marker_array = MarkerArray() diff --git a/watod-config.sh b/watod-config.sh index 637c1624..b0f57521 100755 --- a/watod-config.sh +++ b/watod-config.sh @@ -13,10 +13,12 @@ ## - samples : starts sample ROS2 pubsub nodes # 
ACTIVE_MODULES="" +ACTIVE_MODULES="perception infrastructure" ############################## OPTIONAL CONFIGURATIONS ############################## ## Name to append to docker containers. DEFAULT = "" # COMPOSE_PROJECT_NAME="" +COMPOSE_PROJECT_NAME="danielrhuynh" ## Tag to use. Images are formatted as : with forward slashes replaced with dashes. ## DEFAULT = "" From a5dcdfa8c895965432fe1e4aec4f21bf533f832d Mon Sep 17 00:00:00 2001 From: Dan Ryan Huynh <89366190+danielrhuynh@users.noreply.github.com> Date: Sun, 18 Feb 2024 15:01:44 -0500 Subject: [PATCH 42/70] Update watod-config.sh --- watod-config.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/watod-config.sh b/watod-config.sh index b0f57521..637c1624 100755 --- a/watod-config.sh +++ b/watod-config.sh @@ -13,12 +13,10 @@ ## - samples : starts sample ROS2 pubsub nodes # ACTIVE_MODULES="" -ACTIVE_MODULES="perception infrastructure" ############################## OPTIONAL CONFIGURATIONS ############################## ## Name to append to docker containers. DEFAULT = "" # COMPOSE_PROJECT_NAME="" -COMPOSE_PROJECT_NAME="danielrhuynh" ## Tag to use. Images are formatted as : with forward slashes replaced with dashes. ## DEFAULT = "" From 775515fab9289dfda8bf21d2924efc8a3e62ef5b Mon Sep 17 00:00:00 2001 From: Steven Gong Date: Wed, 21 Feb 2024 23:22:52 +0000 Subject: [PATCH 43/70] Clean up dockerfile and fix missing config file --- .../lidar_object_detection.Dockerfile | 59 ++++++++++++++----- .../lidar_object_detection/setup.py | 4 +- 2 files changed, 48 insertions(+), 15 deletions(-) diff --git a/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile b/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile index 7d6dd454..31922d00 100644 --- a/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile +++ b/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile @@ -1,6 +1,24 @@ ARG BASE_IMAGE=ghcr.io/watonomous/wato_monorepo/base:cuda11.7-humble-ubuntu22.04-devel -FROM ${BASE_IMAGE} as dependencies + +################################ Source ################################ +FROM ${BASE_IMAGE} as source + +WORKDIR ${AMENT_WS}/src + +# Copy in source code +COPY src/perception/lidar_object_detection lidar_object_detection + +# Scan for rosdeps +RUN apt-get -qq update && rosdep update && \ + rosdep install --from-paths . 
--ignore-src -r -s \ + | grep 'apt-get install' \ + | awk '{print $3}' \ + | sort > /tmp/colcon_install_list + + ################################# Dependencies ################################ +FROM ${BASE_IMAGE} as dependencies + RUN apt-get update && apt-get install -y \ git zip unzip libssl-dev libcairo2-dev lsb-release libgoogle-glog-dev libgflags-dev libatlas-base-dev libeigen3-dev software-properties-common \ build-essential cmake pkg-config libapr1-dev autoconf automake libtool curl libc6 libboost-all-dev debconf libomp5 libstdc++6 \ @@ -8,6 +26,7 @@ RUN apt-get update && apt-get install -y \ python3-pip python3-setuptools \ && apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ rm -rf /var/lib/apt/lists/* /root/* /root/.ros /tmp/* /usr/share/doc + ################################ INSTALL OpenCV with CUDA Support ############## WORKDIR /opt RUN git clone https://github.com/opencv/opencv.git && \ @@ -43,31 +62,43 @@ ENV TORCH_CUDA_ARCH_LIST="3.5;5.0;6.0;6.1;7.0;7.5;8.0;8.6+PTX" RUN pip3 install spconv-cu113 pyquaternion numpy==1.23 pillow==8.4 mayavi open3d av2 WORKDIR /home/bolty RUN git clone https://github.com/WATonomous/OpenPCDet.git -# COPY /OpenPCDet /home/bolty/OpenPCDet -WORKDIR /home/bolty RUN cd OpenPCDet && pip3 install -r requirements.txt RUN pip3 install kornia==0.6.8 RUN pip3 install nuscenes-devkit==1.0.5 WORKDIR /home/bolty/OpenPCDet/ RUN python3 setup.py develop + +# Install Rosdep requirements +COPY --from=source /tmp/colcon_install_list /tmp/colcon_install_list +RUN apt-fast install -qq -y --no-install-recommends $(cat /tmp/colcon_install_list) + +# Copy in source code from source stage +WORKDIR ${AMENT_WS} +COPY --from=source ${AMENT_WS}/src src + +# Dependency Cleanup +WORKDIR / +RUN apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ + rm -rf /root/* /root/.ros /tmp/* /var/lib/apt/lists/* /usr/share/doc/* + ################################ Source ####################################### FROM dependencies as build -WORKDIR ${AMENT_WS}/src -COPY src/perception/lidar_object_detection lidar_object_detection -COPY src/wato_msgs/sample_msgs sample_msgs -RUN apt-get update && rosdep update && \ - rosdep install --from-paths . --ignore-src -r -y -RUN apt install -y ros-${ROS_DISTRO}-foxglove-bridge + +# Build ROS2 packages WORKDIR ${AMENT_WS} -RUN . /opt/ros/$ROS_DISTRO/setup.sh && colcon build --cmake-args -DCMAKE_BUILD_TYPE=Release -# Entrypoint setup -COPY /docker/wato_ros_entrypoint.sh ${AMENT_WS}/wato_ros_entrypoint.sh +RUN . /opt/ros/$ROS_DISTRO/setup.sh && \ + colcon build \ + --cmake-args -DCMAKE_BUILD_TYPE=Release + +# Entrypoint will run before any CMD on launch. 
Sources ~/opt//setup.bash and ~/ament_ws/install/setup.bash +COPY docker/wato_ros_entrypoint.sh ${AMENT_WS}/wato_ros_entrypoint.sh ENTRYPOINT ["./wato_ros_entrypoint.sh"] + ################################ Prod ######################################### FROM build as deploy # Source Cleanup and Security Setup RUN chown -R $USER:$USER ${AMENT_WS} -# RUN rm -rf src/* +RUN rm -rf src/* -USER ${USER} \ No newline at end of file +USER ${USER} diff --git a/src/perception/lidar_object_detection/lidar_object_detection/setup.py b/src/perception/lidar_object_detection/lidar_object_detection/setup.py index efc027d8..8852c904 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/setup.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/setup.py @@ -1,3 +1,4 @@ +import os from setuptools import find_packages, setup from glob import glob @@ -11,7 +12,8 @@ ('share/ament_index/resource_index/packages', ['resource/' + package_name]), ('share/' + package_name, ['package.xml']), - ('share/' + package_name + '/launch', glob('launch/*.py')), + (os.path.join('share', package_name, 'launch'), glob('launch/*.py')), + (os.path.join('share', package_name, 'config'), glob('config/*.yaml')), ], install_requires=['setuptools'], zip_safe=True, From b3b4948a2a96bd9bd13d88ba281ffed34bf43610 Mon Sep 17 00:00:00 2001 From: Steven Gong Date: Fri, 23 Feb 2024 01:25:48 +0000 Subject: [PATCH 44/70] Update config files and cleanup publisher --- .../config/eve_config.yaml | 2 +- .../config/nuscenes_config.yaml | 2 +- .../lidar_object_detection_node.py | 43 ++++++++++++------- .../lidar_object_detection/package.xml | 11 +++-- 4 files changed, 34 insertions(+), 24 deletions(-) diff --git a/src/perception/lidar_object_detection/lidar_object_detection/config/eve_config.yaml b/src/perception/lidar_object_detection/lidar_object_detection/config/eve_config.yaml index 1e2b8d6f..109feb83 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/config/eve_config.yaml +++ b/src/perception/lidar_object_detection/lidar_object_detection/config/eve_config.yaml @@ -1,4 +1,4 @@ -camera_object_detection_node: +lidar_object_detection_node: ros__parameters: lidar_topic: /velodyne_points model_config_path: /home/bolty/OpenPCDet/tools/cfgs/kitti_models/pv_rcnn.yaml diff --git a/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml b/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml index 3613a87c..61b305ca 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml +++ b/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml @@ -1,4 +1,4 @@ -camera_object_detection_node: +lidar_object_detection_node: ros__parameters: lidar_topic: /LIDAR_TOP model_config_path: /home/bolty/OpenPCDet/tools/cfgs/nuscenes_models/cbgs_voxel0075_voxelnext.yaml diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py index 99cf7651..e07adddf 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py @@ -5,7 +5,6 @@ from sensor_msgs.msg import PointCloud2, PointField import numpy as np import torch 
-import struct import sys sys.path.append('/home/bolty/OpenPCDet') from pcdet.config import cfg, cfg_from_yaml_file @@ -13,8 +12,7 @@ from pcdet.models import build_network, load_data_to_gpu from pcdet.utils import common_utils from visualization_msgs.msg import Marker, MarkerArray - -OPEN3D_FLAG = True +from vision_msgs.msg import ObjectHypothesisWithPose, Detection3D, Detection3DArray class LidarObjectDetection(Node): def __init__(self): @@ -26,14 +24,14 @@ def __init__(self): self.model_config_path = self.get_parameter("model_config_path").value self.lidar_data = self.get_parameter("lidar_topic").value - self.bbox_publisher = self.create_publisher(MarkerArray, '/BOUNDING_BOXES', 2) + self.bbox_publisher = self.create_publisher(MarkerArray, '/BOUNDING_BOXES', 10) + self.detections_publisher = self.create_publisher(Detection3DArray, '/detections_3d', 10) self.subscription = self.create_subscription( PointCloud2, self.lidar_data, self.point_cloud_callback, 10) - self.subscription args, cfg = self.parse_config() self.logger = common_utils.create_logger() @@ -60,7 +58,7 @@ def point_cloud_callback(self, msg): with torch.no_grad(): pred_dicts, _ = self.model.forward(data_dict) - self.publish_bounding_boxes(pred_dicts, msg.header.frame_id) + self.publish_bounding_boxes(msg, pred_dicts) def pointcloud2_to_xyz_array(self, cloud_msg): num_points = cloud_msg.width * cloud_msg.height @@ -73,12 +71,12 @@ def pointcloud2_to_xyz_array(self, cloud_msg): return cloud_array - def publish_bounding_boxes(self, pred_dicts, frame_id): + def publish_bounding_boxes(self, pointcloud_msg, pred_dicts): marker_array = MarkerArray() for idx, box in enumerate(pred_dicts[0]['pred_boxes']): marker = Marker() - marker.header.frame_id = frame_id - marker.header.stamp = self.get_clock().now().to_msg() + marker.header.frame_id = pointcloud_msg.header.frame_id + marker.header.stamp = pointcloud_msg.header.stamp marker.id = idx marker.type = Marker.CUBE marker.action = Marker.ADD @@ -92,10 +90,28 @@ def publish_bounding_boxes(self, pred_dicts, frame_id): marker.color.a = 0.8 marker.color.r = 1.0 marker.color.g = 0.0 - marker.color.b = 0.0 + marker.color.b = float(pred_dicts[0]['pred_labels'][idx]) / 3 marker_array.markers.append(marker) + detections = Detection3DArray() + detections.header = pointcloud_msg.header + for idx, box in enumerate(pred_dicts[0]['pred_boxes']): + detection = Detection3D() + detection.header = pointcloud_msg.header + detection.bbox.center.position.x = float(box[0]) + detection.bbox.center.position.y = float(box[1]) + detection.bbox.center.position.z = float(box[2]) + detection.bbox.size.x = float(box[3]) + detection.bbox.size.y = float(box[4]) + detection.bbox.size.z = float(box[5]) + detected_object = ObjectHypothesisWithPose() + detected_object.hypothesis.class_id = str(pred_dicts[0]['pred_labels'][idx]) + detected_object.hypothesis.score = float(pred_dicts[0]['pred_scores'][idx]) + detection.results.append(detected_object) + detections.detections.append(detection) + self.bbox_publisher.publish(marker_array) + self.detections_publisher.publish(detections) def parse_config(self): parser = argparse.ArgumentParser(description='arg parser') @@ -107,11 +123,6 @@ def parse_config(self): cfg_from_yaml_file(args.cfg_file, cfg) return args, cfg - def make_header(self): - header = Header() - header.stamp = self.get_clock().now().to_msg() - header.frame_id = 'base_link' - return header def create_cloud_xyz32(self, header, points): """ @@ -172,4 +183,4 @@ def main(args=None): rclpy.shutdown() if __name__ == 
'__main__': - main() \ No newline at end of file + main() diff --git a/src/perception/lidar_object_detection/lidar_object_detection/package.xml b/src/perception/lidar_object_detection/lidar_object_detection/package.xml index 9ea65b8d..57b447bb 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/package.xml +++ b/src/perception/lidar_object_detection/lidar_object_detection/package.xml @@ -9,12 +9,11 @@ rclpy std_msgs - rospy - sensor_msgs - nav_msgs - geometry_msgs - tf - visualization_msgs + rospy + sensor_msgs + vision_msgs + tf + visualization_msgs ament_copyright ament_flake8 From 257a53bb431f9c13bf9dc394b1fd302b7ad9d9d8 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Fri, 23 Feb 2024 01:45:17 +0000 Subject: [PATCH 45/70] updates --- .../lidar_object_detection/lidar_object_detection.Dockerfile | 1 + modules/docker-compose.perception.yaml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile b/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile index 31922d00..ea0bb44d 100644 --- a/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile +++ b/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile @@ -70,6 +70,7 @@ RUN python3 setup.py develop # Install Rosdep requirements COPY --from=source /tmp/colcon_install_list /tmp/colcon_install_list +RUN apt-get update && apt-fast update RUN apt-fast install -qq -y --no-install-recommends $(cat /tmp/colcon_install_list) # Copy in source code from source stage diff --git a/modules/docker-compose.perception.yaml b/modules/docker-compose.perception.yaml index 3f601604..06f02f0d 100644 --- a/modules/docker-compose.perception.yaml +++ b/modules/docker-compose.perception.yaml @@ -43,7 +43,7 @@ services: command: /bin/bash -c "ros2 launch lidar_object_detection eve_launch.py" volumes: - /mnt/wato-drive2/perception_models/pv_rcnn_8369.pth:/home/bolty/OpenPCDet/models/pv_rcnn_8369.pth - - /mnt/wato-drive2/nuscenes-1.0-mini/samples/LIDAR_TOP:/home/bolty/data + - /mnt/wato-drive2/nuscenes-1.0-mini:/home/bolty/OpenPCDet/data/nuscenes/v1.0-mini traffic_light_detection: build: context: .. 
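
The Detection3DArray publisher introduced above can be exercised with a small downstream subscriber. The following is a minimal sketch, not part of the patch series: the node name lidar_detection_listener is invented for illustration, the topic name /detections_3d matches the publisher at this point in the series (a later commit renames it to /lidar_detections), and each Detection3D is assumed to carry a single ObjectHypothesisWithPose, as populated by publish_bounding_boxes.

    import rclpy
    from rclpy.node import Node
    from vision_msgs.msg import Detection3DArray


    class LidarDetectionListener(Node):
        """Minimal consumer of the lidar Detection3DArray (illustrative sketch)."""

        def __init__(self):
            super().__init__("lidar_detection_listener")
            # Topic name follows the publisher in this patch; later commits rename it.
            self.create_subscription(Detection3DArray, "/detections_3d", self.callback, 10)

        def callback(self, msg):
            for detection in msg.detections:
                if not detection.results:
                    continue
                hypothesis = detection.results[0].hypothesis
                center = detection.bbox.center.position
                self.get_logger().info(
                    f"class={hypothesis.class_id} score={hypothesis.score:.2f} "
                    f"center=({center.x:.1f}, {center.y:.1f}, {center.z:.1f})"
                )


    def main(args=None):
        rclpy.init(args=args)
        node = LidarDetectionListener()
        rclpy.spin(node)
        node.destroy_node()
        rclpy.shutdown()
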
From 311296395850273073ee706fbd716a117e30a8c8 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Fri, 23 Feb 2024 02:58:27 +0000 Subject: [PATCH 46/70] experimenting --- .../lidar_object_detection.Dockerfile | 2 +- modules/docker-compose.perception.yaml | 1 + .../lidar_object_detection/config/nuscenes_config.yaml | 7 ++++--- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile b/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile index ea0bb44d..d80d1799 100644 --- a/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile +++ b/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile @@ -59,7 +59,7 @@ ENV OpenCV_DIR=/usr/share/OpenCV RUN pip3 install torch==1.13.1+cu116 torchvision==0.14.1+cu116 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116 RUN pip3 install torch-scatter -f https://data.pyg.org/whl/torch-1.13.1+cu116.html ENV TORCH_CUDA_ARCH_LIST="3.5;5.0;6.0;6.1;7.0;7.5;8.0;8.6+PTX" -RUN pip3 install spconv-cu113 pyquaternion numpy==1.23 pillow==8.4 mayavi open3d av2 +RUN pip3 install spconv-cu116 pyquaternion numpy==1.23 pillow==8.4 mayavi open3d av2 WORKDIR /home/bolty RUN git clone https://github.com/WATonomous/OpenPCDet.git RUN cd OpenPCDet && pip3 install -r requirements.txt diff --git a/modules/docker-compose.perception.yaml b/modules/docker-compose.perception.yaml index 06f02f0d..5035acef 100644 --- a/modules/docker-compose.perception.yaml +++ b/modules/docker-compose.perception.yaml @@ -42,6 +42,7 @@ services: image: "${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch lidar_object_detection eve_launch.py" volumes: + - /mnt/wato-drive2/perception_models/cbgs_transfusion_lidar.pth:/home/bolty/OpenPCDet/models/cbgs_transfusion_lidar.pth - /mnt/wato-drive2/perception_models/pv_rcnn_8369.pth:/home/bolty/OpenPCDet/models/pv_rcnn_8369.pth - /mnt/wato-drive2/nuscenes-1.0-mini:/home/bolty/OpenPCDet/data/nuscenes/v1.0-mini traffic_light_detection: diff --git a/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml b/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml index 61b305ca..12e6d89e 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml +++ b/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml @@ -1,5 +1,6 @@ lidar_object_detection_node: ros__parameters: - lidar_topic: /LIDAR_TOP - model_config_path: /home/bolty/OpenPCDet/tools/cfgs/nuscenes_models/cbgs_voxel0075_voxelnext.yaml - model_path: /home/bolty/OpenPCDet/models/voxelnext_nuscenes_kernel1.pth + # lidar_topic: /LIDAR_TOP + lidar_topic: /velodyne_points + model_config_path: /home/bolty/OpenPCDet/tools/cfgs/nuscenes_models/transfusion_lidar.yaml + model_path: /home/bolty/OpenPCDet/models/cbgs_transfusion_lidar.pth From 4b9b36f38318afdeba7d1585f5d71a71402336b9 Mon Sep 17 00:00:00 2001 From: Steven Gong Date: Fri, 23 Feb 2024 18:31:09 +0000 Subject: [PATCH 47/70] Fix styling --- .../config/nuscenes_config.yaml | 1 - .../lidar_object_detection_node.py | 115 ++++++++++-------- 2 files changed, 66 insertions(+), 50 deletions(-) diff --git a/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml b/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml index 12e6d89e..c222e200 100644 --- 
a/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml +++ b/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml @@ -1,6 +1,5 @@ lidar_object_detection_node: ros__parameters: - # lidar_topic: /LIDAR_TOP lidar_topic: /velodyne_points model_config_path: /home/bolty/OpenPCDet/tools/cfgs/nuscenes_models/transfusion_lidar.yaml model_path: /home/bolty/OpenPCDet/models/cbgs_transfusion_lidar.pth diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py index e07adddf..5f9c58fc 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py @@ -1,46 +1,52 @@ +import sys import argparse import rclpy from rclpy.node import Node -from std_msgs.msg import Header -from sensor_msgs.msg import PointCloud2, PointField import numpy as np import torch -import sys -sys.path.append('/home/bolty/OpenPCDet') +from visualization_msgs.msg import Marker, MarkerArray +from vision_msgs.msg import ObjectHypothesisWithPose, Detection3D, Detection3DArray +from sensor_msgs.msg import PointCloud2, PointField + +# pylint: disable=wrong-import-position +sys.path.append("/home/bolty/OpenPCDet") from pcdet.config import cfg, cfg_from_yaml_file from pcdet.datasets import DatasetTemplate from pcdet.models import build_network, load_data_to_gpu from pcdet.utils import common_utils -from visualization_msgs.msg import Marker, MarkerArray -from vision_msgs.msg import ObjectHypothesisWithPose, Detection3D, Detection3DArray + class LidarObjectDetection(Node): def __init__(self): - super().__init__('lidar_object_detection') + super().__init__("lidar_object_detection") self.declare_parameter("model_path", "/home/bolty/OpenPCDet/models/pv_rcnn_8369.pth") - self.declare_parameter("model_config_path", "/home/bolty/OpenPCDet/tools/cfgs/kitti_models/pv_rcnn.yaml") + self.declare_parameter( + "model_config_path", "/home/bolty/OpenPCDet/tools/cfgs/kitti_models/pv_rcnn.yaml") self.declare_parameter("lidar_topic", "/velodyne_points") self.model_path = self.get_parameter("model_path").value self.model_config_path = self.get_parameter("model_config_path").value self.lidar_data = self.get_parameter("lidar_topic").value - self.bbox_publisher = self.create_publisher(MarkerArray, '/BOUNDING_BOXES', 10) - self.detections_publisher = self.create_publisher(Detection3DArray, '/detections_3d', 10) + self.viz_publisher = self.create_publisher(MarkerArray, "/lidar_detections_viz", 10) + self.detections_publisher = self.create_publisher(Detection3DArray, "/lidar_detections", 10) self.subscription = self.create_subscription( - PointCloud2, - self.lidar_data, - self.point_cloud_callback, - 10) + PointCloud2, self.lidar_data, self.point_cloud_callback, 10 + ) args, cfg = self.parse_config() self.logger = common_utils.create_logger() - self.logger.info('-------------------------Starting Lidar Object Detection-------------------------') self.demo_dataset = LidarDataset( - dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False, logger=self.logger) + dataset_cfg=cfg.DATA_CONFIG, + class_names=cfg.CLASS_NAMES, + training=False, + logger=self.logger, + ) - self.model = build_network(model_cfg=cfg.MODEL, 
num_class=len(cfg.CLASS_NAMES), dataset=self.demo_dataset) + self.model = build_network( + model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=self.demo_dataset + ) self.model.load_params_from_file(filename=args.ckpt, logger=self.logger, to_cpu=True) self.model.cuda() self.model.eval() @@ -48,8 +54,8 @@ def __init__(self): def point_cloud_callback(self, msg): points = self.pointcloud2_to_xyz_array(msg) data_dict = { - 'points': points, - 'frame_id': msg.header.frame_id, + "points": points, + "frame_id": msg.header.frame_id, } data_dict = self.demo_dataset.prepare_data(data_dict=data_dict) data_dict = self.demo_dataset.collate_batch([data_dict]) @@ -73,10 +79,9 @@ def pointcloud2_to_xyz_array(self, cloud_msg): def publish_bounding_boxes(self, pointcloud_msg, pred_dicts): marker_array = MarkerArray() - for idx, box in enumerate(pred_dicts[0]['pred_boxes']): + for idx, box in enumerate(pred_dicts[0]["pred_boxes"]): marker = Marker() - marker.header.frame_id = pointcloud_msg.header.frame_id - marker.header.stamp = pointcloud_msg.header.stamp + marker.header = pointcloud_msg.header marker.id = idx marker.type = Marker.CUBE marker.action = Marker.ADD @@ -90,12 +95,12 @@ def publish_bounding_boxes(self, pointcloud_msg, pred_dicts): marker.color.a = 0.8 marker.color.r = 1.0 marker.color.g = 0.0 - marker.color.b = float(pred_dicts[0]['pred_labels'][idx]) / 3 + marker.color.b = float(pred_dicts[0]["pred_labels"][idx]) / 3 marker_array.markers.append(marker) - + detections = Detection3DArray() detections.header = pointcloud_msg.header - for idx, box in enumerate(pred_dicts[0]['pred_boxes']): + for idx, box in enumerate(pred_dicts[0]["pred_boxes"]): detection = Detection3D() detection.header = pointcloud_msg.header detection.bbox.center.position.x = float(box[0]) @@ -105,25 +110,30 @@ def publish_bounding_boxes(self, pointcloud_msg, pred_dicts): detection.bbox.size.y = float(box[4]) detection.bbox.size.z = float(box[5]) detected_object = ObjectHypothesisWithPose() - detected_object.hypothesis.class_id = str(pred_dicts[0]['pred_labels'][idx]) - detected_object.hypothesis.score = float(pred_dicts[0]['pred_scores'][idx]) + detected_object.hypothesis.class_id = str(pred_dicts[0]["pred_labels"][idx]) + detected_object.hypothesis.score = float(pred_dicts[0]["pred_scores"][idx]) detection.results.append(detected_object) detections.detections.append(detection) - - self.bbox_publisher.publish(marker_array) + + self.viz_publisher.publish(marker_array) self.detections_publisher.publish(detections) def parse_config(self): - parser = argparse.ArgumentParser(description='arg parser') - parser.add_argument('--cfg_file', type=str, default=self.model_config_path, - help='specify the config for demo') - parser.add_argument('--ckpt', type=str, default=self.model_path, help='specify the pretrained model') + parser = argparse.ArgumentParser(description="arg parser") + parser.add_argument( + "--cfg_file", + type=str, + default=self.model_config_path, + help="specify the config for demo", + ) + parser.add_argument( + "--ckpt", type=str, default=self.model_path, help="specify the pretrained model" + ) args, _ = parser.parse_known_args() cfg_from_yaml_file(args.cfg_file, cfg) return args, cfg - def create_cloud_xyz32(self, header, points): """ Create a sensor_msgs/PointCloud2 message from an array of points. 
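
The next hunk only restyles create_cloud_xyz32, and the packing of the point data itself is elided between hunks. For reference, a self-contained helper that fills a PointCloud2 with the same three float32 fields (x, y, z at offsets 0, 4 and 8, giving a 12-byte point_step) could look like the sketch below; the name pack_xyz_cloud and the is_dense setting are illustrative assumptions, not code from the patch.

    import numpy as np
    from sensor_msgs.msg import PointCloud2, PointField


    def pack_xyz_cloud(header, points_xyz):
        """Build a PointCloud2 from an (N, 3) float array (illustrative sketch)."""
        fields = [
            PointField(name="x", offset=0, datatype=PointField.FLOAT32, count=1),
            PointField(name="y", offset=4, datatype=PointField.FLOAT32, count=1),
            PointField(name="z", offset=8, datatype=PointField.FLOAT32, count=1),
        ]
        points_np = np.asarray(points_xyz, dtype=np.float32)
        cloud = PointCloud2()
        cloud.header = header
        cloud.height = 1
        cloud.width = points_np.shape[0]
        cloud.fields = fields
        cloud.is_bigendian = False
        cloud.point_step = 12  # three float32 values per point
        cloud.row_step = cloud.point_step * cloud.width
        cloud.is_dense = True
        cloud.data = points_np.tobytes()
        return cloud
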
@@ -134,23 +144,27 @@ def create_cloud_xyz32(self, header, points): """ # Create fields for the PointCloud2 message fields = [ - PointField(name='x', offset=0, datatype=PointField.FLOAT32, count=1), - PointField(name='y', offset=4, datatype=PointField.FLOAT32, count=1), - PointField(name='z', offset=8, datatype=PointField.FLOAT32, count=1) + PointField(name="x", offset=0, datatype=PointField.FLOAT32, count=1), + PointField(name="y", offset=4, datatype=PointField.FLOAT32, count=1), + PointField(name="z", offset=8, datatype=PointField.FLOAT32, count=1), ] y_angle = np.deg2rad(90) z_angle = np.deg2rad(-90) - rotation_matrix_y = np.array([ - [np.cos(y_angle), 0, np.sin(y_angle)], - [0, 1, 0 ], - [-np.sin(y_angle), 0, np.cos(y_angle)] - ]) - rotation_matrix_z = np.array([ - [np.cos(z_angle), -np.sin(z_angle), 0], - [np.sin(z_angle), np.cos(z_angle), 0], - [0, 0, 1] - ]) + rotation_matrix_y = np.array( + [ + [np.cos(y_angle), 0, np.sin(y_angle)], + [0, 1, 0], + [-np.sin(y_angle), 0, np.cos(y_angle)], + ] + ) + rotation_matrix_z = np.array( + [ + [np.cos(z_angle), -np.sin(z_angle), 0], + [np.sin(z_angle), np.cos(z_angle), 0], + [0, 0, 1], + ] + ) points_np = points.cpu().numpy() points_np_y = np.dot(points_np, rotation_matrix_y) @@ -169,12 +183,14 @@ def create_cloud_xyz32(self, header, points): return cloud + class LidarDataset(DatasetTemplate): - def __init__(self, dataset_cfg, class_names, training=True, logger=None, ext='.bin'): + def __init__(self, dataset_cfg, class_names, training=True, logger=None, ext=".bin"): super().__init__( dataset_cfg=dataset_cfg, class_names=class_names, training=training, logger=logger ) + def main(args=None): rclpy.init(args=args) node = LidarObjectDetection() @@ -182,5 +198,6 @@ def main(args=None): node.destroy_node() rclpy.shutdown() -if __name__ == '__main__': + +if __name__ == "__main__": main() From ec95c6fb991f18908b9a287c4d52f1785c2bfd21 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Tue, 12 Mar 2024 01:48:52 +0000 Subject: [PATCH 48/70] Added thresholding --- modules/docker-compose.perception.yaml | 4 +- .../config/eve_config.yaml | 4 +- .../lidar_object_detection_node.py | 73 +++++++++---------- 3 files changed, 41 insertions(+), 40 deletions(-) diff --git a/modules/docker-compose.perception.yaml b/modules/docker-compose.perception.yaml index 93d1ec83..cb39dbad 100644 --- a/modules/docker-compose.perception.yaml +++ b/modules/docker-compose.perception.yaml @@ -42,9 +42,11 @@ services: image: "${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch lidar_object_detection eve_launch.py" volumes: - - /mnt/wato-drive2/perception_models/cbgs_transfusion_lidar.pth:/home/bolty/OpenPCDet/models/cbgs_transfusion_lidar.pth + - /mnt/wato-drive2/perception_models/transfusion_trained_model.pth:/home/bolty/OpenPCDet/models/transfusion_trained_model.pth - /mnt/wato-drive2/perception_models/pv_rcnn_8369.pth:/home/bolty/OpenPCDet/models/pv_rcnn_8369.pth + - /mnt/wato-drive2/perception_models/pv_rcnn_8369.pth:/home/bolty/OpenPCDet/models/nuscenes_trained_voxelNext.pth - /mnt/wato-drive2/nuscenes-1.0-mini:/home/bolty/OpenPCDet/data/nuscenes/v1.0-mini + - /mnt/wato-drive2/wato-datasets/nuscenes/full-dataset:/home/bolty/OpenPCDet/data/nuscenes/v1.0-trainval traffic_light_detection: build: context: .. 
diff --git a/src/perception/lidar_object_detection/lidar_object_detection/config/eve_config.yaml b/src/perception/lidar_object_detection/lidar_object_detection/config/eve_config.yaml index 109feb83..7117fddb 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/config/eve_config.yaml +++ b/src/perception/lidar_object_detection/lidar_object_detection/config/eve_config.yaml @@ -1,5 +1,5 @@ lidar_object_detection_node: ros__parameters: lidar_topic: /velodyne_points - model_config_path: /home/bolty/OpenPCDet/tools/cfgs/kitti_models/pv_rcnn.yaml - model_path: /home/bolty/OpenPCDet/models/pv_rcnn_8369.pth + model_config_path: /home/bolty/OpenPCDet/tools/cfgs/nuscenes_models/transfusion_lidar.yaml + model_path: /home/bolty/OpenPCDet/models/transfusion_trained_model.pth diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py index 5f9c58fc..198767cf 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py @@ -18,10 +18,9 @@ class LidarObjectDetection(Node): def __init__(self): - super().__init__("lidar_object_detection") - self.declare_parameter("model_path", "/home/bolty/OpenPCDet/models/pv_rcnn_8369.pth") - self.declare_parameter( - "model_config_path", "/home/bolty/OpenPCDet/tools/cfgs/kitti_models/pv_rcnn.yaml") + super().__init__('lidar_object_detection') + self.declare_parameter("model_path", "/home/bolty/OpenPCDet/models/transfusion_trained_model.pth") + self.declare_parameter("model_config_path", "/home/bolty/OpenPCDet/tools/cfgs/nuscenes_models/transfusion_lidar.yaml") self.declare_parameter("lidar_topic", "/velodyne_points") self.model_path = self.get_parameter("model_path").value self.model_config_path = self.get_parameter("model_config_path").value @@ -79,41 +78,41 @@ def pointcloud2_to_xyz_array(self, cloud_msg): def publish_bounding_boxes(self, pointcloud_msg, pred_dicts): marker_array = MarkerArray() - for idx, box in enumerate(pred_dicts[0]["pred_boxes"]): - marker = Marker() - marker.header = pointcloud_msg.header - marker.id = idx - marker.type = Marker.CUBE - marker.action = Marker.ADD - marker.pose.position.x = float(box[0]) - marker.pose.position.y = float(box[1]) - marker.pose.position.z = float(box[2]) - marker.pose.orientation.w = 1.0 - marker.scale.x = float(box[3]) - marker.scale.y = float(box[4]) - marker.scale.z = float(box[5]) - marker.color.a = 0.8 - marker.color.r = 1.0 - marker.color.g = 0.0 - marker.color.b = float(pred_dicts[0]["pred_labels"][idx]) / 3 - marker_array.markers.append(marker) - detections = Detection3DArray() detections.header = pointcloud_msg.header - for idx, box in enumerate(pred_dicts[0]["pred_boxes"]): - detection = Detection3D() - detection.header = pointcloud_msg.header - detection.bbox.center.position.x = float(box[0]) - detection.bbox.center.position.y = float(box[1]) - detection.bbox.center.position.z = float(box[2]) - detection.bbox.size.x = float(box[3]) - detection.bbox.size.y = float(box[4]) - detection.bbox.size.z = float(box[5]) - detected_object = ObjectHypothesisWithPose() - detected_object.hypothesis.class_id = str(pred_dicts[0]["pred_labels"][idx]) - detected_object.hypothesis.score = float(pred_dicts[0]["pred_scores"][idx]) - 
detection.results.append(detected_object) - detections.detections.append(detection) + for idx, (box, score) in enumerate(zip(pred_dicts[0]["pred_boxes"], pred_dicts[0]["pred_scores"])): + if score > 0.1: + marker = Marker() + marker.header = pointcloud_msg.header + marker.id = idx + marker.type = Marker.CUBE + marker.action = Marker.ADD + marker.pose.position.x = float(box[0]) + marker.pose.position.y = float(box[1]) + marker.pose.position.z = float(box[2]) + marker.pose.orientation.w = 1.0 + marker.scale.x = float(box[3]) + marker.scale.y = float(box[4]) + marker.scale.z = float(box[5]) + marker.color.a = 0.8 + marker.color.r = 1.0 + marker.color.g = 0.0 + marker.color.b = float(pred_dicts[0]["pred_labels"][idx]) / 3 + marker_array.markers.append(marker) + + detection = Detection3D() + detection.header = pointcloud_msg.header + detection.bbox.center.position.x = float(box[0]) + detection.bbox.center.position.y = float(box[1]) + detection.bbox.center.position.z = float(box[2]) + detection.bbox.size.x = float(box[3]) + detection.bbox.size.y = float(box[4]) + detection.bbox.size.z = float(box[5]) + detected_object = ObjectHypothesisWithPose() + detected_object.hypothesis.class_id = str(pred_dicts[0]["pred_labels"][idx]) + detected_object.hypothesis.score = float(pred_dicts[0]["pred_scores"][idx]) + detection.results.append(detected_object) + detections.detections.append(detection) self.viz_publisher.publish(marker_array) self.detections_publisher.publish(detections) From 1331dc262176d8398ead6db5bda1d0e7253b8dca Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Wed, 20 Mar 2024 00:03:23 +0000 Subject: [PATCH 49/70] WIP: added timestamps to (x,y,z,intensity) data to make them nuscenes compatible + added more models to try --- modules/docker-compose.perception.yaml | 12 ++++++++++-- .../config/nuscenes_config.yaml | 4 ++-- .../lidar_object_detection_node.py | 9 ++++----- 3 files changed, 16 insertions(+), 9 deletions(-) diff --git a/modules/docker-compose.perception.yaml b/modules/docker-compose.perception.yaml index cb39dbad..94d022ab 100644 --- a/modules/docker-compose.perception.yaml +++ b/modules/docker-compose.perception.yaml @@ -40,11 +40,19 @@ services: - "${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE}:main" target: deploy image: "${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE}:${TAG}" - command: /bin/bash -c "ros2 launch lidar_object_detection eve_launch.py" + command: /bin/bash -c "ros2 launch lidar_object_detection nuscenes_launch.py" volumes: - /mnt/wato-drive2/perception_models/transfusion_trained_model.pth:/home/bolty/OpenPCDet/models/transfusion_trained_model.pth + - /mnt/wato-drive2/perception_models/transfusion_trained_model.pth:/home/bolty/OpenPCDet/models/cbgs_transfusion_lidar.pth - /mnt/wato-drive2/perception_models/pv_rcnn_8369.pth:/home/bolty/OpenPCDet/models/pv_rcnn_8369.pth - - /mnt/wato-drive2/perception_models/pv_rcnn_8369.pth:/home/bolty/OpenPCDet/models/nuscenes_trained_voxelNext.pth + - /mnt/wato-drive2/perception_models/nuscenes_trained_voxelNext.pth:/home/bolty/OpenPCDet/models/nuscenes_trained_voxelNext.pth + # New Models to Try + - /mnt/wato-drive/perception/bevfusion_pretrain/bevfusion-det.pth:/home/bolty/OpenPCDet/models/bevfusion-det.pth + - /mnt/wato-drive/perception/bevfusion_pretrain/lidar-only-det.pth:/home/bolty/OpenPCDet/models/lidar-only-det.pth + - /mnt/wato-drive/perception/bevfusion_pretrain/lidar-only-seg.pth:/home/bolty/OpenPCDet/models/lidar-only-seg.pth + - /mnt/wato-drive/perception/pcds/pvrcnn_ros_ckpt.pth:/home/bolty/OpenPCDet/models/pvrcnn_ros_ckpt.pth 
+ - /mnt/wato-drive/perception/pcds/pvrcnn_base_ckpt.pth:/home/bolty/OpenPCDet/models/pvrcnn_base_ckpt.pth + # Data sets - /mnt/wato-drive2/nuscenes-1.0-mini:/home/bolty/OpenPCDet/data/nuscenes/v1.0-mini - /mnt/wato-drive2/wato-datasets/nuscenes/full-dataset:/home/bolty/OpenPCDet/data/nuscenes/v1.0-trainval traffic_light_detection: diff --git a/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml b/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml index c222e200..e515b2d7 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml +++ b/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml @@ -1,5 +1,5 @@ lidar_object_detection_node: ros__parameters: - lidar_topic: /velodyne_points + lidar_topic: /LIDAR_TOP model_config_path: /home/bolty/OpenPCDet/tools/cfgs/nuscenes_models/transfusion_lidar.yaml - model_path: /home/bolty/OpenPCDet/models/cbgs_transfusion_lidar.pth + model_path: /home/bolty/OpenPCDet/models/transfusion_trained_model.pth diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py index 198767cf..f16571d4 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py @@ -70,10 +70,9 @@ def pointcloud2_to_xyz_array(self, cloud_msg): cloud_array = np.frombuffer(cloud_msg.data, dtype=np.float32) num_fields = cloud_msg.point_step // 4 cloud_array = cloud_array.reshape(num_points, num_fields) - - if cloud_array.shape[1] > 4: - cloud_array = cloud_array[:, :4] - + if cloud_array.shape[1] <= 4: + timestamp = np.full((num_points, 1), fill_value=0.0, dtype=np.float32) + cloud_array = np.hstack((cloud_array, timestamp)) return cloud_array def publish_bounding_boxes(self, pointcloud_msg, pred_dicts): @@ -81,7 +80,7 @@ def publish_bounding_boxes(self, pointcloud_msg, pred_dicts): detections = Detection3DArray() detections.header = pointcloud_msg.header for idx, (box, score) in enumerate(zip(pred_dicts[0]["pred_boxes"], pred_dicts[0]["pred_scores"])): - if score > 0.1: + if score > 0.03: marker = Marker() marker.header = pointcloud_msg.header marker.id = idx From 002f29ba007437759b72a21e99d339996ed337f4 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Wed, 20 Mar 2024 04:04:00 +0000 Subject: [PATCH 50/70] updated data loader + mounted bevfusion pth --- modules/docker-compose.perception.yaml | 3 ++- .../lidar_object_detection/config/eve_config.yaml | 1 + .../lidar_object_detection/config/nuscenes_config.yaml | 1 + .../lidar_object_detection/lidar_object_detection_node.py | 6 +++++- 4 files changed, 9 insertions(+), 2 deletions(-) diff --git a/modules/docker-compose.perception.yaml b/modules/docker-compose.perception.yaml index 94d022ab..5fd61fca 100644 --- a/modules/docker-compose.perception.yaml +++ b/modules/docker-compose.perception.yaml @@ -43,13 +43,14 @@ services: command: /bin/bash -c "ros2 launch lidar_object_detection nuscenes_launch.py" volumes: - /mnt/wato-drive2/perception_models/transfusion_trained_model.pth:/home/bolty/OpenPCDet/models/transfusion_trained_model.pth - - 
/mnt/wato-drive2/perception_models/transfusion_trained_model.pth:/home/bolty/OpenPCDet/models/cbgs_transfusion_lidar.pth + - /mnt/wato-drive2/perception_models/cbgs_transfusion_lidar.pth:/home/bolty/OpenPCDet/models/cbgs_transfusion_lidar.pth - /mnt/wato-drive2/perception_models/pv_rcnn_8369.pth:/home/bolty/OpenPCDet/models/pv_rcnn_8369.pth - /mnt/wato-drive2/perception_models/nuscenes_trained_voxelNext.pth:/home/bolty/OpenPCDet/models/nuscenes_trained_voxelNext.pth # New Models to Try - /mnt/wato-drive/perception/bevfusion_pretrain/bevfusion-det.pth:/home/bolty/OpenPCDet/models/bevfusion-det.pth - /mnt/wato-drive/perception/bevfusion_pretrain/lidar-only-det.pth:/home/bolty/OpenPCDet/models/lidar-only-det.pth - /mnt/wato-drive/perception/bevfusion_pretrain/lidar-only-seg.pth:/home/bolty/OpenPCDet/models/lidar-only-seg.pth + - /mnt/wato-drive/perception/bevfusion_pretrain/lidar-only-seg.pth:/home/bolty/OpenPCDet/models/swint-nuimages-pretrained.pth - /mnt/wato-drive/perception/pcds/pvrcnn_ros_ckpt.pth:/home/bolty/OpenPCDet/models/pvrcnn_ros_ckpt.pth - /mnt/wato-drive/perception/pcds/pvrcnn_base_ckpt.pth:/home/bolty/OpenPCDet/models/pvrcnn_base_ckpt.pth # Data sets diff --git a/src/perception/lidar_object_detection/lidar_object_detection/config/eve_config.yaml b/src/perception/lidar_object_detection/lidar_object_detection/config/eve_config.yaml index 7117fddb..bfabe5b9 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/config/eve_config.yaml +++ b/src/perception/lidar_object_detection/lidar_object_detection/config/eve_config.yaml @@ -3,3 +3,4 @@ lidar_object_detection_node: lidar_topic: /velodyne_points model_config_path: /home/bolty/OpenPCDet/tools/cfgs/nuscenes_models/transfusion_lidar.yaml model_path: /home/bolty/OpenPCDet/models/transfusion_trained_model.pth + model_type: nuscenes diff --git a/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml b/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml index e515b2d7..d3e2fc89 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml +++ b/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml @@ -3,3 +3,4 @@ lidar_object_detection_node: lidar_topic: /LIDAR_TOP model_config_path: /home/bolty/OpenPCDet/tools/cfgs/nuscenes_models/transfusion_lidar.yaml model_path: /home/bolty/OpenPCDet/models/transfusion_trained_model.pth + model_type: nuscenes diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py index f16571d4..b9b319d3 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py @@ -22,9 +22,11 @@ def __init__(self): self.declare_parameter("model_path", "/home/bolty/OpenPCDet/models/transfusion_trained_model.pth") self.declare_parameter("model_config_path", "/home/bolty/OpenPCDet/tools/cfgs/nuscenes_models/transfusion_lidar.yaml") self.declare_parameter("lidar_topic", "/velodyne_points") + self.declare_parameter("model_type", "nuscenes") self.model_path = self.get_parameter("model_path").value self.model_config_path = self.get_parameter("model_config_path").value self.lidar_data = 
self.get_parameter("lidar_topic").value + self.model_type = self.get_parameter("model_type").value self.viz_publisher = self.create_publisher(MarkerArray, "/lidar_detections_viz", 10) self.detections_publisher = self.create_publisher(Detection3DArray, "/lidar_detections", 10) @@ -70,9 +72,11 @@ def pointcloud2_to_xyz_array(self, cloud_msg): cloud_array = np.frombuffer(cloud_msg.data, dtype=np.float32) num_fields = cloud_msg.point_step // 4 cloud_array = cloud_array.reshape(num_points, num_fields) - if cloud_array.shape[1] <= 4: + if cloud_array.shape[1] <= 4 and self.model_type == "nuscenes": timestamp = np.full((num_points, 1), fill_value=0.0, dtype=np.float32) cloud_array = np.hstack((cloud_array, timestamp)) + if cloud_array.shape[1] > 4 and self.model_type != "nuscenes": + cloud_array = cloud_array[:, :4] return cloud_array def publish_bounding_boxes(self, pointcloud_msg, pred_dicts): From 0f7b407ee7d4742ae5739c1e86ce67a2347fc699 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Fri, 22 Mar 2024 03:12:12 +0000 Subject: [PATCH 51/70] better data loader :) --- .../lidar_object_detection/config/eve_config.yaml | 1 - .../config/nuscenes_config.yaml | 1 - .../lidar_object_detection_node.py | 11 ++++------- 3 files changed, 4 insertions(+), 9 deletions(-) diff --git a/src/perception/lidar_object_detection/lidar_object_detection/config/eve_config.yaml b/src/perception/lidar_object_detection/lidar_object_detection/config/eve_config.yaml index bfabe5b9..7117fddb 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/config/eve_config.yaml +++ b/src/perception/lidar_object_detection/lidar_object_detection/config/eve_config.yaml @@ -3,4 +3,3 @@ lidar_object_detection_node: lidar_topic: /velodyne_points model_config_path: /home/bolty/OpenPCDet/tools/cfgs/nuscenes_models/transfusion_lidar.yaml model_path: /home/bolty/OpenPCDet/models/transfusion_trained_model.pth - model_type: nuscenes diff --git a/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml b/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml index d3e2fc89..e515b2d7 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml +++ b/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml @@ -3,4 +3,3 @@ lidar_object_detection_node: lidar_topic: /LIDAR_TOP model_config_path: /home/bolty/OpenPCDet/tools/cfgs/nuscenes_models/transfusion_lidar.yaml model_path: /home/bolty/OpenPCDet/models/transfusion_trained_model.pth - model_type: nuscenes diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py index b9b319d3..980562a0 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py @@ -15,18 +15,15 @@ from pcdet.models import build_network, load_data_to_gpu from pcdet.utils import common_utils - class LidarObjectDetection(Node): def __init__(self): super().__init__('lidar_object_detection') self.declare_parameter("model_path", "/home/bolty/OpenPCDet/models/transfusion_trained_model.pth") self.declare_parameter("model_config_path", "/home/bolty/OpenPCDet/tools/cfgs/nuscenes_models/transfusion_lidar.yaml") 
self.declare_parameter("lidar_topic", "/velodyne_points") - self.declare_parameter("model_type", "nuscenes") self.model_path = self.get_parameter("model_path").value self.model_config_path = self.get_parameter("model_config_path").value self.lidar_data = self.get_parameter("lidar_topic").value - self.model_type = self.get_parameter("model_type").value self.viz_publisher = self.create_publisher(MarkerArray, "/lidar_detections_viz", 10) self.detections_publisher = self.create_publisher(Detection3DArray, "/lidar_detections", 10) @@ -72,11 +69,11 @@ def pointcloud2_to_xyz_array(self, cloud_msg): cloud_array = np.frombuffer(cloud_msg.data, dtype=np.float32) num_fields = cloud_msg.point_step // 4 cloud_array = cloud_array.reshape(num_points, num_fields) - if cloud_array.shape[1] <= 4 and self.model_type == "nuscenes": + if cloud_array.shape[1] > 4: + cloud_array = cloud_array[:, :4] + if cloud_array.shape[1] <= 4: timestamp = np.full((num_points, 1), fill_value=0.0, dtype=np.float32) cloud_array = np.hstack((cloud_array, timestamp)) - if cloud_array.shape[1] > 4 and self.model_type != "nuscenes": - cloud_array = cloud_array[:, :4] return cloud_array def publish_bounding_boxes(self, pointcloud_msg, pred_dicts): @@ -84,7 +81,7 @@ def publish_bounding_boxes(self, pointcloud_msg, pred_dicts): detections = Detection3DArray() detections.header = pointcloud_msg.header for idx, (box, score) in enumerate(zip(pred_dicts[0]["pred_boxes"], pred_dicts[0]["pred_scores"])): - if score > 0.03: + if score > 0.7: marker = Marker() marker.header = pointcloud_msg.header marker.id = idx From b31e3316094f33109cebf9ea1f88181ed10fa6f8 Mon Sep 17 00:00:00 2001 From: Dylan Wang Date: Mon, 25 Mar 2024 01:15:26 +0000 Subject: [PATCH 52/70] fixed bounding box orientation issues --- .../lidar_object_detection_node.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py index 980562a0..a6892fd2 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py @@ -90,7 +90,12 @@ def publish_bounding_boxes(self, pointcloud_msg, pred_dicts): marker.pose.position.x = float(box[0]) marker.pose.position.y = float(box[1]) marker.pose.position.z = float(box[2]) - marker.pose.orientation.w = 1.0 + + # Calculate orientation quaternion + yaw = float(box[6]) + 1e-10 + marker.pose.orientation.z = np.sin(yaw / 2) + marker.pose.orientation.w = np.cos(yaw / 2) + marker.scale.x = float(box[3]) marker.scale.y = float(box[4]) marker.scale.z = float(box[5]) @@ -197,6 +202,5 @@ def main(args=None): node.destroy_node() rclpy.shutdown() - -if __name__ == "__main__": +if __name__ == '__main__': main() From f15eb0f533a283a60902323cc15f7d64169e5b7d Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Mon, 25 Mar 2024 03:06:05 +0000 Subject: [PATCH 53/70] implementing simple time syncer for messages --- .../lidar_object_detection/lidar_object_detection_node.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py 
b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py index a6892fd2..2ddc07a9 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py @@ -62,7 +62,8 @@ def point_cloud_callback(self, msg): with torch.no_grad(): pred_dicts, _ = self.model.forward(data_dict) - self.publish_bounding_boxes(msg, pred_dicts) + original_timestamp = msg.header.stamp + self.publish_bounding_boxes(msg, pred_dicts, original_timestamp) def pointcloud2_to_xyz_array(self, cloud_msg): num_points = cloud_msg.width * cloud_msg.height @@ -76,7 +77,7 @@ def pointcloud2_to_xyz_array(self, cloud_msg): cloud_array = np.hstack((cloud_array, timestamp)) return cloud_array - def publish_bounding_boxes(self, pointcloud_msg, pred_dicts): + def publish_bounding_boxes(self, pointcloud_msg, pred_dicts, original_timestamp): marker_array = MarkerArray() detections = Detection3DArray() detections.header = pointcloud_msg.header @@ -84,6 +85,7 @@ def publish_bounding_boxes(self, pointcloud_msg, pred_dicts): if score > 0.7: marker = Marker() marker.header = pointcloud_msg.header + marker.header.stamp = original_timestamp marker.id = idx marker.type = Marker.CUBE marker.action = Marker.ADD @@ -91,7 +93,7 @@ def publish_bounding_boxes(self, pointcloud_msg, pred_dicts): marker.pose.position.y = float(box[1]) marker.pose.position.z = float(box[2]) - # Calculate orientation quaternion + # Calculate orientation quaternion yaw = float(box[6]) + 1e-10 marker.pose.orientation.z = np.sin(yaw / 2) marker.pose.orientation.w = np.cos(yaw / 2) From fc8d18ad30032bd9f41189d92a1d8838400c96dd Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Mon, 25 Mar 2024 03:21:35 +0000 Subject: [PATCH 54/70] messing with confidence --- .../lidar_object_detection/config/nuscenes_config.yaml | 2 +- .../lidar_object_detection/lidar_object_detection_node.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml b/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml index e515b2d7..e6c7413f 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml +++ b/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml @@ -2,4 +2,4 @@ lidar_object_detection_node: ros__parameters: lidar_topic: /LIDAR_TOP model_config_path: /home/bolty/OpenPCDet/tools/cfgs/nuscenes_models/transfusion_lidar.yaml - model_path: /home/bolty/OpenPCDet/models/transfusion_trained_model.pth + model_path: /home/bolty/OpenPCDet/models/cbgs_transfusion_lidar.pth diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py index 2ddc07a9..4e77074e 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py @@ -82,7 +82,7 @@ def publish_bounding_boxes(self, pointcloud_msg, pred_dicts, original_timestamp) detections = Detection3DArray() detections.header = pointcloud_msg.header for idx, (box, 
score) in enumerate(zip(pred_dicts[0]["pred_boxes"], pred_dicts[0]["pred_scores"])): - if score > 0.7: + if score > 0.6: marker = Marker() marker.header = pointcloud_msg.header marker.header.stamp = original_timestamp From 92723b6b846bfb9e219dde1703371c9873e2609c Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Thu, 4 Apr 2024 23:23:38 +0000 Subject: [PATCH 55/70] fixing styling --- .../lidar_object_detection/launch/eve_launch.py | 1 + .../lidar_object_detection_node.py | 13 +++++++------ 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/src/perception/lidar_object_detection/lidar_object_detection/launch/eve_launch.py b/src/perception/lidar_object_detection/lidar_object_detection/launch/eve_launch.py index af515d44..78f2c2ac 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/launch/eve_launch.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/launch/eve_launch.py @@ -3,6 +3,7 @@ from ament_index_python.packages import get_package_share_directory import os + def generate_launch_description(): ld = LaunchDescription() config = os.path.join( diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py index 4e77074e..39e8d5a7 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py @@ -1,3 +1,9 @@ +# pylint: disable=wrong-import-position +sys.path.append("/home/bolty/OpenPCDet") +from pcdet.utils import common_utils +from pcdet.models import build_network, load_data_to_gpu +from pcdet.datasets import DatasetTemplate +from pcdet.config import cfg, cfg_from_yaml_file import sys import argparse import rclpy @@ -8,12 +14,6 @@ from vision_msgs.msg import ObjectHypothesisWithPose, Detection3D, Detection3DArray from sensor_msgs.msg import PointCloud2, PointField -# pylint: disable=wrong-import-position -sys.path.append("/home/bolty/OpenPCDet") -from pcdet.config import cfg, cfg_from_yaml_file -from pcdet.datasets import DatasetTemplate -from pcdet.models import build_network, load_data_to_gpu -from pcdet.utils import common_utils class LidarObjectDetection(Node): def __init__(self): @@ -204,5 +204,6 @@ def main(args=None): node.destroy_node() rclpy.shutdown() + if __name__ == '__main__': main() From 0fd32e0868d5e7abdbefead3cb711d6b98197105 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Thu, 4 Apr 2024 23:28:58 +0000 Subject: [PATCH 56/70] fixing styling again --- .../lidar_object_detection_node.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py index 39e8d5a7..5a1f248f 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py @@ -1,15 +1,17 @@ # pylint: disable=wrong-import-position sys.path.append("/home/bolty/OpenPCDet") -from pcdet.utils import common_utils -from pcdet.models import build_network, load_data_to_gpu -from 
pcdet.datasets import DatasetTemplate -from pcdet.config import cfg, cfg_from_yaml_file + import sys import argparse import rclpy from rclpy.node import Node import numpy as np import torch + +from pcdet.utils import common_utils +from pcdet.models import build_network, load_data_to_gpu +from pcdet.datasets import DatasetTemplate +from pcdet.config import cfg, cfg_from_yaml_file from visualization_msgs.msg import Marker, MarkerArray from vision_msgs.msg import ObjectHypothesisWithPose, Detection3D, Detection3DArray from sensor_msgs.msg import PointCloud2, PointField From d0c60053b8b73649c8a88cc1d9152f689b309747 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Thu, 4 Apr 2024 23:49:22 +0000 Subject: [PATCH 57/70] styling --- .../lidar_object_detection_node.py | 34 +++++++++---------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py index 5a1f248f..8b6f7c83 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py @@ -1,27 +1,27 @@ # pylint: disable=wrong-import-position -sys.path.append("/home/bolty/OpenPCDet") - -import sys -import argparse -import rclpy -from rclpy.node import Node -import numpy as np -import torch - -from pcdet.utils import common_utils -from pcdet.models import build_network, load_data_to_gpu -from pcdet.datasets import DatasetTemplate -from pcdet.config import cfg, cfg_from_yaml_file -from visualization_msgs.msg import Marker, MarkerArray -from vision_msgs.msg import ObjectHypothesisWithPose, Detection3D, Detection3DArray from sensor_msgs.msg import PointCloud2, PointField +from vision_msgs.msg import ObjectHypothesisWithPose, Detection3D, Detection3DArray +from visualization_msgs.msg import Marker, MarkerArray +from pcdet.config import cfg, cfg_from_yaml_file +from pcdet.datasets import DatasetTemplate +from pcdet.models import build_network, load_data_to_gpu +from pcdet.utils import common_utils +import torch +import numpy as np +from rclpy.node import Node +import rclpy +import argparse +import sys +sys.path.append("/home/bolty/OpenPCDet") class LidarObjectDetection(Node): def __init__(self): super().__init__('lidar_object_detection') - self.declare_parameter("model_path", "/home/bolty/OpenPCDet/models/transfusion_trained_model.pth") - self.declare_parameter("model_config_path", "/home/bolty/OpenPCDet/tools/cfgs/nuscenes_models/transfusion_lidar.yaml") + self.declare_parameter( + "model_path", "/home/bolty/OpenPCDet/models/transfusion_trained_model.pth") + self.declare_parameter( + "model_config_path", "/home/bolty/OpenPCDet/tools/cfgs/nuscenes_models/transfusion_lidar.yaml") self.declare_parameter("lidar_topic", "/velodyne_points") self.model_path = self.get_parameter("model_path").value self.model_config_path = self.get_parameter("model_config_path").value From 06cb03a1d10e2396e727bdc33b5d228a03624c3e Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Fri, 5 Apr 2024 03:45:53 +0000 Subject: [PATCH 58/70] removing old modules --- modules/docker-compose.perception.yaml | 25 ++----------------------- 1 file changed, 2 insertions(+), 23 deletions(-) diff --git a/modules/docker-compose.perception.yaml b/modules/docker-compose.perception.yaml 
index c5fd014f..53684ac3 100644 --- a/modules/docker-compose.perception.yaml +++ b/modules/docker-compose.perception.yaml @@ -60,28 +60,7 @@ services: # Data sets - /mnt/wato-drive2/nuscenes-1.0-mini:/home/bolty/OpenPCDet/data/nuscenes/v1.0-mini - /mnt/wato-drive2/wato-datasets/nuscenes/full-dataset:/home/bolty/OpenPCDet/data/nuscenes/v1.0-trainval - traffic_light_detection: - build: - context: .. - dockerfile: docker/perception/traffic_light_detection/traffic_light_detection.Dockerfile - cache_from: - - "${PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE}:${TAG}" - - "${PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE}:main" - target: deploy - image: "${PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE}:${TAG}" - command: /bin/bash -c "ros2 launch traffic_light_detection traffic_light_detection.launch.py" - - traffic_sign_detection: - build: - context: .. - dockerfile: docker/perception/traffic_sign_detection/traffic_sign_detection.Dockerfile - cache_from: - - "${PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE}:${TAG}" - - "${PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE}:main" - target: deploy - image: "${PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE}:${TAG}" - command: /bin/bash -c "ros2 launch traffic_sign_detection traffic_sign_detection.launch.py" - + semantic_segmentation: build: context: .. @@ -124,4 +103,4 @@ services: - "${PERCEPTION_DEPTH_ESTIMATION_IMAGE}:build_main" target: deploy image: "${PERCEPTION_DEPTH_ESTIMATION_IMAGE}:${TAG}" - command: /bin/bash -c "ros2 launch depth_estimation eve.launch.py" + command: /bin/bash -c "ros2 launch depth_estimation eve.launch.py" \ No newline at end of file From c643b434a90fe7bde751f4da3aeab7287e9fb7b2 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Mon, 27 May 2024 12:54:32 +0000 Subject: [PATCH 59/70] skip CI for lidar --- .github/templates/docker_context/docker_context.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/templates/docker_context/docker_context.sh b/.github/templates/docker_context/docker_context.sh index e8dc0d1f..dda15b60 100755 --- a/.github/templates/docker_context/docker_context.sh +++ b/.github/templates/docker_context/docker_context.sh @@ -39,6 +39,7 @@ while read -r module; do # Temporarily skip perception services that have too large image size if [[ "$service_out" == "lane_detection" ]] || \ [[ "$service_out" == "camera_object_detection" ]] || \ + [[ "$service_out" == "lidar_object_detection" ]] || \ [[ "$service_out" == "semantic_segmentation" ]]; then continue fi From 6a28e25e0de57982cf26199b183a1613d58dd8fa Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Tue, 11 Jun 2024 01:59:30 +0000 Subject: [PATCH 60/70] PR comments --- .../lidar_object_detection.Dockerfile | 58 +++++++--------- modules/docker-compose.perception.yaml | 15 ----- .../lidar_object_detection_node.py | 66 ++----------------- 3 files changed, 29 insertions(+), 110 deletions(-) diff --git a/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile b/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile index d80d1799..9964ea85 100644 --- a/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile +++ b/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile @@ -1,37 +1,24 @@ ARG BASE_IMAGE=ghcr.io/watonomous/wato_monorepo/base:cuda11.7-humble-ubuntu22.04-devel - ################################ Source ################################ FROM ${BASE_IMAGE} as source - WORKDIR ${AMENT_WS}/src - # Copy in source code COPY src/perception/lidar_object_detection lidar_object_detection - # Scan for rosdeps RUN apt-get 
-qq update && rosdep update && \ rosdep install --from-paths . --ignore-src -r -s \ | grep 'apt-get install' \ | awk '{print $3}' \ | sort > /tmp/colcon_install_list - - -################################# Dependencies ################################ -FROM ${BASE_IMAGE} as dependencies - -RUN apt-get update && apt-get install -y \ - git zip unzip libssl-dev libcairo2-dev lsb-release libgoogle-glog-dev libgflags-dev libatlas-base-dev libeigen3-dev software-properties-common \ - build-essential cmake pkg-config libapr1-dev autoconf automake libtool curl libc6 libboost-all-dev debconf libomp5 libstdc++6 \ - libqt5core5a libqt5xml5 libqt5gui5 libqt5widgets5 libqt5concurrent5 libqt5opengl5 libcap2 libusb-1.0-0 libatk-adaptor neovim \ - python3-pip python3-setuptools \ - && apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ - rm -rf /var/lib/apt/lists/* /root/* /root/.ros /tmp/* /usr/share/doc - +WORKDIR /home/bolty +RUN git clone https://github.com/WATonomous/OpenPCDet.git && \ + cd OpenPCDet \ + && git checkout 06fd4f862329625ff9ed850464330816e54531f8 ################################ INSTALL OpenCV with CUDA Support ############## WORKDIR /opt -RUN git clone https://github.com/opencv/opencv.git && \ +RUN git clone -b 4.x https://github.com/opencv/opencv.git && \ cd opencv && git checkout 4.5.5 -RUN git clone https://github.com/opencv/opencv_contrib.git && \ +RUN git clone -b 4.x https://github.com/opencv/opencv_contrib.git && \ cd opencv_contrib && git checkout 4.5.5 WORKDIR /opt/opencv/build RUN cmake -D CMAKE_BUILD_TYPE=RELEASE \ @@ -50,6 +37,15 @@ RUN cmake -D CMAKE_BUILD_TYPE=RELEASE \ -D ENABLE_PRECOMPILED_HEADERS=OFF \ .. && make -j$(nproc) && make install && ldconfig RUN rm -rf /opt/opencv /opt/opencv_contrib +################################# Dependencies ################################ +FROM ${BASE_IMAGE} as dependencies +RUN apt-get update && apt-get install -y \ + git zip unzip libssl-dev libcairo2-dev lsb-release libgoogle-glog-dev libgflags-dev libatlas-base-dev libeigen3-dev software-properties-common \ + build-essential cmake pkg-config libapr1-dev autoconf automake libtool curl libc6 libboost-all-dev debconf libomp5 libstdc++6 \ + libqt5core5a libqt5xml5 libqt5gui5 libqt5widgets5 libqt5concurrent5 libqt5opengl5 libcap2 libusb-1.0-0 libatk-adaptor neovim \ + python3-pip python3-setuptools \ + && apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ + rm -rf /var/lib/apt/lists/* /root/* /root/.ros /tmp/* /usr/share/doc # Set environment variables ENV CUDA_HOME /usr/local/cuda ENV LD_LIBRARY_PATH /usr/local/cuda/lib64:${LD_LIBRARY_PATH} @@ -60,46 +56,36 @@ RUN pip3 install torch==1.13.1+cu116 torchvision==0.14.1+cu116 torchaudio==0.13. 
RUN pip3 install torch-scatter -f https://data.pyg.org/whl/torch-1.13.1+cu116.html ENV TORCH_CUDA_ARCH_LIST="3.5;5.0;6.0;6.1;7.0;7.5;8.0;8.6+PTX" RUN pip3 install spconv-cu116 pyquaternion numpy==1.23 pillow==8.4 mayavi open3d av2 -WORKDIR /home/bolty -RUN git clone https://github.com/WATonomous/OpenPCDet.git -RUN cd OpenPCDet && pip3 install -r requirements.txt -RUN pip3 install kornia==0.6.8 -RUN pip3 install nuscenes-devkit==1.0.5 -WORKDIR /home/bolty/OpenPCDet/ -RUN python3 setup.py develop - # Install Rosdep requirements COPY --from=source /tmp/colcon_install_list /tmp/colcon_install_list RUN apt-get update && apt-fast update RUN apt-fast install -qq -y --no-install-recommends $(cat /tmp/colcon_install_list) - # Copy in source code from source stage WORKDIR ${AMENT_WS} COPY --from=source ${AMENT_WS}/src src - # Dependency Cleanup WORKDIR / RUN apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ rm -rf /root/* /root/.ros /tmp/* /var/lib/apt/lists/* /usr/share/doc/* - -################################ Source ####################################### +################################ Build ####################################### FROM dependencies as build - +WORKDIR /home/bolty/OpenPCDet/ +RUN pip3 install -r requirements.txt +RUN pip3 install kornia==0.6.8 +RUN pip3 install nuscenes-devkit==1.0.5 +WORKDIR /home/bolty/OpenPCDet/ +RUN python3 setup.py develop # Build ROS2 packages WORKDIR ${AMENT_WS} RUN . /opt/ros/$ROS_DISTRO/setup.sh && \ colcon build \ --cmake-args -DCMAKE_BUILD_TYPE=Release - # Entrypoint will run before any CMD on launch. Sources ~/opt//setup.bash and ~/ament_ws/install/setup.bash COPY docker/wato_ros_entrypoint.sh ${AMENT_WS}/wato_ros_entrypoint.sh ENTRYPOINT ["./wato_ros_entrypoint.sh"] - ################################ Prod ######################################### FROM build as deploy - # Source Cleanup and Security Setup RUN chown -R $USER:$USER ${AMENT_WS} RUN rm -rf src/* - USER ${USER} diff --git a/modules/docker-compose.perception.yaml b/modules/docker-compose.perception.yaml index 53684ac3..dba8ff6d 100644 --- a/modules/docker-compose.perception.yaml +++ b/modules/docker-compose.perception.yaml @@ -34,7 +34,6 @@ services: - /mnt/wato-drive2/perception_models/yolov8m.pt:/perception_models/yolov8m.pt - /mnt/wato-drive2/perception_models/traffic_light.pt:/perception_models/traffic_light.pt - /mnt/wato-drive2/perception_models/traffic_signs_v0.pt:/perception_models/traffic_signs_v1.pt - lidar_object_detection: build: context: .. 
@@ -47,20 +46,6 @@ services: command: /bin/bash -c "ros2 launch lidar_object_detection nuscenes_launch.py" volumes: - /mnt/wato-drive2/perception_models/transfusion_trained_model.pth:/home/bolty/OpenPCDet/models/transfusion_trained_model.pth - - /mnt/wato-drive2/perception_models/cbgs_transfusion_lidar.pth:/home/bolty/OpenPCDet/models/cbgs_transfusion_lidar.pth - - /mnt/wato-drive2/perception_models/pv_rcnn_8369.pth:/home/bolty/OpenPCDet/models/pv_rcnn_8369.pth - - /mnt/wato-drive2/perception_models/nuscenes_trained_voxelNext.pth:/home/bolty/OpenPCDet/models/nuscenes_trained_voxelNext.pth - # New Models to Try - - /mnt/wato-drive/perception/bevfusion_pretrain/bevfusion-det.pth:/home/bolty/OpenPCDet/models/bevfusion-det.pth - - /mnt/wato-drive/perception/bevfusion_pretrain/lidar-only-det.pth:/home/bolty/OpenPCDet/models/lidar-only-det.pth - - /mnt/wato-drive/perception/bevfusion_pretrain/lidar-only-seg.pth:/home/bolty/OpenPCDet/models/lidar-only-seg.pth - - /mnt/wato-drive/perception/bevfusion_pretrain/lidar-only-seg.pth:/home/bolty/OpenPCDet/models/swint-nuimages-pretrained.pth - - /mnt/wato-drive/perception/pcds/pvrcnn_ros_ckpt.pth:/home/bolty/OpenPCDet/models/pvrcnn_ros_ckpt.pth - - /mnt/wato-drive/perception/pcds/pvrcnn_base_ckpt.pth:/home/bolty/OpenPCDet/models/pvrcnn_base_ckpt.pth - # Data sets - - /mnt/wato-drive2/nuscenes-1.0-mini:/home/bolty/OpenPCDet/data/nuscenes/v1.0-mini - - /mnt/wato-drive2/wato-datasets/nuscenes/full-dataset:/home/bolty/OpenPCDet/data/nuscenes/v1.0-trainval - semantic_segmentation: build: context: .. diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py index 8b6f7c83..d9b89144 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py @@ -18,10 +18,8 @@ class LidarObjectDetection(Node): def __init__(self): super().__init__('lidar_object_detection') - self.declare_parameter( - "model_path", "/home/bolty/OpenPCDet/models/transfusion_trained_model.pth") - self.declare_parameter( - "model_config_path", "/home/bolty/OpenPCDet/tools/cfgs/nuscenes_models/transfusion_lidar.yaml") + self.declare_parameter("model_path") + self.declare_parameter("model_config_path") self.declare_parameter("lidar_topic", "/velodyne_points") self.model_path = self.get_parameter("model_path").value self.model_config_path = self.get_parameter("model_config_path").value @@ -37,7 +35,7 @@ def __init__(self): args, cfg = self.parse_config() self.logger = common_utils.create_logger() - self.demo_dataset = LidarDataset( + self.lidar_dataloader = LidarDatalodaer( dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False, @@ -45,7 +43,7 @@ def __init__(self): ) self.model = build_network( - model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=self.demo_dataset + model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=self.lidar_dataloader ) self.model.load_params_from_file(filename=args.ckpt, logger=self.logger, to_cpu=True) self.model.cuda() @@ -57,8 +55,8 @@ def point_cloud_callback(self, msg): "points": points, "frame_id": msg.header.frame_id, } - data_dict = self.demo_dataset.prepare_data(data_dict=data_dict) - data_dict = self.demo_dataset.collate_batch([data_dict]) + data_dict = 
self.lidar_dataloader.prepare_data(data_dict=data_dict) + data_dict = self.lidar_dataloader.collate_batch([data_dict]) load_data_to_gpu(data_dict) with torch.no_grad(): @@ -142,57 +140,7 @@ def parse_config(self): cfg_from_yaml_file(args.cfg_file, cfg) return args, cfg - def create_cloud_xyz32(self, header, points): - """ - Create a sensor_msgs/PointCloud2 message from an array of points. - - :param header: std_msgs/Header, the header of the message. - :param points: numpy.ndarray, an Nx3 array of xyz points. - :return: sensor_msgs/PointCloud2, the constructed PointCloud2 message. - """ - # Create fields for the PointCloud2 message - fields = [ - PointField(name="x", offset=0, datatype=PointField.FLOAT32, count=1), - PointField(name="y", offset=4, datatype=PointField.FLOAT32, count=1), - PointField(name="z", offset=8, datatype=PointField.FLOAT32, count=1), - ] - - y_angle = np.deg2rad(90) - z_angle = np.deg2rad(-90) - rotation_matrix_y = np.array( - [ - [np.cos(y_angle), 0, np.sin(y_angle)], - [0, 1, 0], - [-np.sin(y_angle), 0, np.cos(y_angle)], - ] - ) - rotation_matrix_z = np.array( - [ - [np.cos(z_angle), -np.sin(z_angle), 0], - [np.sin(z_angle), np.cos(z_angle), 0], - [0, 0, 1], - ] - ) - - points_np = points.cpu().numpy() - points_np_y = np.dot(points_np, rotation_matrix_y) - points_transformed = np.dot(points_np_y, rotation_matrix_z.T) - - cloud = PointCloud2() - cloud.header = header - cloud.height = 1 - cloud.width = points_transformed.shape[0] - cloud.fields = fields - cloud.is_bigendian = False - cloud.point_step = 12 - cloud.row_step = cloud.point_step * cloud.width - cloud.is_dense = bool(np.isfinite(points_transformed).all()) - cloud.data = np.asarray(points_transformed, np.float32).tobytes() - - return cloud - - -class LidarDataset(DatasetTemplate): +class LidarDatalodaer(DatasetTemplate): def __init__(self, dataset_cfg, class_names, training=True, logger=None, ext=".bin"): super().__init__( dataset_cfg=dataset_cfg, class_names=class_names, training=training, logger=logger From 8c9f195d16ecee2f1607f369c5855f45a9db3954 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Tue, 11 Jun 2024 02:25:56 +0000 Subject: [PATCH 61/70] setting bounding box to be optional in configs --- .../lidar_object_detection/config/nuscenes_config.yaml | 2 ++ .../lidar_object_detection/lidar_object_detection_node.py | 8 +++++--- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml b/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml index e6c7413f..a968de35 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml +++ b/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml @@ -3,3 +3,5 @@ lidar_object_detection_node: lidar_topic: /LIDAR_TOP model_config_path: /home/bolty/OpenPCDet/tools/cfgs/nuscenes_models/transfusion_lidar.yaml model_path: /home/bolty/OpenPCDet/models/cbgs_transfusion_lidar.pth + detection_options: + enable_detection: true diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py index d9b89144..b7a4ff32 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py +++ 
b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py @@ -20,10 +20,11 @@ def __init__(self): super().__init__('lidar_object_detection') self.declare_parameter("model_path") self.declare_parameter("model_config_path") - self.declare_parameter("lidar_topic", "/velodyne_points") + self.declare_parameter("lidar_topic") self.model_path = self.get_parameter("model_path").value self.model_config_path = self.get_parameter("model_config_path").value self.lidar_data = self.get_parameter("lidar_topic").value + self.publish_detection = self.get_parameter('detection_options.enable_detection').get_parameter_value().bool_value self.viz_publisher = self.create_publisher(MarkerArray, "/lidar_detections_viz", 10) self.detections_publisher = self.create_publisher(Detection3DArray, "/lidar_detections", 10) @@ -120,8 +121,9 @@ def publish_bounding_boxes(self, pointcloud_msg, pred_dicts, original_timestamp) detected_object.hypothesis.score = float(pred_dicts[0]["pred_scores"][idx]) detection.results.append(detected_object) detections.detections.append(detection) - - self.viz_publisher.publish(marker_array) + + if self.publish_detection: + self.viz_publisher.publish(marker_array) self.detections_publisher.publish(detections) def parse_config(self): From a0b046f86d08c49f9a08ec9d4f1bd1022d9930fa Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Tue, 11 Jun 2024 02:35:10 +0000 Subject: [PATCH 62/70] fixing pep8 --- .../lidar_object_detection/lidar_object_detection_node.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py index b7a4ff32..539e8f00 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py @@ -148,7 +148,6 @@ def __init__(self, dataset_cfg, class_names, training=True, logger=None, ext=".b dataset_cfg=dataset_cfg, class_names=class_names, training=training, logger=logger ) - def main(args=None): rclpy.init(args=args) node = LidarObjectDetection() From 824b2387764edfcf3a0eb9e391b87816fdb31437 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Tue, 11 Jun 2024 02:44:16 +0000 Subject: [PATCH 63/70] autopep8 --- .../lidar_object_detection/lidar_object_detection_node.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py index 539e8f00..b091958b 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py @@ -24,7 +24,8 @@ def __init__(self): self.model_path = self.get_parameter("model_path").value self.model_config_path = self.get_parameter("model_config_path").value self.lidar_data = self.get_parameter("lidar_topic").value - self.publish_detection = self.get_parameter('detection_options.enable_detection').get_parameter_value().bool_value + self.publish_detection = self.get_parameter( + 
'detection_options.enable_detection').get_parameter_value().bool_value self.viz_publisher = self.create_publisher(MarkerArray, "/lidar_detections_viz", 10) self.detections_publisher = self.create_publisher(Detection3DArray, "/lidar_detections", 10) @@ -122,6 +123,7 @@ def publish_bounding_boxes(self, pointcloud_msg, pred_dicts, original_timestamp) detection.results.append(detected_object) detections.detections.append(detection) + if self.publish_detection: self.viz_publisher.publish(marker_array) self.detections_publisher.publish(detections) @@ -144,9 +146,7 @@ def parse_config(self): class LidarDatalodaer(DatasetTemplate): def __init__(self, dataset_cfg, class_names, training=True, logger=None, ext=".bin"): - super().__init__( - dataset_cfg=dataset_cfg, class_names=class_names, training=training, logger=logger - ) + super().__init__(dataset_cfg=dataset_cfg, class_names=class_names, training=training, logger=logger) def main(args=None): rclpy.init(args=args) From c4f370542949ea11f12dc313d99a49b3a1ed2cdc Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Tue, 11 Jun 2024 02:47:45 +0000 Subject: [PATCH 64/70] autopep8 --- .../lidar_object_detection/lidar_object_detection_node.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py index b091958b..7f360fa8 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py @@ -123,7 +123,6 @@ def publish_bounding_boxes(self, pointcloud_msg, pred_dicts, original_timestamp) detection.results.append(detected_object) detections.detections.append(detection) - if self.publish_detection: self.viz_publisher.publish(marker_array) self.detections_publisher.publish(detections) @@ -144,10 +143,12 @@ def parse_config(self): cfg_from_yaml_file(args.cfg_file, cfg) return args, cfg + class LidarDatalodaer(DatasetTemplate): def __init__(self, dataset_cfg, class_names, training=True, logger=None, ext=".bin"): super().__init__(dataset_cfg=dataset_cfg, class_names=class_names, training=training, logger=logger) + def main(args=None): rclpy.init(args=args) node = LidarObjectDetection() From 93edc191c3daa325bafb1dd65fe7b753f9fb44a3 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Tue, 11 Jun 2024 02:49:48 +0000 Subject: [PATCH 65/70] autopep8 --- .../lidar_object_detection/lidar_object_detection_node.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py index 7f360fa8..d3920017 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py @@ -122,7 +122,6 @@ def publish_bounding_boxes(self, pointcloud_msg, pred_dicts, original_timestamp) detected_object.hypothesis.score = float(pred_dicts[0]["pred_scores"][idx]) detection.results.append(detected_object) detections.detections.append(detection) - if self.publish_detection: 
self.viz_publisher.publish(marker_array) self.detections_publisher.publish(detections) From a45626f01e5e85ea9218de63f3e9dd449d2306a8 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Thu, 13 Jun 2024 16:43:57 +0000 Subject: [PATCH 66/70] fixing docker staging issue --- .../lidar_object_detection/lidar_object_detection.Dockerfile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile b/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile index 9964ea85..e231e2ac 100644 --- a/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile +++ b/docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile @@ -69,11 +69,12 @@ RUN apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ rm -rf /root/* /root/.ros /tmp/* /var/lib/apt/lists/* /usr/share/doc/* ################################ Build ####################################### FROM dependencies as build -WORKDIR /home/bolty/OpenPCDet/ +COPY --from=source /home/bolty/OpenPCDet /home/bolty/OpenPCDet +WORKDIR /home/bolty/OpenPCDet RUN pip3 install -r requirements.txt RUN pip3 install kornia==0.6.8 RUN pip3 install nuscenes-devkit==1.0.5 -WORKDIR /home/bolty/OpenPCDet/ +WORKDIR /home/bolty/OpenPCDet RUN python3 setup.py develop # Build ROS2 packages WORKDIR ${AMENT_WS} From 43870bb50bf02f4f1928e21a1554808f1eb545ba Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Fri, 28 Jun 2024 02:38:26 +0000 Subject: [PATCH 67/70] added label server --- .../config/nuscenes_config.yaml | 1 - .../lidar_object_detection/label_server.py | 34 +++++++++++++++++++ .../lidar_object_detection_node.py | 22 +++++++++--- .../lidar_object_detection/setup.py | 3 +- 4 files changed, 53 insertions(+), 7 deletions(-) create mode 100644 src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/label_server.py diff --git a/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml b/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml index a968de35..b1d8c58b 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml +++ b/src/perception/lidar_object_detection/lidar_object_detection/config/nuscenes_config.yaml @@ -3,5 +3,4 @@ lidar_object_detection_node: lidar_topic: /LIDAR_TOP model_config_path: /home/bolty/OpenPCDet/tools/cfgs/nuscenes_models/transfusion_lidar.yaml model_path: /home/bolty/OpenPCDet/models/cbgs_transfusion_lidar.pth - detection_options: enable_detection: true diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/label_server.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/label_server.py new file mode 100644 index 00000000..466781f7 --- /dev/null +++ b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/label_server.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python + +import rclpy +from rclpy.node import Node +from vision_msgs.msg import VisionInfo + +class LabelServer(Node): + def __init__(self): + super().__init__('label_server') + self.publisher_ = self.create_publisher(VisionInfo, 'vision_info', 10) + self.label_mapping = { + 1: "car", + 2: "pedestrian", + 3: "cyclist", + } + self.publish_labels() + + def publish_labels(self): + vision_info_msg = VisionInfo() + vision_info_msg.database_location = "memory" + vision_info_msg.database_version = "1.0" + for label_id, 
class_name in self.label_mapping.items(): + vision_info_msg.class_map[label_id] = class_name + self.publisher_.publish(vision_info_msg) + +def main(args=None): + rclpy.init(args=args) + label_server = LabelServer() + rclpy.spin(label_server) + label_server.destroy_node() + rclpy.shutdown() + +if __name__ == '__main__': + main() diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py index d3920017..b6237c90 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py @@ -1,6 +1,6 @@ # pylint: disable=wrong-import-position -from sensor_msgs.msg import PointCloud2, PointField -from vision_msgs.msg import ObjectHypothesisWithPose, Detection3D, Detection3DArray +from sensor_msgs.msg import PointCloud2 +from vision_msgs.msg import ObjectHypothesisWithPose, Detection3D, Detection3DArray, VisionInfo from visualization_msgs.msg import Marker, MarkerArray from pcdet.config import cfg, cfg_from_yaml_file from pcdet.datasets import DatasetTemplate @@ -14,7 +14,6 @@ import sys sys.path.append("/home/bolty/OpenPCDet") - class LidarObjectDetection(Node): def __init__(self): super().__init__('lidar_object_detection') @@ -25,8 +24,14 @@ def __init__(self): self.model_config_path = self.get_parameter("model_config_path").value self.lidar_data = self.get_parameter("lidar_topic").value self.publish_detection = self.get_parameter( - 'detection_options.enable_detection').get_parameter_value().bool_value - + 'enable_detection').get_parameter_value().bool_value + + self.label_mapping = {} + self.subscription = self.create_subscription( + VisionInfo, + 'vision_info', + self.vision_info_callback, + 10) self.viz_publisher = self.create_publisher(MarkerArray, "/lidar_detections_viz", 10) self.detections_publisher = self.create_publisher(Detection3DArray, "/lidar_detections", 10) @@ -50,6 +55,9 @@ def __init__(self): self.model.load_params_from_file(filename=args.ckpt, logger=self.logger, to_cpu=True) self.model.cuda() self.model.eval() + + def vision_info_callback(self, msg): + self.label_mapping = msg.class_map def point_cloud_callback(self, msg): points = self.pointcloud2_to_xyz_array(msg) @@ -107,6 +115,10 @@ def publish_bounding_boxes(self, pointcloud_msg, pred_dicts, original_timestamp) marker.color.r = 1.0 marker.color.g = 0.0 marker.color.b = float(pred_dicts[0]["pred_labels"][idx]) / 3 + + class_id = pred_dicts[0]["pred_labels"][idx] + class_name = self.label_mapping.get(class_id, "unknown") + marker.text = class_name marker_array.markers.append(marker) detection = Detection3D() diff --git a/src/perception/lidar_object_detection/lidar_object_detection/setup.py b/src/perception/lidar_object_detection/lidar_object_detection/setup.py index 8852c904..46636840 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/setup.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/setup.py @@ -24,7 +24,8 @@ tests_require=['pytest'], entry_points={ 'console_scripts': [ - 'lidar_object_detection_node = lidar_object_detection.lidar_object_detection_node:main' + 'lidar_object_detection_node = lidar_object_detection.lidar_object_detection_node:main', + 'label_server = lidar_object_detection.label_server:main', ], }, ) 
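A minimal consumer sketch, not part of the patch series: it shows how the topics introduced in PATCH 67 could be exercised end to end. The topic name /lidar_detections and the three-entry class table are taken from the diffs above; the node name, the logging behaviour, and the choice to hard-code the table instead of assuming a particular VisionInfo field layout are illustrative assumptions only.

import rclpy
from rclpy.node import Node
from vision_msgs.msg import Detection3DArray

# Same id -> name table that label_server.py advertises on vision_info.
LABELS = {1: "car", 2: "pedestrian", 3: "cyclist"}


class DetectionLogger(Node):
    def __init__(self):
        super().__init__('lidar_detection_logger')
        self.get_logger().info(f"known classes: {sorted(LABELS.values())}")
        # /lidar_detections is the Detection3DArray topic published by
        # lidar_object_detection_node in the patches above.
        self._sub = self.create_subscription(
            Detection3DArray, '/lidar_detections', self.on_detections, 10)

    def on_detections(self, msg):
        for detection in msg.detections:
            for result in detection.results:
                # hypothesis.score is the confidence the node copies from
                # pred_scores; the class id itself is not assumed here.
                self.get_logger().info(
                    f"detection with score {result.hypothesis.score:.2f}")


def main(args=None):
    rclpy.init(args=args)
    node = DetectionLogger()
    rclpy.spin(node)
    node.destroy_node()
    rclpy.shutdown()


if __name__ == '__main__':
    main()

Hard-coding the table keeps the sketch independent of which VisionInfo field actually carries the id-to-name map, which is the detail most likely to differ across vision_msgs releases.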
From 34e0a21ca7d4d664bedf8b375af9fb1821b71a5a Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Fri, 28 Jun 2024 02:40:44 +0000 Subject: [PATCH 68/70] linter --- .../lidar_object_detection/label_server.py | 3 +++ .../lidar_object_detection/lidar_object_detection_node.py | 3 +++ .../lidar_object_detection/lidar_object_detection/setup.py | 3 ++- 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/label_server.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/label_server.py index 466781f7..92035ced 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/label_server.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/label_server.py @@ -4,6 +4,7 @@ from rclpy.node import Node from vision_msgs.msg import VisionInfo + class LabelServer(Node): def __init__(self): super().__init__('label_server') @@ -23,6 +24,7 @@ def publish_labels(self): vision_info_msg.class_map[label_id] = class_name self.publisher_.publish(vision_info_msg) + def main(args=None): rclpy.init(args=args) label_server = LabelServer() @@ -30,5 +32,6 @@ def main(args=None): label_server.destroy_node() rclpy.shutdown() + if __name__ == '__main__': main() diff --git a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py index b6237c90..7043493d 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py @@ -14,6 +14,7 @@ import sys sys.path.append("/home/bolty/OpenPCDet") + class LidarObjectDetection(Node): def __init__(self): super().__init__('lidar_object_detection') @@ -26,6 +27,7 @@ def __init__(self): self.publish_detection = self.get_parameter( 'enable_detection').get_parameter_value().bool_value + self.label_mapping = {} self.subscription = self.create_subscription( VisionInfo, @@ -56,6 +58,7 @@ def __init__(self): self.model.cuda() self.model.eval() + def vision_info_callback(self, msg): self.label_mapping = msg.class_map diff --git a/src/perception/lidar_object_detection/lidar_object_detection/setup.py b/src/perception/lidar_object_detection/lidar_object_detection/setup.py index 46636840..3568fc78 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/setup.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/setup.py @@ -12,7 +12,8 @@ ('share/ament_index/resource_index/packages', ['resource/' + package_name]), ('share/' + package_name, ['package.xml']), - (os.path.join('share', package_name, 'launch'), glob('launch/*.py')), + (os.path.join('share', package_name, 'launch'), + glob('launch/*.py')), (os.path.join('share', package_name, 'config'), glob('config/*.yaml')), ], install_requires=['setuptools'], From 43628a44316861246d9c96c698f23dda4e12dabd Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Fri, 28 Jun 2024 02:49:01 +0000 Subject: [PATCH 69/70] linter --- .../lidar_object_detection/lidar_object_detection_node.py | 4 +--- .../lidar_object_detection/lidar_object_detection/setup.py | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git 
a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py index 7043493d..d81a21bc 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/lidar_object_detection/lidar_object_detection_node.py @@ -26,7 +26,6 @@ def __init__(self): self.lidar_data = self.get_parameter("lidar_topic").value self.publish_detection = self.get_parameter( 'enable_detection').get_parameter_value().bool_value - self.label_mapping = {} self.subscription = self.create_subscription( @@ -57,8 +56,7 @@ def __init__(self): self.model.load_params_from_file(filename=args.ckpt, logger=self.logger, to_cpu=True) self.model.cuda() self.model.eval() - - + def vision_info_callback(self, msg): self.label_mapping = msg.class_map diff --git a/src/perception/lidar_object_detection/lidar_object_detection/setup.py b/src/perception/lidar_object_detection/lidar_object_detection/setup.py index 3568fc78..c0459e9a 100644 --- a/src/perception/lidar_object_detection/lidar_object_detection/setup.py +++ b/src/perception/lidar_object_detection/lidar_object_detection/setup.py @@ -12,7 +12,7 @@ ('share/ament_index/resource_index/packages', ['resource/' + package_name]), ('share/' + package_name, ['package.xml']), - (os.path.join('share', package_name, 'launch'), + (os.path.join('share', package_name, 'launch'), glob('launch/*.py')), (os.path.join('share', package_name, 'config'), glob('config/*.yaml')), ], From d72d2dcff8af9bfe177266996c5ad3319a993e37 Mon Sep 17 00:00:00 2001 From: Danryanh7 Date: Fri, 28 Jun 2024 02:50:33 +0000 Subject: [PATCH 70/70] for linter --- src/samples/python/aggregator/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/samples/python/aggregator/setup.py b/src/samples/python/aggregator/setup.py index b0afb9f6..f77c1804 100755 --- a/src/samples/python/aggregator/setup.py +++ b/src/samples/python/aggregator/setup.py @@ -14,7 +14,7 @@ # Include our package.xml file (os.path.join('share', package_name), ['package.xml']), # Include all launch files. - (os.path.join('share', package_name, 'launch'), \ + (os.path.join('share', package_name, 'launch'), glob(os.path.join('launch', '*.launch.py'))), ], install_requires=['setuptools'],
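A short sketch, assuming a generic ament_python package, of the data_files idiom that the final commit is tidying: because the tuple sits inside parentheses, Python continues the expression implicitly, so the trailing backslash removed in PATCH 70 was never needed. The package name and version below are placeholders rather than values from the repository.

import os
from glob import glob

from setuptools import setup

package_name = 'aggregator'  # placeholder; any ament_python package works

setup(
    name=package_name,
    version='0.0.0',
    packages=[package_name],
    data_files=[
        ('share/ament_index/resource_index/packages',
            ['resource/' + package_name]),
        (os.path.join('share', package_name), ['package.xml']),
        # The tuple is inside parentheses, so the line continues implicitly;
        # no trailing backslash is needed before the glob(...) call.
        (os.path.join('share', package_name, 'launch'),
            glob(os.path.join('launch', '*.launch.py'))),
    ],
    install_requires=['setuptools'],
    zip_safe=True,
)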