diff --git a/.github/templates/docker_context/docker_context.sh b/.github/templates/docker_context/docker_context.sh index 5e35355f..e8dc0d1f 100755 --- a/.github/templates/docker_context/docker_context.sh +++ b/.github/templates/docker_context/docker_context.sh @@ -24,6 +24,11 @@ while read -r module; do services=$(docker-compose -f "$module" config --services) module_out=$(echo "$module" | sed -n 's/modules\/docker-compose\.\(.*\)\.yaml/\1/p') + # Skip simulation module + if [[ 'simulation' = $module_out ]]; then + continue + fi + # Only work with modules that are modified if [[ $MODIFIED_MODULES != *$module_out* && $TEST_ALL = "false" ]]; then continue @@ -31,6 +36,12 @@ while read -r module; do # Loop through each service while read -r service_out; do + # Temporarily skip perception services that have too large image size + if [[ "$service_out" == "lane_detection" ]] || \ + [[ "$service_out" == "camera_object_detection" ]] || \ + [[ "$service_out" == "semantic_segmentation" ]]; then + continue + fi # Construct JSON object for each service with module and service name json_object=$(jq -nc --arg module_out "$module_out" --arg service_out "$service_out" \ '{module: $module_out, service: $service_out}') diff --git a/.github/workflows/build_and_unitest.yml b/.github/workflows/build_and_unitest.yml index 12ef538c..214815b1 100644 --- a/.github/workflows/build_and_unitest.yml +++ b/.github/workflows/build_and_unitest.yml @@ -10,7 +10,7 @@ on: jobs: setup-environment: - name: Setup environment + name: Setup Environment runs-on: ubuntu-latest outputs: @@ -29,7 +29,9 @@ jobs: uses: "./.github/templates/check_src_changes" - name: Setup Watod Environment - run: ./watod_scripts/watod-setup-env.sh + run: | + MODULES_DIR="$GITHUB_WORKSPACE/modules" + . 
./watod_scripts/watod-setup-env.sh shell: bash - name: Generate Docker Environment @@ -43,7 +45,7 @@ jobs: uses: "./.github/templates/github_context" build-and-unittest: - name: Build Image and Run Unit Testing Suite + name: Build/Test runs-on: ubuntu-latest needs: setup-environment @@ -124,9 +126,7 @@ jobs: ${{ steps.construct-registry-url.outputs.url }}:${{ env.SOURCE_BRANCH }} cache-from: | ${{ steps.construct-registry-url.outputs.url }}:build_${{ env.SOURCE_BRANCH }} - ${{ steps.construct-registry-url.outputs.url }}:${{ env.SOURCE_BRANCH }} - ${{ steps.construct-registry-url.outputs.url }}:${{ env.TARGET_BRANCH }} - cache-to: type=inline + ${{ steps.construct-registry-url.outputs.url }}:build_${{ env.TARGET_BRANCH }} builder: ${{ steps.buildx.outputs.name }} target: deploy diff --git a/.github/workflows/build_base_images.yml b/.github/workflows/build_base_images.yml index 094357d0..ac08006d 100644 --- a/.github/workflows/build_base_images.yml +++ b/.github/workflows/build_base_images.yml @@ -17,7 +17,9 @@ jobs: uses: actions/checkout@v4 - name: Setup Watod Environment - run: ./watod_scripts/watod-setup-env.sh + run: | + MODULES_DIR="$GITHUB_WORKSPACE/modules" + . 
./watod_scripts/watod-setup-env.sh shell: bash - name: Generate Docker Environment diff --git a/.github/workflows/linting_auto.yml b/.github/workflows/linting_auto.yml index a02b3b0c..a2b7646f 100644 --- a/.github/workflows/linting_auto.yml +++ b/.github/workflows/linting_auto.yml @@ -8,6 +8,7 @@ on: branches: - main types: + - unlabeled - labeled - synchronize @@ -51,4 +52,4 @@ jobs: repo: context.repo.repo, issue_number: context.issue.number, name: 'auto-lint' - }); \ No newline at end of file + }); diff --git a/.gitignore b/.gitignore index 9127d484..9be06ca7 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,5 @@ code_server_config/*/start_code_server.sh .env .vscode/ ssh_config/ + +watod-config.local.sh \ No newline at end of file diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000..89818293 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "src/perception/lane_detection/src/lane_detection_tensorrt"] + path = src/perception/lane_detection/src/lane_detection_tensorrt + url = git@github.com:WATonomous/lane_detection_tensorrt.git diff --git a/README.md b/README.md index 0bc93c47..e494cc21 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # WATonomous Monorepo v2 -Dockerized ROS2 setup for the WATonomous Autonomous Driving Software Pipeline +Dockerized ROS2 setup for the WATonomous Autonomous Vehicle Software Pipeline - [WATonomous Monorepo](#watonomous-monorepo) - [Getting Started](#getting-started) @@ -88,7 +88,7 @@ wato_monorepo_v2 ### Using Foxglove [Foxglove](https://foxglove.dev/) is used to visualize ROS messages on a local machine. -Add `data_stream` as an `ACTIVE_MODULES` and `watod up`. +Add `infrastructure` as an `ACTIVE_MODULES` and `watod up`. It exposes the port specified by the `FOXGLOVE_BRIDGE_PORT` variable, which you will need to forward to your local machine. 
This can either be done in the `ports` section of VS Code or by running the command `ssh -L 8765:localhost:8765 @-ubuntu1.watocluster.local` on your local machine. diff --git a/docker/base/inject_wato_base.Dockerfile b/docker/base/inject_wato_base.Dockerfile index 118bf5b5..61c0d89f 100644 --- a/docker/base/inject_wato_base.Dockerfile +++ b/docker/base/inject_wato_base.Dockerfile @@ -5,12 +5,15 @@ ARG GENERIC_IMAGE # for the wato_monorepo. FROM ${GENERIC_IMAGE} as wato_base -ENV DEBIAN_FRONTEND noninteractive +RUN DEBIAN_FRONTEND=noninteractive apt-get update && \ + apt-get install -y curl sudo && \ + rm -rf /var/lib/apt/lists/* + ENV USER="bolty" ENV AMENT_WS=/home/${USER}/ament_ws # User Setup -RUN apt-get update && apt-get install -y curl sudo && \ +RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y curl sudo && \ rm -rf /var/lib/apt/lists/* # Add a user so that created files in the docker container are owned by a non-root user (for prod) @@ -19,14 +22,14 @@ RUN addgroup --gid 1000 ${USER} && \ useradd -rm -d /home/${USER} -s /bin/bash -g ${USER} -G sudo -u 1000 ${USER} -p "$(openssl passwd -6 $USER_PASSWD)" # install apt-fast -RUN apt-get -qq update && \ +RUN DEBIAN_FRONTEND=noninteractive apt-get -qq update && \ apt-get install -qq -y wget && \ mkdir -p /etc/apt/keyrings && \ wget -O - "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xA2166B8DE8BDC3367D1901C11EE2FF37CA8DA16B" \ | gpg --dearmor -o /etc/apt/keyrings/apt-fast.gpg && \ echo "deb [signed-by=/etc/apt/keyrings/apt-fast.gpg] \ http://ppa.launchpad.net/apt-fast/stable/ubuntu focal main" > /etc/apt/sources.list.d/apt-fast.list && \ - apt-get update -qq && apt-get install -qq -y apt-fast && \ + DEBIAN_FRONTEND=noninteractive apt-get update -qq && DEBIAN_FRONTEND=noninteractive apt-get install -qq -y apt-fast && \ echo debconf apt-fast/maxdownloads string 16 | debconf-set-selections && \ echo debconf apt-fast/dlflag boolean true | debconf-set-selections && \ echo debconf 
apt-fast/aptmanager string apt-get | debconf-set-selections diff --git a/docker/perception/camera_object_detection/camera_object_detection.Dockerfile b/docker/perception/camera_object_detection/camera_object_detection.Dockerfile index da5959bc..5c3ee385 100644 --- a/docker/perception/camera_object_detection/camera_object_detection.Dockerfile +++ b/docker/perception/camera_object_detection/camera_object_detection.Dockerfile @@ -31,7 +31,7 @@ RUN rm requirements.txt # Install Rosdep requirements COPY --from=source /tmp/colcon_install_list /tmp/colcon_install_list -RUN apt-fast install -qq -y --no-install-recommends $(cat /tmp/colcon_install_list) +RUN apt-get update && apt-fast install -qq -y --no-install-recommends $(cat /tmp/colcon_install_list) # Copy in source code from source stage WORKDIR ${AMENT_WS} diff --git a/docker/simulation/carla_sim/carla_sim.Dockerfile b/docker/perception/depth_estimation/depth_estimation.Dockerfile similarity index 95% rename from docker/simulation/carla_sim/carla_sim.Dockerfile rename to docker/perception/depth_estimation/depth_estimation.Dockerfile index c49f8358..d51497ae 100644 --- a/docker/simulation/carla_sim/carla_sim.Dockerfile +++ b/docker/perception/depth_estimation/depth_estimation.Dockerfile @@ -6,8 +6,7 @@ FROM ${BASE_IMAGE} as source WORKDIR ${AMENT_WS}/src # Copy in source code -COPY src/simulation/carla_sim carla_sim -COPY src/wato_msgs/sample_msgs sample_msgs +COPY src/perception/depth_estimation depth_estimation # Scan for rosdeps RUN apt-get -qq update && rosdep update && \ diff --git a/docker/perception/lane_detection/lane_detection.Dockerfile b/docker/perception/lane_detection/lane_detection.Dockerfile index 2fa97918..73aea4b6 100644 --- a/docker/perception/lane_detection/lane_detection.Dockerfile +++ b/docker/perception/lane_detection/lane_detection.Dockerfile @@ -1,5 +1,5 @@ -ARG BASE_IMAGE=ghcr.io/watonomous/wato_monorepo/base:humble-ubuntu22.04 - +ARG 
BASE_IMAGE=ghcr.io/watonomous/wato_monorepo/base:cuda12.2-humble-ubuntu22.04-devel +ARG RUNTIME_IMAGE=ghcr.io/watonomous/wato_monorepo/base:cuda12.2-humble-ubuntu22.04 ################################ Source ################################ FROM ${BASE_IMAGE} as source @@ -7,7 +7,7 @@ WORKDIR ${AMENT_WS}/src # Copy in source code COPY src/perception/lane_detection lane_detection -COPY src/wato_msgs/sample_msgs sample_msgs +COPY src/wato_msgs/perception_msgs/lane_detection_msgs lane_detection_msgs # Scan for rosdeps RUN apt-get -qq update && rosdep update && \ @@ -19,6 +19,13 @@ RUN apt-get -qq update && rosdep update && \ ################################# Dependencies ################################ FROM ${BASE_IMAGE} as dependencies +RUN apt-get update && apt-get install -y libopencv-dev \ + python3-opencv \ + tensorrt \ + cuda-toolkit + +RUN export OpenCV_DIR=/usr/lib/x86_64-linux-gnu/cmake/opencv4/OpenCVConfig.cmake + # Install Rosdep requirements COPY --from=source /tmp/colcon_install_list /tmp/colcon_install_list RUN apt-fast install -qq -y --no-install-recommends $(cat /tmp/colcon_install_list) @@ -46,7 +53,21 @@ COPY docker/wato_ros_entrypoint.sh ${AMENT_WS}/wato_ros_entrypoint.sh ENTRYPOINT ["./wato_ros_entrypoint.sh"] ################################ Prod ################################ -FROM build as deploy +# Use a different runtime image for a smaller image size +FROM ${RUNTIME_IMAGE} as deploy + +# Install runtime libs +RUN apt-get update && apt-get install -y \ + ros-humble-cv-bridge \ + tensorrt + +# Copy the compiled binary to the runtime image +COPY --from=build ${AMENT_WS} ${AMENT_WS} + +WORKDIR ${AMENT_WS} + +COPY docker/wato_ros_entrypoint.sh ${AMENT_WS}/wato_ros_entrypoint.sh +ENTRYPOINT ["./wato_ros_entrypoint.sh"] # Source Cleanup and Security Setup RUN chown -R $USER:$USER ${AMENT_WS} diff --git a/docker/perception/radar_object_detection/radar_object_detection.Dockerfile 
b/docker/perception/radar_object_detection/radar_object_detection.Dockerfile index 82aeb8c8..6329d177 100644 --- a/docker/perception/radar_object_detection/radar_object_detection.Dockerfile +++ b/docker/perception/radar_object_detection/radar_object_detection.Dockerfile @@ -7,7 +7,7 @@ WORKDIR ${AMENT_WS}/src # Copy in source code COPY src/perception/radar_object_detection radar_object_detection -COPY src/wato_msgs/radar_msgs radar_msgs +COPY src/wato_msgs/perception_msgs/radar_msgs radar_msgs # Scan for rosdeps RUN apt-get -qq update && rosdep update && \ diff --git a/docker/perception/traffic_sign_detection/traffic_sign_detection.Dockerfile b/docker/perception/traffic_sign_detection/traffic_sign_detection.Dockerfile deleted file mode 100644 index a07d0507..00000000 --- a/docker/perception/traffic_sign_detection/traffic_sign_detection.Dockerfile +++ /dev/null @@ -1,55 +0,0 @@ -ARG BASE_IMAGE=ghcr.io/watonomous/wato_monorepo/base:humble-ubuntu22.04 - -################################ Source ################################ -FROM ${BASE_IMAGE} as source - -WORKDIR ${AMENT_WS}/src - -# Copy in source code -COPY src/perception/traffic_sign_detection traffic_sign_detection -COPY src/wato_msgs/sample_msgs sample_msgs - -# Scan for rosdeps -RUN apt-get -qq update && rosdep update && \ - rosdep install --from-paths . 
--ignore-src -r -s \ - | grep 'apt-get install' \ - | awk '{print $3}' \ - | sort > /tmp/colcon_install_list - -################################# Dependencies ################################ -FROM ${BASE_IMAGE} as dependencies - -# Install Rosdep requirements -COPY --from=source /tmp/colcon_install_list /tmp/colcon_install_list -RUN apt-fast install -qq -y --no-install-recommends $(cat /tmp/colcon_install_list) - -# Copy in source code from source stage -WORKDIR ${AMENT_WS} -COPY --from=source ${AMENT_WS}/src src - -# Dependency Cleanup -WORKDIR / -RUN apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ - rm -rf /root/* /root/.ros /tmp/* /var/lib/apt/lists/* /usr/share/doc/* - -################################ Build ################################ -FROM dependencies as build - -# Build ROS2 packages -WORKDIR ${AMENT_WS} -RUN . /opt/ros/$ROS_DISTRO/setup.sh && \ - colcon build \ - --cmake-args -DCMAKE_BUILD_TYPE=Release - -# Entrypoint will run before any CMD on launch. 
Sources ~/opt//setup.bash and ~/ament_ws/install/setup.bash -COPY docker/wato_ros_entrypoint.sh ${AMENT_WS}/wato_ros_entrypoint.sh -ENTRYPOINT ["./wato_ros_entrypoint.sh"] - -################################ Prod ################################ -FROM build as deploy - -# Source Cleanup and Security Setup -RUN chown -R $USER:$USER ${AMENT_WS} -RUN rm -rf src/* - -USER ${USER} diff --git a/docker/simulation/carla_notebooks/carla_notebooks.Dockerfile b/docker/simulation/carla_notebooks/carla_notebooks.Dockerfile new file mode 100644 index 00000000..2f50b621 --- /dev/null +++ b/docker/simulation/carla_notebooks/carla_notebooks.Dockerfile @@ -0,0 +1,23 @@ +ARG CARLA_VERSION=0.9.13 +FROM carlasim/carla:${CARLA_VERSION} AS wato_carla_api + +FROM python:3.8.16-slim-bullseye +ARG CARLA_VERSION=0.9.13 + +RUN pip3 install carla==${CARLA_VERSION} jupyter tensorflow-probability +RUN apt-get update && apt-get install -y curl git wget unzip && apt remove python3-networkx + +COPY --from=wato_carla_api --chown=root /home/carla/PythonAPI/carla/agents /usr/local/lib/python3.8/site-packages/agents + +WORKDIR /home/bolty/carla_notebooks +COPY src/simulation/carla_notebooks /home/bolty/carla_notebooks + +WORKDIR /home/bolty +# Setup CARLA Scenario Runner +# The last sed command replaces hero (default ego vehicle name) with another ego vehicle name +RUN git clone https://github.com/carla-simulator/scenario_runner.git && \ + cd scenario_runner && pip3 install -r requirements.txt && \ + sed -i s/hero/ego/g /home/bolty/scenario_runner/srunner/tools/scenario_parser.py +WORKDIR /home/bolty + +WORKDIR /home/bolty/carla_notebooks \ No newline at end of file diff --git a/docker/simulation/carla_ros_bridge/carla_ros_bridge.Dockerfile b/docker/simulation/carla_ros_bridge/carla_ros_bridge.Dockerfile new file mode 100644 index 00000000..57d952dc --- /dev/null +++ b/docker/simulation/carla_ros_bridge/carla_ros_bridge.Dockerfile @@ -0,0 +1,109 @@ +ARG 
BASE_IMAGE=ghcr.io/watonomous/wato_monorepo/base:foxy-ubuntu20.04 + +ARG CARLA_VERSION=0.9.13 +FROM carlasim/carla:${CARLA_VERSION} AS wato_carla_api + +################################ Source ################################ +FROM ${BASE_IMAGE} as source + +WORKDIR ${AMENT_WS}/src + +# Download ROS Bridge +RUN git clone --depth 1 --branch master --recurse-submodules https://github.com/carla-simulator/ros-bridge.git && \ + cd ros-bridge && \ + git checkout e9063d97ff5a724f76adbb1b852dc71da1dcfeec && \ + cd .. + +# Fix an error in the ackermann_control node +RUN sed -i s/simple_pid.PID/simple_pid.pid/g ./ros-bridge/carla_ackermann_control/src/carla_ackermann_control/carla_ackermann_control_node.py + +# Copy in source code +COPY src/simulation/carla_config carla_config +COPY src/wato_msgs/simulation ros_msgs + +# Scan for rosdeps +RUN apt-get -qq update && rosdep update --rosdistro foxy && \ + rosdep install --from-paths . -r -s \ + | grep 'apt-get install' \ + | awk '{print $3}' \ + | sort > /tmp/colcon_install_list + +################################# Dependencies ################################ +FROM ${BASE_IMAGE} as dependencies + +# Install dependencies +RUN apt-get update && \ + apt-fast install -qq -y --no-install-recommends lsb-release \ + libglu1-mesa-dev xorg-dev \ + software-properties-common \ + build-essential \ + python3-rosdep \ + python3-rospkg \ + python3-colcon-common-extensions \ + python3-pygame \ + ros-$ROS_DISTRO-tf2-geometry-msgs \ + ros-$ROS_DISTRO-tf2-eigen \ + ros-$ROS_DISTRO-ackermann-msgs \ + ros-$ROS_DISTRO-derived-object-msgs \ + ros-$ROS_DISTRO-cv-bridge \ + ros-$ROS_DISTRO-vision-opencv \ + ros-$ROS_DISTRO-rqt-image-view \ + ros-$ROS_DISTRO-rqt-gui-py \ + qt5-default \ + ros-$ROS_DISTRO-pcl-conversions \ + ros-$ROS_DISTRO-resource-retriever \ + ros-$ROS_DISTRO-yaml-cpp-vendor \ + ros-$ROS_DISTRO-urdf \ + ros-$ROS_DISTRO-map-msgs \ + ros-$ROS_DISTRO-laser-geometry \ + ros-$ROS_DISTRO-interactive-markers \ + ros-$ROS_DISTRO-rviz2 + +# 
Install Rosdep requirements +COPY --from=source /tmp/colcon_install_list /tmp/colcon_install_list +RUN apt-fast install -qq -y --no-install-recommends $(cat /tmp/colcon_install_list) + +# Copy in source code from source stage +WORKDIR ${AMENT_WS} +COPY --from=source ${AMENT_WS}/src src + +# Dependency Cleanup +WORKDIR / +RUN apt-get -qq autoremove -y && apt-get -qq autoclean && apt-get -qq clean && \ + rm -rf /root/* /root/.ros /tmp/* /var/lib/apt/lists/* /usr/share/doc/* + +################################ Build ################################ +FROM dependencies as build + +ARG CARLA_VERSION + +#Install Python Carla API +COPY --from=wato_carla_api --chown=root /home/carla/PythonAPI/carla /opt/carla/PythonAPI +WORKDIR /opt/carla/PythonAPI +RUN python3.8 -m easy_install pip && \ + pip3 install carla==${CARLA_VERSION} && \ + pip install simple-pid==2.0.0 && \ + pip install transforms3d==0.4.1 && \ + pip install pexpect==4.9.0 && \ + pip install networkx==3.1 + +WORKDIR ${AMENT_WS}/src + +# Build ROS2 packages +WORKDIR ${AMENT_WS} +RUN . /opt/ros/$ROS_DISTRO/setup.sh && \ + colcon build \ + --cmake-args -DCMAKE_BUILD_TYPE=Release + +# Entrypoint will run before any CMD on launch. 
Sources ~/opt//setup.bash and ~/ament_ws/install/setup.bash +COPY docker/wato_ros_entrypoint.sh ${AMENT_WS}/wato_ros_entrypoint.sh +ENTRYPOINT ["./wato_ros_entrypoint.sh"] + +################################ Prod ################################ +FROM build as deploy + +# Source Cleanup and Security Setup +RUN chown -R $USER:$USER ${AMENT_WS} +RUN rm -rf src/* + +USER ${USER} diff --git a/docker/perception/traffic_light_detection/traffic_light_detection.Dockerfile b/docker/simulation/carla_sample_node/carla_sample_node.Dockerfile similarity index 78% rename from docker/perception/traffic_light_detection/traffic_light_detection.Dockerfile rename to docker/simulation/carla_sample_node/carla_sample_node.Dockerfile index d91b79e5..b1e68d90 100644 --- a/docker/perception/traffic_light_detection/traffic_light_detection.Dockerfile +++ b/docker/simulation/carla_sample_node/carla_sample_node.Dockerfile @@ -6,8 +6,17 @@ FROM ${BASE_IMAGE} as source WORKDIR ${AMENT_WS}/src # Copy in source code -COPY src/perception/traffic_light_detection traffic_light_detection -COPY src/wato_msgs/sample_msgs sample_msgs +COPY src/simulation/carla_sample_node carla_sample_node +COPY src/wato_msgs/simulation/embedded_msgs embedded_msgs +COPY src/wato_msgs/simulation/path_planning_msgs path_planning_msgs + +# Carla specific messages + +RUN git clone --depth 1 https://github.com/ros-drivers/ackermann_msgs.git --branch 2.0.2 + +RUN git clone --depth 1 https://github.com/ros-perception/image_common.git --branch 3.1.8 + +RUN git clone --depth 1 https://github.com/carla-simulator/ros-carla-msgs.git --branch 1.3.0 # Scan for rosdeps RUN apt-get -qq update && rosdep update && \ diff --git a/docker/simulation/carla_viz/carla_viz.Dockerfile b/docker/simulation/carla_viz/carla_viz.Dockerfile new file mode 100644 index 00000000..f8a03c9e --- /dev/null +++ b/docker/simulation/carla_viz/carla_viz.Dockerfile @@ -0,0 +1,14 @@ +FROM mjxu96/carlaviz:0.9.13 + +ENV CARLAVIZ_BACKEND_HOST localhost +ENV 
CARLAVIZ_BACKEND_PORT 8081 +ENV CARLA_SERVER_HOST localhost +ENV CARLA_SERVER_PORT 2000 + +WORKDIR /home/carla/carlaviz + +COPY docker/simulation/carla_viz/carlaviz_entrypoint.sh /home/carla/carlaviz/docker/carlaviz_entrypoint.sh + +RUN chmod +x ./docker/carlaviz_entrypoint.sh + +ENTRYPOINT ["/bin/bash", "-c", "./docker/carlaviz_entrypoint.sh > /dev/null 2>&1"] \ No newline at end of file diff --git a/docker/simulation/carla_viz/carlaviz_entrypoint.sh b/docker/simulation/carla_viz/carlaviz_entrypoint.sh new file mode 100644 index 00000000..dbae6309 --- /dev/null +++ b/docker/simulation/carla_viz/carlaviz_entrypoint.sh @@ -0,0 +1,53 @@ + +is_backend_up="" + +function wait_for_backend_up() { + max_wait_time=5 + for ((i=0; i<=$max_wait_time; ++i)) do + cat output.txt | grep -q "Connected to Carla Server" + if [ $? == 0 ]; then + is_backend_up="1" + break + fi + sleep 1 + done +} + +function cleanup_backend() { + backend_pid=$(pidof backend) + kill -9 $backend_pid + echo "Killed Backend process $backend_pid" +} + +echo -e "CARLAVIZ_BACKEND_HOST=${CARLAVIZ_BACKEND_HOST}" >> /home/carla/.env +echo -e "CARLAVIZ_BACKEND_PORT=${CARLAVIZ_BACKEND_PORT}" >> /home/carla/.env + +echo "Make sure you have launched the Carla server." +echo "Launching backend." +./backend/bin/backend ${CARLA_SERVER_HOST} ${CARLA_SERVER_PORT} |& tee output.txt & +wait_for_backend_up +if [[ -z "$is_backend_up" ]]; then + echo "Backend is not launched. Please check if you have already started the Carla server." + cleanup_backend + exit 1 +fi + +echo "Backend launched." + +echo "Launching frontend" +# enable nginx +service nginx restart +echo "Frontend launched. Please open your browser" +sleep 10 +sed -i s/:8081/:$CARLAVIZ_BACKEND_PORT/g /var/www/carlaviz/bundle.js + +while [ "$is_backend_up" = "1" ] +do + cat output.txt | grep -q "time-out of" + if [ $? 
== 0 ]; then + is_backend_up="" + cleanup_backend + exit 1 + fi + sleep 5 +done \ No newline at end of file diff --git a/docs/carla.md b/docs/carla.md new file mode 100644 index 00000000..11a1f487 --- /dev/null +++ b/docs/carla.md @@ -0,0 +1,67 @@ +# CARLA Setup in Monorepo +CARLA is an open-source autonomous driving simulator based on Unreal Engine 4. The primary ways of interacting with the CARLA setup are through the Python API or with ROS2 (via the CARLA ROS Bridge). Both of these methods are explained in greater detail further down in this document. + +The goal of the CARLA setup is to provide an easy way for WATonomous members to interact with the simulator without having to setup anything themselves. So, if you have any questions or suggestions regarding any part of the CARLA setup (or questions about CARLA in general) please bring them up in Discord in #simulation-general or contact Vishal Jayakumar (masteroooogway on Discord or at [v3jayaku@watonomous.ca](mailto:v3jayaku@watonomous.ca)). + +- [CARLA WATonomous Documentation](#using-carla-setup-in-monorepo) + - [Initial Setup](#initial-setup) + - [Interacting with CARLA using the Python API](#interacting-with-carla-using-the-python-api) + - [Using a ROS Node to interact with CARLA](#using-a-ros-node-to-interact-with-carla) + - [CARLA Visualization using Foxglove Studio](#carla-visualization-using-foxglove-studio) + - [CARLA Visualization using CarlaViz](#carla-visualization-using-carlaviz) + - [FAQ](#faq) + - [CARLA is running very slow (approx. 3 fps)](#carla-is-running-very-slow-approx-3-fps) + +**Make sure you are confortable with navigating the monorepo before reading** + +## Initial Setup + +To run CARLA and all associated containers first add `simulation` as an `ACTIVE_MODULE` in `watod-config.sh`. This will cause the following containers to launch when `watod up` is run: `carla_server`, `carla_viz`, `carla_ros_bridge`, `carla_notebooks`, `carla_sample_node`. 
+ +## Interacting with CARLA using the Python API + +This is the simplest way of interacting with CARLA and most importantly does not require the usage or knowledge of ROS. The documentation below will only show the setup procedure for using the CARLA Python API in the WATOnomous server (via Jupyter Notebook). The full CARLA Python API documentation can be found here ([CARLA Python API Documentation](https://carla.readthedocs.io/en/0.9.13/python_api/)). + +**On the WATOnomous Server** + +Forward the port specified by the variable `CARLA_NOTEBOOKS_PORT` (which can be found in the `.env` file in the `modules` folder once `watod up` is run) using either the `ports` section of VS Code or by running the command `ssh -L 8888:localhost:8888 @-ubuntu1.watocluster.local` on your local machine (replace 8888 with the port specified in the `CARLA_NOTEBOOKS_PORT` variable). If the port is auto-forwarded by VS Code then you do not need to add it manually. + +**On your local machine (your personal laptop/pc)** + +Open any web browser (Chrome, Firefox, Edge etc.) and type `localhost:8888` (replace 8888 with the port specified in the `CARLA_NOTEBOOKS_PORT` variable) and click enter. You should be automatically redirected to the Jupyter Notebook home screen where you should see the file `carla_notebooks.ipynb`. Open that file and follow the instructions in it. Either edit the `carla_notebooks.ipynb` file directly or create a new file and copy the first code block in `carla_notebooks.ipynb`. + +## Using a ROS Node to interact with CARLA + +**Ensure that you have a good understanding of ROS and writing ROS nodes before proceeding** + +Currently there is a CARLA sample node setup (written in Python) shows how to publish a message to enable (and keep enabled) autopilot, subsribe to the GNSS sensor topic and output the data to the console. 
What is probably most helpful from the sample node is the Dockerfile located in `wato_monorepo_v2/docker/simulation/carla_sample_node/carla_sample_node.Dockerfile`, where you can find all the CARLA related messages that should be installed when building your own ROS nodes that interact with CARLA. + +## CARLA Visualization using Foxglove Studio + +Foxglove Studio is a tool to visualize and interact with ROS messages. Using Foxglove Studio, data such as Camera, LiDAR, Radar etc. can be visualized. Foxglove Studio also allows for messages to be published to topics, for example to enable autopilot or set vehicle speed. The documentation below will only show the setup procedure for using Foxglove Studio with the WATOnomous server. Further documentation regarding how to use Foxglove Studio and all its features can be found here ([Foxglove Studio Documentation](https://foxglove.dev/docs/studio)) + +**On the WATOnomous Server** + +Add `vis_tools` as an `ACTIVE_PROFILE`. This will launch the `foxglove.Dockerfile` container with an open port when `watod up` is ran. + +It exposes the port specified by the `FOXGLOVE_BRIDGE_PORT` variable, which will be defined in the `.env` file in the `profiles` folder (the `.env` file is populated after `watod up` is run). This port may be auto-forwarded by VS Code automatically but if not the port will need to be forwarded manually to your local machine. This can either be done in the `ports` section of VS Code or by running the command `ssh -L 8765:localhost:8765 @-ubuntu1.watocluster.local` on your local machine (replace 8765 with the port specified in the `FOXGLOVE_BRIDGE_PORT` variable). + +**On your local machine (your personal laptop/pc)** + +Open Foxglove Studio on your local machine using either the desktop app ([Download Here](https://foxglove.dev/studio)) or the [Web Application](https://studio.foxglove.dev/) (only supported on Chrome). 
Click on Open Connection then replace the default port (8765) with the port you specified in the `FOXGLOVE_BRIDGE_PORT` variable. Then click the Open button and after a few seconds Foxglove Studio should connect. You should now be able to access any of the topic being published or subscribed by the CARLA ROS Bridge. + +## CARLA Visualization using CarlaViz + +**On the WATOnomous Server** + +CarlaViz is a visualization tool that is useful when you are only using the CARLA Python API through a Jupyter Notebook and you don't want the overhead that comes with the ROS Bridge + Foxglove method of visualization. To use CarlaViz forward the ports specified by the variables `CARLAVIZ_PORT` and `CARLAVIZ_PORT_2` defined in the `.env` file in the `profiles` folder (make sure you forward **both** ports). If you want to stop the ROS Bridge from running (for the reasons I mentioned in the previous sentences) then make sure to comment out the `carla_ros_bridge` service in the `docker-compose.carla.yaml` file in the `profiles` folder. + +**On your local machine (your personal laptop/pc)** + +In any browser go to `localhost:8081` (replace 8081 with the port specified in the `CARLAVIZ_PORT` variable) and after waiting 10 seconds or so CarlaViz should appear. + +## FAQ + +### CARLA is running very slow (approx. 3 fps) + +This is expected. The ROS bridge causes CARLA to render all the sensor data which slows down the simulation considerably. While this may be annoying when viewing real-time camera output or trying to control the car manual, the simulation is still running accurately. If you wish to replay scenes with a configurable playback speed, check out the docs on [CARLA's recorder](https://carla.readthedocs.io/en/0.9.13/adv_recorder/) functionality. 
\ No newline at end of file diff --git a/docs/dev/how_to_dev.md b/docs/dev/how_to_dev.md index 409c5ae2..0719e966 100644 --- a/docs/dev/how_to_dev.md +++ b/docs/dev/how_to_dev.md @@ -92,7 +92,7 @@ services: - "${SAMPLES_CPP_AGGREGATOR_IMAGE:?}-${CACHE_FROM_TAG}" - "${SAMPLES_CPP_AGGREGATOR_IMAGE:?}-develop" # name of the image made by the dockerfile (boilerplate, but with name change) - image: "${SAMPLES_CPP_AGGREGATOR_IMAGE:?}-${TAG}" + image: "${SAMPLES_CPP_AGGREGATOR_IMAGE:?}-build_${TAG}" # deals with permission and ownership in the container (boilerplate) user: ${FIXUID:?}:${FIXGID:?} # IMPORTANT: mounts your ROS2 node into the container so that changes in the dockerfile are reflected in your diff --git a/docs/dev/rosbag.md b/docs/dev/rosbag.md index 8e98e667..a78f3a8f 100644 --- a/docs/dev/rosbag.md +++ b/docs/dev/rosbag.md @@ -5,9 +5,9 @@ More on bags can be found here: https://docs.ros.org/en/humble/Tutorials/Beginne ## To Use -Add `data_stream` as an `ACTIVE_PROFILE` in `watod-config.sh`. +Add `infrastructure` as an `ACTIVE_PROFILE` in `watod-config.sh`. -Run `watod up` (or however you want to launch the `data_stream` service). +Run `watod up` (or however you want to launch the `infrastructure` service). The working directory of the `data_stream` container should have a `nuscenes` directory, which contains the NuScenes dataset converted to ros2bag format. To confirm this, run `watod run data_stream ls nuscenes` to view the available bags. Each bag has its own directory. The location of the `.mcap` file is `/_0.mcap`. For example, one of the bags is in `nuscenes/NuScenes-v1.0-mini-scene-0061/NuScenes-v1.0-mini-scene-0061_0.mcap`. diff --git a/docs/setup/setup.md b/docs/setup/setup.md index 9f4c8f21..03c3b78b 100644 --- a/docs/setup/setup.md +++ b/docs/setup/setup.md @@ -27,7 +27,7 @@ Note: These are all available on WATonomous servers by default. ## Running Your First Program in the Monorepo These steps will let you run our sample nodes module. 
To run other modules, refer to our modules documentation. -1. Clone this repo onto the host machine by using `$ git clone git@github.com:WATonomous/wato_monorepo.git`. We recommend you clone the repo into your home directory, `~` +1. Clone this repo onto the host machine by using `$ git clone --recurse-submodules git@github.com:WATonomous/wato_monorepo.git`. The `--recurse-submodules` flag is needed to initialize submodules required by the repo - if you did not clone with this flag, run `git submodule update --init --recursive` inside the repo. We recommend you clone the repo into your home directory, `~` 2. Login to the our container registry by using `docker login ghcr.io`. Provide your GitHub Username and GitHub Token. 3. Configure to only run our sample nodes by uncommenting and setting `ACTIVE_MODULES="samples"` in `watod-config.sh` 4. Run `$ ./watod pull` to pull latest docker images from our container registry. diff --git a/modules/dev_overrides/docker-compose.action.yaml b/modules/dev_overrides/docker-compose.action.yaml index 1318b6bc..1984e6db 100644 --- a/modules/dev_overrides/docker-compose.action.yaml +++ b/modules/dev_overrides/docker-compose.action.yaml @@ -10,6 +10,7 @@ services: extends: file: ../docker-compose.action.yaml service: global_planning + image: "${ACTION_GLOBAL_PLANNING_IMAGE}:build_${TAG}" command: tail -F anything volumes: - ${MONO_DIR}/src/action/global_planning:/home/ament_ws/src/global_planning @@ -19,6 +20,7 @@ services: extends: file: ../docker-compose.action.yaml service: behaviour_planning + image: "${ACTION_BEHAVIOUR_PLANNING_IMAGE}:build_${TAG}" command: tail -F anything volumes: - ${MONO_DIR}/src/action/behaviour_planning:/home/ament_ws/src/behaviour_planning @@ -28,6 +30,7 @@ services: extends: file: ../docker-compose.action.yaml service: local_planning + image: "${ACTION_LOCAL_PLANNING_IMAGE}:build_${TAG}" command: tail -F anything volumes: - ${MONO_DIR}/src/action/local_planning:/home/ament_ws/src/local_planning @@ -37,6 
+40,7 @@ services: extends: file: ../docker-compose.action.yaml service: model_predictive_control + image: "${ACTION_MPC_IMAGE}:build_${TAG}" command: tail -F anything volumes: - ${MONO_DIR}/src/action/model_predictive_control:/home/ament_ws/src/model_predictive_control \ No newline at end of file diff --git a/modules/dev_overrides/docker-compose.interfacing.yaml b/modules/dev_overrides/docker-compose.interfacing.yaml index 503469d9..12a5db5b 100644 --- a/modules/dev_overrides/docker-compose.interfacing.yaml +++ b/modules/dev_overrides/docker-compose.interfacing.yaml @@ -10,6 +10,7 @@ services: extends: file: ../docker-compose.interfacing.yaml service: sensor_interfacing + image: "${INTERFACING_SENSOR_IMAGE}:build_${TAG}" command: tail -F anything volumes: - ${MONO_DIR}/src/sensor_interfacing:/home/bolty/ament_ws/src/sensor_interfacing @@ -19,6 +20,7 @@ services: extends: file: ../docker-compose.interfacing.yaml service: can_interfacing + image: "${INTERFACING_CAN_IMAGE}:build_${TAG}" command: tail -F anything volumes: - ${MONO_DIR}/src/can_interfacing:/home/bolty/ament_ws/src/can_interfacing diff --git a/modules/dev_overrides/docker-compose.perception.yaml b/modules/dev_overrides/docker-compose.perception.yaml index 4cd9bfdb..73fc96ac 100644 --- a/modules/dev_overrides/docker-compose.perception.yaml +++ b/modules/dev_overrides/docker-compose.perception.yaml @@ -10,6 +10,7 @@ services: extends: file: ../docker-compose.perception.yaml service: radar_object_detection + image: "${PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE:?}:build_${TAG}" command: tail -F anything volumes: - ${MONO_DIR}/src/perception/radar_object_detection:/home/bolty/ament_ws/src/radar_object_detection @@ -19,6 +20,7 @@ services: extends: file: ../docker-compose.perception.yaml service: camera_object_detection + image: "${PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE:?}:build_${TAG}" command: tail -F anything volumes: - 
${MONO_DIR}/src/perception/camera_object_detection:/home/bolty/ament_ws/src/camera_object_detection @@ -28,33 +30,17 @@ services: extends: file: ../docker-compose.perception.yaml service: lidar_object_detection + image: "${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE}:build_${TAG}" command: tail -F anything volumes: - ${MONO_DIR}/src/perception/lidar_object_detection:/home/bolty/ament_ws/src/lidar_object_detection - traffic_light_detection: - <<: *fixuid - extends: - file: ../docker-compose.perception.yaml - service: traffic_light_detection - command: tail -F anything - volumes: - - ${MONO_DIR}/src/perception/traffic_light_detection:/home/bolty/ament_ws/src/traffic_light_detection - - traffic_sign_detection: - <<: *fixuid - extends: - file: ../docker-compose.perception.yaml - service: traffic_sign_detection - command: tail -F anything - volumes: - - ${MONO_DIR}/src/perception/traffic_sign_detection:/home/bolty/ament_ws/src/traffic_sign_detection - semantic_segmentation: <<: *fixuid extends: file: ../docker-compose.perception.yaml service: semantic_segmentation + image: "${PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE}:build_${TAG}" command: tail -F anything volumes: - ${MONO_DIR}/src/perception/semantic_segmentation:/home/bolty/ament_ws/src/semantic_segmentation @@ -65,6 +51,7 @@ services: extends: file: ../docker-compose.perception.yaml service: lane_detection + image: "${PERCEPTION_LANE_DETECTION_IMAGE}:build_${TAG}" command: tail -F anything volumes: - ${MONO_DIR}/src/perception/lane_detection:/home/bolty/ament_ws/src/lane_detection @@ -74,6 +61,17 @@ services: extends: file: ../docker-compose.perception.yaml service: tracking + image: "${PERCEPTION_TRACKING_IMAGE}:build_${TAG}" command: tail -F anything volumes: - ${MONO_DIR}/src/perception/tracking:/home/bolty/ament_ws/src/tracking + + depth_estimation: + <<: *fixuid + extends: + file: ../docker-compose.perception.yaml + service: depth_estimation + image: "${PERCEPTION_DEPTH_ESTIMATION_IMAGE}:build_${TAG}" + command: tail -F 
anything + volumes: + - ${MONO_DIR}/src/perception/depth_estimation:/home/bolty/ament_ws/src/depth_estimation diff --git a/modules/dev_overrides/docker-compose.samples.yaml b/modules/dev_overrides/docker-compose.samples.yaml index 2d49b7bf..335da171 100644 --- a/modules/dev_overrides/docker-compose.samples.yaml +++ b/modules/dev_overrides/docker-compose.samples.yaml @@ -10,6 +10,7 @@ services: extends: file: ../docker-compose.samples.yaml service: cpp_aggregator + image: "${SAMPLES_CPP_AGGREGATOR_IMAGE:?}:build_${TAG}" command: tail -F anything volumes: - ${MONO_DIR}/src/samples/cpp/aggregator:/home/bolty/ament_ws/src/aggregator @@ -19,6 +20,8 @@ services: # extends: # file: ../docker-compose.samples.yaml # service: py_aggregator + # image: "${SAMPLES_PYTHON_AGGREGATOR_IMAGE:?}:build_${TAG}" + # command: tail -F anything # volumes: # - ${MONO_DIR}/src/samples/python/aggregator:/home/bolty/ament_ws/src/aggregator @@ -27,6 +30,8 @@ services: # extends: # file: ../docker-compose.samples.yaml # service: cpp_producer + # image: "${SAMPLES_CPP_PRODUCER_IMAGE:?}:build_${TAG}" + # command: tail -F anything # volumes: # - ${MONO_DIR}/src/samples/cpp/producer:/home/bolty/ament_ws/src/producer @@ -35,6 +40,7 @@ services: extends: file: ../docker-compose.samples.yaml service: py_producer + image: "${SAMPLES_PYTHON_PRODUCER_IMAGE:?}:build_${TAG}" command: tail -F anything volumes: - ${MONO_DIR}/src/samples/python/producer:/home/bolty/ament_ws/src/producer @@ -44,6 +50,8 @@ services: # extends: # file: ../docker-compose.samples.yaml # service: cpp_transformer + # image: "${SAMPLES_CPP_TRANSFORMER_IMAGE:?}:build_${TAG}" + # command: tail -F anything # volumes: # - ${MONO_DIR}/src/samples/cpp/transformer:/home/bolty/ament_ws/src/transformer @@ -52,6 +60,7 @@ services: extends: file: ../docker-compose.samples.yaml service: py_transformer + image: "${SAMPLES_PYTHON_TRANSFORMER_IMAGE:?}:build_${TAG}" command: tail -F anything volumes: - 
${MONO_DIR}/src/samples/python/transformer:/home/bolty/ament_ws/src/transformer diff --git a/modules/dev_overrides/docker-compose.simulation.yaml b/modules/dev_overrides/docker-compose.simulation.yaml index 04730356..2da94571 100644 --- a/modules/dev_overrides/docker-compose.simulation.yaml +++ b/modules/dev_overrides/docker-compose.simulation.yaml @@ -6,10 +6,35 @@ x-fixuid: &fixuid services: carla_sim: - <<: *fixuid extends: file: ../docker-compose.simulation.yaml service: carla_sim + + carla_ros_bridge: + <<: *fixuid + extends: + file: ../docker-compose.simulation.yaml + service: carla_ros_bridge + command: tail -F anything + volumes: + - ${MONO_DIR}/src/simulation/carla_config:/home/bolty/ament_ws/src/carla_config + # command: /bin/bash -c "ros2 launch carla_config carla.launch.py" + + carla_viz: + extends: + file: ../docker-compose.simulation.yaml + service: carla_viz + + carla_sample_node: + <<: *fixuid + extends: + file: ../docker-compose.simulation.yaml + service: carla_sample_node command: tail -F anything volumes: - - ${MONO_DIR}/src/simulation/carla_sim:/home/bolty/ament_ws/src/carla_sim + - ${MONO_DIR}/src/simulation/carla_sample_node:/home/bolty/ament_ws/src/carla_sample_node + + carla_notebooks: + extends: + file: ../docker-compose.simulation.yaml + service: carla_notebooks diff --git a/modules/dev_overrides/docker-compose.world_modeling.yaml b/modules/dev_overrides/docker-compose.world_modeling.yaml index 67b03241..7682c63a 100644 --- a/modules/dev_overrides/docker-compose.world_modeling.yaml +++ b/modules/dev_overrides/docker-compose.world_modeling.yaml @@ -10,6 +10,7 @@ services: extends: file: ../docker-compose.world_modeling.yaml service: hd_map + image: "${WORLD_MODELING_HD_MAP_IMAGE}:build_${TAG}" command: tail -F anything volumes: - ${MONO_DIR}/src/world_modeling:/home/ament_ws/src/hd_map @@ -19,6 +20,7 @@ services: extends: file: ../docker-compose.world_modeling.yaml service: localization + image: 
"${WORLD_MODELING_LOCALIZATION_IMAGE}:build_${TAG}" command: tail -F anything volumes: - ${MONO_DIR}/src/world_modeling:/home/ament_ws/src/localization @@ -28,6 +30,7 @@ services: extends: file: ../docker-compose.world_modeling.yaml service: occupancy + image: "${WORLD_MODELING_OCCUPANCY_IMAGE}:build_${TAG}" command: tail -F anything volumes: - ${MONO_DIR}/src/world_modeling:/home/ament_ws/src/occupancy @@ -37,6 +40,7 @@ services: extends: file: ../docker-compose.world_modeling.yaml service: occupancy_segmentation + image: "${WORLD_MODELING_OCCUPANCY_SEGMENTATION_IMAGE}:build_${TAG}" command: tail -F anything volumes: - ${MONO_DIR}/src/world_modeling:/home/ament_ws/src/occupancy_segmentation @@ -46,6 +50,7 @@ services: extends: file: ../docker-compose.world_modeling.yaml service: motion_forecasting + image: "${WORLD_MODELING_MOTION_FORECASTING_IMAGE}:build_${TAG}" command: tail -F anything volumes: - ${MONO_DIR}/src/world_modeling:/home/ament_ws/src/motion_forecasting diff --git a/modules/docker-compose.action.yaml b/modules/docker-compose.action.yaml index ab4743dd..ab73667e 100644 --- a/modules/docker-compose.action.yaml +++ b/modules/docker-compose.action.yaml @@ -6,8 +6,8 @@ services: context: .. dockerfile: docker/action/global_planning/global_planning.Dockerfile cache_from: - - "${ACTION_GLOBAL_PLANNING_IMAGE}:${TAG}" - - "${ACTION_GLOBAL_PLANNING_IMAGE}:main" + - "${ACTION_GLOBAL_PLANNING_IMAGE}:build_${TAG}" + - "${ACTION_GLOBAL_PLANNING_IMAGE}:build_main" target: deploy image: "${ACTION_GLOBAL_PLANNING_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch global_planning global_planning.launch.py" @@ -17,8 +17,8 @@ services: context: .. 
dockerfile: docker/action/behaviour_planning/behaviour_planning.Dockerfile cache_from: - - "${ACTION_BEHAVIOUR_PLANNING_IMAGE}:${TAG}" - - "${ACTION_BEHAVIOUR_PLANNING_IMAGE}:main" + - "${ACTION_BEHAVIOUR_PLANNING_IMAGE}:build_${TAG}" + - "${ACTION_BEHAVIOUR_PLANNING_IMAGE}:build_main" target: deploy image: "${ACTION_BEHAVIOUR_PLANNING_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch behaviour_planning behaviour_planning.launch.py" @@ -28,8 +28,8 @@ services: context: .. dockerfile: docker/action/local_planning/local_planning.Dockerfile cache_from: - - "${ACTION_LOCAL_PLANNING_IMAGE}:${TAG}" - - "${ACTION_LOCAL_PLANNING_IMAGE}:main" + - "${ACTION_LOCAL_PLANNING_IMAGE}:build_${TAG}" + - "${ACTION_LOCAL_PLANNING_IMAGE}:build_main" target: deploy image: "${ACTION_LOCAL_PLANNING_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch local_planning local_planning.launch.py" @@ -39,8 +39,8 @@ services: context: .. dockerfile: docker/action/model_predictive_control/model_predictive_control.Dockerfile cache_from: - - "${ACTION_MPC_IMAGE}:${TAG}" - - "${ACTION_MPC_IMAGE}:main" + - "${ACTION_MPC_IMAGE}:build_${TAG}" + - "${ACTION_MPC_IMAGE}:build_main" target: deploy image: "${ACTION_MPC_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch model_predictive_control model_predictive_control.launch.py" diff --git a/modules/docker-compose.infrastructure.yaml b/modules/docker-compose.infrastructure.yaml index e8db67b9..63adceef 100644 --- a/modules/docker-compose.infrastructure.yaml +++ b/modules/docker-compose.infrastructure.yaml @@ -6,8 +6,8 @@ services: context: .. 
dockerfile: docker/infrastructure/foxglove/foxglove.Dockerfile cache_from: - - "${INFRASTRUCTURE_FOXGLOVE_IMAGE:?}:${TAG}" - - "${INFRASTRUCTURE_FOXGLOVE_IMAGE:?}:main" + - "${INFRASTRUCTURE_FOXGLOVE_IMAGE:?}:build_${TAG}" + - "${INFRASTRUCTURE_FOXGLOVE_IMAGE:?}:build_main" target: deploy image: "${INFRASTRUCTURE_FOXGLOVE_IMAGE:?}:${TAG}" # command: ["ros2", "launch", "foxglove_bridge", "foxglove_bridge_launch.xml", "port:=${FOXGLOVE_BRIDGE_PORT:?}"] @@ -20,8 +20,8 @@ services: # context: .. # dockerfile: docker/infrastructure/vnc/vnc.Dockerfile # cache_from: - # - "${INFRASTRUCTURE_VIS_TOOLS_VNC_IMAGE:?}:${TAG}" - # - "${INFRASTRUCTURE_VIS_TOOLS_VNC_IMAGE:?}:main" + # - "${INFRASTRUCTURE_VIS_TOOLS_VNC_IMAGE:?}:build_${TAG}" + # - "${INFRASTRUCTURE_VIS_TOOLS_VNC_IMAGE:?}:build_main" # image: "${INFRASTRUCTURE_VIS_TOOLS_VNC_IMAGE:?}:${TAG}" # ports: # - "${GUI_TOOLS_VNC_PORT:?}:5900" @@ -33,8 +33,8 @@ services: context: .. dockerfile: docker/infrastructure/data_stream/data_stream.Dockerfile cache_from: - - "${INFRASTRUCTURE_DATA_STREAM_IMAGE:?}:${TAG}" - - "${INFRASTRUCTURE_DATA_STREAM_IMAGE:?}:main" + - "${INFRASTRUCTURE_DATA_STREAM_IMAGE:?}:build_${TAG}" + - "${INFRASTRUCTURE_DATA_STREAM_IMAGE:?}:build_main" target: deploy image: "${INFRASTRUCTURE_DATA_STREAM_IMAGE:?}:${TAG}" volumes: diff --git a/modules/docker-compose.interfacing.yaml b/modules/docker-compose.interfacing.yaml index 0024ef07..51c39102 100644 --- a/modules/docker-compose.interfacing.yaml +++ b/modules/docker-compose.interfacing.yaml @@ -6,8 +6,8 @@ services: context: .. 
dockerfile: docker/interfacing/sensor_interfacing/sensor_interfacing.Dockerfile cache_from: - - "${INTERFACING_SENSOR_IMAGE}:${TAG}" - - "${INTERFACING_SENSOR_IMAGE}:main" + - "${INTERFACING_SENSOR_IMAGE}:build_${TAG}" + - "${INTERFACING_SENSOR_IMAGE}:build_main" target: deploy image: "${INTERFACING_SENSOR_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch sensor_interfacing sensor_interfacing.launch.py" @@ -17,8 +17,8 @@ services: context: .. dockerfile: docker/interfacing/can_interfacing/can_interfacing.Dockerfile cache_from: - - "${INTERFACING_CAN_IMAGE}:${TAG}" - - "${INTERFACING_CAN_IMAGE}:main" + - "${INTERFACING_CAN_IMAGE}:build_${TAG}" + - "${INTERFACING_CAN_IMAGE}:build_main" target: deploy image: "${INTERFACING_CAN_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch can_interfacing can_interfacing.launch.py" diff --git a/modules/docker-compose.perception.yaml b/modules/docker-compose.perception.yaml index 2c640212..99dc9a49 100644 --- a/modules/docker-compose.perception.yaml +++ b/modules/docker-compose.perception.yaml @@ -6,8 +6,8 @@ services: context: .. dockerfile: docker/perception/radar_object_detection/radar_object_detection.Dockerfile cache_from: - - "${PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE:?}:${TAG}" - - "${PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE:?}:main" + - "${PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE:?}:build_${TAG}" + - "${PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE:?}:build_main" target: deploy image: "${PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE:?}:${TAG}" command: /bin/bash -c "ros2 launch radar_object_detection radar_object_detection.launch.py" @@ -17,10 +17,11 @@ services: context: .. 
dockerfile: docker/perception/camera_object_detection/camera_object_detection.Dockerfile cache_from: - - "${PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE:?}:${TAG}" - - "${PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE:?}:main" + - "${PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE:?}:build_${TAG}" + - "${PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE:?}:build_main" target: deploy image: "${PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE:?}:${TAG}" + shm_size: 8G deploy: resources: reservations: @@ -28,50 +29,31 @@ services: - driver: nvidia count: 1 capabilities: [ gpu ] - command: /bin/bash -c "ros2 launch camera_object_detection nuscenes_launch.py" + command: /bin/bash -c "ros2 launch camera_object_detection eve.launch.py" volumes: - - /mnt/wato-drive2/perception_models/yolov8s.pt:/perception_models/yolov8s.pt + - /mnt/wato-drive2/perception_models/yolov8m.pt:/perception_models/yolov8m.pt + - /mnt/wato-drive2/perception_models/traffic_light.pt:/perception_models/traffic_light.pt + - /mnt/wato-drive2/perception_models/traffic_signs_v0.pt:/perception_models/traffic_signs_v1.pt lidar_object_detection: build: context: .. dockerfile: docker/perception/lidar_object_detection/lidar_object_detection.Dockerfile cache_from: - - "${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE}:${TAG}" - - "${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE}:main" + - "${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE}:build_${TAG}" + - "${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE}:build_main" target: deploy image: "${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch lidar_object_detection lidar_object_detection.launch.py" - traffic_light_detection: - build: - context: .. 
- dockerfile: docker/perception/traffic_light_detection/traffic_light_detection.Dockerfile - cache_from: - - "${PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE}:${TAG}" - - "${PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE}:main" - target: deploy - image: "${PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE}:${TAG}" - command: /bin/bash -c "ros2 launch traffic_light_detection traffic_light_detection.launch.py" - - traffic_sign_detection: - build: - context: .. - dockerfile: docker/perception/traffic_sign_detection/traffic_sign_detection.Dockerfile - cache_from: - - "${PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE}:${TAG}" - - "${PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE}:main" - target: deploy - image: "${PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE}:${TAG}" - command: /bin/bash -c "ros2 launch traffic_sign_detection traffic_sign_detection.launch.py" semantic_segmentation: build: context: .. dockerfile: docker/perception/semantic_segmentation/semantic_segmentation.Dockerfile cache_from: - - "${PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE}:${TAG}" - - "${PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE}:main" - # target: deploy + - "${PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE}:build_${TAG}" + - "${PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE}:build_main" + target: deploy image: "${PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch semantic_segmentation semantic_segmentation.launch.py" volumes: @@ -90,19 +72,39 @@ services: context: .. 
dockerfile: docker/perception/lane_detection/lane_detection.Dockerfile cache_from: - - "${PERCEPTION_LANE_DETECTION_IMAGE}:${TAG}" - - "${PERCEPTION_LANE_DETECTION_IMAGE}:main" + - "${PERCEPTION_LANE_DETECTION_IMAGE}:build_${TAG}" + - "${PERCEPTION_LANE_DETECTION_IMAGE}:build_main" target: deploy image: "${PERCEPTION_LANE_DETECTION_IMAGE}:${TAG}" - command: /bin/bash -c "ros2 launch lane_detection lane_detection.launch.py" + command: /bin/bash -c "ros2 launch lane_detection eve.launch.py" + deploy: + resources: + reservations: + devices: + - driver: nvidia + count: 1 + capabilities: [gpu] + volumes: + - /mnt/wato-drive2/perception-weights/tensorrt_model/ultra_fast_lane_detection_v2/resource/model/ufldv2_culane_res34_320x1600.onnx:/models/ufldv2_culane_res34_320x1600.onnx tracking: build: context: .. dockerfile: docker/perception/tracking/tracking.Dockerfile cache_from: - - "${PERCEPTION_TRACKING_IMAGE}:${TAG}" - - "${PERCEPTION_TRACKING_IMAGE}:main" + - "${PERCEPTION_TRACKING_IMAGE}:build_${TAG}" + - "${PERCEPTION_TRACKING_IMAGE}:build_main" target: deploy image: "${PERCEPTION_TRACKING_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch tracking tracking.launch.py" + + depth_estimation: + build: + context: .. + dockerfile: docker/perception/depth_estimation/depth_estimation.Dockerfile + cache_from: + - "${PERCEPTION_DEPTH_ESTIMATION_IMAGE}:build_${TAG}" + - "${PERCEPTION_DEPTH_ESTIMATION_IMAGE}:build_main" + target: deploy + image: "${PERCEPTION_DEPTH_ESTIMATION_IMAGE}:${TAG}" + command: /bin/bash -c "ros2 launch depth_estimation eve.launch.py" diff --git a/modules/docker-compose.samples.yaml b/modules/docker-compose.samples.yaml index f094d080..38cd838a 100644 --- a/modules/docker-compose.samples.yaml +++ b/modules/docker-compose.samples.yaml @@ -6,8 +6,8 @@ services: context: .. 
dockerfile: docker/samples/cpp_aggregator/cpp_aggregator.Dockerfile cache_from: - - "${SAMPLES_CPP_AGGREGATOR_IMAGE:?}:${TAG}" - - "${SAMPLES_CPP_AGGREGATOR_IMAGE:?}:main" + - "${SAMPLES_CPP_AGGREGATOR_IMAGE:?}:build_${TAG}" + - "${SAMPLES_CPP_AGGREGATOR_IMAGE:?}:build_main" target: deploy image: "${SAMPLES_CPP_AGGREGATOR_IMAGE:?}:${TAG}" command: /bin/bash -c "ros2 launch aggregator aggregator.launch.py" @@ -17,8 +17,9 @@ services: # context: .. # dockerfile: docker/samples/py_aggregator/py_aggregator.Dockerfile # cache_from: - # - "${SAMPLES_PYTHON_AGGREGATOR_IMAGE:?}:${TAG}" - # - "${SAMPLES_PYTHON_AGGREGATOR_IMAGE:?}:main" + # - "${SAMPLES_PYTHON_AGGREGATOR_IMAGE:?}:build_${TAG}" + # - "${SAMPLES_PYTHON_AGGREGATOR_IMAGE:?}:build_main" + # target: deploy # image: "${SAMPLES_PYTHON_AGGREGATOR_IMAGE:?}:${TAG}" # command: /bin/bash -c "ros2 launch aggregator aggregator.launch.py" @@ -27,8 +28,9 @@ services: # context: .. # dockerfile: docker/samples/cpp_producer/cpp_producer.Dockerfile # cache_from: - # - "${SAMPLES_CPP_PRODUCER_IMAGE:?}:${TAG}" - # - "${SAMPLES_CPP_PRODUCER_IMAGE:?}:main" + # - "${SAMPLES_CPP_PRODUCER_IMAGE:?}:build_${TAG}" + # - "${SAMPLES_CPP_PRODUCER_IMAGE:?}:build_main" + # target: deploy # image: "${SAMPLES_CPP_PRODUCER_IMAGE:?}:${TAG}" # command: /bin/bash -c "ros2 launch producer producer.launch.py" @@ -37,8 +39,8 @@ services: context: .. dockerfile: docker/samples/py_producer/py_producer.Dockerfile cache_from: - - "${SAMPLES_PYTHON_PRODUCER_IMAGE:?}:${TAG}" - - "${SAMPLES_PYTHON_PRODUCER_IMAGE:?}:main" + - "${SAMPLES_PYTHON_PRODUCER_IMAGE:?}:build_${TAG}" + - "${SAMPLES_PYTHON_PRODUCER_IMAGE:?}:build_main" target: deploy image: "${SAMPLES_PYTHON_PRODUCER_IMAGE:?}:${TAG}" command: /bin/bash -c "ros2 launch producer producer.launch.py" @@ -48,8 +50,9 @@ services: # context: .. 
# dockerfile: docker/samples/cpp_transformer/cpp_transformer.Dockerfile # cache_from: - # - "${SAMPLES_CPP_TRANSFORMER_IMAGE:?}:${TAG}" - # - "${SAMPLES_CPP_TRANSFORMER_IMAGE:?}:main" + # - "${SAMPLES_CPP_TRANSFORMER_IMAGE:?}:build_${TAG}" + # - "${SAMPLES_CPP_TRANSFORMER_IMAGE:?}:build_main" + # target: deploy # image: "${SAMPLES_CPP_TRANSFORMER_IMAGE:?}:${TAG}" # command: /bin/bash -c "ros2 launch transformer transformer.launch.py" @@ -58,8 +61,8 @@ services: context: .. dockerfile: docker/samples/py_transformer/py_transformer.Dockerfile cache_from: - - "${SAMPLES_PYTHON_TRANSFORMER_IMAGE:?}:${TAG}" - - "${SAMPLES_PYTHON_TRANSFORMER_IMAGE:?}:main" + - "${SAMPLES_PYTHON_TRANSFORMER_IMAGE:?}:build_${TAG}" + - "${SAMPLES_PYTHON_TRANSFORMER_IMAGE:?}:build_main" target: deploy image: "${SAMPLES_PYTHON_TRANSFORMER_IMAGE:?}:${TAG}" command: /bin/bash -c "ros2 launch transformer transformer.launch.py" diff --git a/modules/docker-compose.simulation.yaml b/modules/docker-compose.simulation.yaml index ceba4f8b..241f4567 100644 --- a/modules/docker-compose.simulation.yaml +++ b/modules/docker-compose.simulation.yaml @@ -2,12 +2,76 @@ version: "3.8" services: carla_sim: + image: carlasim/carla:0.9.13 + environment: + - DISPLAY=1 + - CUDA_VISIBLE_DEVICES=0,1,2 + - NVIDIA_VISIBLE_DEVICES=0,1,2 + runtime: nvidia + restart: always + command: /bin/bash -c "./CarlaUE4.sh -nosound -carla-server -RenderOffscreen -world-port=2000 -quality-level=Low" + + carla_ros_bridge: build: context: .. 
- dockerfile: docker/perception/carla_sim/carla_sim.Dockerfile + dockerfile: docker/simulation/carla_ros_bridge/carla_ros_bridge.Dockerfile cache_from: - - "${SIMULATION_CARLA_IMAGE:?}:${TAG}" - - "${SIMULATION_CARLA_IMAGE:?}:main" + - "${SIMULATION_CARLA_ROS_BRIDGE_IMAGE:?}:build_${TAG}" + - "${SIMULATION_CARLA_ROS_BRIDGE_IMAGE:?}:build_main" target: deploy - image: "${SIMULATION_CARLA_IMAGE:?}:${TAG}" - command: /bin/bash -c "ros2 launch carla_sim carla_sim.launch.py" + image: "${SIMULATION_CARLA_ROS_BRIDGE_IMAGE}:${TAG}" + environment: + - CARLA_HOSTNAME=${COMPOSE_PROJECT_NAME:?}-carla_sim-1 + - USE_ACKERMANN_CONTROL=False + # command: /bin/bash -c "echo CARLA_ROS_BRIDGE disabled" + command: /bin/bash -c "ros2 launch carla_config carla.launch.py" + + carla_viz: + build: + context: .. + dockerfile: docker/simulation/carla_viz/carla_viz.Dockerfile + cache_from: + - "${SIMULATION_CARLAVIZ_IMAGE:?}:build_${TAG}" + - "${SIMULATION_CARLAVIZ_IMAGE:?}:build_main" + image: "${SIMULATION_CARLAVIZ_IMAGE:?}:${TAG}" + ports: + - ${CARLAVIZ_PORT}:8080 + - ${CARLAVIZ_PORT_2}:8081 + environment: + - CARLA_SERVER_HOST=${COMPOSE_PROJECT_NAME:?}-carla_sim-1 + - CARLAVIZ_BACKEND_PORT=${CARLAVIZ_PORT_2} + entrypoint: ["/bin/bash", "-c", "./docker/carlaviz_entrypoint.sh > /dev/null 2>&1"] + restart: always + + carla_sample_node: + build: + context: .. + dockerfile: docker/simulation/carla_sample_node/carla_sample_node.Dockerfile + cache_from: + - "${SIMULATION_CARLA_SAMPLE_NODE_IMAGE:?}:build_${TAG}" + - "${SIMULATION_CARLA_SAMPLE_NODE_IMAGE:?}:build_main" + target: deploy + image: "${SIMULATION_CARLA_SAMPLE_NODE_IMAGE:?}:${TAG}" + # command: /bin/bash -c "echo CARLA_SAMPLE_NODE disabled" + command: /bin/bash -c "ros2 launch carla_sample_node carla_sample_node.launch.py publish_autopilot:='False'" + + carla_notebooks: + build: + context: .. 
+ dockerfile: docker/simulation/carla_notebooks/carla_notebooks.Dockerfile + cache_from: + - "${SIMULATION_CARLA_NOTEBOOKS_IMAGE:?}:build_${TAG}" + - "${SIMULATION_CARLA_NOTEBOOKS_IMAGE:?}:build_main" + image: "${SIMULATION_CARLA_NOTEBOOKS_IMAGE:?}:${TAG}" + ports: + - ${CARLA_NOTEBOOKS_PORT:?}:${CARLA_NOTEBOOKS_PORT:?} + environment: + - CLIENT_NAME=${COMPOSE_PROJECT_NAME:?}-carla_sim-1 + - SCENARIO_RUNNER_ROOT=/home/bolty/scenario_runner + - DISPLAY=1 + - CUDA_VISIBLE_DEVICES=0 + - NVIDIA_VISIBLE_DEVICES=1 + container_name: ${COMPOSE_PROJECT_NAME:?}_carla_notebooks + volumes: + - ${MONO_DIR}/src/simulation/carla_notebooks:/home/bolty/carla_notebooks + command: jupyter notebook --allow-root --ip=0.0.0.0 --port=${CARLA_NOTEBOOKS_PORT:?} --no-browser --ServerApp.token='' --ServerApp.password='' diff --git a/modules/docker-compose.world_modeling.yaml b/modules/docker-compose.world_modeling.yaml index 8bd6889d..c70e4cf2 100644 --- a/modules/docker-compose.world_modeling.yaml +++ b/modules/docker-compose.world_modeling.yaml @@ -6,8 +6,8 @@ services: context: .. dockerfile: docker/world_modeling/hd_map/hd_map.Dockerfile cache_from: - - "${WORLD_MODELING_HD_MAP_IMAGE}:${TAG}" - - "${WORLD_MODELING_HD_MAP_IMAGE}:main" + - "${WORLD_MODELING_HD_MAP_IMAGE}:build_${TAG}" + - "${WORLD_MODELING_HD_MAP_IMAGE}:build_main" target: deploy image: "${WORLD_MODELING_HD_MAP_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch hd_map hd_map.launch.py" @@ -17,8 +17,8 @@ services: context: .. dockerfile: docker/world_modeling/localization/localization.Dockerfile cache_from: - - "${WORLD_MODELING_LOCALIZATION_IMAGE}:${TAG}" - - "${WORLD_MODELING_LOCALIZATION_IMAGE}:main" + - "${WORLD_MODELING_LOCALIZATION_IMAGE}:build_${TAG}" + - "${WORLD_MODELING_LOCALIZATION_IMAGE}:build_main" target: deploy image: "${WORLD_MODELING_LOCALIZATION_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch localization localization.launch.py" @@ -28,8 +28,8 @@ services: context: .. 
dockerfile: docker/world_modeling/occupancy/occupancy.Dockerfile cache_from: - - "${WORLD_MODELING_OCCUPANCY_IMAGE}:${TAG}" - - "${WORLD_MODELING_OCCUPANCY_IMAGE}:main" + - "${WORLD_MODELING_OCCUPANCY_IMAGE}:build_${TAG}" + - "${WORLD_MODELING_OCCUPANCY_IMAGE}:build_main" target: deploy image: "${WORLD_MODELING_OCCUPANCY_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch occupancy occupancy.launch.py" @@ -39,8 +39,8 @@ services: context: .. dockerfile: docker/world_modeling/occupancy_segmentation/occupancy_segmentation.Dockerfile cache_from: - - "${WORLD_MODELING_OCCUPANCY_SEGMENTATION_IMAGE}:${TAG}" - - "${WORLD_MODELING_OCCUPANCY_SEGMENTATION_IMAGE}:main" + - "${WORLD_MODELING_OCCUPANCY_SEGMENTATION_IMAGE}:build_${TAG}" + - "${WORLD_MODELING_OCCUPANCY_SEGMENTATION_IMAGE}:build_main" target: deploy image: "${WORLD_MODELING_OCCUPANCY_SEGMENTATION_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch occupancy_segmentation occupancy_segmentation.launch.py" @@ -50,8 +50,8 @@ services: context: .. 
dockerfile: docker/world_modeling/motion_forecasting/motion_forecasting.Dockerfile cache_from: - - "${WORLD_MODELING_MOTION_FORECASTING_IMAGE}:${TAG}" - - "${WORLD_MODELING_MOTION_FORECASTING_IMAGE}:main" + - "${WORLD_MODELING_MOTION_FORECASTING_IMAGE}:build_${TAG}" + - "${WORLD_MODELING_MOTION_FORECASTING_IMAGE}:build_main" target: deploy image: "${WORLD_MODELING_MOTION_FORECASTING_IMAGE}:${TAG}" command: /bin/bash -c "ros2 launch motion_forecasting motion_forecasting.launch.py" diff --git a/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py b/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py index b5ad2ea8..05ceb431 100755 --- a/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py +++ b/src/perception/camera_object_detection/camera_object_detection/yolov8_detection.py @@ -1,5 +1,6 @@ import rclpy from rclpy.node import Node +import os from sensor_msgs.msg import Image, CompressedImage from vision_msgs.msg import ( @@ -9,7 +10,7 @@ ) from ultralytics.nn.autobackend import AutoBackend -from ultralytics.data.augment import LetterBox +from ultralytics.data.augment import LetterBox, CenterCrop from ultralytics.utils.ops import non_max_suppression from ultralytics.utils.plotting import Annotator, colors @@ -34,21 +35,27 @@ def __init__(self): self.declare_parameter("camera_topic", "/camera/right/image_color") self.declare_parameter("publish_vis_topic", "/annotated_img") self.declare_parameter("publish_detection_topic", "/detections") - self.declare_parameter("model_path", "/perception_models/yolov8s.pt") - self.declare_parameter("image_size", 480) + self.declare_parameter("model_path", "/perception_models/yolov8m.pt") + self.declare_parameter("image_size", 1024) self.declare_parameter("compressed", False) + self.declare_parameter("crop_mode", "LetterBox") + self.declare_parameter("save_detections", False) self.camera_topic = self.get_parameter("camera_topic").value 
self.publish_vis_topic = self.get_parameter("publish_vis_topic").value - self.publish_detection_topic = self.get_parameter( - "publish_detection_topic").value + self.publish_detection_topic = self.get_parameter("publish_detection_topic").value self.model_path = self.get_parameter("model_path").value self.image_size = self.get_parameter("image_size").value self.compressed = self.get_parameter("compressed").value + self.crop_mode = self.get_parameter("crop_mode").value + self.save_detections = bool(self.get_parameter("save_detections").value) + self.counter = 0 # For saving detections + if self.save_detections: + if not os.path.exists("detections"): + os.makedirs("detections") self.line_thickness = 1 self.half = False - self.augment = False self.subscription = self.create_subscription( Image if not self.compressed else CompressedImage, @@ -61,9 +68,11 @@ def __init__(self): ), ) + self.orig_image_width = None + self.orig_image_height = None + # set device - self.device = torch.device( - "cuda" if torch.cuda.is_available() else "cpu") + self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") if torch.cuda.is_available(): self.get_logger().info("Using GPU for inference") else: @@ -73,24 +82,69 @@ def __init__(self): self.cv_bridge = CvBridge() # load yolov8 model - self.model = AutoBackend( - self.model_path, device=self.device, dnn=False, fp16=False) + self.model = AutoBackend(self.model_path, device=self.device, dnn=False, fp16=False) - self.names = self.model.module.names if hasattr( - self.model, "module") else self.model.names + self.names = self.model.module.names if hasattr(self.model, "module") else self.model.names self.stride = int(self.model.stride) # setup vis publishers - self.vis_publisher = self.create_publisher( - Image, self.publish_vis_topic, 10) + self.vis_publisher = self.create_publisher(Image, self.publish_vis_topic, 10) self.detection_publisher = self.create_publisher( - Detection2DArray, self.publish_detection_topic, 10) + 
Detection2DArray, self.publish_detection_topic, 10 + ) self.get_logger().info( - f"Successfully created node listening on camera topic: {self.camera_topic}...") + f"Successfully created node listening on camera topic: {self.camera_topic}..." + ) + + def crop_image(self, cv_image): + if self.crop_mode == "LetterBox": + img = LetterBox(self.image_size, stride=self.stride)(image=cv_image) + elif self.crop_mode == "CenterCrop": + img = CenterCrop(self.image_size)(cv_image) + else: + raise Exception("Invalid crop mode, please choose either 'LetterBox' or 'CenterCrop'!") + + return img + + def convert_bboxes_to_orig_frame(self, bbox): + """ + Converts bounding box coordinates from the scaled image frame back to the original image frame. + + This function takes into account the original image dimensions and the scaling method used + (either "LetterBox" or "CenterCrop") to accurately map the bounding box coordinates back to + their original positions in the original image. + + Parameters: + bbox (list): A list containing the bounding box coordinates in the format [x1, y1, w1, h1] + in the scaled image frame. + + Returns: + list: A list containing the bounding box coordinates in the format [x1, y1, w1, h1] + in the original image frame. 
- def preprocess_image(self, cv_image): + """ + width_scale = self.orig_image_width / self.image_size + height_scale = self.orig_image_height / self.image_size + if self.crop_mode == "LetterBox": + translation = (self.image_size - self.orig_image_height / width_scale) / 2 + return [ + bbox[0] * width_scale, + (bbox[1] - translation) * width_scale, + bbox[2] * width_scale, + bbox[3] * width_scale, + ] + elif self.crop_mode == "CenterCrop": + translation = (self.orig_image_width / height_scale - self.image_size) / 2 + return [ + (bbox[0] + translation) * height_scale, + bbox[1] * height_scale, + bbox[2] * height_scale, + bbox[3] * height_scale, + ] + + def crop_and_convert_to_tensor(self, cv_image): """ Preprocess the image by resizing, padding and rearranging the dimensions. @@ -100,9 +154,7 @@ def preprocess_image(self, cv_image): Returns: torch.Tensor image for model input of shape (1,3,w,h) """ - # Padded resize - img = cv_image - img = LetterBox(self.image_size, stride=self.stride)(image=cv_image) + img = self.crop_image(cv_image) # Convert img = img.transpose(2, 0, 1) @@ -143,10 +195,11 @@ def postprocess_detections(self, detections, annotator): annotator_img = annotator.result() return (processed_detections, annotator_img) - def publish_vis(self, annotated_img, feed): + def publish_vis(self, annotated_img, msg, feed): # Publish visualizations imgmsg = self.cv_bridge.cv2_to_imgmsg(annotated_img, "bgr8") - imgmsg.header.frame_id = "camera_{}_link".format(feed) + imgmsg.header.stamp = msg.header.stamp + imgmsg.header.frame_id = msg.header.frame_id self.vis_publisher.publish(imgmsg) def publish_detections(self, detections, msg, feed): @@ -175,10 +228,13 @@ def publish_detections(self, detections, msg, feed): detection2darray.detections.append(detection2d) self.detection_publisher.publish(detection2darray) - return def image_callback(self, msg): self.get_logger().debug("Received image") + if self.orig_image_width is None: + self.orig_image_width = msg.width + 
self.orig_image_height = msg.height + images = [msg] # msg is a single sensor image startTime = time.time() for image in images: @@ -189,16 +245,13 @@ def image_callback(self, msg): cv_image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR) else: try: - cv_image = self.cv_bridge.imgmsg_to_cv2( - image, desired_encoding="passthrough") + cv_image = self.cv_bridge.imgmsg_to_cv2(image, desired_encoding="passthrough") except CvBridgeError as e: self.get_logger().error(str(e)) return # preprocess image and run through prediction - img = self.preprocess_image(cv_image) - processed_cv_image = LetterBox( - self.image_size, stride=self.stride)(image=cv_image) + img = self.crop_and_convert_to_tensor(cv_image) pred = self.model(img) # nms function used same as yolov8 detect.py @@ -217,6 +270,7 @@ def image_callback(self, msg): xyxy[3] - xyxy[1], ] bbox = [b.item() for b in bbox] + bbox = self.convert_bboxes_to_orig_frame(bbox) detections.append( { @@ -228,20 +282,24 @@ def image_callback(self, msg): self.get_logger().debug(f"{label}: {bbox}") annotator = Annotator( - processed_cv_image, + cv_image, line_width=self.line_thickness, example=str(self.names), ) - (detections, annotated_img) = self.postprocess_detections( - detections, annotator) + (detections, annotated_img) = self.postprocess_detections(detections, annotator) # Currently we support a single camera so we pass an empty string feed = "" - self.publish_vis(annotated_img, feed) + self.publish_vis(annotated_img, msg, feed) self.publish_detections(detections, msg, feed) + if self.save_detections: + cv2.imwrite(f"detections/{self.counter}.jpg", annotated_img) + self.counter += 1 + self.get_logger().info( - f"Finished in: {time.time() - startTime}, {1/(time.time() - startTime)} Hz") + f"Finished in: {time.time() - startTime}, {1/(time.time() - startTime)} Hz" + ) def main(args=None): diff --git a/src/perception/camera_object_detection/config/deepracer_config.yaml b/src/perception/camera_object_detection/config/deepracer_config.yaml 
index 542efa7a..88a2e959 100755 --- a/src/perception/camera_object_detection/config/deepracer_config.yaml +++ b/src/perception/camera_object_detection/config/deepracer_config.yaml @@ -3,5 +3,5 @@ camera_object_detection_node: camera_topic: /camera_pkg/display_mjpeg publish_vis_topic: /annotated_img publish_obstacle_topic: /obstacles - model_path: /perception_models/yolov8s.pt - image_size: 480 + model_path: /perception_models/yolov8m.pt + image_size: 1024 diff --git a/src/perception/camera_object_detection/config/eve_config.yaml b/src/perception/camera_object_detection/config/eve_config.yaml new file mode 100755 index 00000000..208ccbe6 --- /dev/null +++ b/src/perception/camera_object_detection/config/eve_config.yaml @@ -0,0 +1,23 @@ +left_camera_object_detection_node: + ros__parameters: + camera_topic: /camera/left/image_color + publish_vis_topic: /camera/left/camera_detections_viz + publish_detection_topic: /camera/left/camera_detections + model_path: /perception_models/yolov8m.pt + image_size: 1024 + +center_camera_object_detection_node: + ros__parameters: + camera_topic: /camera/center/image_color + publish_vis_topic: /camera/center/camera_detections_viz + publish_detection_topic: /camera/center/camera_detections + model_path: /perception_models/yolov8m.pt + image_size: 1024 + +right_camera_object_detection_node: + ros__parameters: + camera_topic: /camera/right/image_color + publish_vis_topic: /camera/right/camera_detections_viz + publish_detection_topic: /camera/right/camera_detections + model_path: /perception_models/yolov8m.pt + image_size: 1024 diff --git a/src/perception/camera_object_detection/config/nuscenes_config.yaml b/src/perception/camera_object_detection/config/nuscenes_config.yaml index 6dff0554..6251c4b0 100755 --- a/src/perception/camera_object_detection/config/nuscenes_config.yaml +++ b/src/perception/camera_object_detection/config/nuscenes_config.yaml @@ -3,6 +3,6 @@ camera_object_detection_node: camera_topic: /CAM_FRONT/image_rect_compressed 
publish_vis_topic: /annotated_img publish_obstacle_topic: /obstacles - model_path: /perception_models/yolov8s.pt + model_path: /perception_models/yolov8m.pt image_size: 640 compressed: true diff --git a/src/perception/camera_object_detection/config/sim_config.yaml b/src/perception/camera_object_detection/config/sim_config.yaml deleted file mode 100755 index ec13a71b..00000000 --- a/src/perception/camera_object_detection/config/sim_config.yaml +++ /dev/null @@ -1,7 +0,0 @@ -camera_object_detection_node: - ros__parameters: - camera_topic: /camera/right/image_color - publish_vis_topic: /annotated_img - publish_obstacle_topic: /obstacles - model_path: /perception_models/yolov8s.pt - image_size: 480 diff --git a/src/perception/camera_object_detection/config/traffic_light_config.yaml b/src/perception/camera_object_detection/config/traffic_light_config.yaml new file mode 100755 index 00000000..9a3e00c8 --- /dev/null +++ b/src/perception/camera_object_detection/config/traffic_light_config.yaml @@ -0,0 +1,9 @@ +traffic_light_node: + ros__parameters: + camera_topic: /camera/left/image_color + publish_vis_topic: /traffic_lights_viz + publish_detection_topic: /traffic_lights + model_path: /perception_models/traffic_light.pt + crop_mode: CenterCrop + image_size: 1024 + save_detections: false diff --git a/src/perception/camera_object_detection/config/traffic_signs_config.yaml b/src/perception/camera_object_detection/config/traffic_signs_config.yaml new file mode 100644 index 00000000..8a143a77 --- /dev/null +++ b/src/perception/camera_object_detection/config/traffic_signs_config.yaml @@ -0,0 +1,9 @@ +traffic_signs_node: + ros__parameters: + camera_topic: /camera/left/image_color + publish_vis_topic: /traffic_signs_viz + publish_detection_topic: /traffic_signs + model_path: /perception_models/traffic_signs_v1.pt + crop_mode: CenterCrop + image_size: 1024 + save_detections: false diff --git a/src/perception/camera_object_detection/launch/deepracer_launch.py 
b/src/perception/camera_object_detection/launch/deepracer.launch.py similarity index 82% rename from src/perception/camera_object_detection/launch/deepracer_launch.py rename to src/perception/camera_object_detection/launch/deepracer.launch.py index c853a556..138c32b4 100755 --- a/src/perception/camera_object_detection/launch/deepracer_launch.py +++ b/src/perception/camera_object_detection/launch/deepracer.launch.py @@ -5,14 +5,12 @@ def generate_launch_description(): - ld = LaunchDescription() config = os.path.join( get_package_share_directory('camera_object_detection'), 'config', 'deeepracer.yaml' ) - # nodes camera_object_detection_node = Node( package='camera_object_detection', executable='camera_object_detection_node', @@ -20,7 +18,4 @@ def generate_launch_description(): parameters=[config] ) - # finalize - ld.add_action(camera_object_detection_node) - - return ld + return LaunchDescription([camera_object_detection_node]) diff --git a/src/perception/camera_object_detection/launch/eve.launch.py b/src/perception/camera_object_detection/launch/eve.launch.py new file mode 100755 index 00000000..9ff4992e --- /dev/null +++ b/src/perception/camera_object_detection/launch/eve.launch.py @@ -0,0 +1,57 @@ +from launch import LaunchDescription +from launch.substitutions import LaunchConfiguration +from launch.conditions import LaunchConfigurationEquals +from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription +from launch.launch_description_sources import PythonLaunchDescriptionSource +from ament_index_python.packages import get_package_share_directory +import os + + +def generate_launch_description(): + launch_traffic_light = LaunchConfiguration("launch_traffic_light", default=True) + launch_traffic_light_arg = DeclareLaunchArgument( + "launch_traffic_light", + default_value=launch_traffic_light, + description="Launch traffic light detection", + ) + launch_traffic_signs = LaunchConfiguration("launch_traffic_signs", default=True) + 
launch_traffic_signs_arg = DeclareLaunchArgument( + "launch_traffic_signs", + default_value=launch_traffic_signs, + description="Launch traffic signs detection", + ) + + launch_args = [launch_traffic_light_arg, launch_traffic_signs_arg] + + camera_object_detection_launch_include_dir = os.path.join( + get_package_share_directory("camera_object_detection"), "launch", "include" + ) + + pretrained_yolov8_launch = IncludeLaunchDescription( + PythonLaunchDescriptionSource( + [camera_object_detection_launch_include_dir, "/pretrained_yolov8.launch.py"] + ), + ) + + traffic_light_launch = IncludeLaunchDescription( + PythonLaunchDescriptionSource( + [camera_object_detection_launch_include_dir, "/traffic_light.launch.py"] + ), + condition=LaunchConfigurationEquals("launch_traffic_light", "True"), + ) + + traffic_signs_launch = IncludeLaunchDescription( + PythonLaunchDescriptionSource( + [camera_object_detection_launch_include_dir, "/traffic_signs.launch.py"] + ), + condition=LaunchConfigurationEquals("launch_traffic_signs", "True"), + ) + + return LaunchDescription( + launch_args + + [ + pretrained_yolov8_launch, + traffic_light_launch, + traffic_signs_launch, + ] + ) diff --git a/src/perception/camera_object_detection/launch/include/pretrained_yolov8.launch.py b/src/perception/camera_object_detection/launch/include/pretrained_yolov8.launch.py new file mode 100644 index 00000000..13096462 --- /dev/null +++ b/src/perception/camera_object_detection/launch/include/pretrained_yolov8.launch.py @@ -0,0 +1,39 @@ +from launch import LaunchDescription +from launch_ros.actions import Node +from ament_index_python.packages import get_package_share_directory +import os + + +def generate_launch_description(): + config = os.path.join( + get_package_share_directory("camera_object_detection"), "config", "eve_config.yaml" + ) + + left_camera_object_detection_node = Node( + package="camera_object_detection", + executable="camera_object_detection_node", + 
name="left_camera_object_detection_node", + parameters=[config], + ) + + center_camera_object_detection_node = Node( + package="camera_object_detection", + executable="camera_object_detection_node", + name="center_camera_object_detection_node", + parameters=[config], + ) + + right_camera_object_detection_node = Node( + package="camera_object_detection", + executable="camera_object_detection_node", + name="right_camera_object_detection_node", + parameters=[config], + ) + + return LaunchDescription( + [ + left_camera_object_detection_node, + center_camera_object_detection_node, + right_camera_object_detection_node, + ] + ) diff --git a/src/perception/camera_object_detection/launch/sim_launch.py b/src/perception/camera_object_detection/launch/include/traffic_light.launch.py similarity index 71% rename from src/perception/camera_object_detection/launch/sim_launch.py rename to src/perception/camera_object_detection/launch/include/traffic_light.launch.py index 12af5eb0..234844b8 100755 --- a/src/perception/camera_object_detection/launch/sim_launch.py +++ b/src/perception/camera_object_detection/launch/include/traffic_light.launch.py @@ -5,22 +5,17 @@ def generate_launch_description(): - ld = LaunchDescription() config = os.path.join( get_package_share_directory('camera_object_detection'), 'config', - 'sim_config.yaml' + 'traffic_light_config.yaml' ) - # nodes camera_object_detection_node = Node( package='camera_object_detection', executable='camera_object_detection_node', - name='camera_object_detection_node', + name='traffic_light_node', parameters=[config] ) - # finalize - ld.add_action(camera_object_detection_node) - - return ld + return LaunchDescription([camera_object_detection_node]) diff --git a/src/perception/camera_object_detection/launch/include/traffic_signs.launch.py b/src/perception/camera_object_detection/launch/include/traffic_signs.launch.py new file mode 100644 index 00000000..cdb0134f --- /dev/null +++ 
b/src/perception/camera_object_detection/launch/include/traffic_signs.launch.py @@ -0,0 +1,21 @@ +from launch import LaunchDescription +from launch_ros.actions import Node +from ament_index_python.packages import get_package_share_directory +import os + + +def generate_launch_description(): + config = os.path.join( + get_package_share_directory('camera_object_detection'), + 'config', + 'traffic_signs_config.yaml' + ) + + camera_object_detection_node = Node( + package='camera_object_detection', + executable='camera_object_detection_node', + name='traffic_signs_node', + parameters=[config] + ) + + return LaunchDescription([camera_object_detection_node]) diff --git a/src/perception/camera_object_detection/launch/nuscenes_launch.py b/src/perception/camera_object_detection/launch/nuscenes.launch.py similarity index 87% rename from src/perception/camera_object_detection/launch/nuscenes_launch.py rename to src/perception/camera_object_detection/launch/nuscenes.launch.py index 7a366260..faff24b6 100755 --- a/src/perception/camera_object_detection/launch/nuscenes_launch.py +++ b/src/perception/camera_object_detection/launch/nuscenes.launch.py @@ -12,7 +12,6 @@ def generate_launch_description(): 'nuscenes_config.yaml' ) - # nodes camera_object_detection_node = Node( package='camera_object_detection', executable='camera_object_detection_node', @@ -21,7 +20,4 @@ def generate_launch_description(): arguments=['--ros-args', '--log-level', 'info'] ) - # finalize - ld.add_action(camera_object_detection_node) - - return ld + return LaunchDescription([camera_object_detection_node]) diff --git a/src/perception/camera_object_detection/setup.py b/src/perception/camera_object_detection/setup.py index 7ba35c15..c9665f79 100755 --- a/src/perception/camera_object_detection/setup.py +++ b/src/perception/camera_object_detection/setup.py @@ -12,7 +12,8 @@ ('share/ament_index/resource_index/packages', ['resource/' + package_name]), ('share/' + package_name, ['package.xml']), - 
(os.path.join('share', package_name, 'launch'), glob('launch/*.py')), + (os.path.join('share', package_name, 'launch'), glob('launch/*.launch.py')), + (os.path.join('share', package_name, 'launch', 'include'), glob('launch/include/*.launch.py')), (os.path.join('share', package_name, 'config'), glob('config/*.yaml')), ], install_requires=['setuptools'], diff --git a/src/perception/depth_estimation/CMakeLists.txt b/src/perception/depth_estimation/CMakeLists.txt new file mode 100644 index 00000000..fb6f76f5 --- /dev/null +++ b/src/perception/depth_estimation/CMakeLists.txt @@ -0,0 +1,36 @@ +cmake_minimum_required(VERSION 3.5) +project(depth_estimation) + +# Default to C++14 +if(NOT CMAKE_CXX_STANDARD) + set(CMAKE_CXX_STANDARD 14) +endif() + +if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + add_compile_options(-Wall -Wextra -Wpedantic) +endif() + +find_package(ament_cmake REQUIRED) +find_package(rclcpp REQUIRED) +find_package(sensor_msgs REQUIRED) + +include_directories(include) + +add_executable(depth_estimation_node src/depth_estimation_node.cpp) +ament_target_dependencies(depth_estimation_node rclcpp sensor_msgs) + +install(TARGETS + depth_estimation_node + DESTINATION lib/${PROJECT_NAME}) + +install(DIRECTORY + launch + DESTINATION share/${PROJECT_NAME} +) + +install(DIRECTORY + config + DESTINATION share/${PROJECT_NAME} +) + +ament_package() diff --git a/src/perception/depth_estimation/config/config.yaml b/src/perception/depth_estimation/config/config.yaml new file mode 100644 index 00000000..4f42e0b3 --- /dev/null +++ b/src/perception/depth_estimation/config/config.yaml @@ -0,0 +1,5 @@ +depth_estimation_node: + ros__parameters: + camera_topic: "/camera/left/image_color" + depth_map_topic: "/depth_map" + debug_node: false diff --git a/src/perception/depth_estimation/include/depth_estimation_node.hpp b/src/perception/depth_estimation/include/depth_estimation_node.hpp new file mode 100644 index 00000000..e54e9b6f --- /dev/null +++ 
b/src/perception/depth_estimation/include/depth_estimation_node.hpp @@ -0,0 +1,22 @@ +#ifndef DEPTH_ESTIMATION_NODE_HPP_ +#define DEPTH_ESTIMATION_NODE_HPP_ + +#include "rclcpp/rclcpp.hpp" +#include "sensor_msgs/msg/image.hpp" + +class DepthEstimationNode : public rclcpp::Node { + public: + DepthEstimationNode(); + + private: + void image_callback(const sensor_msgs::msg::Image::SharedPtr msg); + + std::string camera_topic_; + std::string depth_map_topic_; + bool debug_node_; + + rclcpp::Subscription::SharedPtr image_subscription_; + rclcpp::Publisher::SharedPtr depth_image_publisher_; +}; + +#endif // DEPTH_ESTIMATION_NODE_HPP_ diff --git a/src/perception/depth_estimation/launch/eve.launch.py b/src/perception/depth_estimation/launch/eve.launch.py new file mode 100644 index 00000000..8567b7e6 --- /dev/null +++ b/src/perception/depth_estimation/launch/eve.launch.py @@ -0,0 +1,25 @@ +from launch import LaunchDescription +from launch_ros.actions import Node +from ament_index_python.packages import get_package_share_directory +import os + + +def generate_launch_description(): + ld = LaunchDescription() + config = os.path.join( + get_package_share_directory('depth_estimation'), + 'config', + 'config.yaml' + ) + + depth_estimation_node = Node( + package='depth_estimation', + executable='depth_estimation_node', + name='depth_estimation_node', + parameters=[config], + arguments=['--ros-args', '--log-level', 'info'] + ) + + ld.add_action(depth_estimation_node) + + return ld diff --git a/src/perception/traffic_sign_detection/package.xml b/src/perception/depth_estimation/package.xml similarity index 70% rename from src/perception/traffic_sign_detection/package.xml rename to src/perception/depth_estimation/package.xml index 48d6de51..76fc56c8 100644 --- a/src/perception/traffic_sign_detection/package.xml +++ b/src/perception/depth_estimation/package.xml @@ -1,10 +1,10 @@ - traffic_sign_detection + depth_estimation 0.0.0 - TODO: Package description - bolty + Depth estimation ROS 
2 node + TBD TODO: License declaration ament_cmake diff --git a/src/perception/depth_estimation/src/depth_estimation_node.cpp b/src/perception/depth_estimation/src/depth_estimation_node.cpp new file mode 100644 index 00000000..bebd1ad0 --- /dev/null +++ b/src/perception/depth_estimation/src/depth_estimation_node.cpp @@ -0,0 +1,39 @@ +#include "depth_estimation_node.hpp" +#include "sensor_msgs/msg/image.hpp" + +DepthEstimationNode::DepthEstimationNode() : Node("depth_estimation_node") { + this->declare_parameter("camera_topic", "/camera/image_raw"); + this->declare_parameter("depth_map_topic", "/depth/image_raw"); + this->declare_parameter("debug_node", false); + + this->get_parameter("camera_topic", camera_topic_); + this->get_parameter("depth_map_topic", depth_map_topic_); + this->get_parameter("debug_node", debug_node_); + + RCLCPP_INFO(this->get_logger(), "debug_node: %s", debug_node_ ? "true" : "false"); + + RCLCPP_INFO(this->get_logger(), "Subscribing to camera topic '%s'", camera_topic_.c_str()); + image_subscription_ = this->create_subscription( + camera_topic_, 10, + std::bind(&DepthEstimationNode::image_callback, this, std::placeholders::_1)); + + RCLCPP_INFO(this->get_logger(), "Publishing depth map to topic '%s'", depth_map_topic_.c_str()); + depth_image_publisher_ = this->create_publisher(depth_map_topic_, 10); +} + +void DepthEstimationNode::image_callback(const sensor_msgs::msg::Image::SharedPtr msg) { + RCLCPP_INFO(this->get_logger(), "Received sensor image on '%s'", camera_topic_.c_str()); + + // TODO: Process the incoming image and estimate depth + // Replace this empty image with the actual depth map + auto depth_map_image = sensor_msgs::msg::Image(); + depth_image_publisher_->publish(depth_map_image); +} + +int main(int argc, char **argv) { + rclcpp::init(argc, argv); + auto node = std::make_shared(); + rclcpp::spin(node); + rclcpp::shutdown(); + return 0; +} diff --git a/src/perception/lane_detection/CMakeLists.txt 
b/src/perception/lane_detection/CMakeLists.txt index b3cfae5d..084ff19f 100644 --- a/src/perception/lane_detection/CMakeLists.txt +++ b/src/perception/lane_detection/CMakeLists.txt @@ -1,14 +1,70 @@ cmake_minimum_required(VERSION 3.8) -project(lane_detection) +set(ProjectName "lane_detection") +set(CUDA_TOOLKIT_ROOT_DIR "/usr/local/cuda") +SET(CMAKE_CUDA_COMPILER /usr/local/cuda-12.2/bin/nvcc) +SET(CUDACXX /usr/local/cuda-12.2/bin/nvcc) +project(${ProjectName} LANGUAGES CXX CUDA) + +# Default to C++14 +if(NOT CMAKE_CXX_STANDARD) + set(CMAKE_CXX_STANDARD 14) +endif() if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") add_compile_options(-Wall -Wextra -Wpedantic) endif() -# find dependencies -find_package(ament_cmake REQUIRED) -# uncomment the following section in order to fill in -# further dependencies manually. -# find_package( REQUIRED) +### TensorRT model ### + +# Select build system and set compile options +include(${CMAKE_CURRENT_LIST_DIR}/src/lane_detection_tensorrt/common_helper/cmakes/build_setting.cmake) +add_executable(lane_detection src/lane_detection_node.cpp) + +add_subdirectory(./src/lane_detection_tensorrt/ultra_fast_lane_detection_v2/image_processor image_processor) +target_include_directories(${ProjectName} PUBLIC + ${CMAKE_CURRENT_SOURCE_DIR}/include + ./src/lane_detection_tensorrt/ultra_fast_lane_detection_v2/image_processor) +target_link_libraries(${ProjectName} ImageProcessor) + +find_package(OpenCV REQUIRED) +target_include_directories(${ProjectName} PUBLIC ${OpenCV_INCLUDE_DIRS}) +target_link_libraries(${ProjectName} ${OpenCV_LIBS}) + +# Copy resouce +add_definitions(-DRESOURCE_DIR="/models/") + +### End of TensorRT model ### + +set(REQUIRED_PACKAGES + rclcpp + std_msgs + sensor_msgs + geometry_msgs + lane_detection_msgs + cv_bridge + image_transport + CUDA +) + +foreach(PKG IN LISTS REQUIRED_PACKAGES) + find_package(${PKG} REQUIRED) +endforeach() + +# Install launch files +install(DIRECTORY + launch + DESTINATION 
share/${PROJECT_NAME}/ +) + +# Install config files +install(DIRECTORY + config + DESTINATION share/${PROJECT_NAME}/ +) + +install(TARGETS +lane_detection + DESTINATION lib/${PROJECT_NAME}) +ament_target_dependencies(lane_detection rclcpp OpenCV std_msgs sensor_msgs cv_bridge image_transport lane_detection_msgs) ament_package() diff --git a/src/perception/lane_detection/config/eve_config.yaml b/src/perception/lane_detection/config/eve_config.yaml new file mode 100644 index 00000000..5706f42d --- /dev/null +++ b/src/perception/lane_detection/config/eve_config.yaml @@ -0,0 +1,9 @@ +lane_detection_node: + ros__parameters: + camera_topic: /camera/left/image_color + publish_vis_topic: /camera/left/lanes_viz + publish_lanes_topic: /camera/left/lanes + save_images: false + save_dir: /tmp + publish_source_image: false + debug_node: false diff --git a/src/perception/lane_detection/config/nuscenes_config.yaml b/src/perception/lane_detection/config/nuscenes_config.yaml new file mode 100644 index 00000000..e526dbaf --- /dev/null +++ b/src/perception/lane_detection/config/nuscenes_config.yaml @@ -0,0 +1,9 @@ +lane_detection_node: + ros__parameters: + camera_topic: /CAM_FRONT/image_rect_compressed + publish_vis_topic: /CAM_FRONT/lanes_viz + publish_lanes_topic: /CAM_FRONT/lanes + save_images: false + save_dir: /tmp + publish_source_image: false + debug_node: false diff --git a/src/perception/lane_detection/include/lane_detection_node.hpp b/src/perception/lane_detection/include/lane_detection_node.hpp new file mode 100644 index 00000000..efa73639 --- /dev/null +++ b/src/perception/lane_detection/include/lane_detection_node.hpp @@ -0,0 +1,48 @@ +#ifndef LANE_DETECTION_NODE_HPP +#define LANE_DETECTION_NODE_HPP + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include "lane_detection_msgs/msg/lane_detection.hpp" + +#include + +#include "common_helper_cv.h" +#include "image_processor.h" + +class LaneDetectionNode : public 
rclcpp::Node { + public: + LaneDetectionNode(); + void image_callback(const sensor_msgs::msg::Image::ConstSharedPtr &msg); + + private: + void populate_lane_msg(lane_detection_msgs::msg::LaneDetection &lane_msg, + const std::vector> &raw_lane_list); + void save_image(const cv::Mat &image, const std::string &filename); + + rclcpp::Subscription::SharedPtr subscription_; + rclcpp::Publisher::SharedPtr image_pub_; + rclcpp::Publisher::SharedPtr lane_detection_pub_; + + size_t count_; + + // ROS Parameters + std::string camera_topic_; + std::string publish_vis_topic_; + std::string publish_lanes_topic_; + bool save_images_; + std::string save_dir_; + bool publish_source_image_; + // Debug will publish the source image with lane detection overlay + bool debug_node_; +}; + +#endif // LANE_DETECTION_NODE_HPP \ No newline at end of file diff --git a/src/perception/lane_detection/launch/eve.launch.py b/src/perception/lane_detection/launch/eve.launch.py new file mode 100644 index 00000000..8dbeb7cf --- /dev/null +++ b/src/perception/lane_detection/launch/eve.launch.py @@ -0,0 +1,26 @@ +from launch import LaunchDescription +from launch_ros.actions import Node +from ament_index_python.packages import get_package_share_directory +import os + + +def generate_launch_description(): + ld = LaunchDescription() + + config = os.path.join( + get_package_share_directory('lane_detection'), + 'config', + 'eve_config.yaml' + ) + + lane_detection_node = Node( + package='lane_detection', + executable='lane_detection', + name='lane_detection_node', + parameters=[config], + arguments=['--ros-args', '--log-level', 'info'] + ) + + ld.add_action(lane_detection_node) + + return ld diff --git a/src/perception/lane_detection/launch/nuscenes.launch.py b/src/perception/lane_detection/launch/nuscenes.launch.py new file mode 100644 index 00000000..74c839fa --- /dev/null +++ b/src/perception/lane_detection/launch/nuscenes.launch.py @@ -0,0 +1,26 @@ +from launch import LaunchDescription +from 
launch_ros.actions import Node +from ament_index_python.packages import get_package_share_directory +import os + + +def generate_launch_description(): + ld = LaunchDescription() + + config = os.path.join( + get_package_share_directory('lane_detection'), + 'config', + 'nuscenes_config.yaml' + ) + + lane_detection_node = Node( + package='lane_detection', + executable='lane_detection', + name='lane_detection_node', + parameters=[config], + arguments=['--ros-args', '--log-level', 'info'] + ) + + ld.add_action(lane_detection_node) + + return ld diff --git a/src/perception/lane_detection/package.xml b/src/perception/lane_detection/package.xml index 90308912..c6a2ee45 100644 --- a/src/perception/lane_detection/package.xml +++ b/src/perception/lane_detection/package.xml @@ -3,12 +3,24 @@ lane_detection 0.0.0 - TODO: Package description - bolty + Lane Detection using ultra-fast-lane-detection_v2 + Justin Leung + Steven Gong TODO: License declaration ament_cmake + sensor_msgs + geometry_msgs + lane_detection_msgs + std_msgs + cv_bridge + image_transport + + rosidl_default_generators + rosidl_default_runtime + rosidl_interface_packages + ament_cmake diff --git a/src/perception/lane_detection/src/lane_detection_node.cpp b/src/perception/lane_detection/src/lane_detection_node.cpp new file mode 100755 index 00000000..3ae668bd --- /dev/null +++ b/src/perception/lane_detection/src/lane_detection_node.cpp @@ -0,0 +1,157 @@ +#include +#include +#include +#include +#include + +#include +#include +#include +#include "image_transport/image_transport.hpp" +#include "rclcpp/rclcpp.hpp" + +#include "lane_detection_msgs/msg/lane_detection.hpp" +#include "lane_detection_node.hpp" +#include "lane_engine.h" +#include "std_msgs/msg/string.hpp" + +#include + +#include "common_helper_cv.h" +#include "image_processor.h" + +LaneDetectionNode::LaneDetectionNode() : Node("lane_detection"), count_(0) { + this->declare_parameter("camera_topic", "/CAM_FRONT/image_rect_compressed"); + 
this->declare_parameter("publish_vis_topic", "/CAM_FRONT/lanes_viz"); + this->declare_parameter("publish_lanes_topic", "/CAM_FRONT/lanes"); + this->declare_parameter("save_images", false); + this->declare_parameter("save_dir", "/tmp"); + this->declare_parameter("publish_source_image", false); + this->declare_parameter("debug_node", false); + + this->get_parameter("camera_topic", camera_topic_); + this->get_parameter("publish_vis_topic", publish_vis_topic_); + this->get_parameter("publish_lanes_topic", publish_lanes_topic_); + this->get_parameter("save_images", save_images_); + this->get_parameter("save_dir", save_dir_); + this->get_parameter("publish_source_image", publish_source_image_); + this->get_parameter("debug_node", debug_node_); + + if (!std::filesystem::exists(save_dir_)) { + std::filesystem::create_directories(save_dir_); + } + + RCLCPP_INFO(this->get_logger(), "Subscribing to camera topic: %s", camera_topic_.c_str()); + + subscription_ = this->create_subscription( + camera_topic_, 10, + std::bind(&LaneDetectionNode::image_callback, this, std::placeholders::_1)); + + lane_detection_pub_ = + this->create_publisher(publish_lanes_topic_, 10); + + if (debug_node_) { + RCLCPP_INFO(this->get_logger(), "Creating debug publisher topic: %s", + publish_vis_topic_.c_str()); + image_pub_ = this->create_publisher(publish_vis_topic_, 10); + } +} + +void LaneDetectionNode::save_image(const cv::Mat &image, const std::string &filename) { + cv::imwrite(filename, image); + RCLCPP_INFO(this->get_logger(), "Saved image to %s", filename.c_str()); +} + +void LaneDetectionNode::populate_lane_msg(lane_detection_msgs::msg::LaneDetection &lane_msg, + const std::vector> &raw_lane_list) { + lane_msg.header.stamp = this->now(); + lane_msg.header.frame_id = "lane_detection"; + lane_msg.lines.clear(); + + // Load the message from the raw lane list + for (size_t i = 0; i < raw_lane_list.size(); i++) { + lane_detection_msgs::msg::LaneLine line; + 
line.points.resize(raw_lane_list[i].size()); + for (size_t j = 0; j < raw_lane_list[i].size() / 2; j++) { + line.points[j].x = raw_lane_list[i][j * 2 + 0]; + line.points[j].y = raw_lane_list[i][j * 2 + 1]; + line.points[j].z = 0; + } + lane_msg.lines.push_back(line); + } +} + +void LaneDetectionNode::image_callback(const sensor_msgs::msg::Image::ConstSharedPtr &msg) { + cv_bridge::CvImagePtr cv_ptr; + try { + cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8); + } catch (cv_bridge::Exception &e) { + RCLCPP_ERROR(this->get_logger(), "cv_bridge exception: %s", e.what()); + return; + } + + cv::Mat image = cv_ptr->image; + + if (image.empty()) { + RCLCPP_ERROR(this->get_logger(), "Decoded image is empty!"); + return; + } + RCLCPP_INFO(this->get_logger(), "Got Decoded image: width=%d, height=%d, type=%d", image.cols, + image.rows, image.type()); + + RCLCPP_INFO(this->get_logger(), "=== Start frame ==="); + const auto &time_all0 = std::chrono::steady_clock::now(); + + std::vector> raw_lane_list; + + /* Call image processor library */ + ImageProcessor::Result result; + ImageProcessor::Process(image, result, raw_lane_list); + const auto &time_all1 = std::chrono::steady_clock::now(); + double time_all = (time_all1 - time_all0).count() / 1000000.0; + RCLCPP_INFO(this->get_logger(), "Total: %9.3lf [msec]", time_all); + RCLCPP_INFO(this->get_logger(), " Pre processing: %9.3lf [msec]", result.time_pre_process); + RCLCPP_INFO(this->get_logger(), " Inference: %9.3lf [msec]", result.time_inference); + RCLCPP_INFO(this->get_logger(), " Post processing: %9.3lf [msec]", result.time_post_process); + RCLCPP_INFO(this->get_logger(), "=== Finished frame ==="); + + // Convert the processed cv::Mat back to sensor_msgs::msg::Image and publish + sensor_msgs::msg::Image::SharedPtr img_msg = + cv_bridge::CvImage(msg->header, "bgr8", image).toImageMsg(); + + if (debug_node_) { + image_pub_->publish(*img_msg); + } + + // Create the lane detection message + 
lane_detection_msgs::msg::LaneDetection lane_msg; + populate_lane_msg(lane_msg, raw_lane_list); + + if (publish_source_image_) { + lane_msg.source_img = *img_msg; + } + + // Save the output image + if (save_images_) { + save_image(image, save_dir_ + "/lane_detection_" + std::to_string(count_) + ".png"); + } + + lane_detection_pub_->publish(lane_msg); + count_++; +} + +int main(int argc, char *argv[]) { + /* Initialize image processor library */ + ImageProcessor::InputParam input_param = {RESOURCE_DIR, 4}; + if (ImageProcessor::Initialize(input_param) != 0) { + RCLCPP_ERROR(rclcpp::get_logger("rclcpp"), "Initialization Error\n"); + return -1; + } + RCLCPP_INFO(rclcpp::get_logger("rclcpp"), "Initializing node"); + rclcpp::init(argc, argv); + + std::shared_ptr node = std::make_shared(); + rclcpp::spin(node); + rclcpp::shutdown(); + return 0; +} diff --git a/src/perception/lane_detection/src/lane_detection_tensorrt b/src/perception/lane_detection/src/lane_detection_tensorrt new file mode 160000 index 00000000..86be6923 --- /dev/null +++ b/src/perception/lane_detection/src/lane_detection_tensorrt @@ -0,0 +1 @@ +Subproject commit 86be69239c01a17da6a3be86f96a0029cee7f146 diff --git a/src/perception/traffic_sign_detection/CMakeLists.txt b/src/perception/traffic_sign_detection/CMakeLists.txt deleted file mode 100644 index b968c4cf..00000000 --- a/src/perception/traffic_sign_detection/CMakeLists.txt +++ /dev/null @@ -1,14 +0,0 @@ -cmake_minimum_required(VERSION 3.8) -project(traffic_sign_detection) - -if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") - add_compile_options(-Wall -Wextra -Wpedantic) -endif() - -# find dependencies -find_package(ament_cmake REQUIRED) -# uncomment the following section in order to fill in -# further dependencies manually. 
-# find_package( REQUIRED) - -ament_package() diff --git a/src/simulation/carla_config/CMakeLists.txt b/src/simulation/carla_config/CMakeLists.txt new file mode 100644 index 00000000..deba4f20 --- /dev/null +++ b/src/simulation/carla_config/CMakeLists.txt @@ -0,0 +1,63 @@ +cmake_minimum_required(VERSION 3.5) +project(carla_config) + +# Default to C99 +if(NOT CMAKE_C_STANDARD) + set(CMAKE_C_STANDARD 99) +endif() + +# Default to C++14 +if(NOT CMAKE_CXX_STANDARD) + set(CMAKE_CXX_STANDARD 14) +endif() + +if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + add_compile_options(-Wall -Wextra -Wpedantic) +endif() + +# find dependencies +find_package(ament_cmake REQUIRED) +find_package(rclcpp REQUIRED) +find_package(path_planning_msgs REQUIRED) +find_package(embedded_msgs REQUIRED) + +# Create ROS2 node executable from source files +add_executable(carla_mpc_bridge src/carla_mpc_bridge.cpp) + +if(BUILD_TESTING) + # Search for dependencies required for building tests + linting + find_package(ament_cmake_gtest REQUIRED) + + # Create library to link cpp file with test + add_library(mpc_bridge_lib + src/carla_mpc_bridge.cpp) + + # Add ROS2 dependencies required by package + ament_target_dependencies(mpc_bridge_lib rclcpp path_planning_msgs embedded_msgs) + + # Create test executable from source files + ament_add_gtest(mpc_bridge_test test/mpc_bridge_test.cpp) + # Link to dependencies + target_link_libraries(mpc_bridge_test mpc_bridge_lib) + + # Copy executable to installation location + install(TARGETS + mpc_bridge_test + DESTINATION lib/${PROJECT_NAME}) +endif() + +# Link with dependencies +ament_target_dependencies(carla_mpc_bridge rclcpp path_planning_msgs embedded_msgs) + +# Copy executable to installation location +install(TARGETS + carla_mpc_bridge + DESTINATION lib/${PROJECT_NAME}) + +# Copy launch and config files to installation location +install(DIRECTORY + config + launch + DESTINATION share/${PROJECT_NAME}) + +ament_package() \ No newline at end 
of file diff --git a/src/simulation/carla_config/config/carla_settings.yaml b/src/simulation/carla_config/config/carla_settings.yaml new file mode 100644 index 00000000..d002d01c --- /dev/null +++ b/src/simulation/carla_config/config/carla_settings.yaml @@ -0,0 +1,28 @@ +use_sim_time: true +carla: + # the network connection for the python connection to CARLA, can be overwritten by launch parameters + # don't make anything other than the host setting reference an environment variable + host: $(optenv CARLA_HOSTNAME localhost) + port: 2000 + timeout: 20 + # enable/disable synchronous mode. If enabled ros-bridge waits until + # expected data is received for all sensors + synchronous_mode: true + # within synchronous mode: wait for a vehicle control command before next tick? + synchronous_mode_wait_for_vehicle_control_command: false + # set the fixed timestep length + fixed_delta_seconds: 0.05 + # town + # options: + # Town05, mcity + town: Town10HD_Opt + # configuration values for the ego vehicle + ego_vehicle: + # the role name of the vehicles that acts as ego vehicle for this ros bridge instance + # Only the vehicles within this list are controllable from within ROS. 
+ # (the vehicle from CARLA is selected which has the attribute 'role_name' set to this value) + role_name: ego + ackermann_control: + # override the default values of the pid speed controller + # (only relevant for ackermann control mode) + control_loop_rate: 0.05 \ No newline at end of file diff --git a/src/simulation/carla_config/config/mpc_bridge_config.yaml b/src/simulation/carla_config/config/mpc_bridge_config.yaml new file mode 100644 index 00000000..93bf14b8 --- /dev/null +++ b/src/simulation/carla_config/config/mpc_bridge_config.yaml @@ -0,0 +1,4 @@ +mpc_bridge_node: + ros_parameters: + mpc_output_topic: /mpc_output + steering_publisher_topic: /steering_data \ No newline at end of file diff --git a/src/simulation/carla_config/config/objects.json b/src/simulation/carla_config/config/objects.json new file mode 100644 index 00000000..42280919 --- /dev/null +++ b/src/simulation/carla_config/config/objects.json @@ -0,0 +1,280 @@ +{ + "objects": + [ + { + "type": "sensor.pseudo.traffic_lights", + "id": "traffic_lights" + }, + { + "type": "sensor.pseudo.objects", + "id": "objects" + }, + { + "type": "sensor.pseudo.actor_list", + "id": "actor_list" + }, + { + "type": "sensor.pseudo.markers", + "id": "markers" + }, + { + "type": "sensor.pseudo.opendrive_map", + "id": "map" + }, + { + "type": "vehicle.tesla.model3", + "id": "ego", + "sensors": + [ + { + "type": "sensor.camera.rgb", + "id": "rgb_right", + "spawn_point": {"x": 0.304, "y": 0.345, "z": 1.723, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "image_size_x": 1920, + "image_size_y": 1200, + "fov": 83, + "sensor_tick": 0.05, + "gamma": 2.2, + "shutter_speed": 200.0, + "iso": 100.0, + "fstop": 1.2, + "min_fstop": 1.2, + "blade_count": 5, + "exposure_mode": "histogram", + "exposure_compensation": 0.0, + "exposure_min_bright": 7.0, + "exposure_max_bright": 9.0, + "exposure_speed_up": 3.0, + "exposure_speed_down": 1.0, + "calibration_constant": 16.0, + "focal_distance": 1085, + "blur_amount": 1.0, + "blur_radius": 
0.0, + "motion_blur_intensity": 0.45, + "motion_blur_max_distortion": 0.35, + "motion_blur_min_object_screen_size": 0.1, + "slope": 0.88, + "toe": 0.55, + "shoulder": 0.26, + "black_clip": 0.0, + "white_clip": 0.04, + "temp": 6500.0, + "tint": 0.0, + "chromatic_aberration_intensity": 0.0, + "chromatic_aberration_offset": 0.0, + "enable_postprocess_effects": "True", + "lens_circle_falloff": 5.0, + "lens_circle_multiplier": 0.0, + "lens_k": -1.0, + "lens_kcube": 0.0, + "lens_x_size": 0.08, + "lens_y_size": 0.08 + }, + { + "type": "sensor.camera.semantic_segmentation", + "id": "right_seg", + "spawn_point": {"x": 0.304, "y": 0.345, "z": 1.723, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "image_size_x": 1920, + "image_size_y": 1200, + "fov": 83, + "sensor_tick": 0.05, + "lens_circle_falloff": 5.0, + "lens_circle_multiplier": 0.0, + "lens_k": -1.0, + "lens_kcube": 0.0, + "lens_x_size": 0.08, + "lens_y_size": 0.08 + }, + { + "type": "sensor.camera.rgb", + "id": "rgb_center", + "spawn_point": {"x": 0.304, "y": 0.0, "z": 1.617, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "image_size_x": 1920, + "image_size_y": 1200, + "fov": 120, + "sensor_tick": 0.05, + "gamma": 2.2, + "shutter_speed": 60.0, + "iso": 1200.0, + "fstop": 1.2, + "min_fstop": 1.2, + "blade_count": 5, + "exposure_mode": "histogram", + "exposure_compensation": 0.0, + "exposure_min_bright": 7.0, + "exposure_max_bright": 9.0, + "exposure_speed_up": 3.0, + "exposure_speed_down": 1.0, + "calibration_constant": 16.0, + "focal_distance": 1085, + "blur_amount": 1.0, + "blur_radius": 0.0, + "motion_blur_intensity": 0.45, + "motion_blur_max_distortion": 0.35, + "motion_blur_min_object_screen_size": 0.1, + "slope": 0.88, + "toe": 0.55, + "shoulder": 0.26, + "black_clip": 0.0, + "white_clip": 0.04, + "temp": 6500.0, + "tint": 0.0, + "chromatic_aberration_intensity": 0.0, + "chromatic_aberration_offset": 0.0, + "enable_postprocess_effects": "True", + "lens_circle_falloff": 5.0, + "lens_circle_multiplier": 0.0, + "lens_k": -1.0, 
+ "lens_kcube": 0.0, + "lens_x_size": 0.08, + "lens_y_size": 0.08 + }, + { + "type": "sensor.camera.rgb", + "id": "rgb_left", + "spawn_point": {"x": 0.304, "y": -0.345, "z": 1.723, "roll": 0.0, "pitch": 0.0, "yaw": -20.0}, + "image_size_x": 1920, + "image_size_y": 1200, + "fov": 83, + "sensor_tick": 0.05, + "gamma": 2.2, + "shutter_speed": 200.0, + "iso": 100.0, + "fstop": 1.2, + "min_fstop": 1.2, + "blade_count": 5, + "exposure_mode": "histogram", + "exposure_compensation": 0.0, + "exposure_min_bright": 7.0, + "exposure_max_bright": 9.0, + "exposure_speed_up": 3.0, + "exposure_speed_down": 1.0, + "calibration_constant": 16.0, + "focal_distance": 1085, + "blur_amount": 1.0, + "blur_radius": 0.0, + "motion_blur_intensity": 0.45, + "motion_blur_max_distortion": 0.35, + "motion_blur_min_object_screen_size": 0.1, + "slope": 0.88, + "toe": 0.55, + "shoulder": 0.26, + "black_clip": 0.0, + "white_clip": 0.04, + "temp": 6500.0, + "tint": 0.0, + "chromatic_aberration_intensity": 0.0, + "chromatic_aberration_offset": 0.0, + "enable_postprocess_effects": "True", + "lens_circle_falloff": 5.0, + "lens_circle_multiplier": 0.0, + "lens_k": -1.0, + "lens_kcube": 0.0, + "lens_x_size": 0.08, + "lens_y_size": 0.08 + }, + { + "type": "sensor.camera.rgb", + "id": "rgb_view", + "spawn_point": {"x": -4.5, "y": 0.0, "z": 2.8, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "image_size_x": 800, + "image_size_y": 600, + "fov": 90.0, + "sensor_tick": 0.05, + "gamma": 2.2, + "shutter_speed": 200.0, + "iso": 100.0, + "fstop": 8.0, + "min_fstop": 1.2, + "blade_count": 5, + "exposure_mode": "histogram", + "exposure_compensation": 0.0, + "exposure_min_bright": 7.0, + "exposure_max_bright": 9.0, + "exposure_speed_up": 3.0, + "exposure_speed_down": 1.0, + "calibration_constant": 16.0, + "focal_distance": 1000.0, + "blur_amount": 1.0, + "blur_radius": 0.0, + "motion_blur_intensity": 0.45, + "motion_blur_max_distortion": 0.35, + "motion_blur_min_object_screen_size": 0.1, + "slope": 0.88, + "toe": 0.55, + 
"shoulder": 0.26, + "black_clip": 0.0, + "white_clip": 0.04, + "temp": 6500.0, + "tint": 0.0, + "chromatic_aberration_intensity": 0.0, + "chromatic_aberration_offset": 0.0, + "enable_postprocess_effects": "True", + "lens_circle_falloff": 5.0, + "lens_circle_multiplier": 0.0, + "lens_k": -1.0, + "lens_kcube": 0.0, + "lens_x_size": 0.08, + "lens_y_size": 0.08, + "bloom_intensity": 0.675, + "lens_flare_intensity": 0.1 + }, + { + "type": "sensor.lidar.ray_cast", + "id": "center_lidar", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 2.03, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "range": 100, + "channels": 32, + "points_per_second": 1200000, + "upper_fov": 15, + "lower_fov": -25, + "rotation_frequency": 20, + "sensor_tick": 0.05 + }, + { + "type": "sensor.other.radar", + "id": "front", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 1.5, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "horizontal_fov": 30.0, + "vertical_fov": 10.0, + "points_per_second": 1500, + "range": 100.0 + }, + { + "type": "sensor.other.gnss", + "id": "gnss", + "spawn_point": {"x": 1.0, "y": 0.0, "z": 2.0}, + "noise_alt_stddev": 0.0, "noise_lat_stddev": 0.0, "noise_lon_stddev": 0.0, + "noise_alt_bias": 0.0, "noise_lat_bias": 0.0, "noise_lon_bias": 0.0 + }, + { + "type": "sensor.other.imu", + "id": "imu", + "spawn_point": {"x": 2.0, "y": 0.0, "z": 2.0, "roll": 0.0, "pitch": 0.0, "yaw": 0.0}, + "noise_accel_stddev_x": 0.0, "noise_accel_stddev_y": 0.0, "noise_accel_stddev_z": 0.0, + "noise_gyro_stddev_x": 0.0, "noise_gyro_stddev_y": 0.0, "noise_gyro_stddev_z": 0.0, + "noise_gyro_bias_x": 0.0, "noise_gyro_bias_y": 0.0, "noise_gyro_bias_z": 0.0 + }, + { + "type": "sensor.other.collision", + "id": "collision1", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 0.0} + }, + { + "type": "sensor.other.lane_invasion", + "id": "laneinvasion", + "spawn_point": {"x": 0.0, "y": 0.0, "z": 0.0} + }, + { + "type": "sensor.pseudo.tf", + "id": "tf" + }, + { + "type": "sensor.pseudo.odom", + "id": "odometry" + } + + ] + } + ] +} \ No 
newline at end of file diff --git a/src/simulation/carla_config/launch/carla.launch.py b/src/simulation/carla_config/launch/carla.launch.py new file mode 100644 index 00000000..888ef992 --- /dev/null +++ b/src/simulation/carla_config/launch/carla.launch.py @@ -0,0 +1,175 @@ +from launch import LaunchDescription +from launch_ros.actions import Node + +# For creating launch arguments +from launch.actions import DeclareLaunchArgument +from launch.substitutions import TextSubstitution, LaunchConfiguration +from launch.conditions import UnlessCondition + +# To load params from yaml file +import os +import yaml +from ament_index_python.packages import get_package_share_directory + +# For using other launch files +from launch.actions import IncludeLaunchDescription +from launch.launch_description_sources import PythonLaunchDescriptionSource + +# Use the second param (the launch argument) unless it is empty + + +def CheckParam(param1, param2): + try: + return param2 + except NameError: + return param1 + + +def generate_launch_description(): + """ Get file path of yaml file """ + config_file_path = os.path.join( + get_package_share_directory('carla_config'), + 'config', + 'carla_settings.yaml' + ) + mpc_bridge_confile_file_path = os.path.join( + get_package_share_directory('carla_config'), + 'config', + 'mpc_bridge_config.yaml' + ) + + """ Load params from yaml file """ + with open(config_file_path, 'r') as config_file: + params = yaml.safe_load(config_file) + with open(mpc_bridge_confile_file_path, 'r') as config_file: + mpc_bridge_config = yaml.safe_load(config_file) + + """ Get hostname from yaml file """ + # Checking if the hostname provided in the yaml file is referencing an env + # variable + if '$' in params['carla']['host']: + hostname = os.environ.get( + params['carla']['host'].split()[1], + params['carla']['host'].split()[2]) + else: + hostname = params['carla']['host'] + + """ Declare launch arguments """ + # carla-ros-bridge args: See carla_settings.yaml for 
default arg values + host_arg = DeclareLaunchArgument('host', default_value=hostname) + port_arg = DeclareLaunchArgument( + 'port', default_value=str( + params['carla']['port'])) + timeout_arg = DeclareLaunchArgument( + 'timeout', default_value=str( + params['carla']['timeout'])) + sync_mode_arg = DeclareLaunchArgument( + 'synchronous_mode', default_value=str( + params['carla']['synchronous_mode'])) + sync_mode_wait_arg = DeclareLaunchArgument( + 'synchronous_mode_wait_for_vehicle_control_command', default_value=str( + params['carla']['synchronous_mode_wait_for_vehicle_control_command'])) + fixed_delta_arg = DeclareLaunchArgument( + 'fixed_delta_seconds', default_value=str( + params['carla']['fixed_delta_seconds'])) + town_arg = DeclareLaunchArgument( + 'town', default_value=params['carla']['town']) + + # Carla ego vehicle args + objects_definition_arg = DeclareLaunchArgument( + 'objects_definition_file', + default_value=str( + get_package_share_directory('carla_config') + + '/config/objects.json')) + role_name_arg = DeclareLaunchArgument( + 'role_name', default_value=params['carla']['ego_vehicle']['role_name']) + spawn_point_arg = DeclareLaunchArgument( + 'spawn_point', default_value='None') + spawn_sensors_only_arg = DeclareLaunchArgument( + 'spawn_sensors_only', default_value='False') + + # Ackermann control args + control_loop_rate_arg = DeclareLaunchArgument( + 'control_loop_rate', default_value=str( + params['carla']['ackermann_control']['control_loop_rate'])) + + """Launch carla ros bridge node.""" + carla_ros_bridge = Node( + package='carla_ros_bridge', + executable='bridge', + output='screen', + parameters=[{ + 'host': LaunchConfiguration('host'), + 'port': LaunchConfiguration('port'), + 'timeout': LaunchConfiguration('timeout'), + 'synchronous_mode': LaunchConfiguration('synchronous_mode'), + 'synchronous_mode_wait_for_vehicle_control_command': LaunchConfiguration('synchronous_mode_wait_for_vehicle_control_command'), + 'fixed_delta_seconds': 
LaunchConfiguration('fixed_delta_seconds'), + 'town': LaunchConfiguration('town'), + }], + respawn=True + ) + + """Launch ego vehicle spawner nodes (using carla_example_ego_vehicle.launch.py)""" + carla_ego_vehicle = IncludeLaunchDescription( + PythonLaunchDescriptionSource( + os.path.join( + get_package_share_directory('carla_spawn_objects'), + 'carla_example_ego_vehicle.launch.py')), + launch_arguments={ + 'objects_definition_file': LaunchConfiguration('objects_definition_file'), + 'role_name': LaunchConfiguration('role_name'), + 'spawn_point_ego_vehicle': LaunchConfiguration('spawn_point'), + 'spawn_sensors_only': LaunchConfiguration('spawn_sensors_only')}.items()) + + if (os.environ.get('USE_ACKERMANN_CONTROL', 'True').lower() == 'true'): + """ Launch Ackermann Control Node """ + carla_control = Node( + package='carla_ackermann_control', + executable='carla_ackermann_control_node', + output='screen', + parameters=[{ + 'role_name': LaunchConfiguration('role_name'), + 'control_loop_rate': LaunchConfiguration('control_loop_rate') + }] + ) + else: + carla_control = IncludeLaunchDescription( + PythonLaunchDescriptionSource( + os.path.join(get_package_share_directory( + 'carla_manual_control'), 'carla_manual_control.launch.py') + ), + launch_arguments={ + 'role_name': LaunchConfiguration('role_name') + }.items() + ) + + """ Launch MPC Bridge Node """ + carla_mpc_bridge = Node( + package='carla_config', + executable='carla_mpc_bridge', + parameters=[{ + 'mpc_moutput_topic': mpc_bridge_config['mpc_bridge_node']['ros_parameters']['mpc_output_topic'], + 'steering_publisher_topic': mpc_bridge_config['mpc_bridge_node']['ros_parameters']['steering_publisher_topic'] + }], + output='screen' + ) + + return LaunchDescription([ + host_arg, + port_arg, + timeout_arg, + sync_mode_arg, + sync_mode_wait_arg, + fixed_delta_arg, + town_arg, + objects_definition_arg, + role_name_arg, + spawn_point_arg, + spawn_sensors_only_arg, + control_loop_rate_arg, + carla_ros_bridge, + 
carla_ego_vehicle, + carla_control, + carla_mpc_bridge, + ]) diff --git a/src/simulation/carla_config/package.xml b/src/simulation/carla_config/package.xml new file mode 100644 index 00000000..9ea6c3b5 --- /dev/null +++ b/src/simulation/carla_config/package.xml @@ -0,0 +1,20 @@ + + + carla_config + 0.0.0 + Contains launch files, scenario scripts, and custom carla bridge nodes + + Vishal Jayakumar + + TODO: License declaration + + rclcpp + path_planning_msgs + embedded_msgs + + ament_cmake + + + ament_cmake + + \ No newline at end of file diff --git a/src/simulation/carla_config/src/carla_mpc_bridge.cpp b/src/simulation/carla_config/src/carla_mpc_bridge.cpp new file mode 100644 index 00000000..dce4b3bf --- /dev/null +++ b/src/simulation/carla_config/src/carla_mpc_bridge.cpp @@ -0,0 +1,109 @@ +#include "embedded_msgs/msg/steering_angle_can.hpp" +#include "path_planning_msgs/msg/ackermann_drive.hpp" +#include "path_planning_msgs/msg/carla_ego_vehicle_status.hpp" +#include "path_planning_msgs/msg/environment.hpp" +#include "path_planning_msgs/msg/mpc_output.hpp" +#include "rclcpp/rclcpp.hpp" +using std::placeholders::_1; + +class MPC_Bridge_Node : public rclcpp::Node { + // offset for the rosbag + double xOff = 0; + double yOff = 0; + + rclcpp::Publisher::SharedPtr steeringPub; + rclcpp::Publisher::SharedPtr carlaPub; + + public: + MPC_Bridge_Node() : Node("carla_mpc_bridge") { + // Declare Parameters + this->declare_parameter("mpc_output_topic", "/mpc_output"); + this->declare_parameter("steering_publisher_topic", "/steering_data"); + + // Get Parameters + std::string mpc_output_topic = this->get_parameter("mpc_output_topic").as_string(); + std::string steering_publisher_topic = + this->get_parameter("steering_publisher_topic").as_string(); + + // Wait for service + auto parameters_client = + std::make_shared(this, "set_initial_pose"); + while (!parameters_client->wait_for_service(std::chrono::seconds(1))) { + if (!rclcpp::ok()) { + RCLCPP_ERROR(this->get_logger(), 
"Interrupted while waiting for the service. Exiting."); + return; + } + RCLCPP_INFO(this->get_logger(), "service not available, waiting again..."); + } + + // Check if role name specified + std::string role_name; + std::string param_name = "role_name"; + do { + role_name = parameters_client->get_parameters({"role_name"})[0].get_value(); + std::this_thread::sleep_for(std::chrono::seconds(1)); + } while (role_name == ""); + + // Subscribe to vehicle status + rclcpp::Subscription::SharedPtr + steeringAngleSub; + steeringAngleSub = this->create_subscription( + "/carla/" + role_name + "/vehicle_status", 2, + std::bind(&MPC_Bridge_Node::publish_steering, this, _1)); + + // Subscribe to MPC Output + rclcpp::Subscription::SharedPtr mpcSub; + mpcSub = this->create_subscription( + mpc_output_topic, 2, std::bind(&MPC_Bridge_Node::mpc_to_carla, this, _1)); + + rclcpp::Subscription::SharedPtr envSub; + envSub = this->create_subscription( + "/path_planning/environment", 2, std::bind(&MPC_Bridge_Node::env_callback, this, _1)); + + this->steeringPub = + this->create_publisher(steering_publisher_topic, 1); + this->carlaPub = this->create_publisher( + "/carla/" + role_name + "/ackermann_cmd", 1); + } + + private: + void publish_steering(path_planning_msgs::msg::CarlaEgoVehicleStatus::SharedPtr vehicle) { + // steer is between -1 and 1, max steer is 70 degrees (1.22173rad), so + // multiply steer by rad to get approximate steer rwa = + // (steer.steering_angle * 0.0629 - 0.1363) * 0.0174533; + auto steer = embedded_msgs::msg::SteeringAngleCAN(); + steer.steering_angle = (vehicle->control.steer * -70 + 0.1363) / 0.0629; + this->steeringPub->publish(steer); + } + + void env_callback(path_planning_msgs::msg::Environment::SharedPtr env) { + static bool receivedEnv = false; + + if (!receivedEnv) { + receivedEnv = true; + this->xOff = env->global_pose.position.x; + this->yOff = env->global_pose.position.y; + } + } + + void mpc_to_carla(path_planning_msgs::msg::MPCOutput::SharedPtr 
mpcOutput) { + auto carlaControl = path_planning_msgs::msg::AckermannDrive(); + carlaControl.steering_angle = mpcOutput->steer; + carlaControl.speed = mpcOutput->accel > 0 ? mpcOutput->accel : 0; + this->carlaPub->publish(carlaControl); + } +}; + +int main(int argc, char **argv) { + // Initialize ROS2 + rclcpp::init(argc, argv); + + // Create Node + auto node = std::make_shared(); + + rclcpp::spin(node); + + rclcpp::shutdown(); + + return 0; +} \ No newline at end of file diff --git a/src/simulation/carla_config/test/mpc_bridge_test.cpp b/src/simulation/carla_config/test/mpc_bridge_test.cpp new file mode 100644 index 00000000..e69de29b diff --git a/src/simulation/carla_notebooks/.gitignore b/src/simulation/carla_notebooks/.gitignore new file mode 100644 index 00000000..30992009 --- /dev/null +++ b/src/simulation/carla_notebooks/.gitignore @@ -0,0 +1,2 @@ +.ipynb_checkpoints +_pycache_ \ No newline at end of file diff --git a/src/simulation/carla_notebooks/sample_notebook.ipynb b/src/simulation/carla_notebooks/sample_notebook.ipynb new file mode 100644 index 00000000..1e270af5 --- /dev/null +++ b/src/simulation/carla_notebooks/sample_notebook.ipynb @@ -0,0 +1,74 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Setup Code\n", + "**DO NOT CHANGE THIS!!**\n", + "**RUN THE CELL BELOW FIRST!!**" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import carla\n", + "import os\n", + "\n", + "client_name = os.environ.get(\"CLIENT_NAME\", \"DOES NOT EXIST\")\n", + "if client_name == \"DOES NOT EXIST\":\n", + " raise Exception(\"The environment variable for the container name of the carla server has not been set\")\n", + "\n", + "# Connect to the client and retrieve the world object\n", + "client = carla.Client(client_name, 2000)\n", + "world = client.get_world()" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ 
+ "# Start Adding Code or Markdown Cells Below\n", + "Some helpful code cells are provided below that you may choose to use" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#### SPAWN VEHICLES ####\n", + "\"\"\"\n", + "# Get the blueprint library and filter for the vehicle blueprints\n", + "vehicle_blueprints = world.get_blueprint_library().filter('*vehicle*')\n", + "# Get the map's spawn points\n", + "spawn_points = world.get_map().get_spawn_points()\n", + "\n", + "# Spawn 50 vehicles randomly distributed throughout the map \n", + "# for each spawn point, we choose a random vehicle from the blueprint library\n", + "for i in range(0,50):\n", + " world.try_spawn_actor(random.choice(vehicle_blueprints), random.choice(spawn_points)))\n", + "\"\"\"\n", + "#### SET ALL VEHICLES TO AUTOPILOT\n", + "\"\"\"\n", + "for vehicle in world.get_actors().filter('*vehicle*'):\n", + " vehicle.set_autopilot(True)\n", + "\"\"\"" + ] + } + ], + "metadata": { + "language_info": { + "name": "python" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/src/simulation/carla_notebooks/scenarios/custom_scenarios.ipynb b/src/simulation/carla_notebooks/scenarios/custom_scenarios.ipynb new file mode 100644 index 00000000..d89d2407 --- /dev/null +++ b/src/simulation/carla_notebooks/scenarios/custom_scenarios.ipynb @@ -0,0 +1,20 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python /home/docker/scenario_runner/scenario_runner.py --host $CLIENT_NAME --configFile /home/docker/carla_notebooks/scenarios/custom_scenarios.xml --scenario OtherLeadingVehicle_Custom --waitForEgo" + ] + } + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/src/simulation/carla_notebooks/scenarios/custom_scenarios.xml 
b/src/simulation/carla_notebooks/scenarios/custom_scenarios.xml new file mode 100644 index 00000000..5514d042 --- /dev/null +++ b/src/simulation/carla_notebooks/scenarios/custom_scenarios.xml @@ -0,0 +1,9 @@ + + + + + + + + + \ No newline at end of file diff --git a/src/simulation/carla_sample_node/.gitignore b/src/simulation/carla_sample_node/.gitignore new file mode 100644 index 00000000..cd3d2253 --- /dev/null +++ b/src/simulation/carla_sample_node/.gitignore @@ -0,0 +1 @@ +logs \ No newline at end of file diff --git a/src/simulation/carla_sample_node/carla_sample_node/__init__.py b/src/simulation/carla_sample_node/carla_sample_node/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/simulation/carla_sample_node/carla_sample_node/carla_sample_node.py b/src/simulation/carla_sample_node/carla_sample_node/carla_sample_node.py new file mode 100644 index 00000000..37f8e660 --- /dev/null +++ b/src/simulation/carla_sample_node/carla_sample_node/carla_sample_node.py @@ -0,0 +1,56 @@ +import rclpy +from rclpy.node import Node + +from nav_msgs.msg import Odometry # For odometry +from sensor_msgs.msg import NavSatFix # For GNSS + +from std_msgs.msg import Bool + +import os + + +class Datalogger(Node): + def __init__(self): + super().__init__('datalogger') + self.declare_parameter("publish_autopilot", False) + self.gnssSubscription = self.create_subscription( + NavSatFix, + '/carla/ego_vehicle/gnss', + self.gnss_callback, + 10 + ) + if self.get_parameter("publish_autopilot").value: + self.autopilotPublisher = self.create_publisher( + Bool, + '/carla/ego_vehicle/enable_autopilot', + 10 + ) + # Publish autopilot message every 10 seconds + self.timer = self.create_timer(10, self.timer_callback) + + def gnss_callback(self, msg): + # with open("/home/docker/ament_ws/src/carla_sample_node/logs/gnss_" + self.containerId + ".txt", 'a+') as file: + # file.write(str(msg.latitude) + ", " + str(msg.longitude) + "\n") + self.get_logger().info(str(msg.latitude) + 
", " + + str(msg.longitude)) # print to screen + + def timer_callback(self): + msg = Bool() + msg.data = True + self.autopilotPublisher.publish(msg) + + +def main(args=None): + rclpy.init(args=args) + + datalogger = Datalogger() + + rclpy.spin(datalogger) + + datalogger.destroy_node() + rclpy.shutdown() + return + + +if __name__ == '__main__': + main() diff --git a/src/simulation/carla_sample_node/launch/carla_sample_node.launch.py b/src/simulation/carla_sample_node/launch/carla_sample_node.launch.py new file mode 100644 index 00000000..45a077bb --- /dev/null +++ b/src/simulation/carla_sample_node/launch/carla_sample_node.launch.py @@ -0,0 +1,25 @@ +from launch import LaunchDescription +import launch_ros.actions + +# For creating launch arguments +from launch.actions import DeclareLaunchArgument +from launch.substitutions import LaunchConfiguration + + +def generate_launch_description(): + publish_autopilot_arg = DeclareLaunchArgument( + 'publish_autopilot', + default_value='False' + ) + + return LaunchDescription( + [ + launch_ros.actions.Node( + namespace="carla_sample_node", + package='carla_sample_node', + executable='carla_sample_node', + parameters=[{ + 'publish_autopilot': LaunchConfiguration('publish_autopilot') + }], + output='screen') + ]) diff --git a/src/simulation/carla_sample_node/package.xml b/src/simulation/carla_sample_node/package.xml new file mode 100644 index 00000000..7e3d0b25 --- /dev/null +++ b/src/simulation/carla_sample_node/package.xml @@ -0,0 +1,20 @@ + + + + carla_sample_node + 0.0.0 + Sample python node to read data from CARLA + Vishal Jayakumar + TODO: License declaration + + ament_copyright + ament_flake8 + ament_pep257 + python3-pytest + + rclpy + + + ament_python + + \ No newline at end of file diff --git a/src/simulation/carla_sample_node/resource/carla_sample_node b/src/simulation/carla_sample_node/resource/carla_sample_node new file mode 100644 index 00000000..e69de29b diff --git a/src/simulation/carla_sample_node/setup.cfg 
b/src/simulation/carla_sample_node/setup.cfg new file mode 100644 index 00000000..e9a26297 --- /dev/null +++ b/src/simulation/carla_sample_node/setup.cfg @@ -0,0 +1,4 @@ +[develop] +script_dir=$base/lib/carla_sample_node +[install] +install_scripts=$base/lib/carla_sample_node \ No newline at end of file diff --git a/src/simulation/carla_sample_node/setup.py b/src/simulation/carla_sample_node/setup.py new file mode 100644 index 00000000..18127651 --- /dev/null +++ b/src/simulation/carla_sample_node/setup.py @@ -0,0 +1,29 @@ +from setuptools import setup +from glob import glob +import os + +package_name = 'carla_sample_node' + +setup( + name=package_name, + version='0.0.0', + packages=[package_name], + data_files=[ + ('share/ament_index/resource_index/packages', + ['resource/' + package_name]), + ('share/' + package_name, ['package.xml']), + ('share/' + package_name, glob(os.path.join('launch', '*.launch.py'))) + ], + install_requires=['setuptools'], + zip_safe=True, + maintainer='Vishal Jayakumar', + maintainer_email='v3jayaku@watonomous.ca', + description='Sample python node to read data from CARLA', + license='TODO: License declaration', + tests_require=['pytest'], + entry_points={ + 'console_scripts': [ + 'carla_sample_node = carla_sample_node.carla_sample_node:main' + ], + }, +) diff --git a/src/wato_msgs/perception_msgs/lane_detection_msgs/CMakeLists.txt b/src/wato_msgs/perception_msgs/lane_detection_msgs/CMakeLists.txt new file mode 100644 index 00000000..b2cd1884 --- /dev/null +++ b/src/wato_msgs/perception_msgs/lane_detection_msgs/CMakeLists.txt @@ -0,0 +1,26 @@ +cmake_minimum_required(VERSION 3.10) +project(lane_detection_msgs) + +if(NOT CMAKE_CXX_STANDARD) + set(CMAKE_CXX_STANDARD 17) +endif() + +if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + add_compile_options(-Wall -Wextra -Wpedantic) +endif() + +find_package(ament_cmake REQUIRED) +find_package(std_msgs REQUIRED) +find_package(geometry_msgs REQUIRED) +find_package(sensor_msgs 
REQUIRED) + +set(msg_files + msg/LaneLine.msg + msg/LaneDetection.msg) + +rosidl_generate_interfaces(lane_detection_msgs + ${msg_files} + DEPENDENCIES std_msgs geometry_msgs sensor_msgs) + +ament_export_dependencies(rosidl_default_runtime) +ament_package() \ No newline at end of file diff --git a/src/wato_msgs/perception_msgs/lane_detection_msgs/msg/LaneDetection.msg b/src/wato_msgs/perception_msgs/lane_detection_msgs/msg/LaneDetection.msg new file mode 100644 index 00000000..abc06496 --- /dev/null +++ b/src/wato_msgs/perception_msgs/lane_detection_msgs/msg/LaneDetection.msg @@ -0,0 +1,5 @@ +std_msgs/Header header +LaneLine[] lines + +# Source image (optional) +sensor_msgs/Image source_img \ No newline at end of file diff --git a/src/wato_msgs/perception_msgs/lane_detection_msgs/msg/LaneLine.msg b/src/wato_msgs/perception_msgs/lane_detection_msgs/msg/LaneLine.msg new file mode 100644 index 00000000..7b8976ce --- /dev/null +++ b/src/wato_msgs/perception_msgs/lane_detection_msgs/msg/LaneLine.msg @@ -0,0 +1 @@ +geometry_msgs/Point[] points \ No newline at end of file diff --git a/src/wato_msgs/perception_msgs/lane_detection_msgs/package.xml b/src/wato_msgs/perception_msgs/lane_detection_msgs/package.xml new file mode 100644 index 00000000..400c9b6d --- /dev/null +++ b/src/wato_msgs/perception_msgs/lane_detection_msgs/package.xml @@ -0,0 +1,22 @@ + + + lane_detection_msgs + 0.0.0 + Lane Detection ROS2 messages + + Perception + TODO + + ament_cmake + std_msgs + geometry_msgs + sensor_msgs + + rosidl_default_generators + rosidl_default_runtime + rosidl_interface_packages + + + ament_cmake + + \ No newline at end of file diff --git a/src/wato_msgs/radar_msgs/CMakeLists.txt b/src/wato_msgs/perception_msgs/radar_msgs/CMakeLists.txt similarity index 100% rename from src/wato_msgs/radar_msgs/CMakeLists.txt rename to src/wato_msgs/perception_msgs/radar_msgs/CMakeLists.txt diff --git a/src/wato_msgs/radar_msgs/msg/RadarDetection.msg 
b/src/wato_msgs/perception_msgs/radar_msgs/msg/RadarDetection.msg similarity index 100% rename from src/wato_msgs/radar_msgs/msg/RadarDetection.msg rename to src/wato_msgs/perception_msgs/radar_msgs/msg/RadarDetection.msg diff --git a/src/wato_msgs/radar_msgs/msg/RadarPacket.msg b/src/wato_msgs/perception_msgs/radar_msgs/msg/RadarPacket.msg similarity index 100% rename from src/wato_msgs/radar_msgs/msg/RadarPacket.msg rename to src/wato_msgs/perception_msgs/radar_msgs/msg/RadarPacket.msg diff --git a/src/wato_msgs/radar_msgs/package.xml b/src/wato_msgs/perception_msgs/radar_msgs/package.xml similarity index 100% rename from src/wato_msgs/radar_msgs/package.xml rename to src/wato_msgs/perception_msgs/radar_msgs/package.xml diff --git a/src/wato_msgs/simulation/embedded_msgs/CMakeLists.txt b/src/wato_msgs/simulation/embedded_msgs/CMakeLists.txt new file mode 100644 index 00000000..2b721e39 --- /dev/null +++ b/src/wato_msgs/simulation/embedded_msgs/CMakeLists.txt @@ -0,0 +1,25 @@ +cmake_minimum_required(VERSION 3.10) +project(embedded_msgs) + +# Set compiler to use C++ 17 standard +if(NOT CMAKE_CXX_STANDARD) + set(CMAKE_CXX_STANDARD 17) +endif() + +if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + add_compile_options(-Wall -Wextra -Wpedantic) +endif() + +# Search for dependencies required for building this package +find_package(ament_cmake REQUIRED) # ROS2 build tool +find_package(std_msgs REQUIRED) # ROS2 common messages + +set(msg_files + msg/SteeringAngleCAN.msg) +# Parses message files to generate ROS2 interfaces +rosidl_generate_interfaces(embedded_msgs + ${msg_files} + DEPENDENCIES std_msgs) + +ament_export_dependencies(rosidl_default_runtime) +ament_package() \ No newline at end of file diff --git a/src/wato_msgs/simulation/embedded_msgs/msg/SteeringAngleCAN.msg b/src/wato_msgs/simulation/embedded_msgs/msg/SteeringAngleCAN.msg new file mode 100644 index 00000000..e274f9d0 --- /dev/null +++ 
b/src/wato_msgs/simulation/embedded_msgs/msg/SteeringAngleCAN.msg @@ -0,0 +1,4 @@ +float64 steering_angle +float64 steering_gradient +int8 steering_availability_status +bool driver_steering_interference_detected \ No newline at end of file diff --git a/src/wato_msgs/simulation/embedded_msgs/package.xml b/src/wato_msgs/simulation/embedded_msgs/package.xml new file mode 100644 index 00000000..aa63197f --- /dev/null +++ b/src/wato_msgs/simulation/embedded_msgs/package.xml @@ -0,0 +1,20 @@ + + + embedded_msgs + 0.0.0 + Embedded ROS2 Messages + + Vishal Jayakumar + TODO + + ament_cmake + std_msgs + + rosidl_default_generators + rosidl_default_runtime + rosidl_interface_packages + + + ament_cmake + + \ No newline at end of file diff --git a/src/wato_msgs/simulation/path_planning_msgs/CMakeLists.txt b/src/wato_msgs/simulation/path_planning_msgs/CMakeLists.txt new file mode 100644 index 00000000..8fd9b59e --- /dev/null +++ b/src/wato_msgs/simulation/path_planning_msgs/CMakeLists.txt @@ -0,0 +1,30 @@ +cmake_minimum_required(VERSION 3.10) +project(path_planning_msgs) + +# Set compiler to use C++ 17 standard +if(NOT CMAKE_CXX_STANDARD) + set(CMAKE_CXX_STANDARD 17) +endif() + +if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + add_compile_options(-Wall -Wextra -Wpedantic) +endif() + +# Search for dependencies required for building this package +find_package(ament_cmake REQUIRED) # ROS2 build tool +find_package(std_msgs REQUIRED) # ROS2 common messages +find_package(geometry_msgs REQUIRED) + +set(msg_files + msg/CarlaEgoVehicleStatus.msg + msg/CarlaEgoVehicleControl.msg + msg/Environment.msg + msg/MPCOutput.msg + msg/AckermannDrive.msg) +# Parses message files to generate ROS2 interfaces +rosidl_generate_interfaces(path_planning_msgs + ${msg_files} + DEPENDENCIES std_msgs geometry_msgs) + +ament_export_dependencies(rosidl_default_runtime) +ament_package() \ No newline at end of file diff --git 
a/src/wato_msgs/simulation/path_planning_msgs/msg/AckermannDrive.msg b/src/wato_msgs/simulation/path_planning_msgs/msg/AckermannDrive.msg new file mode 100644 index 00000000..ef4dcf89 --- /dev/null +++ b/src/wato_msgs/simulation/path_planning_msgs/msg/AckermannDrive.msg @@ -0,0 +1,38 @@ +## Driving command for a car-like vehicle using Ackermann steering. +# $Id$ + +# Assumes Ackermann front-wheel steering. The left and right front +# wheels are generally at different angles. To simplify, the commanded +# angle corresponds to the yaw of a virtual wheel located at the +# center of the front axle, like on a tricycle. Positive yaw is to +# the left. (This is *not* the angle of the steering wheel inside the +# passenger compartment.) +# +# Zero steering angle velocity means change the steering angle as +# quickly as possible. Positive velocity indicates a desired absolute +# rate of change either left or right. The controller tries not to +# exceed this limit in either direction, but sometimes it might. +# +float32 steering_angle # desired virtual angle (radians) +float32 steering_angle_velocity # desired rate of change (radians/s) + +# Drive at requested speed, acceleration and jerk (the 1st, 2nd and +# 3rd derivatives of position). All are measured at the vehicle's +# center of rotation, typically the center of the rear axle. The +# controller tries not to exceed these limits in either direction, but +# sometimes it might. +# +# Speed is the desired scalar magnitude of the velocity vector. +# Direction is forward unless the sign is negative, indicating reverse. +# +# Zero acceleration means change speed as quickly as +# possible. Positive acceleration indicates a desired absolute +# magnitude; that includes deceleration. +# +# Zero jerk means change acceleration as quickly as possible. Positive +# jerk indicates a desired absolute rate of acceleration change in +# either direction (increasing or decreasing). 
+# +float32 speed # desired forward speed (m/s) +float32 acceleration # desired acceleration (m/s^2) +float32 jerk # desired jerk (m/s^3) \ No newline at end of file diff --git a/src/wato_msgs/simulation/path_planning_msgs/msg/CarlaEgoVehicleControl.msg b/src/wato_msgs/simulation/path_planning_msgs/msg/CarlaEgoVehicleControl.msg new file mode 100644 index 00000000..0a69577f --- /dev/null +++ b/src/wato_msgs/simulation/path_planning_msgs/msg/CarlaEgoVehicleControl.msg @@ -0,0 +1,32 @@ +# +# Copyright (c) 2018-2019 Intel Corporation. +# +# This work is licensed under the terms of the MIT license. +# For a copy, see . +# +# This represents a vehicle control message sent to CARLA simulator + +std_msgs/Header header + +# The CARLA vehicle control data + +# 0. <= throttle <= 1. +float32 throttle + +# -1. <= steer <= 1. +float32 steer + +# 0. <= brake <= 1. +float32 brake + +# hand_brake 0 or 1 +bool hand_brake + +# reverse 0 or 1 +bool reverse + +# gear +int32 gear + +# manual gear shift +bool manual_gear_shift diff --git a/src/wato_msgs/simulation/path_planning_msgs/msg/CarlaEgoVehicleStatus.msg b/src/wato_msgs/simulation/path_planning_msgs/msg/CarlaEgoVehicleStatus.msg new file mode 100644 index 00000000..650e40e5 --- /dev/null +++ b/src/wato_msgs/simulation/path_planning_msgs/msg/CarlaEgoVehicleStatus.msg @@ -0,0 +1,15 @@ +# +# Copyright (c) 2019 Intel Corporation. +# +# This work is licensed under the terms of the MIT license. +# For a copy, see . 
+# + +std_msgs/Header header + +float32 velocity +geometry_msgs/Accel acceleration +geometry_msgs/Quaternion orientation + +# the current control values, as reported by Carla +CarlaEgoVehicleControl control diff --git a/src/wato_msgs/simulation/path_planning_msgs/msg/Environment.msg b/src/wato_msgs/simulation/path_planning_msgs/msg/Environment.msg new file mode 100644 index 00000000..c63726db --- /dev/null +++ b/src/wato_msgs/simulation/path_planning_msgs/msg/Environment.msg @@ -0,0 +1,9 @@ +std_msgs/Header header + +# Vehicle State +####################### + +float64 forward_speed # m/s +float64 forward_accel # m/s^2 +float64 wheel_angle # degrees (not sure if we want/need this) +geometry_msgs/Pose global_pose # in meters from where odometer was started up \ No newline at end of file diff --git a/src/wato_msgs/simulation/path_planning_msgs/msg/MPCOutput.msg b/src/wato_msgs/simulation/path_planning_msgs/msg/MPCOutput.msg new file mode 100644 index 00000000..4565da0a --- /dev/null +++ b/src/wato_msgs/simulation/path_planning_msgs/msg/MPCOutput.msg @@ -0,0 +1,2 @@ +float64 accel +float64 steer \ No newline at end of file diff --git a/src/wato_msgs/simulation/path_planning_msgs/package.xml b/src/wato_msgs/simulation/path_planning_msgs/package.xml new file mode 100644 index 00000000..a34f4a27 --- /dev/null +++ b/src/wato_msgs/simulation/path_planning_msgs/package.xml @@ -0,0 +1,21 @@ + + + path_planning_msgs + 0.0.0 + Path Planning ROS2 Messages + + Vishal Jayakumar + TODO + + ament_cmake + std_msgs + geometry_msgs + + rosidl_default_generators + rosidl_default_runtime + rosidl_interface_packages + + + ament_cmake + + \ No newline at end of file diff --git a/watod b/watod index f81630da..0b5d1f87 100755 --- a/watod +++ b/watod @@ -8,13 +8,8 @@ ANSIBLE_TMP_DIR=/tmp/ansible-tmp-$USER function usage { echo "Usage: $programname [OPTIONS]... [COMMAND]..." - echo "Executes docker compose COMMAND in the the monorepo with appropriate environment variables." 
- echo " launch_autonomous launch the can_interface for autonomous mode" - echo " -dev --devel configures containers for development (allows you to edit inside a container)" + echo "Executes \"docker compose COMMAND\" in the the monorepo with appropriate environment variables." echo " -v --verbose verbose mode. Currently, prints .env variables" - echo " -lp --local-ports displays ports exposed by applications and shows the" - echo " ssh local port forwarding commands for each VNC Server found" - echo " if a docker compose command is specified, it will be run in the background." echo " -t --terminal [service_name] open a bash terminal into the desired service (eg: perception)." echo "" echo "Examples:" @@ -53,18 +48,10 @@ COMPOSE_CMD="" while [[ $# -gt 0 ]] ; do key="$1" case $key in - -dev|--devel) - DEVEL=1 - shift - ;; -v|--verbose) VERBOSE=1 shift # past argument ;; - -lp|--local-ports) - PRINT_LOCAL_PORTS=1 - shift - ;; -t|--terminal) START_TERMINAL=1 shift @@ -88,55 +75,28 @@ if [[ $# -gt 0 ]]; then COMPOSE_CMD="${COMPOSE_CMD} $@" fi -if [ ! -z $DEVEL ]; then - echo "DEVELOPER MODE ACTIVATED: Source files have been mounted within /home/bolty when you enter the container" - MODULES_DIR="$MONO_DIR/modules/dev_overrides" - export MODULES_DIR_EXP="$MODULES_DIR" +# Allow for local overrides of any of the below parameters (prioritized your local config) +if [ -f "$MONO_DIR/watod-config.local.sh" ]; then + source "$MONO_DIR/watod-config.local.sh" +elif [ -f "$MONO_DIR/watod-config.sh" ]; then + source "$MONO_DIR/watod-config.sh" fi -# generate .env file from watod_scripts/watod-setup-env.sh -if [ ! 
-z $VERBOSE ]; then # if we want to see all verbose coming from setting up env - cd $MONO_DIR && bash watod_scripts/watod-setup-env.sh +# Change docker-compose override according to mode of operation +MODE_OF_OPERATION=${MODE_OF_OPERATION:-"deploy"} +if [ $MODE_OF_OPERATION == "deploy" ]; then + MODULES_DIR="$MONO_DIR/modules" +elif [ $MODE_OF_OPERATION == "develop" ]; then + MODULES_DIR="$MONO_DIR/modules/dev_overrides" else - cd $MONO_DIR && bash watod_scripts/watod-setup-env.sh &> /dev/null + MODULES_DIR="$MONO_DIR/modules" fi -if [ ! -z $PRINT_LOCAL_PORTS ]; then - # get list of services to traverse - SERVICE_LIST="$(run_compose ps --services | sort)" - - # fixes hostnames for VMs so that by default $HOSTNAME is the first element - # in the array, which often is the domain name that can be accessed - # externally - read -ra HOSTNAME <<< $(hostname -A) - echo "=========================================================================" - echo "Ports exposed by running containers:" - echo "=========================================================================" - LOCAL_FORWARD_ALL="" - - PORTS_STRING="" - for SERVICE in $SERVICE_LIST; do - ENV_VARS="$(run_compose exec "$SERVICE" env || true)" - VNC_PORT=$(echo "$ENV_VARS" | grep ^VNC_PORT= | cut -d '=' -f2) - VNC_PORT=${VNC_PORT:0:5} # Strip unncessary characters - - if [ ! 
-z $VNC_PORT ]; then - echo "$SERVICE service exposes a VNC Server at $HOSTNAME:$VNC_PORT" - - LOCAL_FORWARD="-L ${VNC_PORT}:localhost:${VNC_PORT}" - LOCAL_FORWARD_ALL="${LOCAL_FORWARD_ALL} $LOCAL_FORWARD" - PORTS_STRING="${PORTS_STRING},${VNC_PORT}:${SERVICE}_VNC" - echo " To forward it locally, run" - echo " ssh$LOCAL_FORWARD $USER@$HOSTNAME" - echo " on your local machine attach to VNC at localhost:${VNC_PORT}" - echo - fi - done - - echo "=========================================================================" - echo "To forward all ports locally:" - echo "ssh $LOCAL_FORWARD_ALL $USER@$HOSTNAME" - echo "=========================================================================" +# generate .env file from watod_scripts/watod-setup-env.sh +if [ ! -z $VERBOSE ]; then # if we want to see all verbose coming from setting up env + cd $MONO_DIR && . ./watod_scripts/watod-setup-env.sh +else + cd $MONO_DIR && . ./watod_scripts/watod-setup-env.sh &> /dev/null fi if [ ! -z "$COMPOSE_CMD" ]; then diff --git a/watod-config.sh b/watod-config.sh index 0b82dd62..946079fa 100755 --- a/watod-config.sh +++ b/watod-config.sh @@ -1,8 +1,11 @@ ## ----------------------- watod Configuration File Override ---------------------------- -############################ ACTIVE MODULE CONFIGURATION ############################ -## List of active modules to run, defined in docker-compose.yaml. ## +## HINT: You can copy the contents of this file to a watod-config.local.sh +## file that is untrackable by git and readable by watod. +## + +############################ ACTIVE MODULE CONFIGURATION ############################ ## List of active modules to run, defined in docker-compose.yaml. 
## Possible values: ## - infrastructure : starts visualization tools (foxglove and/or vnc and/or data_stream) @@ -14,7 +17,15 @@ ACTIVE_MODULES="infrastructure perception" -############################## OPTIONAL CONFIGURATIONS ############################## +################################# MODE OF OPERATION ################################# +## Possible modes of operation when running watod. +## Possible values: +## - deploy (default) : runs production-grade containers (non-editable) +## - develop : runs developer containers (editable) + +# MODE_OF_OPERATION="" + +############################## ADVANCED CONFIGURATIONS ############################## ## Name to append to docker containers. DEFAULT = "" COMPOSE_PROJECT_NAME="lereljic" FOXGLOVE_BRIDGE_PORT="8771" diff --git a/watod_scripts/watod-setup-env.sh b/watod_scripts/watod-setup-env.sh index 383b763c..a165c028 100755 --- a/watod_scripts/watod-setup-env.sh +++ b/watod_scripts/watod-setup-env.sh @@ -1,4 +1,5 @@ #!/bin/bash +set -e # This script generates a .env file to be used with docker-compose # To override any of the variables in this script, create watod-config.sh @@ -10,21 +11,7 @@ if [ -f /.dockerenv ]; then exit 1 fi -MONO_DIR="$(dirname "$(realpath "$0")")" -# moves us one level out to the root monorepo directory -MONO_DIR=${MONO_DIR%/*} - -if [ ! -z $MODULES_DIR_EXP ]; then - MODULES_DIR="$MODULES_DIR_EXP" -else - MODULES_DIR="$MONO_DIR/modules" -fi - -# Allow for local overrides of any of the below parameters -if [ -f "$MONO_DIR/watod-config.sh" ]; then - source "$MONO_DIR/watod-config.sh" -fi - +# Retrieve git branch if ! [ -x "$(command -v git)" ]; then echo 'Error: git is not installed.' >&2 else @@ -41,11 +28,6 @@ TAG=$(echo ${TAG:-$BRANCH} | tr / -) # replace / with - TAG=${TAG/\//-} -# Happens during CI, we use the TAG from CI -if [ ! -z $MODULES_DIR_EXP ]; then - TAG="build_$TAG" -fi - # List of active modules to run, defined in docker-compose.yaml. 
# Possible values: # - infrastructure : starts visualization tools (foxglove and/or vnc) @@ -87,11 +69,10 @@ INFRASTRUCTURE_FOXGLOVE_IMAGE=${DATA_STREAM_IMAGE:-"$REGISTRY_URL/infrastructure PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE=${PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE:-"$REGISTRY_URL/perception/radar_object_detection"} PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE=${PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE:-"$REGISTRY_URL/perception/lidar_object_detection"} PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE=${PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE:-"$REGISTRY_URL/perception/camera_object_detection"} -PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE=${PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE:-"$REGISTRY_URL/perception/traffic_light_detection"} -PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE=${PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE:-"$REGISTRY_URL/perception/traffic_sign_detection"} PERCEPTION_LANE_DETECTION_IMAGE=${PERCEPTION_LANE_DETECTION_IMAGE:-"$REGISTRY_URL/perception/lane_detection"} PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE=${PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE:-"$REGISTRY_URL/perception/semantic_segmentation"} PERCEPTION_TRACKING_IMAGE=${PERCEPTION_TRACKING_IMAGE:-"$REGISTRY_URL/perception/tracking"} +PERCEPTION_DEPTH_ESTIMATION_IMAGE=${PERCEPTION_DEPTH_ESTIMATION_IMAGE:-"$REGISTRY_URL/perception/depth_estimation"} # World Modeling WORLD_MODELING_HD_MAP_IMAGE=${WORLD_MODELING_HD_MAP_IMAGE:-"$REGISTRY_URL/world_modeling/hd_map"} @@ -108,6 +89,10 @@ ACTION_MPC_IMAGE=${ACTION_MPC_IMAGE:-"$REGISTRY_URL/action/model_predictive_cont # Simulation SIMULATION_CARLA_IMAGE=${SIMULATION_CARLA_IMAGE:-"$REGISTRY_URL/simulation/carla_sim"} +SIMULATION_CARLA_ROS_BRIDGE_IMAGE=${SIMULATION_CARLA_ROS_BRIDGE_IMAGE:-"$REGISTRY_URL/simulation/carla_ros_bridge"} +SIMULATION_CARLAVIZ_IMAGE=${SIMULATION_CARLAVIZ_IMAGE:-"$REGISTRY_URL/simulation/carla_viz"} +SIMULATION_CARLA_NOTEBOOKS_IMAGE=${SIMULATION_CARLA_NOTEBOOKS_IMAGE:-"$REGISTRY_URL/simulation/carla_notebooks"} 
+SIMULATION_CARLA_SAMPLE_NODE_IMAGE=${SIMULATION_CARLA_SAMPLE_NODE_IMAGE:-"$REGISTRY_URL/simulation/carla_sample_node"} # Interfacing INTERFACING_CAN_IMAGE=${INTERFACING_CAN_IMAGE:-"$REGISTRY_URL/interfacing/can_interfacing"} @@ -123,6 +108,9 @@ SETGID=$(id -g) BASE_PORT=${BASE_PORT:-$(($(id -u)*20))} GUI_TOOLS_VNC_PORT=${GUI_TOOLS_VNC_PORT:-$((BASE_PORT++))} FOXGLOVE_BRIDGE_PORT=${FOXGLOVE_BRIDGE_PORT:-$((GUI_TOOLS_VNC_PORT++))} +CARLAVIZ_PORT=${CARLAVIZ_PORT:-$((GUI_TOOLS_VNC_PORT++))} +CARLAVIZ_PORT_2=${CARLAVIZ_PORT_2:-$((GUI_TOOLS_VNC_PORT++))} +CARLA_NOTEBOOKS_PORT=${CARLA_NOTEBOOKS_PORT:-$((GUI_TOOLS_VNC_PORT++))} ## -------------------- Export Environment Variables ------------------------- @@ -150,6 +138,9 @@ echo "SETGID=$SETGID" >> "$MODULES_DIR/.env" echo "BASE_PORT=$BASE_PORT" >> "$MODULES_DIR/.env" echo "GUI_TOOLS_VNC_PORT=$GUI_TOOLS_VNC_PORT" >> "$MODULES_DIR/.env" echo "FOXGLOVE_BRIDGE_PORT=$FOXGLOVE_BRIDGE_PORT" >> "$MODULES_DIR/.env" +echo "CARLAVIZ_PORT=$CARLAVIZ_PORT" >> "$MODULES_DIR/.env" +echo "CARLAVIZ_PORT_2=$CARLAVIZ_PORT_2" >> "$MODULES_DIR/.env" +echo "CARLA_NOTEBOOKS_PORT=$CARLA_NOTEBOOKS_PORT" >> "$MODULES_DIR/.env" echo "REGISTRY=$REGISTRY" >> "$MODULES_DIR/.env" echo "REGISTRY=$REPOSITORY" >> "$MODULES_DIR/.env" @@ -173,11 +164,10 @@ echo "INFRASTRUCTURE_FOXGLOVE_IMAGE=$INFRASTRUCTURE_FOXGLOVE_IMAGE" >> "$MODULES echo "PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE=$PERCEPTION_RADAR_OBJECT_DETECTION_IMAGE" >> "$MODULES_DIR/.env" echo "PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE=$PERCEPTION_LIDAR_OBJECT_DETECTION_IMAGE" >> "$MODULES_DIR/.env" echo "PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE=$PERCEPTION_CAMERA_OBJECT_DETECTION_IMAGE" >> "$MODULES_DIR/.env" -echo "PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE=$PERCEPTION_TRAFFIC_LIGHT_DETECTION_IMAGE" >> "$MODULES_DIR/.env" -echo "PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE=$PERCEPTION_TRAFFIC_SIGN_DETECTION_IMAGE" >> "$MODULES_DIR/.env" echo "PERCEPTION_LANE_DETECTION_IMAGE=$PERCEPTION_LANE_DETECTION_IMAGE" 
>> "$MODULES_DIR/.env" echo "PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE=$PERCEPTION_SEMANTIC_SEGMENTATION_IMAGE" >> "$MODULES_DIR/.env" echo "PERCEPTION_TRACKING_IMAGE=$PERCEPTION_TRACKING_IMAGE" >> "$MODULES_DIR/.env" +echo "PERCEPTION_DEPTH_ESTIMATION_IMAGE=$PERCEPTION_DEPTH_ESTIMATION_IMAGE" >> "$MODULES_DIR/.env" # World Modeling echo "WORLD_MODELING_HD_MAP_IMAGE=$WORLD_MODELING_HD_MAP_IMAGE" >> "$MODULES_DIR/.env" @@ -194,6 +184,10 @@ echo "ACTION_MPC_IMAGE=$ACTION_MPC_IMAGE" >> "$MODULES_DIR/.env" # Simulation echo "SIMULATION_CARLA_IMAGE=$SIMULATION_CARLA_IMAGE" >> "$MODULES_DIR/.env" +echo "SIMULATION_CARLA_ROS_BRIDGE_IMAGE=$SIMULATION_CARLA_ROS_BRIDGE_IMAGE" >> "$MODULES_DIR/.env" +echo "SIMULATION_CARLAVIZ_IMAGE=$SIMULATION_CARLAVIZ_IMAGE" >> "$MODULES_DIR/.env" +echo "SIMULATION_CARLA_NOTEBOOKS_IMAGE=$SIMULATION_CARLA_NOTEBOOKS_IMAGE" >> "$MODULES_DIR/.env" +echo "SIMULATION_CARLA_SAMPLE_NODE_IMAGE=$SIMULATION_CARLA_SAMPLE_NODE_IMAGE" >> "$MODULES_DIR/.env" # Interfacing echo "INTERFACING_CAN_IMAGE=$INTERFACING_CAN_IMAGE" >> "$MODULES_DIR/.env"