From 84cf8f21f23bd133d9f9c25e4f1f4efc33a6877d Mon Sep 17 00:00:00 2001
From: Amal Nanavati
Date: Sat, 6 May 2023 01:34:21 -0700
Subject: [PATCH] Implemented dummy face detection and integrated it with
 BiteInitiation

- Implemented and tested the dummy ROS2 node
- Integrated it with the web app
- Implemented proper use of useRef and useCallback
- Added a debug continue button (e.g., if ROS is not running)
- Added the ability to unsubscribe from a topic
---
 .../FaceDetection.py                          | 185 +++++++++++++++
 .../reverse_string_service.py                 |   2 +-
 .../feeding_web_app_dummy_nodes_launch.xml    |   2 +
 feeding_web_app_ros2_test/package.xml         |   1 +
 feeding_web_app_ros2_test/setup.py            |   1 +
 feedingwebapp/src/Pages/Constants.js          |  14 +-
 feedingwebapp/src/Pages/GlobalState.jsx       |   5 +
 .../Pages/Home/MealStates/BiteInitiation.jsx  | 212 ++++++++++++++----
 feedingwebapp/src/ros/ros_helpers.js          |  10 +
 9 files changed, 392 insertions(+), 40 deletions(-)
 create mode 100755 feeding_web_app_ros2_test/feeding_web_app_ros2_test/FaceDetection.py

diff --git a/feeding_web_app_ros2_test/feeding_web_app_ros2_test/FaceDetection.py b/feeding_web_app_ros2_test/feeding_web_app_ros2_test/FaceDetection.py
new file mode 100755
index 00000000..563f7544
--- /dev/null
+++ b/feeding_web_app_ros2_test/feeding_web_app_ros2_test/FaceDetection.py
@@ -0,0 +1,185 @@
+#!/usr/bin/env python3
+from ada_feeding_msgs.msg import FaceDetection
+from ada_feeding_msgs.srv import ToggleFaceDetection
+import cv2
+from cv_bridge import CvBridge
+from geometry_msgs.msg import PointStamped
+import rclpy
+from rclpy.node import Node
+from sensor_msgs.msg import Image
+from threading import Lock
+
+
+class FaceDetectionNode(Node):
+    def __init__(
+        self,
+        face_detection_interval=150,
+        num_images_with_face=60,
+        open_mouth_interval=90,
+        num_images_with_open_mouth=30,
+    ):
+        """
+        Initializes the FaceDetection node, which exposes a ToggleFaceDetection
+        service that can be used to toggle face detection on or off, and
+        publishes information about detected faces to the /face_detection
+        topic when face detection is on.
+
+        After face_detection_interval images without a face, this dummy node
+        detects a face for num_images_with_face frames. After open_mouth_interval
+        images with a face but without an open mouth, this dummy node
+        detects an open mouth for num_images_with_open_mouth frames.
+
+        Parameters:
+        ----------
+        face_detection_interval: The number of consecutive frames without a face before the dummy face is detected.
+        num_images_with_face: The number of consecutive frames during which the dummy face is detected.
+        open_mouth_interval: The number of frames with a face but without an open mouth before the open mouth is detected.
+        num_images_with_open_mouth: The number of consecutive frames during which the open mouth is detected.
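+
+        For example, assuming the camera feed publishes at roughly 30 fps (an
+        assumption, not something this node checks), the defaults mean: no
+        face for ~5 s (150 frames), then a face for ~2 s (60 frames). Once 90
+        frames with a face have accumulated, an open mouth is reported for 30
+        face-frames (~1 s of face time).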
+ """ + super().__init__("face_detection") + + # Internal variables to track when a face and/or open mouth should be detected + self.face_detection_interval = face_detection_interval + self.num_images_with_face = num_images_with_face + self.open_mouth_interval = open_mouth_interval + self.num_images_with_open_mouth = num_images_with_open_mouth + self.num_consecutive_images_without_face = 0 + self.num_consecutive_images_with_face = 0 + self.num_consecutive_images_without_open_mouth = 0 + self.num_consecutive_images_with_open_mouth = 0 + + # Convert between ROS and CV images + self.bridge = CvBridge() + + # Keeps track of whether face detection is on or not + self.is_on = False + self.is_on_lock = Lock() + + # Create the service + self.srv = self.create_service( + ToggleFaceDetection, + "ToggleFaceDetection", + self.toggle_face_detection_callback, + ) + + # Subscribe to the camera feed + self.subscription = self.create_subscription( + Image, "camera/color/image_raw", self.camera_callback, 1 + ) + self.subscription # prevent unused variable warning + + # Create the publishers + self.publisher_results = self.create_publisher( + FaceDetection, "face_detection", 1 + ) + self.publisher_image = self.create_publisher(Image, "face_detection_img", 1) + + def toggle_face_detection_callback(self, request, response): + """ + Callback function for the ToggleFaceDetection service. Safely toggles + the face detection on or off depending on the request. + """ + self.get_logger().info( + "Incoming service request. turn_on: %s" % (request.turn_on) + ) + if request.turn_on: + # Reset counters + self.num_consecutive_images_without_face = 0 + self.num_consecutive_images_with_face = 0 + self.num_consecutive_images_without_open_mouth = 0 + self.num_consecutive_images_with_open_mouth = 0 + # Turn on face detection + self.is_on_lock.acquire() + self.is_on = True + self.is_on_lock.release() + response.face_detection_is_on = True + else: + self.is_on_lock.acquire() + self.is_on = False + self.is_on_lock.release() + response.face_detection_is_on = False + return response + + def camera_callback(self, msg): + """ + Callback function for the camera feed. If face detection is on, this + function will detect faces in the image and publish information about + them to the /face_detection topic. 
+ """ + self.is_on_lock.acquire() + is_on = self.is_on + self.is_on_lock.release() + if is_on: + # Update the number of consecutive images with/without a face + is_face_detected = False + if self.num_consecutive_images_with_face == self.num_images_with_face: + self.num_consecutive_images_without_face = 0 + self.num_consecutive_images_with_face = 0 + if self.num_consecutive_images_without_face == self.face_detection_interval: + # Detect a face + self.num_consecutive_images_with_face += 1 + is_face_detected = True + else: + # Don't detect a face + self.num_consecutive_images_without_face += 1 + + # Update the number of consecutive images with/without an open mouth + open_mouth_detected = False + if is_face_detected: + if ( + self.num_consecutive_images_with_open_mouth + == self.num_images_with_open_mouth + ): + self.num_consecutive_images_without_open_mouth = 0 + self.num_consecutive_images_with_open_mouth = 0 + if ( + self.num_consecutive_images_without_open_mouth + == self.open_mouth_interval + ): + # Detect an open mouth + self.num_consecutive_images_with_open_mouth += 1 + open_mouth_detected = True + else: + # Don't detect an open mouth + self.num_consecutive_images_without_open_mouth += 1 + + # Publish the face detection information + face_detection_msg = FaceDetection() + face_detection_msg.is_face_detected = is_face_detected + if is_face_detected: + # Add a dummy face marker to the sensor_msgs/Image + cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8") + cv2.circle( + cv_image, + (msg.width // 2, msg.height // 2), + msg.height // 25, + (0, 0, 255), + -1, + ) + annotated_msg = self.bridge.cv2_to_imgmsg(cv_image, "bgr8") + annotated_img = annotated_msg + # Publish the detected mouth center + face_detection_msg.detected_mouth_center = PointStamped() + face_detection_msg.detected_mouth_center.header = msg.header + face_detection_msg.detected_mouth_center.point.x = msg.width / 2.0 + face_detection_msg.detected_mouth_center.point.y = msg.height / 2.0 + face_detection_msg.detected_mouth_center.point.z = 0.0 + else: + annotated_img = msg + face_detection_msg.is_mouth_open = open_mouth_detected + self.publisher_results.publish(face_detection_msg) + self.publisher_image.publish(annotated_img) + + +def main(args=None): + rclpy.init(args=args) + + face_detection = FaceDetectionNode() + + rclpy.spin(face_detection) + + rclpy.shutdown() + + +if __name__ == "__main__": + main() diff --git a/feeding_web_app_ros2_test/feeding_web_app_ros2_test/reverse_string_service.py b/feeding_web_app_ros2_test/feeding_web_app_ros2_test/reverse_string_service.py index 4d414f29..8e8a6af3 100755 --- a/feeding_web_app_ros2_test/feeding_web_app_ros2_test/reverse_string_service.py +++ b/feeding_web_app_ros2_test/feeding_web_app_ros2_test/reverse_string_service.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -from feeding_web_app_ros2_msgs.srv import ReverseString # CHANGE +from feeding_web_app_ros2_msgs.srv import ReverseString import rclpy from rclpy.node import Node diff --git a/feeding_web_app_ros2_test/launch/feeding_web_app_dummy_nodes_launch.xml b/feeding_web_app_ros2_test/launch/feeding_web_app_dummy_nodes_launch.xml index d2ecb85f..f6095664 100644 --- a/feeding_web_app_ros2_test/launch/feeding_web_app_dummy_nodes_launch.xml +++ b/feeding_web_app_ros2_test/launch/feeding_web_app_dummy_nodes_launch.xml @@ -8,6 +8,8 @@ + + \ No newline at end of file diff --git a/feeding_web_app_ros2_test/package.xml b/feeding_web_app_ros2_test/package.xml index 7eb20053..059d629c 100644 --- a/feeding_web_app_ros2_test/package.xml +++ 
diff --git a/feeding_web_app_ros2_test/package.xml b/feeding_web_app_ros2_test/package.xml
index 7eb20053..059d629c 100644
--- a/feeding_web_app_ros2_test/package.xml
+++ b/feeding_web_app_ros2_test/package.xml
@@ -11,6 +11,7 @@
   <test_depend>ament_flake8</test_depend>
   <test_depend>ament_pep257</test_depend>
   <test_depend>python3-pytest</test_depend>
+  <depend>ada_feeding_msgs</depend>
   <depend>feeding_web_app_ros2_msgs</depend>
   <depend>cv_bridge</depend>
   <depend>rclpy</depend>
diff --git a/feeding_web_app_ros2_test/setup.py b/feeding_web_app_ros2_test/setup.py
index 84ee0764..e046ad6e 100644
--- a/feeding_web_app_ros2_test/setup.py
+++ b/feeding_web_app_ros2_test/setup.py
@@ -33,6 +33,7 @@
         "console_scripts": [
             # Scripts for the main app
             "DummyRealSense = feeding_web_app_ros2_test.DummyRealSense:main",
+            "FaceDetection = feeding_web_app_ros2_test.FaceDetection:main",
             "MoveAbovePlate = feeding_web_app_ros2_test.MoveAbovePlate:main",
             # Scripts for the "TestROS" component
             "listener = feeding_web_app_ros2_test.subscriber:main",
diff --git a/feedingwebapp/src/Pages/Constants.js b/feedingwebapp/src/Pages/Constants.js
index 38139fcf..80a4bf38 100644
--- a/feedingwebapp/src/Pages/Constants.js
+++ b/feedingwebapp/src/Pages/Constants.js
@@ -23,8 +23,11 @@ FOOTER_STATE_ICON_DICT[MEAL_STATE.R_MovingToMouth] = '/robot_state_imgs/move_to_
 FOOTER_STATE_ICON_DICT[MEAL_STATE.R_StowingArm] = '/robot_state_imgs/stowing_arm_position.svg'
 export { FOOTER_STATE_ICON_DICT }
 
-// The names of the camera feed ROS topic(s)
+// The names of the ROS topic(s)
 export const CAMERA_FEED_TOPIC = '/camera/color/image_raw'
+export const FACE_DETECTION_TOPIC = '/face_detection'
+export const FACE_DETECTION_TOPIC_MSG = 'ada_feeding_msgs/FaceDetection'
+export const FACE_DETECTION_IMG_TOPIC = '/face_detection_img'
 
 // For states that call ROS actions, this dictionary contains
 // the action name and the message type
@@ -35,6 +38,15 @@ ROS_ACTIONS_NAMES[MEAL_STATE.R_MovingAbovePlate] = {
 }
 export { ROS_ACTIONS_NAMES }
 
+// For states that call ROS services, this dictionary contains
+// the service name and the message type
+let ROS_SERVICE_NAMES = {}
+ROS_SERVICE_NAMES[MEAL_STATE.U_BiteInitiation] = {
+  serviceName: 'ToggleFaceDetection',
+  messageType: 'ada_feeding_msgs/srv/ToggleFaceDetection'
+}
+export { ROS_SERVICE_NAMES }
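+// Example lookup (a sketch; mirrors the actual usage in BiteInitiation.jsx
+// below):
+//   let { serviceName, messageType } = ROS_SERVICE_NAMES[MEAL_STATE.U_BiteInitiation]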
+
 // The meaning of the status that motion actions return in their results.
 // These should match the action definition(s)
 export const MOTION_STATUS_SUCCESS = 0
diff --git a/feedingwebapp/src/Pages/GlobalState.jsx b/feedingwebapp/src/Pages/GlobalState.jsx
index 06f3310a..c7eae9ec 100644
--- a/feedingwebapp/src/Pages/GlobalState.jsx
+++ b/feedingwebapp/src/Pages/GlobalState.jsx
@@ -92,6 +92,7 @@ export const useGlobalState = create(
       mealStateTransitionTime: Date.now(),
       appPage: APP_PAGE.Home,
       desiredFoodItem: null,
+      detectedMouthCenter: null,
       // Settings values
       stagingPosition: SETTINGS.stagingPosition[0],
       biteInitiation: SETTINGS.biteInitiation[0],
@@ -111,6 +112,10 @@
         set(() => ({
           desiredFoodItem: desiredFoodItem
         })),
+      setDetectedMouthCenter: (detectedMouthCenter) =>
+        set(() => ({
+          detectedMouthCenter: detectedMouthCenter
+        })),
       setStagingPosition: (stagingPosition) =>
         set(() => ({
           stagingPosition: stagingPosition
diff --git a/feedingwebapp/src/Pages/Home/MealStates/BiteInitiation.jsx b/feedingwebapp/src/Pages/Home/MealStates/BiteInitiation.jsx
index 810db1ed..9e04efe1 100644
--- a/feedingwebapp/src/Pages/Home/MealStates/BiteInitiation.jsx
+++ b/feedingwebapp/src/Pages/Home/MealStates/BiteInitiation.jsx
@@ -1,20 +1,47 @@
 // React Imports
-import React from 'react'
+import React, { useCallback, useEffect, useRef, useState } from 'react'
 import Button from 'react-bootstrap/Button'
+// PropTypes is used to validate that the expected props are in fact passed to
+// this component
+import PropTypes from 'prop-types'
 import Row from 'react-bootstrap/Row'
 
 // Local Imports
 import '../Home.css'
+import {
+  connectToROS,
+  createROSService,
+  createROSServiceRequest,
+  subscribeToROSTopic,
+  unsubscribeFromROSTopic
+} from '../../../ros/ros_helpers'
+import { convertRemToPixels, scaleWidthHeightToWindow } from '../../../helpers'
+import {
+  FACE_DETECTION_IMG_TOPIC,
+  FACE_DETECTION_TOPIC,
+  FACE_DETECTION_TOPIC_MSG,
+  REALSENSE_WIDTH,
+  REALSENSE_HEIGHT,
+  ROS_SERVICE_NAMES
+} from '../../Constants'
 import { useGlobalState, MEAL_STATE } from '../../GlobalState'
 import { FOOTER_STATE_ICON_DICT } from '../../Constants'
 
 /**
  * The BiteInitiation component appears after the robot has moved to the staging
  * position, and waits for the user to indicate that they are ready for a bite.
+ *
+ * @param {boolean} debug - whether to run it in debug mode (e.g., if you aren't
+ * simultaneously running the robot) or not
  */
-const BiteInitiation = () => {
+const BiteInitiation = (props) => {
+  // Keep track of whether a mouth has been detected or not
+  const [mouthDetected, setMouthDetected] = useState(false)
+
   // Get the relevant global variables
   const setMealState = useGlobalState((state) => state.setMealState)
+  const setDetectedMouthCenter = useGlobalState((state) => state.setDetectedMouthCenter)
+
   // Get icon image for move above plate
   let moveAbovePlateImage = FOOTER_STATE_ICON_DICT[MEAL_STATE.R_MovingAbovePlate]
   // Get icon image for move to mouth position
@@ -23,56 +50,165 @@
   /**
    * Callback function for when the user is ready for their bite.
    */
-  function readyForBite() {
+  const readyForBite = useCallback(() => {
+    console.log('readyForBite')
     setMealState(MEAL_STATE.R_MovingToMouth)
-  }
+  }, [setMealState])
+
+  // Connect to ROS, if not already connected. The connection is stored in a
+  // ref so it is not re-created upon every re-render.
+  const ros = useRef(connectToROS().ros)
+
+  // Subscribe to the ROS topic with the face detection result. The callback
+  // is memoized with useCallback so it is not re-created upon every re-render.
+  const faceDetectionCallback = useCallback(
+    (message) => {
+      if (message.is_face_detected) {
+        setMouthDetected(message.is_face_detected)
+        setDetectedMouthCenter(message.detected_mouth_center)
+        // If the mouth is open, move on to the next state
+        if (message.is_mouth_open) {
+          readyForBite()
+        }
+      }
+    },
+    [setDetectedMouthCenter, setMouthDetected, readyForBite]
+  )
+  useEffect(() => {
+    let topic = subscribeToROSTopic(ros.current, FACE_DETECTION_TOPIC, FACE_DETECTION_TOPIC_MSG, faceDetectionCallback)
+    // In practice, because the values passed in the second argument of
+    // useEffect will not change on re-renders, this return statement will
+    // only be called when the component unmounts.
+    return () => {
+      unsubscribeFromROSTopic(topic, faceDetectionCallback)
+    }
+  }, [faceDetectionCallback])
+
+  // Create the ROS service. It is stored in a ref so it is not re-created
+  // upon every re-render.
+  let { serviceName, messageType } = ROS_SERVICE_NAMES[MEAL_STATE.U_BiteInitiation]
+  let toggleFaceDetectionService = useRef(createROSService(ros.current, serviceName, messageType))
+
+  /**
+   * Toggles face detection on the first time this component is rendered, but
+   * not upon additional re-renders. See here for more details on how `useEffect`
+   * achieves this goal: https://stackoverflow.com/a/69264685
+   */
+  useEffect(() => {
+    // Create a service request
+    let request = createROSServiceRequest({ turn_on: true })
+    // Call the service
+    toggleFaceDetectionService.current.callService(request, (response) => console.log('Got service response', response))
+
+    // In practice, because the values passed in the second argument of
+    // useEffect will not change on re-renders, this return statement will
+    // only be called when the component unmounts.
+    return () => {
+      // Create a service request
+      let request = createROSServiceRequest({ turn_on: false })
+      // Call the service
+      toggleFaceDetectionService.current.callService(request, (response) => console.log('Got service response', response))
+    }
+  }, [toggleFaceDetectionService])
 
   /**
    * Callback function for when the user wants to move above plate.
    */
-  function moveAbovePlate() {
+  const cancelBite = useCallback(() => {
+    console.log('cancelBite')
     setMealState(MEAL_STATE.R_MovingAbovePlate)
-  }
+  }, [setMealState])
+
+  // Get the size of the robot's live video stream.
+  const margin = convertRemToPixels(1)
+  let { width, height } = scaleWidthHeightToWindow(REALSENSE_WIDTH, REALSENSE_HEIGHT, margin, margin, margin, margin)
 
   // Render the component
   return (
     <div style={{ display: 'block' }}>
-      {/* Ask the user whether they're ready for a bite and if they want to move to mouth position */}
-      <Row>
-        <p>Ready for bite? Move to mouth.</p>
-        {/* Icon to move to mouth */}
-        <Button variant='success' onClick={readyForBite}>
-          <img src={moveToMouthImage} alt='move to mouth icon' />
-        </Button>
-      </Row>
-      {/* Add empty space */}
-      <div>&nbsp;</div>
-      <Row>
-        {/* Ask the user whether they want to move to above plate position */}
-        <p>Cancel bite and move above plate.</p>
-        {/* Icon to move above plate */}
-        <Button variant='warning' onClick={moveAbovePlate}>
-          <img src={moveAbovePlateImage} alt='move above plate icon' />
-        </Button>
-      </Row>
+      {/* Tell the user whether their mouth has been detected or not */}
+      {mouthDetected ? (
+        <>
+          <Row>
+            <p>Detected mouth!</p>
+          </Row>
+          <Row>
+            <p>Open mouth when ready...</p>
+          </Row>
+        </>
+      ) : (
+        <Row>
+          <p>Detecting mouth...</p>
+        </Row>
+      )}
+      {mouthDetected ? (
+        <>
+          {/* Ask the user whether they want to move to above plate position */}
+          <Row>
+            <p>Cancel bite and move above plate.</p>
+            {/* Icon to move above plate */}
+            <Button variant='warning' onClick={cancelBite}>
+              <img src={moveAbovePlateImage} alt='move above plate icon' />
+            </Button>
+          </Row>
+        </>
+      ) : (
+        <></>
+      )}
+      {/* If the user is running in debug mode, give them the option to skip */}
+      {props.debug ? (
+        <Row>
+          <Button variant='secondary' onClick={readyForBite}>
+            Continue (debug)
+          </Button>
+        </Row>
+      ) : (
+        <></>
+      )}
     </div>
   )
 }
+BiteInitiation.propTypes = {
+  // Whether to run it in debug mode (e.g., if you aren't simultaneously running
+  // the robot) or not
+  debug: PropTypes.bool.isRequired
+}
 export default BiteInitiation
diff --git a/feedingwebapp/src/ros/ros_helpers.js b/feedingwebapp/src/ros/ros_helpers.js
index a4af261a..c53807cd 100644
--- a/feedingwebapp/src/ros/ros_helpers.js
+++ b/feedingwebapp/src/ros/ros_helpers.js
@@ -75,6 +75,16 @@ export function subscribeToROSTopic(ros, topicName, topicType, callback) {
   return topic
 }
 
+/**
+ * Unsubscribe from a ROS topic.
+ *
+ * @param {object} topic The ROSLIB.Topic to unsubscribe from.
+ * @param {function} callback The callback function to unsubscribe.
+ */
+export function unsubscribeFromROSTopic(topic, callback) {
+  topic.unsubscribe(callback)
+}
+
 /**
  * Create a ROS Service.
 *
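
Note: since debug is declared as PropTypes.bool.isRequired, whichever parent
renders this meal state (not shown in this diff) must now pass the prop
explicitly, e.g. (a sketch):

    <BiteInitiation debug={false} />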