
Added dummy face detection node, integrated with BiteInitiation #37


Merged: 1 commit, May 12, 2023
185 changes: 185 additions & 0 deletions feeding_web_app_ros2_test/feeding_web_app_ros2_test/FaceDetection.py
@@ -0,0 +1,185 @@
#!/usr/bin/env python3
from ada_feeding_msgs.msg import FaceDetection
from ada_feeding_msgs.srv import ToggleFaceDetection
import cv2
from cv_bridge import CvBridge
from geometry_msgs.msg import PointStamped
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image
from threading import Lock


class FaceDetectionNode(Node):
    def __init__(
        self,
        face_detection_interval=150,
        num_images_with_face=60,
        open_mouth_interval=90,
        num_images_with_open_mouth=30,
    ):
        """
        Initializes the FaceDetection node, which exposes a ToggleFaceDetection
        service that can be used to toggle face detection on or off and, while
        detection is on, publishes information about detected faces to the
        /face_detection topic.

        After face_detection_interval images without a face, this dummy node
        detects a face for num_images_with_face frames. After open_mouth_interval
        images with a face but without an open mouth, it detects an open mouth
        for num_images_with_open_mouth frames.

        Parameters:
        ----------
        face_detection_interval: The number of frames without a face before the dummy detects one.
        num_images_with_face: The number of consecutive frames in which a face is detected.
        open_mouth_interval: The number of face frames without an open mouth before one is detected.
        num_images_with_open_mouth: The number of consecutive frames in which an open mouth is detected.
        """
        super().__init__("face_detection")

        # Internal variables to track when a face and/or open mouth should be detected
        self.face_detection_interval = face_detection_interval
        self.num_images_with_face = num_images_with_face
        self.open_mouth_interval = open_mouth_interval
        self.num_images_with_open_mouth = num_images_with_open_mouth
        self.num_consecutive_images_without_face = 0
        self.num_consecutive_images_with_face = 0
        self.num_consecutive_images_without_open_mouth = 0
        self.num_consecutive_images_with_open_mouth = 0

        # Convert between ROS and CV images
        self.bridge = CvBridge()

        # Keeps track of whether face detection is on or not
        self.is_on = False
        self.is_on_lock = Lock()

        # Create the service
        self.srv = self.create_service(
            ToggleFaceDetection,
            "ToggleFaceDetection",
            self.toggle_face_detection_callback,
        )

        # Subscribe to the camera feed
        self.subscription = self.create_subscription(
            Image, "camera/color/image_raw", self.camera_callback, 1
        )
        self.subscription  # prevent unused variable warning

        # Create the publishers
        self.publisher_results = self.create_publisher(
            FaceDetection, "face_detection", 1
        )
        self.publisher_image = self.create_publisher(Image, "face_detection_img", 1)

    def toggle_face_detection_callback(self, request, response):
        """
        Callback function for the ToggleFaceDetection service. Safely toggles
        face detection on or off depending on the request.
        """
        self.get_logger().info(
            "Incoming service request. turn_on: %s" % (request.turn_on)
        )
        if request.turn_on:
            # Reset counters
            self.num_consecutive_images_without_face = 0
            self.num_consecutive_images_with_face = 0
            self.num_consecutive_images_without_open_mouth = 0
            self.num_consecutive_images_with_open_mouth = 0
            # Turn on face detection
            self.is_on_lock.acquire()
            self.is_on = True
            self.is_on_lock.release()
            response.face_detection_is_on = True
        else:
            self.is_on_lock.acquire()
            self.is_on = False
            self.is_on_lock.release()
            response.face_detection_is_on = False
        return response

    def camera_callback(self, msg):
        """
        Callback function for the camera feed. If face detection is on, this
        function detects faces in the image and publishes information about
        them to the /face_detection topic.
        """
        self.is_on_lock.acquire()
        is_on = self.is_on
        self.is_on_lock.release()
        if is_on:
            # Update the number of consecutive images with/without a face
            is_face_detected = False
            if self.num_consecutive_images_with_face == self.num_images_with_face:
                self.num_consecutive_images_without_face = 0
                self.num_consecutive_images_with_face = 0
            if self.num_consecutive_images_without_face == self.face_detection_interval:
                # Detect a face
                self.num_consecutive_images_with_face += 1
                is_face_detected = True
            else:
                # Don't detect a face
                self.num_consecutive_images_without_face += 1

            # Update the number of consecutive images with/without an open mouth
            open_mouth_detected = False
            if is_face_detected:
                if (
                    self.num_consecutive_images_with_open_mouth
                    == self.num_images_with_open_mouth
                ):
                    self.num_consecutive_images_without_open_mouth = 0
                    self.num_consecutive_images_with_open_mouth = 0
                if (
                    self.num_consecutive_images_without_open_mouth
                    == self.open_mouth_interval
                ):
                    # Detect an open mouth
                    self.num_consecutive_images_with_open_mouth += 1
                    open_mouth_detected = True
                else:
                    # Don't detect an open mouth
                    self.num_consecutive_images_without_open_mouth += 1

            # Publish the face detection information
            face_detection_msg = FaceDetection()
            face_detection_msg.is_face_detected = is_face_detected
            if is_face_detected:
                # Draw a dummy face marker onto the sensor_msgs/Image
                cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
                cv2.circle(
                    cv_image,
                    (msg.width // 2, msg.height // 2),
                    msg.height // 25,
                    (0, 0, 255),
                    -1,
                )
                annotated_img = self.bridge.cv2_to_imgmsg(cv_image, "bgr8")
                # Publish the detected mouth center
                face_detection_msg.detected_mouth_center = PointStamped()
                face_detection_msg.detected_mouth_center.header = msg.header
                face_detection_msg.detected_mouth_center.point.x = msg.width / 2.0
                face_detection_msg.detected_mouth_center.point.y = msg.height / 2.0
                face_detection_msg.detected_mouth_center.point.z = 0.0
            else:
                annotated_img = msg
            face_detection_msg.is_mouth_open = open_mouth_detected
            self.publisher_results.publish(face_detection_msg)
            self.publisher_image.publish(annotated_img)


def main(args=None):
    rclpy.init(args=args)

    face_detection = FaceDetectionNode()

    rclpy.spin(face_detection)

    rclpy.shutdown()


if __name__ == "__main__":
    main()
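
For reviewers who want to exercise the dummy node by hand, the sketch below shows one way to drive it from a second rclpy node. This is a minimal example, not part of this PR: the service name, topic name, and the turn_on / face_detection_is_on fields are taken from FaceDetection.py above, while the client node name and structure are arbitrary.

#!/usr/bin/env python3
# Example client, not part of this PR. Service/topic names and message fields
# come from FaceDetection.py above; everything else is illustrative.
from ada_feeding_msgs.msg import FaceDetection
from ada_feeding_msgs.srv import ToggleFaceDetection
import rclpy
from rclpy.node import Node


class ToggleFaceDetectionClient(Node):
    def __init__(self):
        super().__init__("toggle_face_detection_client")
        self.client = self.create_client(ToggleFaceDetection, "ToggleFaceDetection")
        # Log every result the dummy node publishes
        self.subscription = self.create_subscription(
            FaceDetection, "face_detection", self.face_detection_callback, 1
        )

    def face_detection_callback(self, msg):
        self.get_logger().info(
            "is_face_detected=%s, is_mouth_open=%s"
            % (msg.is_face_detected, msg.is_mouth_open)
        )

    def toggle(self, turn_on):
        # Returns a future that resolves to the ToggleFaceDetection response
        request = ToggleFaceDetection.Request()
        request.turn_on = turn_on
        return self.client.call_async(request)


def main(args=None):
    rclpy.init(args=args)
    node = ToggleFaceDetectionClient()
    node.client.wait_for_service()
    future = node.toggle(True)
    rclpy.spin_until_future_complete(node, future)
    node.get_logger().info(
        "face_detection_is_on=%s" % future.result().face_detection_is_on
    )
    # Keep spinning to log /face_detection messages until Ctrl+C
    rclpy.spin(node)
    rclpy.shutdown()


if __name__ == "__main__":
    main()

With the dummy camera feed running, this client should log a burst of is_face_detected=True results after every face_detection_interval frames, matching the cycle documented in the node's constructor.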
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-from feeding_web_app_ros2_msgs.srv import ReverseString # CHANGE
+from feeding_web_app_ros2_msgs.srv import ReverseString

 import rclpy
 from rclpy.node import Node
@@ -8,6 +8,8 @@

  <!-- The RealSense Node -->
  <node pkg="feeding_web_app_ros2_test" exec="DummyRealSense" name="DummyRealSense"/>
  <!-- The FaceDetection node -->
  <node pkg="feeding_web_app_ros2_test" exec="FaceDetection" name="FaceDetection"/>
  <!-- The MoveAbovePlate action -->
  <node pkg="feeding_web_app_ros2_test" exec="MoveAbovePlate" name="MoveAbovePlate"/>
</launch>
1 change: 1 addition & 0 deletions feeding_web_app_ros2_test/package.xml
@@ -11,6 +11,7 @@
  <test_depend>ament_flake8</test_depend>
  <test_depend>ament_pep257</test_depend>
  <test_depend>python3-pytest</test_depend>
  <exec_depend>ada_feeding_msgs</exec_depend>
  <exec_depend>feeding_web_app_ros2_msgs</exec_depend>
  <exec_depend>cv_bridge</exec_depend>
  <exec_depend>rclpy</exec_depend>
1 change: 1 addition & 0 deletions feeding_web_app_ros2_test/setup.py
@@ -33,6 +33,7 @@
    "console_scripts": [
        # Scripts for the main app
        "DummyRealSense = feeding_web_app_ros2_test.DummyRealSense:main",
        "FaceDetection = feeding_web_app_ros2_test.FaceDetection:main",
        "MoveAbovePlate = feeding_web_app_ros2_test.MoveAbovePlate:main",
        # Scripts for the "TestROS" component
        "listener = feeding_web_app_ros2_test.subscriber:main",
14 changes: 13 additions & 1 deletion feedingwebapp/src/Pages/Constants.js
@@ -23,8 +23,11 @@ FOOTER_STATE_ICON_DICT[MEAL_STATE.R_MovingToMouth] = '/robot_state_imgs/move_to_
FOOTER_STATE_ICON_DICT[MEAL_STATE.R_StowingArm] = '/robot_state_imgs/stowing_arm_position.svg'
export { FOOTER_STATE_ICON_DICT }

-// The names of the camera feed ROS topic(s)
+// The names of the ROS topic(s)
export const CAMERA_FEED_TOPIC = '/camera/color/image_raw'
export const FACE_DETECTION_TOPIC = '/face_detection'
export const FACE_DETECTION_TOPIC_MSG = 'ada_feeding_msgs/FaceDetection'
export const FACE_DETECTION_IMG_TOPIC = '/face_detection_img'

// For states that call ROS actions, this dictionary contains
// the action name and the message type
@@ -35,6 +38,15 @@ ROS_ACTIONS_NAMES[MEAL_STATE.R_MovingAbovePlate] = {
}
export { ROS_ACTIONS_NAMES }

// For states that call ROS services, this dictionary contains
// the service name and the message type
let ROS_SERVICE_NAMES = {}
ROS_SERVICE_NAMES[MEAL_STATE.U_BiteInitiation] = {
  serviceName: 'ToggleFaceDetection',
  messageType: 'ada_feeding_msgs/srv/ToggleFaceDetection'
}
export { ROS_SERVICE_NAMES }

// The meaning of the status that motion actions return in their results.
// These should match the action definition(s)
export const MOTION_STATUS_SUCCESS = 0
5 changes: 5 additions & 0 deletions feedingwebapp/src/Pages/GlobalState.jsx
@@ -92,6 +92,7 @@ export const useGlobalState = create(
      mealStateTransitionTime: Date.now(),
      appPage: APP_PAGE.Home,
      desiredFoodItem: null,
      detectedMouthCenter: null,
      // Settings values
      stagingPosition: SETTINGS.stagingPosition[0],
      biteInitiation: SETTINGS.biteInitiation[0],
@@ -111,6 +112,10 @@
set(() => ({
desiredFoodItem: desiredFoodItem
})),
setDetectedMouthCenter: (detectedMouthCenter) =>
set(() => ({
detectedMouthCenter: detectedMouthCenter
})),
setStagingPosition: (stagingPosition) =>
set(() => ({
stagingPosition: stagingPosition