testing flattening
danielbrownmsm committed Dec 24, 2024
1 parent b5c95cf commit e4b5eb1
Showing 3 changed files with 105 additions and 32 deletions.
autonav_ws/src/autonav_feelers/src/feeler_node.cpp (50 changes: 22 additions & 28 deletions)
@@ -48,8 +48,8 @@ class FeelerNode : public AutoNav::Node {
     void init() override {
         // configuration stuff
         auto config_ = FeelerNodeConfig();
-        config_.max_length = 400;
-        config_.number_of_feelers = 20;
+        config_.max_length = 650;
+        config_.number_of_feelers = 40;
         config_.start_angle = 5;
         config_.waypointPopDist = 2;
         config_.ultrasonic_contribution = 1;
@@ -101,24 +101,24 @@ class FeelerNode : public AutoNav::Node {
             this->ultrasonic_feelers.push_back(Feeler(x, y)); // there are 2 ultrasonic distance sensors per side
         }

-        for (Feeler feeler : ultrasonic_feelers) {
+        for (Feeler &feeler : ultrasonic_feelers) {
             feeler.setColor(cv::Scalar(0, 200, 100)); // ultrasonic feelers are a different color
         }

         lastTime = now();

         // subscribers
-        // positionSubscriber = create_subscription<autonav_msgs::msg::Position>("/autonav/position", 1, std::bind(&FeelerNode::onPositionReceived, this, std::placeholders::_1));
+        positionSubscriber = create_subscription<autonav_msgs::msg::Position>("/autonav/position", 1, std::bind(&FeelerNode::onPositionReceived, this, std::placeholders::_1));
         imageSubscriber = create_subscription<sensor_msgs::msg::CompressedImage>("/autonav/vision/combined/filtered", 1, std::bind(&FeelerNode::onImageReceived, this, std::placeholders::_1));
         debugImageSubscriber = create_subscription<sensor_msgs::msg::CompressedImage>("/autonav/vision/combined/debug", 1, std::bind(&FeelerNode::onDebugImageReceived, this, std::placeholders::_1));
-        // ultrasonicSubscriber = create_subscription<autonav_msgs::msg::Ultrasonic>("/autonav/ultrasonics", 1, std::bind(&FeelerNode::onUltrasonicsReceived, this, std::placeholders::_1));
+        ultrasonicSubscriber = create_subscription<autonav_msgs::msg::Ultrasonic>("/autonav/ultrasonics", 1, std::bind(&FeelerNode::onUltrasonicsReceived, this, std::placeholders::_1));

         // publishers
-        // motorPublisher = create_publisher<autonav_msgs::msg::MotorInput>("/autonav/motor_input", 1);
+        motorPublisher = create_publisher<autonav_msgs::msg::MotorInput>("/autonav/motor_input", 1);
         debugPublisher = create_publisher<sensor_msgs::msg::CompressedImage>("/autonav/feelers/debug", 1);
-        // safetyLightsPublisher = create_publisher<autonav_msgs::msg::SafetyLights>("/autonav/safety_lights", 1);
-        // audibleFeedbackPublisher = create_publisher<autonav_msgs::msg::AudibleFeedback>("/autonav/audible_feedback", 1);
-        // publishTimer = this->create_wall_timer(std::chrono::milliseconds(50), std::bind(&FeelerNode::publishOutputMessages, this));
+        safetyLightsPublisher = create_publisher<autonav_msgs::msg::SafetyLights>("/autonav/safety_lights", 1);
+        audibleFeedbackPublisher = create_publisher<autonav_msgs::msg::AudibleFeedback>("/autonav/audible_feedback", 1);
+        publishTimer = this->create_wall_timer(std::chrono::milliseconds(50), std::bind(&FeelerNode::publishOutputMessages, this));

         set_device_state(AutoNav::DeviceState::READY);

@@ -174,7 +174,7 @@ class FeelerNode : public AutoNav::Node {

// log("drew that one image", AutoNav::Logging::INFO); //FIXME TODO

this->perf_start("FeelerNode::update");
// this->perf_start("FeelerNode::update");

// calculate new length of every new feeler
for (Feeler &feeler : this->feelers) {
@@ -184,7 +184,7 @@ class FeelerNode : public AutoNav::Node {

// log("first feeler: " + this->feelers.at(0).to_string(), AutoNav::Logging::INFO);

this->perf_stop("FeelerNode::update", true);
// this->perf_stop("FeelerNode::update", true);

// log("FEELERS DRAWING!", AutoNav::Logging::WARN); //FIXME TODO
// log("FEELERS LENGTH, MASK ROWS, MASK COLS, DEBUG ROWS, DEBUG COLS", AutoNav::Logging::WARN);
Expand Down Expand Up @@ -244,7 +244,7 @@ class FeelerNode : public AutoNav::Node {
             }
         }

-        // this->calculateOutputs();
+        this->calculateOutputs();
     }

/**
@@ -260,7 +260,7 @@ class FeelerNode : public AutoNav::Node {

// log("ULTRASONICS GOT!", AutoNav::Logging::WARN); //FIXME TODO

// this->calculateOutputs();
this->calculateOutputs();
}

/**
@@ -285,7 +285,7 @@ class FeelerNode : public AutoNav::Node {
// log("LOGGING DEBUG IMAGE RECEIVED...", AutoNav::Logging::ERROR); //FIXME TODO

// draw feelers on the debug image
this->perf_start("FeelerNode::draw");
// this->perf_start("FeelerNode::draw");
for (Feeler &feeler : this->feelers) {
// log(feeler.to_string(), AutoNav::Logging::WARN);
// log("==================", AutoNav::Logging::WARN);
@@ -295,22 +295,16 @@ class FeelerNode : public AutoNav::Node {
         }

         // draw the ultrasonic feelers on the image (on top of the vision feelers)
-        // for (Feeler feeler : this->ultrasonic_feelers) {
-        //     feeler.draw(this->debug_image_ptr->image);
-        // }
+        for (Feeler feeler : this->ultrasonic_feelers) {
+            feeler.draw(this->debug_image_ptr->image);
+        }

         // draw the GPS feeler
-        // this->gpsFeeler.draw(this->debug_image_ptr->image);
+        this->gpsFeeler.draw(this->debug_image_ptr->image);

         // draw the heading arrow on top of everything else
         this->headingArrow.draw(this->debug_image_ptr->image);
-        this->perf_stop("FeelerNode::draw", true);
-
-
-        //TEMP TODO FIXME BUG TESTING HACK
-        // Feeler x = Feeler(-200, 0);
-        // x.setColor(cv::Scalar(40, 20, 150));
-        // x.draw(this->debug_image_ptr->image);
+        // this->perf_stop("FeelerNode::draw", true);

         // publish the debug image
         this->debugPublisher->publish(*(debug_image_ptr->toCompressedImageMsg())); //TODO
@@ -326,7 +320,7 @@ class FeelerNode : public AutoNav::Node {
     void calculateOutputs() {
         // reinitialize the heading arrow (with a bias towards going 'forwards')
         //TODO what if forwards isn't forwards? we re-initialize this a lot, so FIXME does not account for robot rotation
-        this->headingArrow = Feeler(0, 100); //TODO this should be configurable (the bias forwards, that is)
+        this->headingArrow = Feeler(0, 50); //TODO this should be configurable (the bias forwards, that is)
         this->headingArrow.setColor(cv::Scalar(0, 250, 0)); //TODO this should be configurable?

         // add all the vision-based feelers together
@@ -375,8 +369,8 @@ class FeelerNode : public AutoNav::Node {
         // convert headingArrow to motor outputs
         //FIXME we want to be going max speed on the straightaways
         //FIXME the clamping should be configurable or something
-        msg.forward_velocity = std::clamp(this->headingArrow.getY(), -5, 5);
-        msg.sideways_velocity = std::clamp(this->headingArrow.getX(), -5, 5);
+        msg.forward_velocity = std::clamp(static_cast<double>(this->headingArrow.getY()) / 20, -3.0, 3.0); //FIXME configure divider number thingy
+        msg.sideways_velocity = std::clamp(static_cast<double>(this->headingArrow.getX()) / 20, -3.0, 3.0); //FIXME configure divider number thingy
         msg.angular_velocity = 0.0; //TODO figure out when we want to turn

         //TODO safety lights need to change to other colors and stuff for debug information
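Note on the new output mapping: the heading arrow components used to be clamped directly to [-5, 5]; they are now divided by 20 and then clamped to [-3.0, 3.0], so the bare forward bias Feeler(0, 50) alone yields 2.5 forward and either axis saturates once its component passes 60. A minimal Python sketch of just that arithmetic (the helper name is invented for illustration; only the divider and limits come from the diff):

    def motor_outputs(heading_x: float, heading_y: float) -> tuple[float, float]:
        # divide by 20, then clamp to [-3, 3], mirroring the std::clamp calls above
        clamp = lambda v: max(-3.0, min(3.0, v))
        return clamp(heading_y / 20), clamp(heading_x / 20)  # (forward, sideways)

    print(motor_outputs(0, 50))     # (2.5, 0.0)  <- bare forward bias
    print(motor_outputs(120, -90))  # (-3.0, 3.0) <- saturated on both axes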
autonav_ws/src/autonav_feelers/src/vision_test.py (75 changes: 75 additions & 0 deletions, new file)
@@ -0,0 +1,75 @@
import cv2
import numpy as np


# copied/pasted from combination.py (really these should be grabbed from the image itself or something)
IMAGE_WIDTH = 640
IMAGE_HEIGHT = 480

COMBINED_IMAGE_WIDTH = IMAGE_HEIGHT*2 + IMAGE_WIDTH
COMBINED_IMAGE_HEIGHT = IMAGE_HEIGHT*2 + IMAGE_WIDTH

videoReader = cv2.VideoCapture("./data/debug_combined.mp4")

written = False

while True:
    ret, combined_image = videoReader.read()

    if not ret:
        break

    #TODO this was also copypastaed from combination.py
    x_offset = (COMBINED_IMAGE_WIDTH//2)-(IMAGE_WIDTH//2)

    image_front = combined_image[0 : IMAGE_HEIGHT, x_offset : x_offset+IMAGE_WIDTH]
    image_left = combined_image[IMAGE_HEIGHT : IMAGE_HEIGHT+IMAGE_WIDTH, 0 : IMAGE_HEIGHT]
    image_right = combined_image[IMAGE_HEIGHT : IMAGE_HEIGHT+IMAGE_WIDTH, -IMAGE_HEIGHT : ]
    image_back = combined_image[COMBINED_IMAGE_HEIGHT-IMAGE_HEIGHT : , x_offset : x_offset+IMAGE_WIDTH]

    image_left = cv2.rotate(image_left, cv2.ROTATE_90_CLOCKWISE)
    image_right = cv2.rotate(image_right, cv2.ROTATE_90_COUNTERCLOCKWISE)
    image_back = cv2.rotate(image_back, cv2.ROTATE_180)

    # cv2.imshow("pre-flattened", combined_image)

    # top left, top right, bottom left, bottom right
    # src_pts = np.float32([(0, 0), (IMAGE_WIDTH, 0), (0, IMAGE_HEIGHT), (IMAGE_WIDTH, IMAGE_HEIGHT)])
    # src_pts = np.float32([(218, 64), (414, 64), (0, IMAGE_HEIGHT), (IMAGE_WIDTH, IMAGE_HEIGHT)])
    src_pts = np.float32([(200, 0), (IMAGE_WIDTH-200, 0), (0, IMAGE_HEIGHT), (IMAGE_WIDTH, IMAGE_HEIGHT)])
    dest_pts = np.float32([(0, 0), (IMAGE_WIDTH, 0), (200, IMAGE_HEIGHT), (IMAGE_WIDTH-200, IMAGE_HEIGHT)])

    # matrix = cv2.getPerspectiveTransform(dest_pts, src_pts)
    matrix = cv2.getPerspectiveTransform(src_pts, dest_pts)
    image_front_flattened = cv2.warpPerspective(image_front, matrix, (640, 480))
    image_left_flattened = cv2.warpPerspective(image_left, matrix, (640, 480))
    image_right_flattened = cv2.warpPerspective(image_right, matrix, (640, 480))
    image_back_flattened = cv2.warpPerspective(image_back, matrix, (640, 480))
    # print(image_front_flattened.shape)

    image_left_flattened = cv2.rotate(image_left_flattened, cv2.ROTATE_90_COUNTERCLOCKWISE)
    image_right_flattened = cv2.rotate(image_right_flattened, cv2.ROTATE_90_CLOCKWISE)
    image_back_flattened = cv2.rotate(image_back_flattened, cv2.ROTATE_180)

    combined_image[0 : IMAGE_HEIGHT, x_offset : x_offset+IMAGE_WIDTH] = image_front_flattened
    combined_image[IMAGE_HEIGHT : IMAGE_HEIGHT+IMAGE_WIDTH, 0 : IMAGE_HEIGHT] = image_left_flattened
    combined_image[IMAGE_HEIGHT : IMAGE_HEIGHT+IMAGE_WIDTH, -IMAGE_HEIGHT : ] = image_right_flattened
    combined_image[COMBINED_IMAGE_HEIGHT-IMAGE_HEIGHT : , x_offset : x_offset+IMAGE_WIDTH] = image_back_flattened

    shrunk_image = cv2.resize(combined_image, (COMBINED_IMAGE_WIDTH//2, COMBINED_IMAGE_HEIGHT//2))

    if not written:
        cv2.imwrite("./data/flattened_front.png", image_front_flattened)
        written = True
    # cv2.imshow("flattened big", combined_image)
    cv2.imshow("combined", shrunk_image)
    # cv2.imshow("flattened", image_front_flattened)
    # cv2.imshow("regular", image_front)
    keycode = cv2.waitKey()

    if keycode == ord("q"):
        break


videoReader.release()
cv2.destroyAllWindows()
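A quick way to sanity-check the flattening warp is to push the source corners through the homography and confirm they land on the destination corners. A standalone sketch, assuming the same constants as vision_test.py:

    import cv2
    import numpy as np

    IMAGE_WIDTH, IMAGE_HEIGHT = 640, 480
    src_pts = np.float32([(200, 0), (IMAGE_WIDTH-200, 0), (0, IMAGE_HEIGHT), (IMAGE_WIDTH, IMAGE_HEIGHT)])
    dest_pts = np.float32([(0, 0), (IMAGE_WIDTH, 0), (200, IMAGE_HEIGHT), (IMAGE_WIDTH-200, IMAGE_HEIGHT)])

    matrix = cv2.getPerspectiveTransform(src_pts, dest_pts)

    # cv2.perspectiveTransform expects points shaped (N, 1, 2)
    mapped = cv2.perspectiveTransform(src_pts.reshape(-1, 1, 2), matrix).reshape(-1, 2)
    print(mapped)  # should reproduce dest_pts: each source corner lands on its destination corner

This also makes the commented-out getPerspectiveTransform(dest_pts, src_pts) line easy to interpret: swapping the arguments builds the inverse warp.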
autonav_ws/src/autonav_vision/src/combination.py (12 changes: 8 additions & 4 deletions)
@@ -15,12 +15,15 @@

 bridge = CvBridge()

+#TODO all image dimensions should be based off the image received instead of constants in this file, so we only have to change it in like camera.py and then make no changes here
+
 IMAGE_WIDTH = 640
 IMAGE_HEIGHT = 480

 #TODO use like max() or min() in case we play with the dimensions
 # or like, combine them better so the total combined image is only 640x480 or something
-COMBINED_IMAGE_WIDTH = IMAGE_HEIGHT*2 # left and right cameras are 480 pixels tall but turned on their sides so it's 480 pixels wide and then next to each other, and the top/bottom cameras are only 640 wide so this is fine
+# COMBINED_IMAGE_WIDTH = IMAGE_HEIGHT*2 # left and right cameras are 480 pixels tall but turned on their sides so it's 480 pixels wide and then next to each other, and the top/bottom cameras are only 640 wide so this is fine
+COMBINED_IMAGE_WIDTH = IMAGE_HEIGHT*2 + IMAGE_WIDTH # make it a square, follow what the height is
 COMBINED_IMAGE_HEIGHT = IMAGE_HEIGHT*2 + IMAGE_WIDTH # top and bottom cameras are 480 pixels tall, plus the left/right cameras turned on their side which adds 640 pixels


@@ -120,7 +123,7 @@ def try_combine_images(self):
         x_offset = (COMBINED_IMAGE_WIDTH//2)-(IMAGE_WIDTH//2)
         combined[0 : IMAGE_HEIGHT, x_offset : x_offset+IMAGE_WIDTH] = image_front
         combined[IMAGE_HEIGHT : IMAGE_HEIGHT+IMAGE_WIDTH, 0 : IMAGE_HEIGHT] = image_left # the left and right cameras are rotated 90 degrees so coordinates are backwards, it is not [IMAGE_HEIGHT : IMAGE_HEIGHT*2, 0 : IMAGE_WIDTH]
-        combined[IMAGE_HEIGHT : IMAGE_HEIGHT+IMAGE_WIDTH, IMAGE_HEIGHT : ] = image_right # same here, if it wasn't rotated it would be [IMAGE_HEIGHT : IMAGE_HEIGHT*2, IMAGE_WIDTH : ]
+        combined[IMAGE_HEIGHT : IMAGE_HEIGHT+IMAGE_WIDTH, -IMAGE_HEIGHT : ] = image_right # same here, if it wasn't rotated it would be [IMAGE_HEIGHT : IMAGE_HEIGHT*2, IMAGE_WIDTH : ]
         combined[COMBINED_IMAGE_HEIGHT-IMAGE_HEIGHT : , x_offset : x_offset+IMAGE_WIDTH] = image_back

         # and publish the image
@@ -142,10 +145,11 @@ def try_combine_images(self):
         # debug_combined = cv2

         # we have a copy of every image now, so combine them
+        #FIXME *really* don't like duplicated code here
         x_offset = (COMBINED_IMAGE_WIDTH//2)-(IMAGE_WIDTH//2)
         debug_combined[0 : IMAGE_HEIGHT, x_offset : x_offset+IMAGE_WIDTH] = debug_image_front
         debug_combined[IMAGE_HEIGHT : IMAGE_HEIGHT+IMAGE_WIDTH, 0 : IMAGE_HEIGHT] = debug_image_left # the left and right cameras are rotated 90 degrees so coordinates are backwards, it is not [IMAGE_HEIGHT : IMAGE_HEIGHT*2, 0 : IMAGE_WIDTH]
-        debug_combined[IMAGE_HEIGHT : IMAGE_HEIGHT+IMAGE_WIDTH, IMAGE_HEIGHT : ] = debug_image_right # same here, if it wasn't rotated it would be [IMAGE_HEIGHT : IMAGE_HEIGHT*2, IMAGE_WIDTH : ]
+        debug_combined[IMAGE_HEIGHT : IMAGE_HEIGHT+IMAGE_WIDTH, IMAGE_HEIGHT+IMAGE_WIDTH : ] = debug_image_right # same here, if it wasn't rotated it would be [IMAGE_HEIGHT : IMAGE_HEIGHT*2, IMAGE_WIDTH : ]
         debug_combined[COMBINED_IMAGE_HEIGHT-IMAGE_HEIGHT : , x_offset : x_offset+IMAGE_WIDTH] = debug_image_back

         # and publish the image
@@ -168,7 +172,7 @@ def try_combine_images(self):

         # FIXME DEBUG HACK
         # while the UI is still in development, log images to a video for debugging
-        if self.frame < 200:
+        if self.frame < 500:
             combined = cv2.cvtColor(np.uint8(combined), cv2.COLOR_GRAY2BGR)
             self.video_writer.write(combined)
             self.debug_video_writer.write(debug_combined)
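The slicing in try_combine_images is easier to trust once the shapes are written out: the canvas is a square of side IMAGE_HEIGHT*2 + IMAGE_WIDTH = 1600, the front/back cameras occupy 480x640 strips at the top and bottom center, and the rotated side cameras occupy 640x480 strips on the left and right edges. A rough standalone check under those assumptions:

    import numpy as np

    IMAGE_WIDTH, IMAGE_HEIGHT = 640, 480
    SIDE = IMAGE_HEIGHT*2 + IMAGE_WIDTH  # 1600, square canvas

    canvas = np.zeros((SIDE, SIDE), dtype=np.uint8)
    x_offset = SIDE//2 - IMAGE_WIDTH//2

    # front and back: 480 rows x 640 cols, centered horizontally
    assert canvas[0:IMAGE_HEIGHT, x_offset:x_offset+IMAGE_WIDTH].shape == (480, 640)
    assert canvas[SIDE-IMAGE_HEIGHT:, x_offset:x_offset+IMAGE_WIDTH].shape == (480, 640)

    # left and right are rotated 90 degrees, so their slices are 640 rows x 480 cols
    assert canvas[IMAGE_HEIGHT:IMAGE_HEIGHT+IMAGE_WIDTH, 0:IMAGE_HEIGHT].shape == (640, 480)
    assert canvas[IMAGE_HEIGHT:IMAGE_HEIGHT+IMAGE_WIDTH, -IMAGE_HEIGHT:].shape == (640, 480)

    # the two right-edge column expressions used in this commit pick the same columns:
    assert SIDE - IMAGE_HEIGHT == IMAGE_HEIGHT + IMAGE_WIDTH  # both 1120

In particular the last assert shows why -IMAGE_HEIGHT: (in combined) and IMAGE_HEIGHT+IMAGE_WIDTH: (in debug_combined) are equivalent on the 1600-wide canvas.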
