Skip to content

Commit

Permalink
Add robot pose visual to system
Browse files Browse the repository at this point in the history
  • Loading branch information
justinb4003 committed Mar 15, 2024
1 parent 736018d commit dbe008f
Show file tree
Hide file tree
Showing 4 changed files with 129 additions and 25 deletions.
62 changes: 52 additions & 10 deletions robot.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,11 @@
# All units of length will be in meters unless otherwise noted

from dataclasses import Field
import json

from time import time
from math import radians
from wpilib import SmartDashboard, Joystick, DriverStation, Timer
from wpilib import SmartDashboard, Joystick, DriverStation, Timer, Field2d
from commands2 import TimedCommandRobot, SequentialCommandGroup, InstantCommand
from commands2.button import JoystickButton
from wpimath.geometry import Rotation2d, Pose2d
Expand Down Expand Up @@ -53,6 +54,12 @@ class MyRobot(TimedCommandRobot):

def robotInit(self) -> None:
self.vision_timer = Timer()
self.field = Field2d()
SmartDashboard.putData(self.field)
pn = SmartDashboard.putNumber
pn('visiontest/fakeX', 3)
pn('visiontest/fakeY', 4)
pn('visiontest/fakeRot', 0)
"""Robot initialization function"""
if True:
# Disable the joystick warnings in simulator mode; they're annoying
Expand Down Expand Up @@ -187,8 +194,21 @@ def robotPeriodic(self) -> None:

# Rough idea of how to incorporate vision into odometry
if self.swerve.vision_stable is True:
from math import pi
# Here's our method to pull data from LimeLight's network table
ll_poses, cameralag, computelag = self.get_poses_from_limelight()
if is_sim():
gn = SmartDashboard.getNumber
fakex = gn('visiontest/fakeX', 0)
fakey = gn('visiontest/fakeY', 0)
fakerot = gn('visiontest/fakeRot', 0)
ll_poses = [Pose2d(fakex, fakey, Rotation2d(radians(fakerot)))]
certain_within = [(10, 10, 1/pi)]
cameralag = 0
computelag = 0
else:
ll_poses, certain_within, cameralag, computelag = (
self.get_poses_from_limelight()
)
# Here we can make a determination to use the vision data or not
# For example we might only want to accept poses that are already
# within 1 meter of where we think we are.
Expand All @@ -198,16 +218,21 @@ def robotPeriodic(self) -> None:
# to get things performing right. Nothing is set in stone.
if len(ll_poses) > 0:
timelag = cameralag + computelag
for p in ll_poses:
for p, cw in zip(ll_poses, certain_within):
x = p.X()
y = p.Y()
rot = p.rotation().degrees()
# print(f'vision heading: {x}, {y}, {rot}')
# self.swerve.odometry.addVisionMeasurement(p, 0, (0.1, 0.1, 0.1))
self.swerve.odometry.addVisionMeasurement(p, self.vision_timer.getFPGATimestamp(), (100, 100, 100))
ts = self.vision_timer.getFPGATimestamp() - timelag
print(f'vision heading: {x}, {y}, {rot}, {ts}')
xdev, ydev, rotdev = cw
self.swerve.odometry.addVisionMeasurement(
p, ts, (xdev, ydev, rotdev)
)
else:
# print('no vision data')
pass
self.field.setRobotPose(self.swerve.getPose())
pass

def auto_station_1(self):
Expand Down Expand Up @@ -285,30 +310,47 @@ def teleopPeriodic(self) -> None:
# https://docs.limelightvision.io/docs/docs-limelight/apis/json-dump-specification
# We are going to be using 't6r_fs' aka "Robot Pose in field space as
# computed by this fiducial (x,y,z,rx,ry,rz)"
def get_poses_from_limelight(self):
    """Parse the LimeLight JSON dump into field-space robot poses.

    Reads the raw JSON string from the drivetrain's LimeLight network
    table entry and converts every visible AprilTag fiducial's
    't6r_fs' result into a Pose2d.

    Returns a 4-tuple of:
      - poses: list[Pose2d], one robot pose per visible fiducial
      - certain_within: list of (x meters, y meters, rot degrees)
        standard-deviation tuples, paired 1:1 with ``poses``
      - camera_lag: float, camera pipeline latency in seconds
        (currently always 0; see the commented-out 'tl' lookup below)
      - diff: float, seconds spent parsing in this method (compute lag)
    """
    t1 = time()
    data = self.swerve.ll_json_entry.get()
    obj = json.loads(data)
    poses = []
    certain_within = []
    camera_lag = 0
    # Short circuit any JSON processing if we got back an empty list,
    # which is the default value for the limelight network table entry.
    if len(obj) == 0:
        t2 = time()
        diff = t2 - t1
        return poses, certain_within, camera_lag, diff

    result = obj['Results']
    # camera_lag = result['tl']
    for fid in result['Fiducial']:
        target_area = fid['ta']
        # Use this, the total area the target occupies in pixels on the
        # camera picture, to decide how certain we are of the pose.

        # Start with a very uncertain value unless a condition below
        # says otherwise. The format is uncertainty in x in meters,
        # y in meters, and rotation in degrees.
        # BUG FIX: the default must be a 3-tuple like the thresholds
        # below — the caller unpacks each entry as
        # ``xdev, ydev, rotdev = cw``, so a bare scalar would raise
        # TypeError whenever no area threshold was met.
        certainty = (10, 10, 30)
        if target_area > 200:
            certainty = (3, 3, 10)
        if target_area > 300:
            certainty = (2, 2, 5)
        if target_area > 400:
            certainty = (1, 1, 3)

        robot_pose_raw = fid['t6r_fs']
        # TODO: Verify that the rotation is the right value
        pose = Pose2d(robot_pose_raw[0], robot_pose_raw[1],
                      Rotation2d(radians(robot_pose_raw[3])))
        poses.append(pose)
        certain_within.append(certainty)
    t2 = time()
    diff = t2 - t1
    return poses, certain_within, camera_lag, diff

# TODO: Heading needs to be added to the return on this
# and the overal processing could be a lot cleaner.
Expand Down
43 changes: 30 additions & 13 deletions simgui-window.json
Original file line number Diff line number Diff line change
Expand Up @@ -8,31 +8,48 @@
"height": "900",
"maximized": "0",
"style": "0",
"userScale": "2",
"userScale": "4",
"width": "1600",
"xpos": "-1",
"ypos": "-1"
"xpos": "160",
"ypos": "106"
}
},
"Table": {
"0x542B5671,2": {
"Column 0 Width": "260",
"Column 1 Width": "228",
"RefScale": "19"
}
},
"Window": {
"###/SmartDashboard/Field": {
"Collapsed": "0",
"Pos": "729,-22",
"Size": "937,664"
},
"###Addressable LEDs": {
"Collapsed": "0",
"Pos": "13,432",
"Size": "199,436"
},
"###FMS": {
"Collapsed": "0",
"Pos": "6,675",
"Size": "336,158"
"Pos": "240,675",
"Size": "388,176"
},
"###Joysticks": {
"Collapsed": "0",
"Pos": "312,581",
"Size": "976,82"
"Size": "1156,91"
},
"###NetworkTables": {
"Collapsed": "0",
"Pos": "312,346",
"Size": "937,231"
"Pos": "99,40",
"Size": "635,613"
},
"###NetworkTables Info": {
"Collapsed": "0",
"Pos": "312,162",
"Pos": "326,714",
"Size": "937,181"
},
"###Other Devices": {
Expand All @@ -42,13 +59,13 @@
},
"###System Joysticks": {
"Collapsed": "0",
"Pos": "6,437",
"Size": "232,254"
"Pos": "15,185",
"Size": "273,290"
},
"###Timing": {
"Collapsed": "0",
"Pos": "6,187",
"Size": "162,168"
"Size": "188,186"
},
"Debug##Default": {
"Collapsed": "0",
Expand All @@ -58,7 +75,7 @@
"Robot State": {
"Collapsed": "0",
"Pos": "5,20",
"Size": "109,134"
"Size": "126,152"
}
}
}
47 changes: 46 additions & 1 deletion simgui.json
Original file line number Diff line number Diff line change
@@ -1,7 +1,52 @@
{
"HALProvider": {
"Addressable LEDs": {
"window": {
"visible": true
}
},
"Timing": {
"window": {
"visible": false
}
}
},
"NTProvider": {
"types": {
"/FMSInfo": "FMSInfo"
"/FMSInfo": "FMSInfo",
"/SmartDashboard/Field": "Field2d"
},
"windows": {
"/SmartDashboard/Field": {
"bottom": 1476,
"height": 8.210550308227539,
"left": 150,
"right": 2961,
"top": 79,
"width": 16.541748046875,
"window": {
"visible": true
}
}
}
},
"NetworkTables": {
"transitory": {
"Shuffleboard": {
"open": true
},
"SmartDashboard": {
"drivetrain": {
"odometry": {
"open": true
},
"open": true
},
"open": true,
"visiontest": {
"open": true
}
}
}
},
"NetworkTables Info": {
Expand Down
2 changes: 1 addition & 1 deletion subsystems/drivetrain.py
Original file line number Diff line number Diff line change
Expand Up @@ -123,7 +123,7 @@ def __init__(self, gyro: gyro.Gyro, driver_controller, photon: NoteTracker,
self.backRight.getPosition(),
),
Pose2d(),
(0, 0, 0), # wheel std devs for Kalman filters
(1, 1, 1), # wheel std devs for Kalman filters
(0, 0, 0), # vision std devs for Kalman filters
)

Expand Down

0 comments on commit dbe008f

Please sign in to comment.