diff --git a/calibration_debug.py b/calibration_debug.py
new file mode 100644
index 0000000..870a882
--- /dev/null
+++ b/calibration_debug.py
@@ -0,0 +1,298 @@
+import logging
+logging.getLogger('matplotlib').setLevel(logging.WARNING)
+
+import matplotlib.pyplot as plt
+from collections import deque
+from pathlib import Path
+from .types import *
+import numpy as np
+import glob
+import cv2
+
+# Helper functions
+
+def scale_intrinsics(intrinsics, originalShape, destShape):
+    scale = destShape[1] / originalShape[1] # scale on width
+    scale_mat = np.array([[scale, 0, 0], [0, scale, 0], [0, 0, 1]])
+    scaled_intrinsics = np.matmul(scale_mat, intrinsics)
+    """ print("Scaled height offset : {}".format(
+        (originalShape[0] * scale - destShape[0]) / 2))
+    print("Scaled width offset : {}".format(
+        (originalShape[1] * scale - destShape[1]) / 2)) """
+    scaled_intrinsics[1][2] -= (originalShape[0] # c_y - along height of the image
+                                * scale - destShape[0]) / 2
+    scaled_intrinsics[0][2] -= (originalShape[1] # c_x width of the image
+                                * scale - destShape[1]) / 2
+    #if self.traceLevel == 3 or self.traceLevel == 10:
+    print('original_intrinsics')
+    print(intrinsics)
+    print('scaled_intrinsics')
+    print(scaled_intrinsics)
+
+    return scaled_intrinsics
+
+def scale_image(img, scaled_res):
+    expected_height = img.shape[0]*(scaled_res[1]/img.shape[1])
+    #if self.traceLevel == 2 or self.traceLevel == 10:
+    print("Expected Height: {}".format(expected_height))
+
+    if not (img.shape[0] == scaled_res[0] and img.shape[1] == scaled_res[1]):
+        if int(expected_height) == scaled_res[0]:
+            # resizing to have both stereo and rgb to have same
+            # resolution to capture extrinsics of the rgb-right camera
+            img = cv2.resize(img, (scaled_res[1], scaled_res[0]),
+                             interpolation=cv2.INTER_CUBIC)
+            return img
+        else:
+            # resizing and cropping to have both stereo and rgb to have same resolution
+            # to calculate extrinsics of the rgb-right camera
+            scale_width = scaled_res[1]/img.shape[1]
+            dest_res = (
+                int(img.shape[1] * scale_width), int(img.shape[0] * scale_width))
+            img = cv2.resize(
+                img, dest_res, interpolation=cv2.INTER_CUBIC)
+            if img.shape[0] < scaled_res[0]:
+                raise RuntimeError("resizeed height of rgb is smaller than required. {0} < {1}".format(
+                    img.shape[0], scaled_res[0]))
+            # print(gray.shape[0] - req_resolution[0])
+            del_height = (img.shape[0] - scaled_res[0]) // 2
+            # gray = gray[: req_resolution[0], :]
+            img = img[del_height: del_height + scaled_res[0], :]
+            return img
+    else:
+        return img
+
+def detect_charuco_board(config: CharucoBoard, image: np.array):
+    arucoParams = cv2.aruco.DetectorParameters_create()
+    arucoParams.minMarkerDistanceRate = 0.01
+    corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(image, config.dictionary, parameters=arucoParams) # First, detect markers
+    marker_corners, marker_ids, refusd, recoverd = cv2.aruco.refineDetectedMarkers(image, config.board, corners, ids, rejectedCorners=rejectedImgPoints)
+    # If found, add object points, image points (after refining them)
+    if len(marker_corners) > 0:
+        ret, corners, ids = cv2.aruco.interpolateCornersCharuco(marker_corners,marker_ids,image, config.board, minMarkers = 1)
+        return ret, corners, ids, marker_corners, marker_ids
+    else:
+        return None, None, None, None, None
+
+# Debugging functions
+
+def debug_epipolar_charuco(left_cam_info: CameraData, right_cam_info: CameraData, left_board: CharucoBoard, right_board: CharucoBoard, t, r_l, r_r):
+    images_left = glob.glob(left_cam_info['images_path'] + '/*.png')
+    images_right = glob.glob(right_cam_info['images_path'] + '/*.png')
+    images_left.sort()
+    print(images_left)
+    images_right.sort()
+    assert len(images_left) != 0, "ERROR: Images not read correctly"
+    assert len(images_right) != 0, "ERROR: Images not read correctly"
+    isHorizontal = np.absolute(t[0]) > np.absolute(t[1])
+
+    scale = None
+    scale_req = False
+    frame_left_shape = cv2.imread(images_left[0], 0).shape
+    frame_right_shape = cv2.imread(images_right[0], 0).shape
+    scalable_res = frame_left_shape
+    scaled_res = frame_right_shape
+    if frame_right_shape[0] < frame_left_shape[0] and frame_right_shape[1] < frame_left_shape[1]:
+        scale_req = True
+        scale = frame_right_shape[1] / frame_left_shape[1]
+    elif frame_right_shape[0] > frame_left_shape[0] and frame_right_shape[1] > frame_left_shape[1]:
+        scale_req = True
+        scale = frame_left_shape[1] / frame_right_shape[1]
+        scalable_res = frame_right_shape
+        scaled_res = frame_left_shape
+
+    if scale_req:
+        scaled_height = scale * scalable_res[0]
+        diff = scaled_height - scaled_res[0]
+        if diff < 0:
+            scaled_res = (int(scaled_height), scaled_res[1])
+    #if self.traceLevel == 3 or self.traceLevel == 10:
+    print(
+        f'Is scale Req: {scale_req}\n scale value: {scale} \n scalable Res: {scalable_res} \n scale Res: {scaled_res}')
+    print("Original res Left :{}".format(frame_left_shape))
+    print("Original res Right :{}".format(frame_right_shape))
+    # print("Scale res :{}".format(scaled_res))
+
+    M_l = left_cam_info['intrinsics']
+    M_r = right_cam_info['intrinsics']
+    M_lp = scale_intrinsics(M_l, frame_left_shape, scaled_res)
+    M_rp = scale_intrinsics(M_r, frame_right_shape, scaled_res)
+
+    criteria = (cv2.TERM_CRITERIA_EPS +
+                cv2.TERM_CRITERIA_MAX_ITER, 10000, 0.00001)
+
+    # TODO(Sachin): Observe Images by adding visualization
+    # TODO(Sachin): Check if the stetch is only in calibration Images
+    print('Original intrinsics ....')
+    print(f"L {M_lp}")
+    print(f"R: {M_rp}")
+    #if self.traceLevel == 3 or self.traceLevel == 10:
+    print(f'Width and height is {scaled_res[::-1]}')
+    # kScaledL, _ = cv2.getOptimalNewCameraMatrix(M_r, d_r, scaled_res[::-1], 0)
+    # kScaledL, _ = cv2.getOptimalNewCameraMatrix(M_r, d_l, scaled_res[::-1], 0)
+    # kScaledR, _ = cv2.getOptimalNewCameraMatrix(M_r, d_r, scaled_res[::-1], 0)
+    kScaledR = kScaledL = M_rp
+
+    # if self.cameraModel != 'perspective':
+    #     kScaledR = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(M_r, d_r, scaled_res[::-1], np.eye(3), fov_scale=1.1)
+    #     kScaledL = kScaledR
+
+
+    print('Intrinsics from the getOptimalNewCameraMatrix/Original ....')
+    print(f"L: {kScaledL}")
+    print(f"R: {kScaledR}")
+    oldEpipolarError = None
+    epQueue = deque()
+    movePos = True
+
+    left_calib_model = left_cam_info['calib_model']
+    right_calib_model = right_cam_info['calib_model']
+    d_l = left_cam_info['dist_coeff']
+    d_r = right_cam_info['dist_coeff']  # FIX: was left_cam_info['dist_coeff'] (copy/paste typo) — right image must be undistorted with the right camera's coefficients
+    print(left_calib_model, right_calib_model)
+    if left_calib_model == 'perspective':
+        mapx_l, mapy_l = cv2.initUndistortRectifyMap(
+            M_lp, d_l, r_l, kScaledL, scaled_res[::-1], cv2.CV_32FC1)
+    else:
+        mapx_l, mapy_l = cv2.fisheye.initUndistortRectifyMap(
+            M_lp, d_l, r_l, kScaledL, scaled_res[::-1], cv2.CV_32FC1)
+    if right_calib_model == "perspective":
+        mapx_r, mapy_r = cv2.initUndistortRectifyMap(
+            M_rp, d_r, r_r, kScaledR, scaled_res[::-1], cv2.CV_32FC1)
+    else:
+        mapx_r, mapy_r = cv2.fisheye.initUndistortRectifyMap(
+            M_rp, d_r, r_r, kScaledR, scaled_res[::-1], cv2.CV_32FC1)
+
+    image_data_pairs = []
+    for image_left, image_right in zip(images_left, images_right):
+        # read images
+        img_l = cv2.imread(image_left, 0)
+        img_r = cv2.imread(image_right, 0)
+
+        img_l = scale_image(img_l, scaled_res)
+        img_r = scale_image(img_r, scaled_res)
+        # print(img_l.shape)
+        # print(img_r.shape)
+
+        # warp right image
+        # img_l = cv2.warpPerspective(img_l, self.H1, img_l.shape[::-1],
+        #                             cv2.INTER_CUBIC +
+        #                             cv2.WARP_FILL_OUTLIERS +
+        #                             cv2.WARP_INVERSE_MAP)
+
+        # img_r = cv2.warpPerspective(img_r, self.H2, img_r.shape[::-1],
+        #                             cv2.INTER_CUBIC +
+        #                             cv2.WARP_FILL_OUTLIERS +
+        #                             cv2.WARP_INVERSE_MAP)
+
+        img_l = cv2.remap(img_l, mapx_l, mapy_l, cv2.INTER_LINEAR)
+        img_r = cv2.remap(img_r, mapx_r, mapy_r, cv2.INTER_LINEAR)
+
+        image_data_pairs.append((img_l, img_r))
+
+        #if self.traceLevel == 4 or self.traceLevel == 5 or self.traceLevel == 10:
+        #cv2.imshow("undistorted-Left", img_l)
+        #cv2.imshow("undistorted-right", img_r)
+        # print(f'image path - {im}')
+        # print(f'Image Undistorted Size {undistorted_img.shape}')
+        # k = cv2.waitKey(0)
+        # if k == 27:  # Esc key to stop
+        #     break
+    #if self.traceLevel == 4 or self.traceLevel == 5 or self.traceLevel == 10:
+    # cv2.destroyWindow("undistorted-Left")
+    # cv2.destroyWindow("undistorted-right")
+    # compute metrics
+    imgpoints_r = []
+    imgpoints_l = []
+    image_epipolar_color = []
+    # new_imagePairs = [])
+    for i, image_data_pair in enumerate(image_data_pairs):
+        res2_l = detect_charuco_board(left_board, image_data_pair[0])
+        res2_r = detect_charuco_board(right_board, image_data_pair[1])
+
+        # if self.traceLevel == 2 or self.traceLevel == 4 or self.traceLevel == 10:
+
+
+        img_concat = cv2.hconcat([image_data_pair[0], image_data_pair[1]])
+        img_concat = cv2.cvtColor(img_concat, cv2.COLOR_GRAY2RGB)
+        line_row = 0
+        while line_row < img_concat.shape[0]:
+            cv2.line(img_concat,
+                     (0, line_row), (img_concat.shape[1], line_row),
+                     (0, 255, 0), 1)
+            line_row += 30
+
+        cv2.imshow('Stereo Pair', img_concat)
+        k = cv2.waitKey(1)
+
+        if res2_l[1] is not None and res2_r[1] is not None and len(res2_l[1]) > 3 and len(res2_r[1]) > 3:  # FIX: was res2_r[2] (the ids) — check the right corners, mirroring the res2_l[1] check
+
+            cv2.cornerSubPix(image_data_pair[0], res2_l[1],
+                             winSize=(5, 5),
+                             zeroZone=(-1, -1),
+                             criteria=criteria)
+            cv2.cornerSubPix(image_data_pair[1], res2_r[1],
+                             winSize=(5, 5),
+                             zeroZone=(-1, -1),
+                             criteria=criteria)
+
+            # termination criteria
+            img_pth_right = Path(images_right[i])
+            img_pth_left = Path(images_left[i])
+            org = (100, 50)
+            # cv2.imshow('ltext', lText)
+            # cv2.waitKey(0)
+            localError = 0
+            corners_l = []
+            corners_r = []
+            for j in range(len(res2_l[2])):
+                idx = np.where(res2_r[2] == res2_l[2][j])
+                if idx[0].size == 0:
+                    continue
+                corners_l.append(res2_l[1][j])
+                corners_r.append(res2_r[1][idx])
+
+            imgpoints_l.append(corners_l)
+            imgpoints_r.append(corners_r)
+            epi_error_sum = 0
+            corner_epipolar_color = []
+            for l_pt, r_pt in zip(corners_l, corners_r):
+                if isHorizontal:
+                    curr_epipolar_error = abs(l_pt[0][1] - r_pt[0][1])
+                else:
+                    curr_epipolar_error = abs(l_pt[0][0] - r_pt[0][0])
+                if curr_epipolar_error >= 1:
+                    corner_epipolar_color.append(1)
+                else:
+                    corner_epipolar_color.append(0)
+                epi_error_sum += curr_epipolar_error
+            localError = epi_error_sum / len(corners_l)
+            image_epipolar_color.append(corner_epipolar_color)
+            #if self.traceLevel == 2 or self.traceLevel == 3 or self.traceLevel == 4 or self.traceLevel == 10:
+            print("Epipolar Error per image on host in " + img_pth_right.name + " : " +
+                  str(localError))
+        else:
+            print('Numer of corners is in left -> and right ->')
+            return -1
+        lText = cv2.putText(cv2.cvtColor(image_data_pair[0],cv2.COLOR_GRAY2RGB), img_pth_left.name, org, cv2.FONT_HERSHEY_SIMPLEX, 1, (2, 0, 255), 2, cv2.LINE_AA)
+        rText = cv2.putText(cv2.cvtColor(image_data_pair[1],cv2.COLOR_GRAY2RGB), img_pth_right.name + " Error: " + str(localError), org, cv2.FONT_HERSHEY_SIMPLEX, 1, (2, 0, 255), 2, cv2.LINE_AA)
+        image_data_pairs[i] = (lText, rText)
+
+
+    epi_error_sum = 0
+    total_corners = 0
+    for corners_l, corners_r in zip(imgpoints_l, imgpoints_r):
+        total_corners += len(corners_l)
+        for l_pt, r_pt in zip(corners_l, corners_r):
+            if isHorizontal:
+                epi_error_sum += abs(l_pt[0][1] - r_pt[0][1])
+            else:
+                epi_error_sum += abs(l_pt[0][0] - r_pt[0][0])
+
+    avg_epipolar = epi_error_sum / total_corners
+    print("Average Epipolar Error is : " + str(avg_epipolar))
+
+    #if self.enable_rectification_disp:
+    #    self.display_rectification(image_data_pairs, imgpoints_l, imgpoints_r, image_epipolar_color, isHorizontal)
+
+    return avg_epipolar
diff --git a/calibration_utils.py b/calibration_utils.py
index 8ff22a6..1b4aad1 100644
--- a/calibration_utils.py
+++ b/calibration_utils.py
@@ -1,2087 +1,939 @@
-#!/usr/bin/env python3
-
-import cv2
-import glob
-import os
-import shutil
-import numpy as np
 from scipy.spatial.transform import Rotation
-import time
-import json
-import cv2.aruco as aruco
-import logging
-logging.getLogger('matplotlib').setLevel(logging.WARNING)
-
+from .worker import ParallelWorker
+from typing import List, Tuple from pathlib import Path -from functools import reduce -from collections import deque -from typing import Optional -import matplotlib.pyplot as plt -from scipy.interpolate import griddata -plt.rcParams.update({'font.size': 16}) -import matplotlib.colors as colors -import logging -logging.getLogger('matplotlib').setLevel(logging.WARNING) -per_ccm = True -extrinsic_per_ccm = False -cdict = {'red': ((0.0, 0.0, 0.0), # no red at 0 - (0.5, 1.0, 1.0), # all channels set to 1.0 at 0.5 to create white - (1.0, 0.8, 0.8)), # set to 0.8 so its not too bright at 1 -'green': ((0.0, 0.8, 0.8), # set to 0.8 so its not too bright at 0 - (0.5, 1.0, 1.0), # all channels set to 1.0 at 0.5 to create white - (1.0, 0.0, 0.0)), # no green at 1 -'blue': ((0.0, 0.0, 0.0), # no blue at 0 - (0.5, 1.0, 1.0), # all channels set to 1.0 at 0.5 to create white - (1.0, 0.0, 0.0)) # no blue at 1 -} -# Create the colormap using the dictionary -GnRd = colors.LinearSegmentedColormap('GnRd', cdict) -def get_quadrant_coordinates(width, height, nx, ny): - quadrant_width = width // nx - quadrant_height = height // ny - quadrant_coords = [] - - for i in range(int(nx)): - for j in range(int(ny)): - left = i * quadrant_width - upper = j * quadrant_height - right = left + quadrant_width - bottom = upper + quadrant_height - quadrant_coords.append((left, upper, right, bottom)) - - return quadrant_coords - -def sort_points_into_quadrants(points, width, height, error, nx = 4, ny = 4): - quadrant_coords = get_quadrant_coordinates(width, height, nx, ny) - quadrants = {i: [] for i in range(int(nx*ny))} # Create a dictionary to store points by quadrant index - - for x, y in points: - # Find the correct quadrant for each point - for index, (left, upper, right, bottom) in enumerate(quadrant_coords): - if left <= x < right and upper <= y < bottom: - quadrants[index].append(error[index]) - break - - return quadrants, quadrant_coords - -def distance(point1, point2): - return 
np.sqrt((point1[0] - point2[0])**2 + (point1[1] - point2[1])**2) -# Creates a set of 13 polygon coordinates -rectProjectionMode = 0 +from .types import * +import numpy as np +import time +import cv2 +PER_CCM = True +EXTRINSICS_PER_CCM = False colors = [(0, 255 , 0), (0, 0, 255)] -def setPolygonCoordinates(height, width): - horizontal_shift = width//4 - vertical_shift = height//4 - - margin = 60 - slope = 150 - - p_coordinates = [ - [[margin, margin], [margin, height-margin], - [width-margin, height-margin], [width-margin, margin]], - - [[margin, 0], [margin, height], [width//2, height-slope], [width//2, slope]], - [[horizontal_shift, 0], [horizontal_shift, height], [ - width//2 + horizontal_shift, height-slope], [width//2 + horizontal_shift, slope]], - [[horizontal_shift*2-margin, 0], [horizontal_shift*2-margin, height], [width//2 + - horizontal_shift*2-margin, height-slope], [width//2 + horizontal_shift*2-margin, slope]], - - [[width-margin, 0], [width-margin, height], - [width//2, height-slope], [width//2, slope]], - [[width-horizontal_shift, 0], [width-horizontal_shift, height], [width // - 2-horizontal_shift, height-slope], [width//2-horizontal_shift, slope]], - [[width-horizontal_shift*2+margin, 0], [width-horizontal_shift*2+margin, height], [width // - 2-horizontal_shift*2+margin, height-slope], [width//2-horizontal_shift*2+margin, slope]], - - [[0, margin], [width, margin], [ - width-slope, height//2], [slope, height//2]], - [[0, vertical_shift], [width, vertical_shift], [width-slope, - height//2+vertical_shift], [slope, height//2+vertical_shift]], - [[0, vertical_shift*2-margin], [width, vertical_shift*2-margin], [width-slope, - height//2+vertical_shift*2-margin], [slope, height//2+vertical_shift*2-margin]], - [[0, height-margin], [width, height-margin], - [width-slope, height//2], [slope, height//2]], - [[0, height-vertical_shift], [width, height-vertical_shift], [width - - slope, height//2-vertical_shift], [slope, height//2-vertical_shift]], - [[0, 
height-vertical_shift*2+margin], [width, height-vertical_shift*2+margin], [width - - slope, height//2-vertical_shift*2+margin], [slope, height//2-vertical_shift*2+margin]] - ] - return p_coordinates - - -def getPolygonCoordinates(idx, p_coordinates): - return p_coordinates[idx] - - -def getNumOfPolygons(p_coordinates): - return len(p_coordinates) - -# Filters polygons to just those at the given indexes. - - -def select_polygon_coords(p_coordinates, indexes): - if indexes == None: - # The default - return p_coordinates +class StereoExceptions(Exception): + def __init__(self, message, stage, path=None, *args, **kwargs) -> None: + self.stage = stage + self.path = path + super().__init__(message, *args) + + @property + def summary(self) -> str: + """ + Returns a more comprehensive summary of the exception + """ + return f"'{self.args[0]}' (occured during stage '{self.stage}')" + +def estimate_pose_and_filter_single(camData: CameraData, corners, ids, charucoBoard): + objpoints = charucoBoard.chessboardCorners[ids] + + ini_threshold=5 + threshold = None + + objects = [] + all_objects = [] + while len(objects) < len(objpoints[:,0,0]) * camData['min_inliers']: + if ini_threshold > camData['max_threshold']: + break + ret, rvec, tvec, objects = cv2.solvePnPRansac(objpoints, corners, camData['intrinsics'], camData['dist_coeff'], flags = cv2.SOLVEPNP_P3P, reprojectionError = ini_threshold, iterationsCount = 10000, confidence = 0.9) + + if not ret: + raise RuntimeError('Exception') # TODO : Handle + + all_objects.append(objects) + imgpoints2 = objpoints.copy() + + all_corners2 = corners.copy() + all_corners2 = np.array([all_corners2[id[0]] for id in objects]) + imgpoints2 = np.array([imgpoints2[id[0]] for id in objects]) + + ret, rvec, tvec = cv2.solvePnP(imgpoints2, all_corners2, camData['intrinsics'], camData['dist_coeff']) + + ini_threshold += camData['threshold_stepper'] + + if ids is not None and corners.size > 0: # TODO : Try to remove the np reshaping + ids = 
ids.flatten() # Flatten the IDs from 2D to 1D + imgpoints2, _ = cv2.projectPoints(objpoints, rvec, tvec, camData['intrinsics'], camData['dist_coeff']) + corners2 = corners.reshape(-1, 2) + imgpoints2 = imgpoints2.reshape(-1, 2) + + errors = np.linalg.norm(corners2 - imgpoints2, axis=1) + if threshold == None: + threshold = max(2*np.median(errors), 150) + valid_mask = errors <= threshold + removed_mask = ~valid_mask + + # Collect valid IDs in the original format (array of arrays) + valid_ids = ids[valid_mask] + #filtered_ids.append(valid_ids.reshape(-1, 1).astype(np.int32)) # Reshape and store as array of arrays + + # Collect data for valid points + #filtered_corners.append(corners2[valid_mask].reshape(-1, 1, 2)) # Collect valid corners for calibration + + #removed_corners.extend(corners2[removed_mask]) + return corners2[valid_mask].reshape(-1, 1, 2), valid_ids.reshape(-1, 1).astype(np.int32), corners2[removed_mask] + +def get_features(config: CalibrationConfig, camData: CameraData) -> CameraData: + f = camData['size'][0] / (2 * np.tan(np.deg2rad(camData["hfov"]/2))) + + camData['intrinsics'] = np.array([ + [f, 0.0, camData['size'][0]/2], + [0.0, f, camData['size'][1]/2], + [0.0, 0.0, 1.0] + ]) + camData['dist_coeff'] = np.zeros((12, 1)) + + # check if there are any suspicious corners with high reprojection error + max_threshold = 75 + config.initialMaxThreshold * (camData['hfov']/ 30 + camData['size'][1] / 800 * 0.2) + threshold_stepper = int(1.5 * (camData['hfov'] / 30 + camData['size'][1] / 800)) + if threshold_stepper < 1: + threshold_stepper = 1 + min_inliers = 1 - config.initialMinFiltered * (camData['hfov'] / 60 + camData['size'][1] / 800 * 0.2) + camData['max_threshold'] = max_threshold + camData['threshold_stepper'] = threshold_stepper + camData['min_inliers'] = min_inliers + + return camData + +def estimate_pose_and_filter(camData: CameraData, allCorners, allIds, charucoBoard): + filtered_corners = [] + filtered_ids = [] + for corners, ids in 
zip(allCorners, allIds): + corners, ids, _ = estimate_pose_and_filter_single(camData, corners, ids, charucoBoard) + filtered_corners.append(corners) + filtered_ids.append(ids) + + return filtered_corners, filtered_ids + +def calibrate_charuco(camData: CameraData, allCorners, allIds, dataset: Dataset): + # TODO : If we still need this check it needs to be elsewhere + # if sum([len(corners) < 4 for corners in filteredCorners]) > 0.15 * len(filteredCorners): + # raise RuntimeError(f"More than 1/4 of images has less than 4 corners for {cam_info['name']}") + + distortion_flags = get_distortion_flags(camData['distortion_model']) + flags = cv2.CALIB_USE_INTRINSIC_GUESS + distortion_flags + + # Convert to int32 from uint32 # TODO : Shouldn't be necessary + for i, ids in enumerate(allIds): allIds[i] = ids.reshape(-1, 1).astype(np.int32) + + # Filter for only images with >6 corners # TODO : Shouldn't be in here, should be in a separate necessary filtering function + allCorners2 = [] + allIds2 = [] + + for corners, ids in zip(allCorners, allIds): + if len(ids) < 6: + continue + allCorners2.append(corners) + allIds2.append(ids) + + #try: + (ret, camera_matrix, distortion_coefficients, + rotation_vectors, translation_vectors, + stdDeviationsIntrinsics, stdDeviationsExtrinsics, + perViewErrors) = cv2.aruco.calibrateCameraCharucoExtended( + charucoCorners=allCorners2, + charucoIds=allIds2, + board=dataset.board.board, + imageSize=camData['size'], + cameraMatrix=camData['intrinsics'], + distCoeffs=camData['dist_coeff'], + flags=flags, + criteria=(cv2.TERM_CRITERIA_EPS & cv2.TERM_CRITERIA_COUNT, 1000, 1e-6)) + #except: + # return f"First intrisic calibration failed for {cam_info['size']}", None, None + + camData['intrinsics'] = camera_matrix + camData['dist_coeff'] = distortion_coefficients + camData['filtered_corners'] = allCorners2 + camData['filtered_ids'] = allIds2 + return camData + +def filter_features_fisheye(camData: CameraData, intrinsic_img, all_features, all_ids): + f = 
camData['size'][0] / (2 * np.tan(np.deg2rad(camData["hfov"]/2))) + print("INTRINSIC CALIBRATION") + cameraIntrinsics = np.array([[f, 0.0, camData['size'][0]/2], + [0.0, f, camData['size'][1]/2], + [0.0, 0.0, 1.0]]) + + distCoeff = np.zeros((12, 1)) + + if camData["name"] in intrinsic_img: + raise RuntimeError('This is broken') + all_features, all_ids, filtered_images = remove_features(filtered_features, filtered_ids, intrinsic_img[camData["name"]], image_files) + else: + filtered_images = camData['images_path'] + + filtered_features = all_features + filtered_ids = all_ids + + camData['filtered_ids'] = filtered_ids + camData['filtered_corners'] = filtered_features + camData['intrinsics'] = cameraIntrinsics + camData['dist_coeff'] = distCoeff + + return camData + +def calibrate_ccm_intrinsics_per_ccm(config: CalibrationConfig, camData: CameraData, dataset: Dataset): + start = time.time() + print('starting calibrate_wf') + ret, cameraIntrinsics, distCoeff, _, _, filtered_ids, filtered_corners, size, coverageImage, all_corners, all_ids = calibrate_wf_intrinsics(config, camData, dataset) + if isinstance(ret, str) and all_ids is None: + raise RuntimeError('Exception' + ret) # TODO : Handle + print(f'calibrate_wf took {round(time.time() - start, 2)}s') + + camData['intrinsics'] = cameraIntrinsics + camData['dist_coeff'] = distCoeff + camData['reprojection_error'] = ret + print("Reprojection error of {0}: {1}".format( + camData['name'], ret)) + + return camData + +def calibrate_ccm_intrinsics(config: CalibrationConfig, camData: CameraData): + imsize = camData['size'] + hfov = camData['hfov'] + name = camData['name'] + allCorners = camData['filtered_corners'] # TODO : I don't think this has a way to get here from one of the codepaths in matin in the else: + allIds = camData['filtered_ids'] + calib_model = camData['calib_model'] + distortionModel = camData['distortion_model'] + + coverageImage = np.ones(imsize[::-1], np.uint8) * 255 + coverageImage = 
cv2.cvtColor(coverageImage, cv2.COLOR_GRAY2BGR) + coverageImage = draw_corners(allCorners, coverageImage) + if calib_model == 'perspective': + distortion_flags = get_distortion_flags(distortionModel) + ret, camera_matrix, distortion_coefficients, rotation_vectors, translation_vectors, filtered_ids, filtered_corners, allCorners, allIds = calibrate_camera_charuco( + allCorners, allIds, imsize, distortion_flags, camData['intrinsics'], camData['dist_coeff']) + # undistort_visualization( + # self, image_files, camera_matrix, distortion_coefficients, imsize, name) + + return ret, camera_matrix, distortion_coefficients, rotation_vectors, translation_vectors, filtered_ids, filtered_corners, imsize, coverageImage, allCorners, allIds + else: + print('Fisheye--------------------------------------------------') + ret, camera_matrix, distortion_coefficients, rotation_vectors, translation_vectors, filtered_ids, filtered_corners = calibrate_fisheye( + config, allCorners, allIds, imsize, hfov, name) + # undistort_visualization( + # self, image_files, camera_matrix, distortion_coefficients, imsize, name) + print('Fisheye rotation vector', rotation_vectors[0]) + print('Fisheye translation vector', translation_vectors[0]) + + camData['filtered_ids'] = filtered_ids + camData['filtered_corners'] = filtered_corners + + camData['intrinsics'] = camera_matrix + camData['dist_coeff'] = distortion_coefficients + camData['reprojection_error'] = ret + print("Reprojection error of {0}: {1}".format( + camData['name'], ret)) + + return camData + +def calibrate_stereo_perspective(config: CalibrationConfig, obj_pts, allLeftCorners, allRightCorners, leftCamData: CameraData, rightCamData: CameraData): + cameraMatrix_l, distCoeff_l, cameraMatrix_r, distCoeff_r, left_distortion_model = leftCamData['intrinsics'], leftCamData['dist_coeff'], rightCamData['intrinsics'], rightCamData['dist_coeff'], leftCamData['distortion_model'] + specTranslation = leftCamData['extrinsics']['specTranslation'] + rot = 
leftCamData['extrinsics']['rotation'] + + t_in = np.array( + [specTranslation['x'], specTranslation['y'], specTranslation['z']], dtype=np.float32) + r_in = Rotation.from_euler( + 'xyz', [rot['r'], rot['p'], rot['y']], degrees=True).as_matrix().astype(np.float32) + + flags = 0 + # flags |= cv2.CALIB_USE_EXTRINSIC_GUESS + # print(flags) + flags = cv2.CALIB_FIX_INTRINSIC + distortion_flags = get_distortion_flags(left_distortion_model) + flags += distortion_flags + # print(flags) + ret, M1, d1, M2, d2, R, T, E, F, _ = cv2.stereoCalibrateExtended( + obj_pts, allLeftCorners, allRightCorners, + cameraMatrix_l, distCoeff_l, cameraMatrix_r, distCoeff_r, None, + R=r_in, T=t_in, criteria=config.stereoCalibCriteria, flags=flags) + + r_euler = Rotation.from_matrix(R).as_euler('xyz', degrees=True) + print(f'Epipolar error is {ret}') + print('Printing Extrinsics res...') + print(R) + print(T) + print(f'Euler angles in XYZ {r_euler} degs') + + R_l, R_r, P_l, P_r, Q, validPixROI1, validPixROI2 = cv2.stereoRectify( + cameraMatrix_l, + distCoeff_l, + cameraMatrix_r, + distCoeff_r, + None, R, T) # , alpha=0.1 + # self.P_l = P_l + # self.P_r = P_r + r_euler = Rotation.from_matrix(R_l).as_euler('xyz', degrees=True) + r_euler = Rotation.from_matrix(R_r).as_euler('xyz', degrees=True) + + return [ret, R, T, R_l, R_r, P_l, P_r] + +def calibrate_stereo_perspective_per_ccm(config: CalibrationConfig, obj_pts, left_corners_sampled, right_corners_sampled, left_cam_info, right_cam_info): + cameraMatrix_l, distCoeff_l, cameraMatrix_r, distCoeff_r = left_cam_info['intrinsics'], left_cam_info['dist_coeff'], right_cam_info['intrinsics'], right_cam_info['dist_coeff'] + specTranslation = left_cam_info['extrinsics']['specTranslation'] + rot = left_cam_info['extrinsics']['rotation'] + + t_in = np.array( + [specTranslation['x'], specTranslation['y'], specTranslation['z']], dtype=np.float32) + r_in = Rotation.from_euler( + 'xyz', [rot['r'], rot['p'], rot['y']], degrees=True).as_matrix().astype(np.float32) 
+ + flags = cv2.CALIB_FIX_INTRINSIC + ret, M1, d1, M2, d2, R, T, E, F, _ = cv2.stereoCalibrateExtended( + obj_pts, left_corners_sampled, right_corners_sampled, + np.eye(3), np.zeros(12), np.eye(3), np.zeros(12), None, + R=r_in, T=t_in, criteria=config.stereoCalibCriteria , flags=flags) + + r_euler = Rotation.from_matrix(R).as_euler('xyz', degrees=True) + scale = ((cameraMatrix_l[0][0]*cameraMatrix_r[0][0])) + print(f'Epipolar error without scale: {ret}') + print(f'Epipolar error with scale: {ret*np.sqrt(scale)}') + print('Printing Extrinsics res...') + print(R) + print(T) + print(f'Euler angles in XYZ {r_euler} degs') + R_l, R_r, P_l, P_r, Q, validPixROI1, validPixROI2 = cv2.stereoRectify( + cameraMatrix_l, + distCoeff_l, + cameraMatrix_r, + distCoeff_r, + None, R, T) # , alpha=0.1 + # self.P_l = P_l + # self.P_r = P_r + r_euler = Rotation.from_matrix(R_l).as_euler('xyz', degrees=True) + r_euler = Rotation.from_matrix(R_r).as_euler('xyz', degrees=True) + + # print(f'P_l is \n {P_l}') + # print(f'P_r is \n {P_r}') + return [ret, R, T, R_l, R_r, P_l, P_r] + +def calibrate_stereo_fisheye(config: CalibrationConfig, obj_pts, left_corners_sampled, right_corners_sampled, left_cam_info, right_cam_info): + cameraMatrix_l, distCoeff_l, cameraMatrix_r, distCoeff_r = left_cam_info['intrinsics'], left_cam_info['dist_coeff'], right_cam_info['intrinsics'], right_cam_info['dist_coeff'] + # make sure all images have the same *number of* points + min_num_points = min([len(pts) for pts in obj_pts]) + obj_pts_truncated = [pts[:min_num_points] for pts in obj_pts] + left_corners_truncated = [pts[:min_num_points] for pts in left_corners_sampled] + right_corners_truncated = [pts[:min_num_points] for pts in right_corners_sampled] + + flags = 0 + # flags |= cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC + # flags |= cv2.fisheye.CALIB_CHECK_COND + # flags |= cv2.fisheye.CALIB_FIX_SKEW + flags |= cv2.fisheye.CALIB_FIX_INTRINSIC + # flags |= cv2.fisheye.CALIB_FIX_K1 + # flags |= 
cv2.fisheye.CALIB_FIX_K2 + # flags |= cv2.fisheye.CALIB_FIX_K3 + # flags |= cv2.fisheye.CALIB_FIX_K4 + # flags |= cv2.CALIB_RATIONAL_MODEL + # flags |= cv2.CALIB_USE_INTRINSIC_GUESS + # flags |= cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC + # flags = cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC + cv2.fisheye.CALIB_CHECK_COND + cv2.fisheye.CALIB_FIX_SKEW + (ret, M1, d1, M2, d2, R, T), E, F = cv2.fisheye.stereoCalibrate( + obj_pts_truncated, left_corners_truncated, right_corners_truncated, + cameraMatrix_l, distCoeff_l, cameraMatrix_r, distCoeff_r, None, + flags=flags, criteria=config.stereoCalibCriteria), None, None + r_euler = Rotation.from_matrix(R).as_euler('xyz', degrees=True) + print(f'Reprojection error is {ret}') + isHorizontal = np.absolute(T[0]) > np.absolute(T[1]) + + R_l, R_r, P_l, P_r, Q = cv2.fisheye.stereoRectify( + cameraMatrix_l, + distCoeff_l, + cameraMatrix_r, + distCoeff_r, + None, R, T, flags=0) + + r_euler = Rotation.from_matrix(R_l).as_euler('xyz', degrees=True) + r_euler = Rotation.from_matrix(R_r).as_euler('xyz', degrees=True) + + return [ret, R, T, R_l, R_r, P_l, P_r] + +def find_stereo_common_features(leftCorners, leftIds, rightCorners, rightIds, board: CharucoBoard): + left_corners_sampled = [] + right_corners_sampled = [] + obj_pts = [] + failed = 0 + + for i, _ in enumerate(leftIds): # For ids in all images + commonIds = np.intersect1d(leftIds[i], rightIds[i]) + left_sub_corners = leftCorners[i][np.isin(leftIds[i], commonIds)] + right_sub_corners = rightCorners[i][np.isin(rightIds[i], commonIds)] + obj_pts_sub = board.board.chessboardCorners[commonIds] + + if len(commonIds) > 6: + obj_pts.append(obj_pts_sub) + left_corners_sampled.append(left_sub_corners) + right_corners_sampled.append(right_sub_corners) else: - print("Filtering polygons to those at indexes=", indexes) - return [p_coordinates[i] for i in indexes] - - -def image_filename(polygon_index, total_num_of_captured_images): - return 
"p{polygon_index}_{total_num_of_captured_images}.png".format(polygon_index=polygon_index, total_num_of_captured_images=total_num_of_captured_images) - - -def polygon_from_image_name(image_name): - """Returns the polygon index from an image name (ex: "left_p10_0.png" => 10)""" - return int(re.findall("p(\d+)", image_name)[0]) + failed += 1 + + if failed > len(leftIds) / 3: + raise RuntimeError('More than 1/3 of images had less than 6 common features found') + + return left_corners_sampled, right_corners_sampled, obj_pts + +def undistort_points_perspective(allCorners, camInfo): + return [cv2.undistortPoints(np.array(corners), camInfo['intrinsics'], camInfo['dist_coeff'], P=camInfo['intrinsics']) for corners in allCorners] + +def undistort_points_fisheye(allCorners, camInfo): + return [cv2.fisheye.undistortPoints(np.array(corners), camInfo['intrinsics'], camInfo['dist_coeff'], P=camInfo['intrinsics']) for corners in allCorners] + +def remove_and_filter_stereo_features(leftCamData: CameraData, rightCamData: CameraData, leftDataset: Dataset, rightDataset: Dataset): + leftCorners, leftIds = estimate_pose_and_filter(leftCamData, leftDataset.allCorners, leftDataset.allIds, leftDataset.board.board) + rightCorners, rightIds = estimate_pose_and_filter(rightCamData, rightDataset.allCorners, rightDataset.allIds, leftDataset.board.board) + + return leftCorners, leftIds, rightCorners, rightIds + +def calculate_epipolar_error(left_cam_info: CameraData, right_cam_info: CameraData, left_cam: Dataset, right_cam: Dataset, board_config, extrinsics): + if extrinsics[0] == -1: + return -1, extrinsics[1] + stereoConfig = None + if 'stereo_config' in board_config: + leftCamName = board_config['cameras'][board_config['stereo_config']['left_cam']]['name'] + rightCamName = board_config['cameras'][board_config['stereo_config']['right_cam']]['name'] + if leftCamName == left_cam.name and rightCamName == right_cam.name: # TODO : Is this supposed to take the last camera pair? 
def calculate_epipolar_error(left_cam_info: "CameraData", right_cam_info: "CameraData", left_cam: "Dataset", right_cam: "Dataset", board_config, extrinsics):
    """Store stereo-extrinsic results and the (scaled) epipolar error on the left camera.

    Args:
        left_cam_info, right_cam_info: per-camera calibration dicts (intrinsics, dist_coeff, ...).
        left_cam, right_cam: datasets the pair was calibrated from (used for name matching).
        board_config: full board configuration; may carry a 'stereo_config' entry.
        extrinsics: [ret, R, T, R_l, R_r, P_l, P_r] as returned by stereo calibration.

    Returns:
        (left_cam_info, stereoConfig) where stereoConfig holds the rectification
        rotations when this pair is the configured stereo pair, else None.
        Returns (-1, extrinsics[1]) if the upstream calibration failed.
    """
    if extrinsics[0] == -1:
        return -1, extrinsics[1]
    stereoConfig = None
    if 'stereo_config' in board_config:
        leftCamName = board_config['cameras'][board_config['stereo_config']['left_cam']]['name']
        rightCamName = board_config['cameras'][board_config['stereo_config']['right_cam']]['name']
        if leftCamName == left_cam.name and rightCamName == right_cam.name: # TODO : Is this supposed to take the last camera pair?
            stereoConfig = {
                'rectification_left': extrinsics[3],
                'rectification_right': extrinsics[4]
            }
        # Fix: compare against the dataset *names* -- the original compared the
        # name string to the Dataset object itself, so this branch never fired.
        elif leftCamName == right_cam.name and rightCamName == left_cam.name:
            stereoConfig = {
                'rectification_left': extrinsics[4],
                'rectification_right': extrinsics[3]
            }

    print('<-------------Epipolar error of {} and {} ------------>'.format(
        left_cam_info['name'], right_cam_info['name']))
    # Scale factor converts the normalized epipolar error into pixels.
    if left_cam_info['intrinsics'][0][0] < right_cam_info['intrinsics'][0][0]:
        scale = right_cam_info['intrinsics'][0][0]
    else:
        scale = left_cam_info['intrinsics'][0][0]
    if PER_CCM and EXTRINSICS_PER_CCM:
        # Mean of fx*fx' and fy*fy' across both cameras; sqrt applied below.
        scale = ((left_cam_info['intrinsics'][0][0]*right_cam_info['intrinsics'][0][0] + left_cam_info['intrinsics'][1][1]*right_cam_info['intrinsics'][1][1])/2)
        print(f"Epipolar error {extrinsics[0]*np.sqrt(scale)}")
        left_cam_info['extrinsics']['epipolar_error'] = extrinsics[0]*np.sqrt(scale)
    else:
        print(f"Epipolar error {extrinsics[0]}")
        left_cam_info['extrinsics']['epipolar_error'] = extrinsics[0]

    left_cam_info['extrinsics']['rotation_matrix'] = extrinsics[1]
    left_cam_info['extrinsics']['translation'] = extrinsics[2]

    return left_cam_info, stereoConfig

def get_distortion_flags(distortionModel: "DistortionModel"):
    """Translate a distortion-model selector into cv2 calibration flags.

    Accepted selectors:
      * None                  -> plain rational model.
      * a '01' bitmask string -> rational + tilted + thin-prism, with individual
                                 terms *fixed* per set bit (LSB = K1).
      * DistortionModel enum  -> preset flag combinations.
      * a plain int           -> used verbatim as the flag word.

    Raises:
        ValueError: for any other selector (the original silently fell through
        and crashed later with UnboundLocalError).
    """
    if distortionModel is None:
        print("Use DEFAULT model")
        flags = cv2.CALIB_RATIONAL_MODEL

    elif all(char in '01' for char in str(distortionModel)):
        flags = cv2.CALIB_RATIONAL_MODEL
        flags += cv2.CALIB_TILTED_MODEL
        flags += cv2.CALIB_THIN_PRISM_MODEL
        distFlags = int(distortionModel, 2)

        # Table-driven replacement for nine copy-pasted if-blocks; bit order
        # (LSB first) matches the original code exactly.
        bit_table = [
            ("FIX_K1", cv2.CALIB_FIX_K1),
            ("FIX_K2", cv2.CALIB_FIX_K2),
            ("FIX_K3", cv2.CALIB_FIX_K3),
            ("FIX_K4", cv2.CALIB_FIX_K4),
            ("FIX_K5", cv2.CALIB_FIX_K5),
            ("FIX_K6", cv2.CALIB_FIX_K6),
            ("FIX_TANGENT_DISTORTION", cv2.CALIB_ZERO_TANGENT_DIST),
            ("FIX_TILTED_DISTORTION", cv2.CALIB_FIX_TAUX_TAUY),
            ("FIX_PRISM_DISTORTION", cv2.CALIB_FIX_S1_S2_S3_S4),
        ]
        for bit, (label, flag) in enumerate(bit_table):
            if distFlags & (1 << bit):
                print(label)
                flags += flag

    elif distortionModel == DistortionModel.Normal:
        print("Using NORMAL model")
        flags = cv2.CALIB_RATIONAL_MODEL
        flags += cv2.CALIB_TILTED_MODEL

    elif distortionModel == DistortionModel.Tilted:
        print("Using TILTED model")
        flags = cv2.CALIB_RATIONAL_MODEL
        flags += cv2.CALIB_TILTED_MODEL

    elif distortionModel == DistortionModel.Prism:
        print("Using PRISM model")
        flags = cv2.CALIB_RATIONAL_MODEL
        flags += cv2.CALIB_TILTED_MODEL
        flags += cv2.CALIB_THIN_PRISM_MODEL

    elif distortionModel == DistortionModel.Thermal:
        print("Using THERMAL model")
        flags = cv2.CALIB_RATIONAL_MODEL
        flags += cv2.CALIB_FIX_K3
        flags += cv2.CALIB_FIX_K5
        flags += cv2.CALIB_FIX_K6

    elif isinstance(distortionModel, int):
        print("Using CUSTOM flags")
        flags = distortionModel
    else:
        # Robustness fix: fail loudly instead of returning an unbound name.
        raise ValueError(f"Unsupported distortion model selector: {distortionModel!r}")
    return flags
def calibrate_wf_intrinsics(config: "CalibrationConfig", camData: "CameraData", dataset: "Dataset"):
    """Run intrinsic calibration for one camera with either model.

    Dispatches to charuco (perspective) or fisheye calibration based on
    ``camData['calib_model']`` and also renders a corner-coverage image.

    Returns:
        (ret, intrinsics, dist_coeff, rvecs, tvecs, filtered_ids,
         filtered_corners, imsize, coverageImage, allCorners, allIds)
    """
    name = camData['name']
    allCorners = camData['filtered_corners']
    allIds = camData['filtered_ids']
    imsize = camData['size']
    hfov = camData['hfov']
    calib_model = camData['calib_model']
    distortionModel = camData['distortion_model']
    cameraIntrinsics = camData['intrinsics']
    distCoeff = camData['dist_coeff']

    # White canvas at sensor resolution with every detected corner drawn on it.
    coverageImage = np.ones(imsize[::-1], np.uint8) * 255
    coverageImage = cv2.cvtColor(coverageImage, cv2.COLOR_GRAY2BGR)
    coverageImage = draw_corners(allCorners, coverageImage)
    if calib_model == 'perspective':
        distortion_flags = get_distortion_flags(distortionModel)
        ret, cameraIntrinsics, distCoeff, rotation_vectors, translation_vectors, filtered_ids, filtered_corners, allCorners, allIds = calibrate_camera_charuco(
            allCorners, allIds, imsize, distortion_flags, cameraIntrinsics, distCoeff, dataset)

        return ret, cameraIntrinsics, distCoeff, rotation_vectors, translation_vectors, filtered_ids, filtered_corners, imsize, coverageImage, allCorners, allIds
    else:
        print('Fisheye--------------------------------------------------')
        ret, camera_matrix, distortion_coefficients, rotation_vectors, translation_vectors, filtered_ids, filtered_corners = calibrate_fisheye(
            config, allCorners, allIds, imsize, hfov, name)
        print('Fisheye rotation vector', rotation_vectors[0])
        print('Fisheye translation vector', translation_vectors[0])

        # (Height, width)
        return ret, camera_matrix, distortion_coefficients, rotation_vectors, translation_vectors, filtered_ids, filtered_corners, imsize, coverageImage, allCorners, allIds

def draw_corners(charuco_corners, displayframe):
    """Draw each image's corners in a random color plus a marker line at x=0.

    Returns the mutated ``displayframe``.
    """
    for corners in charuco_corners:
        # One random color per source image so overlapping captures are distinguishable.
        color = (int(np.random.randint(0, 255)), int(np.random.randint(0, 255)), int(np.random.randint(0, 255)))
        for corner in corners:
            corner_int = (int(corner[0][0]), int(corner[0][1]))
            cv2.circle(displayframe, corner_int, 4, color, -1)
    height, width = displayframe.shape[:2]
    start_point = (0, 0)  # top of the image
    end_point = (0, height)

    color = (0, 0, 0)  # black in BGR (original comment wrongly said blue)
    thickness = 4

    # Vertical reference line along the left edge.
    cv2.line(displayframe, start_point, end_point, color, thickness)
    return displayframe

def features_filtering_function(rvecs, tvecs, cameraMatrix, distCoeffs, filtered_corners, filtered_id, dataset: "Dataset", threshold = None):
    """Reject detected corners whose reprojection error exceeds a threshold.

    When ``threshold`` is None it is derived from the FIRST processed image as
    max(2*median(errors), 150) and then reused for all remaining images.
    Images with no ids / empty corners are skipped entirely (no entry appended).

    Returns:
        (kept_corners, kept_ids, kept_errors, removed_corners, removed_ids, removed_errors)
    """
    all_corners = []
    all_ids = []
    all_error = []
    removed_corners = []
    removed_ids = []
    removed_error = []
    # NOTE: dead debug accumulators from the original (whole_error, display_*,
    # reported_error, ...) were removed -- none of them were returned or used.
    for i, (corners, ids) in enumerate(zip(filtered_corners, filtered_id)):
        if ids is not None and corners.size > 0:
            ids = ids.flatten()  # flatten the IDs from 2D to 1D
            objPoints = np.array([dataset.board.board.chessboardCorners[id] for id in ids], dtype=np.float32)
            imgpoints2, _ = cv2.projectPoints(objPoints, rvecs[i], tvecs[i], cameraMatrix, distCoeffs)
            corners2 = corners.reshape(-1, 2)
            imgpoints2 = imgpoints2.reshape(-1, 2)

            errors = np.linalg.norm(corners2 - imgpoints2, axis=1)
            if threshold is None:  # fix: identity comparison with None, not '=='
                threshold = max(2*np.median(errors), 150)
            valid_mask = errors <= threshold
            removed_mask = ~valid_mask

            # Keep valid ids as column vectors to match the detector's output format.
            all_ids.append(ids[valid_mask].reshape(-1, 1).astype(np.int32))
            all_error.extend(errors[valid_mask])
            all_corners.append(corners2[valid_mask].reshape(-1, 1, 2))

            removed_corners.extend(corners2[removed_mask])
            removed_ids.extend(ids[removed_mask])
            removed_error.extend(errors[removed_mask])

    return all_corners, all_ids, all_error, removed_corners, removed_ids, removed_error
def camera_pose_charuco(objpoints: np.array, corners: np.array, ids: np.array, K: np.array, d: np.array, ini_threshold = 2, min_inliers = 0.95, threshold_stepper = 1, max_threshold = 50):
    """Estimate the board pose with RANSAC PnP, relaxing the reprojection
    threshold until at least ``min_inliers`` of the points are inliers (or the
    threshold exceeds ``max_threshold``).

    Args:
        objpoints: 3D board points, shape (N, 1, 3).
        corners: matching 2D image points.
        ids: charuco ids (unused here, kept for interface compatibility).
        K, d: camera intrinsics and distortion coefficients.

    Returns:
        (rvec, tvec) of the refined pose.

    Raises:
        RuntimeError: when PnP never succeeds.
    """
    objects = []
    while len(objects) < len(objpoints[:, 0, 0]) * min_inliers:
        if ini_threshold > max_threshold:
            break
        ret, rvec, tvec, objects = cv2.solvePnPRansac(objpoints, corners, K, d, flags=cv2.SOLVEPNP_P3P, reprojectionError=ini_threshold, iterationsCount=10000, confidence=0.9)

        if not ret:
            break

        # Refine on the inlier subset ("objects" holds inlier row indices).
        # Fix: renamed locals -- the original called the object-point subset
        # "imgpoints2", inviting an argument-order mistake in solvePnP.
        inlier_obj = np.array([objpoints[idx[0]] for idx in objects])
        inlier_img = np.array([corners[idx[0]] for idx in objects])

        ret, rvec, tvec = cv2.solvePnP(inlier_obj, inlier_img, K, d)
        # (A dead cv2.projectPoints call whose result was never read was removed.)

        ini_threshold += threshold_stepper
    if ret:
        return rvec, tvec
    else:
        # TODO : propagate a richer error; message added so failures are diagnosable.
        raise RuntimeError('camera_pose_charuco: solvePnP failed to find a board pose')

def compute_reprojection_errors(obj_pts: np.array, img_pts: np.array, K: np.array, dist: np.array, rvec: np.array, tvec: np.array, fisheye = False):
    """Per-point L2 reprojection error of ``obj_pts`` against ``img_pts``
    under the given pose and camera model."""
    if fisheye:
        proj_pts, _ = cv2.fisheye.projectPoints(obj_pts, rvec, tvec, K, dist)
    else:
        proj_pts, _ = cv2.projectPoints(obj_pts, rvec, tvec, K, dist)
    errs = np.linalg.norm(np.squeeze(proj_pts) - np.squeeze(img_pts), axis=1)
    return errs

def undistort_visualization(self, img_list, K, D, img_size, name):
    """Write an undistorted preview of the FIRST image in ``img_list`` to
    ``<data_path>/<name>_undistorted.png``.

    NOTE(review): the loop remaps every image but only index 0 is ever written --
    presumably a debugging leftover; behavior preserved.
    """
    for index, im in enumerate(img_list):
        img = cv2.imread(im)
        if self._cameraModel == 'perspective':
            # alpha=0: crop to only-valid pixels in the rectified image.
            kScaled, _ = cv2.getOptimalNewCameraMatrix(K, D, img_size, 0)
            map1, map2 = cv2.initUndistortRectifyMap(
                K, D, np.eye(3), kScaled, img_size, cv2.CV_32FC1)
        else:
            map1, map2 = cv2.fisheye.initUndistortRectifyMap(
                K, D, np.eye(3), K, img_size, cv2.CV_32FC1)

        undistorted_img = cv2.remap(
            img, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)

        if index == 0:
            # fix: dropped pointless f-prefix on a literal with no placeholders
            undistorted_file_path = self.data_path + '/' + name + '_undistorted.png'
            cv2.imwrite(undistorted_file_path, undistorted_img)
def filter_corner_outliers(config: "CalibrationConfig", allIds, allCorners, camera_matrix, distortion_coefficients, rotation_vectors, translation_vectors):
    """Remove corners whose reprojection error is a clear outlier.

    A point is "offending" when its error exceeds max(2*median, 100); removal
    only happens when offenders are a minority (< 20% of the image's corners).
    Mutates ``allIds``/``allCorners`` in place.

    Returns:
        (corners_removed, allIds, allCorners) -- flag plus the filtered lists.
    """
    corners_removed = False
    for i in range(len(allIds)):
        corners = allCorners[i]
        ids = allIds[i]
        objpts = config.board.chessboardCorners[ids]
        fisheye = config.cameraModel == "fisheye"
        errs = compute_reprojection_errors(objpts, corners, camera_matrix, distortion_coefficients, rotation_vectors[i], translation_vectors[i], fisheye=fisheye)
        suspicious_err_thr = max(2*np.median(errs), 100)
        offending_pts_idxs = np.where(errs > suspicious_err_thr)[0]
        n_offending_pts = len(offending_pts_idxs)
        # only remove when the offenders form a clear minority
        if 0 < n_offending_pts < len(corners)/5:
            print(f"removing {n_offending_pts} offending points with errs {errs[offending_pts_idxs]}")
            corners_removed = True
            allCorners[i] = np.delete(allCorners[i], offending_pts_idxs, axis=0)
            allIds[i] = np.delete(allIds[i], offending_pts_idxs, axis=0)
    return corners_removed, allIds, allCorners

def calibrate_camera_charuco(allCorners, allIds, imsize, distortion_flags, cameraIntrinsics, distCoeff, dataset: "Dataset"):
    """
    Calibrates the camera using the detected charuco corners.

    Runs a fixed number of rounds (6) of: filter features by reprojection error
    against the current intrinsics, then re-run
    cv2.aruco.calibrateCameraCharucoExtended with the refined guess.

    Returns:
        (ret, camera_matrix, dist_coeffs, rvecs, tvecs, filtered_ids,
         filtered_corners, allCorners, allIds)

    Raises:
        StereoExceptions: when the OpenCV calibration call fails.
    """
    import time

    # Tight threshold for the first filtering pass, scaled with image width.
    threshold = 2 * imsize[1] / 800.0
    # Per-image initial pose estimates, required by the feature filter.
    rvecs = []
    tvecs = []
    for corners, ids in zip(allCorners, allIds):
        objpts = dataset.board.board.chessboardCorners[ids]
        rvec, tvec = camera_pose_charuco(objpts, corners, ids, cameraIntrinsics, distCoeff)
        tvecs.append(tvec)
        rvecs.append(rvec)

    ret = 0.0
    flags = cv2.CALIB_USE_INTRINSIC_GUESS
    flags += distortion_flags

    camera_matrix = cameraIntrinsics
    distortion_coefficients = distCoeff
    rotation_vectors = rvecs
    translation_vectors = tvecs
    # NOTE: dead debug accumulators (intrinsic_array, distortion_array,
    # translation_array_*, reprojection, ...) and the pointless `if True:`
    # wrapper from the original were removed -- none of them were used.
    index = 0
    whole = time.time()
    while True:
        start = time.time()
        filtered_corners, filtered_ids, all_error, removed_corners, removed_ids, removed_error = features_filtering_function(
            rotation_vectors, translation_vectors, camera_matrix, distortion_coefficients, allCorners, allIds, threshold=threshold, dataset=dataset)
        print(f"Each filtering {time.time() - start}")
        start = time.time()
        try:
            (ret, camera_matrix, distortion_coefficients,
             rotation_vectors, translation_vectors,
             stdDeviationsIntrinsics, stdDeviationsExtrinsics,
             perViewErrors) = cv2.aruco.calibrateCameraCharucoExtended(
                charucoCorners=filtered_corners,
                charucoIds=filtered_ids,
                board=dataset.board.board,
                imageSize=imsize,
                cameraMatrix=cameraIntrinsics,
                distCoeffs=distCoeff,
                flags=flags,
                # Fix: combine the criteria types with '+'. The original used '&';
                # TERM_CRITERIA_EPS & TERM_CRITERIA_COUNT == 2 & 1 == 0, which
                # silently disabled both termination criteria.
                criteria=(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 50000, 1e-9))
        except Exception:  # fix: narrowed from a bare 'except:'
            print('failed on dataset', dataset.name)
            # fix: corrected "Intrisic" typo in the error message
            raise StereoExceptions(message="Intrinsic calibration failed", stage="intrinsic_calibration", element='', id=0)
        cameraIntrinsics = camera_matrix
        distCoeff = distortion_coefficients
        # Looser threshold for every pass after the first.
        threshold = 5 * imsize[1] / 800.0
        print(f"Each calibration {time.time()-start}")
        index += 1
        if index > 5:  # or (previous_ids == removed_ids and len(previous_ids) >= len(removed_ids) and index > 2):
            print(f"Whole procedure: {time.time() - whole}")
            break
    return ret, camera_matrix, distortion_coefficients, rotation_vectors, translation_vectors, filtered_ids, filtered_corners, allCorners, allIds
def calibrate_fisheye(config: "CalibrationConfig", allCorners, allIds, imsize, hfov, name):
    """Fisheye intrinsic calibration with progressive fallbacks.

    Strategy: (1) try a full-FOV calibration seeded from an HFOV-based focal
    guess; (2) on failure, retry on a progressively cropped central region
    (crop factor 0.95 down to 0.70) to obtain a usable K; (3) with that K,
    attempt the full FOV once more, keeping the cropped result if it fails.

    Returns:
        (res, K, d, rvecs, tvecs, filtered_ids, filtered_corners)

    Raises:
        Exception: when even the maximum crop fails to calibrate.
    """
    f_init = imsize[0] / np.deg2rad(hfov) * 1.15  # rough focal guess from HFOV

    cameraMatrixInit = np.array([[f_init, 0.,     imsize[0]/2],
                                 [0.,     f_init, imsize[1]/2],
                                 [0.,     0.,     1.]])
    distCoeffsInit = np.zeros((4, 1))
    # Per-image pose estimates on undistorted (normalized) corners, used to
    # identify corners with suspiciously high reprojection error.
    rvecs = []
    tvecs = []
    for corners, ids in zip(allCorners, allIds):
        objpts = config.board.chessboardCorners[ids]
        corners_undist = cv2.fisheye.undistortPoints(corners, cameraMatrixInit, distCoeffsInit)
        rvec, tvec = camera_pose_charuco(objpts, corners_undist, ids, np.eye(3), np.array((0.0, 0, 0, 0)))
        tvecs.append(tvec)
        rvecs.append(rvec)

    corners_removed, filtered_ids, filtered_corners = filter_corner_outliers(
        config, allIds, allCorners, cameraMatrixInit, distCoeffsInit, rvecs, tvecs)

    obj_points = [config.board.chessboardCorners[ids] for ids in filtered_ids]

    print("Camera Matrix initialization.............")
    print(cameraMatrixInit)
    flags = 0
    flags |= cv2.fisheye.CALIB_CHECK_COND
    flags |= cv2.fisheye.CALIB_USE_INTRINSIC_GUESS
    flags |= cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC
    flags |= cv2.fisheye.CALIB_FIX_SKEW

    term_criteria = (cv2.TERM_CRITERIA_COUNT +
                     cv2.TERM_CRITERIA_EPS, 30, 1e-9)
    try:
        res, K, d, rvecs, tvecs = cv2.fisheye.calibrate(obj_points, filtered_corners, None, cameraMatrixInit, distCoeffsInit, flags=flags, criteria=term_criteria)
    except Exception:  # fix: narrowed from bare 'except:' (cv2.error on CHECK_COND failure)
        # Full-FOV calibration failed: limit corners to a shrinking central
        # region until calibration succeeds, to find workable initial params.
        success = False
        crop = 0.95
        while not success:
            print(f"trying crop factor {crop}")
            obj_points_limited = []
            corners_limited = []
            for obj_pts, corners in zip(obj_points, filtered_corners):
                obj_points_tmp = []
                corners_tmp = []
                for obj_pt, corner in zip(obj_pts, corners):
                    check_x = imsize[0]*(1-crop) < corner[0, 0] < imsize[0]*crop
                    check_y = imsize[1]*(1-crop) < corner[0, 1] < imsize[1]*crop
                    if check_x and check_y:
                        obj_points_tmp.append(obj_pt)
                        corners_tmp.append(corner)
                obj_points_limited.append(np.array(obj_points_tmp))
                corners_limited.append(np.array(corners_tmp))
            try:
                res, K, d, rvecs, tvecs = cv2.fisheye.calibrate(obj_points_limited, corners_limited, None, cameraMatrixInit, distCoeffsInit, flags=flags, criteria=term_criteria)
                print(f"success with crop factor {crop}")
                success = True
                break
            except Exception:  # fix: narrowed from bare 'except:'
                print(f"failed with crop factor {crop}")
                if crop > 0.7:
                    crop -= 0.05
                else:
                    raise Exception("Calibration failed: Tried maximum crop factor and still no success")
        if success:
            # Retry full FOV once more, seeded with the cropped-FOV intrinsics.
            print(f"new K init {K}")
            print(f"new d_init {d}")
            try:
                res, K, d, rvecs, tvecs = cv2.fisheye.calibrate(obj_points, filtered_corners, imsize, K, distCoeffsInit, flags=flags, criteria=term_criteria)
            except Exception:  # fix: narrowed from bare 'except:'
                print(f"Failed the full res calib, using calibration with crop factor {crop}")

    return res, K, d, rvecs, tvecs, filtered_ids, filtered_corners

def proxy_estimate_pose_and_filter_single(camData, corners, ids, dataset):
    """Module-level wrapper so the filter can be dispatched via ParallelWorker.map
    with a plain Dataset argument (unwraps dataset.board.board)."""
    return estimate_pose_and_filter_single(camData, corners, ids, dataset.board.board)
has different dataset board') + raise RuntimeError('Extrinsic pair has different dataset board') + + pw = ParallelWorker(10) + camInfos = {} + stereoConfigs = [] + allExtrinsics = [] + + # Calibrate camera intrinsics for all provided datasets + for dataset in intrinsicCameras: + camData = [c for c in board_config['cameras'].values() if c['name'] == dataset.name][0] + + if "calib_model" in camData: + cameraModel_ccm, model_ccm = camData["calib_model"].split("_") + if cameraModel_ccm == "fisheye": + model_ccm == None + calib_model = cameraModel_ccm + distortion_model = model_ccm + else: + calib_model = camera_model + distortion_model = DistortionModel.Tilted # Use the tilted model by default + + camData['size'] = dataset.imageSize + camData['calib_model'] = calib_model + camData['distortion_model'] = distortion_model + + if PER_CCM: + camData = pw.run(get_features, config, camData) + if camera_model== "fisheye": + camData = pw.run(filter_features_fisheye, camData, dataset.allCorners, dataset.allIds) # TODO : Input types are wrong + elif dataset.enableFiltering: + corners, ids = pw.map(proxy_estimate_pose_and_filter_single, camData, dataset.allCorners, dataset.allIds, dataset)[:2] + else: + corners, ids = dataset.allCorners, dataset.allIds -class StereoExceptions(Exception): - def __init__(self, message, stage, path=None, *args, **kwargs) -> None: - self.stage = stage - self.path = path - super().__init__(message, *args, **kwargs) + camData = pw.run(calibrate_charuco, camData, corners, ids, dataset) + camData = pw.run(calibrate_ccm_intrinsics_per_ccm, config, camData, dataset) + camInfos[dataset.name] = camData + else: + camData = calibrate_ccm_intrinsics(config, camData) - @property - def summary(self) -> str: - """ - Returns a more comprehensive summary of the exception - """ - return f"'{self.args[0]}' (occured during stage '{self.stage}')" + for left, right in extrinsicPairs: + left_cam_info = camInfos[left.name] + right_cam_info = camInfos[right.name] + 
leftCorners, leftIds, rightCorners, rightIds = pw.run(remove_and_filter_stereo_features, left_cam_info, right_cam_info, left, right)[:4] -class StereoCalibration(object): - """Class to Calculate Calibration and Rectify a Stereo Camera.""" - - def __init__(self, traceLevel: float = 1.0, outputScaleFactor: float = 0.5, disableCamera: list = [], model = None,distortion_model = {}, filtering_enable = False, initial_max_threshold = 15, initial_min_filtered = 0.05, calibration_max_threshold = 10): - self.filtering_enable = filtering_enable - self.ccm_model = distortion_model - self.model = model - self.traceLevel = traceLevel - self.output_scale_factor = outputScaleFactor - self.disableCamera = disableCamera - self.errors = {} - self.initial_max_threshold = initial_max_threshold - self.initial_min_filtered = initial_min_filtered - self.calibration_max_threshold = calibration_max_threshold - self.calibration_min_filtered = initial_min_filtered - - """Class to Calculate Calibration and Rectify a Stereo Camera.""" - - def calibrate(self, board_config, filepath, square_size, mrk_size, squaresX, squaresY, camera_model, enable_disp_rectify, charucos = {}, intrinsic_img = {}, extrinsic_img = []): - """Function to calculate calibration for stereo camera.""" - start_time = time.time() - # init object data - if self.traceLevel == 2 or self.traceLevel == 10: - print(f'squareX is {squaresX}') - self.enable_rectification_disp = True - if intrinsic_img != {}: - for cam in intrinsic_img: - intrinsic_img[cam].sort(reverse=True) - if extrinsic_img != {}: - for cam in extrinsic_img: - extrinsic_img[cam].sort(reverse=True) - self.intrinsic_img = intrinsic_img - self.extrinsic_img = extrinsic_img - self.cameraModel = camera_model - self.cameraIntrinsics = {} - self.cameraDistortion = {} - self.distortion_model = {} - self.calib_model = {} - self.collected_features = {} - self.collected_ids = {} - self.all_features = {} - self.all_errors = {} - self.errors = {} - self.data_path = filepath - 
self.charucos = charucos - self.aruco_dictionary = aruco.Dictionary_get(aruco.DICT_4X4_1000) - self.squaresX = squaresX - self.squaresY = squaresY - self.board = aruco.CharucoBoard_create( - # 22, 16, - squaresX, squaresY, - square_size, - mrk_size, - self.aruco_dictionary) - - self.cams = [] - # parameters = aruco.DetectorParameters_create() - combinedCoverageImage = None - resizeWidth, resizeHeight = 1280, 800 - self.height = {} - self.width = {} - assert mrk_size != None, "ERROR: marker size not set" - for camera in board_config['cameras'].keys(): - cam_info = board_config['cameras'][camera] - if cam_info["name"] not in self.disableCamera: - images_path = filepath + '/' + cam_info['name'] - image_files = glob.glob(images_path + "/*") - image_files.sort() - for im in image_files: - frame = cv2.imread(im) - self.height[cam_info["name"]], self.width[cam_info["name"]], _ = frame.shape - widthRatio = resizeWidth / self.width[cam_info["name"]] - heightRatio = resizeHeight / self.height[cam_info["name"]] - if (widthRatio > 0.8 and heightRatio > 0.8 and widthRatio <= 1.0 and heightRatio <= 1.0) or (widthRatio > 1.2 and heightRatio > 1.2) or (resizeHeight == 0): - resizeWidth = self.width[cam_info["name"]] - resizeHeight = self.height[cam_info["name"]] - break - for camera in board_config['cameras'].keys(): - cam_info = board_config['cameras'][camera] - self.id = cam_info["name"] - if cam_info["name"] not in self.disableCamera: - print( - '<------------Calibrating {} ------------>'.format(cam_info['name'])) - images_path = filepath + '/' + cam_info['name'] - if "calib_model" in cam_info.keys(): - self.cameraModel_ccm, self.model_ccm = cam_info["calib_model"].split("_") - if self.cameraModel_ccm == "fisheye": - self.model_ccm == None - self.calib_model[cam_info["name"]] = self.cameraModel_ccm - self.distortion_model[cam_info["name"]] = self.model_ccm - else: - self.calib_model[cam_info["name"]] = self.cameraModel - if cam_info["name"] in self.ccm_model: - 
self.distortion_model[cam_info["name"]] = self.ccm_model[cam_info["name"]] - else: - self.distortion_model[cam_info["name"]] = self.model - - - features = None - self.img_path = glob.glob(images_path + "/*") - if charucos == {}: - self.img_path = sorted(self.img_path, key=lambda x: int(x.split('_')[1])) - else: - self.img_path.sort() - cam_info["img_path"] = self.img_path - self.name = cam_info["name"] - if per_ccm: - all_features, all_ids, imsize = self.getting_features(images_path, cam_info["name"], features=features) - if isinstance(all_features, str) and all_ids is None: - if cam_info["name"] not in self.errors.keys(): - self.errors[cam_info["name"]] = [] - self.errors[cam_info["name"]].append(all_features) - continue - cam_info["imsize"] = imsize - - f = imsize[0] / (2 * np.tan(np.deg2rad(cam_info["hfov"]/2))) - print("INTRINSIC CALIBRATION") - cameraMatrixInit = np.array([[f, 0.0, imsize[0]/2], - [0.0, f, imsize[1]/2], - [0.0, 0.0, 1.0]]) - if cam_info["name"] not in self.cameraIntrinsics.keys(): - self.cameraIntrinsics[cam_info["name"]] = cameraMatrixInit - - if self.traceLevel == 3 or self.traceLevel == 10: - print( - f'Camera Matrix initialization with HFOV of {cam_info["name"]} is.............') - print(cameraMatrixInit) - distCoeffsInit = np.zeros((12, 1)) - if cam_info["name"] not in self.cameraDistortion: - self.cameraDistortion[cam_info["name"]] = distCoeffsInit - - if cam_info["name"] in self.intrinsic_img: - all_features, all_ids, filtered_images = self.remove_features(filtered_features, filtered_ids, self.intrinsic_img[cam_info["name"]], image_files) - else: - filtered_images = images_path - current_time = time.time() - if self.cameraModel != "fisheye": - print("Filtering corners") - removed_features, filtered_features, filtered_ids = self.filtering_features(all_features, all_ids, cam_info["name"],imsize,cam_info["hfov"], cameraMatrixInit, distCoeffsInit) - - if filtered_features is None: - if cam_info["name"] not in self.errors.keys(): - 
self.errors[cam_info["name"]] = [] - self.errors[cam_info["name"]].append(removed_features) - continue - - print(f"Filtering takes: {time.time()-current_time}") - if cam_info["name"] not in self.collected_features.keys(): - self.collected_features[cam_info["name"]] = filtered_features - if cam_info["name"] not in self.collected_ids.keys(): - self.collected_ids[cam_info["name"]] = filtered_ids - else: - filtered_features = all_features - filtered_ids = all_ids - - cam_info['filtered_ids'] = filtered_ids - cam_info['filtered_corners'] = filtered_features - - ret, intrinsics, dist_coeff, _, _, filtered_ids, filtered_corners, size, coverageImage, all_corners, all_ids = self.calibrate_wf_intrinsics(cam_info["name"], all_features, all_ids, filtered_features, filtered_ids, cam_info["imsize"], cam_info["hfov"], features, filtered_images) - if isinstance(ret, str) and all_ids is None: - if cam_info["name"] not in self.errors.keys(): - self.errors[cam_info["name"]] = [] - self.errors[cam_info["name"]].append(ret) - continue - else: - ret, intrinsics, dist_coeff, _, _, filtered_ids, filtered_corners, size, coverageImage, all_corners, all_ids = self.calibrate_intrinsics( - images_path, cam_info['hfov'], cam_info["name"]) - cam_info['filtered_ids'] = filtered_ids - cam_info['filtered_corners'] = filtered_corners - self.cameraIntrinsics[cam_info["name"]] = intrinsics - self.cameraDistortion[cam_info["name"]] = dist_coeff - cam_info['intrinsics'] = intrinsics - cam_info['dist_coeff'] = dist_coeff - cam_info['size'] = size # (Width, height) - cam_info['reprojection_error'] = ret - print("Reprojection error of {0}: {1}".format( - cam_info['name'], ret)) - if self.traceLevel == 3 or self.traceLevel == 10: - print("Estimated intrinsics of {0}: \n {1}".format( - cam_info['name'], intrinsics)) - - coverage_name = cam_info['name'] - print_text = f'Coverage Image of {coverage_name} with reprojection error of {round(ret,5)}' - height, width, _ = coverageImage.shape - - if width > 
resizeWidth and height > resizeHeight: - coverageImage = cv2.resize( - coverageImage, (0, 0), fx= resizeWidth / width, fy= resizeWidth / width) - - height, width, _ = coverageImage.shape - if height > resizeHeight: - height_offset = (height - resizeHeight)//2 - coverageImage = coverageImage[height_offset:height_offset+resizeHeight, :] - - height, width, _ = coverageImage.shape - height_offset = (resizeHeight - height)//2 - width_offset = (resizeWidth - width)//2 - subImage = np.pad(coverageImage, ((height_offset, height_offset), (width_offset, width_offset), (0, 0)), 'constant', constant_values=0) - cv2.putText(subImage, print_text, (50, 50+height_offset), cv2.FONT_HERSHEY_SIMPLEX, 2*coverageImage.shape[0]/1750, (0, 0, 0), 2) - if combinedCoverageImage is None: - combinedCoverageImage = subImage - else: - combinedCoverageImage = np.hstack((combinedCoverageImage, subImage)) - coverage_file_path = filepath + '/' + coverage_name + '_coverage.png' - - cv2.imwrite(coverage_file_path, subImage) - if self.errors != {}: - string = "" - for key in self.errors.keys(): - string += self.errors[key][0] + "\n" - raise StereoExceptions(message=string, stage="intrinsic") - - combinedCoverageImage = cv2.resize(combinedCoverageImage, (0, 0), fx=self.output_scale_factor, fy=self.output_scale_factor) - if enable_disp_rectify: - # cv2.imshow('coverage image', combinedCoverageImage) - cv2.waitKey(1) - cv2.destroyAllWindows() - - for camera in board_config['cameras'].keys(): - left_cam_info = board_config['cameras'][camera] - if str(left_cam_info["name"]) not in self.disableCamera: - if 'extrinsics' in left_cam_info: - if 'to_cam' in left_cam_info['extrinsics']: - left_cam = camera - right_cam = left_cam_info['extrinsics']['to_cam'] - left_path = filepath + '/' + left_cam_info['name'] - - right_cam_info = board_config['cameras'][left_cam_info['extrinsics']['to_cam']] - if str(right_cam_info["name"]) not in self.disableCamera: - right_path = filepath + '/' + right_cam_info['name'] - 
print('<-------------Extrinsics calibration of {} and {} ------------>'.format( - left_cam_info['name'], right_cam_info['name'])) - - specTranslation = left_cam_info['extrinsics']['specTranslation'] - rot = left_cam_info['extrinsics']['rotation'] - - translation = np.array( - [specTranslation['x'], specTranslation['y'], specTranslation['z']], dtype=np.float32) - rotation = Rotation.from_euler( - 'xyz', [rot['r'], rot['p'], rot['y']], degrees=True).as_matrix().astype(np.float32) - if per_ccm and extrinsic_per_ccm: - if left_cam_info["name"] in self.extrinsic_img or right_cam_info["name"] in self.extrinsic_img: - if left_cam_info["name"] in self.extrinsic_img: - array = self.extrinsic_img[left_cam_info["name"]] - elif right_cam_info["name"] in self.extrinsic_img: - array = self.extrinsic_img[left_cam_info["name"]] - left_cam_info['filtered_corners'], left_cam_info['filtered_ids'], filtered_images = self.remove_features(left_cam_info['filtered_corners'], left_cam_info['filtered_ids'], array) - right_cam_info['filtered_corners'], right_cam_info['filtered_ids'], filtered_images = self.remove_features(right_cam_info['filtered_corners'], right_cam_info['filtered_ids'], array) - removed_features, left_cam_info['filtered_corners'], left_cam_info['filtered_ids'] = self.filtering_features(left_cam_info['filtered_corners'], left_cam_info['filtered_ids'], left_cam_info["name"],left_cam_info["imsize"],left_cam_info["hfov"], self.cameraIntrinsics["name"], self.cameraDistortion["name"]) - removed_features, right_cam_info['filtered_corners'], right_cam_info['filtered_ids'] = self.filtering_features(right_cam_info['filtered_corners'], right_cam_info['filtered_ids'], right_cam_info["name"], right_cam_info["imsize"], right_cam_info["hfov"], self.cameraIntrinsics["name"], self.cameraDistortion["name"]) - - extrinsics = self.calibrate_stereo(left_cam_info['name'], right_cam_info['name'], left_cam_info['filtered_ids'], left_cam_info['filtered_corners'], right_cam_info['filtered_ids'], 
right_cam_info['filtered_corners'], left_cam_info['intrinsics'], left_cam_info[ - 'dist_coeff'], right_cam_info['intrinsics'], right_cam_info['dist_coeff'], translation, rotation, features) - if extrinsics[0] == -1: - return -1, extrinsics[1] - - if board_config['stereo_config']['left_cam'] == left_cam and board_config['stereo_config']['right_cam'] == right_cam: - board_config['stereo_config']['rectification_left'] = extrinsics[3] - board_config['stereo_config']['rectification_right'] = extrinsics[4] + left_corners_sampled, right_corners_sampled, obj_pts= pw.run(find_stereo_common_features, leftCorners, leftIds, rightCorners, rightIds, left.board)[:3] - elif board_config['stereo_config']['left_cam'] == right_cam and board_config['stereo_config']['right_cam'] == left_cam: - board_config['stereo_config']['rectification_left'] = extrinsics[4] - board_config['stereo_config']['rectification_right'] = extrinsics[3] - - """ for stereoObj in board_config['stereo_config']: - - if stereoObj['left_cam'] == left_cam and stereoObj['right_cam'] == right_cam and stereoObj['main'] == 1: - stereoObj['rectification_left'] = extrinsics[3] - stereoObj['rectification_right'] = extrinsics[4] """ - - print('<-------------Epipolar error of {} and {} ------------>'.format( - left_cam_info['name'], right_cam_info['name'])) - #print(f"dist {left_cam_info['name']}: {left_cam_info['dist_coeff']}") - #print(f"dist {right_cam_info['name']}: {right_cam_info['dist_coeff']}") - if left_cam_info['intrinsics'][0][0] < right_cam_info['intrinsics'][0][0]: - scale = right_cam_info['intrinsics'][0][0] - else: - scale = left_cam_info['intrinsics'][0][0] - if per_ccm and extrinsic_per_ccm: - scale = ((left_cam_info['intrinsics'][0][0]*right_cam_info['intrinsics'][0][0] + left_cam_info['intrinsics'][1][1]*right_cam_info['intrinsics'][1][1])/2) - print(f"Epipolar error {extrinsics[0]*np.sqrt(scale)}") - left_cam_info['extrinsics']['epipolar_error'] = extrinsics[0]*np.sqrt(scale) - 
left_cam_info['extrinsics']['stereo_error'] = extrinsics[0]*np.sqrt(scale) - else: - print(f"Epipolar error {extrinsics[0]}") - left_cam_info['extrinsics']['epipolar_error'] = extrinsics[0] - left_cam_info['extrinsics']['stereo_error'] = extrinsics[0] - """self.test_epipolar_charuco(left_cam_info['name'], - right_cam_info['name'], - left_path, - right_path, - left_cam_info['intrinsics'], - left_cam_info['dist_coeff'], - right_cam_info['intrinsics'], - right_cam_info['dist_coeff'], - extrinsics[2], # Translation between left and right Cameras - extrinsics[3], # Left Rectification rotation - extrinsics[4]) # Right Rectification rotation""" - - - left_cam_info['extrinsics']['rotation_matrix'] = extrinsics[1] - left_cam_info['extrinsics']['translation'] = extrinsics[2] - - return 1, board_config - - def getting_features(self, img_path, name, features = None): - if self.charucos != {}: - allCorners = [] - allIds = [] - for index, charuco_img in enumerate(self.charucos[name]): - ids, charucos = charuco_img - allCorners.append(charucos) - allIds.append(ids) - imsize = (self.width[name], self.height[name]) - return allCorners, allIds, imsize - - elif features == None or features == "charucos": - img_path = glob.glob(img_path + "/*") - img_path.sort() - allCorners, allIds, _, _, imsize, _ = self.analyze_charuco(img_path) - return allCorners, allIds, imsize - - if features == "checker_board": - img_path = glob.glob(img_path + "/*") - img_path.sort() - allCorners, allIds, _, _, imsize, _ = self.analyze_charuco(img_path) - return allCorners, allIds, imsize - ###### ADD HERE WHAT IT IS NEEDED ###### - - def filtering_features(self,allCorners, allIds, name,imsize, hfov, cameraMatrixInit, distCoeffsInit): - - # check if there are any suspicious corners with high reprojection error - rvecs = [] - tvecs = [] - index = 0 - self.index = 0 - max_threshold = 75 + self.initial_max_threshold * (hfov / 30 + imsize[1] / 800 * 0.2) - threshold_stepper = int(1.5 * (hfov / 30 + imsize[1] / 
800)) - if threshold_stepper < 1: - threshold_stepper = 1 - print(threshold_stepper) - min_inlier = 1 - self.initial_min_filtered * (hfov / 60 + imsize[1] / 800 * 0.2) - overall_pose = time.time() - for index, corners in enumerate(allCorners): - if len(corners) < 4: - return f"Less than 4 corners detected on {index} image.", None, None - for corners, ids in zip(allCorners, allIds): - current = time.time() - self.index = index - objpts = self.charuco_ids_to_objpoints(ids) - rvec, tvec, newids = self.camera_pose_charuco(objpts, corners, ids, cameraMatrixInit, distCoeffsInit, max_threshold = max_threshold, min_inliers=min_inlier, ini_threshold = 5, threshold_stepper=threshold_stepper) - #allCorners[index] = np.array([corners[id[0]-1] for id in newids]) - #allIds[index] = np.array([ids[id[0]-1] for id in newids]) - tvecs.append(tvec) - rvecs.append(rvec) - print(f"Pose estimation {index}, {time.time() -current}s") - index += 1 - print(f"Overall pose estimation {time.time() - overall_pose}s") - - # Here we need to get initialK and parameters for each camera ready and fill them inside reconstructed reprojection error per point - ret = 0.0 - distortion_flags = self.get_distortion_flags(name) - flags = cv2.CALIB_USE_INTRINSIC_GUESS + distortion_flags - current = time.time() - filtered_corners, filtered_ids,all_error, removed_corners, removed_ids, removed_error = self.features_filtering_function(rvecs, tvecs, cameraMatrixInit, distCoeffsInit, ret, allCorners, allIds, camera = name) - corner_detector = filtered_corners.copy() - for index, corners in enumerate(filtered_corners): - if len(corners) < 4: - corner_detector.remove(corners) - if len(corner_detector) < int(len(self.img_path)*0.75): - return f"More than 1/4 of images has less than 4 corners for {name}", None, None - - - print(f"Filtering {time.time() -current}s") - try: - (ret, camera_matrix, distortion_coefficients, - rotation_vectors, translation_vectors, - stdDeviationsIntrinsics, stdDeviationsExtrinsics, - 
perViewErrors) = cv2.aruco.calibrateCameraCharucoExtended( - charucoCorners=filtered_corners, - charucoIds=filtered_ids, - board=self.board, - imageSize=imsize, - cameraMatrix=cameraMatrixInit, - distCoeffs=distCoeffsInit, - flags=flags, - criteria=(cv2.TERM_CRITERIA_EPS & cv2.TERM_CRITERIA_COUNT, 1000, 1e-6)) - except: - return f"First intrisic calibration failed for {name}", None, None - - self.cameraIntrinsics[name] = camera_matrix - self.cameraDistortion[name] = distortion_coefficients - return removed_corners, filtered_corners, filtered_ids - - def remove_features(self, allCorners, allIds, array, img_files = None): - filteredCorners = allCorners.copy() - filteredIds = allIds.copy() - if img_files is not None: - img_path = img_files.copy() - - for index in array: - filteredCorners.pop(index) - filteredIds.pop(index) - if img_files is not None: - img_path.pop(index) - - return filteredCorners, filteredIds, img_path - - def get_distortion_flags(self,name): - def is_binary_string(s: str) -> bool: - # Check if all characters in the string are '0' or '1' - return all(char in '01' for char in s) - if self.distortion_model[name] == None: - print("Use DEFAULT model") - flags = cv2.CALIB_RATIONAL_MODEL - elif is_binary_string(self.distortion_model[name]): - flags = cv2.CALIB_RATIONAL_MODEL - flags += cv2.CALIB_TILTED_MODEL - flags += cv2.CALIB_THIN_PRISM_MODEL - binary_number = int(self.distortion_model[name], 2) - # Print the results - if binary_number == 0: - clauses_status = [True, True,True, True, True, True, True, True, True] - else: - clauses_status = [(binary_number & (1 << i)) != 0 for i in range(len(self.distortion_model[name]))] - clauses_status = clauses_status[::-1] - if clauses_status[0]: - print("FIX_K1") - flags += cv2.CALIB_FIX_K1 - if clauses_status[1]: - print("FIX_K2") - flags += cv2.CALIB_FIX_K2 - if clauses_status[2]: - print("FIX_K3") - flags += cv2.CALIB_FIX_K3 - if clauses_status[3]: - print("FIX_K4") - flags += cv2.CALIB_FIX_K4 - if 
clauses_status[4]: - print("FIX_K5") - flags += cv2.CALIB_FIX_K5 - if clauses_status[5]: - print("FIX_K6") - flags += cv2.CALIB_FIX_K6 - if clauses_status[6]: - print("FIX_TANGENT_DISTORTION") - flags += cv2.CALIB_ZERO_TANGENT_DIST - if clauses_status[7]: - print("FIX_TILTED_DISTORTION") - flags += cv2.CALIB_FIX_TAUX_TAUY - if clauses_status[8]: - print("FIX_PRISM_DISTORTION") - flags += cv2.CALIB_FIX_S1_S2_S3_S4 - - elif isinstance(self.distortion_model[name], str): - if self.distortion_model[name] == "NORMAL": - print("Using NORMAL model") - flags = cv2.CALIB_RATIONAL_MODEL - flags += cv2.CALIB_TILTED_MODEL - - elif self.distortion_model[name] == "TILTED": - print("Using TILTED model") - flags = cv2.CALIB_RATIONAL_MODEL - flags += cv2.CALIB_TILTED_MODEL - - elif self.distortion_model[name] == "PRISM": - print("Using PRISM model") - flags = cv2.CALIB_RATIONAL_MODEL - flags += cv2.CALIB_TILTED_MODEL - flags += cv2.CALIB_THIN_PRISM_MODEL - - elif self.distortion_model[name] == "THERMAL": - print("Using THERMAL model") - flags = cv2.CALIB_RATIONAL_MODEL - flags += cv2.CALIB_FIX_K3 - flags += cv2.CALIB_FIX_K5 - flags += cv2.CALIB_FIX_K6 - - elif isinstance(self.distortion_model[name], int): - print("Using CUSTOM flags") - flags = self.distortion_model[name] - return flags - - def calibrate_wf_intrinsics(self, name, all_Features, all_features_Ids, allCorners, allIds, imsize, hfov, features, image_files): - image_files = glob.glob(image_files + "/*") - image_files.sort() - coverageImage = np.ones(imsize[::-1], np.uint8) * 255 - coverageImage = cv2.cvtColor(coverageImage, cv2.COLOR_GRAY2BGR) - coverageImage = self.draw_corners(allCorners, coverageImage) - if self.calib_model[name] == 'perspective': - if features == None or features == "charucos": - distortion_flags = self.get_distortion_flags(name) - ret, camera_matrix, distortion_coefficients, rotation_vectors, translation_vectors, filtered_ids, filtered_corners, allCorners, allIds = self.calibrate_camera_charuco( - 
all_Features, all_features_Ids,allCorners, allIds, imsize, hfov, name, distortion_flags) - if self.charucos == {}: - self.undistort_visualization( - image_files, camera_matrix, distortion_coefficients, imsize, name) - - return ret, camera_matrix, distortion_coefficients, rotation_vectors, translation_vectors, filtered_ids, filtered_corners, imsize, coverageImage, allCorners, allIds - else: - return ret, camera_matrix, distortion_coefficients, rotation_vectors, translation_vectors, filtered_ids, filtered_corners, imsize, coverageImage, allCorners, allIds - #### ADD ADDITIONAL FEATURES CALIBRATION #### - else: - if features == None or features == "charucos": - print('Fisheye--------------------------------------------------') - ret, camera_matrix, distortion_coefficients, rotation_vectors, translation_vectors, filtered_ids, filtered_corners = self.calibrate_fisheye( - allCorners, allIds, imsize, hfov, name) - self.undistort_visualization( - image_files, camera_matrix, distortion_coefficients, imsize, name) - print('Fisheye rotation vector', rotation_vectors[0]) - print('Fisheye translation vector', translation_vectors[0]) - - # (Height, width) - return ret, camera_matrix, distortion_coefficients, rotation_vectors, translation_vectors, filtered_ids, filtered_corners, imsize, coverageImage, allCorners, allIds - - def draw_corners(self, charuco_corners, displayframe): - for corners in charuco_corners: - color = (int(np.random.randint(0, 255)), int(np.random.randint(0, 255)), int(np.random.randint(0, 255))) - for corner in corners: - corner_int = (int(corner[0][0]), int(corner[0][1])) - cv2.circle(displayframe, corner_int, 4, color, -1) - height, width = displayframe.shape[:2] - start_point = (0, 0) # top of the image - end_point = (0, height) - - color = (0, 0, 0) # blue in BGR - thickness = 4 - - # Draw the line on the image - cv2.line(displayframe, start_point, end_point, color, thickness) - return displayframe - - def features_filtering_function(self,rvecs, tvecs, 
cameraMatrix, distCoeffs, reprojection, filtered_corners,filtered_id, camera, display = True, threshold = None, draw_quadrants = False, nx = 4, ny = 4): - whole_error = [] - all_points = [] - all_corners = [] - all_error = [] - all_ids = [] - removed_corners = [] - removed_points = [] - removed_ids = [] - removed_error = [] - display_corners = [] - display_points = [] - circle_size = 0 - reported_error = [] - for i, (corners, ids, frame_path) in enumerate(zip(filtered_corners, filtered_id, self.img_path)): - frame = cv2.imread(frame_path) - if ids is not None and corners.size > 0: - ids = ids.flatten() # Flatten the IDs from 2D to 1D - objPoints = np.array([self.board.chessboardCorners[id] for id in ids], dtype=np.float32) - imgpoints2, _ = cv2.projectPoints(objPoints, rvecs[i], tvecs[i], cameraMatrix, distCoeffs) - corners2 = corners.reshape(-1, 2) - imgpoints2 = imgpoints2.reshape(-1, 2) - - errors = np.linalg.norm(corners2 - imgpoints2, axis=1) - if threshold == None: - threshold = max(2*np.median(errors), 150) - valid_mask = errors <= threshold - removed_mask = ~valid_mask - - # Collect valid IDs in the original format (array of arrays) - valid_ids = ids[valid_mask] - all_ids.append(valid_ids.reshape(-1, 1).astype(np.int32)) # Reshape and store as array of arrays - - # Collect data for valid points - reported_error.extend(errors) - all_error.extend(errors[valid_mask]) - display_corners.extend(corners2) - display_points.extend(imgpoints2[valid_mask]) - all_points.append(imgpoints2[valid_mask]) # Collect valid points for calibration - all_corners.append(corners2[valid_mask].reshape(-1, 1, 2)) # Collect valid corners for calibration - - removed_corners.extend(corners2[removed_mask]) - removed_points.extend(imgpoints2[removed_mask]) - removed_ids.extend(ids[removed_mask]) - removed_error.extend(errors[removed_mask]) - - total_error_squared = np.sum(errors[valid_mask]**2) - total_points = len(objPoints[valid_mask]) - rms_error = np.sqrt(total_error_squared / 
total_points if total_points else 0) - whole_error.append(rms_error) - - if self.traceLevel in {2, 4, 10}: - print(f"Overall RMS re-projection error for frame {i}: {rms_error}") - - total_error_squared = 0 - total_points = 0 - - if self.traceLevel == 8 or self.traceLevel == 10: - centroid_x = np.mean(np.array(display_corners).T[0]) - centroid_y = np.mean(np.array(display_corners).T[1]) - - # Calculate distances from the center - distances = [distance((centroid_x, centroid_y), point) for point in np.array(corners2)] - max_distance = max(distances) - if max_distance > circle_size: - circle_size = max_distance - circle = plt.Circle((centroid_x, centroid_y), circle_size, color='black', fill=True, label = "Calibrated area", alpha = 0.2) - fig, ax = plt.subplots() - ax.add_artist(circle) - ax.set_title(f"Reprojection error for frame {i}, camera {camera}") - ax.scatter(np.array(corners2).T[0], np.array(corners2).T[1], label = "Original", alpha = 0.5, color = "Black") - img = ax.scatter(np.array(imgpoints2).T[0], np.array(imgpoints2).T[1], c=errors, cmap = GnRd, label = "Reprojected", vmin=0, vmax=threshold) - ax.imshow(frame, alpha = 0.5) - ax.plot([],[], label = f"Rerprojection Remade: {round(rms_error, 4)}", color = "white") - ax.plot([],[], label = f"Whole Rerprojection OpenCV: {round(reprojection, 4)}", color = "white") - cbar = plt.colorbar(img, ax=ax) - cbar.set_label("Reprojection error") - ax.set_xlabel('Width') - ax.set_xlim([0,self.width[camera]]) - ax.set_ylim([0,self.height[camera]]) - ax.legend() - ax.set_ylabel('Y coordinate') - plt.grid() - plt.show() - - if self.traceLevel == 9 or self.traceLevel == 10: - centroid_x = np.mean(np.array(removed_corners).T[0]) - centroid_y = np.mean(np.array(removed_corners).T[1]) - - # Calculate distances from the center - distances = [distance((centroid_x, centroid_y), point) for point in np.array(corners2[removed_mask])] - max_distance = max(distances) - if max_distance > circle_size: - circle_size = max_distance - circle 
= plt.Circle((centroid_x, centroid_y), circle_size, color='black', fill=True, label = "Calibrated area", alpha = 0.2) - fig, ax = plt.subplots() - ax.add_artist(circle) - ax.set_title(f"Removed error for frame {i}, camera {camera}") - ax.scatter(np.array(corners2[removed_mask]).T[0], np.array(corners2[removed_mask]).T[1], label = "Original", alpha = 0.5, color = "Black") - img = ax.scatter(np.array(imgpoints2[removed_mask]).T[0], np.array(imgpoints2[removed_mask]).T[1], c=errors[removed_mask], cmap = GnRd, label = "Reprojected", vmin=0, vmax=max(errors[removed_mask])) - ax.imshow(frame, alpha = 0.5) - ax.plot([],[], label = f"Rerprojection Remade: {round(np.mean(errors[removed_mask]), 4)}", color = "white") - ax.plot([],[], label = f"Whole Rerprojection OpenCV: {round(reprojection, 4)}", color = "white") - cbar = plt.colorbar(img, ax=ax) - cbar.set_label("Reprojection error") - ax.set_xlabel('Width') - ax.set_xlim([0,self.width[camera]]) - ax.set_ylim([0,self.height[camera]]) - ax.legend() - ax.set_ylabel('Y coordinate') - plt.grid() - plt.show() - - if self.traceLevel == 3 or self.traceLevel == 5 or self.traceLevel == 10: - center_x, center_y = self.width[camera] / 2, self.height[camera] / 2 - distances = [distance((center_x, center_y), point) for point in np.array(display_corners)] - max_distance = max(distances) - circle = plt.Circle((center_x, center_y), max_distance, color='black', fill=True, label = "Calibrated area", alpha = 0.2) - fig, ax = plt.subplots() - ax.add_artist(circle) - ax.set_title(f"Reprojection map camera {camera}") - ax.scatter(np.array(display_points).T[0], np.array(display_points).T[1], label = "Original", alpha = 0.5, color = "Black") - img = ax.scatter(np.array(display_corners).T[0], np.array(display_corners).T[1], c=all_error, cmap = GnRd, label = "Reprojected", vmin=0, vmax=threshold) - ax.imshow(frame, alpha = 0.5) - ax.plot([],[], label = f"Rerprojection Remade ALL: {round(np.sqrt(np.mean(np.array(whole_error)**2)), 4)}", color = 
"white") - ax.plot([],[], label = f"Whole Rerprojection OpenCV: {round(reprojection, 4)}", color = "white") - cbar = plt.colorbar(img, ax=ax) - cbar.set_label("Reprojection error") - ax.set_xlabel('Width') - ax.set_xlim([0,self.width[camera]]) - ax.set_ylim([0,self.height[camera]]) - ax.legend() - ax.set_ylabel('Height') - plt.grid() - plt.show() - - if self.traceLevel == 3 or self.traceLevel == 5 or self.traceLevel == 10: - x = np.array(display_points).T[0] - y = np.array(display_points).T[1] - z = np.array(all_error) - - x_flat = x.flatten() - y_flat = y.flatten() - z_flat = z.flatten() - - fig, ax = plt.subplots() - - self.errors[camera] = z_flat - _, bins, _ = ax.hist(all_error, range= [0,30], bins = 100, label = f"{camera}", color= "Blue", edgecolor= "black") - ax.hist(removed_error, bins = bins, color= "Blue", edgecolor= "black") - ax.plot([],[], label = f"MAX : {max(reported_error)}") - ax.plot([],[], label = f"MIN : {min(reported_error)}") - ax.legend() - ax.set_title("Reprojection error for shorter dataset") - ax.set_xlabel("Reprojection error") - plt.show() - grid_x, grid_y = np.mgrid[min(x_flat):max(x_flat):100j, min(y_flat):max(y_flat):100j] - grid_z = abs(griddata((x_flat, y_flat), z_flat, (grid_x, grid_y), method='cubic')) - - plt.title(f"Reprojection error of {camera}") - plt.contourf(grid_x, grid_y, grid_z, 50, cmap=GnRd) - plt.colorbar(label='Reprojection error') - contours = plt.contour(grid_x, grid_y, grid_z, 7, colors ="black", alpha = 0.7, linewidths = 0.5, linestyles = "dashed") - plt.clabel(contours, inline=True, fontsize=8) - plt.xlim([0,self.width[camera]]) - plt.ylim([0,self.height[camera]]) - plt.grid() - plt.show() - - if self.traceLevel == 11: - sorted_points, quadrant_coords = sort_points_into_quadrants(display_points, self.width[camera], self.height[camera], error=all_error) - quadrant_width = self.width // nx - quadrant_height = self.height // ny - - image = np.full((self.height, self.width, 3), 255, dtype=np.uint8) - - # Define the 
quadrant lines - quadrant_width = self.width // nx - quadrant_height = self.height // ny - index = 0 - import math - for i in range(nx): - for j in range(ny): - # Scale the color from red (0, 0, 255) to green (0, 255, 0) - count = np.mean(sorted_points[index]) - if math.isnan(count): - count = "Missing" - red = 255 - green = 0 - if len(sorted_points[index]) < ((self.squaresX*self.squaresY))*0.05: - count = f"Only {len(sorted_points[index])} points" - red = 255 - green = 0 - else: - count = round(count, 4) - red = int(((count / threshold)) * 255) - green = int((1 -count / threshold) * 255) - color = (0, green, red) - - left = j * quadrant_width - upper = i * quadrant_height - right = left + quadrant_width - bottom = upper + quadrant_height - index += 1 - - cv2.rectangle(image, (left, upper), (right, bottom), color, -1) - - text_position = (left + 10, upper + quadrant_height // 2) - cv2.putText(image, str(count), text_position, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA) - - cv2.imshow('Quadrants with Points', image) - cv2.waitKey(1) - cv2.destroyAllWindows() - if camera not in self.all_features.keys(): - self.all_features[camera] = display_corners - self.all_features[camera] = display_corners - if camera not in self.all_errors.keys(): - self.all_errors[camera] = all_error - self.all_errors[camera] = reported_error - return all_corners ,all_ids, all_error, removed_corners, removed_ids, removed_error - - def detect_charuco_board(self, image: np.array): - arucoParams = cv2.aruco.DetectorParameters_create() - arucoParams.minMarkerDistanceRate = 0.01 - corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(image, self.aruco_dictionary, parameters=arucoParams) # First, detect markers - marker_corners, marker_ids, refusd, recoverd = cv2.aruco.refineDetectedMarkers(image, self.board, corners, ids, rejectedCorners=rejectedImgPoints) - # If found, add object points, image points (after refining them) - if len(marker_corners) > 0: - ret, corners, ids = 
cv2.aruco.interpolateCornersCharuco(marker_corners,marker_ids,image, self.board, minMarkers = 1) - return ret, corners, ids, marker_corners, marker_ids - else: - return None, None, None, None, None - - def camera_pose_charuco(self, objpoints: np.array, corners: np.array, ids: np.array, K: np.array, d: np.array, ini_threshold = 2, min_inliers = 0.95, threshold_stepper = 1, max_threshold = 50): - objects = [] - all_objects = [] - index = 0 - start_time = time.time() - while len(objects) < len(objpoints[:,0,0]) * min_inliers: - if ini_threshold > max_threshold: - break - ret, rvec, tvec, objects = cv2.solvePnPRansac(objpoints, corners, K, d, flags = cv2.SOLVEPNP_P3P, reprojectionError = ini_threshold, iterationsCount = 10000, confidence = 0.9) - all_objects.append(objects) - imgpoints2 = objpoints.copy() - - all_corners = corners.copy() - all_corners = np.array([all_corners[id[0]] for id in objects]) - imgpoints2 = np.array([imgpoints2[id[0]] for id in objects]) - - ret, rvec, tvec = cv2.solvePnP(imgpoints2, all_corners, K, d) - imgpoints2, _ = cv2.projectPoints(imgpoints2, rvec, tvec, self.cameraIntrinsics[self.name], self.cameraDistortion[self.name]) - - ini_threshold += threshold_stepper - index += 1 - if self.traceLevel == 13: - image = cv2.imread(self.img_path[self.index]) - plt.title(f"Number of rejected corners in filtering, iterations needed: {ini_threshold}, inliers: {round(len(objects)/len(corners[:,0,0]), 4) *100} %") - plt.imshow(image) - plt.scatter(corners[:,0,0],corners[:,0,1], marker= "o", label = f"Detected all corners: {len(corners[:,0,0])}", color = "Red") - plt.scatter(imgpoints2[:,0,0],imgpoints2[:,0,1], label = f"Filtering method: {len(objects)}", marker = "x", color = "Green") - plt.legend() - plt.show() - if ret: - return rvec, tvec, objects - else: - return None - - def compute_reprojection_errors(self, obj_pts: np.array, img_pts: np.array, K: np.array, dist: np.array, rvec: np.array, tvec: np.array, fisheye = False): - if fisheye: - proj_pts, 
_ = cv2.fisheye.projectPoints(obj_pts, rvec, tvec, K, dist) - else: - proj_pts, _ = cv2.projectPoints(obj_pts, rvec, tvec, K, dist) - errs = np.linalg.norm(np.squeeze(proj_pts) - np.squeeze(img_pts), axis = 1) - return errs - - def charuco_ids_to_objpoints(self, ids): - one_pts = self.board.chessboardCorners - objpts = [] - for j in range(len(ids)): - objpts.append(one_pts[ids[j]]) - return np.array(objpts) - - - def analyze_charuco(self, images, scale_req=False, req_resolution=(800, 1280)): - """ - Charuco base pose estimation. - """ - # print("POSE ESTIMATION STARTS:") - allCorners = [] - allIds = [] - all_marker_corners = [] - all_marker_ids = [] - all_recovered = [] - # decimator = 0 - # SUB PIXEL CORNER DETECTION CRITERION - criteria = (cv2.TERM_CRITERIA_EPS + - cv2.TERM_CRITERIA_MAX_ITER, 10000, 0.00001) - count = 0 - skip_vis = False - for im in images: - if self.traceLevel == 3 or self.traceLevel == 10: - print("=> Processing image {0}".format(im)) - img_pth = Path(im) - frame = cv2.imread(im) - gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) - expected_height = gray.shape[0]*(req_resolution[1]/gray.shape[1]) - - if scale_req and not (gray.shape[0] == req_resolution[0] and gray.shape[1] == req_resolution[1]): - if int(expected_height) == req_resolution[0]: - # resizing to have both stereo and rgb to have same - # resolution to capture extrinsics of the rgb-right camera - gray = cv2.resize(gray, req_resolution[::-1], - interpolation=cv2.INTER_CUBIC) - else: - # resizing and cropping to have both stereo and rgb to have same resolution - # to calculate extrinsics of the rgb-right camera - scale_width = req_resolution[1]/gray.shape[1] - dest_res = ( - int(gray.shape[1] * scale_width), int(gray.shape[0] * scale_width)) - gray = cv2.resize( - gray, dest_res, interpolation=cv2.INTER_CUBIC) - if gray.shape[0] < req_resolution[0]: - raise RuntimeError("resizeed height of rgb is smaller than required. 
{0} < {1}".format( - gray.shape[0], req_resolution[0])) - # print(gray.shape[0] - req_resolution[0]) - del_height = (gray.shape[0] - req_resolution[0]) // 2 - # gray = gray[: req_resolution[0], :] - gray = gray[del_height: del_height + req_resolution[0], :] - - count += 1 - - ret, charuco_corners, charuco_ids, marker_corners, marker_ids = self.detect_charuco_board(gray) - - if self.traceLevel == 2 or self.traceLevel == 4 or self.traceLevel == 10: - print('{0} number of Markers corners detected in the image {1}'.format( - len(charuco_corners), img_pth.name)) - - if charuco_corners is not None and charuco_ids is not None and len(charuco_corners) > 3: - - charuco_corners = cv2.cornerSubPix(gray, charuco_corners, - winSize=(5, 5), - zeroZone=(-1, -1), - criteria=criteria) - allCorners.append(charuco_corners) # Charco chess corners - allIds.append(charuco_ids) # charuco chess corner id's - all_marker_corners.append(marker_corners) - all_marker_ids.append(marker_ids) - else: - print(im) - return f'Failed to detect more than 3 markers on image {im}', None, None, None, None, None - - if self.traceLevel == 2 or self.traceLevel == 4 or self.traceLevel == 10: - rgb_img = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR) - cv2.aruco.drawDetectedMarkers(rgb_img, marker_corners, marker_ids, (0, 0, 255)) - cv2.aruco.drawDetectedCornersCharuco(rgb_img, charuco_corners, charuco_ids, (0, 255, 0)) - - if rgb_img.shape[1] > 1920: - rgb_img = cv2.resize(rgb_img, (0, 0), fx=0.7, fy=0.7) - if not skip_vis: - name = img_pth.name + ' - ' + "marker frame" - cv2.imshow(name, rgb_img) - k = cv2.waitKey(1) - if k == 27: # Esc key to skip vis - skip_vis = True - cv2.destroyAllWindows() - # imsize = gray.shape[::-1] - return allCorners, allIds, all_marker_corners, all_marker_ids, gray.shape[::-1], all_recovered - - def calibrate_intrinsics(self, image_files, hfov, name): - image_files = glob.glob(image_files + "/*") - image_files.sort() - assert len( - image_files) != 0, "ERROR: Images not read correctly, 
check directory" - if self.charucos == {}: - allCorners, allIds, _, _, imsize, _ = self.analyze_charuco(image_files) - else: - allCorners = [] - allIds = [] - for index, charuco_img in enumerate(self.charucos[name]): - ids, charucos = charuco_img - allCorners.append(charucos) - allIds.append(ids) - imsize = (self.height[name], self.width[name]) - - coverageImage = np.ones(imsize[::-1], np.uint8) * 255 - coverageImage = cv2.cvtColor(coverageImage, cv2.COLOR_GRAY2BGR) - coverageImage = self.draw_corners(allCorners, coverageImage) - if self.calib_model[name] == 'perspective': - distortion_flags = self.get_distortion_flags(name) - ret, camera_matrix, distortion_coefficients, rotation_vectors, translation_vectors, filtered_ids, filtered_corners, allCorners, allIds = self.calibrate_camera_charuco( - allCorners, allIds, imsize, hfov, name, distortion_flags) - self.undistort_visualization( - image_files, camera_matrix, distortion_coefficients, imsize, name) - - return ret, camera_matrix, distortion_coefficients, rotation_vectors, translation_vectors, filtered_ids, filtered_corners, imsize, coverageImage, allCorners, allIds - else: - print('Fisheye--------------------------------------------------') - ret, camera_matrix, distortion_coefficients, rotation_vectors, translation_vectors, filtered_ids, filtered_corners = self.calibrate_fisheye( - allCorners, allIds, imsize, hfov, name) - self.undistort_visualization( - image_files, camera_matrix, distortion_coefficients, imsize, name) - print('Fisheye rotation vector', rotation_vectors[0]) - print('Fisheye translation vector', translation_vectors[0]) - - # (Height, width) - return ret, camera_matrix, distortion_coefficients, rotation_vectors, translation_vectors, filtered_ids, filtered_corners, imsize, coverageImage, allCorners, allIds - - - def scale_intrinsics(self, intrinsics, originalShape, destShape): - scale = destShape[1] / originalShape[1] # scale on width - scale_mat = np.array([[scale, 0, 0], [0, scale, 0], [0, 0, 1]]) 
- scaled_intrinsics = np.matmul(scale_mat, intrinsics) - """ print("Scaled height offset : {}".format( - (originalShape[0] * scale - destShape[0]) / 2)) - print("Scaled width offset : {}".format( - (originalShape[1] * scale - destShape[1]) / 2)) """ - scaled_intrinsics[1][2] -= (originalShape[0] # c_y - along height of the image - * scale - destShape[0]) / 2 - scaled_intrinsics[0][2] -= (originalShape[1] # c_x width of the image - * scale - destShape[1]) / 2 - if self.traceLevel == 3 or self.traceLevel == 10: - print('original_intrinsics') - print(intrinsics) - print('scaled_intrinsics') - print(scaled_intrinsics) - - return scaled_intrinsics - - def undistort_visualization(self, img_list, K, D, img_size, name): - for index, im in enumerate(img_list): - # print(im) - img = cv2.imread(im) - # h, w = img.shape[:2] - if self.cameraModel == 'perspective': - kScaled, _ = cv2.getOptimalNewCameraMatrix(K, D, img_size, 0) - # print(f'K scaled is \n {kScaled} and size is \n {img_size}') - # print(f'D Value is \n {D}') - map1, map2 = cv2.initUndistortRectifyMap( - K, D, np.eye(3), kScaled, img_size, cv2.CV_32FC1) - else: - map1, map2 = cv2.fisheye.initUndistortRectifyMap( - K, D, np.eye(3), K, img_size, cv2.CV_32FC1) - - undistorted_img = cv2.remap( - img, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT) - - if index == 0: - undistorted_file_path = self.data_path + '/' + name + f'_undistorted.png' - cv2.imwrite(undistorted_file_path, undistorted_img) - if self.traceLevel == 4 or self.traceLevel == 5 or self.traceLevel == 10: - cv2.putText(undistorted_img, "Press S to save image", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2*undistorted_img.shape[0]/1750, (0, 0, 255), 2) - cv2.imshow("undistorted", undistorted_img) - k = cv2.waitKey(1) - if k == ord("s") or k == ord("S"): - undistorted_img = cv2.remap( - img, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT) - undistorted_file_path = self.data_path + '/' + name + 
f'_{index}_undistorted.png' - cv2.imwrite(undistorted_file_path, undistorted_img) - if k == 27: # Esc key to stop - break - print(f'image path - {im}') - print(f'Image Undistorted Size {undistorted_img.shape}') - cv2.destroyWindow("undistorted") - - - def filter_corner_outliers(self, allIds, allCorners, camera_matrix, distortion_coefficients, rotation_vectors, translation_vectors): - corners_removed = False - for i in range(len(allIds)): - corners = allCorners[i] - ids = allIds[i] - objpts = self.charuco_ids_to_objpoints(ids) - if self.cameraModel == "fisheye": - errs = self.compute_reprojection_errors(objpts, corners, camera_matrix, distortion_coefficients, rotation_vectors[i], translation_vectors[i], fisheye = True) - else: - errs = self.compute_reprojection_errors(objpts, corners, camera_matrix, distortion_coefficients, rotation_vectors[i], translation_vectors[i]) - suspicious_err_thr = max(2*np.median(errs), 100) - n_offending_pts = np.sum(errs > suspicious_err_thr) - offending_pts_idxs = np.where(errs > suspicious_err_thr)[0] - # check if there are offending points and if they form a minority - if n_offending_pts > 0 and n_offending_pts < len(corners)/5: - print(f"removing {n_offending_pts} offending points with errs {errs[offending_pts_idxs]}") - corners_removed = True - #remove the offending points - offset = 0 - allCorners[i] = np.delete(allCorners[i],offending_pts_idxs, axis = 0) - allIds[i] = np.delete(allIds[i],offending_pts_idxs, axis = 0) - return corners_removed, allIds, allCorners - - - def calibrate_camera_charuco(self, all_Features, all_features_Ids, allCorners, allIds, imsize, hfov, name, distortion_flags): - """ - Calibrates the camera using the dected corners. 
- """ - f = imsize[0] / (2 * np.tan(np.deg2rad(hfov/2))) - - if name not in self.cameraIntrinsics: - cameraMatrixInit = np.array([[f, 0.0, imsize[0]/2], - [0.0, f, imsize[1]/2], - [0.0, 0.0, 1.0]]) - threshold = 20 * imsize[1]/800.0 - else: - cameraMatrixInit = self.cameraIntrinsics[name] - threshold = 2 * imsize[1]/800.0 - - if self.traceLevel == 3 or self.traceLevel == 10: - print( - f'Camera Matrix initialization with HFOV of {hfov} is.............') - print(cameraMatrixInit) - if name not in self.cameraDistortion: - distCoeffsInit = np.zeros((5, 1)) - else: - distCoeffsInit = self.cameraDistortion[name] - # check if there are any suspicious corners with high reprojection error - rvecs = [] - tvecs = [] - self.index = 0 - index = 0 - max_threshold = 10 + self.initial_max_threshold * (hfov / 30 + imsize[1] / 800 * 0.2) - min_inlier = 1 - self.initial_min_filtered * (hfov / 60 + imsize[1] / 800 * 0.2) - for corners, ids in zip(allCorners, allIds): - self.index = index - objpts = self.charuco_ids_to_objpoints(ids) - rvec, tvec, newids = self.camera_pose_charuco(objpts, corners, ids, cameraMatrixInit, distCoeffsInit) - tvecs.append(tvec) - rvecs.append(rvec) - index += 1 - - # Here we need to get initialK and parameters for each camera ready and fill them inside reconstructed reprojection error per point - ret = 0.0 - flags = cv2.CALIB_USE_INTRINSIC_GUESS - flags += distortion_flags - - # flags = (cv2.CALIB_RATIONAL_MODEL) - reprojection = [] - removed_errors = [] - num_corners = [] - num_threshold = [] - iterations_array = [] - intrinsic_array = {"f_x": [], "f_y": [], "c_x": [],"c_y": []} - distortion_array = {} - index = 0 - if self.traceLevel != 0: - fig, ax = plt.subplots() - camera_matrix = cameraMatrixInit - distortion_coefficients = distCoeffsInit - rotation_vectors = rvecs - translation_vectors = tvecs - translation_array_x = [] - translation_array_y = [] - translation_array_z = [] - corner_checker = 0 - previous_ids = [] - import time - try: - whole = 
time.time() - while True: - intrinsic_array['f_x'].append(camera_matrix[0][0]) - intrinsic_array['f_y'].append(camera_matrix[1][1]) - intrinsic_array['c_x'].append(camera_matrix[0][2]) - intrinsic_array['c_y'].append(camera_matrix[1][2]) - num_threshold.append(threshold) - - translation_array_x.append(np.mean(np.array(translation_vectors).T[0][0])) - translation_array_y.append(np.mean(np.array(translation_vectors).T[0][1])) - translation_array_z.append(np.mean(np.array(translation_vectors).T[0][2])) - - - start = time.time() - filtered_corners, filtered_ids, all_error, removed_corners, removed_ids, removed_error = self.features_filtering_function(rotation_vectors, translation_vectors, camera_matrix, distortion_coefficients, ret, allCorners, allIds, camera = name, threshold = threshold) - num_corners.append(len(removed_corners)) - iterations_array.append(index) - reprojection.append(ret) - for i in range(len(distortion_coefficients)): - if i not in distortion_array.keys(): - distortion_array[i] = [] - distortion_array[i].append(distortion_coefficients[i][0]) - print(f"Each filtering {time.time() - start}") - if self.traceLevel == 12: - _, bins, _ = ax.hist(all_error, range = [0,30], bins = 100, label = f"Iteration {index}, number filtered: {len(removed_corners)}", color= plt.cm.summer(1-(index-1)/10), alpha = 0.3, edgecolor ="black") - ax.hist(removed_error, bins = bins, color= plt.cm.summer(1-(index-1)/7), alpha = 0.8, edgecolor = "black") - ax.vlines(threshold, ymin = -0.5, ymax = len(all_error), color = "red") - start = time.time() - try: - (ret, camera_matrix, distortion_coefficients, - rotation_vectors, translation_vectors, - stdDeviationsIntrinsics, stdDeviationsExtrinsics, - perViewErrors) = cv2.aruco.calibrateCameraCharucoExtended( - charucoCorners=filtered_corners, - charucoIds=filtered_ids, - board=self.board, - imageSize=imsize, - cameraMatrix=cameraMatrixInit, - distCoeffs=distCoeffsInit, - flags=flags, - criteria=(cv2.TERM_CRITERIA_EPS & 
cv2.TERM_CRITERIA_COUNT, 50000, 1e-9)) - except: - raise StereoExceptions(message="Intrisic calibration failed", stage="intrinsic_calibration", element=name, id=self.id) - cameraMatrixInit = camera_matrix - distCoeffsInit = distortion_coefficients - threshold = 5 * imsize[1]/800.0 - print(f"Each calibration {time.time()-start}") - index += 1 - if index > 5 or (previous_ids == removed_ids and len(previous_ids) >= len(removed_ids) and index > 2): - print(f"Whole procedure: {time.time() - whole}") - if self.traceLevel == 12: - fig.suptitle(f"Histograms of reprojection error for threshold {threshold}") - ax.legend() - ax.grid() - plt.show() - plt.title(f"Reprojection error after {int(index) + 1} iterations, threshold {threshold}") - plt.plot(iterations_array, reprojection) - plt.xlabel("Iteration") - plt.ylabel("Reprojection error") - plt.grid() - plt.show() - plt.title(f"Corners rejected after {int(index) + 1} iterations, threshold {threshold}") - plt.plot(iterations_array, num_corners) - plt.xlabel("Iteration") - plt.ylabel("Rejected corners") - plt.grid() - plt.show() - plt.title(f"Distortion after {int(index) + 1} iterations, threshold {threshold}") - for key, distortion in distortion_array.items(): - if distortion[0] != 0: - plt.plot(iterations_array,distortion, label = f"Distortion: {key}") - plt.ylabel("Absolute change") - plt.grid() - plt.legend() - plt.show() - plt.subplot(2,2,1) - plt.title(f"Intrinsic and extrinsics after {int(index) + 1} iterations, threshold {threshold}") - plt.plot(iterations_array, intrinsic_array['f_x'], label = "f_x") - plt.plot([],[], color = "white", label = f"Start: {round(intrinsic_array['f_x'][0], 2)}") - plt.plot([],[], color = "white", label = f"Finish: {round(intrinsic_array['f_x'][len(intrinsic_array['f_x'])-1], 2)}") - plt.plot(iterations_array, intrinsic_array['f_y'], label = "f_y") - plt.plot([],[], color = "white", label = f"Start: {round(intrinsic_array['f_y'][0], 2)}") - plt.plot([],[], color = "white", label = f"Finish: 
{round(intrinsic_array['f_y'][len(intrinsic_array['f_y'])-1], 2)}") - plt.xlabel("Iterations") - plt.ylabel("Absolute change") - plt.grid() - plt.legend() - - plt.subplot(2,2,2) - plt.plot(iterations_array, intrinsic_array['c_x'], label = "c_x") - plt.plot([],[], color = "white", label = f"Start: {round(intrinsic_array['c_x'][0], 2)}") - plt.plot([],[], color = "white", label = f"Finish: {round(intrinsic_array['c_x'][len(intrinsic_array['c_x'])-1], 2)}") - plt.plot(iterations_array, intrinsic_array['c_y'], label = "c_y") - plt.plot([],[], color = "white", label = f"Start: {round(intrinsic_array['c_y'][0], 2)}") - plt.plot([],[], color = "white", label = f"Finish: {round(intrinsic_array['c_y'][len(intrinsic_array['c_y'])-1], 2)}") - plt.xlabel("Iterations") - plt.ylabel("Absolute change") - plt.grid() - plt.legend() - - plt.subplot(2,1,2) - plt.plot(iterations_array, translation_array_x - translation_array_x[0], label = "Translation x") - plt.plot(iterations_array, translation_array_y - translation_array_y[0], label = "Translation y") - plt.plot(iterations_array, translation_array_z - translation_array_z[0], label = "Translation z") - plt.grid() - plt.xlabel("Iterations") - plt.ylabel("Absolute change") - plt.legend() - plt.show() - traceLevel_copy = self.traceLevel - self.traceLevel = 8 - filtered_corners, filtered_ids , all_error,removed_corners, removed_ids, removed_error= self.features_filtering_function(rotation_vectors, translation_vectors, camera_matrix, distortion_coefficients, ret, allCorners, allIds, camera = name, threshold = 1.0) - self.traceLevel = traceLevel_copy - break - previous_ids = removed_ids - except: - return f"Failed to calibrate camera {name}", None, None, None, None, None, None, None ,None , None - if self.traceLevel == 3 or self.traceLevel == 10: - print('Per View Errors...') - print(perViewErrors) - return ret, camera_matrix, distortion_coefficients, rotation_vectors, translation_vectors, filtered_ids, filtered_corners, allCorners, allIds 
- - def calibrate_fisheye(self, allCorners, allIds, imsize, hfov, name): - one_pts = self.board.chessboardCorners - obj_points = [] - for i in range(len(allIds)): - obj_points.append(self.charuco_ids_to_objpoints(allIds[i])) - - f_init = imsize[0]/np.deg2rad(hfov)*1.15 - - cameraMatrixInit = np.array([[f_init, 0. , imsize[0]/2], - [0. , f_init, imsize[1]/2], - [0. , 0. , 1. ]]) - distCoeffsInit = np.zeros((4,1)) - # check if there are any suspicious corners with high reprojection error - rvecs = [] - tvecs = [] - for corners, ids in zip(allCorners, allIds): - objpts = self.charuco_ids_to_objpoints(ids) - corners_undist = cv2.fisheye.undistortPoints(corners, cameraMatrixInit, distCoeffsInit) - rvec, tvec, new_ids = self.camera_pose_charuco(objpts, corners_undist,ids, np.eye(3), np.array((0.0,0,0,0))) - tvecs.append(tvec) - rvecs.append(rvec) - corners_removed, filtered_ids, filtered_corners = self.filter_corner_outliers(allIds, allCorners, cameraMatrixInit, distCoeffsInit, rvecs, tvecs) - if corners_removed: - obj_points = [] - for i in range(len(filtered_ids)): - obj_points.append(self.charuco_ids_to_objpoints(filtered_ids[i])) - - print("Camera Matrix initialization.............") - print(cameraMatrixInit) - flags = 0 - flags |= cv2.fisheye.CALIB_CHECK_COND - flags |= cv2.fisheye.CALIB_USE_INTRINSIC_GUESS - flags |= cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC - flags |= cv2.fisheye.CALIB_FIX_SKEW - - - term_criteria = (cv2.TERM_CRITERIA_COUNT + - cv2.TERM_CRITERIA_EPS, 30, 1e-9) - try: - res, K, d, rvecs, tvecs = cv2.fisheye.calibrate(obj_points, filtered_corners, None, cameraMatrixInit, distCoeffsInit, flags=flags, criteria=term_criteria) - except: - # calibration failed for full FOV, let's try to limit the corners to smaller part of FOV first to find initial parameters - success = False - crop = 0.95 - while not success: - print(f"trying crop factor {crop}") - obj_points_limited = [] - corners_limited = [] - for obj_pts, corners in zip(obj_points, filtered_corners): - 
obj_points_tmp = [] - corners_tmp = [] - for obj_pt, corner in zip(obj_pts, corners): - check_x = corner[0,0] > imsize[0]*(1-crop) and corner[0,0] < imsize[0]*crop - check_y = corner[0,1] > imsize[1]*(1-crop) and corner[0,1] < imsize[1]*crop - if check_x and check_y: - obj_points_tmp.append(obj_pt) - corners_tmp.append(corner) - obj_points_limited.append(np.array(obj_points_tmp)) - corners_limited.append(np.array(corners_tmp)) - try: - res, K, d, rvecs, tvecs = cv2.fisheye.calibrate(obj_points_limited, corners_limited, None, cameraMatrixInit, distCoeffsInit, flags=flags, criteria=term_criteria) - print(f"success with crop factor {crop}") - success = True - break - except: - print(f"failed with crop factor {crop}") - if crop > 0.7: - crop -= 0.05 - else: - raise Exception("Calibration failed: Tried maximum crop factor and still no success") - if success: - # trying the full FOV once more with better initial K - print(f"new K init {K}") - print(f"new d_init {d}") - try: - res, K, d, rvecs, tvecs = cv2.fisheye.calibrate(obj_points, filtered_corners, imsize, K, distCoeffsInit, flags=flags, criteria=term_criteria) - except: - print(f"Failed the full res calib, using calibration with crop factor {crop}") - - return res, K, d, rvecs, tvecs, filtered_ids, filtered_corners - - - def calibrate_stereo(self, left_name, right_name, allIds_l, allCorners_l, allIds_r, allCorners_r, cameraMatrix_l, distCoeff_l, cameraMatrix_r, distCoeff_r, t_in, r_in, features = None): - left_corners_sampled = [] - right_corners_sampled = [] - left_ids_sampled = [] - obj_pts = [] - one_pts = self.board.chessboardCorners - - if self.traceLevel == 2 or self.traceLevel == 4 or self.traceLevel == 10: - print('Length of allIds_l') - print(len(allIds_l)) - print('Length of allIds_r') - print(len(allIds_r)) - - for i in range(len(allIds_l)): - left_sub_corners = [] - right_sub_corners = [] - obj_pts_sub = [] - #if len(allIds_l[i]) < 70 or len(allIds_r[i]) < 70: - # continue - for j in 
range(len(allIds_l[i])): - idx = np.where(allIds_r[i] == allIds_l[i][j]) - if idx[0].size == 0: - continue - left_sub_corners.append(allCorners_l[i][j]) - right_sub_corners.append(allCorners_r[i][idx]) - obj_pts_sub.append(one_pts[allIds_l[i][j]]) - if len(left_sub_corners) > 3 and len(right_sub_corners) > 3: - obj_pts.append(np.array(obj_pts_sub, dtype=np.float32)) - left_corners_sampled.append( - np.array(left_sub_corners, dtype=np.float32)) - left_ids_sampled.append(np.array(allIds_l[i], dtype=np.int32)) - right_corners_sampled.append( - np.array(right_sub_corners, dtype=np.float32)) - else: - return -1, "Stereo Calib failed due to less common features" - - stereocalib_criteria = (cv2.TERM_CRITERIA_COUNT + - cv2.TERM_CRITERIA_EPS, 300, 1e-9) - if per_ccm and extrinsic_per_ccm: - for i in range(len(left_corners_sampled)): - if self.calib_model[left_name] == "perspective": - left_corners_sampled[i] = cv2.undistortPoints(np.array(left_corners_sampled[i]), cameraMatrix_l, distCoeff_l, P=cameraMatrix_l) - #left_corners_sampled[i] = cv2.undistortPoints(np.array(left_corners_sampled[i]), cameraMatrix_l, None) - - else: - left_corners_sampled[i] = cv2.fisheye.undistortPoints(np.array(left_corners_sampled[i]), cameraMatrix_l, distCoeff_l, P=cameraMatrix_l) - #left_corners_sampled[i] = cv2.fisheye.undistortPoints(np.array(left_corners_sampled[i]), cameraMatrix_l, None) - for i in range(len(right_corners_sampled)): - if self.calib_model[right_name] == "perspective": - right_corners_sampled[i] = cv2.undistortPoints(np.array(right_corners_sampled[i]), cameraMatrix_r, distCoeff_r, P=cameraMatrix_r) - #right_corners_sampled[i] = cv2.undistortPoints(np.array(right_corners_sampled[i]), cameraMatrix_r, None) - else: - right_corners_sampled[i] = cv2.fisheye.undistortPoints(np.array(right_corners_sampled[i]), cameraMatrix_r, distCoeff_r, P=cameraMatrix_r) - #right_corners_sampled[i] = cv2.fisheye.undistortPoints(np.array(right_corners_sampled[i]), cameraMatrix_r, None) - - if 
features == None or features == "charucos": - flags = cv2.CALIB_FIX_INTRINSIC - ret, M1, d1, M2, d2, R, T, E, F, _ = cv2.stereoCalibrateExtended( - obj_pts, left_corners_sampled, right_corners_sampled, - np.eye(3), np.zeros(12), np.eye(3), np.zeros(12), None, - R=r_in, T=t_in, criteria=stereocalib_criteria , flags=flags) - - r_euler = Rotation.from_matrix(R).as_euler('xyz', degrees=True) - scale = ((cameraMatrix_l[0][0]*cameraMatrix_r[0][0])) - print(f'Epipolar error without scale: {ret}') - print(f'Epipolar error with scale: {ret*np.sqrt(scale)}') - print('Printing Extrinsics res...') - print(R) - print(T) - print(f'Euler angles in XYZ {r_euler} degs') - R_l, R_r, P_l, P_r, Q, validPixROI1, validPixROI2 = cv2.stereoRectify( - cameraMatrix_l, - distCoeff_l, - cameraMatrix_r, - distCoeff_r, - None, R, T) # , alpha=0.1 - # self.P_l = P_l - # self.P_r = P_r - r_euler = Rotation.from_matrix(R_l).as_euler('xyz', degrees=True) - if self.traceLevel == 5 or self.traceLevel == 10: - print(f'R_L Euler angles in XYZ {r_euler}') - r_euler = Rotation.from_matrix(R_r).as_euler('xyz', degrees=True) - if self.traceLevel == 5 or self.traceLevel == 10: - print(f'R_R Euler angles in XYZ {r_euler}') - - # print(f'P_l is \n {P_l}') - # print(f'P_r is \n {P_r}') - return [ret, R, T, R_l, R_r, P_l, P_r] - #### ADD OTHER CALIBRATION METHODS ### - else: - if self.cameraModel == 'perspective': - flags = 0 - # flags |= cv2.CALIB_USE_EXTRINSIC_GUESS - # print(flags) - flags = cv2.CALIB_FIX_INTRINSIC - distortion_flags = self.get_distortion_flags(left_name) - flags += distortion_flags - # print(flags) - if self.traceLevel == 3 or self.traceLevel == 10: - print('Printing Extrinsics guesses...') - print(r_in) - print(t_in) - ret, M1, d1, M2, d2, R, T, E, F, _ = cv2.stereoCalibrateExtended( - obj_pts, left_corners_sampled, right_corners_sampled, - cameraMatrix_l, distCoeff_l, cameraMatrix_r, distCoeff_r, None, - R=r_in, T=t_in, criteria=stereocalib_criteria , flags=flags) - - r_euler = 
Rotation.from_matrix(R).as_euler('xyz', degrees=True) - print(f'Epipolar error is {ret}') - print('Printing Extrinsics res...') - print(R) - print(T) - print(f'Euler angles in XYZ {r_euler} degs') - - - R_l, R_r, P_l, P_r, Q, validPixROI1, validPixROI2 = cv2.stereoRectify( - cameraMatrix_l, - distCoeff_l, - cameraMatrix_r, - distCoeff_r, - None, R, T) # , alpha=0.1 - # self.P_l = P_l - # self.P_r = P_r - r_euler = Rotation.from_matrix(R_l).as_euler('xyz', degrees=True) - if self.traceLevel == 5 or self.traceLevel == 10: - print(f'R_L Euler angles in XYZ {r_euler}') - r_euler = Rotation.from_matrix(R_r).as_euler('xyz', degrees=True) - if self.traceLevel == 5 or self.traceLevel == 10: - print(f'R_R Euler angles in XYZ {r_euler}') - - return [ret, R, T, R_l, R_r, P_l, P_r] - elif self.cameraModel == 'fisheye': - # make sure all images have the same *number of* points - min_num_points = min([len(pts) for pts in obj_pts]) - obj_pts_truncated = [pts[:min_num_points] for pts in obj_pts] - left_corners_truncated = [pts[:min_num_points] for pts in left_corners_sampled] - right_corners_truncated = [pts[:min_num_points] for pts in right_corners_sampled] - - flags = 0 - # flags |= cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC - # flags |= cv2.fisheye.CALIB_CHECK_COND - # flags |= cv2.fisheye.CALIB_FIX_SKEW - flags |= cv2.fisheye.CALIB_FIX_INTRINSIC - # flags |= cv2.fisheye.CALIB_FIX_K1 - # flags |= cv2.fisheye.CALIB_FIX_K2 - # flags |= cv2.fisheye.CALIB_FIX_K3 - # flags |= cv2.fisheye.CALIB_FIX_K4 - # flags |= cv2.CALIB_RATIONAL_MODEL - # TODO(sACHIN): Try without intrinsic guess - # flags |= cv2.CALIB_USE_INTRINSIC_GUESS - # TODO(sACHIN): Try without intrinsic guess - # flags |= cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC - # flags = cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC + cv2.fisheye.CALIB_CHECK_COND + cv2.fisheye.CALIB_FIX_SKEW - if self.traceLevel == 3 or self.traceLevel == 10: - print('Fisyeye stereo model..................') - (ret, M1, d1, M2, d2, R, T), E, F = 
cv2.fisheye.stereoCalibrate( - obj_pts_truncated, left_corners_truncated, right_corners_truncated, - cameraMatrix_l, distCoeff_l, cameraMatrix_r, distCoeff_r, None, - flags=flags, criteria=stereocalib_criteria), None, None - r_euler = Rotation.from_matrix(R).as_euler('xyz', degrees=True) - print(f'Reprojection error is {ret}') - if self.traceLevel == 3 or self.traceLevel == 10: - print('Printing Extrinsics res...') - print(R) - print(T) - print(f'Euler angles in XYZ {r_euler} degs') - isHorizontal = np.absolute(T[0]) > np.absolute(T[1]) - - R_l, R_r, P_l, P_r, Q = cv2.fisheye.stereoRectify( - cameraMatrix_l, - distCoeff_l, - cameraMatrix_r, - distCoeff_r, - None, R, T, flags=0) - - r_euler = Rotation.from_matrix(R_l).as_euler('xyz', degrees=True) - if self.traceLevel == 3 or self.traceLevel == 10: - print(f'R_L Euler angles in XYZ {r_euler}') - r_euler = Rotation.from_matrix(R_r).as_euler('xyz', degrees=True) - if self.traceLevel == 3 or self.traceLevel == 10: - print(f'R_R Euler angles in XYZ {r_euler}') - - return [ret, R, T, R_l, R_r, P_l, P_r] - - def display_rectification(self, image_data_pairs, images_corners_l, images_corners_r, image_epipolar_color, isHorizontal): - print( - "Displaying Stereo Pair for visual inspection. 
Press the [ESC] key to exit.") - for idx, image_data_pair in enumerate(image_data_pairs): - if isHorizontal: - img_concat = cv2.hconcat( - [image_data_pair[0], image_data_pair[1]]) - for left_pt, right_pt, colorMode in zip(images_corners_l[idx], images_corners_r[idx], image_epipolar_color[idx]): - cv2.line(img_concat, - (int(left_pt[0][0]), int(left_pt[0][1])), (int(right_pt[0][0]) + image_data_pair[0].shape[1], int(right_pt[0][1])), - colors[colorMode], 1) - else: - img_concat = cv2.vconcat( - [image_data_pair[0], image_data_pair[1]]) - for left_pt, right_pt, colorMode in zip(images_corners_l[idx], images_corners_r[idx], image_epipolar_color[idx]): - cv2.line(img_concat, - (int(left_pt[0][0]), int(left_pt[0][1])), (int(right_pt[0][0]), int(right_pt[0][1]) + image_data_pair[0].shape[0]), - colors[colorMode], 1) - - img_concat = cv2.resize( - img_concat, (0, 0), fx=0.8, fy=0.8) - - # show image - cv2.imshow('Stereo Pair', img_concat) - k = cv2.waitKey(1) - if k == 27: # Esc key to stop - break - - # os._exit(0) - # raise SystemExit() - - cv2.destroyWindow('Stereo Pair') - - def scale_image(self, img, scaled_res): - expected_height = img.shape[0]*(scaled_res[1]/img.shape[1]) - if self.traceLevel == 2 or self.traceLevel == 10: - print("Expected Height: {}".format(expected_height)) - - if not (img.shape[0] == scaled_res[0] and img.shape[1] == scaled_res[1]): - if int(expected_height) == scaled_res[0]: - # resizing to have both stereo and rgb to have same - # resolution to capture extrinsics of the rgb-right camera - img = cv2.resize(img, (scaled_res[1], scaled_res[0]), - interpolation=cv2.INTER_CUBIC) - return img - else: - # resizing and cropping to have both stereo and rgb to have same resolution - # to calculate extrinsics of the rgb-right camera - scale_width = scaled_res[1]/img.shape[1] - dest_res = ( - int(img.shape[1] * scale_width), int(img.shape[0] * scale_width)) - img = cv2.resize( - img, dest_res, interpolation=cv2.INTER_CUBIC) - if img.shape[0] < 
scaled_res[0]: - raise RuntimeError("resizeed height of rgb is smaller than required. {0} < {1}".format( - img.shape[0], scaled_res[0])) - # print(gray.shape[0] - req_resolution[0]) - del_height = (img.shape[0] - scaled_res[0]) // 2 - # gray = gray[: req_resolution[0], :] - img = img[del_height: del_height + scaled_res[0], :] - return img - else: - return img - - def sgdEpipolar(self, images_left, images_right, M_lp, d_l, M_rp, d_r, r_l, r_r, kScaledL, kScaledR, scaled_res, isHorizontal): - if self.cameraModel == 'perspective': - mapx_l, mapy_l = cv2.initUndistortRectifyMap( - M_lp, d_l, r_l, kScaledL, scaled_res[::-1], cv2.CV_32FC1) - mapx_r, mapy_r = cv2.initUndistortRectifyMap( - M_rp, d_r, r_r, kScaledR, scaled_res[::-1], cv2.CV_32FC1) - else: - mapx_l, mapy_l = cv2.fisheye.initUndistortRectifyMap( - M_lp, d_l, r_l, kScaledL, scaled_res[::-1], cv2.CV_32FC1) - mapx_r, mapy_r = cv2.fisheye.initUndistortRectifyMap( - M_rp, d_r, r_r, kScaledR, scaled_res[::-1], cv2.CV_32FC1) - - - image_data_pairs = [] - imagesCount = 0 - - for image_left, image_right in zip(images_left, images_right): - # read images - imagesCount += 1 - # print(imagesCount) - img_l = cv2.imread(image_left, 0) - img_r = cv2.imread(image_right, 0) - - img_l = self.scale_image(img_l, scaled_res) - img_r = self.scale_image(img_r, scaled_res) - - # warp right image - # img_l = cv2.warpPerspective(img_l, self.H1, img_l.shape[::-1], - # cv2.INTER_CUBIC + - # cv2.WARP_FILL_OUTLIERS + - # cv2.WARP_INVERSE_MAP) - - # img_r = cv2.warpPerspective(img_r, self.H2, img_r.shape[::-1], - # cv2.INTER_CUBIC + - # cv2.WARP_FILL_OUTLIERS + - # cv2.WARP_INVERSE_MAP) - - img_l = cv2.remap(img_l, mapx_l, mapy_l, cv2.INTER_LINEAR) - img_r = cv2.remap(img_r, mapx_r, mapy_r, cv2.INTER_LINEAR) - - image_data_pairs.append((img_l, img_r)) - - imgpoints_r = [] - imgpoints_l = [] - criteria = (cv2.TERM_CRITERIA_EPS + - cv2.TERM_CRITERIA_MAX_ITER, 10000, 0.00001) - - for i, image_data_pair in enumerate(image_data_pairs): - 
res2_l = self.detect_charuco_board(image_data_pair[0]) - res2_r = self.detect_charuco_board(image_data_pair[1]) - - if res2_l[1] is not None and res2_r[2] is not None and len(res2_l[1]) > 3 and len(res2_r[1]) > 3: - - cv2.cornerSubPix(image_data_pair[0], res2_l[1], - winSize=(5, 5), - zeroZone=(-1, -1), - criteria=criteria) - cv2.cornerSubPix(image_data_pair[1], res2_r[1], - winSize=(5, 5), - zeroZone=(-1, -1), - criteria=criteria) - - # termination criteria - img_pth_right = Path(images_right[i]) - img_pth_left = Path(images_left[i]) - org = (100, 50) - # cv2.imshow('ltext', lText) - # cv2.waitKey(0) - localError = 0 - corners_l = [] - corners_r = [] - for j in range(len(res2_l[2])): - idx = np.where(res2_r[2] == res2_l[2][j]) - if idx[0].size == 0: - continue - corners_l.append(res2_l[1][j]) - corners_r.append(res2_r[1][idx]) - - imgpoints_l.extend(corners_l) - imgpoints_r.extend(corners_r) - epi_error_sum = 0 - for l_pt, r_pt in zip(corners_l, corners_r): - if isHorizontal: - epi_error_sum += abs(l_pt[0][1] - r_pt[0][1]) - else: - epi_error_sum += abs(l_pt[0][0] - r_pt[0][0]) - # localError = epi_error_sum / len(corners_l) - - # print("Average Epipolar in test Error per image on host in " + img_pth_right.name + " : " + - # str(localError)) - raise SystemExit(1) - - epi_error_sum = 0 - for l_pt, r_pt in zip(imgpoints_l, imgpoints_r): - if isHorizontal: - epi_error_sum += abs(l_pt[0][1] - r_pt[0][1]) - else: - epi_error_sum += abs(l_pt[0][0] - r_pt[0][0]) - - avg_epipolar = epi_error_sum / len(imgpoints_r) - print("Average Epipolar Error in test is : " + str(avg_epipolar)) - return avg_epipolar - - - def test_epipolar_charuco(self, left_name, right_name, left_img_pth, right_img_pth, M_l, d_l, M_r, d_r, t, r_l, r_r): - images_left = glob.glob(left_img_pth + '/*.png') - images_right = glob.glob(right_img_pth + '/*.png') - images_left.sort() - print(images_left) - images_right.sort() - assert len(images_left) != 0, "ERROR: Images not read correctly" - assert 
len(images_right) != 0, "ERROR: Images not read correctly" - isHorizontal = np.absolute(t[0]) > np.absolute(t[1]) - - scale = None - scale_req = False - frame_left_shape = cv2.imread(images_left[0], 0).shape - frame_right_shape = cv2.imread(images_right[0], 0).shape - scalable_res = frame_left_shape - scaled_res = frame_right_shape - if frame_right_shape[0] < frame_left_shape[0] and frame_right_shape[1] < frame_left_shape[1]: - scale_req = True - scale = frame_right_shape[1] / frame_left_shape[1] - elif frame_right_shape[0] > frame_left_shape[0] and frame_right_shape[1] > frame_left_shape[1]: - scale_req = True - scale = frame_left_shape[1] / frame_right_shape[1] - scalable_res = frame_right_shape - scaled_res = frame_left_shape - - if scale_req: - scaled_height = scale * scalable_res[0] - diff = scaled_height - scaled_res[0] - if diff < 0: - scaled_res = (int(scaled_height), scaled_res[1]) - if self.traceLevel == 3 or self.traceLevel == 10: - print( - f'Is scale Req: {scale_req}\n scale value: {scale} \n scalable Res: {scalable_res} \n scale Res: {scaled_res}') - print("Original res Left :{}".format(frame_left_shape)) - print("Original res Right :{}".format(frame_right_shape)) - # print("Scale res :{}".format(scaled_res)) - - M_lp = self.scale_intrinsics(M_l, frame_left_shape, scaled_res) - M_rp = self.scale_intrinsics(M_r, frame_right_shape, scaled_res) - - criteria = (cv2.TERM_CRITERIA_EPS + - cv2.TERM_CRITERIA_MAX_ITER, 10000, 0.00001) - - # TODO(Sachin): Observe Images by adding visualization - # TODO(Sachin): Check if the stetch is only in calibration Images - print('Original intrinsics ....') - print(f"L {M_lp}") - print(f"R: {M_rp}") - if self.traceLevel == 3 or self.traceLevel == 10: - print(f'Width and height is {scaled_res[::-1]}') - # kScaledL, _ = cv2.getOptimalNewCameraMatrix(M_r, d_r, scaled_res[::-1], 0) - # kScaledL, _ = cv2.getOptimalNewCameraMatrix(M_r, d_l, scaled_res[::-1], 0) - # kScaledR, _ = cv2.getOptimalNewCameraMatrix(M_r, d_r, 
scaled_res[::-1], 0) - kScaledR = kScaledL = M_rp - - # if self.cameraModel != 'perspective': - # kScaledR = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(M_r, d_r, scaled_res[::-1], np.eye(3), fov_scale=1.1) - # kScaledL = kScaledR - - - print('Intrinsics from the getOptimalNewCameraMatrix/Original ....') - print(f"L: {kScaledL}") - print(f"R: {kScaledR}") - oldEpipolarError = None - epQueue = deque() - movePos = True - - - print(self.calib_model[left_name], self.calib_model[right_name]) - if self.calib_model[left_name] == 'perspective': - mapx_l, mapy_l = cv2.initUndistortRectifyMap( - M_lp, d_l, r_l, kScaledL, scaled_res[::-1], cv2.CV_32FC1) - else: - mapx_l, mapy_l = cv2.fisheye.initUndistortRectifyMap( - M_lp, d_l, r_l, kScaledL, scaled_res[::-1], cv2.CV_32FC1) - if self.calib_model[right_name] == "perspective": - mapx_r, mapy_r = cv2.initUndistortRectifyMap( - M_rp, d_r, r_r, kScaledR, scaled_res[::-1], cv2.CV_32FC1) + if PER_CCM and EXTRINSICS_PER_CCM: + if left_cam_info['calib_model'] == "perspective": + left_corners_sampled = pw.run(undistort_points_perspective, left_corners_sampled, left_cam_info) + right_corners_sampled = pw.run(undistort_points_perspective, right_corners_sampled, right_cam_info) else: - mapx_r, mapy_r = cv2.fisheye.initUndistortRectifyMap( - M_rp, d_r, r_r, kScaledR, scaled_res[::-1], cv2.CV_32FC1) - - image_data_pairs = [] - for image_left, image_right in zip(images_left, images_right): - # read images - img_l = cv2.imread(image_left, 0) - img_r = cv2.imread(image_right, 0) - - img_l = self.scale_image(img_l, scaled_res) - img_r = self.scale_image(img_r, scaled_res) - # print(img_l.shape) - # print(img_r.shape) - - # warp right image - # img_l = cv2.warpPerspective(img_l, self.H1, img_l.shape[::-1], - # cv2.INTER_CUBIC + - # cv2.WARP_FILL_OUTLIERS + - # cv2.WARP_INVERSE_MAP) - - # img_r = cv2.warpPerspective(img_r, self.H2, img_r.shape[::-1], - # cv2.INTER_CUBIC + - # cv2.WARP_FILL_OUTLIERS + - # cv2.WARP_INVERSE_MAP) - - 
img_l = cv2.remap(img_l, mapx_l, mapy_l, cv2.INTER_LINEAR) - img_r = cv2.remap(img_r, mapx_r, mapy_r, cv2.INTER_LINEAR) - - image_data_pairs.append((img_l, img_r)) - - if self.traceLevel == 4 or self.traceLevel == 5 or self.traceLevel == 10: - #cv2.imshow("undistorted-Left", img_l) - #cv2.imshow("undistorted-right", img_r) - # print(f'image path - {im}') - # print(f'Image Undistorted Size {undistorted_img.shape}') - k = cv2.waitKey(0) - if k == 27: # Esc key to stop - break - if self.traceLevel == 4 or self.traceLevel == 5 or self.traceLevel == 10: - cv2.destroyWindow("undistorted-Left") - cv2.destroyWindow("undistorted-right") - # compute metrics - imgpoints_r = [] - imgpoints_l = [] - image_epipolar_color = [] - # new_imagePairs = []) - for i, image_data_pair in enumerate(image_data_pairs): - res2_l = self.detect_charuco_board(image_data_pair[0]) - res2_r = self.detect_charuco_board(image_data_pair[1]) - - # if self.traceLevel == 2 or self.traceLevel == 4 or self.traceLevel == 10: - - - img_concat = cv2.hconcat([image_data_pair[0], image_data_pair[1]]) - img_concat = cv2.cvtColor(img_concat, cv2.COLOR_GRAY2RGB) - line_row = 0 - while line_row < img_concat.shape[0]: - cv2.line(img_concat, - (0, line_row), (img_concat.shape[1], line_row), - (0, 255, 0), 1) - line_row += 30 - - cv2.imshow('Stereo Pair', img_concat) - k = cv2.waitKey(1) - - if res2_l[1] is not None and res2_r[2] is not None and len(res2_l[1]) > 3 and len(res2_r[1]) > 3: - - cv2.cornerSubPix(image_data_pair[0], res2_l[1], - winSize=(5, 5), - zeroZone=(-1, -1), - criteria=criteria) - cv2.cornerSubPix(image_data_pair[1], res2_r[1], - winSize=(5, 5), - zeroZone=(-1, -1), - criteria=criteria) - - # termination criteria - img_pth_right = Path(images_right[i]) - img_pth_left = Path(images_left[i]) - org = (100, 50) - # cv2.imshow('ltext', lText) - # cv2.waitKey(0) - localError = 0 - corners_l = [] - corners_r = [] - for j in range(len(res2_l[2])): - idx = np.where(res2_r[2] == res2_l[2][j]) - if idx[0].size 
== 0: - continue - corners_l.append(res2_l[1][j]) - corners_r.append(res2_r[1][idx]) - - imgpoints_l.append(corners_l) - imgpoints_r.append(corners_r) - epi_error_sum = 0 - corner_epipolar_color = [] - for l_pt, r_pt in zip(corners_l, corners_r): - if isHorizontal: - curr_epipolar_error = abs(l_pt[0][1] - r_pt[0][1]) - else: - curr_epipolar_error = abs(l_pt[0][0] - r_pt[0][0]) - if curr_epipolar_error >= 1: - corner_epipolar_color.append(1) - else: - corner_epipolar_color.append(0) - epi_error_sum += curr_epipolar_error - localError = epi_error_sum / len(corners_l) - image_epipolar_color.append(corner_epipolar_color) - if self.traceLevel == 2 or self.traceLevel == 3 or self.traceLevel == 4 or self.traceLevel == 10: - print("Epipolar Error per image on host in " + img_pth_right.name + " : " + - str(localError)) - else: - print('Numer of corners is in left -> and right ->') - return -1 - lText = cv2.putText(cv2.cvtColor(image_data_pair[0],cv2.COLOR_GRAY2RGB), img_pth_left.name, org, cv2.FONT_HERSHEY_SIMPLEX, 1, (2, 0, 255), 2, cv2.LINE_AA) - rText = cv2.putText(cv2.cvtColor(image_data_pair[1],cv2.COLOR_GRAY2RGB), img_pth_right.name + " Error: " + str(localError), org, cv2.FONT_HERSHEY_SIMPLEX, 1, (2, 0, 255), 2, cv2.LINE_AA) - image_data_pairs[i] = (lText, rText) - - - epi_error_sum = 0 - total_corners = 0 - for corners_l, corners_r in zip(imgpoints_l, imgpoints_r): - total_corners += len(corners_l) - for l_pt, r_pt in zip(corners_l, corners_r): - if isHorizontal: - epi_error_sum += abs(l_pt[0][1] - r_pt[0][1]) - else: - epi_error_sum += abs(l_pt[0][0] - r_pt[0][0]) - - avg_epipolar = epi_error_sum / total_corners - print("Average Epipolar Error is : " + str(avg_epipolar)) - - if self.enable_rectification_disp: - self.display_rectification(image_data_pairs, imgpoints_l, imgpoints_r, image_epipolar_color, isHorizontal) - - return avg_epipolar - - def create_save_mesh(self): # , output_path): - - curr_path = Path(__file__).parent.resolve() - print("Mesh path") - 
print(curr_path) - - if self.cameraModel == "perspective": - map_x_l, map_y_l = cv2.initUndistortRectifyMap( - self.M1, self.d1, self.R1, self.M2, self.img_shape, cv2.CV_32FC1) - map_x_r, map_y_r = cv2.initUndistortRectifyMap( - self.M2, self.d2, self.R2, self.M2, self.img_shape, cv2.CV_32FC1) - else: - map_x_l, map_y_l = cv2.fisheye.initUndistortRectifyMap( - self.M1, self.d1, self.R1, self.M2, self.img_shape, cv2.CV_32FC1) - map_x_r, map_y_r = cv2.fisheye.initUndistortRectifyMap( - self.M2, self.d2, self.R2, self.M2, self.img_shape, cv2.CV_32FC1) - - """ - map_x_l_fp32 = map_x_l.astype(np.float32) - map_y_l_fp32 = map_y_l.astype(np.float32) - map_x_r_fp32 = map_x_r.astype(np.float32) - map_y_r_fp32 = map_y_r.astype(np.float32) - - - print("shape of maps") - print(map_x_l.shape) - print(map_y_l.shape) - print(map_x_r.shape) - print(map_y_r.shape) """ - - meshCellSize = 16 - mesh_left = [] - mesh_right = [] - - for y in range(map_x_l.shape[0] + 1): - if y % meshCellSize == 0: - row_left = [] - row_right = [] - for x in range(map_x_l.shape[1] + 1): - if x % meshCellSize == 0: - if y == map_x_l.shape[0] and x == map_x_l.shape[1]: - row_left.append(map_y_l[y - 1, x - 1]) - row_left.append(map_x_l[y - 1, x - 1]) - row_right.append(map_y_r[y - 1, x - 1]) - row_right.append(map_x_r[y - 1, x - 1]) - elif y == map_x_l.shape[0]: - row_left.append(map_y_l[y - 1, x]) - row_left.append(map_x_l[y - 1, x]) - row_right.append(map_y_r[y - 1, x]) - row_right.append(map_x_r[y - 1, x]) - elif x == map_x_l.shape[1]: - row_left.append(map_y_l[y, x - 1]) - row_left.append(map_x_l[y, x - 1]) - row_right.append(map_y_r[y, x - 1]) - row_right.append(map_x_r[y, x - 1]) - else: - row_left.append(map_y_l[y, x]) - row_left.append(map_x_l[y, x]) - row_right.append(map_y_r[y, x]) - row_right.append(map_x_r[y, x]) - if (map_x_l.shape[1] % meshCellSize) % 2 != 0: - row_left.append(0) - row_left.append(0) - row_right.append(0) - row_right.append(0) - - mesh_left.append(row_left) - 
mesh_right.append(row_right) - - mesh_left = np.array(mesh_left) - mesh_right = np.array(mesh_right) - left_mesh_fpath = str(curr_path) + '/../resources/left_mesh.calib' - right_mesh_fpath = str(curr_path) + '/../resources/right_mesh.calib' - mesh_left.tofile(left_mesh_fpath) - mesh_right.tofile(right_mesh_fpath) + left_corners_sampled = pw.run(undistort_points_fisheye, left_corners_sampled, left_cam_info) + right_corners_sampled = pw.run(undistort_points_fisheye, right_corners_sampled, right_cam_info) + + extrinsics = pw.run(calibrate_stereo_perspective_per_ccm, config, obj_pts, left_corners_sampled, right_corners_sampled, left_cam_info, right_cam_info) + else: + if camera_model == 'perspective': + extrinsics = pw.run(calibrate_stereo_perspective, config, obj_pts, left_corners_sampled, right_corners_sampled, left_cam_info, right_cam_info) + elif camera_model == 'fisheye': + extrinsics = pw.run(calibrate_stereo_fisheye, config, obj_pts, left_corners_sampled, right_corners_sampled, left_cam_info, right_cam_info) + left_cam_info, stereo_config = pw.run(calculate_epipolar_error, left_cam_info, right_cam_info, left, right, board_config, extrinsics)[:2] + allExtrinsics.append(extrinsics) + camInfos[left.name] = left_cam_info + stereoConfigs.append(stereo_config) + + pw.execute() + + # Extract camera info structs and stereo config + for cam, camInfo in camInfos.items(): + for socket in board_config['cameras']: + if board_config['cameras'][socket]['name'] == cam: + board_config['cameras'][socket] = camInfo.ret() + + for stereoConfig in stereoConfigs: + if stereoConfig.ret(): + board_config['stereo_config'].update(stereoConfig.ret()) + + if debug: + return [s.ret() for s in stereoConfigs], [e.ret() for e in allExtrinsics], board_config, {k: v.ret() for k, v in camInfos.items()} + + return board_config \ No newline at end of file diff --git a/types.py b/types.py new file mode 100644 index 0000000..40ef6e0 --- /dev/null +++ b/types.py @@ -0,0 +1,150 @@ +from typing import 
TypedDict, List, Tuple +import cv2.aruco as aruco +from enum import Enum +import numpy as np +import cv2 + +class StrEnum(Enum): # Doesn't exist in python 3.10 + def __eq__(self, other): + if isinstance(other, str): + print('compare string') + return self.value == other + return super().__eq__(other) + +class CalibrationModel(StrEnum): + Perspective = 'perspective' + Fisheye = 'fisheye' + +class DistortionModel(StrEnum): + Normal = 'NORMAL' + Tilted = 'TILTED' + Prism = 'PRISM' + Thermal = 'THERMAL' # TODO : Is this even a distortion model + +class CharucoBoard: + def __init__(self, squaresX = 16, squaresY = 9, squareSize = 1.0, markerSize = 0.8, dictSize = cv2.aruco.DICT_4X4_1000): + """Charuco board configuration used in a captured dataset + + Args: + squaresX (int, optional): Number of squares horizontally. Defaults to 16. + squaresY (int, optional): Number of squares vertically. Defaults to 9. + squareSize (float, optional): Length of the side of one square (cm). Defaults to 1.0. + markerSize (float, optional): Length of the side of one marker (cm). Defaults to 0.8. + dictSize (_type_, optional): cv2 aruco dictionary size. Defaults to cv2.aruco.DICT_4X4_1000. 
+ """ + self.squaresX = squaresX + self.squaresY = squaresY + self.squareSize = squareSize + self.markerSize = markerSize + self.dictSize = dictSize + + def __getstate__(self): # Magic to allow pickling the instance without pickling the cv2 dictionary + state = self.__dict__.copy() + for hidden in ['_board', '_dictionary']: + if hidden in state: + del state[hidden] + return state + + def __build(self): + self._dictionary = aruco.Dictionary_get(self.dictSize) + self._board = aruco.CharucoBoard_create(self.squaresX, self.squaresY, self.squareSize, self.markerSize, self._dictionary) + + @property + def dictionary(self): + """cv2 aruco dictionary""" + if not hasattr(self, '_dictionary'): + self.__build() + return self._dictionary + + @property + def board(self): + """cv2 aruco board""" + if not hasattr(self, '_board'): + self.__build() + return self._board + +class Dataset: + class Images: + def __init__(self, images: List[np.ndarray | str]): + self._images = images + + def at(self, key): + if isinstance(self._images[key], str): + self._images[key] = cv2.imread(self._images[key], cv2.IMREAD_UNCHANGED) + return self._images[key] + + def __len__(self): + return len(self._images) + + def __iter__(self): + for i in len(self._images): + yield self.at(i) + + def __getitem__(self, key): + return self.at(key) + + def __repr__(self): + return self._images.__repr__() + + def __init__(self, name: str, board: CharucoBoard, images: List[np.ndarray | str] = [], allCorners: List[np.ndarray] = [], allIds: List[np.ndarray] = [], imageSize: Tuple[float, float] = (), enableFiltering: bool = True): + """Create a dataset for camera calibration + + Args: + name (str): Name of the camera for and the key for output data + board (CharucoBoard): Charuco board configuration used in the dataset + images (List[np.ndarray | str], optional): Set of images for calibration, can be left empty if `allCorners`, `allIds` and `imageSize` is provided. Defaults to []. 
+ allCorners (List[np.ndarray], optional): Set of corners used for intrinsic calibration. Defaults to []. + allIds (List[np.ndarray], optional): Set of ids used for intrinsic calibration. Defaults to []. + imageSize (Tuple[float, float], optional): Size of the images captured during calibration, must be provided if `images` is empty. Defaults to (). + enableFiltering (bool, optional): Whether to filter provided corners, or use all of them as is. Defaults to True. + """ + self.name = name + self.images = Dataset.Images(images) + self.allCorners = allCorners + self.allIds = allIds + self.imageSize = imageSize + self.board = board + self.enableFiltering = enableFiltering + +class CalibrationConfig: + def __init__(self, initialMaxThreshold = 0, initialMinFiltered = 0, + cameraModel = 0, stereoCalibCriteria = 0): + """Calibration configuration options + + Args: + initialMaxThreshold (int, optional): _description_. Defaults to 0. + initialMinFiltered (int, optional): _description_. Defaults to 0. + cameraModel (int, optional): _description_. Defaults to 0. + stereoCalibCriteria (int, optional): _description_. Defaults to 0. 
+ """ + self.initialMaxThreshold = initialMaxThreshold + self.initialMinFiltered = initialMinFiltered + self.cameraModel = cameraModel + self.stereoCalibCriteria = stereoCalibCriteria + +class CameraData(TypedDict): + calib_model: CalibrationModel + dist_coeff: str + distortion_model: DistortionModel + extrinsics: str + to_cam: str + filtered_corners: str + type: str + filtered_ids: str + height: str + width: str + hfov: str + socket: str + images_path: str + imsize: str + intrinsics: str + sensorName: str + hasAutofocus: str + model: str + max_threshold: str + min_inliers: str + name: str + reprojection_error: str + size: str + threshold_stepper: str + ids: str \ No newline at end of file diff --git a/worker.py b/worker.py new file mode 100644 index 0000000..2a7df1c --- /dev/null +++ b/worker.py @@ -0,0 +1,251 @@ +from typing import Dict, Any, Callable, Iterable, Tuple, TypeVar, Generic, Sequence, List +from collections import abc +import multiprocessing +import random +import queue + +T = TypeVar('T') + +def allArgs(args, kwargs): + for arg in args: + yield arg + for kwarg in kwargs.values(): + yield kwarg + +class Retvals(Generic[T]): + def __init__(self, taskOrGroup: 'ParallelTask', key: slice | tuple | int): + self._taskOrGroup = taskOrGroup + self._key = key + + def __iter__(self) -> T: # Allow iterating over slice or list retvals + if isinstance(self._key, slice): + if not self._key.stop: + raise RuntimeError('Cannot iterate over an unknown length Retvals') + for i in range(self._key.start or 0, self._key.stop, self._key.step or 1): + yield Retvals(self._taskOrGroup, i) + elif isinstance(self._key, list | tuple): + for i in self._key: + yield Retvals(self._taskOrGroup, i) + else: + yield Retvals(self._taskOrGroup, self._key) + + def __repr__(self): + return f'' + + def ret(self) -> T: + if isinstance(self._key, list | tuple): + ret = self._taskOrGroup.ret() + return [ret[i] for i in self._key] + return self._taskOrGroup.ret()[self._key] + + def 
finished(self) -> bool: + return self._taskOrGroup.finished() + +class ParallelTask(Generic[T]): + def __init__(self, worker: 'ParallelWorker', fun, args, kwargs): + self._worker = worker + self._fun = fun + self._args = args + self._kwargs = kwargs + self._ret = None + self._exc = None + self._id = random.getrandbits(64) + self._finished = False + + def __getitem__(self, key) -> T: + return Retvals(self, key) + + def __repr__(self): + return f'' + + def resolveArguments(self) -> None: + def _replace(args): + for arg in args: + if isinstance(arg, ParallelTask | ParallelTaskGroup | Retvals): + yield arg.ret() + else: + yield arg + self._args = list(_replace(self._args)) + for key, value in self._kwargs: + if isinstance(value, ParallelTask | ParallelTaskGroup | Retvals): + self._kwargs[key] = value.ret() + + def isExecutable(self) -> bool: + for arg in allArgs(self._args, self._kwargs): + if isinstance(arg, ParallelTask | ParallelTaskGroup | Retvals) and not arg.finished(): + return False + return True + + def finished(self) -> bool: + return self._finished + + def finish(self, ret, exc) -> None: + self._ret = ret + self._exc = exc + self._finished = True + + def exc(self) -> BaseException | None: + return self._exc + + def ret(self) -> T | None: + return self._ret + +class ParallelTaskGroup(Generic[T]): + def __init__(self, fun, args, kwargs): + self._fun = fun + self._args = args + self._kwargs = kwargs + self._tasks = None + + def __getitem__(self, key): + return Retvals(self, key) + + def __repr__(self): + return f'' + + def finished(self) -> bool: + if not self._tasks: + return False + for task in self._tasks: + if not task.finished(): + return False + return True + + def exc(self) -> List[BaseException | None]: + return list(map(lambda t: t.exc(), self._tasks)) + + def tasks(self) -> List[ParallelTask]: + nTasks = 1 + for arg in allArgs(self._args, self._kwargs): + if isinstance(arg, ParallelTask | Retvals): + arg = arg.ret() + if isinstance(arg, abc.Sized) and 
not isinstance(arg, str | bytes | dict): + nTasks = max(nTasks, len(arg)) + self._tasks = [] + for arg in allArgs(self._args, self._kwargs): + if isinstance(arg, abc.Sized) and len(arg) != nTasks: + raise RuntimeError('All sized arguments must have the same length or 1') + + def argsAtI(i: int): + for arg in self._args: + if isinstance(arg, list): + yield arg[i] + elif isinstance(arg, ParallelTask | Retvals) and isinstance(arg.ret(), abc.Iterable) and not isinstance(arg.ret(), (str, bytes, dict)): + yield arg.ret()[i] + else: + yield arg + def kwargsAtI(i: int): + newKwargs = {} + for key, value in self._kwargs: + if isinstance(arg, abc.Sized): + newKwargs[key] = value[i] + else: + newKwargs[key] = value + return newKwargs + + for i in range(nTasks): + task = ParallelTask(self, self._fun, list(argsAtI(i)), kwargsAtI(i)) + self._tasks.append(task) + return self._tasks + + def ret(self) -> List[T | None]: + def zip_retvals(tasks): + def _iRet(i): + for task in tasks: + yield task.ret()[i] + + l = len(tasks[0].ret()) if isinstance(tasks[0].ret(), abc.Sized) else 1 + if l > 1: + for i in range(len(tasks[0].ret())): + yield list(_iRet(i)) + else: + for task in tasks: + yield task.ret() + + return list(zip_retvals(self._tasks)) + + def isExecutable(self) -> bool: + for arg in allArgs(self._args, self._kwargs): + if isinstance(arg, ParallelTask | ParallelTaskGroup | Retvals) and not arg.finished(): + return False + return True + +def worker_controller(_stop: multiprocessing.Event, _in: multiprocessing.Queue, _out: multiprocessing.Queue, _wId: int) -> None: + while not _stop.is_set(): + try: + task: ParallelTask | None = _in.get(timeout=0.01) + + #try: + ret, exc = None, None + ret = task._fun(*task._args, **task._kwargs) + #except BaseException as e: + #exc = e + #finally: + _out.put((task._id, ret, exc)) + except queue.Empty: + continue + except: + break + +class ParallelWorker: + def __init__(self, workers: int = 16): + self._workers = workers + self._tasks: 
List[ParallelTask] = [] + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_traceback): + self.execute() + + def run(self, fun: Callable[[Any], T], *args: Tuple[Any], **kwargs: Dict[str, Any]) -> ParallelTask[T]: + task = ParallelTask(self, fun, args, kwargs) + self._tasks.append(task) + return task + + def map(self, fun: Callable[[Any], T], *args: Tuple[Iterable[Any]], **kwargs: Dict[str, Iterable[Any]]) -> ParallelTaskGroup[T]: + taskGroup = ParallelTaskGroup(fun, args, kwargs) + self._tasks.append(taskGroup) + return taskGroup + + def execute(self): + workerIn = multiprocessing.Manager().Queue() + workerOut = multiprocessing.Manager().Queue() + stop = multiprocessing.Event() + processes = [] + for i in range(self._workers): + p = multiprocessing.Process(target=worker_controller, args=(stop, workerIn, workerOut, i)) + processes.append(p) + p.start() + + doneTasks = [] + remaining = len(self._tasks) + + try: + while remaining > 0: + for task in self._tasks: + if task.isExecutable(): + if isinstance(task, ParallelTask): + task.resolveArguments() + #workerIn.put(task) + ret = task._fun(*task._args, **task._kwargs) + workerOut.put((task._id, ret, None)) + + doneTasks.append(task) + self._tasks.remove(task) + else: + newTasks = task.tasks() + remaining += len(newTasks) - 1 + self._tasks.extend(newTasks) + self._tasks.remove(task) + + if not workerOut.empty(): + tId, ret, exc = workerOut.get() + for task in doneTasks: + if task._id == tId: + task.finish(ret, exc) + remaining -= 1 + finally: + stop.set() + for p in processes: + p.join() \ No newline at end of file