diff --git a/colmap/colmap_runner.py b/colmap/colmap_runner.py index e3465354..92df1bce 100644 --- a/colmap/colmap_runner.py +++ b/colmap/colmap_runner.py @@ -1,6 +1,7 @@ import subprocess import os import sys +import logging from pathlib import Path #Usage: python colmap_runner.py --flags @@ -39,12 +40,21 @@ def run_colmap(colmap_path, images_path, output_path): use_gpu = "false" Path(f"{output_path}").mkdir(parents=True, exist_ok=True) + # sfm-worker logger + logger = logging.getLogger('sfm-worker') + + + logger.info("run_colmap()-colmap_path: " + colmap_path) + logger.info("run_colmap()-images_path: " + images_path) + logger.info("run_colmap()-output_path: " + output_path) + #Creating a new database for colmap try: database_path = output_path + "/database.db" subprocess.call([colmap_path, "database_creator", "--database_path", database_path]) - print("Created DB") + logger.info("Created DB") except: + logger.error("DB Creation Failed") return 1 #Feature extracting @@ -52,30 +62,37 @@ def run_colmap(colmap_path, images_path, output_path): # --SiftExtraction.use_gpu=false for docker # TODO: make gpu use dynamic subprocess.call([colmap_path, "feature_extractor","--ImageReader.camera_model","PINHOLE",f"--SiftExtraction.use_gpu={use_gpu}","--ImageReader.single_camera=1", "--database_path", database_path, "--image_path", images_path]) - print("Features Extracted") + logger.info("Features Extracted") except: + logger.error("Features unable to be extracted") return 1 #Feature matching try: - print("Feature Matching") subprocess.call([colmap_path, "exhaustive_matcher",f"--SiftMatching.use_gpu={use_gpu}", "--database_path", database_path]) + logger.info("Feature Matched") except: + logger.error("Features unable to be matched") return 1 #Generating model try: subprocess.call([colmap_path, "mapper", "--database_path", database_path, "--image_path", images_path, "--output_path", output_path]) + logger.info("Model generated") except: + logger.error("Model unable to be 
generated") return 1 #Getting model as text try: # TODO: no longer works on windows fix file paths or run in docker subprocess.call([colmap_path, "model_converter", "--input_path", output_path + r"/0", "--output_path", output_path, "--output_type", "TXT"]) + logger.info("Model as text successful") except: + logger.error("Model as text unsuccessful") return 1 + logger.info("run_colmap successfully executed") return 0 diff --git a/colmap/colmap_worker_unit_tests.py b/colmap/colmap_worker_unit_tests.py new file mode 100644 index 00000000..f945d077 --- /dev/null +++ b/colmap/colmap_worker_unit_tests.py @@ -0,0 +1,16 @@ +import unittest +import colmap_worker + +class colmapWorkerTest(unittest.TestCase): + def setUp(self): + pass + + def tearDown(self): + pass + + def test_data(self): + pass + + +if __name__ == "__main__": + unittest.main() \ No newline at end of file diff --git a/colmap/configs/default.txt b/colmap/configs/default.txt index e69de29b..ef9728f5 100644 --- a/colmap/configs/default.txt +++ b/colmap/configs/default.txt @@ -0,0 +1,5 @@ +#set local run status / run colmap worker with or without webserver +local_run = True + +#Specify input data file path used for local runs ONLY +input_data_path = data/inputs/input.mp4 \ No newline at end of file diff --git a/colmap/configs/local.txt b/colmap/configs/local.txt index 7dae7b13..ef9728f5 100644 --- a/colmap/configs/local.txt +++ b/colmap/configs/local.txt @@ -2,4 +2,4 @@ local_run = True #Specify input data file path used for local runs ONLY -input_data_path = data/inputs/video/input.mp4 \ No newline at end of file +input_data_path = data/inputs/input.mp4 \ No newline at end of file diff --git a/colmap/log.py b/colmap/log.py new file mode 100644 index 00000000..dbc0d8aa --- /dev/null +++ b/colmap/log.py @@ -0,0 +1,24 @@ +import logging + +def sfm_worker_logger(name='root'): + """ + Initializer for a global sfm-worker logger. 
+ -> + To initialize use: 'logger = log.sfm_worker_logger(name)' + To retrieve in different context: 'logger = logging.getLogger(name)' + """ + formatter = logging.Formatter(fmt='%(asctime)s - %(levelname)s - %(module)s - %(message)s') + handler = logging.FileHandler(name+'.log', mode='w') + handler.setFormatter(formatter) + + logger = logging.getLogger(name) + logger.setLevel(logging.DEBUG) + logger.addHandler(handler) + return logger + +if __name__ == "__main__": + theta = sfm_worker_logger('sfm-worker-test') + theta.info("info message") + theta.warning("warning message") + theta.error("error message") + theta.critical("critical message") \ No newline at end of file diff --git a/colmap/main.py b/colmap/main.py index 8e1b72e3..0186b12d 100644 --- a/colmap/main.py +++ b/colmap/main.py @@ -15,6 +15,9 @@ import argparse import sys +import logging +from log import sfm_worker_logger + app = Flask(__name__) # base_url = "http://host.docker.internal:5000/" @@ -44,20 +47,24 @@ def run_full_sfm_pipeline(id, video_file_path, input_data_dir, output_data_dir): output_path = output_data_dir + id Path(f"{output_path}").mkdir(parents=True, exist_ok=True) + # Get logger + logger = logging.getLogger('sfm-worker') + # (1) vid_to_images.py imgs_folder = os.path.join(output_path, "imgs") - print(video_file_path) - split_video_into_frames(video_file_path, imgs_folder, 100) + logger.info("Video file path:{}".format(video_file_path)) + + split_video_into_frames(video_file_path, imgs_folder,100) # imgs are now in output_data_dir/id # (2) colmap_runner.py colmap_path = "/usr/local/bin/colmap" status = run_colmap(colmap_path, imgs_folder, output_path) if status == 0: - print("COLMAP ran successfully.") + logger.info("COLMAP ran successfully.") elif status == 1: - print("ERROR: There was an unknown error running COLMAP") + logger.error("There was an unknown error running COLMAP") # (3) matrix.py initial_motion_path = os.path.join(output_path, "images.txt") @@ -81,6 +88,7 @@ def 
colmap_worker(): Path(f"{input_data_dir}").mkdir(parents=True, exist_ok=True) Path(f"{output_data_dir}").mkdir(parents=True, exist_ok=True) + logger = sfm_worker_logger('sfm-worker') rabbitmq_domain = "rabbitmq" credentials = pika.PlainCredentials("admin", "password123") @@ -94,19 +102,26 @@ def colmap_worker(): channel.queue_declare(queue="sfm-out") def process_colmap_job(ch, method, properties, body): - print("Starting New Job") - print(body.decode()) + logger = logging.getLogger('sfm-worker') + + logger.info("Starting New Job") + logger.info(body.decode()) job_data = json.loads(body.decode()) id = job_data["id"] - print(f"Running New Job With ID: {id}") + + logger.info(f"Running New Job With ID: {id}") + # TODO: Handle exceptions and enable steaming to make safer video = requests.get(job_data["file_path"], timeout=10) - print("Web server pinged") + + logger.info("Web server pinged") video_file_path = f"{input_data_dir}{id}.mp4" - print("Saving video to: {video_file_path}") + + logger.info(f"Saving video to: {video_file_path}") open(video_file_path, "wb").write(video.content) - print("Video downloaded") + + logger.info("Video downloaded") # RUNS COLMAP AND CONVERSION CODE motion_data, imgs_folder = run_full_sfm_pipeline( @@ -127,29 +142,36 @@ def process_colmap_job(ch, method, properties, body): # confirm to rabbitmq job is done ch.basic_ack(delivery_tag=method.delivery_tag) - print("Job complete") + + logger.info("Job complete") channel.basic_qos(prefetch_count=1) channel.basic_consume(queue="sfm-in", on_message_callback=process_colmap_job) channel.start_consuming() - print("should not get here") + + logger.critical("should not get here") if __name__ == "__main__": - print("~SFM WORKER~") + """ + LOGGER IS ASSUMED TO BE RUN LOCALLY!!! 
+ """ + logger = sfm_worker_logger('sfm-worker') + logger.info("~SFM WORKER~") + input_data_dir = "data/inputs/" output_data_dir = "data/outputs/" Path(f"{input_data_dir}").mkdir(parents=True, exist_ok=True) Path(f"{output_data_dir}").mkdir(parents=True, exist_ok=True) + # Load args from config file args = config_parser() # Local run behavior if args.local_run == True: motion_data, imgs_folder = run_full_sfm_pipeline( - "Local_Test", args.input_data_path, input_data_dir, output_data_dir - ) - print(motion_data) + "Local_Test", args.input_data_path, input_data_dir, output_data_dir) + logger.info(motion_data) json_motion_data = json.dumps(motion_data) # Standard webserver run behavior diff --git a/colmap/matrix.py b/colmap/matrix.py index d4de8812..c284ea67 100644 --- a/colmap/matrix.py +++ b/colmap/matrix.py @@ -22,6 +22,7 @@ import image_position_extractor import json import os +import logging # https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles @@ -122,6 +123,8 @@ def rotation_matrix_from_vectors(vec1, vec2): def get_extrinsic(center_point, fp: str = "parsed_data.csv"): + # sfm-worker logger + logger = logging.getLogger('sfm-worker') # contrains filepath and extrinsic matrix filepaths = [] @@ -173,10 +176,11 @@ def get_extrinsic(center_point, fp: str = "parsed_data.csv"): # stack all extrinsic to perform faster transformations to the whole stack extrinsic_matrices = np.stack(extrinsic_matrices,axis=0) - print(extrinsic_matrices.shape) + + logger.info(extrinsic_matrices.shape) avg_y_axis = np.sum(extrinsic_matrices[:,0:3,1], axis=0) avg_y_axis = avg_y_axis/np.linalg.norm(avg_y_axis) - print("Consensus Y axis: ",avg_y_axis) + # Find a matrix to rotate the average y axis with the y-axis unit vector thus aligning every extrinsic to point in the same direction Rot = np.zeros((4,4)) @@ -189,8 +193,8 @@ def get_extrinsic(center_point, fp: str = "parsed_data.csv"): # Adjust extrinsic to center around the central point #center_point = 
np.average(extrinsic_matrices[:,0:3,3],axis=0) - print(center_point.shape) - print("center point ",center_point) + logger.info(center_point.shape) + logger.info("center point {}".format(center_point)) extrinsic_matrices[:,0:3,3] -= center_point # Z offset assuming cameras are never below the object @@ -199,15 +203,16 @@ def get_extrinsic(center_point, fp: str = "parsed_data.csv"): # Normalize extrinsic transformation to remain within bounding box translation_magnitudes = np.linalg.norm(extrinsic_matrices[:,0:3,3],axis=1) avg_translation_magnitude = np.average(translation_magnitudes) - print("Translation mag: ",avg_translation_magnitude) + logger.info("Translation mag: {}".format(avg_translation_magnitude)) extrinsic_matrices[:,0:3,3] /= avg_translation_magnitude # scale back up TODO: make dynamic extrinsic_matrices[:,0:3,3] *= 4 - print("Max ",extrinsic_matrices[:,0:3,3].max()) - print("Min ",extrinsic_matrices[:,0:3,3].min()) - print("avg ",np.average(extrinsic_matrices[:,0:3,3])) + logger.info("Max {}".format(extrinsic_matrices[:,0:3,3].max())) + logger.info("Min {}".format(extrinsic_matrices[:,0:3,3].min())) + logger.info("avg {}".format(np.average(extrinsic_matrices[:,0:3,3]))) + # Convert to json frames = [] @@ -251,6 +256,9 @@ def get_intrinsic(fp: str = "cameras.txt"): # COLMAP TO NDC def get_extrinsics_center(fp: str = "points3D.txt"): + # sfm-worker logger + logger = logging.getLogger('sfm-worker') + infile = open(fp, "r") lines = infile.readlines() point_count = 0 @@ -267,7 +275,7 @@ def get_extrinsics_center(fp: str = "points3D.txt"): point_count+=1 central_point /= point_count - print("Central point: ", central_point) + logger.info("Central point: {}".format(central_point)) return central_point diff --git a/colmap/video_to_images.py b/colmap/video_to_images.py index 6ac40c61..b6304215 100644 --- a/colmap/video_to_images.py +++ b/colmap/video_to_images.py @@ -2,6 +2,7 @@ import subprocess import os import sys +import logging from pathlib import Path # 
new imports @@ -44,6 +45,10 @@ def split_video_into_frames(video_path, output_path, max_frames=200): ## determines whether image is blurry or not. # uses the variance of a laplacian transform to check for edges and returns true # if the variance is less than the threshold and the video is determined to be blurry + + # Get Logger: + logger = logging.getLogger('sfm-worker') + def is_blurry(image, THRESHOLD): ## Convert image to grayscale gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) @@ -71,9 +76,8 @@ def blurriness(image): ## sample up to max frame count sample_count = min(frame_count,max_frames) - print("SAMPLE COUNT:", sample_count) + logger.info("SAMPLE COUNT {}".format(sample_count)) - #print(f"frames = {frame_count}") success, image = vidcap.read() img_height = image.shape[0] @@ -118,9 +122,6 @@ def blurriness(image): needs_adjust = False ## determines if we need to adjust aspect_ratio = img_height / img_width - #print (f"aspect ratio: {aspect_ratio}") - #print (f"img_width: {img_width}") - #print (f"img_height: {img_height}") ## adjust as necessaryx MAX_WIDTH = 200 MAX_HEIGHT = 200 @@ -142,8 +143,6 @@ def blurriness(image): else: img_height = (int) (img_height * aspect_ratio) - #print(f"new img height: {img_height}") - #print(f"new img width: {img_width}") dimensions = (img_width, img_height) @@ -157,7 +156,7 @@ def blurriness(image): if (needs_adjust == True): image = cv2.resize(image, dimensions, interpolation=cv2.INTER_LANCZOS4) cv2.imwrite(f"{output_path}/img_{count}.png", image) - print('Saved image ', count) + logger.info("Saved image {}".format(count)) success, image = vidcap.read() count += 1 @@ -205,8 +204,10 @@ def test(): print("ERROR: Unrecognized flag", sys.argv[i]) quit()""" + #Calling split_video_into_frames status = split_video_into_frames(instance_name, output_path, ffmpeg_path, video_path, wanted_frames=200) + if status == 0: print("ffmpeg ran successfully.") elif status == 1: