Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

N miklu #96

Open
wants to merge 6 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 20 additions & 3 deletions colmap/colmap_runner.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import subprocess
import os
import sys
import logging
from pathlib import Path

#Usage: python colmap_runner.py --flags
Expand Down Expand Up @@ -39,43 +40,59 @@ def run_colmap(colmap_path, images_path, output_path):
use_gpu = "false"
Path(f"{output_path}").mkdir(parents=True, exist_ok=True)

# sfm-worker logger
logger = logging.getLogger('sfm-worker')


logger.info("run_colmap()-colmap_path: " + colmap_path)
logger.info("run_colmap()-images_path: " + images_path)
logger.info("run_colmap()-output_path: " + colmap_path)

#Creating a new database for colmap
try:
database_path = output_path + "/database.db"
subprocess.call([colmap_path, "database_creator", "--database_path", database_path])
print("Created DB")
logger.info("Created DB")
except:
logger.error("DB Creation Failed")
return 1

#Feature extracting
try:
# --SiftExtraction.use_gpu=false for docker
# TODO: make gpu use dynamic
subprocess.call([colmap_path, "feature_extractor","--ImageReader.camera_model","PINHOLE",f"--SiftExtraction.use_gpu={use_gpu}","--ImageReader.single_camera=1", "--database_path", database_path, "--image_path", images_path])
print("Features Extracted")
logger.info("Features Extracted")
except:
logger.error("Features unable to be extracted")
return 1

#Feature matching
try:
print("Feature Matching")
subprocess.call([colmap_path, "exhaustive_matcher",f"--SiftMatching.use_gpu={use_gpu}", "--database_path", database_path])
logger.info("Feature Matched")
except:
logger.error("Features unable to be matched")
return 1

#Generating model
try:
subprocess.call([colmap_path, "mapper", "--database_path", database_path, "--image_path", images_path, "--output_path", output_path])
logger.info("Model generated")
except:
logger.error("Model unable to be generated")
return 1

#Getting model as text
try:
# TODO: no longer works on windows fix file paths or run in docker
subprocess.call([colmap_path, "model_converter", "--input_path", output_path + r"/0", "--output_path", output_path, "--output_type", "TXT"])
logger.info("Model as text successful")
except:
logger.error("Model as text unsuccessful")
return 1

logger.info("run_colmap successfully executed")
return 0


Expand Down
16 changes: 16 additions & 0 deletions colmap/colmap_worker_unit_tests.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
import unittest
import colmap_worker

class colmapWorkerTest(unittest.TestCase):
    """Unit tests for the colmap worker."""

    def setUp(self):
        # No shared fixtures required yet.
        pass

    def tearDown(self):
        # No cleanup required yet.
        pass

    def test_data(self):
        # BUG FIX: the original declared `def test_data():` without `self`;
        # unittest invokes test methods as bound methods, so running the
        # suite raised "test_data() takes 0 positional arguments but 1 was
        # given". Placeholder body until real assertions are added.
        pass


if __name__ == "__main__":
    # Run the test suite when this file is executed directly.
    # BUG FIX: the original line ended with a stray '"' instead of ':'
    # (`if __name__ == "__main__""`), a SyntaxError that prevented this
    # module from being imported or run at all.
    unittest.main()
5 changes: 5 additions & 0 deletions colmap/configs/default.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
#Set local run status: whether to run the colmap worker with or without the webserver
local_run = True

#Specify input data file path used for local runs ONLY
input_data_path = data/inputs/input.mp4
2 changes: 1 addition & 1 deletion colmap/configs/local.txt
Original file line number Diff line number Diff line change
Expand Up @@ -2,4 +2,4 @@
local_run = True

#Specify input data file path used for local runs ONLY
input_data_path = data/inputs/video/input.mp4
input_data_path = data/inputs/input.mp4
24 changes: 24 additions & 0 deletions colmap/log.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
import logging

def sfm_worker_logger(name='root'):
    """Initialize (or fetch) the global sfm-worker file logger.

    To initialize use: 'logger = log.sfm_worker_logger(name)'
    To retrieve in a different context: 'logger = logging.getLogger(name)'

    Args:
        name: Logger name; records are written to '<name>.log' in the
            current working directory.

    Returns:
        A logging.Logger set to DEBUG with a single file handler attached.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    # BUG FIX: the original attached a fresh FileHandler on every call.
    # logging.getLogger(name) returns the same logger object each time,
    # so repeated initialization (e.g. main.py initializes in both the
    # worker and __main__ paths) caused every record to be written
    # multiple times. Attach the handler only once.
    if not any(isinstance(h, logging.FileHandler) for h in logger.handlers):
        formatter = logging.Formatter(
            fmt='%(asctime)s - %(levelname)s - %(module)s - %(message)s')
        handler = logging.FileHandler(name + '.log', mode='w')
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger

if __name__ == "__main__":
    # Manual smoke test: emit one record at each severity level so the
    # resulting 'sfm-worker-test.log' can be inspected by hand.
    demo_logger = sfm_worker_logger('sfm-worker-test')
    for emit, message in (
        (demo_logger.info, "info message"),
        (demo_logger.warning, "warning message"),
        (demo_logger.error, "error message"),
        (demo_logger.critical, "critical message"),
    ):
        emit(message)
54 changes: 38 additions & 16 deletions colmap/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,9 @@
import argparse
import sys

import logging
from log import sfm_worker_logger


app = Flask(__name__)
# base_url = "http://host.docker.internal:5000/"
Expand Down Expand Up @@ -44,20 +47,24 @@ def run_full_sfm_pipeline(id, video_file_path, input_data_dir, output_data_dir):
output_path = output_data_dir + id
Path(f"{output_path}").mkdir(parents=True, exist_ok=True)

# Get logger
logger = logging.getLogger('sfm-worker')

# (1) vid_to_images.py
imgs_folder = os.path.join(output_path, "imgs")
print(video_file_path)

split_video_into_frames(video_file_path, imgs_folder, 100)
logger.info("Video file path:{}".format(video_file_path))

split_video_into_frames(video_file_path, imgs_folder,100)
# imgs are now in output_data_dir/id

# (2) colmap_runner.py
colmap_path = "/usr/local/bin/colmap"
status = run_colmap(colmap_path, imgs_folder, output_path)
if status == 0:
print("COLMAP ran successfully.")
logger.info("COLMAP ran successfully.")
elif status == 1:
print("ERROR: There was an unknown error running COLMAP")
logger.info("ERROR: There was an unknown error running COLMAP")

# (3) matrix.py
initial_motion_path = os.path.join(output_path, "images.txt")
Expand All @@ -81,6 +88,7 @@ def colmap_worker():
Path(f"{input_data_dir}").mkdir(parents=True, exist_ok=True)
Path(f"{output_data_dir}").mkdir(parents=True, exist_ok=True)

logger = sfm_worker_logger('sfm-worker')

rabbitmq_domain = "rabbitmq"
credentials = pika.PlainCredentials("admin", "password123")
Expand All @@ -94,19 +102,26 @@ def colmap_worker():
channel.queue_declare(queue="sfm-out")

def process_colmap_job(ch, method, properties, body):
print("Starting New Job")
print(body.decode())
logger = logging.getLogger('sfm-worker')

logger.info("Starting New Job")
logger.info(body.decode())
job_data = json.loads(body.decode())
id = job_data["id"]
print(f"Running New Job With ID: {id}")

logger.info(f"Running New Job With ID: {id}")


# TODO: Handle exceptions and enable steaming to make safer
video = requests.get(job_data["file_path"], timeout=10)
print("Web server pinged")

logger.info("Web server pinged")
video_file_path = f"{input_data_dir}{id}.mp4"
print("Saving video to: {video_file_path}")

logger.info("Saving video to: {video_file_path}")
open(video_file_path, "wb").write(video.content)
print("Video downloaded")

logger.info("Video downloaded")

# RUNS COLMAP AND CONVERSION CODE
motion_data, imgs_folder = run_full_sfm_pipeline(
Expand All @@ -127,29 +142,36 @@ def process_colmap_job(ch, method, properties, body):

# confirm to rabbitmq job is done
ch.basic_ack(delivery_tag=method.delivery_tag)
print("Job complete")

logger.info("Job complete")

channel.basic_qos(prefetch_count=1)
channel.basic_consume(queue="sfm-in", on_message_callback=process_colmap_job)
channel.start_consuming()
print("should not get here")

logger.critical("should not get here")

if __name__ == "__main__":
print("~SFM WORKER~")
"""
LOGGER IS ASSUMED TO BE RUN LOCALLY!!!
"""
logger = sfm_worker_logger('sfm-worker')
logger.info("~SFM WORKER~")

input_data_dir = "data/inputs/"
output_data_dir = "data/outputs/"
Path(f"{input_data_dir}").mkdir(parents=True, exist_ok=True)
Path(f"{output_data_dir}").mkdir(parents=True, exist_ok=True)


# Load args from config file
args = config_parser()

# Local run behavior
if args.local_run == True:
motion_data, imgs_folder = run_full_sfm_pipeline(
"Local_Test", args.input_data_path, input_data_dir, output_data_dir
)
print(motion_data)
"Local_Test", args.input_data_path, input_data_dir, output_data_dir)
logger.info(motion_data)
json_motion_data = json.dumps(motion_data)

# Standard webserver run behavior
Expand Down
26 changes: 17 additions & 9 deletions colmap/matrix.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
import image_position_extractor
import json
import os
import logging


# https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles
Expand Down Expand Up @@ -122,6 +123,8 @@ def rotation_matrix_from_vectors(vec1, vec2):

def get_extrinsic(center_point, fp: str = "parsed_data.csv"):

# sfm-worker logger
logger = logging.getLogger('sfm-worker')

# contrains filepath and extrinsic matrix
filepaths = []
Expand Down Expand Up @@ -173,10 +176,11 @@ def get_extrinsic(center_point, fp: str = "parsed_data.csv"):

# stack all extrinsic to perform faster transformations to the whole stack
extrinsic_matrices = np.stack(extrinsic_matrices,axis=0)
print(extrinsic_matrices.shape)

logger.info(extrinsic_matrices.shape)
avg_y_axis = np.sum(extrinsic_matrices[:,0:3,1], axis=0)
avg_y_axis = avg_y_axis/np.linalg.norm(avg_y_axis)
print("Consensus Y axis: ",avg_y_axis)


# Find a matrix to rotate the average y axis with the y-axis unit vector thus aligning every extrinsic to point in the same direction
Rot = np.zeros((4,4))
Expand All @@ -189,8 +193,8 @@ def get_extrinsic(center_point, fp: str = "parsed_data.csv"):

# Adjust extrinsic to center around the central point
#center_point = np.average(extrinsic_matrices[:,0:3,3],axis=0)
print(center_point.shape)
print("center point ",center_point)
logger.info(center_point.shape)
logger.info("center point {}".format(center_point))
extrinsic_matrices[:,0:3,3] -= center_point

# Z offset assuming cameras are never below the object
Expand All @@ -199,15 +203,16 @@ def get_extrinsic(center_point, fp: str = "parsed_data.csv"):
# Normalize extrinsic transformation to remain within bounding box
translation_magnitudes = np.linalg.norm(extrinsic_matrices[:,0:3,3],axis=1)
avg_translation_magnitude = np.average(translation_magnitudes)
print("Translation mag: ",avg_translation_magnitude)
logger.info("Translation mag: {}".format(avg_translation_magnitude))
extrinsic_matrices[:,0:3,3] /= avg_translation_magnitude

# scale back up TODO: make dynamic
extrinsic_matrices[:,0:3,3] *= 4

print("Max ",extrinsic_matrices[:,0:3,3].max())
print("Min ",extrinsic_matrices[:,0:3,3].min())
print("avg ",np.average(extrinsic_matrices[:,0:3,3]))
logger.info("Max {}".format(extrinsic_matrices[:,0:3,3].max()))
logger.info("Min {}".format(extrinsic_matrices[:,0:3,3].min()))
logger.info("avg {}".format(np.average(extrinsic_matrices[:,0:3,3])))


# Convert to json
frames = []
Expand Down Expand Up @@ -251,6 +256,9 @@ def get_intrinsic(fp: str = "cameras.txt"):

# COLMAP TO NDC
def get_extrinsics_center(fp: str = "points3D.txt"):
# sfm-worker logger
logger = logging.getLogger('sfm-worker')

infile = open(fp, "r")
lines = infile.readlines()
point_count = 0
Expand All @@ -267,7 +275,7 @@ def get_extrinsics_center(fp: str = "points3D.txt"):
point_count+=1

central_point /= point_count
print("Central point: ", central_point)
logger.info("Central point: {}".format(central_point))
return central_point


Expand Down
Loading