Commit 2a4920b: initial commit
thuanaislab committed Sep 20, 2024
1 parent 6509c13 commit 2a4920b
Showing 23 changed files with 289 additions and 95 deletions.
11 changes: 4 additions & 7 deletions .gitignore
@@ -1,17 +1,14 @@
datasets/gt_3Dmodels/
datasets/imgs_datasets/
datasets/__pycache__/
detectors/line2d/__pycache__/
detectors/line2d/DeepLSD/__pycache__/
detectors/line2d/LSD/__pycache__/
detectors/point2d/__pycache__/
detectors/point2d/SuperPoint/__pycache__/
detectors/point2d/SuperPoint/weights/
util/__pycache__/
visualization/
visualization_all/
logs/
pre_train_logs/
experiments/
__pycache__/
train_test_datasets/
train_test_datasets_origin/

*.npy
*.png
6 changes: 6 additions & 0 deletions .gitmodules
@@ -0,0 +1,6 @@
[submodule "third_party/DeepLSD"]
path = third_party/DeepLSD
url = https://github.com/cvg/DeepLSD.git
[submodule "third_party/pytlsd"]
path = third_party/pytlsd
url = https://github.com/iago-suarez/pytlsd.git
57 changes: 57 additions & 0 deletions README.md
@@ -16,3 +16,60 @@ git submodule update --init --recursive
python -m pip install torch==1.12.0 torchvision==0.13.0
python -m pip install -r requirements.txt
```
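If you want to confirm that the pinned versions took effect, here is a quick optional check (the version strings come from the install command above):
```python
# Optional sanity check for the pinned install above.
import torch
import torchvision

print(torch.__version__, torchvision.__version__)
assert torch.__version__.startswith("1.12"), "expected torch 1.12.x"
assert torchvision.__version__.startswith("0.13"), "expected torchvision 0.13.x"
```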
## Datasets
- [Microsoft 7scenes](https://www.microsoft.com/en-us/research/project/rgb-d-dataset-7-scenes/)
- [Cambridge Landmarks](https://www.repository.cam.ac.uk/handle/1810/251342/)
- [Indoor-6](https://github.com/microsoft/SceneLandmarkLocalization)

Please download and prepare the datasets (the images are already undistorted) by running the provided scripts:
- 7scenes:
```
./prepare_scripts/seven_scenes.sh
```
- Cambridge Landmarks:
```
./prepare_scripts/cambridge.sh
```
- Indoor-6:
```
./prepare_scripts/indoor6.sh
```

## Evaluation with pre-trained models
Please download the pre-trained models by running:
```
./prepare_scripts/download_pre_trained_models.sh
```
For example, to evaluate the KingsCollege scene:
```
python runners/eval.py --dataset Cambridge --scene KingsCollege -expv pl2map
```
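To sweep several scenes with the same pre-trained models, a small wrapper can call `runners/eval.py` repeatedly. A minimal sketch, assuming the standard Cambridge Landmarks scene names (adjust the list to the scenes you actually prepared):
```python
# sweep_eval.py: illustrative sketch, not part of the repository.
# The scene names below are assumed from the standard Cambridge Landmarks split.
import subprocess

scenes = ["KingsCollege", "OldHospital", "ShopFacade", "StMarysChurch"]
for scene in scenes:
    subprocess.run(
        ["python", "runners/eval.py",
         "--dataset", "Cambridge", "--scene", scene,
         "-expv", "pl2map"],
        check=True,  # abort the sweep if one evaluation fails
    )
```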

## Training
```
python runners/train.py --dataset Cambridge --scene KingsCollege -expv pl2map_test
```

## Detectors
### Lines
- [LSD](https://github.com/iago-suarez/pytlsd)
- [DeepLSD](https://github.com/cvg/DeepLSD)
### Points
- [SuperPoint](https://github.com/rpautrat/SuperPoint)


## Citation
If you use this code in your project, please consider citing the following paper:
```bibtex
@article{bui2024representing,
  title={Representing 3D sparse map points and lines for camera relocalization},
  author={Bui, Bach-Thuan and Bui, Huy-Hoang and Tran, Dinh-Tuan and Lee, Joo-Ho},
  journal={arXiv preprint arXiv:2402.18011},
  year={2024}
}
```

## Acknowledgement
This code is built on top of [Limap](https://github.com/cvg/limap). We thank the authors for releasing their source code.


4 changes: 2 additions & 2 deletions cfgs/7scenes.yaml
@@ -24,14 +24,14 @@ point2d:
matcher: "NN-superpoint" # ["superglue", "gluestick"] # not implemented (for unlabeled learning)

regressor:
name: pl2map # ["pl2map", "d2s"]
name: pl2map # ["pl2map", or others]
use_line: True
use_point: True
n_line_keypoints: 10 # number of keypoints used to represent a line

train: # train configs
batch_size: 1
num_iters: 2500000 # number training iterations
num_iters: 1500000 # number training iterations
loader_shuffle: True
loader_num_workers: 8
log_interval: 500 # log every n batches (visdom graph)
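For context on the `n_line_keypoints` option in the config above: each 2D line segment is represented by a fixed number of keypoints. A minimal sketch of uniform sampling along a segment (illustrative only; `sample_line_keypoints` is a hypothetical helper, and the repository's actual strategy may differ):
```python
# Illustrative sketch only: one plausible way to represent a 2D line segment
# by a fixed number of keypoints, as the `n_line_keypoints` option suggests.
import numpy as np

def sample_line_keypoints(p0, p1, n=10):
    """Return n points uniformly spaced between endpoints p0 and p1."""
    t = np.linspace(0.0, 1.0, n)[:, None]        # (n, 1) interpolation weights
    p0, p1 = np.asarray(p0, float), np.asarray(p1, float)
    return (1.0 - t) * p0 + t * p1               # (n, 2) points on the segment

keypoints = sample_line_keypoints([0.0, 0.0], [8.0, 6.0], n=10)
print(keypoints.shape)  # (10, 2)
```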
18 changes: 2 additions & 16 deletions cfgs/Cambridge.yaml
@@ -77,20 +77,6 @@ optimizer:
  num_lr_decay_step: 7 # decay every n epochs, 7

localization:
  2d_matcher: "sold2"
  epipolar_filter: False
  IoU_threshold: 0.2
  reprojection_filter: null
  ransac:
    method: "hybrid" # [null, "hybrid"] null for pose refinement, hybrid for pose estimation
    thres: 10 # ??
    thres_point: 10 # ??
    thres_line: 10 # ??
    weight_point: 1.0 # data type weights for scoring
    weight_line: 1.0 # data type weights for scoring
    final_least_squares: True
    min_num_iterations: 100
  optimize:
    loss_func: "HuberLoss"
    loss_func_args: [1.0]
    line_cost_func: "PerpendicularDist"
  max_reproj_error: 12.0
  max_epipolar_error: 10.0
2 changes: 1 addition & 1 deletion cfgs/indoor6_UnDis.yaml → cfgs/indoor6.yaml
@@ -31,7 +31,7 @@ regressor:

train: # train configs
  batch_size: 1
-  num_iters: 2500000 # number training iterations
+  num_iters: 1500000 # number training iterations
  loader_shuffle: True
  loader_num_workers: 8
  log_interval: 500 # log every n batches (visdom graph)
3 changes: 0 additions & 3 deletions datasets/_base.py
@@ -281,9 +281,6 @@ def get_image_path(self, image_name):
        '''
        Return a path to image
        '''
-        if self.args.dataset == "indoor6":
-            img_path = os.path.join(self.args.dataset_dir, self.args.dataset, self.args.scene, 'rgb', image_name)
-            return img_path
        img_path = os.path.join(self.args.dataset_dir, self.args.dataset, self.args.scene, image_name)
        return img_path

3 changes: 1 addition & 2 deletions datasets/data_collection.py
@@ -31,9 +31,8 @@ def __init__(self, args:dict, cfg:dict, mode="train")->None:

    def load_all_2Dpoints_by_dataset(self, dataset):
        if dataset == "7scenes":
-            self.gt_3Dmodels_path = self.args.sfm_dir / f"{self.args.dataset}_dslamGT/{self.args.scene}"
            self.load_all_2Dpoints_7scenes()
-        elif dataset == "Cambridge" or dataset == "indoor6_UnDis":
+        elif dataset == "Cambridge" or dataset == "indoor6":
            self.load_all_2Dpoints_Cambridge()
        else:
            raise NotImplementedError
5 changes: 1 addition & 4 deletions datasets/dataloader.py
@@ -18,10 +18,7 @@ def __init__(self, args, cfg, mode="train"):
            self.image_list = self.DataCol.test_imgs
        else:
            raise ValueError("Error! Mode {0} not supported.".format(mode))
-        # sort image_list
-        # self.image_list = sorted(self.image_list)
-        # # create new image_list with uniform sample of only 30 images
-        # self.image_list = self.image_list[::int(len(self.image_list)/100)]

    def __len__(self):
        return len(self.image_list)

2 changes: 1 addition & 1 deletion datasets/test.py
@@ -18,7 +18,7 @@ def parse_config():
    arg_parser.add_argument('--use_depth', type=int, default=0, choices=[0,1], help='use SfM corrected by depth or not')
    arg_parser.add_argument('-o','--outputs', type=Path, default='logs/',
                            help='Path to the output directory, default: %(default)s')
-    arg_parser.add_argument('-expv', '--experiment_version', type=str, default="00_00_00", help='experiment version folder')
+    arg_parser.add_argument('-expv', '--experiment_version', type=str, default="pl2map", help='experiment version folder')
    args, _ = arg_parser.parse_known_args()
    args.outputs = os.path.join(args.outputs, args.scene + "_" + args.experiment_version)
    print("Dataset: {} | Scene: {}".format(args.dataset, args.scene))
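As the `os.path.join` line above shows, logs are grouped per scene and experiment version. A worked example of how the output directory resolves (the default `logs/` output plus a sample scene):
```python
# Worked example of the output-path composition in datasets/test.py.
import os

outputs, scene, expv = "logs/", "KingsCollege", "pl2map"
print(os.path.join(outputs, scene + "_" + expv))  # -> logs/KingsCollege_pl2map
```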
34 changes: 0 additions & 34 deletions models/util_learner.py
@@ -52,40 +52,6 @@ def forward(self, pred, target, iteration=2000000):
        points_proj_loss = self.zero if (isinstance(points_proj_loss, int) or isinstance(points_proj_loss, float)) else points_proj_loss
        lines_proj_loss = self.zero if (isinstance(lines_proj_loss, int) or isinstance(lines_proj_loss, float)) else lines_proj_loss
        return total_loss, loss_points, uncer_loss_points, loss_lines, uncer_loss_lines, points_proj_loss, lines_proj_loss
-
-class CriterionPoint(nn.Module):
-    '''
-    # original implementation from https://arxiv.org/abs/2307.15250
-    Criterion for point only'''
-    def __init__(self, rpj_cfg, total_iterations=2000000):
-        super(CriterionPoint, self).__init__()
-        self.rpj_cfg = rpj_cfg
-        self.reprojection_loss = ReproLoss(total_iterations, self.rpj_cfg.soft_clamp,
-                                           self.rpj_cfg.soft_clamp_min, self.rpj_cfg.type,
-                                           self.rpj_cfg.circle_schedule)
-        self.zero = fakezero()
-    def forward(self, pred, target, iteration=2000000):
-        batch_size, _, _ = pred['points3D'].shape
-        validPoints = target["validPoints"]
-        # get losses for points
-        square_errors_points = torch.norm((pred['points3D'][:,:3,:] - target["points3D"]), dim = 1)
-        loss_points = torch.sum(validPoints*square_errors_points)/batch_size
-        uncer_loss_points = torch.sum(torch.norm(validPoints - 1/(1+100*torch.abs(pred['points3D'][:,3,:])), dim = 1))/batch_size
-
-        # get projection losses for points
-        points_proj_loss = 0
-
-        if self.rpj_cfg.apply:
-            # get projection losses for points
-            for i in range(batch_size): # default batch_size = 1
-                prp_error, prp = project_loss_points(pred['keypoints'][i,:,:], pred['points3D'][i,:3,:],
-                                                     target['pose'][i,:], target['camera'][i,:], validPoints[i,:])
-                points_proj_loss += self.reprojection_loss.compute_point(prp_error, prp, iteration, validPoints[i,:])
-            points_proj_loss = points_proj_loss / batch_size
-
-        total_loss = loss_points + uncer_loss_points + points_proj_loss
-        points_proj_loss = self.zero if (isinstance(points_proj_loss, int) or isinstance(points_proj_loss, float)) else points_proj_loss
-        return total_loss, loss_points, uncer_loss_points, self.zero, self.zero, points_proj_loss, self.zero


class fakezero(object):
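For readers tracing the removed `CriterionPoint`: its two point terms (the masked regression loss and the uncertainty loss) also appear in the combined criterion kept above. Below is a toy reproduction on random tensors; the shapes follow the removed code, and everything else is made up for illustration:
```python
# Toy reproduction of the masked point loss and uncertainty term from the
# removed CriterionPoint. Shapes follow the diff: pred (B, 4, N) holds xyz
# plus an uncertainty channel; target (B, 3, N) holds ground-truth points.
import torch

B, N = 1, 5
pred = torch.randn(B, 4, N)   # rows 0..2: xyz, row 3: uncertainty
target = torch.randn(B, 3, N)
valid = torch.ones(B, N)      # 1 where a ground-truth 3D point exists

err = torch.norm(pred[:, :3, :] - target, dim=1)   # (B, N) per-point L2 error
loss_points = torch.sum(valid * err) / B           # masked regression loss

# Predicted confidence in (0, 1]; trained to match `valid`.
conf = 1.0 / (1.0 + 100.0 * torch.abs(pred[:, 3, :]))
uncer_loss = torch.sum(torch.norm(valid - conf, dim=1)) / B
print(loss_points.item(), uncer_loss.item())
```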
42 changes: 42 additions & 0 deletions prepare_scripts/cambridge.sh
@@ -0,0 +1,42 @@
# Description: Prepare the directory structure for the Cambridge Landmarks dataset

if [ ! -d "train_test_datasets" ]; then
    mkdir train_test_datasets
fi

if [ ! -d "train_test_datasets/gt_3Dmodels" ]; then
    mkdir train_test_datasets/gt_3Dmodels
fi

if [ ! -d "train_test_datasets/imgs_datasets" ]; then
    mkdir train_test_datasets/imgs_datasets
fi

TARGET_FOLDER="train_test_datasets/gt_3Dmodels"
OUTPUT_FILE="Cambridge.zip"
FILE_ID="19LRQ5j9I4YdrUykkoavcRTR6ekygU5iU"

# Download the file from Google Drive using gdown and save it in the target folder
gdown --id $FILE_ID -O $TARGET_FOLDER/$OUTPUT_FILE

# Unzip the downloaded file in the target folder
unzip $TARGET_FOLDER/$OUTPUT_FILE -d $TARGET_FOLDER

# Remove the zip file after extraction
rm $TARGET_FOLDER/$OUTPUT_FILE

echo "Download, extraction, and cleanup completed in $TARGET_FOLDER."

TARGET_FOLDER="train_test_datasets/imgs_datasets"
FILE_ID="1MZyLPu9Z7tKCeuM4DchseoX4STIhKyi7"

# Download the file from Google Drive using gdown and save it in the target folder
gdown --id $FILE_ID -O $TARGET_FOLDER/$OUTPUT_FILE

# Unzip the downloaded file in the target folder
unzip $TARGET_FOLDER/$OUTPUT_FILE -d $TARGET_FOLDER

# Remove the zip file after extraction
rm $TARGET_FOLDER/$OUTPUT_FILE

echo "Download, extraction, and cleanup completed in $TARGET_FOLDER."
13 changes: 13 additions & 0 deletions prepare_scripts/download_pre_trained_models.sh
@@ -0,0 +1,13 @@
OUTPUT_FILE="logs.zip"
FILE_ID="1iH8PfqgPPQod0q_I8T_ZSO_mSj5XRUuO"

# Download the file from Google Drive using gdown and save it in the target folder
gdown --id $FILE_ID -O $OUTPUT_FILE

# Unzip the downloaded file in the target folder
unzip $OUTPUT_FILE

# Remove the zip file after extraction
rm $OUTPUT_FILE

echo "Download, extraction, and cleanup completed."
42 changes: 42 additions & 0 deletions prepare_scripts/indoor6.sh
@@ -0,0 +1,42 @@
# Description: Prepare the directory structure for the indoor6 dataset

if [ ! -d "train_test_datasets" ]; then
    mkdir train_test_datasets
fi

if [ ! -d "train_test_datasets/gt_3Dmodels" ]; then
    mkdir train_test_datasets/gt_3Dmodels
fi

if [ ! -d "train_test_datasets/imgs_datasets" ]; then
    mkdir train_test_datasets/imgs_datasets
fi

TARGET_FOLDER="train_test_datasets/gt_3Dmodels"
OUTPUT_FILE="indoor6.zip"
FILE_ID="1q28Tkldc--ucD4l7q15RDVsuZ7IN3CEV"

# Download the file from Google Drive using gdown and save it in the target folder
gdown --id $FILE_ID -O $TARGET_FOLDER/$OUTPUT_FILE

# Unzip the downloaded file in the target folder
unzip $TARGET_FOLDER/$OUTPUT_FILE -d $TARGET_FOLDER

# Remove the zip file after extraction
rm $TARGET_FOLDER/$OUTPUT_FILE

echo "Download, extraction, and cleanup completed in $TARGET_FOLDER."

TARGET_FOLDER="train_test_datasets/imgs_datasets"
FILE_ID="1kzLPt7LuVJIqKrJMYSFicJ231KDDJxVh"

# Download the file from Google Drive using gdown and save it in the target folder
gdown --id $FILE_ID -O $TARGET_FOLDER/$OUTPUT_FILE

# Unzip the downloaded file in the target folder
unzip $TARGET_FOLDER/$OUTPUT_FILE -d $TARGET_FOLDER

# Remove the zip file after extraction
rm $TARGET_FOLDER/$OUTPUT_FILE

echo "Download, extraction, and cleanup completed in $TARGET_FOLDER."
64 changes: 64 additions & 0 deletions prepare_scripts/seven_scenes.sh
@@ -0,0 +1,64 @@
# Description: Prepare the directory structure for the 7-Scenes dataset

if [ ! -d "train_test_datasets" ]; then
    mkdir train_test_datasets
fi

if [ ! -d "train_test_datasets/gt_3Dmodels" ]; then
    mkdir train_test_datasets/gt_3Dmodels
fi

if [ ! -d "train_test_datasets/imgs_datasets" ]; then
    mkdir train_test_datasets/imgs_datasets
fi

TARGET_FOLDER="train_test_datasets/gt_3Dmodels"
OUTPUT_FILE="7scenes.zip"
FILE_ID="1X8_tV0Y4b_W-vPgeXKoqtFaDCQ5_csL3"

# Download the file from Google Drive using gdown and save it in the target folder
gdown --id $FILE_ID -O $TARGET_FOLDER/$OUTPUT_FILE

# Unzip the downloaded file in the target folder
unzip $TARGET_FOLDER/$OUTPUT_FILE -d $TARGET_FOLDER

# Remove the zip file after extraction
rm $TARGET_FOLDER/$OUTPUT_FILE

echo "Download, extraction, and cleanup completed in $TARGET_FOLDER."


cd train_test_datasets/imgs_datasets
mkdir 7scenes
cd 7scenes

# List of datasets
datasets=("chess" "fire" "heads" "office" "pumpkin" "redkitchen" "stairs")

# Loop through each dataset
for ds in "${datasets[@]}"; do
    # Check if the dataset directory exists
    if [ ! -d "$ds" ]; then
        echo "=== Downloading 7scenes Data: $ds ==============================="

        # Download the dataset zip file
        wget "http://download.microsoft.com/download/2/8/5/28564B23-0828-408F-8631-23B1EFF1DAC8/$ds.zip"

        # Unzip the dataset
        unzip "$ds.zip"

        # Remove the zip file
        rm "$ds.zip"

        # Loop through the dataset folder and unzip any additional zip files
        for file in "$ds"/*.zip; do
            if [ -f "$file" ]; then
                echo "Unpacking $file"
                unzip "$file" -d "$ds"
                rm "$file"
            fi
        done
    else
        echo "Found data of scene $ds already. Assuming it's complete and skipping download."
    fi
done