#!/usr/bin/env python3.6

import torch
import numpy as np
import sys, os, argparse, time
from tqdm import tqdm

print(time.asctime())
print("PID: ", os.getpid())  # you can use any application to remind you if the script breaks by checking for this PID

from dataloader.kitti_dataloader import *
from dataloader.parisLille_dataloader import *

from easypbr import *
from latticenet import ModelParams
from latticenet_py.lattice.lovasz_loss import LovaszSoftmax

from callbacks.callback import *
from callbacks.viewer_callback import *
from callbacks.visdom_callback import *
from callbacks.state_callback import *
from callbacks.phase import *

from cfgParser import *
from pathlib import Path
from seq_lattice.models import *
from datetime import datetime


class CloudReadingException(Exception):
    pass


# torch.manual_seed(0)
torch.set_printoptions(edgeitems=3)


def write_prediction(pred_softmax, cloud, pred_path):
    mesh_pred = cloud.clone()
    l_pred = pred_softmax.detach().argmax(axis=1).cpu().numpy()
    l_pred = np.expand_dims(l_pred, axis=1)
    mesh_pred.color_from_label_indices(l_pred)
    mesh_pred.L_pred = l_pred
    mesh_pred.save_to_file(pred_path)


def write_gt(cloud, gt_path):
    mesh_gt = cloud.clone()
    mesh_gt.color_from_label_indices(cloud.L_gt)
    mesh_gt.save_to_file(gt_path)


# train_border and valid_border are integers
def create_loader(dataset_name, config_parser, sequence_learning=False, shuffle=False, train_border=None, valid_border=None):
    if dataset_name == "semantickitti":
        # you can use split="test" for the test set and split="valid" for the validation set
        test_dataset = SemanticKittiDataset(split="test", config_parser=config_parser, sequence_learning=sequence_learning)
    elif dataset_name == "parislille":
        test_dataset = ParisLille3DDataset(split="test", config_parser=config_parser, sequence_learning=sequence_learning)
    else:
        err = "Dataset name not recognized. It is " + dataset_name
        sys.exit(err)

    test_sampler = list(range(len(test_dataset)))[valid_border:] if valid_border is not None else None
    test_dataloader = torch.utils.data.DataLoader(test_dataset, num_workers=8, batch_size=1, shuffle=False, sampler=test_sampler)

    return test_dataloader, test_dataset
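

# Usage sketch (for illustration only; config_parser is the cfgParser instance built in run() below,
# and passing e.g. valid_border=100 restricts evaluation to dataset indices 100 and up):
#   loader_test, dataset_test = create_loader("semantickitti", config_parser, sequence_learning=True)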


def run(dataset_name="semantickitti"):
    if dataset_name == "semantickitti":
        print("\n-------- Using SemanticKitti Dataset --------")
        config_file = "/workspace/temporal_latticenet/seq_config/lnn_eval_semantic_kitti.cfg"
        print("Config file: ", config_file)
    elif dataset_name == "parislille":
        sys.exit("Currently ParisLille3D isn't supported!")
        # the lines below are unreachable while ParisLille3D is disabled
        print("\n-------- Using ParisLille3D Dataset --------")
        config_file = "/workspace/temporal_latticenet/seq_config/lnn_eval_paris_lille.cfg"
    else:
        sys.exit("Dataset name not recognized. It is {}. Available options are semantickitti or parislille.".format(dataset_name))

    if not torch.cuda.is_available():
        sys.exit("The GPU is not available!")

    config_parser = cfgParser(config_file)
    model_params = ModelParams.create(config_file)
    model_config = config_parser.get_model_vars()
    eval_config = config_parser.get_eval_vars()
    loader_params = config_parser.get_loader_semantic_kitti_vars()
    label_mngr_params = config_parser.get_label_mngr_vars()
    lattice_gpu_config = config_parser.get_lattice_gpu_vars()
    loader_config = config_parser.get_loader_vars()

    # print some information about the run
    print("Lattice sigma: ", str(lattice_gpu_config["sigma_0"])[0:3])
    print("Sequences: #scans: {}, cloud scope: {}".format((loader_config['frames_per_seq'] if model_config["sequence_learning"] else 1), loader_config['cloud_scope']))
    print("Features: ", model_config["values_mode"])

    if eval_config["do_write_predictions"]:
        Path(eval_config["output_predictions_path"]).mkdir(parents=True, exist_ok=True)
        print("The predictions will be saved to: ", str(eval_config["output_predictions_path"]))
    # initialize all callbacks
    cb_list = []
    if eval_config["with_viewer"]:
        cb_list.append(ViewerCallback())
    cb_list.append(StateCallback())
    cb = CallbacksGroup(cb_list)

    # initialize the LabelMngr and the viewer
    m_ignore_index = label_mngr_params["unlabeled_idx"]
    labels_file = str(label_mngr_params["labels_file"])
    colorscheme_file = str(label_mngr_params["color_scheme_file"])
    frequency_file = str(label_mngr_params["frequency_file_all"]) if loader_params["include_moving_classes"] else str(label_mngr_params["frequency_file"])
    label_mngr = LabelMngr(labels_file, colorscheme_file, frequency_file, m_ignore_index)

    if eval_config["with_viewer"]:
        view = Viewer.create(config_file)
    # initialize the network model
    lattice = Lattice.create(config_file, "lattice")  # create the lattice
    model = None
    if not loader_params["include_moving_classes"] and (eval_config["dataset_name"] == "semantickitti"):
        model = LNN_SEQ(20, model_params, config_parser).to("cuda")
    elif eval_config["dataset_name"] == "semantickitti":
        # including moving classes - therefore 26 classes
        model = LNN_SEQ(26, model_params, config_parser).to("cuda")
    elif not loader_params["include_moving_classes"] and (eval_config["dataset_name"] == "parislille"):
        model = LNN_SEQ(10, model_params, config_parser).to("cuda")  # ParisLille3D has only 10 classes
    elif eval_config["dataset_name"] == "parislille":
        model = LNN_SEQ(12, model_params, config_parser).to("cuda")

    # define the loss functions (defined for completeness; the evaluation loop passes loss=0 to the callbacks)
    loss_fn, loss = LovaszSoftmax(ignore_index=m_ignore_index), None
    secondary_fn = torch.nn.NLLLoss(ignore_index=m_ignore_index)  # combination of nll and dice https://arxiv.org/pdf/1809.10486.pdf
    # create the dataloader for the test phase
    loader_test, _ = create_loader(eval_config["dataset_name"], config_parser, model_config["sequence_learning"], loader_params["shuffle"])
    phases = [
        Phase('test', loader_test, grad=False)
    ]

    nr_batches_processed, nr_epochs, first_time = 0, 0, True  # set some parameters that track the progress

    while True:
        for phase in phases:
            # cb.epoch_started(phase=phase)
            # cb.phase_started(phase=phase)
            model.train(phase.grad)
            is_training = phase.grad

            pbar = tqdm(total=len(phase.loader.dataset))
            loader_iter = phase.loader.__iter__()

            for batch_idx, (positions_seq, values_seq, target_seq, path_seq, len_seq) in enumerate(loader_iter):
                assert positions_seq is not None, "positions_seq for batch_idx {} is None!".format(batch_idx)

                for i in range(0, len(positions_seq)):
                    positions = positions_seq[i].squeeze(0).to("cuda")  # .detach().clone().to("cuda")
                    values = values_seq[i].squeeze(0).to("cuda")  # .detach().clone().to("cuda")
                    target = target_seq[i].squeeze(0).to("cuda")  # .detach().clone().to("cuda")
                    assert positions.shape[0] == target.shape[0], "Position shape {} and target shape {} have to be the same in the first dimension!".format(positions.shape[0], target.shape[0])

                    # forward pass
                    with torch.set_grad_enabled(is_training):
                        early_return = (i != len(positions_seq) - 1)
                        if i == len(positions_seq) - 1:
                            cb.before_forward_pass(lattice=lattice)  # sets the appropriate sigma for the lattice
                        pred_logsoftmax, pred_raw, lattice = model(lattice, positions, values, early_return, with_gradient=is_training)  # lattice here is ls

                        # if it's the first forward pass on the model we need to load the checkpoint here
                        if first_time and i == len(positions_seq) - 1:
                            first_time = False
                            # now that all the parameters are created we can fill them with a model from a file
                            model_path = os.path.join(eval_config["checkpoint_path"], eval_config["load_checkpoint_model"])
                            print("Loading state dict from ", model_path)
                            model.load_state_dict(torch.load(model_path))
                            model.train(phase.grad)
                            model.reset_sequence()
                            lattice = Lattice.create(config_file, "lattice")

                            # need to rerun the forward pass over the whole sequence with the loaded parameters to get an accurate prediction
                            for k in range(0, len(positions_seq)):
                                early_return = (k != len(positions_seq) - 1)
                                positions = positions_seq[k].squeeze(0).to("cuda")
                                values = values_seq[k].squeeze(0).to("cuda")
                                target = target_seq[k].squeeze(0).to("cuda")
                                pred_logsoftmax, pred_raw, lattice = model(lattice, positions, values, early_return, is_training)

                        if i == (len(positions_seq) - 1):
                            pbar.update(1)
                            cloud = create_cloud(positions, target, path_seq[-1][0], label_mngr, pred_logsoftmax)  # the viewer uses this cloud structure
                            cb.after_forward_pass(pred_softmax=pred_logsoftmax, target=target, cloud=cloud, loss=0, loss_dice=0, phase=phase, lr=0)  # visualizes the prediction
                        if eval_config["do_write_predictions"] and i == len(positions_seq) - 1:
                            # the full path in which we save the cloud depends on the data loader;
                            # for SemanticKitti we keep the sequence folder in the output path
                            # cloud_path_full = scan_path[0]
                            cloud_path_full = cloud.m_disk_path
                            # cloud_path = os.path.join(os.path.dirname(cloud_path), "../../")
                            basename = os.path.splitext(os.path.basename(cloud_path_full))[0]
                            cloud_path_base = os.path.abspath(os.path.join(os.path.dirname(cloud_path_full), "../../"))
                            cloud_path_head = os.path.relpath(cloud_path_full, cloud_path_base)
                            # print("cloud_path_head is ", cloud_path_head)
                            # print("cloud_path_head dirname ", os.path.dirname(os.path.dirname(cloud_path_head)))
                            # print("basename is ", basename)
                            path_before_file = os.path.join(eval_config["output_predictions_path"], "sequences", os.path.dirname(os.path.dirname(cloud_path_head)), "predictions")
                            os.makedirs(path_before_file, exist_ok=True)
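
                            # Example of the resulting layout (hypothetical SemanticKitti path, for illustration only):
                            #   cloud.m_disk_path = .../dataset/sequences/11/velodyne/000123.bin
                            #   -> cloud_path_head = 11/velodyne/000123.bin
                            #   -> labels are written to <output_predictions_path>/sequences/11/predictions/000123.label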
                            # write ply files that represent the prediction
                            to_save_path = os.path.join(path_before_file, basename)
                            # print("saving in ", to_save_path)
                            pred_path = to_save_path + "_pred.ply"
                            gt_path = to_save_path + "_gt.ply"
                            # print("writing prediction to ", pred_path)
                            # write_prediction(pred_logsoftmax, cloud, pred_path)
                            # write_gt(cloud, gt_path)

                            # write the labels file (a file containing the predicted label for each point)
                            l_pred = pred_logsoftmax.clone().detach().argmax(axis=1).cpu().numpy()
                            l_pred = l_pred[-1 * len_seq[-1]:]  # for the ACCUM case we only want the points of the last point cloud
                            l_pred = l_pred.reshape((-1))
                            l_pred = l_pred.astype(np.uint32)
                            # print(l_pred)
                            labels_file = os.path.join(path_before_file, (basename + ".label"))
                            # print("Saving label file here: ", labels_file)
                            l_pred.tofile(labels_file)
                            # note: the text write below overwrites the binary dump above, so the final .label file
                            # contains one label per line (this matches the sep="\n" remap note below)
                            with open(labels_file, 'w') as f:
                                for idx in range(l_pred.shape[0]):
                                    line = str(l_pred[idx]) + "\n"
                                    f.write(line)
                            ################################################################################
                            ############## IMPORTANT for the competition ##############
                            # After running this test script and getting all the .label files, you need to run the
                            # scripts from https://github.com/PRBonn/semantic-kitti-api/ with the --inverse flag and
                            # the correct config (depending on whether you use 20 classes or 26 classes with the
                            # moving objects) in order to get back the original labels; only then can you upload
                            # to the codalab server.
                            # In remap_semantic_labels.py you have to change the label loading to:
                            #   label = np.fromfile(label_file, dtype=np.uint32, sep = "\n")
                            # Example for the test set with 26 classes:
                            #   ./remap_semantic_labels.py --predictions ../temporal_latticenet/predictions/ --split test --datacfg config/semantic-kitti-all.yaml --inverse
                            # Then zip your predictions into a file submission.zip and validate it:
                            #   zip -r ../temporal_latticenet/predictions/submission.zip ../temporal_latticenet/predictions/
                            #   ./validate_submission.py --task segmentation ../temporal_latticenet/predictions/submission.zip /workspace/Data/SemanticKitti/dataset/
                            # To evaluate the valid set:
                            #   ./remap_semantic_labels.py --predictions ../temporal_latticenet/predictions/ --split valid --datacfg config/semantic-kitti-all.yaml --inverse
                            #   26 classes: ./evaluate_semantics.py --dataset /workspace/Data/SemanticKitti/dataset/ --predictions /workspace/temporal_latticenet/predictions/ --split valid -dc config/semantic-kitti-all.yaml
                            ################################################################################
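
                            # Optional sanity check before remapping/submitting (a sketch, not part of the original
                            # pipeline; assumes the text format written above):
                            #   labels_back = np.fromfile(labels_file, dtype=np.uint32, sep="\n")
                            #   assert labels_back.shape[0] == l_pred.shape[0] and (labels_back == l_pred).all()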
                            # # write a GT labels file (a file containing the ground-truth label for each point)
                            # gt = np.squeeze(cloud.L_gt)
                            # labels_file = os.path.join(path_before_file, (basename + ".gt"))
                            # with open(labels_file, 'w') as f:
                            #     for i in range(gt.shape[0]):
                            #         line = str(gt[i]) + "\n"
                            #         f.write(line)

                        cloud = None

                    # reset the hash map after each sequence
                    if i == len(positions_seq) - 1:
                        model.reset_sequence()
                        lattice = Lattice.create(config_file, "lattice")

                if batch_idx == len(loader_iter) - 1:
                    pbar.close()
                    return

                if eval_config["with_viewer"]:
                    view.update()


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Evaluate the network on a dataset.')
    parser.add_argument('--dataset', type=str, nargs="?", const="semantickitti",
                        help='the dataset name, options are semantickitti OR parislille')
    args = parser.parse_args()

    if args.dataset:
        run(args.dataset)
    else:  # when you do not give any arguments the parser just assumes you want semantickitti
        run()
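
# Example invocation (the config paths near the top of run() are assumed to match your setup):
#   python3 test_ln.py --dataset semantickitti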