""" run_soccernet_mot.py
Run example:
python run_soccernet_mot.py \
--BENCHMARK SNMOT \
--DO_PREPROC False \
--SEQMAP_FILE tools/SNMOT-test.txt \
--TRACKERS_TO_EVAL test \
--SPLIT_TO_EVAL test \
--OUTPUT_SUB_FOLDER eval_results \
--TRACKERS_FOLDER_ZIP soccernet_mot_results.zip \
--GT_FOLDER_ZIP gt.zip
run_soccernet_mot.py --USE_PARALLEL False --METRICS Hota --TRACKERS_TO_EVAL Lif_T
Command Line Arguments: Defaults, # Comments
Eval arguments:
'USE_PARALLEL': False,
'NUM_PARALLEL_CORES': 8,
'BREAK_ON_ERROR': True,
'PRINT_RESULTS': True,
'PRINT_ONLY_COMBINED': False,
'PRINT_CONFIG': True,
'TIME_PROGRESS': True,
'OUTPUT_SUMMARY': True,
'OUTPUT_DETAILED': True,
'PLOT_CURVES': True,
Dataset arguments:
'GT_FOLDER': os.path.join(code_path, 'data/gt/mot_challenge/'), # Location of GT data
'TRACKERS_FOLDER': os.path.join(code_path, 'data/trackers/mot_challenge/'), # Trackers location
'OUTPUT_FOLDER': None, # Where to save eval results (if None, same as TRACKERS_FOLDER)
'TRACKERS_TO_EVAL': None, # Filenames of trackers to eval (if None, all in folder)
'CLASSES_TO_EVAL': ['pedestrian'], # Valid: ['pedestrian']
'BENCHMARK': 'MOT17', # Valid: 'MOT17', 'MOT16', 'MOT20', 'MOT15'
'SPLIT_TO_EVAL': 'train', # Valid: 'train', 'test', 'all'
'INPUT_AS_ZIP': False, # Whether tracker input files are zipped
'PRINT_CONFIG': True, # Whether to print current config
'DO_PREPROC': True, # Whether to perform preprocessing (never done for 2D_MOT_2015)
'TRACKER_SUB_FOLDER': 'data', # Tracker files are in TRACKER_FOLDER/tracker_name/TRACKER_SUB_FOLDER
'OUTPUT_SUB_FOLDER': '', # Output files are saved in OUTPUT_FOLDER/tracker_name/OUTPUT_SUB_FOLDER
Metric arguments:
'METRICS': ['HOTA', 'CLEAR', 'Identity', 'VACE']
"""
import sys
import os
import argparse
from multiprocessing import freeze_support
import zipfile
import shutil
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import trackeval # noqa: E402
if __name__ == '__main__':
    freeze_support()

    # Command line interface:
    default_eval_config = trackeval.Evaluator.get_default_eval_config()
    default_eval_config['DISPLAY_LESS_PROGRESS'] = False
    default_dataset_config = trackeval.datasets.MotChallenge2DBox.get_default_dataset_config()
    default_metrics_config = {'METRICS': ['HOTA', 'CLEAR', 'Identity'], 'THRESHOLD': 0.5}
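    # THRESHOLD is the similarity threshold used by the CLEAR and Identity
    # metrics when matching predictions to ground truth; 0.5 IoU is the
    # MOT-Challenge convention (HOTA integrates over a range of thresholds).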
    config = {**default_eval_config, **default_dataset_config, **default_metrics_config}  # Merge default configs
    parser = argparse.ArgumentParser()
    for setting in config.keys():
        if isinstance(config[setting], list) or config[setting] is None:
            parser.add_argument("--" + setting, nargs='+')
        else:
            parser.add_argument("--" + setting)
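    # Settings whose default is a list (or None) accept multiple values,
    # e.g. --METRICS HOTA Identity; everything else takes a single value,
    # e.g. --USE_PARALLEL True.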
    parser.add_argument('--TRACKERS_FOLDER_ZIP', type=str, default='')
    parser.add_argument('--GT_FOLDER_ZIP', type=str, default='')
    args = parser.parse_args()
    # Extract the result/GT archives into a temporary folder and point the
    # evaluator at it. Both archives are required.
    assert len(args.TRACKERS_FOLDER_ZIP) > 0, 'TRACKERS_FOLDER_ZIP must be provided'
    assert len(args.GT_FOLDER_ZIP) > 0, 'GT_FOLDER_ZIP must be provided'
    shutil.rmtree('./temp', ignore_errors=True)  # start clean so re-runs don't collide with stale data
    os.makedirs('./temp/gt')
    os.makedirs('./temp/SNMOT-test/test/data')
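    # Assumed archive contents (not checked here): the tracker zip holds one
    # <seq>.txt result file per sequence, and the GT zip holds a 'test/' (or
    # 'test-evalAI/') folder containing one <seq>/gt/gt.txt per sequence.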
    with zipfile.ZipFile(args.TRACKERS_FOLDER_ZIP, 'r') as zip_ref:
        zip_ref.extractall('./temp/SNMOT-test/test/data')
    with zipfile.ZipFile(args.GT_FOLDER_ZIP, 'r') as zip_ref:
        zip_ref.extractall('./temp/gt/SNMOT-test_0')
    # Some GT archives name the split 'test-evalAI'; normalise it to 'test'
    if os.path.exists('./temp/gt/SNMOT-test_0/test-evalAI/'):
        shutil.move('./temp/gt/SNMOT-test_0/test-evalAI/', './temp/gt/SNMOT-test_0/test/')
    shutil.move('./temp/gt/SNMOT-test_0/test/', './temp/gt/SNMOT-test/')
    args.TRACKERS_FOLDER = './temp'
    args.GT_FOLDER = './temp/gt'
    args = args.__dict__
    if args['SEQMAP_FILE'] is not None:
        args['SEQMAP_FILE'] = args['SEQMAP_FILE'][0]  # nargs='+' wraps the path in a list
    args.pop('TRACKERS_FOLDER_ZIP', None)
    args.pop('GT_FOLDER_ZIP', None)
    # Overwrite defaults with any values given on the command line, casting
    # each string argument to the type of the corresponding default.
    for setting in args.keys():
        if args[setting] is not None:
            if isinstance(config[setting], bool):
                if args[setting] == 'True':
                    x = True
                elif args[setting] == 'False':
                    x = False
                else:
                    raise Exception('Command line parameter ' + setting + ' must be True or False')
            elif isinstance(config[setting], int):
                x = int(args[setting])
            elif setting == 'SEQ_INFO':
                x = dict(zip(args[setting], [None] * len(args[setting])))
            else:
                x = args[setting]
            config[setting] = x
    eval_config = {k: v for k, v in config.items() if k in default_eval_config.keys()}
    dataset_config = {k: v for k, v in config.items() if k in default_dataset_config.keys()}
    metrics_config = {k: v for k, v in config.items() if k in default_metrics_config.keys()}

    # Run code
    evaluator = trackeval.Evaluator(eval_config)
    dataset_list = [trackeval.datasets.MotChallenge2DBox(dataset_config)]
    metrics_list = []
    for metric in [trackeval.metrics.HOTA, trackeval.metrics.CLEAR, trackeval.metrics.Identity, trackeval.metrics.VACE]:
        if metric.get_name() in metrics_config['METRICS']:
            metrics_list.append(metric(metrics_config))
    if len(metrics_list) == 0:
        raise Exception('No metrics selected for evaluation')
    evaluator.evaluate(dataset_list, metrics_list)
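    # With the example flags above, results should land under
    # ./temp/SNMOT-test/test/eval_results: OUTPUT_FOLDER defaults to
    # TRACKERS_FOLDER, plus the tracker name and OUTPUT_SUB_FOLDER.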