-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathtest_open_vocab.py
executable file
·53 lines (48 loc) · 1.79 KB
/
test_open_vocab.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
import os
import argparse
from gorilla.config import Config
from os.path import join as opj
from utils import *
import torch
from torch_cluster import fps
# Argument Parser
def parse_args():
    """Build and run the CLI parser for the open-vocabulary test script.

    Returns:
        argparse.Namespace with attributes:
            config     -- path to the config file (str or None)
            checkpoint -- path to the saved model weights (str or None)
            gpu        -- GPU id string, defaults to None
    """
    parser = argparse.ArgumentParser(description="Test model on unseen affordances")
    parser.add_argument("--config", help="config file path")
    parser.add_argument("--checkpoint", help="the dir to saved model")
    parser.add_argument("--gpu", type=str, default=None, help="Number of gpus to use")
    return parser.parse_args()
if __name__ == "__main__":
    # Evaluate a trained model on unseen (open-vocabulary) affordances.
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # Results are appended to a per-model log file inside the work dir.
    logger = IOStream(opj(cfg.work_dir, 'result_' + cfg.model.type + '.log'))
    if cfg.get('seed', None) is not None:
        set_random_seed(cfg.seed)
        logger.cprint('Set seed to %d' % cfg.seed)
    # A --gpu flag on the command line overrides the config value.
    if args.gpu is not None:
        cfg.training_cfg.gpu = args.gpu
    # BUG FIX: this was hard-coded to the string 'cuda', which is not a valid
    # device index and effectively hides every GPU from CUDA. Use the
    # configured GPU id (as originally intended per the old inline comment).
    os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg.training_cfg.gpu)
    model = build_model(cfg).cuda()
    if args.checkpoint is None:
        print("Please specify the path to the saved model")
        # Exit with a non-zero status so callers/scripts see the failure
        # (the old exit() returned status 0 on this error path).
        raise SystemExit(1)
    print("Loading model....")
    _, exten = os.path.splitext(args.checkpoint)
    if exten == '.t7':
        # .t7 checkpoints store the raw state dict.
        model.load_state_dict(torch.load(args.checkpoint))
        print('done')
    elif exten == '.pth':
        # .pth checkpoints wrap the state dict in a training checkpoint dict.
        check = torch.load(args.checkpoint)
        model.load_state_dict(check['model_state_dict'])
    else:
        # Previously an unknown extension fell through silently and the
        # model ran with freshly-initialized weights.
        raise SystemExit("Unsupported checkpoint extension: %s" % exten)
    dataset_dict = build_dataset(cfg)               # build the dataset
    loader_dict = build_loader(cfg, dataset_dict)   # build the loader
    val_loader = loader_dict.get("val_loader", None)
    # Affordance categories to evaluate come from the training config.
    val_affordance = cfg.training_cfg.val_affordance
    mIoU = evaluation(logger, cfg, model, val_loader, val_affordance)