eval_se3_flows.py

"""
Code adapted from
https://github.com/microsoft/protein-frame-flow/blob/main/experiments/inference_se3_flows.py
"""
import os
import time
import numpy as np
import hydra
import torch
import GPUtil
# from pytorch_lightning import Trainer
from omegaconf import DictConfig, OmegaConf

import src.utils as eu
from src.models.flow_module import FlowModule
from src.data.pdb_na_dataset_base import LengthDataset
from src.analysis.evalsuite import EvalSuite

torch._dynamo.config.verbose = True
torch.set_float32_matmul_precision('high')

log = eu.get_pylogger(__name__)


class Sampler:

    def __init__(self, cfg: DictConfig):
        """Initialize sampler.

        Args:
            cfg: inference config.
        """
        ckpt_path = cfg.inference.ckpt_path
        ckpt_dir = os.path.dirname(ckpt_path)
        ckpt_cfg = OmegaConf.load(os.path.join(ckpt_dir, 'config.yaml'))

        # Set-up config.
        OmegaConf.set_struct(cfg, False)
        OmegaConf.set_struct(ckpt_cfg, False)
        cfg = OmegaConf.merge(cfg, ckpt_cfg)
        cfg.experiment.checkpointer.dirpath = './'

        self._cfg = cfg
        self._infer_cfg = cfg.inference
        self._samples_cfg = self._infer_cfg.samples
        self._rng = np.random.default_rng(self._infer_cfg.seed)

        # Set-up directories to write results to
        self._ckpt_name = '/'.join(ckpt_path.replace('.ckpt', '').split('/')[-3:])
        self._output_dir = os.path.join(
            self._infer_cfg.output_dir,
            self._infer_cfg.name,
        )
        os.makedirs(self._output_dir, exist_ok=True)
        log.info(f'Saving results to {self._output_dir}')
        config_path = os.path.join(self._output_dir, 'config.yaml')
        with open(config_path, 'w') as f:
            OmegaConf.save(config=self._cfg, f=f)
        log.info(f'Saving inference config to {config_path}')

        # Read checkpoint and initialize module.
        self._flow_module = FlowModule.load_from_checkpoint(checkpoint_path=ckpt_path)
        self._flow_module.eval()
        self._flow_module._infer_cfg = self._infer_cfg
        self._flow_module._samples_cfg = self._samples_cfg
        self._flow_module._output_dir = self._output_dir

    # def run_sampling(self):
    #     devices = GPUtil.getAvailable(order='memory', limit = 8)[:self._infer_cfg.num_gpus]
    #     log.info(f"Using devices: {devices}")
    #     eval_dataset = LengthDataset(self._samples_cfg)
    #     dataloader = torch.utils.data.DataLoader(eval_dataset, batch_size=1, shuffle=False, drop_last=False)
    #     trainer = Trainer(
    #         accelerator="gpu",
    #         strategy="ddp",
    #         devices=devices,
    #     )
    #     start_time = time.time()
    #     trainer.predict(self._flow_module, dataloaders=dataloader)
    #     elapsed_time = time.time() - start_time
    #     log.info(f'Finished in {elapsed_time:.2f}s')
    #     log.info(f'Generated samples are stored here: {self._cfg.inference.output_dir}/{self._cfg.inference.name}/')
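

# NOTE: `run()` below only executes the evaluation step; the `Sampler` class and its
# commented-out `run_sampling()` method are currently unused. A minimal sketch of
# wiring sampling back in (assuming `run_sampling()` above is uncommented) would be:
#
#     sampler = Sampler(cfg)
#     sampler.run_sampling()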


@hydra.main(version_base=None, config_path="./camera_ready_ckpts", config_name="inference")
def run(cfg: DictConfig) -> None:
    # Run optional eval
    if cfg.inference.evalsuite.run_eval:
        print("Starting EvalSuite on generated backbones ...")
        print(f"Sample directory: {cfg.inference.output_dir}/{cfg.inference.name}/")
        rna_bb_samples_dir = f"{cfg.inference.output_dir}/{cfg.inference.name}"
        saving_dir = cfg.inference.evalsuite.eval_save_dir

        # init evaluation module
        evalsuite = EvalSuite(
            save_dir=saving_dir,
            paths=cfg.inference.evalsuite.paths,
            constants=cfg.inference.evalsuite.constants,
            gpu_id1=0,  # cuda:0 -> for inverse-folding model
            gpu_id2=1,  # cuda:1 -> for forward-folding model
        )

        # run self-consistency pipeline
        metric_dict = evalsuite.perform_eval(
            rna_bb_samples_dir,
            flatten_dir=False
        )

        # print out global self-consistency metrics
        metrics_fp = os.path.join(saving_dir, "final_metrics.pt")
        metric_dict = evalsuite.load_from_metric_dict(metrics_fp)
        evalsuite.print_metrics(metric_dict)  # print eval metrics


if __name__ == '__main__':
    run()
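

# Inspecting results: after `perform_eval`, the script reloads aggregate metrics from
# `<eval_save_dir>/final_metrics.pt` (see above). One way to look at them outside this
# script is a plain `torch.load`; the exact dictionary layout is defined by EvalSuite
# and is not documented here, so treat this as a sketch with a placeholder path:
#
#     import torch
#     metrics = torch.load("path/to/final_metrics.pt")
#     print(metrics)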