diff --git a/projects/online/online/main.py b/projects/online/online/main.py
index d383903b..9b077567 100644
--- a/projects/online/online/main.py
+++ b/projects/online/online/main.py
@@ -187,12 +187,11 @@ def search(
         # and we had enough previous to build whitening filter
         # search for events in the integrated output
         event = None
-        if snapshotter.full_psd_present:  # and ready:
+        if snapshotter.full_psd_present and ready:
            event = searcher.search(integrated, t0 + time_offset)
 
         # if we found an event, process it!
         if event is not None:
-            # event_processor(event)
            logging.info("Putting event in event queue")
            event_queue.put(event)
            logging.info("Running AMPLFI")
@@ -238,7 +237,6 @@ def pastro_subprocess(
         event = pastro_queue.get()
         logging.info("Calculating p_astro")
         pastro = pastro_model(event.detection_statistic)
-        # pastro = pastro_model(7)
         graceid = pastro_queue.get()
         logging.info(f"Submitting p_astro: {pastro}")
         gdb.submit_pastro(float(pastro), graceid, event.gpstime)
@@ -428,7 +426,7 @@ def main(
         input_buffer_length:
             Length of strain data buffer in seconds
         output_buffer_length:
-            Length of inference output buffer in seconds 
+            Length of inference output buffer in seconds
         samples_per_event:
             Number of posterior samples to generate per event
             for creating skymaps and other parameter estimation
@@ -439,10 +437,10 @@ def main(
             Device to run inference on ("cpu" or "cuda")
     """
     # run kinit and htgettoken
-    # if server != "local":
-    #     logging.info("Authenticating")
-    #     authenticate()
-    #     logging.info("Authentication complete")
+    if server != "local":
+        logging.info("Authenticating")
+        authenticate()
+        logging.info("Authentication complete")
 
     fftlength = fftlength or kernel_length + fduration
     data = torch.randn(samples_per_event * len(inference_params))
diff --git a/projects/online/online/utils/pe.py b/projects/online/online/utils/pe.py
index e3a70408..d5c32c93 100644
--- a/projects/online/online/utils/pe.py
+++ b/projects/online/online/utils/pe.py
@@ -47,15 +47,13 @@ def run_amplfi(
     mask = freqs > amplfi_whitener.highpass
     pe_psd = pe_psd[:, :, mask]
 
-    # asds = torch.sqrt(pe_psd)
-    logging.info("Computed AMPLFI ASD")
+    asds = torch.sqrt(pe_psd)
 
     # sample from the model and descale back to physical units
-    # samples = amplfi.sample(samples_per_event, context=(whitened, asds))
-    # descaled_samples = std_scaler(samples.mT, reverse=True).mT.cpu()
+    samples = amplfi.sample(samples_per_event, context=(whitened, asds))
+    descaled_samples = std_scaler(samples.mT, reverse=True).mT.cpu()
     logging.info("Finished AMPLFI")
-    return torch.randn((20000, 8))
-    # return descaled_samples
+    return descaled_samples
 
 
 def skymap_from_samples(
@@ -73,7 +71,6 @@ def skymap_from_samples(
         ["chirp_mass", "mass_ratio", "distance"],
         f"{event_time} result",
     )
-    logging.info("Computed posterior")
 
     phi_idx = inference_params.index("phi")
     dec_idx = inference_params.index("dec")
diff --git a/projects/online/online/utils/searcher.py b/projects/online/online/utils/searcher.py
index a4b83cd1..d081dfc8 100644
--- a/projects/online/online/utils/searcher.py
+++ b/projects/online/online/utils/searcher.py
@@ -194,9 +194,6 @@ def search(self, y: np.ndarray, t0: float) -> Optional[Event]:
         # check if the integrated output is still
         # ramping as we get to the end of the frame
         idx = np.argmax(y)
-        # timestamp = t0 + idx / self.inference_sampling_rate
-        # detection_time = float(tconvert(datetime.now(tz=timezone.utc)))
-        # logging.info(f"Detection latency: {detection_time - timestamp}")
         if idx < (len(y) - 1):
             # if not, assume the event is in this
             # frame and build an event around it