From 744ebda11aa85cc127efab93ef6358c843c64617 Mon Sep 17 00:00:00 2001
From: Jakob Nybo Nissen
Date: Thu, 1 Jun 2023 16:24:02 +0200
Subject: [PATCH] Log the number of seconds spent training each epoch

To extrapolate the duration of a long training run, and to better see
the impact of the batch steps, it is useful to log the amount of time
spent on each epoch.
---
 vamb/encode.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/vamb/encode.py b/vamb/encode.py
index a5ba3abe..5e380b34 100644
--- a/vamb/encode.py
+++ b/vamb/encode.py
@@ -8,6 +8,7 @@
 from torch import Tensor
 from torch import nn as _nn
 from math import log as _log
+from time import time
 
 __doc__ = """Encode a depths matrix and a tnf matrix to latent representation.
 
@@ -367,6 +368,7 @@ def trainepoch(
         epoch: int,
         optimizer,
         batchsteps: list[int],
+        start_time: float,
         logfile,
     ) -> _DataLoader[tuple[Tensor, Tensor, Tensor]]:
         self.train()
@@ -405,9 +407,11 @@ def trainepoch(
             epoch_celoss += ce.data.item()
 
         if logfile is not None:
+            elapsed = time() - start_time
             print(
-                "\tEpoch: {}\tLoss: {:.6f}\tCE: {:.7f}\tSSE: {:.6f}\tKLD: {:.4f}\tBatchsize: {}".format(
+                "\tEpoch: {}\tSeconds: {:.2f}\tLoss: {:.6f}\tCE: {:.7f}\tSSE: {:.6f}\tKLD: {:.4f}\tBatchsize: {}".format(
                     epoch + 1,
+                    elapsed,
                     epoch_loss / len(data_loader),
                     epoch_celoss / len(data_loader),
                     epoch_sseloss / len(data_loader),
@@ -599,7 +603,7 @@ def trainmodel(
         # Train
         for epoch in range(nepochs):
             dataloader = self.trainepoch(
-                dataloader, epoch, optimizer, sorted(batchsteps_set), logfile
+                dataloader, epoch, optimizer, sorted(batchsteps_set), time(), logfile
             )
 
         # Save weights - Lord forgive me, for I have sinned when catching all exceptions
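
The pattern the patch introduces is simple: the caller captures time()
immediately before each epoch and passes it in, and the epoch routine reports
time() - start_time once its batches are done. The sketch below is a minimal,
hypothetical stand-in for illustration only: train_one_epoch and run_training
are invented names (the patch's real methods are VAE.trainepoch and
VAE.trainmodel), and writing the log line to logfile is an assumption, since
the end of the print() call falls outside the hunk shown above.

    from time import time
    from typing import Optional, TextIO

    def train_one_epoch(epoch: int, start_time: float, logfile: Optional[TextIO]) -> None:
        # ... the forward/backward passes over all batches would run here ...
        if logfile is not None:
            # Wall-clock seconds spent on this epoch, measured from the
            # time() the caller captured just before invoking us.
            elapsed = time() - start_time
            print("\tEpoch: {}\tSeconds: {:.2f}".format(epoch + 1, elapsed), file=logfile)

    def run_training(nepochs: int, logfile: Optional[TextIO]) -> None:
        for epoch in range(nepochs):
            # A fresh time() per call means the clock resets each epoch,
            # so the logged value is per-epoch, not cumulative.
            train_one_epoch(epoch, time(), logfile)

    # Example: run_training(3, sys.stdout) prints one timed line per epoch.

Note that because Python evaluates call arguments left to right, the time() at
the trainepoch() call site fires after sorted(batchsteps_set), so the logged
duration covers essentially just the epoch body itself.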