Log the number of seconds spent training each epoch
In order to extrapolate the duration of a long training run, and to better see the
impact of the batch steps, it is important to log the amount of time spent on each
epoch.
jakobnissen committed Jun 1, 2023
1 parent 3f491b6 commit 744ebda
Showing 1 changed file with 6 additions and 2 deletions.
vamb/encode.py: 6 additions & 2 deletions
@@ -8,6 +8,7 @@
 from torch import Tensor
 from torch import nn as _nn
 from math import log as _log
+from time import time
 
 __doc__ = """Encode a depths matrix and a tnf matrix to latent representation.
@@ -367,6 +368,7 @@ def trainepoch(
         epoch: int,
         optimizer,
         batchsteps: list[int],
+        start_time: float,
         logfile,
     ) -> _DataLoader[tuple[Tensor, Tensor, Tensor]]:
         self.train()
@@ -405,9 +407,11 @@ def trainepoch(
             epoch_celoss += ce.data.item()
 
         if logfile is not None:
+            elapsed = time() - start_time
             print(
-                "\tEpoch: {}\tLoss: {:.6f}\tCE: {:.7f}\tSSE: {:.6f}\tKLD: {:.4f}\tBatchsize: {}".format(
+                "\tEpoch: {}\tSeconds: {:.2f}\tLoss: {:.6f}\tCE: {:.7f}\tSSE: {:.6f}\tKLD: {:.4f}\tBatchsize: {}".format(
                     epoch + 1,
+                    elapsed,
                     epoch_loss / len(data_loader),
                     epoch_celoss / len(data_loader),
                     epoch_sseloss / len(data_loader),
@@ -599,7 +603,7 @@ def trainmodel(
         # Train
         for epoch in range(nepochs):
             dataloader = self.trainepoch(
-                dataloader, epoch, optimizer, sorted(batchsteps_set), logfile
+                dataloader, epoch, optimizer, sorted(batchsteps_set), time(), logfile
             )
 
         # Save weights - Lord forgive me, for I have sinned when catching all exceptions
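Taken together, the change follows a simple pattern: the caller snapshots time() once per epoch, and the epoch routine reports the difference when it writes its log line. The sketch below illustrates that pattern in isolation; it is a minimal standalone mock, not vamb's actual training loop, and the epoch_loss placeholder and trimmed log format are assumptions made for the example.

from time import time

def trainepoch(epoch: int, start_time: float) -> None:
    # ... forward and backward passes would run here ...
    epoch_loss = 0.0  # placeholder; the real code accumulates losses over batches
    elapsed = time() - start_time
    print(
        "\tEpoch: {}\tSeconds: {:.2f}\tLoss: {:.6f}".format(
            epoch + 1,
            elapsed,
            epoch_loss,
        )
    )

# time() is taken at the call site, so each epoch is timed independently,
# mirroring the trainmodel change above.
for epoch in range(3):
    trainepoch(epoch, time())

Because start_time is captured fresh at every call, the logged value is per-epoch wall-clock time rather than cumulative training time, which is what makes it useful for extrapolating the remaining run and for spotting the slowdown when a batch step changes the batch size.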
