From f9f329c2ee54cb509aec597d96f41c028d7e8046 Mon Sep 17 00:00:00 2001 From: Virginia Fernandez Date: Wed, 18 Sep 2024 22:40:37 +0100 Subject: [PATCH] Fix issue with the plots for autoencoder tutorials. Add anomaly detection nbs to the README. Signed-off-by: Virginia Fernandez --- generation/2d_autoencoderkl/2d_autoencoderkl_tutorial.ipynb | 2 ++ generation/3d_autoencoderkl/3d_autoencoderkl_tutorial.ipynb | 2 ++ generation/README.md | 4 ++++ 3 files changed, 8 insertions(+) diff --git a/generation/2d_autoencoderkl/2d_autoencoderkl_tutorial.ipynb b/generation/2d_autoencoderkl/2d_autoencoderkl_tutorial.ipynb index 94c719f81..d869cb81e 100644 --- a/generation/2d_autoencoderkl/2d_autoencoderkl_tutorial.ipynb +++ b/generation/2d_autoencoderkl/2d_autoencoderkl_tutorial.ipynb @@ -92,6 +92,7 @@ "from torch.nn import L1Loss\n", "from monai.losses import PatchAdversarialLoss, PerceptualLoss\n", "from monai.networks.nets import AutoencoderKL, PatchDiscriminator\n", + "from monai.utils.misc import ensure_tuple\n", "\n", "print_config()" ] @@ -694,6 +695,7 @@ "# Plot every evaluation as a new line and example as columns\n", "val_samples = np.linspace(val_interval, max_epochs, int(max_epochs / val_interval))\n", "fig, ax = plt.subplots(nrows=len(val_samples), ncols=1, sharey=True)\n", + "ax = ensure_tuple(ax) \n", "for image_n in range(len(val_samples)):\n", " reconstructions = torch.reshape(intermediary_images[image_n], (64 * n_example_images, 64)).T\n", " ax[image_n].imshow(reconstructions.cpu(), cmap=\"gray\")\n", diff --git a/generation/3d_autoencoderkl/3d_autoencoderkl_tutorial.ipynb b/generation/3d_autoencoderkl/3d_autoencoderkl_tutorial.ipynb index 4a0eda3b4..7112954a2 100644 --- a/generation/3d_autoencoderkl/3d_autoencoderkl_tutorial.ipynb +++ b/generation/3d_autoencoderkl/3d_autoencoderkl_tutorial.ipynb @@ -88,6 +88,7 @@ "from torch.amp import autocast\n", "from monai.networks.nets import AutoencoderKL, PatchDiscriminator\n", "from monai.losses import 
PatchAdversarialLoss, PerceptualLoss\n", + "from monai.utils.misc import ensure_tuple\n", "\n", "print_config()" ] @@ -731,6 +732,7 @@ ], "source": [ "fig, ax = plt.subplots(nrows=1, ncols=2)\n", + "ax = ensure_tuple(ax)\n", "ax[0].imshow(images[0, channel, ..., images.shape[2] // 2].cpu(), vmin=0, vmax=1, cmap=\"gray\")\n", "ax[0].axis(\"off\")\n", "ax[0].title.set_text(\"Inputted Image\")\n", diff --git a/generation/README.md b/generation/README.md index 422163001..d998bffcf 100644 --- a/generation/README.md +++ b/generation/README.md @@ -63,3 +63,7 @@ Example shows how to use a DDPM to inpaint of 2D images from the MedNIST dataset ## [Guiding the 2D diffusion synthesis using ControlNet](./controlnet/2d_controlnet.ipynb) Example shows how to use ControlNet to condition a diffusion model trained on 2D brain MRI images on binary brain masks. + +## Performing anomaly detection with diffusion models: [implicit guidance](./anomaly_detection/2d_classifierfree_guidance_anomalydetection_tutorial.ipynb), [using transformers](./anomaly_detection/anomaly_detection_with_transformers.ipynb) and [classifier-free guidance](./anomaly_detection/anomalydetection_tutorial_classifier_guidance.ipynb) +Examples show how to perform anomaly detection in 2D using [implicit guidance](./anomaly_detection/2d_classifierfree_guidance_anomalydetection_tutorial.ipynb), [transformers](./anomaly_detection/anomaly_detection_with_transformers.ipynb) and [classifier-free guidance](./anomaly_detection/anomalydetection_tutorial_classifier_guidance.ipynb). +