diff --git a/generation/2d_vqgan/2d_vqgan_tutorial.ipynb b/generation/2d_vqgan/2d_vqgan_tutorial.ipynb
index 0b54d5fdd..156d547ec 100644
--- a/generation/2d_vqgan/2d_vqgan_tutorial.ipynb
+++ b/generation/2d_vqgan/2d_vqgan_tutorial.ipynb
@@ -624,7 +624,6 @@
 "val_samples = np.linspace(val_interval, max_epochs, int(max_epochs / val_interval))\n",
 "num_inters = min(len(intermediary_images), 4)\n",
 "fig, ax = plt.subplots(nrows=num_inters, ncols=1, sharey=True)\n",
-"ax=ensure_tuple(ax)\n",
+"ax = ensure_tuple(ax)\n",
 "\n",
-"ax = ensure_tuple(ax)\n",
 "for image_n in range(num_inters):\n",
diff --git a/generation/2d_vqvae/2d_vqvae_tutorial.ipynb b/generation/2d_vqvae/2d_vqvae_tutorial.ipynb
index 66c32e697..100a490ac 100644
--- a/generation/2d_vqvae/2d_vqvae_tutorial.ipynb
+++ b/generation/2d_vqvae/2d_vqvae_tutorial.ipynb
@@ -542,7 +542,7 @@
 "# Plot every evaluation as a new line and example as columns\n",
 "val_samples = np.linspace(val_interval, max_epochs, int(max_epochs / val_interval))\n",
 "fig, ax = plt.subplots(nrows=len(val_samples), ncols=1, sharey=True)\n",
-"ax=ensure_tuple(ax)\n",
+"ax = ensure_tuple(ax)\n",
 "fig.set_size_inches(18.5, 30.5)\n",
 "for image_n in range(len(val_samples)):\n",
 "    reconstructions = torch.reshape(intermediary_images[image_n], (64 * n_example_images, 64)).T\n",