diff --git a/unit3/01_stable_diffusion_introduction.ipynb b/unit3/01_stable_diffusion_introduction.ipynb
index 316cd20..0edea04 100644
--- a/unit3/01_stable_diffusion_introduction.ipynb
+++ b/unit3/01_stable_diffusion_introduction.ipynb
@@ -469,8 +469,9 @@
    ],
    "source": [
     "# Get the final text embeddings using the pipeline's encode_prompt function\n",
-    "text_embeddings = pipe._encode_prompt(\"A painting of a flooble\", device, 1, False, '')\n",
-    "text_embeddings.shape"
+    "text_embeddings = pipe.encode_prompt(\"A painting of a flooble\", device, 1, False, '')\n",
+    "print(\"Text embedding shape:\", text_embeddings[0].shape)"
    ]
   },
   {
@@ -673,7 +674,16 @@
     "negative_prompt = \"zoomed in, blurry, oversaturated, warped\" #@param\n",
     "\n",
     "# Encode the prompt\n",
-    "text_embeddings = pipe._encode_prompt(prompt, device, 1, True, negative_prompt)\n",
+    "text_embeddings = pipe.encode_prompt(\n",
+    "        prompt=prompt,\n",
+    "        device=device,\n",
+    "        num_images_per_prompt=1,\n",
+    "        do_classifier_free_guidance=True,\n",
+    "        negative_prompt=negative_prompt,\n",
+    "    )\n",
+    "text_embeddings = torch.cat([text_embeddings[1], text_embeddings[0]])\n",
     "\n",
     "# Create our random starting point\n",
     "latents = torch.randn((1, 4, 64, 64), device=device, generator=generator)\n",
@@ -704,10 +714,15 @@
     "\n",
     "# Decode the resulting latents into an image\n",
     "with torch.no_grad():\n",
-    "  image = pipe.decode_latents(latents.detach())\n",
+    "    image = pipe.vae.decode(latents.detach() / pipe.vae.config.scaling_factor, return_dict=False)[0]\n",
     "\n",
+    "    image = pipe.image_processor.postprocess(image, \n",
+    "                                     output_type=\"pil\",\n",
+    "                                     do_denormalize=[True])[0]\n",
     "# View\n",
-    "pipe.numpy_to_pil(image)[0]"
+    "display(image)"
    ]
   },
   {
@@ -857,7 +872,7 @@
    "outputs": [],
    "source": [
     "# Load the inpainting pipeline (requires a suitable inpainting model)\n",
-    "pipe = StableDiffusionInpaintPipeline.from_pretrained(\"runwayml/stable-diffusion-inpainting\")\n",
+    "pipe = StableDiffusionInpaintPipeline.from_pretrained(\"stabilityai/stable-diffusion-2-inpainting\")\n",
     "pipe = pipe.to(device)"
    ]
   },