diff --git a/projects/professional_development/mentorship_program.md b/projects/professional_development/mentorship_program.md
index cf9bb8851..d49641293 100644
--- a/projects/professional_development/mentorship_program.md
+++ b/projects/professional_development/mentorship_program.md
@@ -38,8 +38,5 @@ I’m a student, how can I make the most out of my mentoring experience?
 
 **Keep a respectful conversation**: mentoring is a social interaction. You may construct a long-lasting relationship with your mentor and your peers. Thus, be kind and have fun, as you’ll be discussing your dreams and career goals, as well as the efforts you’ve made so far to follow them.
 
----
-
-# Find additional Professional Development resources through our website
-https://neuromatch.io/resources/
+**Find additional Professional Development resources through [our website](https://neuromatch.io/resources/)**
 
diff --git a/tutorials/W1D2_ComparingTasks/W1D2_Tutorial1.ipynb b/tutorials/W1D2_ComparingTasks/W1D2_Tutorial1.ipynb
index 7178cd8e5..d16874946 100644
--- a/tutorials/W1D2_ComparingTasks/W1D2_Tutorial1.ipynb
+++ b/tutorials/W1D2_ComparingTasks/W1D2_Tutorial1.ipynb
@@ -49,8 +49,6 @@
     "2. Train a network to accomplish these tasks and compare the performance of these networks.\n",
     "3. Measure how well different representations generalize\n",
     "\n",
-    "**Important note**: this tutorial leverages GPU acceleration. Using a GPU runtime in colab will make the the tutorial run 10x faster.\n",
-    "\n",
     "Let's get started.\n"
    ]
   },
@@ -1396,8 +1394,8 @@
     "    for batch_idx, (X, y) in enumerate(sampled_val_loader):\n",
     "        predictions = model(X)\n",
     "        _, predicted_classes = torch.max(predictions, 1)\n",
-    "        all_preds.extend(predicted_classes.cpu().numpy())\n",
-    "        all_labels.extend(y.cpu().numpy())\n",
+    "        all_preds.extend(predicted_classes.numpy())\n",
+    "        all_labels.extend(y.numpy())\n",
     "\n",
     "    #Compute confusion matrix\n",
     "    conf_matrix = confusion_matrix(all_labels, all_preds)\n",
@@ -1570,8 +1568,7 @@
     "\n",
     "- $N$ is the number of samples\n",
     "- $y_i$ is the true label for the $i^{th}$ sample, the number of on pixels\n",
-    "- $\\mu_i = \\text{CNN}(x_i)$ is the output of the model for the $i^{th}$ sample\n",
-    "- $f$ is the ReLU activation function"
+    "- $\\mu_i = \\text{CNN}(x_i)$ is the output of the model for the $i^{th}$ sample"
    ]
   },
   {
@@ -2148,9 +2145,9 @@
     "    for batch_idx, (X, _) in enumerate(sampled_val_loader):\n",
     "        if batch_idx == 0:\n",
     "            outputs = model(X)\n",
-    "            orig = X.cpu().numpy()\n",
+    "            orig = X.numpy()\n",
     "            original_images.extend(orig)\n",
-    "            recon = outputs.cpu().numpy()\n",
+    "            recon = outputs.numpy()\n",
     "            reconstructed_images.extend(recon)\n",
     "            plot_reconstructions(original_images, reconstructed_images, N_train_data, epochs_max_autoencoder)\n",
     "            break\n",
@@ -2496,9 +2493,9 @@
     "    for batch_idx, (X, _) in enumerate(sampled_val_loader):\n",
     "        if batch_idx == 0: # Only visualize the first batch for simplicity\n",
     "            outputs = model(X)\n",
-    "            orig = X.cpu().numpy()\n",
+    "            orig = X.numpy()\n",
     "            original_images.extend(orig)\n",
-    "            recon = outputs.cpu().numpy()\n",
+    "            recon = outputs.numpy()\n",
     "            reconstructed_images.extend(recon)\n",
     "            fig = plt.figure(figsize=(8, 4))\n",
     "            rows, cols = 2, 6\n",
diff --git a/tutorials/W1D2_ComparingTasks/instructor/W1D2_Tutorial1.ipynb b/tutorials/W1D2_ComparingTasks/instructor/W1D2_Tutorial1.ipynb
index 6358c4939..5f1d99e38 100644
--- a/tutorials/W1D2_ComparingTasks/instructor/W1D2_Tutorial1.ipynb
+++ b/tutorials/W1D2_ComparingTasks/instructor/W1D2_Tutorial1.ipynb
@@ -49,8 +49,6 @@
     "2. Train a network to accomplish these tasks and compare the performance of these networks.\n",
     "3. Measure how well different representations generalize\n",
     "\n",
-    "**Important note**: this tutorial leverages GPU acceleration. Using a GPU runtime in colab will make the the tutorial run 10x faster.\n",
-    "\n",
     "Let's get started.\n"
    ]
   },
@@ -1398,8 +1396,8 @@
     "    for batch_idx, (X, y) in enumerate(sampled_val_loader):\n",
     "        predictions = model(X)\n",
     "        _, predicted_classes = torch.max(predictions, 1)\n",
-    "        all_preds.extend(predicted_classes.cpu().numpy())\n",
-    "        all_labels.extend(y.cpu().numpy())\n",
+    "        all_preds.extend(predicted_classes.numpy())\n",
+    "        all_labels.extend(y.numpy())\n",
     "\n",
     "    #Compute confusion matrix\n",
     "    conf_matrix = confusion_matrix(all_labels, all_preds)\n",
@@ -1572,8 +1570,7 @@
     "\n",
     "- $N$ is the number of samples\n",
     "- $y_i$ is the true label for the $i^{th}$ sample, the number of on pixels\n",
-    "- $\\mu_i = \\text{CNN}(x_i)$ is the output of the model for the $i^{th}$ sample\n",
-    "- $f$ is the ReLU activation function"
+    "- $\\mu_i = \\text{CNN}(x_i)$ is the output of the model for the $i^{th}$ sample"
    ]
   },
   {
@@ -2154,9 +2151,9 @@
     "    for batch_idx, (X, _) in enumerate(sampled_val_loader):\n",
     "        if batch_idx == 0:\n",
     "            outputs = model(X)\n",
-    "            orig = X.cpu().numpy()\n",
+    "            orig = X.numpy()\n",
     "            original_images.extend(orig)\n",
-    "            recon = outputs.cpu().numpy()\n",
+    "            recon = outputs.numpy()\n",
     "            reconstructed_images.extend(recon)\n",
     "            plot_reconstructions(original_images, reconstructed_images, N_train_data, epochs_max_autoencoder)\n",
     "            break\n",
@@ -2502,9 +2499,9 @@
     "    for batch_idx, (X, _) in enumerate(sampled_val_loader):\n",
     "        if batch_idx == 0: # Only visualize the first batch for simplicity\n",
     "            outputs = model(X)\n",
-    "            orig = X.cpu().numpy()\n",
+    "            orig = X.numpy()\n",
     "            original_images.extend(orig)\n",
-    "            recon = outputs.cpu().numpy()\n",
+    "            recon = outputs.numpy()\n",
     "            reconstructed_images.extend(recon)\n",
     "            fig = plt.figure(figsize=(8, 4))\n",
     "            rows, cols = 2, 6\n",
diff --git a/tutorials/W1D2_ComparingTasks/student/W1D2_Tutorial1.ipynb b/tutorials/W1D2_ComparingTasks/student/W1D2_Tutorial1.ipynb
index 809914711..990f65aad 100644
--- a/tutorials/W1D2_ComparingTasks/student/W1D2_Tutorial1.ipynb
+++ b/tutorials/W1D2_ComparingTasks/student/W1D2_Tutorial1.ipynb
@@ -49,8 +49,6 @@
     "2. Train a network to accomplish these tasks and compare the performance of these networks.\n",
     "3. Measure how well different representations generalize\n",
     "\n",
-    "**Important note**: this tutorial leverages GPU acceleration. Using a GPU runtime in colab will make the the tutorial run 10x faster.\n",
-    "\n",
     "Let's get started.\n"
    ]
   },
@@ -1390,8 +1388,8 @@
     "    for batch_idx, (X, y) in enumerate(sampled_val_loader):\n",
     "        predictions = model(X)\n",
     "        _, predicted_classes = torch.max(predictions, 1)\n",
-    "        all_preds.extend(predicted_classes.cpu().numpy())\n",
-    "        all_labels.extend(y.cpu().numpy())\n",
+    "        all_preds.extend(predicted_classes.numpy())\n",
+    "        all_labels.extend(y.numpy())\n",
     "\n",
     "    #Compute confusion matrix\n",
     "    conf_matrix = confusion_matrix(all_labels, all_preds)\n",
@@ -1556,8 +1554,7 @@
     "\n",
     "- $N$ is the number of samples\n",
     "- $y_i$ is the true label for the $i^{th}$ sample, the number of on pixels\n",
-    "- $\\mu_i = \\text{CNN}(x_i)$ is the output of the model for the $i^{th}$ sample\n",
-    "- $f$ is the ReLU activation function"
+    "- $\\mu_i = \\text{CNN}(x_i)$ is the output of the model for the $i^{th}$ sample"
    ]
   },
   {
@@ -2118,9 +2115,9 @@
     "    for batch_idx, (X, _) in enumerate(sampled_val_loader):\n",
     "        if batch_idx == 0:\n",
     "            outputs = model(X)\n",
-    "            orig = X.cpu().numpy()\n",
+    "            orig = X.numpy()\n",
     "            original_images.extend(orig)\n",
-    "            recon = outputs.cpu().numpy()\n",
+    "            recon = outputs.numpy()\n",
     "            reconstructed_images.extend(recon)\n",
     "            plot_reconstructions(original_images, reconstructed_images, N_train_data, epochs_max_autoencoder)\n",
     "            break\n",
@@ -2459,9 +2456,9 @@
     "    for batch_idx, (X, _) in enumerate(sampled_val_loader):\n",
     "        if batch_idx == 0: # Only visualize the first batch for simplicity\n",
     "            outputs = model(X)\n",
-    "            orig = X.cpu().numpy()\n",
+    "            orig = X.numpy()\n",
     "            original_images.extend(orig)\n",
-    "            recon = outputs.cpu().numpy()\n",
+    "            recon = outputs.numpy()\n",
     "            reconstructed_images.extend(recon)\n",
     "            fig = plt.figure(figsize=(8, 4))\n",
     "            rows, cols = 2, 6\n",
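Context for the `.cpu()` removals above: `.numpy()` only works on CPU tensors, so these hunks assume the tutorial now runs on a CPU runtime (consistent with dropping the GPU note). For reference, a minimal sketch of a device-agnostic conversion; the `to_numpy` helper is our own illustration, not code from the notebooks:

```python
# Sketch only (not part of the tutorial): convert a PyTorch tensor to NumPy
# regardless of whether it lives on CPU or GPU.
import torch

def to_numpy(t: torch.Tensor):
    # detach() drops the autograd graph; cpu() is a no-op for CPU tensors
    # and copies data back to host memory for CUDA tensors.
    return t.detach().cpu().numpy()

x = torch.randn(4, 3)
print(to_numpy(x).shape)  # (4, 3)
```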