Commit
Process tutorial notebooks
actions-user committed Aug 7, 2024
1 parent ecc5837 commit bfa1e48
Showing 6 changed files with 103 additions and 50 deletions.
113 changes: 80 additions & 33 deletions tutorials/W1D2_ComparingTasks/W1D2_Tutorial2.ipynb
@@ -57,6 +57,7 @@
"id": "aceddeef-ea13-4224-8ca8-70563edbcf00",
"metadata": {
"cellView": "form",
"execution": {},
"tags": [
"remove-input"
]
@@ -106,6 +107,7 @@
"id": "f3eeb07c-5de3-428e-a422-3a58f1124b43",
"metadata": {
"cellView": "form",
"execution": {},
"tags": [
"hide-input"
]
@@ -137,6 +139,7 @@
"id": "b56685b9-3f17-4a55-acca-73dad5623992",
"metadata": {
"cellView": "form",
"execution": {},
"tags": [
"hide-input"
]
@@ -179,6 +182,7 @@
"id": "cebc5798-a8dd-4985-b0ec-d5facf4ee700",
"metadata": {
"cellView": "form",
"execution": {},
"tags": [
"hide-input"
]
@@ -200,6 +204,7 @@
"id": "386b49a0-9409-496a-8656-579ca3e4af5f",
"metadata": {
"cellView": "form",
"execution": {},
"tags": [
"hide-input"
]
@@ -270,7 +275,8 @@
"execution_count": null,
"id": "a47d3240-0a70-4e3b-af13-2c02528867fa",
"metadata": {
"cellView": "form"
"cellView": "form",
"execution": {}
},
"outputs": [],
"source": [
@@ -327,7 +333,8 @@
"execution_count": null,
"id": "2cb688bf-38c8-40e1-b40b-c4014cdaffba",
"metadata": {
"cellView": "form"
"cellView": "form",
"execution": {}
},
"outputs": [],
"source": [
@@ -378,7 +385,9 @@
"cell_type": "code",
"execution_count": null,
"id": "d7c432bf-1de5-43f6-b887-087d17e9ec0f",
"metadata": {},
"metadata": {
"execution": {}
},
"outputs": [],
"source": [
"class ResidualBlock(nn.Module):\n",
@@ -428,7 +437,9 @@
"cell_type": "code",
"execution_count": null,
"id": "8cdc7833-aafd-4ee0-83fd-af9922dd5857",
"metadata": {},
"metadata": {
"execution": {}
},
"outputs": [],
"source": [
"class Model(nn.Module):\n",
@@ -465,7 +476,9 @@
"cell_type": "code",
"execution_count": null,
"id": "d72d9231-35a5-405e-b2f6-135e134e0ae9",
"metadata": {},
"metadata": {
"execution": {}
},
"outputs": [],
"source": [
"# Define the transformations for the MNIST dataset\n",
@@ -509,7 +522,9 @@
"cell_type": "code",
"execution_count": null,
"id": "ffd3ab89-ffd9-408c-8396-25b88e0930fb",
"metadata": {},
"metadata": {
"execution": {}
},
"outputs": [],
"source": [
"# Initialize the model with specified input, output, and hidden dimensions\n",
@@ -541,7 +556,9 @@
"cell_type": "code",
"execution_count": null,
"id": "b2f9122b-ce83-464f-b6c9-5af2feceac62",
"metadata": {},
"metadata": {
"execution": {}
},
"outputs": [],
"source": [
"# First try with untrained network, find the cosine similarities within a class and across classes\n",
@@ -600,7 +617,9 @@
"cell_type": "code",
"execution_count": null,
"id": "0JcR1VwliL1f",
"metadata": {},
"metadata": {
"execution": {}
},
"outputs": [],
"source": [
"# Dictionary to store normalized embeddings for each class\n",
@@ -643,7 +662,9 @@
"cell_type": "code",
"execution_count": null,
"id": "9594e7b0-e401-4792-8798-7f23109bcf94",
"metadata": {},
"metadata": {
"execution": {}
},
"outputs": [],
"source": [
"# to_remove solution\n",
Expand Down Expand Up @@ -684,7 +705,8 @@
"execution_count": null,
"id": "26dd2a45-38ed-45ea-ad27-dfddfeb1613f",
"metadata": {
"cellView": "form"
"cellView": "form",
"execution": {}
},
"outputs": [],
"source": [
@@ -708,7 +730,9 @@
"cell_type": "code",
"execution_count": null,
"id": "728682ab",
"metadata": {},
"metadata": {
"execution": {}
},
"outputs": [],
"source": [
"# to_remove explanation\n",
@@ -729,7 +753,8 @@
"execution_count": null,
"id": "d13d5761-e2ec-4ae1-adb4-edbe57b77ba9",
"metadata": {
"cellView": "form"
"cellView": "form",
"execution": {}
},
"outputs": [],
"source": [
@@ -805,7 +830,9 @@
"cell_type": "code",
"execution_count": null,
"id": "1bfe95da-cd87-4fea-bb22-c1419450f9cb",
"metadata": {},
"metadata": {
"execution": {}
},
"outputs": [],
"source": [
"def dcl_loss(pos_pairs, neg_pairs, indices_tuple, temperature=0.07):\n",
@@ -970,7 +997,9 @@
"cell_type": "code",
"execution_count": null,
"id": "67db2b80",
"metadata": {},
"metadata": {
"execution": {}
},
"outputs": [],
"source": [
"# to_remove solution\n",
@@ -1140,7 +1169,9 @@
"cell_type": "code",
"execution_count": null,
"id": "4d4f11e3-07e8-4104-9ae4-6a9fd6f037eb",
"metadata": {},
"metadata": {
"execution": {}
},
"outputs": [],
"source": [
"def get_embeddings_labels(loader, model):\n",
@@ -1195,7 +1226,8 @@
"execution_count": null,
"id": "de3b7b2b-4351-4df8-8877-8f81b1d3878b",
"metadata": {
"cellView": "form"
"cellView": "form",
"execution": {}
},
"outputs": [],
"source": [
@@ -1219,7 +1251,9 @@
"cell_type": "code",
"execution_count": null,
"id": "e88a8b40-faa7-4071-b070-0ca192e06d55",
"metadata": {},
"metadata": {
"execution": {}
},
"outputs": [],
"source": [
"# Create DataLoader for the test dataset with a batch size of 50\n",
@@ -1332,7 +1366,9 @@
"cell_type": "code",
"execution_count": null,
"id": "767e0f93-3f68-4db0-9edf-4891a3858f63",
"metadata": {},
"metadata": {
"execution": {}
},
"outputs": [],
"source": [
"# Convert list of embeddings to a numpy array\n",
@@ -1365,7 +1401,9 @@
"cell_type": "code",
"execution_count": null,
"id": "d4cdd26a-15c9-4c5f-82f7-4a65ca0bd5f7",
"metadata": {},
"metadata": {
"execution": {}
},
"outputs": [],
"source": [
"# Convert list of embeddings to a numpy array\n",
@@ -1398,7 +1436,9 @@
"cell_type": "code",
"execution_count": null,
"id": "2afe3c0d-7eed-4b2b-bdaa-bb258fc92d10",
"metadata": {},
"metadata": {
"execution": {}
},
"outputs": [],
"source": [
"# Use t-SNE embeddings for visualization\n",
@@ -1454,7 +1494,9 @@
"cell_type": "code",
"execution_count": null,
"id": "013D0NbXowjj",
"metadata": {},
"metadata": {
"execution": {}
},
"outputs": [],
"source": [
"# Calculate the cosine similarity matrix between all the test images and the train images\n",
@@ -1492,7 +1534,9 @@
"cell_type": "code",
"execution_count": null,
"id": "1ae6f966",
"metadata": {},
"metadata": {
"execution": {}
},
"outputs": [],
"source": [
"best_idxs = np.argmax(sims_all, axis=1)\n",
@@ -1551,20 +1595,22 @@
"cell_type": "code",
"execution_count": null,
"id": "fa73993e-bbaa-4d32-a7c1-3903c784883f",
"metadata": {},
"metadata": {
"execution": {}
},
"outputs": [],
"source": [
"# to_remove explanation\n",
"\"\"\"\n",
"Our brain's ability to perform contrastive learning is linked to the function of the ventral visual stream. \n",
"This area of the brain processes visual information and has been shown to develop hierarchical features that \n",
"capture the structure of visual input through self-supervised learning mechanisms. Evidence suggests that anterior \n",
"regions of the ventral visual stream, particularly the anterior occipito-temporal cortex (aOTC), encode substantial \n",
"information about object categories without requiring explicit category-level supervision (Konkle and Alvarez, 2022). \n",
"Instead, these representations emerge through domain-general learning from natural image structures, where the brain \n",
"differentiates between individual views and categories based on the inherent statistical properties of visual input \n",
"(Livingstone et al., 2019; Arcaro and Livingstone, 2021). This capability supports the notion that the brain's visual \n",
"system can form complex object representations and categorization using self-supervised learning frameworks similar to \n",
"Our brain's ability to perform contrastive learning is linked to the function of the ventral visual stream.\n",
"This area of the brain processes visual information and has been shown to develop hierarchical features that\n",
"capture the structure of visual input through self-supervised learning mechanisms. Evidence suggests that anterior\n",
"regions of the ventral visual stream, particularly the anterior occipito-temporal cortex (aOTC), encode substantial\n",
"information about object categories without requiring explicit category-level supervision (Konkle and Alvarez, 2022).\n",
"Instead, these representations emerge through domain-general learning from natural image structures, where the brain\n",
"differentiates between individual views and categories based on the inherent statistical properties of visual input\n",
"(Livingstone et al., 2019; Arcaro and Livingstone, 2021). This capability supports the notion that the brain's visual\n",
"system can form complex object representations and categorization using self-supervised learning frameworks similar to\n",
"those in artificial neural networks.\n",
"\"\"\""
]
@@ -1574,7 +1620,8 @@
"execution_count": null,
"id": "af3f6a4f-2185-41db-a451-09ade489790a",
"metadata": {
"cellView": "form"
"cellView": "form",
"execution": {}
},
"outputs": [],
"source": [
17 changes: 10 additions & 7 deletions tutorials/W1D2_ComparingTasks/instructor/W1D2_Tutorial2.ipynb
@@ -1606,13 +1606,16 @@
"source": [
"# to_remove explanation\n",
"\"\"\"\n",
"Konkle and Alvarez (2022) speculate that perceptual judgements of visual similarity might be\n",
"driven by a process of contrastive learning not unlike that in AI. Different views of the same\n",
"scene, like the augmentations used in contrastive learning, can be generated by eye movements,\n",
"body movements and the passage of time. The hippocampus could hold these views in working memory\n",
"to implement a form of contrastive learning in neocortex. These ideas are closely related to\n",
"long-standing ideas about predictive coding in the brain. This remains an active and fascinating\n",
"area of research.\n",
"Our brain's ability to perform contrastive learning is linked to the function of the ventral visual stream.\n",
"This area of the brain processes visual information and has been shown to develop hierarchical features that\n",
"capture the structure of visual input through self-supervised learning mechanisms. Evidence suggests that anterior\n",
"regions of the ventral visual stream, particularly the anterior occipito-temporal cortex (aOTC), encode substantial\n",
"information about object categories without requiring explicit category-level supervision (Konkle and Alvarez, 2022).\n",
"Instead, these representations emerge through domain-general learning from natural image structures, where the brain\n",
"differentiates between individual views and categories based on the inherent statistical properties of visual input\n",
"(Livingstone et al., 2019; Arcaro and Livingstone, 2021). This capability supports the notion that the brain's visual\n",
"system can form complex object representations and categorization using self-supervised learning frameworks similar to\n",
"those in artificial neural networks.\n",
"\"\"\""
]
},
@@ -0,0 +1,12 @@
"""
Our brain's ability to perform contrastive learning is linked to the function of the ventral visual stream.
This area of the brain processes visual information and has been shown to develop hierarchical features that
capture the structure of visual input through self-supervised learning mechanisms. Evidence suggests that anterior
regions of the ventral visual stream, particularly the anterior occipito-temporal cortex (aOTC), encode substantial
information about object categories without requiring explicit category-level supervision (Konkle and Alvarez, 2022).
Instead, these representations emerge through domain-general learning from natural image structures, where the brain
differentiates between individual views and categories based on the inherent statistical properties of visual input
(Livingstone et al., 2019; Arcaro and Livingstone, 2021). This capability supports the notion that the brain's visual
system can form complex object representations and categorization using self-supervised learning frameworks similar to
those in artificial neural networks.
"""

This file was deleted.

2 changes: 1 addition & 1 deletion tutorials/W1D2_ComparingTasks/student/W1D2_Tutorial2.ipynb
@@ -1413,7 +1413,7 @@
"execution": {}
},
"source": [
"[*Click for solution*](https://github.com/neuromatch/NeuroAI_Course/tree/main/tutorials/W1D2_ComparingTasks/solutions/W1D2_Tutorial2_Solution_81063c05.py)\n",
"[*Click for solution*](https://github.com/neuromatch/NeuroAI_Course/tree/main/tutorials/W1D2_ComparingTasks/solutions/W1D2_Tutorial2_Solution_0cd5a0dc.py)\n",
"\n"
]
},
