diff --git a/tutorials/W1D2_ComparingTasks/W1D2_Tutorial2.ipynb b/tutorials/W1D2_ComparingTasks/W1D2_Tutorial2.ipynb index bc3a822d1..fc2922051 100644 --- a/tutorials/W1D2_ComparingTasks/W1D2_Tutorial2.ipynb +++ b/tutorials/W1D2_ComparingTasks/W1D2_Tutorial2.ipynb @@ -1602,13 +1602,16 @@ "source": [ "# to_remove explanation\n", "\"\"\"\n", - "Konkle and Alvarez (2022) speculate that perceptual judgements of visual similarity might be\n", - "driven by a process of contrastive learning not unlike that in AI. Different views of the same\n", - "scene, like the augmentations used in contrastive learning, can be generated by eye movements,\n", - "body movements and the passage of time. The hippocampus could hold these views in working memory\n", - "to implement a form of contrastive learning in neocortex. These ideas are closely related to\n", - "long-standing ideas about predictive coding in the brain. This remains an active and fascinating\n", - "area of research.\n", + "Our brain's ability to perform contrastive learning is linked to the function of the ventral visual stream.\n", + "This area of the brain processes visual information and has been shown to develop hierarchical features that\n", + "capture the structure of visual input through self-supervised learning mechanisms. Evidence suggests that anterior\n", + "regions of the ventral visual stream, particularly the anterior occipito-temporal cortex (aOTC), encode substantial\n", + "information about object categories without requiring explicit category-level supervision (Konkle and Alvarez, 2022).\n", + "Instead, these representations emerge through domain-general learning from natural image structures, where the brain\n", + "differentiates between individual views and categories based on the inherent statistical properties of visual input\n", + "(Livingstone et al., 2019; Arcaro and Livingstone, 2021).
This capability supports the notion that the brain's visual\n", + "system can form complex object representations and categorization using self-supervised learning frameworks similar to\n", + "those in artificial neural networks.\n", "\"\"\"" ] }, diff --git a/tutorials/W1D2_ComparingTasks/instructor/W1D2_Tutorial2.ipynb b/tutorials/W1D2_ComparingTasks/instructor/W1D2_Tutorial2.ipynb index 7e33d3e08..971f52ec2 100644 --- a/tutorials/W1D2_ComparingTasks/instructor/W1D2_Tutorial2.ipynb +++ b/tutorials/W1D2_ComparingTasks/instructor/W1D2_Tutorial2.ipynb @@ -1606,13 +1606,16 @@ "source": [ "# to_remove explanation\n", "\"\"\"\n", - "Konkle and Alvarez (2022) speculate that perceptual judgements of visual similarity might be\n", - "driven by a process of contrastive learning not unlike that in AI. Different views of the same\n", - "scene, like the augmentations used in contrastive learning, can be generated by eye movements,\n", - "body movements and the passage of time. The hippocampus could hold these views in working memory\n", - "to implement a form of contrastive learning in neocortex. These ideas are closely related to\n", - "long-standing ideas about predictive coding in the brain. This remains an active and fascinating\n", - "area of research.\n", + "Our brain's ability to perform contrastive learning is linked to the function of the ventral visual stream.\n", + "This area of the brain processes visual information and has been shown to develop hierarchical features that\n", + "capture the structure of visual input through self-supervised learning mechanisms.
Evidence suggests that anterior\n", + "regions of the ventral visual stream, particularly the anterior occipito-temporal cortex (aOTC), encode substantial\n", + "information about object categories without requiring explicit category-level supervision (Konkle and Alvarez, 2022).\n", + "Instead, these representations emerge through domain-general learning from natural image structures, where the brain\n", + "differentiates between individual views and categories based on the inherent statistical properties of visual input\n", + "(Livingstone et al., 2019; Arcaro and Livingstone, 2021). This capability supports the notion that the brain's visual\n", + "system can form complex object representations and categorization using self-supervised learning frameworks similar to\n", + "those in artificial neural networks.\n", "\"\"\"" ] }, diff --git a/tutorials/W1D2_ComparingTasks/solutions/W1D2_Tutorial2_Solution_0cd5a0dc.py b/tutorials/W1D2_ComparingTasks/solutions/W1D2_Tutorial2_Solution_0cd5a0dc.py new file mode 100644 index 000000000..4ac4c10df --- /dev/null +++ b/tutorials/W1D2_ComparingTasks/solutions/W1D2_Tutorial2_Solution_0cd5a0dc.py @@ -0,0 +1,12 @@ +""" +Our brain's ability to perform contrastive learning is linked to the function of the ventral visual stream. +This area of the brain processes visual information and has been shown to develop hierarchical features that +capture the structure of visual input through self-supervised learning mechanisms. Evidence suggests that anterior +regions of the ventral visual stream, particularly the anterior occipito-temporal cortex (aOTC), encode substantial +information about object categories without requiring explicit category-level supervision (Konkle and Alvarez, 2022).
+Instead, these representations emerge through domain-general learning from natural image structures, where the brain +differentiates between individual views and categories based on the inherent statistical properties of visual input +(Livingstone et al., 2019; Arcaro and Livingstone, 2021). This capability supports the notion that the brain's visual +system can form complex object representations and categorization using self-supervised learning frameworks similar to +those in artificial neural networks. +""" \ No newline at end of file diff --git a/tutorials/W1D2_ComparingTasks/solutions/W1D2_Tutorial2_Solution_81063c05.py b/tutorials/W1D2_ComparingTasks/solutions/W1D2_Tutorial2_Solution_81063c05.py deleted file mode 100644 index ca9b56299..000000000 --- a/tutorials/W1D2_ComparingTasks/solutions/W1D2_Tutorial2_Solution_81063c05.py +++ /dev/null @@ -1,9 +0,0 @@ -""" -Konkle and Alvarez (2022) speculate that perceptual judgements of visual similarity might be -driven by a process of contrastive learning not unlike that in AI. Different views of the same -scene, like the augmentations used in contrastive learning, can be generated by eye movements, -body movements and the passage of time. The hippocampus could hold these views in working memory -to implement a form of contrastive learning in neocortex. These ideas are closely related to -long-standing ideas about predictive coding in the brain. This remains an active and fascinating -area of research.
-""" \ No newline at end of file diff --git a/tutorials/W1D2_ComparingTasks/static/W1D2_Tutorial2_Solution_39154423_0.png b/tutorials/W1D2_ComparingTasks/static/W1D2_Tutorial2_Solution_39154423_0.png index 92740d9d5..34a92df09 100644 Binary files a/tutorials/W1D2_ComparingTasks/static/W1D2_Tutorial2_Solution_39154423_0.png and b/tutorials/W1D2_ComparingTasks/static/W1D2_Tutorial2_Solution_39154423_0.png differ diff --git a/tutorials/W1D2_ComparingTasks/student/W1D2_Tutorial2.ipynb b/tutorials/W1D2_ComparingTasks/student/W1D2_Tutorial2.ipynb index e15f23bee..043b6a000 100644 --- a/tutorials/W1D2_ComparingTasks/student/W1D2_Tutorial2.ipynb +++ b/tutorials/W1D2_ComparingTasks/student/W1D2_Tutorial2.ipynb @@ -1413,7 +1413,7 @@ "execution": {} }, "source": [ - "[*Click for solution*](https://github.com/neuromatch/NeuroAI_Course/tree/main/tutorials/W1D2_ComparingTasks/solutions/W1D2_Tutorial2_Solution_81063c05.py)\n", + "[*Click for solution*](https://github.com/neuromatch/NeuroAI_Course/tree/main/tutorials/W1D2_ComparingTasks/solutions/W1D2_Tutorial2_Solution_0cd5a0dc.py)\n", "\n" ] },