diff --git a/binder/requirements.txt b/binder/requirements.txt
index 87f9877..0290305 100644
--- a/binder/requirements.txt
+++ b/binder/requirements.txt
@@ -4,6 +4,7 @@ ipython
matplotlib
networkx
nilearn
+scikit-learn==0.20.3
notebook
nxviz
pandas
diff --git a/tutorials/08-connectivity.ipynb b/tutorials/08-connectivity.ipynb
index 95f3ca8..1fcc2b0 100644
--- a/tutorials/08-connectivity.ipynb
+++ b/tutorials/08-connectivity.ipynb
@@ -9,7 +9,6 @@
},
"source": [
"# Connectivity\n",
- "[Contributions](#contributions)\n",
"\n",
"Traditional multivariate fMRI techniques focus on the information present in patterns of activity in localized regions (ROIs or searchlights). Sometimes, the relevant information may be represented across a network of brain regions and thus would not be identified via ROI analysis or searchlights. Functional connectivity measures help examine information at a global level, in regions that are far apart, focusing on network interaction rather than spatial localization. When performing connectivity analyses, BOLD timeseries are compared across regions (usually with correlation) and the strength of the relationship determines their functional connectivity. By including or excluding stimulus/task variables, we can study the modulation of connectivity by different cognitive states.\n",
"\n",
@@ -46,13 +45,17 @@
"### Exercises: \n",
">[1](#ex1) [2](#ex2) [3](#ex3) [4](#ex4) [5](#ex5) [6](#ex6) [7](#ex7) [8](#ex8) [9](#ex9) \n",
"\n",
- ">[Novel contribution](#novel) "
+ ">[Novel contribution](#novel) \n",
+ "\n",
+ "[Contributions](#contributions)\n",
+ "\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
+ "collapsed": true,
"nbpresent": {
"id": "1f7f9d75-833f-410f-8988-58c1618fa753"
}
@@ -117,7 +120,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"from utils import latatt_dir\n",
@@ -174,7 +179,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Use the utilities from the simulator to create an event time course based on an FSL onset file\n",
@@ -213,7 +220,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Get the nifti object\n",
@@ -235,7 +244,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Plot the stim time course. \n",
@@ -244,7 +255,7 @@
"plt.plot(left_stim_lag)\n",
"plt.yticks([0,1])\n",
"plt.xlabel('Timepoints')\n",
- "\n",
+ "plt.ylabel('Stimulus')\n",
"plt.legend(('Attend Right', 'Attend Left'), loc='upper right')\n",
"plt.ylim(0, 1.5)\n",
"sns.despine()"
@@ -262,7 +273,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Init a masker object that also standardizes the data\n",
@@ -287,7 +300,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"\"\"\"\n",
@@ -296,10 +311,11 @@
"voxel_ids = [0,10,100]\n",
"\n",
"plt.figure(figsize=(14, 4))\n",
- "plt.title('Voxel activity for rightward trials, voxel ids = ' + str(voxel_ids));\n",
- "plt.plot(bold_wb_r[:, voxel_ids]);\n",
- "plt.ylabel('Evoked activity');\n",
- "plt.xlabel('Timepoints');\n",
+ "plt.title('Voxel activity for rightward trials, voxel ids = ' + str(voxel_ids))\n",
+ "plt.plot(bold_wb_r[:, voxel_ids])\n",
+ "plt.legend(('voxel 0', 'voxel 10', 'voxel 100'))\n",
+ "plt.ylabel('Evoked activity')\n",
+ "plt.xlabel('Timepoints')\n",
"sns.despine()"
]
},
@@ -326,7 +342,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Specify the center of the left and right ROIs\n",
@@ -353,7 +371,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Init the masking object\n",
@@ -374,16 +394,18 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"plt.figure(figsize=(14, 4))\n",
"plt.title('Left PPA ROI activity for right and left attention conditions')\n",
- "plt.plot(bold_lPPA_r);\n",
- "plt.plot(bold_lPPA_l);\n",
- "plt.legend(('Attend Right', 'Attend Left'));\n",
- "plt.ylabel('Evoked activity');\n",
- "plt.xlabel('Timepoints');\n",
+ "plt.plot(bold_lPPA_r)\n",
+ "plt.plot(bold_lPPA_l)\n",
+ "plt.legend(('Attend Right', 'Attend Left'))\n",
+ "plt.ylabel('Evoked activity')\n",
+ "plt.xlabel('Timepoints')\n",
"sns.despine()"
]
},
@@ -401,7 +423,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Correlate seed with every brain voxel. Loop through and extract data for every voxel.\n",
@@ -425,7 +449,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"def seed_correlation(wbBold, seedBold):\n",
@@ -452,7 +478,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Let's use the function and print out the range of results\n",
@@ -473,7 +501,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# We can tranform the correlation array back to a Nifti image object that we can save\n",
@@ -493,7 +523,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Let's also visualize the correlation of the seed with every voxel\n",
@@ -523,7 +555,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Create a glass brain\n",
@@ -548,7 +582,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Insert code here"
@@ -565,7 +601,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Insert code here"
@@ -587,7 +625,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"atlas = datasets.fetch_atlas_harvard_oxford('cort-maxprob-thr25-2mm')\n",
@@ -604,7 +644,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Print the labels\n",
@@ -618,7 +660,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Create a masker object that we can use to select ROIs\n",
@@ -640,7 +684,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Get data for rightward attention only\n",
@@ -668,7 +714,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Like before we want to correlate the whole brain time course with the seed we have pulled out\n",
@@ -691,7 +739,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Map back to the whole brain image\n",
@@ -740,7 +790,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Set up the connectivity object\n",
@@ -767,7 +819,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Alternatively, we could use Nilearn's own plotting function\n",
@@ -793,7 +847,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Insert code here"
@@ -813,7 +869,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Load in the data\n",
@@ -834,7 +892,7 @@
"plt.figure(figsize=(14,4))\n",
"plt.plot(bold_lPPA_r_mcg)\n",
"plt.plot(bold_lPPA_l_mcg)\n",
- "plt.legend(('Attend Right', 'Attend Left'));\n",
+ "plt.legend(('Attend Right', 'Attend Left'))\n",
"plt.ylabel('BOLD signal, standardized')\n",
"plt.xlabel('TRs of right attention blocks')\n",
"plt.title('Background activity in seed region')\n",
@@ -844,7 +902,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Calculate the voxelwise seed-based correlation\n",
@@ -865,7 +925,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Plot the correlation of each voxel in the brain with the seed\n",
@@ -943,7 +1005,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Parcellate the time course to get the background connectivity parcels\n",
@@ -983,7 +1047,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Insert code here"
@@ -1004,7 +1070,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Insert code here\n"
@@ -1022,7 +1090,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Insert code here\n"
@@ -1040,7 +1110,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Load the atlas\n",
@@ -1086,7 +1158,8 @@
"M. Kumar, C. Ellis and N. Turk-Browne produced the initial notebook 3/15/18 \n",
"Q. Lu add solution \n",
"K.A. Norman provided suggestions on the overall content and made edits to this notebook. \n",
- "C. Ellis implemented updates from cmhn-s19"
+ "C. Ellis implemented updates from cmhn-s19
\n",
+ "X. Li improved figures in section 1.3 and 1.4"
]
}
],
@@ -1107,7 +1180,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.6.4"
+ "version": "3.6.8"
}
},
"nbformat": 4,
diff --git a/tutorials/09-fcma/run_fcma_classify.sh b/tutorials/09-fcma/run_fcma_classify.sh
index dde84dd..bfa56df 100755
--- a/tutorials/09-fcma/run_fcma_classify.sh
+++ b/tutorials/09-fcma/run_fcma_classify.sh
@@ -28,7 +28,5 @@ if [ $configuration == "cluster" ]
then
srun --mpi=pmi2 python ./fcma_classify.py $data_dir $suffix $mask_file $epoch_file $left_out_subj $second_mask
else
- echo Cannot run python ./fcma_classify.py $data_dir $suffix $mask_file $epoch_file $left_out_subj $second_mask
- echo Must have two cores in order to setup master and worker control
- echo Try running this on a cluster
+ mpirun -np 2 python ./fcma_classify.py $data_dir $suffix $mask_file $epoch_file $left_out_subj $second_mask
fi
diff --git a/tutorials/09-fcma/run_fcma_voxel_selection_cv.sh b/tutorials/09-fcma/run_fcma_voxel_selection_cv.sh
index c2d5d57..5b66d2f 100755
--- a/tutorials/09-fcma/run_fcma_voxel_selection_cv.sh
+++ b/tutorials/09-fcma/run_fcma_voxel_selection_cv.sh
@@ -27,5 +27,5 @@ if [ $configuration == "cluster" ]
then
srun --mpi=pmi2 python ./fcma_voxel_selection_cv.py $data_dir $suffix $mask_file $epoch_file $left_out_subj $output_dir
else
- python ./fcma_voxel_selection_cv.py $data_dir $suffix $mask_file $epoch_file $left_out_subj $output_dir
+ mpirun -np 2 python ./fcma_voxel_selection_cv.py $data_dir $suffix $mask_file $epoch_file $left_out_subj $output_dir
fi
diff --git a/tutorials/10-isc.ipynb b/tutorials/10-isc.ipynb
index 2322cb2..4fa39ed 100644
--- a/tutorials/10-isc.ipynb
+++ b/tutorials/10-isc.ipynb
@@ -6,7 +6,6 @@
"source": [
"\n",
"## Inter-Subject Correlation and Inter-Subject Functional Correlation \n",
- "[Contributions](#contributions)\n",
"\n",
"The functional connectivity methods that we used in previous notebooks compared time series of BOLD activity between voxels within participant to infer how different regions of the brain were interacting. However, BOLD activity contains multiple components ([Figure a](#fig1)):\n",
"1. Task-based/stimulus-evoked signal that is reliable across participants\n",
@@ -19,7 +18,9 @@
"\n",
"[Figure d](#fig1) shows ISFC: the correlation of every voxel in one participant with every other voxel in another participant (or average of other participants). This is like FCMA except it is between participants rather than within participants. In fact, these analyses use the same computational tricks. ISFC is valuable because it allows us to identify activity coupling in voxels that are not aligned across participants: the off diagonal in [Figure e](#fig1) represents correlations for voxels in different parts of the brain.\n",
"\n",
- ""
+ "
\n",
+ "\n",
+ "\n"
]
},
{
@@ -53,7 +54,10 @@
"\n",
"#### Exercises\n",
">[1](#ex1) [2](#ex2) [3](#ex3) [4](#ex4) [5](#ex5) [6](#ex6) [7](#ex7) [8](#ex8) [9](#ex9) \n",
- ">[Novel contribution](#novel) "
+ ">[Novel contribution](#novel) \n",
+ "\n",
+ "[Contributions](#contributions)\n",
+ "\n"
]
},
{
@@ -917,6 +921,7 @@
"cell_type": "code",
"execution_count": null,
"metadata": {
+ "collapsed": true,
"scrolled": false
},
"outputs": [],
diff --git a/tutorials/11-srm.ipynb b/tutorials/11-srm.ipynb
index 4d3365a..f39737c 100644
--- a/tutorials/11-srm.ipynb
+++ b/tutorials/11-srm.ipynb
@@ -5,7 +5,6 @@
"metadata": {},
"source": [
"# Shared Response Modeling\n",
- "[Contributions](#contributions)\n",
"\n",
"What is reliable across different participants when performing the same task, for example watching a movie? In previous notebooks, inter-subject correlation and inter-subject functional correlation revealed similarity in patterns of brain activity across participants. We can exploit this similarity to find a lower-dimensional shared space that retains features of the data that are common across participants. The shared response model (SRM) aims to learn this shared feature space and how to map individual participant data into it ([Chen et al., 2015](https://papers.nips.cc/paper/5855-a-reduced-dimension-fmri-shared-response-model)). SRM can be used as a functional alignment technique: once fit, other data from the same participants can be transformed into the shared space with these mappings, to be used as input for other analyses.\n",
"\n",
@@ -13,7 +12,7 @@
"\n",
"SRM can be summarized graphically as follows (from [Cohen et al., 2017](https://www.nature.com/articles/nn.4499)):\n",
"\n",
- "SRM requires that each participant be exposed to the same sequence of stimulus events. For this reason, continuous movies or stories are often used, although datasets (e.g., block-design localizers) where the trials are in the same order across participants have also been used with SRM. If counterbalancing was used between participants, it is technically possible to still perform SRM by rearranging the data, although mileage may vary. For best results, the epochs that are counterbalanced ought to be long (e.g., >30s). It is also important to note that SRM needs a lot of training data, up to 400 TRs or more for stable performance.\n",
+ "SRM requires that each participant is exposed to the same sequence of stimulus events. For this reason, continuous movies or stories are often used, although datasets (e.g., block-design localizers) where the trials are in the same order across participants have also been used with SRM. If counterbalancing was used between participants, it is technically possible to still perform SRM by rearranging the data, although mileage may vary. For best results, the epochs that are counterbalanced ought to be long (e.g., >30s). It is also important to note that SRM needs a lot of training data, up to 400 TRs or more for stable performance.\n",
"\n",
"## Goal of this script\n",
" 1. Learn how to fit SRM. \n",
@@ -40,7 +39,10 @@
"\n",
"#### Exercises\n",
">[1](#ex1) [2](#ex2) [3](#ex3) [4](#ex4) [5](#ex5) [6](#ex6) [7](#ex7) [8](#ex8) [9](#ex9) [10](#ex10) \n",
- ">[Novel contribution](#novel) "
+ ">[Novel contribution](#novel) \n",
+ "\n",
+ "[Contributions](#contributions)\n",
+ "\n"
]
},
{
@@ -91,7 +93,7 @@
"\n",
"1. Raider dataset ([Haxby et al., 2011](https://doi.org/10.1016/j.neuron.2011.08.026)): We are using a pre-processed version of the data containing voxels from ventral temporal (VT) cortex in 10 subjects watching the movie, \"Raiders of the Lost Ark\". For these same participants and voxels, the dataset also includes eight runs where participants viewed images from seven categories. Each TR was 3s long and the movie was 2203 TRs long in total. Because the data have already been pre-processed and turned into a numpy file, we don't need to load them with nibabel tools.\n",
"\n",
- "2. Pieman dataset ([Simony et al., 2016](https://doi.org/10.1038/ncomms12141)): A full description of the dataset can be found in the ISC notebook. It includes 18 subjects who listened to a real-life story lasting seven minutes (intact condition). Subjects also listened to two scrambled versions of the story, generated by dividing the story into segments of different timescales (paragraphs and words, respectively) and then permuting the order of these segments.\n",
+ "2. Pieman dataset ([Simony et al., 2016](https://doi.org/10.1038/ncomms12141)): A full description of the dataset can be found in [the ISC notebook](https://brainiak.org/tutorials/10-isc/). It includes 18 subjects who listened to a real-life story lasting seven minutes (intact condition). Subjects also listened to two scrambled versions of the story, generated by dividing the story into segments of different timescales (paragraphs and words, respectively) and then permuting the order of these segments.\n",
"\n",
"First we will load in the data:"
]
@@ -200,11 +202,11 @@
"# Iterate through the subjects\n",
"for sub in range(num_subs): \n",
" \n",
- " # Do it for training data\n",
+ " # Normalize training data\n",
" train_data[sub] = stats.zscore(train_data[sub], axis=1, ddof=1)\n",
" train_data[sub] = np.nan_to_num(train_data[sub])\n",
" \n",
- " # Do it for test data\n",
+ " # Normalize test data\n",
" test_data[sub] = stats.zscore(test_data[sub], axis=1, ddof=1)\n",
" test_data[sub] = np.nan_to_num(test_data[sub])"
]
@@ -279,7 +281,7 @@
"plt.xlabel('TR')\n",
"plt.ylabel('feature')\n",
"plt.imshow(srm.s_, cmap='viridis')\n",
- "plt.colorbar();"
+ "plt.colorbar()"
]
},
{
@@ -300,7 +302,7 @@
"plt.figure(figsize=(15, 4))\n",
"plt.title('SRM: top feature')\n",
"plt.xlabel('TR')\n",
- "plt.plot(srm.s_[0, :]);"
+ "plt.plot(srm.s_[0, :])"
]
},
{
@@ -318,7 +320,7 @@
"source": [
"### 2.1 Similarity of timepoints \n",
"\n",
- "We can now use this shared response to estimate how similar each timepoint is to one another. In other words, we treat each time point as a pattern of activity across the features and we correlate the pattern for each time point with the pattern for all other time points. This is analogous to the spatial ISC analysis from the ISC notebook. To do this, we calculate the distance between all time points in this $k$-dimensional space. Below, blue means that the time points are closer together or more similar. The yellow bands indicate time points where the patterns of activity diverge, potentially reflecting a unique time point in the movie."
+ "We can now use this shared response to estimate how similar each timepoint is to one another. In other words, we treat each time point as a pattern of activity across the features and we correlate the pattern for each time point with the pattern for all other time points. This is analogous to the spatial ISC analysis from [the ISC notebook](https://brainiak.org/tutorials/10-isc/). To do this, we calculate the distance between all time points in this $k$-dimensional space. Below, blue means that the time points are closer together or more similar. The yellow bands indicate time points where the patterns of activity diverge, potentially reflecting a unique time point in the movie."
]
},
{
@@ -357,12 +359,13 @@
"source": [
"plt.subplot(211)\n",
"\n",
- "plt.plot(srm.w_[0][0,:])\n",
- "plt.plot(srm.w_[1][0,:])\n",
+ "plt.plot(srm.w_[0][0,:], label='sub1')\n",
+ "plt.plot(srm.w_[1][0,:], label='sub2')\n",
"feature_corr = np.corrcoef(srm.w_[0][0,:], srm.w_[1][0,:].T)[0,1]\n",
"plt.title('SRM: Weights x Features for one voxel (correlation of loading, r: %0.3f)' % feature_corr) \n",
"plt.xlabel('feature')\n",
"plt.ylabel('weight for one voxel')\n",
+ "plt.legend(loc='lower right')\n",
"plt.tight_layout()"
]
},
@@ -413,11 +416,12 @@
},
"outputs": [],
"source": [
- "# Reorganize the data back into an appropriate space for ISC\n",
+ "# Reorganize the data back into an appropriate space for ISC. Use np.transpose to make it TRs x Voxels x subj\n",
"raw_obj = np.zeros((train_data[0].shape[0], train_data[0].shape[1], len(train_data)))\n",
"for ppt in range(len(train_data)):\n",
" raw_obj[:, :, ppt] = train_data[ppt]\n",
- " \n",
+ "raw_obj=np.transpose(raw_obj,[1,0,2]) \n",
+ "\n",
"# Perform ISC on all participants, collapsing across participants \n",
"corr_raw = isc(raw_obj, summary_statistic='mean')\n",
"corr_raw = np.nan_to_num(corr_raw) \n",
@@ -426,7 +430,8 @@
"shared_obj = np.zeros((shared_train[0].shape[0], shared_train[0].shape[1], len(train_data)))\n",
"for ppt in range(len(train_data)):\n",
" shared_obj[:, :, ppt] = shared_train[ppt]\n",
- " \n",
+ "shared_obj=np.transpose(shared_obj,[1,0,2]) # TRs x Voxels x Subjects for ISC.\n",
+ "\n",
"# Perform ISC on all participants, collapsing across participants \n",
"corr_shared = isc(shared_obj, summary_statistic='mean')\n",
"corr_shared = np.nan_to_num(corr_shared)"
@@ -443,17 +448,17 @@
"plt.figure(figsize=(14,5))\n",
"plt.subplot(1, 2, 1)\n",
"plt.title('ISC for all voxels')\n",
- "plt.hist(corr_raw);\n",
+ "plt.hist(corr_raw)\n",
"plt.xlabel('correlation')\n",
"plt.ylabel('number of voxels')\n",
- "plt.xlim([-1, 1]);\n",
+ "plt.xlim([-1, 1])\n",
"\n",
"plt.subplot(1, 2, 2)\n",
"plt.title('ISC for shared features')\n",
- "plt.hist(corr_shared);\n",
+ "plt.hist(corr_shared)\n",
"plt.xlabel('correlation')\n",
"plt.ylabel('number of features')\n",
- "plt.xlim([-1, 1]);\n",
+ "plt.xlim([-1, 1])\n",
"\n",
"plt.tight_layout()\n",
"\n",
@@ -593,9 +598,10 @@
"# Do the reconstruction on all individual participants and organize it for ISC\n",
"signal_srm = np.zeros((test_data[0].shape[0], test_data[0].shape[1], len(test_data)))\n",
"for ppt in range(len(test_data)):\n",
- " signal_srm[:, :, ppt] = w0.dot(shared_test[ppt])\n",
- "\n",
- "corr_reconstructed = isc(signal_srm, summary_statistic='mean')\n",
+ " w = srm.w_[ppt]\n",
+ " signal_srm[:, :, ppt] = w.dot(shared_test[ppt])\n",
+ "signal_srm_for_isc=np.transpose(signal_srm,[1,0,2]) # TRs x Voxels x Subjects for ISC.\n",
+ "corr_reconstructed = isc(signal_srm_for_isc, summary_statistic='mean')\n",
"corr_reconstructed = np.nan_to_num(corr_reconstructed)"
]
},
@@ -611,17 +617,17 @@
"plt.figure(figsize=(14,5))\n",
"plt.subplot(1, 2, 1)\n",
"plt.title('ISC for all voxels')\n",
- "plt.hist(corr_raw);\n",
+ "plt.hist(corr_raw)\n",
"plt.xlabel('correlation')\n",
"plt.ylabel('number of voxels')\n",
- "plt.xlim([-1, 1]);\n",
+ "plt.xlim([-1, 1])\n",
"\n",
"plt.subplot(1, 2, 2)\n",
"plt.title('ISC for shared features')\n",
- "plt.hist(corr_reconstructed[0]);\n",
+ "plt.hist(corr_reconstructed)\n",
"plt.xlabel('correlation')\n",
"plt.ylabel('number of features')\n",
- "plt.xlim([-1, 1]);\n",
+ "plt.xlim([-1, 1])\n",
"\n",
"plt.tight_layout()\n",
"\n",
@@ -654,7 +660,7 @@
"> - Create a sliding window of size `win_size`.\n",
"> - Break time series into `nseg` overlapping windows. \n",
"> - Extract data for each of these segments.\n",
- "> - One subject is taken out for leave-one-subject-out testing. Note the input data contain all subjects. The removal of one subject from a list for cross-validation is similar to what we covered in previous notebooks (e.g., FCMA).\n",
+ "> - One subject is taken out for leave-one-subject-out testing. Note the input data contain all subjects. The removal of one subject from a list for cross-validation is similar to what we covered in previous notebooks (e.g., [FCMA](https://brainiak.org/tutorials/09-fcma/)).\n",
"> - A correlation is computed between the segments in the held-out subject and the segments from the average of the other subjects (similar ISC). The `compute_correlation` function in BrainIAK is used to calculate this. \n",
"> - For each segment in the held-out subject, the segment from the average of others with the maximum correlation value is found. \n",
"> - If the maximum correlation corresponds to the same time period in the test and training data, we score that as an accurate classification. \n",
@@ -835,7 +841,7 @@
"source": [
"## 6. Pieman analysis\n",
"\n",
- "In the ISC notebook you used the Pieman dataset (see that notebook for more details). This dataset can also be used for SRM. It is much shorter in duration, with only 300 TRs per condition. However, we can still run a time-segment matching analysis.\n",
+ "In the ISC notebook you used the Pieman dataset (see that [notebook](https://brainiak.org/tutorials/10-isc/) for more details). This dataset can also be used for SRM. It is much shorter in duration, with only 300 TRs per condition. However, we can still run a time-segment matching analysis.\n",
"\n",
"Below we load the data from an A1 mask; participants are hearing an audio clip, so we expect auditory cortex to be driven by this stimulus. By using a small mask we can compensate for the relatively small number of data points in this dataset. We will also split the data into training (200 TRs) and test (100 TRs) sets.\n",
"\n",
@@ -914,6 +920,17 @@
"print('Finished loading in Pieman data')"
]
},
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "masked_data.shape"
+ ]
+ },
{
"cell_type": "markdown",
"metadata": {},
@@ -1084,7 +1101,8 @@
"H. Zhang major edits, added more exercises, added image class prediction section, filled in solutions, processed raider dataset, novel contribution ideas. \n",
"M. Kumar edited sections and added details to the time-segment matching function. \n",
"K.A. Norman provided suggestions on the overall content and made edits to this notebook. \n",
- "C. Ellis incorporated edits from cmhn-s19. "
+ "C. Ellis incorporated edits from cmhn-s19. \n",
+ "X. Li added hyperlinks to the notebooks and added figure legend in section 2.2"
]
}
],
@@ -1104,7 +1122,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.6.7"
+ "version": "3.6.8"
},
"varInspector": {
"cols": {
diff --git a/tutorials/12-hmm.ipynb b/tutorials/12-hmm.ipynb
index d8313ba..61dd285 100644
--- a/tutorials/12-hmm.ipynb
+++ b/tutorials/12-hmm.ipynb
@@ -5,7 +5,6 @@
"metadata": {},
"source": [
"# Hidden Markov Models for Event Segmentation\n",
- "[Contributions](#contributions)\n",
"\n",
"If asked to give a quick description of a dinner with friends, you might say something like the following: \"First, we met outside the restaurant while waiting for our table. Once we got to our table, we continued talking for a bit before we ordered something to drink and a few appetizers. Later, everyone ordered their dinner. The food arrived after some time and we all began eating. Finally, it was time for dessert and we continued chatting with each other until desert arrived. After this, we split the bill and headed out of the restaurant. We said our goodbyes to each other, while waiting for our taxis, and went home.\" From this description it is clear that the dinner meeting was composed of stages, or events, that occurred sequentially. Furthermore, these events can be perceived at varying scales. At the longest time scale, the entire dinner could be treated as one event. At smaller time scales, subsets of the meeting such as entering the restaurant, taking off coats, being seated, looking at menus and so on, can be treated as different events. At an even smaller scale, the event of entering the restaurant can be broken up into different sub-events. Regardless of scale, all of these accounts share the property that the event can be represented as a sequence of stages. \n",
"\n",
@@ -50,13 +49,18 @@
"\n",
"#### Exercises \n",
">[1](#ex1) [2](#ex2) [3](#ex3) [4](#ex4) [5](#ex5) [6](#ex6) [7](#ex7) [8](#ex8) \n",
- ">[Novel contribution](#novel) "
+ ">[Novel contribution](#novel) \n",
+ "\n",
+ "[Contributions](#contributions)\n",
+ "\n"
]
},
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"import warnings\n",
@@ -157,7 +161,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Parameters for creating small simulated datasets\n",
@@ -173,7 +179,8 @@
"\n",
"# Check the data. \n",
"f, ax = plt.subplots(1,1, figsize=(12, 4))\n",
- "ax.imshow(D, interpolation='nearest', cmap='viridis', aspect='auto')\n",
+ "img = ax.imshow(D, interpolation='nearest', cmap='viridis', aspect='auto')\n",
+ "f.colorbar(img)\n",
"ax.set_ylabel('Voxels')\n",
"ax.set_title('Simulated brain activity')\n",
"ax.set_xlabel('Timepoints')"
@@ -189,15 +196,18 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"f, ax = plt.subplots(1,1, figsize = (10,8))\n",
- "ax.imshow(np.corrcoef(D.T), cmap='viridis')\n",
+ "img = ax.imshow(np.corrcoef(D.T), cmap='viridis')\n",
"title_text = '''\n",
"TR-TR correlation matrix\n",
"simulated data \n",
"'''\n",
+ "f.colorbar(img)\n",
"ax.set_title(title_text)\n",
"ax.set_xlabel('TR')\n",
"ax.set_ylabel('TR')"
@@ -213,7 +223,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# Find the events in this dataset\n",
@@ -231,12 +243,14 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"f, ax = plt.subplots(1,1, figsize=(8, 4))\n",
- "\n",
- "ax.imshow(hmm_sim.event_pat_.T, cmap='viridis', aspect='auto')\n",
+ "img = ax.imshow(hmm_sim.event_pat_.T, cmap='viridis', aspect='auto')\n",
+ "f.colorbar(img)\n",
"ax.set_title('Estimated brain pattern for each event')\n",
"ax.set_ylabel('Event id')\n",
"ax.set_xlabel('Voxels')"
@@ -252,14 +266,17 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# plot \n",
"f, ax = plt.subplots(1,1, figsize=(12,4))\n",
"\n",
"pred_seg = hmm_sim.segments_[0]\n",
- "ax.imshow(pred_seg.T, aspect='auto', cmap='viridis')\n",
+ "img = ax.imshow(pred_seg.T, aspect='auto', cmap='viridis')\n",
+ "f.colorbar(img)\n",
"ax.set_xlabel('Timepoints')\n",
"ax.set_ylabel('Event label')\n",
"ax.set_title('Predicted event segmentation, by HMM with the ground truth n_events')\n",
@@ -286,7 +303,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"f, ax = plt.subplots(1,1, figsize=(12, 4))\n",
@@ -294,6 +313,7 @@
"ax.plot(hmm_sim.ll_)\n",
"ax.set_title('Log likelihood')\n",
"ax.set_xlabel('EM steps')\n",
+ "ax.set_ylabel('log-likelihood values')\n",
"sns.despine()"
]
},
@@ -312,11 +332,12 @@
},
"outputs": [],
"source": [
- "def plot_tt_similarity_matrix(ax, data_matrix, bounds, n_TRs, title_text):\n",
- " ax.imshow(np.corrcoef(data_matrix.T), cmap='viridis')\n",
+ "def plot_tt_similarity_matrix(f, ax, data_matrix, bounds, n_TRs, title_text):\n",
+ " img = ax.imshow(np.corrcoef(data_matrix.T), cmap='viridis')\n",
" ax.set_title(title_text)\n",
" ax.set_xlabel('TR')\n",
" ax.set_ylabel('TR')\n",
+ " f.colorbar(img)\n",
" # plot the boundaries \n",
" bounds_aug = np.concatenate(([0],bounds,[n_TRs]))\n",
" for i in range(len(bounds_aug)-1):\n",
@@ -332,7 +353,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# extract the boundaries \n",
@@ -344,7 +367,7 @@
"on top of the TR-TR correlation matrix\n",
"simulated data \n",
"'''\n",
- "plot_tt_similarity_matrix(ax, D, bounds, T, title_text)\n",
+ "plot_tt_similarity_matrix(f, ax, D, bounds, T, title_text)\n",
"f.tight_layout()"
]
},
@@ -370,13 +393,15 @@
"source": [
"### 1.1 Load data \n",
"\n",
- "Download whole-brain data for 17 subjects. Voxels with low inter-subject correlation (that were not consistently activated across subjects) were removed, and then the data were downsampled into 141 large regions (from a [resting-state atlas](http://www.dpmlab.org/peerj-784.pdf)). After putting this .h5 file into the same directory as this notebook, we can load in the data. In addition to the fMRI data, we have the coordinates of each region, as well as human-labeled boundaries for event boundaries."
+ "Download whole-brain data for 17 subjects. Voxels with low inter-subject correlation (that were not consistently activated across subjects) were removed, and then the data were downsampled into 141 large regions (from a [resting-state atlas](http://www.dpmlab.org/papers/peerj-784.pdf)). After putting this .h5 file into the same directory as this notebook, we can load in the data. In addition to the fMRI data, we have the coordinates of each region, as well as human-labeled boundaries for event boundaries."
]
},
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# download the data, just need to run these lines once\n",
@@ -448,7 +473,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# set up the nested cross validation template\n",
@@ -509,7 +536,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# hold out a subject \n",
@@ -540,7 +569,7 @@
"source": [
"#### 1.2.2 Outer loop: statistical testing of boundaries \n",
" \n",
- "One way to test whether the model-identified boundaries are consistent across subjects is to fit the model on all but one subject and try to predict something about the held-out subject. There are multiple approaches for doing this (see [here](http://www.dpmlab.org/Neuron17.pdf)), but the simplest is to check whether the model boundaries predict pattern changes in the held-out subject. We therefore measure whether activity patterns 5 TRs apart show a drop in correlation when they are on opposite sides of an event boundary. Here, we pick a `k` and do the full leave-one-subject-out procedure to test the quality of this choice.\n",
+ "One way to test whether the model-identified boundaries are consistent across subjects is to fit the model on all but one subject and try to predict something about the held-out subject. There are multiple approaches for doing this (see [here](http://www.dpmlab.org/papers/Neuron17.pdf)), but the simplest is to check whether the model boundaries predict pattern changes in the held-out subject. We therefore measure whether activity patterns 5 TRs apart show a drop in correlation when they are on opposite sides of an event boundary. Here, we pick a `k` and do the full leave-one-subject-out procedure to test the quality of this choice.\n",
"\n",
"For comparison, we generate permuted versions of the model boundaries, in which the distribution of event lengths (the distances between the boundaries) is held constant but the order of the event lengths is shuffled. This should produce a within vs. across boundary difference of zero on average, but the variance of these null boundaries lets us know the interval of chance values and we can see that we are well above chance."
]
@@ -548,7 +577,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"k = 60\n",
@@ -585,7 +616,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"plt.figure(figsize=(2,5))\n",
@@ -618,7 +651,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"np.random.seed(0)\n",
@@ -692,7 +727,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"# fit event seg models \n",
@@ -711,17 +748,21 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"plt.figure(figsize=(16, 5))\n",
"plt.subplot(1,2,1)\n",
"plt.imshow(hmm_ag_mvr.segments_[0].T,aspect='auto',origin='lower',cmap='viridis')\n",
+ "plt.colorbar()\n",
"plt.xlabel('Movie TRs')\n",
"plt.ylabel('Events')\n",
"\n",
"plt.subplot(1,2,2)\n",
"plt.imshow(hmm_ag_mvr.segments_[1].T,aspect='auto',origin='lower',cmap='viridis')\n",
+ "plt.colorbar()\n",
"plt.xlabel('Recall TRs')\n",
"plt.ylabel('Events')"
]
@@ -743,12 +784,14 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"f, ax = plt.subplots(1,1, figsize=(10,8)) \n",
"\n",
- "ax.imshow(\n",
+ "img = ax.imshow(\n",
" np.dot(hmm_ag_mvr.segments_[1], hmm_ag_mvr.segments_[0].T),\n",
" origin='lower',cmap='viridis'\n",
")\n",
@@ -769,6 +812,7 @@
" linewidth=1,edgecolor='w',facecolor='none'\n",
" )\n",
" ax.add_patch(rect)\n",
+ "f.colorbar(img, shrink=0.6)\n",
"ax.set_xlabel('Movie TRs')\n",
"ax.set_ylabel('Recall TRs')\n",
"\n",
@@ -819,7 +863,9 @@
{
"cell_type": "code",
"execution_count": null,
- "metadata": {},
+ "metadata": {
+ "collapsed": true
+ },
"outputs": [],
"source": [
"subj_id = 2 \n",
@@ -880,7 +926,8 @@
"brainiak demo. \n",
"- Q add exs; nested cv; tune k; modularize code. \n",
"- M. Kumar added introduction and edited section descriptions. \n",
- "- K.A. Norman provided suggestions on the overall content and made edits to this notebook."
+ "- K.A. Norman provided suggestions on the overall content and made edits to this notebook.\n",
+ "- X. Li added colorbars for heatmaps."
]
}
],
diff --git a/tutorials/13-real-time.ipynb b/tutorials/13-real-time.ipynb
index dbf0dc5..b0d35e2 100644
--- a/tutorials/13-real-time.ipynb
+++ b/tutorials/13-real-time.ipynb
@@ -5,7 +5,6 @@
"metadata": {},
"source": [
"# Real-Time fMRI Analysis\n",
- "[Contributions](#contributions)\n",
"\n",
"In typical fMRI experiments the researcher creates a sequence of stimuli or events they want to show in advance and then measures the participant's brain activity in response to this pre-specified task. This provides an assessment of the relationship between behavior in the task and BOLD activity. However, although tempting, it is not possible to conclude from this correlation alone that active brain regions are causing the behavior. Such causal inferences have typically required directly intervening to disrupt or enhance the functioning of a brain region (e.g., via stimulation, lesioning, cooling, etc.) and then examining the impact on behavior. Such experimental manipulations of brain function, especially in a regionally specific way, require invasive techniques and so are not possible in healthy humans for ethical reasons.\n",
"\n",
@@ -39,7 +38,9 @@
"\n",
"Exercises\n",
">[1](#ex1) [2](#ex2) [3](#ex3) [4](#ex4) [5](#ex5) [6](#ex6) [7](#ex7) [8](#ex8) \n",
- ">[Novel contribution](#novel) "
+ ">[Novel contribution](#novel) \n",
+ "\n",
+ "[Contributions](#contributions)\n"
]
},
{
@@ -79,10 +80,9 @@
"import time\n",
"import numpy as np # type: ignore\n",
"import matplotlib.pyplot as plt\n",
+ "import nibabel as nib\n",
"%matplotlib inline\n",
"from sklearn.linear_model import LogisticRegression # type: ignore\n",
- "from watchdog.events import PatternMatchingEventHandler # type: ignore\n",
- "from watchdog.observers import Observer # type: ignore\n",
"from queue import Queue\n",
"from sklearn import svm\n",
"from sklearn import linear_model\n",
@@ -151,6 +151,17 @@
"Below we will set the paths to be used. We also want to specify the proportion of trials that will be used for training the model (time until the real-time neurofeedback kicks in)."
]
},
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "os.chdir('..')"
+ ]
+ },
{
"cell_type": "code",
"execution_count": null,
@@ -194,7 +205,8 @@
" # When the file exists, load it and output it\n",
" vol = np.load(filename)\n",
" \n",
- " return vol"
+ " return vol\n",
+ "use_simple_watcher = 1\n"
]
},
{
@@ -215,6 +227,9 @@
"outputs": [],
"source": [
"# Create a file watching algorithm\n",
+ "from watchdog.events import PatternMatchingEventHandler # type: ignore\n",
+ "from watchdog.observers import Observer # type: ignore\n",
+ "\n",
"def tr_watcher(filename, file_queue):\n",
" \n",
" # Does the file exist?\n",
@@ -247,7 +262,8 @@
" # When an event occurs, put it in the queue\n",
" def on_created(self, event):\n",
" self.q.put(event)\n",
- " "
+ "\n",
+ "use_simple_watcher = 0 # Only overwrite if you get to the end of the function "
]
},
{
@@ -265,25 +281,37 @@
},
"outputs": [],
"source": [
- "file_observer = Observer()\n",
- "file_queue = Queue() # type: ignore\n",
+ "if use_simple_watcher == 1:\n",
"\n",
- "# set up the notifications for when a new TR is created\n",
- "notify_file_pattern = '*.npy' \n",
- "file_notify = file_notify_handler(file_queue, [notify_file_pattern]) \n",
- "file_observer.schedule(file_notify, data_dir, recursive=False) \n",
- "file_observer.start() \n",
+ " for idx in range(train_count): \n",
"\n",
- "for idx in range(train_count): \n",
- "\n",
- " # What file name are you going to load\n",
- " next_filename = data_dir + file_pattern.format(idx) \n",
- " vol = tr_watcher(next_filename, file_queue)\n",
+ " # What file name are you going to load\n",
+ " next_filename = data_dir + file_pattern.format(idx) \n",
+ " vol = tr_watcher_simple(next_filename)\n",
"\n",
- " # When the file exists, load it and output it\n",
- " print('Recieved:', next_filename)\n",
+ " # When the file exists, load it and output it\n",
+ " print('Recieved:', next_filename)\n",
" \n",
- "file_observer.stop()"
+ "else:\n",
+ " file_observer = Observer()\n",
+ " file_queue = Queue() # type: ignore\n",
+ "\n",
+ " # set up the notifications for when a new TR is created\n",
+ " notify_file_pattern = '*.npy' \n",
+ " file_notify = file_notify_handler(file_queue, [notify_file_pattern]) \n",
+ " file_observer.schedule(file_notify, data_dir, recursive=False) \n",
+ " file_observer.start() \n",
+ "\n",
+ " for idx in range(train_count): \n",
+ "\n",
+ " # What file name are you going to load\n",
+ " next_filename = data_dir + file_pattern.format(idx) \n",
+ " vol = tr_watcher(next_filename, file_queue)\n",
+ "\n",
+ " # When the file exists, load it and output it\n",
+ " print('Recieved:', next_filename)\n",
+ "\n",
+ " file_observer.stop()"
]
},
{
@@ -313,8 +341,7 @@
"outputs": [],
"source": [
"# Insert code here\n",
- "def preprocess_vol(vol, mask):\n",
- "\n"
+ "def preprocess_vol(vol, mask):\n"
]
},
{
@@ -342,13 +369,14 @@
"tr_data = np.full((train_count, mask.sum()), np.nan)\n",
"\n",
"# set up the notifications for when a new TR is created\n",
- "file_observer = Observer()\n",
- "file_queue = Queue() # type: ignore\n",
+ "if use_simple_watcher == 0:\n",
+ " file_observer = Observer()\n",
+ " file_queue = Queue() # type: ignore\n",
"\n",
- "notify_file_pattern = '*.npy' \n",
- "file_notify = file_notify_handler(file_queue, [notify_file_pattern]) \n",
- "file_observer.schedule(file_notify, data_dir, recursive=False) \n",
- "file_observer.start() \n",
+ " notify_file_pattern = '*.npy' \n",
+ " file_notify = file_notify_handler(file_queue, [notify_file_pattern]) \n",
+ " file_observer.schedule(file_notify, data_dir, recursive=False) \n",
+ " file_observer.start() \n",
"\n",
"# Cycle through TRs\n",
"for idx in range(train_count): \n",
@@ -357,7 +385,10 @@
" \n",
" # What file name are you going to load\n",
" next_filename = data_dir + file_pattern.format(idx) \n",
- " vol = tr_watcher(next_filename, file_queue)\n",
+ " if use_simple_watcher == 1:\n",
+ " vol = tr_watcher_simple(next_filename)\n",
+ " else:\n",
+ " vol = tr_watcher(next_filename, file_queue)\n",
" \n",
" # Store the volume as a preprocessed vector\n",
" tr_data[idx, :] = preprocess_vol(vol, mask)\n",
@@ -366,7 +397,9 @@
" \n",
" # Print the timing\n",
"\n",
- "file_observer.stop()"
+ "# Stop the file watcher \n",
+ "if use_simple_watcher == 0:\n",
+ " file_observer.stop()"
]
},
{
@@ -459,6 +492,33 @@
"plt.colorbar()"
]
},
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Like we mentioned all the way back in notebook 01, it is possible to view these weights using more sophisticated tools such as niwidgets"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "collapsed": true
+ },
+ "outputs": [],
+ "source": [
+ "try:\n",
+ " # Load in the new variable\n",
+ " from niwidgets import NiftiWidget\n",
+ "\n",
+ " coefs_nii = nib.Nifti1Image(coefs, np.eye(4))\n",
+ " viewer = NiftiWidget(coefs_nii)\n",
+ " viewer.nifti_plotter();\n",
+ "\n",
+ "except:\n",
+ " print('niwidgets cannot run, try installing it or some other viewing tool')"
+ ]
+ },
{
"cell_type": "markdown",
"metadata": {},
@@ -548,12 +608,13 @@
" is_print=0\n",
" \n",
" # set up the notifications for when a new TR is created\n",
- " file_observer = Observer()\n",
- " file_queue = Queue() # type: ignore\n",
- " \n",
- " file_notify = file_notify_handler(file_queue, [notify_file_pattern]) \n",
- " file_observer.schedule(file_notify, data_dir, recursive=False) \n",
- " file_observer.start() \n",
+ " if use_simple_watcher == 0:\n",
+ " file_observer = Observer()\n",
+ " file_queue = Queue() # type: ignore\n",
+ "\n",
+ " file_notify = file_notify_handler(file_queue, [notify_file_pattern]) \n",
+ " file_observer.schedule(file_notify, data_dir, recursive=False) \n",
+ " file_observer.start() \n",
"\n",
" # Listen for TRs\n",
" num_correct = 0 # Preset the number of correct answers to zero\n",
@@ -561,7 +622,11 @@
"\n",
" # What file name are you going to load\n",
" next_filename = data_dir + file_pattern.format(idx, '02d') \n",
- " vol = tr_watcher(next_filename, file_queue)\n",
+ " \n",
+ " if use_simple_watcher == 1:\n",
+ " vol = tr_watcher_simple(next_filename)\n",
+ " else:\n",
+ " vol = tr_watcher(next_filename, file_queue)\n",
"\n",
" # Store the volume as a preprocessed vector\n",
" tr_data[idx, :] = preprocess_vol(vol, mask)\n",
@@ -618,7 +683,8 @@
" plt.ylim((0, 3))\n",
" \n",
" # Stop listening\n",
- " file_observer.stop()\n",
+ " if use_simple_watcher == 0:\n",
+ " file_observer.stop()\n",
" \n",
" # Return the accuracy overall\n",
" return accuracy"
@@ -639,7 +705,8 @@
" incremental_batch=0,\n",
" )\n",
"except Exception as err:\n",
- " file_observer.stop()\n",
+ " if use_simple_watcher == 0:\n",
+ " file_observer.stop()\n",
" print(\"Exception: {}\".format(err))"
]
},
@@ -750,7 +817,8 @@
" incremental_batch=0,\n",
" )\n",
"except Exception as err:\n",
- " file_observer.stop()\n",
+ " if use_simple_watcher == 0:\n",
+ " file_observer.stop()\n",
" print(\"Exception: {}\".format(err))"
]
},
@@ -777,7 +845,8 @@
" incremental_batch=40,\n",
" )\n",
"except Exception as err:\n",
- " file_observer.stop()\n",
+ " if use_simple_watcher == 0:\n",
+ " file_observer.stop()\n",
" print(\"Exception: {}\".format(err))"
]
},
diff --git a/tutorials/imgs/10-isc-figure1.jpg b/tutorials/imgs/10-isc-figure1.jpg
new file mode 100644
index 0000000..1f44cc5
Binary files /dev/null and b/tutorials/imgs/10-isc-figure1.jpg differ