From be6f481d86ccfd4b10091eb3418be72267393547 Mon Sep 17 00:00:00 2001 From: Alexandre Savio Date: Mon, 19 Nov 2018 20:16:18 +0100 Subject: [PATCH 1/8] wip: building up fmri scripts --- .gitignore | 1 + neuro_pypes/anat/utils.py | 2 - .../dual_regression.m | 0 .../rsn_multiple_regressions.py | 0 .../rsn_multiple_regressions_template.m | 0 scripts/rest_fmri_preprocessing/README.md | 0 scripts/rest_fmri_preprocessing/process.ipynb | 468 ++++++++++++++++++ scripts/rest_fmri_preprocessing/process.py | 434 ++++++++++++++++ 8 files changed, 903 insertions(+), 2 deletions(-) rename scripts/{ => ica_multiple_regression}/dual_regression.m (100%) rename scripts/{ => ica_multiple_regression}/rsn_multiple_regressions.py (100%) rename scripts/{ => ica_multiple_regression}/rsn_multiple_regressions_template.m (100%) create mode 100644 scripts/rest_fmri_preprocessing/README.md create mode 100644 scripts/rest_fmri_preprocessing/process.ipynb create mode 100644 scripts/rest_fmri_preprocessing/process.py diff --git a/.gitignore b/.gitignore index 4b48089..1533295 100644 --- a/.gitignore +++ b/.gitignore @@ -64,3 +64,4 @@ target/ *.xls *.html slices* +pyscript.m diff --git a/neuro_pypes/anat/utils.py b/neuro_pypes/anat/utils.py index fb0863b..4043154 100644 --- a/neuro_pypes/anat/utils.py +++ b/neuro_pypes/anat/utils.py @@ -30,9 +30,7 @@ def biasfield_correct(anat_filepath=traits.Undefined): n4.inputs.convergence_threshold = 1e-6 #n4.inputs.bspline_order = 5 n4.inputs.save_bias = True - n4.inputs.input_image = anat_filepath - return n4 diff --git a/scripts/dual_regression.m b/scripts/ica_multiple_regression/dual_regression.m similarity index 100% rename from scripts/dual_regression.m rename to scripts/ica_multiple_regression/dual_regression.m diff --git a/scripts/rsn_multiple_regressions.py b/scripts/ica_multiple_regression/rsn_multiple_regressions.py similarity index 100% rename from scripts/rsn_multiple_regressions.py rename to scripts/ica_multiple_regression/rsn_multiple_regressions.py diff --git a/scripts/rsn_multiple_regressions_template.m b/scripts/ica_multiple_regression/rsn_multiple_regressions_template.m similarity index 100% rename from scripts/rsn_multiple_regressions_template.m rename to scripts/ica_multiple_regression/rsn_multiple_regressions_template.m diff --git a/scripts/rest_fmri_preprocessing/README.md b/scripts/rest_fmri_preprocessing/README.md new file mode 100644 index 0000000..e69de29 diff --git a/scripts/rest_fmri_preprocessing/process.ipynb b/scripts/rest_fmri_preprocessing/process.ipynb new file mode 100644 index 0000000..7eb3355 --- /dev/null +++ b/scripts/rest_fmri_preprocessing/process.ipynb @@ -0,0 +1,468 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "\n", + "from hansel import Crumb\n", + "from hansel.operations import joint_value_map, valuesmap_to_dict\n", + "import nipype.pipeline.engine as pe\n", + "from nipype.algorithms.misc import Gunzip\n", + "from nipype.interfaces import spm\n", + "from nipype.interfaces.utility import IdentityInterface, Function\n", + "from nipype.interfaces.io import DataSink\n", + "from nipype.interfaces.ants import N4BiasFieldCorrection\n", + "from nipype.interfaces.base import traits\n", + "\n", + "from neuro_pypes.crumb import DataCrumb\n", + "from neuro_pypes.preproc.slicetime_params import STCParametersInterface\n", + "from neuro_pypes.interfaces.nilearn import math_img\n", + "from neuro_pypes.preproc import get_bounding_box\n", + "from neuro_pypes.utils 
import (\n", + " remove_ext,\n", + " joinstrings,\n", + " selectindex\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "wf_name = 'spm_rest_preprocessing'\n", + "\n", + "work_dir = os.path.expanduser(f'~/data/neuro_pypes/{wf_name}/')\n", + "output_dir = os.path.join(work_dir, 'out')\n", + "cache_dir = os.path.join(work_dir, 'wd')\n", + "\n", + "input_dir = os.path.expanduser('~/projects/neuro/multimodal_test_data/raw')\n", + "\n", + "data_path = os.path.join(os.path.expanduser(input_dir), '{subject_id}', '{session}', '{image}')\n", + "data_crumb = Crumb(data_path, ignore_list=['.*'])\n", + "crumb_modalities = {\n", + " 'anat': [('image', 'anat.nii.gz')],\n", + " 'fmri': [('image', 'rest.nii.gz')]\n", + "}\n", + "\n", + "anat_voxel_sizes = [1, 1, 1]\n", + "\n", + "fmri_smoothing_kernel_fwhm = 8" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "wf = pe.Workflow(name=wf_name, base_dir=work_dir)\n", + "\n", + "\n", + "# ------------------------------------------------------------------------------------------------\n", + "# DATA INPUT AND SINK\n", + "# ------------------------------------------------------------------------------------------------\n", + "datasource = pe.Node(\n", + " DataCrumb(crumb=data_crumb, templates=crumb_modalities, raise_on_empty=False),\n", + " name='selectfiles'\n", + ")\n", + "\n", + "datasink = pe.Node(\n", + " DataSink(parameterization=False, base_directory=output_dir, ),\n", + " name=\"datasink\"\n", + ")\n", + " \n", + "# basic file name substitutions for the datasink\n", + "undef_args = datasource.interface._infields\n", + "substitutions = [(name, \"\") for name in undef_args]\n", + "substitutions.append((\"__\", \"_\"))\n", + "\n", + "datasink.inputs.substitutions = extend_trait_list(datasink.inputs.substitutions, substitutions)\n", + "\n", + "# Infosource - the information source that iterates over crumb values map from the filesystem\n", + "infosource = pe.Node(interface=IdentityInterface(fields=undef_args), name=\"infosrc\")\n", + "infosource.iterables = list(valuesmap_to_dict(joint_value_map(data_crumb, undef_args)).items())\n", + "infosource.synchronize = True\n", + "\n", + "# connect the input_wf to the datasink\n", + "joinpath = pe.Node(joinstrings(len(undef_args)), name='joinpath')\n", + "\n", + "# Connect the infosrc node to the datasink\n", + "input_joins = [(name, 'arg{}'.format(arg_no + 1)) for arg_no, name in enumerate(undef_args)]\n", + "\n", + "wf.connect([\n", + " (infosource, datasource, [(field, field) for field in undef_args]),\n", + " (datasource, joinpath, input_joins),\n", + " (joinpath, datasink, [(\"out\", \"container\")]),\n", + "])" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "# ------------------------------------------------------------------------------------------------\n", + "# ANAT\n", + "# ------------------------------------------------------------------------------------------------\n", + "\n", + "# T1 preprocessing nodes\n", + "\n", + "# ANTs N4 Bias field correction\n", + "n4 = N4BiasFieldCorrection()\n", + "n4.inputs.dimension = 3\n", + "n4.inputs.bspline_fitting_distance = 300\n", + "n4.inputs.shrink_factor = 3\n", + "n4.inputs.n_iterations = [50, 50, 30, 20]\n", + "n4.inputs.convergence_threshold = 1e-6\n", + "n4.inputs.save_bias = True\n", + "n4.inputs.input_image = traits.Undefined\n", + "biascor = pe.Node(n4, 
name=\"bias_correction\")\n", + "\n", + "gunzip_anat = pe.Node(Gunzip(), name=\"gunzip_anat\")\n", + "\n", + "# SPM New Segment\n", + "spm_info = spm.Info()\n", + "priors_path = os.path.join(spm_info.path(), 'tpm', 'TPM.nii')\n", + "segment = spm.NewSegment()\n", + "tissue1 = ((priors_path, 1), 1, (True, True), (True, True))\n", + "tissue2 = ((priors_path, 2), 1, (True, True), (True, True))\n", + "tissue3 = ((priors_path, 3), 2, (True, True), (True, True))\n", + "tissue4 = ((priors_path, 4), 3, (True, True), (True, True))\n", + "tissue5 = ((priors_path, 5), 4, (True, False), (False, False))\n", + "tissue6 = ((priors_path, 6), 2, (False, False), (False, False))\n", + "segment.inputs.tissues = [tissue1, tissue2, tissue3, tissue4, tissue5, tissue6]\n", + "segment.inputs.channel_info = (0.0001, 60, (True, True))\n", + "segment.inputs.write_deformation_fields = [True, True]\n", + "segment.inputs.channel_files = traits.Undefined\n", + "segment = pe.Node(segment, name=\"new_segment\")\n", + "\n", + "# Apply deformations\n", + "normalize_anat = spm.Normalize12(jobtype='write')\n", + "normalize_anat.inputs.write_voxel_sizes = anat_voxel_sizes\n", + "normalize_anat.inputs.deformation_file = traits.Undefined\n", + "normalize_anat.inputs.image_to_align = traits.Undefined\n", + "normalize_anat.inputs.write_bounding_box = traits.Undefined\n", + "warp_anat = pe.Node(normalize_anat, name=\"warp_anat\")\n", + "\n", + "tpm_bbox = pe.Node(\n", + " Function(function=get_bounding_box, input_names=[\"in_file\"], output_names=[\"bbox\"]),\n", + " name=\"tpm_bbox\"\n", + ")\n", + "tpm_bbox.inputs.in_file = priors_path\n", + "\n", + "# calculate brain mask from tissue maps\n", + "tissues = pe.Node(\n", + " IdentityInterface(fields=[\"gm\", \"wm\", \"csf\"], mandatory_inputs=True),\n", + " name=\"tissues\"\n", + ")\n", + "brain_mask = pe.Node(\n", + " Function(\n", + " function=math_img, \n", + " input_names=[\"formula\", \"out_file\", \"gm\", \"wm\", \"csf\"], \n", + " output_names=[\"out_file\"],\n", + " imports=['from neuro_pypes.interfaces.nilearn import ni2file']),\n", + " name='brain_mask'\n", + ")\n", + "brain_mask.inputs.out_file = \"tissues_brain_mask.nii.gz\"\n", + "brain_mask.inputs.formula = \"np.abs(gm + wm + csf) > 0\"\n", + "\n", + "# Connect the nodes\n", + "wf.connect([\n", + " # input to biasfieldcorrection\n", + " (datasource, biascor, [(\"anat\", \"input_image\")]),\n", + "\n", + " # new segment\n", + " (biascor, gunzip_anat, [(\"output_image\", \"in_file\")]),\n", + " (gunzip_anat, segment, [(\"out_file\", \"channel_files\")]),\n", + "\n", + " # Normalize12\n", + " (segment, warp_anat, [(\"forward_deformation_field\", \"deformation_file\")]),\n", + " (segment, warp_anat, [(\"bias_corrected_images\", \"apply_to_files\")]),\n", + " (tpm_bbox, warp_anat, [(\"bbox\", \"write_bounding_box\")]),\n", + "\n", + " # brain mask from tissues\n", + " (segment, tissues,[\n", + " ((\"native_class_images\", selectindex, 0), \"gm\"),\n", + " ((\"native_class_images\", selectindex, 1), \"wm\"),\n", + " ((\"native_class_images\", selectindex, 2), \"csf\"),\n", + " ]),\n", + "\n", + " (tissues, brain_mask, [(\"gm\", \"gm\"), (\"wm\", \"wm\"), (\"csf\", \"csf\"),]),\n", + "\n", + " # output\n", + " (warp_anat, datasink, [(\"normalized_files\", \"anat.@mni\")]),\n", + " (segment, datasink, [(\"modulated_class_images\", \"anat.tissues.warped\"),\n", + " (\"native_class_images\", \"anat.tissues.native\"),\n", + " (\"transformation_mat\", \"anat.transform.@linear\"),\n", + " (\"forward_deformation_field\", 
\"anat.transform.@forward\"),\n", + " (\"inverse_deformation_field\", \"anat.transform.@inverse\"),\n", + " (\"bias_corrected_images\", \"anat.@biascor\")]),\n", + " (brain_mask, datasink, [(\"out_file\", \"anat.@brain_mask\")]),\n", + "])\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# ------------------------------------------------------------------------------------------------\n", + "# FMRI Clean\n", + "# ------------------------------------------------------------------------------------------------\n", + "\n", + "# rs-fMRI preprocessing nodes\n", + "trim = pe.Node(Trim(), name=\"trim\")\n", + "\n", + "# slice-timing correction\n", + "params = setup_node(STCParametersInterface(in_files=in_file), name='stc_params')\n", + "gunzip = setup_node(Gunzip(), name=\"gunzip\")\n", + "\n", + "stc = spm.SliceTiming()\n", + "stc.inputs.in_files = traits.Undefined\n", + "stc.inputs.out_prefix = 'stc'\n", + "slice_timing = pe.Node(stc, name='slice_timing')\n", + "wf.connect([(stc_input, params, [(\"in_file\", \"in_files\"),\n", + " (\"num_slices\", \"num_slices\"),\n", + " (\"slice_order\", \"slice_order\"),\n", + " (\"time_repetition\", \"time_repetition\"),\n", + " (\"time_acquisition\", \"time_acquisition\"),\n", + " (\"ref_slice\", \"ref_slice\"),\n", + " (\"slice_mode\", \"slice_mode\"),\n", + " ]),\n", + "\n", + " # processing nodes\n", + " (params, gunzip, [((\"in_files\", _pick_first), \"in_file\")]),\n", + " (params, stc, [((\"slice_order\", _sum_one_to_each), \"slice_order\"),\n", + " ((\"ref_slice\", _sum_one), \"ref_slice\"),\n", + " (\"num_slices\", \"num_slices\"),\n", + " (\"time_acquisition\", \"time_acquisition\"),\n", + " (\"time_repetition\", \"time_repetition\"),\n", + " ]),\n", + " (gunzip, stc, [(\"out_file\", \"in_files\")]),\n", + "\n", + " # output node\n", + " (params, stc_output, [(\"time_repetition\", \"time_repetition\")]),\n", + " (stc, stc_output, [(\"timecorrected_files\", \"timecorrected_files\")]),\n", + " ])\n", + "\n", + "\n", + "realign = pe.Node(nipy_motion_correction(), name='realign')\n", + "\n", + "# average\n", + "average = pe.Node(\n", + " Function(\n", + " function=mean_img,\n", + " input_names=[\"in_file\"],\n", + " output_names=[\"out_file\"],\n", + " imports=['from neuro_pypes.interfaces.nilearn import ni2file']\n", + " ),\n", + " name='average_epi'\n", + ")\n", + "\n", + "mean_gunzip = pe.Node(Gunzip(), name=\"mean_gunzip\")\n", + "\n", + "# co-registration nodes\n", + "coreg = pe.Node(spm_coregister(cost_function=\"mi\"), name=\"coreg_fmri\")\n", + "brain_sel = pe.Node(Select(index=[0, 1, 2]), name=\"brain_sel\")\n", + "\n", + "# brain mask made with EPI\n", + "epi_mask = pe.Node(ComputeMask(), name='epi_mask')\n", + "\n", + "# brain mask made with the merge of the tissue segmentations\n", + "tissue_mask = pe.Node(fsl.MultiImageMaths(), name='tissue_mask')\n", + "tissue_mask.inputs.op_string = \"-add %s -add %s -abs -kernel gauss 4 -dilM -ero -kernel gauss 1 -dilM -bin\"\n", + "tissue_mask.inputs.out_file = \"tissue_brain_mask.nii.gz\"\n", + "\n", + "# select tissues\n", + "gm_select = pe.Node(Select(index=[0]), name=\"gm_sel\")\n", + "wmcsf_select = pe.Node(Select(index=[1, 2]), name=\"wmcsf_sel\")\n", + "\n", + "# noise filter\n", + "noise_wf = rest_noise_filter_wf()\n", + "wm_select = pe.Node(Select(index=[1]), name=\"wm_sel\")\n", + "csf_select = pe.Node(Select(index=[2]), name=\"csf_sel\")\n", + "\n", + "# bandpass filtering\n", + "bandpass = pe.Node(\n", + " Function(\n", + 
" input_names=['files', 'lowpass_freq', 'highpass_freq', 'tr'],\n", + " output_names=['out_files'],\n", + " function=bandpass_filter\n", + " ),\n", + " name='bandpass'\n", + ")\n", + "\n", + "# smooth\n", + "smooth = pe.Node(\n", + " Function(\n", + " function=smooth_img,\n", + " input_names=[\"in_file\", \"fwhm\"],\n", + " output_names=[\"out_file\"],\n", + " imports=['from neuro_pypes.interfaces.nilearn import ni2file']\n", + " ),\n", + " name=\"smooth\"\n", + ")\n", + "smooth.inputs.fwhm = fmri_smoothing_kernel_fwhm\n", + "smooth.inputs.out_file = \"smooth_std_{}.nii.gz\".format(wf_name)\n", + "\n", + "# output identities\n", + "rest_output = setup_node(IdentityInterface(fields=out_fields), name=\"rest_output\")\n", + "\n", + "# Connect the nodes\n", + "\n", + "# (in_files, cleanup_wf, [(\"rest\", \"rest_input.in_file\")]),\n", + "\n", + "# # anat to fMRI registration inputs\n", + "# (anat_output, cleanup_wf, [\n", + "# (\"tissues_native\", \"rest_input.tissues\"),\n", + "# (\"anat_biascorr\", \"rest_input.anat\"),\n", + "# ]),\n", + "\n", + "# # clean_up_wf to datasink\n", + "# (cleanup_wf, datasink, [\n", + "# (\"rest_output.epi_brain_mask\", \"rest.@epi_brain_mask\"),\n", + "# (\"rest_output.tissues_brain_mask\", \"rest.@tissues_brain_mask\"),\n", + "# (\"rest_output.tissues\", \"rest.@tissues\"),\n", + "# (\"rest_output.anat\", \"rest.@anat\"),\n", + "# (\"rest_output.motion_regressors\", \"rest.@motion_regressors\"),\n", + "# (\"rest_output.compcor_regressors\", \"rest.@compcor_regressors\"),\n", + "# (\"rest_output.gsr_regressors\", \"rest.@gsr_regressors\"),\n", + "# (\"rest_output.motion_params\", \"rest.@motion_params\"),\n", + "# (\"rest_output.motion_corrected\", \"rest.@motion_corrected\"),\n", + "# (\"rest_output.nuis_corrected\", \"rest.@nuis_corrected\"),\n", + "# (\"rest_output.time_filtered\", \"rest.@time_filtered\"),\n", + "# (\"rest_output.smooth\", \"rest.@smooth\"),\n", + "# (\"rest_output.avg_epi\", \"rest.@avg_epi\"),\n", + "# (\"rest_output.tsnr_file\", \"rest.@tsnr\"),\n", + "# (\"rest_output.art_displacement_files\", \"rest.artifact_stats.@displacement\"),\n", + "# (\"rest_output.art_intensity_files\", \"rest.artifact_stats.@art_intensity\"),\n", + "# (\"rest_output.art_norm_files\", \"rest.artifact_stats.@art_norm\"),\n", + "# (\"rest_output.art_outlier_files\", \"rest.artifact_stats.@art_outlier\"),\n", + "# (\"rest_output.art_plot_files\", \"rest.artifact_stats.@art_plot\"),\n", + "# (\"rest_output.art_statistic_files\", \"rest.artifact_stats.@art_statistic\"),\n", + "# ]),\n", + "# ])\n", + "\n", + "wf.connect([\n", + " # trim\n", + " (rest_input, trim, [(\"in_file\", \"in_file\")]),\n", + "\n", + " # slice time correction\n", + " (trim, stc_wf, [(\"out_file\", \"stc_input.in_file\")]),\n", + "\n", + " # motion correction\n", + " (stc_wf, realign, [(\"stc_output.timecorrected_files\", \"in_file\")]),\n", + "\n", + " # coregistration target\n", + " (realign, average, [(\"out_file\", \"in_file\")]),\n", + " (average, mean_gunzip, [(\"out_file\", \"in_file\")]),\n", + " (mean_gunzip, coreg, [(\"out_file\", \"target\")]),\n", + "\n", + " # epi brain mask\n", + " (average, epi_mask, [(\"out_file\", \"mean_volume\")]),\n", + "\n", + " # coregistration\n", + " (rest_input, coreg, [(\"anat\", \"source\")]),\n", + " (rest_input, brain_sel, [(\"tissues\", \"inlist\")]),\n", + " (brain_sel, coreg, [((\"out\", flatten_list), \"apply_to_files\")]),\n", + "\n", + " # tissue brain mask\n", + " (coreg, gm_select, [(\"coregistered_files\", \"inlist\")]),\n", + " (coreg, 
wmcsf_select, [(\"coregistered_files\", \"inlist\")]),\n", + " (gm_select, tissue_mask, [((\"out\", flatten_list), \"in_file\")]),\n", + " (wmcsf_select, tissue_mask, [((\"out\", flatten_list), \"operand_files\")]),\n", + "\n", + " # nuisance correction\n", + " (coreg, wm_select, [(\"coregistered_files\", \"inlist\",)]),\n", + " (coreg, csf_select, [(\"coregistered_files\", \"inlist\",)]),\n", + " (realign, noise_wf, [(\"out_file\", \"rest_noise_input.in_file\",)]),\n", + " (tissue_mask, noise_wf, [(\"out_file\", \"rest_noise_input.brain_mask\")]),\n", + " (wm_select, noise_wf, [((\"out\", flatten_list), \"rest_noise_input.wm_mask\")]),\n", + " (csf_select, noise_wf, [((\"out\", flatten_list), \"rest_noise_input.csf_mask\")]),\n", + "\n", + " (realign, noise_wf, [(\"par_file\", \"rest_noise_input.motion_params\",)]),\n", + "\n", + " # temporal filtering\n", + " (noise_wf, bandpass, [(\"rest_noise_output.nuis_corrected\", \"files\")]),\n", + " # (realign, bandpass, [(\"out_file\", \"files\")]),\n", + " (stc_wf, bandpass, [(\"stc_output.time_repetition\", \"tr\")]),\n", + " (rest_input, bandpass, [\n", + " (\"lowpass_freq\", \"lowpass_freq\"),\n", + " (\"highpass_freq\", \"highpass_freq\"),\n", + " ]),\n", + " (bandpass, smooth, [(\"out_files\", \"in_file\")]),\n", + "\n", + " # output\n", + " (epi_mask, rest_output, [(\"brain_mask\", \"epi_brain_mask\")]),\n", + " (tissue_mask, rest_output, [(\"out_file\", \"tissues_brain_mask\")]),\n", + " (realign, rest_output, [\n", + " (\"out_file\", \"motion_corrected\"),\n", + " (\"par_file\", \"motion_params\"),\n", + " ]),\n", + " (coreg, rest_output, [\n", + " (\"coregistered_files\", \"tissues\"),\n", + " (\"coregistered_source\", \"anat\"),\n", + " ]),\n", + " (noise_wf, rest_output, [\n", + " (\"rest_noise_output.motion_regressors\", \"motion_regressors\"),\n", + " (\"rest_noise_output.compcor_regressors\", \"compcor_regressors\"),\n", + " (\"rest_noise_output.gsr_regressors\", \"gsr_regressors\"),\n", + " (\"rest_noise_output.nuis_corrected\", \"nuis_corrected\"),\n", + " (\"rest_noise_output.tsnr_file\", \"tsnr_file\"),\n", + " (\"rest_noise_output.art_displacement_files\", \"art_displacement_files\"),\n", + " (\"rest_noise_output.art_intensity_files\", \"art_intensity_files\"),\n", + " (\"rest_noise_output.art_norm_files\", \"art_norm_files\"),\n", + " (\"rest_noise_output.art_outlier_files\", \"art_outlier_files\"),\n", + " (\"rest_noise_output.art_plot_files\", \"art_plot_files\"),\n", + " (\"rest_noise_output.art_statistic_files\", \"art_statistic_files\"),\n", + " ]),\n", + " (average, rest_output, [(\"out_file\", \"avg_epi\")]),\n", + " (bandpass, rest_output, [(\"out_files\", \"time_filtered\")]),\n", + " (smooth, rest_output, [(\"out_file\", \"smooth\")]),\n", + "])\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if n_cpus > 1:\n", + " wf.run(plugin=plugin, plugin_args={\"n_procs\": n_cpus})\n", + "else:\n", + " wf.run(plugin=None)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.6" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/scripts/rest_fmri_preprocessing/process.py b/scripts/rest_fmri_preprocessing/process.py new file 
mode 100644 index 0000000..964ea48 --- /dev/null +++ b/scripts/rest_fmri_preprocessing/process.py @@ -0,0 +1,434 @@ +import os + +import nipype.pipeline.engine as pe +from nipype.interfaces.io import DataSink +from nipype.interfaces.utility import IdentityInterface + +from neuro_pypes.crumb import DataCrumb + +# from nipype.algorithms.misc import Gunzip +# from nipype.interfaces import fsl +# from nipype.interfaces.nipy.preprocess import Trim, ComputeMask +# from nipype.interfaces.utility import Function, Select, IdentityInterface + +# from neuro_pypes._utils import format_pair_list, flatten_list +# from neuro_pypes.config import setup_node, get_config_setting +# from neuro_pypes.fmri.filter import bandpass_filter +# from neuro_pypes.fmri.nuisance import rest_noise_filter_wf +# from neuro_pypes.interfaces.nilearn import mean_img, smooth_img +# from neuro_pypes.preproc import ( +# auto_spm_slicetime, +# nipy_motion_correction, +# spm_coregister +# ) +# from neuro_pypes.utils import (remove_ext, +# extend_trait_list, +# get_input_node, +# get_interface_node, +# get_datasink, +# get_input_file_name, +# extension_duplicates) + +# ------------------------------------------------------------------------------------------------ +# GLOBAL VARIABLES +# ------------------------------------------------------------------------------------------------ + +wf_name = 'rest_fmri_preprocess' + +# STDB_DIR = os.path.expanduser('~/projects/neuro/std_brains') +# SPM_DIR = os.path.expanduser('~/Software/spm_mcr') +# BASE_DIR = os.path.expanduser('~/Data/nuk/petnet') +# data_dir = os.path.join(BASE_DIR, 'raw') +# cache_dir = os.path.join(BASE_DIR, 'wd') +# output_dir = os.path.join(BASE_DIR, 'out') +# plugin = None +# n_cpus = 5 + +# HAMM_DIR = os.path.join(STDB_DIR, 'atlases', 'hammers') +# HAMM_MNI = os.path.join(HAMM_DIR, 'Hammers_mith_atlas_n30r83_SPM5.nii.gz') +# HAMM_LABELS = os.path.join(HAMM_DIR, 'labels.txt') + +# SPM_CANONICAL_BRAIN_2MM = os.path.join(STDB_DIR, 'templates', 'spm_canonical', 'single_subj_T1_brain.nii.gz') + +# # template files +# PET_MNI = os.path.join(STDB_DIR, 'templates', 'spm_canonical', 'pet.nii') +# MNI_MASK = os.path.join(STDB_DIR, 'templates', 'avg152T1_brain.nii.gz') + +# # data input/output os.path.dirname(__file__) means alongside with this .py file +# #settings_file = os.path.join(os.path.dirname(__file__), 'pypes_config.yml') +# settings_file = ('/home/iripp/projects/alex/nuk_experiments/MRPET_15/Preproc_30_60pi_pet_recon_NEW/pypes_config.yml') +# #nipype_cfg_file = os.path.join(os.path.dirname(__file__), 'nipype.cfg') +# nipype_cfg_file = ('/home/iripp/projects/alex/nuk_experiments/MRPET_15/Preproc_30_60pi_pet_recon_NEW/nipype.cfg') + + + +# mrpet15_preproc_wf_2 = dict([ +# ("spm_anat_preproc", attach_spm_anat_preprocessing), +# ("spm_pet_preproc", attach_spm_pet_preprocessing), +# ("spm_mrpet_preproc", attach_spm_mrpet_preprocessing), +# ("spm_pet_grouptemplate", attach_spm_pet_grouptemplate), +# ]) + +# data_path = os.path.join(os.path.expanduser(data_dir), '{session}', '{subject_id}', '{scan}', '{image}') +# data_crumb = Crumb(data_path, ignore_list=['.*']) + + +# crumb_modalities = { +# 'anat': [('scan', 'T1'), ('image', 'Head_MPRAGE_highContrast.nii.gz')], +# 'pet': [('scan', 'PET_recon_first_30min'), ('image', 'pet_recon.nii.gz')], +# } + + +# ------------------------------------------------------------------------------------------------ +# DATA INPUT AND SINK +# ------------------------------------------------------------------------------------------------ + +wf = 
pe.Workflow(name=wf_name, base_dir=work_dir) + +# datasink +datasink = pe.Node( + DataSink(parameterization=False, base_directory=output_dir, ), + name="datasink" +) + +# input workflow +# (work_dir, data_crumb, crumb_arg_values, files_crumb_args, wf_name="input_files"): +select_files = pe.Node( + DataCrumb(crumb=data_crumb, templates=file_templates, raise_on_empty=False), + name='selectfiles' +) + +# basic file name substitutions for the datasink +undef_args = select_files.interface._infields +substitutions = [(name, "") for name in undef_args] +substitutions.append(("__", "_")) + +datasink.inputs.substitutions = extend_trait_list(datasink.inputs.substitutions, + substitutions) + +# Infosource - the information source that iterates over crumb values map from the filesystem +infosource = pe.Node(interface=IdentityInterface(fields=undef_args), name="infosrc") +infosource.iterables = list(valuesmap_to_dict(joint_value_map(data_crumb, undef_args)).items()) +infosource.synchronize = True + +# connect the input_wf to the datasink +joinpath = pe.Node(joinstrings(len(undef_args)), name='joinpath') + +# Connect the infosrc node to the datasink +input_joins = [(name, 'arg{}'.format(arg_no + 1)) + for arg_no, name in enumerate(undef_args)] + +wf.connect([ + (infosource, select_files, [(field, field) for field in undef_args]), + (select_files, joinpath, input_joins), + (joinpath, datasink, [("out", "container")]), +], +) + +# ------------------------------------------------------------------------------------------------ +# ANAT +# ------------------------------------------------------------------------------------------------ + + # input node + anat_input = pe.Node(IdentityInterface(fields=in_fields, mandatory_inputs=True), + name="anat_input") + + # atlas registration + if do_atlas and not isdefined(anat_input.inputs.atlas_file): + anat_input.inputs.set(atlas_file=atlas_file) + + # T1 preprocessing nodes + biascor = setup_node(biasfield_correct(), name="bias_correction") + gunzip_anat = setup_node(Gunzip(), name="gunzip_anat") + segment = setup_node(spm_segment(), name="new_segment") + warp_anat = setup_node(spm_apply_deformations(), name="warp_anat") + + tpm_bbox = setup_node(Function(function=get_bounding_box, + input_names=["in_file"], + output_names=["bbox"]), + name="tpm_bbox") + tpm_bbox.inputs.in_file = spm_tpm_priors_path() + + # calculate brain mask from tissue maps + tissues = setup_node(IdentityInterface(fields=["gm", "wm", "csf"], mandatory_inputs=True), + name="tissues") + + brain_mask = setup_node(Function(function=math_img, + input_names=["formula", "out_file", "gm", "wm", "csf"], + output_names=["out_file"], + imports=['from neuro_pypes.interfaces.nilearn import ni2file']), + name='brain_mask') + brain_mask.inputs.out_file = "tissues_brain_mask.nii.gz" + brain_mask.inputs.formula = "np.abs(gm + wm + csf) > 0" + + # output node + anat_output = pe.Node(IdentityInterface(fields=out_fields), name="anat_output") + + # Connect the nodes + wf.connect([ + # input to biasfieldcorrection + (anat_input, biascor , [("in_file", "input_image")]), + + # new segment + (biascor, gunzip_anat, [("output_image", "in_file")]), + (gunzip_anat, segment, [("out_file", "channel_files")]), + + # Normalize12 + (segment, warp_anat, [("forward_deformation_field", "deformation_file")]), + (segment, warp_anat, [("bias_corrected_images", "apply_to_files")]), + (tpm_bbox, warp_anat, [("bbox", "write_bounding_box")]), + + # brain mask from tissues + (segment, tissues, [(("native_class_images", selectindex, 0), 
"gm"), + (("native_class_images", selectindex, 1), "wm"), + (("native_class_images", selectindex, 2), "csf"), + ]), + + (tissues, brain_mask, [("gm", "gm"), ("wm", "wm"), ("csf", "csf"),]), + + # output + (warp_anat, anat_output, [("normalized_files", "anat_mni")]), + (segment, anat_output, [("modulated_class_images", "tissues_warped"), + ("native_class_images", "tissues_native"), + ("transformation_mat", "affine_transform"), + ("forward_deformation_field", "warp_forward"), + ("inverse_deformation_field", "warp_inverse"), + ("bias_corrected_images", "anat_biascorr")]), + (brain_mask, anat_output, [("out_file", "brain_mask")]), + ]) + + # atlas warping nodes + if do_atlas: + gunzip_atlas = pe.Node(Gunzip(), name="gunzip_atlas") + warp_atlas = setup_node(spm_apply_deformations(), name="warp_atlas") + anat_bbox = setup_node(Function(function=get_bounding_box, + input_names=["in_file"], + output_names=["bbox"]), + name="anat_bbox") + + # set the warping interpolation to nearest neighbour. + warp_atlas.inputs.write_interp = 0 + + # connect the atlas registration nodes + wf.connect([ + (anat_input, gunzip_atlas, [("atlas_file", "in_file")]), + (gunzip_anat, anat_bbox, [("out_file", "in_file")]), + (gunzip_atlas, warp_atlas, [("out_file", "apply_to_files")]), + (segment, warp_atlas, [("inverse_deformation_field", "deformation_file")]), + (anat_bbox, warp_atlas, [("bbox", "write_bounding_box")]), + (warp_atlas, anat_output, [("normalized_files", "atlas_anat")]), + ]) + +# Create the workflow object +wf = pe.Workflow(name=wf_name) + +wf.connect([(in_files, anat_wf, [("anat", "anat_input.in_file")]), + (anat_wf, datasink, [ + ("anat_output.anat_mni", "anat.@mni"), + ("anat_output.tissues_warped", "anat.tissues.warped"), + ("anat_output.tissues_native", "anat.tissues.native"), + ("anat_output.affine_transform", "anat.transform.@linear"), + ("anat_output.warp_forward", "anat.transform.@forward"), + ("anat_output.warp_inverse", "anat.transform.@inverse"), + ("anat_output.anat_biascorr", "anat.@biascor"), + ("anat_output.brain_mask", "anat.@brain_mask"), + ]), + ]) + +# check optional outputs +if do_atlas: + wf.connect([(anat_wf, datasink, [("anat_output.atlas_anat", "anat.@atlas")]),]) + +do_cortical_thickness = get_config_setting('anat_preproc.do_cortical_thickness', False) +if do_cortical_thickness: + wf.connect([(anat_wf, datasink, [("anat_output.cortical_thickness", "anat.@cortical_thickness"), + ("anat_output.warped_white_matter", "anat.@warped_white_matter"), + ]), + ]) + +# ------------------------------------------------------------------------------------------------ +# FMRI +# ------------------------------------------------------------------------------------------------ + +# # specify input and output fields +# in_fields = [ +# "in_file", +# "anat", +# "atlas_anat", +# "coreg_target", +# "tissues", +# "lowpass_freq", +# "highpass_freq", +# ] + +# out_fields = [ +# "motion_corrected", +# "motion_params", +# "tissues", +# "anat", +# "avg_epi", +# "time_filtered", +# "smooth", +# "tsnr_file", +# "epi_brain_mask", +# "tissues_brain_mask", +# "motion_regressors", +# "compcor_regressors", +# "gsr_regressors", +# "nuis_corrected", +# "art_displacement_files", +# "art_intensity_files", +# "art_norm_files", +# "art_outlier_files", +# "art_plot_files", +# "art_statistic_files", +# ] + +# # input identities +# rest_input = setup_node(IdentityInterface(fields=in_fields, mandatory_inputs=True), +# name="rest_input") + +# # rs-fMRI preprocessing nodes +# trim = setup_node(Trim(), name="trim") + +# stc_wf 
= auto_spm_slicetime() +# realign = setup_node(nipy_motion_correction(), name='realign') + +# # average +# average = setup_node( +# Function( +# function=mean_img, +# input_names=["in_file"], +# output_names=["out_file"], +# imports=['from neuro_pypes.interfaces.nilearn import ni2file'] +# ), +# name='average_epi' +# ) + +# mean_gunzip = setup_node(Gunzip(), name="mean_gunzip") + +# # co-registration nodes +# coreg = setup_node(spm_coregister(cost_function="mi"), name="coreg_fmri") +# brain_sel = setup_node(Select(index=[0, 1, 2]), name="brain_sel") + +# # brain mask made with EPI +# epi_mask = setup_node(ComputeMask(), name='epi_mask') + +# # brain mask made with the merge of the tissue segmentations +# tissue_mask = setup_node(fsl.MultiImageMaths(), name='tissue_mask') +# tissue_mask.inputs.op_string = "-add %s -add %s -abs -kernel gauss 4 -dilM -ero -kernel gauss 1 -dilM -bin" +# tissue_mask.inputs.out_file = "tissue_brain_mask.nii.gz" + +# # select tissues +# gm_select = setup_node(Select(index=[0]), name="gm_sel") +# wmcsf_select = setup_node(Select(index=[1, 2]), name="wmcsf_sel") + +# # noise filter +# noise_wf = rest_noise_filter_wf() +# wm_select = setup_node(Select(index=[1]), name="wm_sel") +# csf_select = setup_node(Select(index=[2]), name="csf_sel") + +# # bandpass filtering +# bandpass = setup_node( +# Function( +# input_names=['files', 'lowpass_freq', 'highpass_freq', 'tr'], +# output_names=['out_files'], +# function=bandpass_filter +# ), +# name='bandpass' +# ) + +# # smooth +# smooth = setup_node( +# Function( +# function=smooth_img, +# input_names=["in_file", "fwhm"], +# output_names=["out_file"], +# imports=['from neuro_pypes.interfaces.nilearn import ni2file'] +# ), +# name="smooth" +# ) +# smooth.inputs.fwhm = get_config_setting('fmri_smooth.fwhm', default=8) +# smooth.inputs.out_file = "smooth_std_{}.nii.gz".format(wf_name) + +# # output identities +# rest_output = setup_node(IdentityInterface(fields=out_fields), name="rest_output") + +# # Connect the nodes +# wf.connect([ +# # trim +# (rest_input, trim, [("in_file", "in_file")]), + +# # slice time correction +# (trim, stc_wf, [("out_file", "stc_input.in_file")]), + +# # motion correction +# (stc_wf, realign, [("stc_output.timecorrected_files", "in_file")]), + +# # coregistration target +# (realign, average, [("out_file", "in_file")]), +# (average, mean_gunzip, [("out_file", "in_file")]), +# (mean_gunzip, coreg, [("out_file", "target")]), + +# # epi brain mask +# (average, epi_mask, [("out_file", "mean_volume")]), + +# # coregistration +# (rest_input, coreg, [("anat", "source")]), +# (rest_input, brain_sel, [("tissues", "inlist")]), +# (brain_sel, coreg, [(("out", flatten_list), "apply_to_files")]), + +# # tissue brain mask +# (coreg, gm_select, [("coregistered_files", "inlist")]), +# (coreg, wmcsf_select, [("coregistered_files", "inlist")]), +# (gm_select, tissue_mask, [(("out", flatten_list), "in_file")]), +# (wmcsf_select, tissue_mask, [(("out", flatten_list), "operand_files")]), + +# # nuisance correction +# (coreg, wm_select, [("coregistered_files", "inlist",)]), +# (coreg, csf_select, [("coregistered_files", "inlist",)]), +# (realign, noise_wf, [("out_file", "rest_noise_input.in_file",)]), +# (tissue_mask, noise_wf, [("out_file", "rest_noise_input.brain_mask")]), +# (wm_select, noise_wf, [(("out", flatten_list), "rest_noise_input.wm_mask")]), +# (csf_select, noise_wf, [(("out", flatten_list), "rest_noise_input.csf_mask")]), + +# (realign, noise_wf, [("par_file", "rest_noise_input.motion_params",)]), + +# # 
temporal filtering +# (noise_wf, bandpass, [("rest_noise_output.nuis_corrected", "files")]), +# # (realign, bandpass, [("out_file", "files")]), +# (stc_wf, bandpass, [("stc_output.time_repetition", "tr")]), +# (rest_input, bandpass, [ +# ("lowpass_freq", "lowpass_freq"), +# ("highpass_freq", "highpass_freq"), +# ]), +# (bandpass, smooth, [("out_files", "in_file")]), + +# # output +# (epi_mask, rest_output, [("brain_mask", "epi_brain_mask")]), +# (tissue_mask, rest_output, [("out_file", "tissues_brain_mask")]), +# (realign, rest_output, [ +# ("out_file", "motion_corrected"), +# ("par_file", "motion_params"), +# ]), +# (coreg, rest_output, [ +# ("coregistered_files", "tissues"), +# ("coregistered_source", "anat"), +# ]), +# (noise_wf, rest_output, [ +# ("rest_noise_output.motion_regressors", "motion_regressors"), +# ("rest_noise_output.compcor_regressors", "compcor_regressors"), +# ("rest_noise_output.gsr_regressors", "gsr_regressors"), +# ("rest_noise_output.nuis_corrected", "nuis_corrected"), +# ("rest_noise_output.tsnr_file", "tsnr_file"), +# ("rest_noise_output.art_displacement_files", "art_displacement_files"), +# ("rest_noise_output.art_intensity_files", "art_intensity_files"), +# ("rest_noise_output.art_norm_files", "art_norm_files"), +# ("rest_noise_output.art_outlier_files", "art_outlier_files"), +# ("rest_noise_output.art_plot_files", "art_plot_files"), +# ("rest_noise_output.art_statistic_files", "art_statistic_files"), +# ]), +# (average, rest_output, [("out_file", "avg_epi")]), +# (bandpass, rest_output, [("out_files", "time_filtered")]), +# (smooth, rest_output, [("out_file", "smooth")]), +# ]) From bca8408daf89511eed1ea24e2110b3e5ade9674a Mon Sep 17 00:00:00 2001 From: Alexandre Savio Date: Wed, 28 Nov 2018 22:51:30 +0100 Subject: [PATCH 2/8] fix(fmri/nuisance): remove code typo --- neuro_pypes/fmri/nuisance.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neuro_pypes/fmri/nuisance.py b/neuro_pypes/fmri/nuisance.py index 31b248d..c926c18 100644 --- a/neuro_pypes/fmri/nuisance.py +++ b/neuro_pypes/fmri/nuisance.py @@ -208,7 +208,7 @@ def rest_noise_filter_wf(wf_name='rest_noise_removal'): ("in_file", "realigned_files"), ("motion_params", "realignment_parameters"), ("brain_mask", "mask_file"), - ß]), + ]), # calculte motion regressors (rest_noise_input, motion_regs, [("motion_params", "motion_params")]), From ce4a8855222754284e58473b4c2541039eaf4fd0 Mon Sep 17 00:00:00 2001 From: Alexandre Savio Date: Wed, 28 Nov 2018 23:20:26 +0100 Subject: [PATCH 3/8] fix(scripts): wip for the fmri preproc script --- scripts/rest_fmri_preprocessing/README.md | 16 ++ scripts/rest_fmri_preprocessing/process.ipynb | 215 ++++++++++-------- 2 files changed, 134 insertions(+), 97 deletions(-) diff --git a/scripts/rest_fmri_preprocessing/README.md b/scripts/rest_fmri_preprocessing/README.md index e69de29..2ae2d9e 100644 --- a/scripts/rest_fmri_preprocessing/README.md +++ b/scripts/rest_fmri_preprocessing/README.md @@ -0,0 +1,16 @@ + +Run the neurita/neuro_docker container: + +``` +docker run -it -p 8888:8888 --name neuro -v $PWD/../multimodal_test_data:/data -v $PWD/../neuro_pypes:/root/projects/neuro_pypes neurita/neuro_docker:0.2 /bin/bash +``` + +Inside the container: +``` +pyenv activate neuro +pip install jupyter jupyterlab +``` + +``` +jupyter lab --ip 0.0.0.0 --no-browser --allow-root +``` diff --git a/scripts/rest_fmri_preprocessing/process.ipynb b/scripts/rest_fmri_preprocessing/process.ipynb index 7eb3355..7a12831 100644 --- 
a/scripts/rest_fmri_preprocessing/process.ipynb +++ b/scripts/rest_fmri_preprocessing/process.ipynb @@ -2,9 +2,18 @@ "cells": [ { "cell_type": "code", - "execution_count": 5, + "execution_count": 1, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/root/.pyenv/versions/3.6.7/lib/python3.6/importlib/_bootstrap.py:219: ImportWarning: can't resolve package from __spec__ or __package__, falling back on __name__ and __path__\n", + " return f(*args, **kwds)\n" + ] + } + ], "source": [ "import os\n", "\n", @@ -25,28 +34,32 @@ "from neuro_pypes.utils import (\n", " remove_ext,\n", " joinstrings,\n", - " selectindex\n", + " selectindex,\n", + " extend_trait_list\n", ")" ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "wf_name = 'spm_rest_preprocessing'\n", "\n", - "work_dir = os.path.expanduser(f'~/data/neuro_pypes/{wf_name}/')\n", + "#work_dir = os.path.expanduser(f'~/data/neuro_pypes/{wf_name}/')\n", + "work_dir = os.path.expanduser(f'/data/neuro_pypes/{wf_name}/')\n", + "\n", + "#input_dir = os.path.expanduser('~/projects/neuro/multimodal_test_data/raw')\n", + "input_dir = os.path.expanduser('/data/raw')\n", + "\n", "output_dir = os.path.join(work_dir, 'out')\n", "cache_dir = os.path.join(work_dir, 'wd')\n", "\n", - "input_dir = os.path.expanduser('~/projects/neuro/multimodal_test_data/raw')\n", - "\n", "data_path = os.path.join(os.path.expanduser(input_dir), '{subject_id}', '{session}', '{image}')\n", "data_crumb = Crumb(data_path, ignore_list=['.*'])\n", "crumb_modalities = {\n", - " 'anat': [('image', 'anat.nii.gz')],\n", + " 'anat': [('image', 'anat_hc.nii.gz')],\n", " 'fmri': [('image', 'rest.nii.gz')]\n", "}\n", "\n", @@ -57,13 +70,12 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "wf = pe.Workflow(name=wf_name, base_dir=work_dir)\n", "\n", - "\n", "# ------------------------------------------------------------------------------------------------\n", "# DATA INPUT AND SINK\n", "# ------------------------------------------------------------------------------------------------\n", @@ -82,7 +94,7 @@ "substitutions = [(name, \"\") for name in undef_args]\n", "substitutions.append((\"__\", \"_\"))\n", "\n", - "datasink.inputs.substitutions = extend_trait_list(datasink.inputs.substitutions, substitutions)\n", + "# datasink.inputs.substitutions = extend_trait_list(datasink.inputs.substitutions, substitutions)\n", "\n", "# Infosource - the information source that iterates over crumb values map from the filesystem\n", "infosource = pe.Node(interface=IdentityInterface(fields=undef_args), name=\"infosrc\")\n", @@ -104,7 +116,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -115,15 +127,15 @@ "# T1 preprocessing nodes\n", "\n", "# ANTs N4 Bias field correction\n", - "n4 = N4BiasFieldCorrection()\n", - "n4.inputs.dimension = 3\n", - "n4.inputs.bspline_fitting_distance = 300\n", - "n4.inputs.shrink_factor = 3\n", - "n4.inputs.n_iterations = [50, 50, 30, 20]\n", - "n4.inputs.convergence_threshold = 1e-6\n", - "n4.inputs.save_bias = True\n", - "n4.inputs.input_image = traits.Undefined\n", - "biascor = pe.Node(n4, name=\"bias_correction\")\n", + "# n4 = N4BiasFieldCorrection()\n", + "# n4.inputs.dimension = 3\n", + "# n4.inputs.bspline_fitting_distance = 300\n", + "# n4.inputs.shrink_factor = 3\n", + "# n4.inputs.n_iterations 
= [50, 50, 30, 20]\n", + "# n4.inputs.convergence_threshold = 1e-6\n", + "# n4.inputs.save_bias = True\n", + "# n4.inputs.input_image = traits.Undefined\n", + "# biascor = pe.Node(n4, name=\"bias_correction\")\n", "\n", "gunzip_anat = pe.Node(Gunzip(), name=\"gunzip_anat\")\n", "\n", @@ -176,11 +188,12 @@ "# Connect the nodes\n", "wf.connect([\n", " # input to biasfieldcorrection\n", - " (datasource, biascor, [(\"anat\", \"input_image\")]),\n", + "# (datasource, biascor, [(\"anat\", \"input_image\")]),\n", "\n", " # new segment\n", - " (biascor, gunzip_anat, [(\"output_image\", \"in_file\")]),\n", - " (gunzip_anat, segment, [(\"out_file\", \"channel_files\")]),\n", + "# (biascor, gunzip_anat, [(\"output_image\", \"in_file\")]),\n", + " (datasource, gunzip_anat, [(\"anat\", \"in_file\")]),\n", + " (gunzip_anat, segment, [(\"out_file\", \"channel_files\")]),\n", "\n", " # Normalize12\n", " (segment, warp_anat, [(\"forward_deformation_field\", \"deformation_file\")]),\n", @@ -210,10 +223,24 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 7, "metadata": {}, - "outputs": [], + "outputs": [ + { + "ename": "NameError", + "evalue": "name 'in_file' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0;31m# slice-timing correction\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 11\u001b[0;31m \u001b[0mparams\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpe\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mNode\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mSTCParametersInterface\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0min_files\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0min_file\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'stc_params'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 12\u001b[0m \u001b[0mgunzip\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpe\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mNode\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mGunzip\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m\"gunzip\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mNameError\u001b[0m: name 'in_file' is not defined" + ] + } + ], "source": [ + "from nipype.interfaces.nipy.preprocess import Trim, ComputeMask\n", + "\n", "# ------------------------------------------------------------------------------------------------\n", "# FMRI Clean\n", "# ------------------------------------------------------------------------------------------------\n", @@ -222,36 +249,35 @@ "trim = pe.Node(Trim(), name=\"trim\")\n", "\n", "# slice-timing correction\n", - "params = setup_node(STCParametersInterface(in_files=in_file), name='stc_params')\n", - "gunzip = setup_node(Gunzip(), name=\"gunzip\")\n", + "params = pe.Node(STCParametersInterface(in_files=in_file), name='stc_params')\n", + "gunzip = pe.Node(Gunzip(), name=\"gunzip\")\n", "\n", "stc = spm.SliceTiming()\n", "stc.inputs.in_files = traits.Undefined\n", "stc.inputs.out_prefix = 'stc'\n", "slice_timing 
= pe.Node(stc, name='slice_timing')\n", - "wf.connect([(stc_input, params, [(\"in_file\", \"in_files\"),\n", - " (\"num_slices\", \"num_slices\"),\n", - " (\"slice_order\", \"slice_order\"),\n", - " (\"time_repetition\", \"time_repetition\"),\n", - " (\"time_acquisition\", \"time_acquisition\"),\n", - " (\"ref_slice\", \"ref_slice\"),\n", - " (\"slice_mode\", \"slice_mode\"),\n", - " ]),\n", - "\n", - " # processing nodes\n", - " (params, gunzip, [((\"in_files\", _pick_first), \"in_file\")]),\n", - " (params, stc, [((\"slice_order\", _sum_one_to_each), \"slice_order\"),\n", - " ((\"ref_slice\", _sum_one), \"ref_slice\"),\n", - " (\"num_slices\", \"num_slices\"),\n", - " (\"time_acquisition\", \"time_acquisition\"),\n", - " (\"time_repetition\", \"time_repetition\"),\n", - " ]),\n", - " (gunzip, stc, [(\"out_file\", \"in_files\")]),\n", - "\n", - " # output node\n", - " (params, stc_output, [(\"time_repetition\", \"time_repetition\")]),\n", - " (stc, stc_output, [(\"timecorrected_files\", \"timecorrected_files\")]),\n", - " ])\n", + "\n", + "wf.connect([\n", + " # trim\n", + " (datasource, trim, [(\"rest\", \"in_file\")]),\n", + "\n", + " # slice time correction\n", + " (trim, params, [(\"out_file\", \"in_files\")]),\n", + " \n", + " # processing nodes\n", + " (params, gunzip, [((\"in_files\", _pick_first), \"in_file\")]),\n", + " (params, stc, [((\"slice_order\", _sum_one_to_each), \"slice_order\"),\n", + " ((\"ref_slice\", _sum_one), \"ref_slice\"),\n", + " (\"num_slices\", \"num_slices\"),\n", + " (\"time_acquisition\", \"time_acquisition\"),\n", + " (\"time_repetition\", \"time_repetition\"),\n", + " ]),\n", + " (gunzip, stc, [(\"out_file\", \"in_files\")]),\n", + " \n", + " # output node\n", + " (params, stc_output, [(\"time_repetition\", \"time_repetition\")]),\n", + " (stc, stc_output, [(\"timecorrected_files\", \"timecorrected_files\")]),\n", + "])\n", "\n", "\n", "realign = pe.Node(nipy_motion_correction(), name='realign')\n", @@ -313,53 +339,10 @@ "smooth.inputs.fwhm = fmri_smoothing_kernel_fwhm\n", "smooth.inputs.out_file = \"smooth_std_{}.nii.gz\".format(wf_name)\n", "\n", - "# output identities\n", - "rest_output = setup_node(IdentityInterface(fields=out_fields), name=\"rest_output\")\n", - "\n", "# Connect the nodes\n", - "\n", - "# (in_files, cleanup_wf, [(\"rest\", \"rest_input.in_file\")]),\n", - "\n", - "# # anat to fMRI registration inputs\n", - "# (anat_output, cleanup_wf, [\n", - "# (\"tissues_native\", \"rest_input.tissues\"),\n", - "# (\"anat_biascorr\", \"rest_input.anat\"),\n", - "# ]),\n", - "\n", - "# # clean_up_wf to datasink\n", - "# (cleanup_wf, datasink, [\n", - "# (\"rest_output.epi_brain_mask\", \"rest.@epi_brain_mask\"),\n", - "# (\"rest_output.tissues_brain_mask\", \"rest.@tissues_brain_mask\"),\n", - "# (\"rest_output.tissues\", \"rest.@tissues\"),\n", - "# (\"rest_output.anat\", \"rest.@anat\"),\n", - "# (\"rest_output.motion_regressors\", \"rest.@motion_regressors\"),\n", - "# (\"rest_output.compcor_regressors\", \"rest.@compcor_regressors\"),\n", - "# (\"rest_output.gsr_regressors\", \"rest.@gsr_regressors\"),\n", - "# (\"rest_output.motion_params\", \"rest.@motion_params\"),\n", - "# (\"rest_output.motion_corrected\", \"rest.@motion_corrected\"),\n", - "# (\"rest_output.nuis_corrected\", \"rest.@nuis_corrected\"),\n", - "# (\"rest_output.time_filtered\", \"rest.@time_filtered\"),\n", - "# (\"rest_output.smooth\", \"rest.@smooth\"),\n", - "# (\"rest_output.avg_epi\", \"rest.@avg_epi\"),\n", - "# (\"rest_output.tsnr_file\", \"rest.@tsnr\"),\n", - "# 
(\"rest_output.art_displacement_files\", \"rest.artifact_stats.@displacement\"),\n", - "# (\"rest_output.art_intensity_files\", \"rest.artifact_stats.@art_intensity\"),\n", - "# (\"rest_output.art_norm_files\", \"rest.artifact_stats.@art_norm\"),\n", - "# (\"rest_output.art_outlier_files\", \"rest.artifact_stats.@art_outlier\"),\n", - "# (\"rest_output.art_plot_files\", \"rest.artifact_stats.@art_plot\"),\n", - "# (\"rest_output.art_statistic_files\", \"rest.artifact_stats.@art_statistic\"),\n", - "# ]),\n", - "# ])\n", - "\n", "wf.connect([\n", - " # trim\n", - " (rest_input, trim, [(\"in_file\", \"in_file\")]),\n", - "\n", - " # slice time correction\n", - " (trim, stc_wf, [(\"out_file\", \"stc_input.in_file\")]),\n", - "\n", " # motion correction\n", - " (stc_wf, realign, [(\"stc_output.timecorrected_files\", \"in_file\")]),\n", + " (stc, realign, [(\"timecorrected_files\", \"in_file\")]),\n", "\n", " # coregistration target\n", " (realign, average, [(\"out_file\", \"in_file\")]),\n", @@ -393,7 +376,7 @@ " # temporal filtering\n", " (noise_wf, bandpass, [(\"rest_noise_output.nuis_corrected\", \"files\")]),\n", " # (realign, bandpass, [(\"out_file\", \"files\")]),\n", - " (stc_wf, bandpass, [(\"stc_output.time_repetition\", \"tr\")]),\n", + " (params, bandpass, [(\"time_repetition\", \"tr\")]),\n", " (rest_input, bandpass, [\n", " (\"lowpass_freq\", \"lowpass_freq\"),\n", " (\"highpass_freq\", \"highpass_freq\"),\n", @@ -428,6 +411,37 @@ " (bandpass, rest_output, [(\"out_files\", \"time_filtered\")]),\n", " (smooth, rest_output, [(\"out_file\", \"smooth\")]),\n", "])\n", + " \n", + "# # anat to fMRI registration inputs\n", + "# (anat_output, cleanup_wf, [\n", + "# (\"tissues_native\", \"rest_input.tissues\"),\n", + "# (\"anat_biascorr\", \"rest_input.anat\"),\n", + "# ]),\n", + "\n", + "# # clean_up_wf to datasink\n", + "# (cleanup_wf, datasink, [\n", + "# (\"rest_output.epi_brain_mask\", \"rest.@epi_brain_mask\"),\n", + "# (\"rest_output.tissues_brain_mask\", \"rest.@tissues_brain_mask\"),\n", + "# (\"rest_output.tissues\", \"rest.@tissues\"),\n", + "# (\"rest_output.anat\", \"rest.@anat\"),\n", + "# (\"rest_output.motion_regressors\", \"rest.@motion_regressors\"),\n", + "# (\"rest_output.compcor_regressors\", \"rest.@compcor_regressors\"),\n", + "# (\"rest_output.gsr_regressors\", \"rest.@gsr_regressors\"),\n", + "# (\"rest_output.motion_params\", \"rest.@motion_params\"),\n", + "# (\"rest_output.motion_corrected\", \"rest.@motion_corrected\"),\n", + "# (\"rest_output.nuis_corrected\", \"rest.@nuis_corrected\"),\n", + "# (\"rest_output.time_filtered\", \"rest.@time_filtered\"),\n", + "# (\"rest_output.smooth\", \"rest.@smooth\"),\n", + "# (\"rest_output.avg_epi\", \"rest.@avg_epi\"),\n", + "# (\"rest_output.tsnr_file\", \"rest.@tsnr\"),\n", + "# (\"rest_output.art_displacement_files\", \"rest.artifact_stats.@displacement\"),\n", + "# (\"rest_output.art_intensity_files\", \"rest.artifact_stats.@art_intensity\"),\n", + "# (\"rest_output.art_norm_files\", \"rest.artifact_stats.@art_norm\"),\n", + "# (\"rest_output.art_outlier_files\", \"rest.artifact_stats.@art_outlier\"),\n", + "# (\"rest_output.art_plot_files\", \"rest.artifact_stats.@art_plot\"),\n", + "# (\"rest_output.art_statistic_files\", \"rest.artifact_stats.@art_statistic\"),\n", + "# ]),\n", + "# ])\n", "\n" ] }, @@ -442,6 +456,13 @@ "else:\n", " wf.run(plugin=None)" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { @@ -460,7 +481,7 @@ "name": 
"python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.6" + "version": "3.6.7" } }, "nbformat": 4, From 12824fa070ce5a9f6234821b935a6a23e74e8ba3 Mon Sep 17 00:00:00 2001 From: Alexandre Savio Date: Wed, 28 Nov 2018 23:20:56 +0100 Subject: [PATCH 4/8] style(preproc/slicetime): small style fix --- neuro_pypes/preproc/slicetime.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/neuro_pypes/preproc/slicetime.py b/neuro_pypes/preproc/slicetime.py index 3d0a2a5..2dd5b3c 100644 --- a/neuro_pypes/preproc/slicetime.py +++ b/neuro_pypes/preproc/slicetime.py @@ -306,8 +306,7 @@ def _pick_first(sequence): ] # the input and output nodes - stc_input = setup_node(IdentityInterface(fields=input_fields), - name="stc_input") + stc_input = setup_node(IdentityInterface(fields=input_fields), name="stc_input") stc_output = setup_node(IdentityInterface(fields=["timecorrected_files", "time_repetition", From bb468634e3ec6a63c8b02e2d78234f15078ac953 Mon Sep 17 00:00:00 2001 From: Alexandre Savio Date: Sun, 2 Dec 2018 22:15:28 +0100 Subject: [PATCH 5/8] feat(scripts): full anat+restfmri preprocessing script --- scripts/rest_fmri_preprocessing/process.ipynb | 1609 +++++++++++++++-- scripts/rest_fmri_preprocessing/process.py | 947 ++++++---- 2 files changed, 2044 insertions(+), 512 deletions(-) diff --git a/scripts/rest_fmri_preprocessing/process.ipynb b/scripts/rest_fmri_preprocessing/process.ipynb index 7a12831..c987441 100644 --- a/scripts/rest_fmri_preprocessing/process.ipynb +++ b/scripts/rest_fmri_preprocessing/process.ipynb @@ -21,8 +21,8 @@ "from hansel.operations import joint_value_map, valuesmap_to_dict\n", "import nipype.pipeline.engine as pe\n", "from nipype.algorithms.misc import Gunzip\n", - "from nipype.interfaces import spm\n", - "from nipype.interfaces.utility import IdentityInterface, Function\n", + "from nipype.interfaces import spm, fsl\n", + "from nipype.interfaces.utility import IdentityInterface, Function, Select\n", "from nipype.interfaces.io import DataSink\n", "from nipype.interfaces.ants import N4BiasFieldCorrection\n", "from nipype.interfaces.base import traits\n", @@ -31,6 +31,7 @@ "from neuro_pypes.preproc.slicetime_params import STCParametersInterface\n", "from neuro_pypes.interfaces.nilearn import math_img\n", "from neuro_pypes.preproc import get_bounding_box\n", + "from neuro_pypes._utils import flatten_list\n", "from neuro_pypes.utils import (\n", " remove_ext,\n", " joinstrings,\n", @@ -60,7 +61,7 @@ "data_crumb = Crumb(data_path, ignore_list=['.*'])\n", "crumb_modalities = {\n", " 'anat': [('image', 'anat_hc.nii.gz')],\n", - " 'fmri': [('image', 'rest.nii.gz')]\n", + " 'rest': [('image', 'rest.nii.gz')]\n", "}\n", "\n", "anat_voxel_sizes = [1, 1, 1]\n", @@ -223,21 +224,25 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "ename": "NameError", - "evalue": "name 'in_file' is not defined", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0;31m# slice-timing correction\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 11\u001b[0;31m \u001b[0mparams\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mpe\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mNode\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mSTCParametersInterface\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0min_files\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0min_file\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'stc_params'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 12\u001b[0m \u001b[0mgunzip\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpe\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mNode\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mGunzip\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m\"gunzip\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mNameError\u001b[0m: name 'in_file' is not defined" - ] - } - ], + "outputs": [], + "source": [ + "def _sum_one_to_each(slice_order): # SPM starts count from 1\n", + " return [i+1 for i in slice_order]\n", + "\n", + "def _sum_one(num):\n", + " return num + 1\n", + "\n", + "def _pick_first(sequence):\n", + " return sequence[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "from nipype.interfaces.nipy.preprocess import Trim, ComputeMask\n", "\n", @@ -249,7 +254,10 @@ "trim = pe.Node(Trim(), name=\"trim\")\n", "\n", "# slice-timing correction\n", - "params = pe.Node(STCParametersInterface(in_files=in_file), name='stc_params')\n", + "params = pe.Node(STCParametersInterface(), name='stc_params')\n", + "params.inputs.time_repetition = 2\n", + "params.inputs.slice_mode = 'alt_inc'\n", + "\n", "gunzip = pe.Node(Gunzip(), name=\"gunzip\")\n", "\n", "stc = spm.SliceTiming()\n", @@ -265,22 +273,38 @@ " (trim, params, [(\"out_file\", \"in_files\")]),\n", " \n", " # processing nodes\n", - " (params, gunzip, [((\"in_files\", _pick_first), \"in_file\")]),\n", - " (params, stc, [((\"slice_order\", _sum_one_to_each), \"slice_order\"),\n", - " ((\"ref_slice\", _sum_one), \"ref_slice\"),\n", - " (\"num_slices\", \"num_slices\"),\n", - " (\"time_acquisition\", \"time_acquisition\"),\n", - " (\"time_repetition\", \"time_repetition\"),\n", - " ]),\n", - " (gunzip, stc, [(\"out_file\", \"in_files\")]),\n", + " (params, gunzip, [((\"in_files\", _pick_first), \"in_file\")]),\n", + " (params, slice_timing, [\n", + " ((\"slice_order\", _sum_one_to_each), \"slice_order\"),\n", + " ((\"ref_slice\", _sum_one), \"ref_slice\"),\n", + " (\"num_slices\", \"num_slices\"),\n", + " (\"time_acquisition\", \"time_acquisition\"),\n", + " (\"time_repetition\", \"time_repetition\"),\n", + " ]),\n", " \n", - " # output node\n", - " (params, stc_output, [(\"time_repetition\", \"time_repetition\")]),\n", - " (stc, stc_output, [(\"timecorrected_files\", \"timecorrected_files\")]),\n", - "])\n", + " (gunzip, slice_timing, [(\"out_file\", \"in_files\")]),\n", + " \n", + "])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# ------------------------------------------------------------------------------------------------\n", + "# FMRI Warp, Align, Filtering, Smoothing\n", + "# ------------------------------------------------------------------------------------------------\n", + "from nipype.interfaces.nipy import SpaceTimeRealigner\n", + "from nipype.algorithms.confounds import TSNR\n", + "from nipype.algorithms.rapidart 
import ArtifactDetect\n", "\n", + "from neuro_pypes.fmri.nuisance import rest_noise_filter_wf\n", + "from neuro_pypes.interfaces.nilearn import mean_img, smooth_img\n", "\n", - "realign = pe.Node(nipy_motion_correction(), name='realign')\n", + "\n", + "realign = pe.Node(SpaceTimeRealigner(), name='realign')\n", "\n", "# average\n", "average = pe.Node(\n", @@ -296,7 +320,11 @@ "mean_gunzip = pe.Node(Gunzip(), name=\"mean_gunzip\")\n", "\n", "# co-registration nodes\n", - "coreg = pe.Node(spm_coregister(cost_function=\"mi\"), name=\"coreg_fmri\")\n", + "coreg = spm.Coregister()\n", + "coreg.inputs.cost_function = \"mi\"\n", + "coreg.inputs.jobtype = 'estwrite'\n", + "\n", + "coregister = pe.Node(coreg, name=\"coregister_fmri\")\n", "brain_sel = pe.Node(Select(index=[0, 1, 2]), name=\"brain_sel\")\n", "\n", "# brain mask made with EPI\n", @@ -312,10 +340,278 @@ "wmcsf_select = pe.Node(Select(index=[1, 2]), name=\"wmcsf_sel\")\n", "\n", "# noise filter\n", - "noise_wf = rest_noise_filter_wf()\n", "wm_select = pe.Node(Select(index=[1]), name=\"wm_sel\")\n", "csf_select = pe.Node(Select(index=[2]), name=\"csf_sel\")\n", "\n", + "\n", + "# anat to fMRI registration inputs\n", + "wf.connect([\n", + "# (biascorr, coregister), [(\"output_image\", \"source\")],\n", + " (datasource, coregister, [(\"anat\", \"source\")]),\n", + " (segment, brain_sel, [(\"native_class_images\", \"inlist\")]),\n", + "])\n", + "\n", + "\n", + "wf.connect([\n", + " # motion correction\n", + " (slice_timing, realign, [(\"timecorrected_files\", \"in_file\")]),\n", + "\n", + " # coregistration target\n", + " (realign, average, [(\"out_file\", \"in_file\")]),\n", + " (average, mean_gunzip, [(\"out_file\", \"in_file\")]),\n", + " (mean_gunzip, coregister, [(\"out_file\", \"target\")]),\n", + "\n", + " # epi brain mask\n", + " (average, epi_mask, [(\"out_file\", \"mean_volume\")]),\n", + "\n", + " # coregistration\n", + " (brain_sel, coregister, [((\"out\", flatten_list), \"apply_to_files\")]),\n", + "\n", + " # tissue brain mask\n", + " (coregister, gm_select, [(\"coregistered_files\", \"inlist\")]),\n", + " (coregister, wmcsf_select, [(\"coregistered_files\", \"inlist\")]),\n", + " (gm_select, tissue_mask, [((\"out\", flatten_list), \"in_file\")]),\n", + " (wmcsf_select, tissue_mask, [((\"out\", flatten_list), \"operand_files\")]),\n", + "\n", + " # nuisance correction\n", + " (coregister, wm_select, [(\"coregistered_files\", \"inlist\",)]),\n", + " (coregister, csf_select, [(\"coregistered_files\", \"inlist\",)]),\n", + "])\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# ------------------------------------------------------------------------------------------------\n", + "# FMRI Noise removal\n", + "# ------------------------------------------------------------------------------------------------\n", + "from neuro_pypes.preproc import motion_regressors, extract_noise_components, create_regressors\n", + "from neuro_pypes.utils import selectindex, rename\n", + "\n", + "# CompCor rsfMRI filters (at least compcor_csf should be True).\n", + "filters = {\n", + " 'compcor_csf': True,\n", + " 'compcor_wm': False,\n", + " 'gsr': False\n", + "}\n", + "\n", + "# Compute TSNR on realigned data regressing polynomial up to order 2\n", + "tsnr = pe.Node(TSNR(regress_poly=2), name='tsnr')\n", + "\n", + "# Use :class:`nipype.algorithms.rapidart` to determine which of the\n", + "# images in the functional series are outliers based on deviations in\n", + "# intensity or 
movement.\n", + "art = pe.Node(ArtifactDetect(), name=\"rapidart_artifacts\")\n", + "# # Threshold to use to detect motion-related outliers when composite motion is being used\n", + "art.inputs.use_differences = [True, False]\n", + "art.inputs.use_norm = True\n", + "art.inputs.zintensity_threshold = 2\n", + "art.inputs.use_norm = True\n", + "art.inputs.norm_threshold = 1\n", + "art.inputs.mask_type = 'file'\n", + "art.inputs.parameter_source = 'NiPy'\n", + "\n", + "# Compute motion regressors\n", + "motion_regs = pe.Node(\n", + " Function(\n", + " input_names=['motion_params', 'order', 'derivatives'],\n", + " output_names=['out_files'],\n", + " function=motion_regressors\n", + " ),\n", + " name='motion_regressors'\n", + ")\n", + "# motion regressors upto given order and derivative\n", + "# motion + d(motion)/dt + d2(motion)/dt2 (linear + quadratic)\n", + "motion_regs.inputs.order = 0\n", + "motion_regs.inputs.derivatives = 1\n", + "\n", + "# Create a filter to remove motion and art confounds\n", + "motart_pars = pe.Node(\n", + " Function(\n", + " input_names=['motion_params', 'comp_norm', 'outliers', 'detrend_poly'],\n", + " output_names=['out_files'],\n", + " function=create_regressors\n", + " ),\n", + " name='motart_parameters'\n", + ")\n", + "# # number of polynomials to add to detrend\n", + "motart_pars.inputs.detrend_poly = 2\n", + "\n", + "motion_filter = pe.Node(\n", + " fsl.GLM(\n", + " out_f_name='F_mcart.nii.gz',\n", + " out_pf_name='pF_mcart.nii.gz',\n", + " demean=True\n", + " ),\n", + " name='motion_filter'\n", + ")\n", + "\n", + "# Noise confound regressors\n", + "compcor_pars = pe.Node(\n", + " Function(\n", + " input_names=['realigned_file', 'mask_file', 'num_components', 'extra_regressors'],\n", + " output_names=['components_file'],\n", + " function=extract_noise_components\n", + " ),\n", + " name='compcor_pars'\n", + ")\n", + "# Number of principal components to calculate when running CompCor. 5 or 6 is recommended.\n", + "compcor_pars.inputs.num_components = 6\n", + "\n", + "compcor_filter = pe.Node(\n", + " fsl.GLM(out_f_name='F.nii.gz', out_pf_name='pF.nii.gz', demean=True),\n", + " name='compcor_filter'\n", + ")\n", + "\n", + "# Global signal regression\n", + "gsr_pars = pe.Node(\n", + " Function(\n", + " input_names=['realigned_file', 'mask_file', 'num_components', 'extra_regressors'],\n", + " output_names=['components_file'],\n", + " function=extract_noise_components\n", + " ),\n", + " name='gsr_pars'\n", + ")\n", + "# Number of principal components to calculate when running Global Signal Regression. 
1 is recommended.\n", + "gsr_pars.inputs.num_components: 1\n", + "\n", + "gsr_filter = pe.Node(\n", + " fsl.GLM(out_f_name='F_gsr.nii.gz', out_pf_name='pF_gsr.nii.gz', demean=True),\n", + " name='gsr_filter'\n", + ")\n", + "\n", + "wf.connect([\n", + " # tsnr\n", + " (realign, tsnr, [\n", + " (\"out_file\", \"in_file\"),\n", + " ]),\n", + "\n", + " # artifact detection\n", + " (tissue_mask, art, [(\"out_file\", \"mask_file\")]),\n", + " (realign, art, [\n", + " (\"out_file\", \"realigned_files\"),\n", + " (\"par_file\", \"realignment_parameters\")\n", + " ]),\n", + " \n", + " # calculte motion regressors\n", + " (realign, motion_regs, [\n", + " (\"par_file\", \"motion_params\")\n", + " ]),\n", + "\n", + " # create motion and confound regressors parameters file\n", + " (art, motart_pars, [\n", + " (\"norm_files\", \"comp_norm\"),\n", + " (\"outlier_files\", \"outliers\"),\n", + " ]),\n", + " (motion_regs, motart_pars, [\n", + " (\"out_files\", \"motion_params\")\n", + " ]),\n", + "\n", + " # motion filtering\n", + " (realign, motion_filter, [\n", + " (\"out_file\", \"in_file\"),\n", + " ((\"out_file\", rename, \"_filtermotart\"), \"out_res_name\"),\n", + " ]),\n", + " (motart_pars, motion_filter, [\n", + " ((\"out_files\", selectindex, 0), \"design\")\n", + " ]),\n", + "])\n", + "\n", + "wf.connect([\n", + " # output\n", + " (tsnr, datasink, [(\"tsnr_file\", \"rest.@tsnr\")]),\n", + " \n", + " (motart_pars, datasink, [(\"out_files\", \"rest.@motion_regressors\")]),\n", + " (motion_filter, datasink, [(\"out_res\", \"rest.@motion_corrected\")]),\n", + " (art, datasink, [\n", + " (\"displacement_files\", \"rest.artifact_stats.@displacement\"),\n", + " (\"intensity_files\", \"rest.artifact_stats.@intensity\"),\n", + " (\"norm_files\", \"rest.artifact_stats.@norm\"),\n", + " (\"outlier_files\", \"rest.artifact_stats.@outliers\"),\n", + " (\"plot_files\", \"rest.artifact_stats.@plots\"),\n", + " (\"statistic_files\", \"rest.artifact_stats.@stats\"),\n", + " ]),\n", + "])\n", + "\n", + "\n", + "last_filter = motion_filter\n", + "\n", + "# compcor filter\n", + "if filters['compcor_csf'] or filters['compcor_wm']:\n", + " wf.connect([\n", + " # calculate compcor regressor and parameters file\n", + " (motart_pars, compcor_pars, [((\"out_files\", selectindex, 0), \"extra_regressors\"), ]),\n", + " (motion_filter, compcor_pars, [(\"out_res\", \"realigned_file\"), ]),\n", + "\n", + " # the compcor filter\n", + " (motion_filter, compcor_filter, [(\"out_res\", \"in_file\"),\n", + " ((\"out_res\", rename, \"_cleaned\"), \"out_res_name\"),\n", + " ]),\n", + " (compcor_pars, compcor_filter, [(\"components_file\", \"design\")]),\n", + " (tissue_mask, compcor_filter, [(\"out_file\", \"mask\")]),\n", + "\n", + " # output\n", + " (compcor_pars, datasink, [(\"components_file\", \"rest.@compcor_regressors\")]),\n", + " ])\n", + " last_filter = compcor_filter\n", + "\n", + "# global signal regression\n", + "if filters['gsr']:\n", + " wf.connect([\n", + " # calculate gsr regressors parameters file\n", + " (last_filter, gsr_pars, [(\"out_res\", \"realigned_file\")]),\n", + " (tissue_mask, gsr_pars, [(\"out_file\", \"mask_file\")]),\n", + "\n", + " # the output file name\n", + " (tissue_mask, gsr_filter, [(\"out_file\", \"mask\")]),\n", + " (last_filter, gsr_filter, [\n", + " (\"out_res\", \"in_file\"),\n", + " ((\"out_res\", rename, \"_gsr\"), \"out_res_name\"),\n", + " ]),\n", + " (gsr_pars, gsr_filter, [(\"components_file\", \"design\")]),\n", + "\n", + " # output\n", + " (gsr_pars, datasink, [(\"components_file\", 
\"rest.@gsr_regressors\")]),\n", + " ])\n", + " last_filter = gsr_filter\n", + "\n", + "# connect the final nuisance correction output node\n", + "wf.connect([(last_filter, datasink, [(\"out_res\", \"rest.@nuis_corrected\")]), ])\n", + "\n", + "if filters['compcor_csf'] and filters['compcor_wm']:\n", + " mask_merge = setup_node(Merge(2), name=\"mask_merge\")\n", + " wf.connect([\n", + " ## the mask for the compcor filter\n", + " (wm_select, mask_merge, [((\"out\", flatten_list), \"in1\")]),\n", + " (csf_select, mask_merge, [((\"out\", flatten_list), \"in2\")]),\n", + " (mask_merge, compcor_pars, [(\"out\", \"mask_file\")]),\n", + " ])\n", + "\n", + "elif filters['compcor_csf']:\n", + " wf.connect([\n", + " ## the mask for the compcor filter\n", + " (csf_select, compcor_pars, [((\"out\", flatten_list), \"mask_file\")]),\n", + " ])\n", + "\n", + "elif filters['compcor_wm']:\n", + " wf.connect([\n", + " ## the mask for the compcor filter\n", + " (wm_select, compcor_pars, [((\"out\", flatten_list), \"mask_file\")]),\n", + " ])\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from neuro_pypes.fmri.filter import bandpass_filter\n", + "from neuro_pypes.interfaces.nilearn import smooth_img\n", + "\n", "# bandpass filtering\n", "bandpass = pe.Node(\n", " Function(\n", @@ -325,6 +621,8 @@ " ),\n", " name='bandpass'\n", ")\n", + "bandpass.inputs.lowpass_freq = 0.1\n", + "bandpass.inputs.highpass_freq = 0.01\n", "\n", "# smooth\n", "smooth = pe.Node(\n", @@ -339,118 +637,1169 @@ "smooth.inputs.fwhm = fmri_smoothing_kernel_fwhm\n", "smooth.inputs.out_file = \"smooth_std_{}.nii.gz\".format(wf_name)\n", "\n", - "# Connect the nodes\n", - "wf.connect([\n", - " # motion correction\n", - " (stc, realign, [(\"timecorrected_files\", \"in_file\")]),\n", - "\n", - " # coregistration target\n", - " (realign, average, [(\"out_file\", \"in_file\")]),\n", - " (average, mean_gunzip, [(\"out_file\", \"in_file\")]),\n", - " (mean_gunzip, coreg, [(\"out_file\", \"target\")]),\n", - "\n", - " # epi brain mask\n", - " (average, epi_mask, [(\"out_file\", \"mean_volume\")]),\n", - "\n", - " # coregistration\n", - " (rest_input, coreg, [(\"anat\", \"source\")]),\n", - " (rest_input, brain_sel, [(\"tissues\", \"inlist\")]),\n", - " (brain_sel, coreg, [((\"out\", flatten_list), \"apply_to_files\")]),\n", - "\n", - " # tissue brain mask\n", - " (coreg, gm_select, [(\"coregistered_files\", \"inlist\")]),\n", - " (coreg, wmcsf_select, [(\"coregistered_files\", \"inlist\")]),\n", - " (gm_select, tissue_mask, [((\"out\", flatten_list), \"in_file\")]),\n", - " (wmcsf_select, tissue_mask, [((\"out\", flatten_list), \"operand_files\")]),\n", - "\n", - " # nuisance correction\n", - " (coreg, wm_select, [(\"coregistered_files\", \"inlist\",)]),\n", - " (coreg, csf_select, [(\"coregistered_files\", \"inlist\",)]),\n", - " (realign, noise_wf, [(\"out_file\", \"rest_noise_input.in_file\",)]),\n", - " (tissue_mask, noise_wf, [(\"out_file\", \"rest_noise_input.brain_mask\")]),\n", - " (wm_select, noise_wf, [((\"out\", flatten_list), \"rest_noise_input.wm_mask\")]),\n", - " (csf_select, noise_wf, [((\"out\", flatten_list), \"rest_noise_input.csf_mask\")]),\n", - "\n", - " (realign, noise_wf, [(\"par_file\", \"rest_noise_input.motion_params\",)]),\n", "\n", + "wf.connect([\n", " # temporal filtering\n", - " (noise_wf, bandpass, [(\"rest_noise_output.nuis_corrected\", \"files\")]),\n", + " (last_filter, bandpass, [(\"out_res\", \"files\")]),\n", + "\n", " # (realign, 
bandpass, [(\"out_file\", \"files\")]),\n", " (params, bandpass, [(\"time_repetition\", \"tr\")]),\n", - " (rest_input, bandpass, [\n", - " (\"lowpass_freq\", \"lowpass_freq\"),\n", - " (\"highpass_freq\", \"highpass_freq\"),\n", - " ]),\n", " (bandpass, smooth, [(\"out_files\", \"in_file\")]),\n", "\n", " # output\n", - " (epi_mask, rest_output, [(\"brain_mask\", \"epi_brain_mask\")]),\n", - " (tissue_mask, rest_output, [(\"out_file\", \"tissues_brain_mask\")]),\n", - " (realign, rest_output, [\n", - " (\"out_file\", \"motion_corrected\"),\n", - " (\"par_file\", \"motion_params\"),\n", - " ]),\n", - " (coreg, rest_output, [\n", - " (\"coregistered_files\", \"tissues\"),\n", - " (\"coregistered_source\", \"anat\"),\n", + " (epi_mask, datasink, [(\"brain_mask\", \"rest.@epi_brain_mask\")]),\n", + " (tissue_mask, datasink, [(\"out_file\", \"rest.@tissues_brain_mask\")]),\n", + " (realign, datasink, [\n", + " (\"out_file\", \"rest.@realigned\"),\n", + " (\"par_file\", \"rest.@motion_params\"),\n", " ]),\n", - " (noise_wf, rest_output, [\n", - " (\"rest_noise_output.motion_regressors\", \"motion_regressors\"),\n", - " (\"rest_noise_output.compcor_regressors\", \"compcor_regressors\"),\n", - " (\"rest_noise_output.gsr_regressors\", \"gsr_regressors\"),\n", - " (\"rest_noise_output.nuis_corrected\", \"nuis_corrected\"),\n", - " (\"rest_noise_output.tsnr_file\", \"tsnr_file\"),\n", - " (\"rest_noise_output.art_displacement_files\", \"art_displacement_files\"),\n", - " (\"rest_noise_output.art_intensity_files\", \"art_intensity_files\"),\n", - " (\"rest_noise_output.art_norm_files\", \"art_norm_files\"),\n", - " (\"rest_noise_output.art_outlier_files\", \"art_outlier_files\"),\n", - " (\"rest_noise_output.art_plot_files\", \"art_plot_files\"),\n", - " (\"rest_noise_output.art_statistic_files\", \"art_statistic_files\"),\n", + " (coregister, datasink, [\n", + " (\"coregistered_files\", \"rest.@tissues\"),\n", + " (\"coregistered_source\", \"rest.@anat\"),\n", " ]),\n", - " (average, rest_output, [(\"out_file\", \"avg_epi\")]),\n", - " (bandpass, rest_output, [(\"out_files\", \"time_filtered\")]),\n", - " (smooth, rest_output, [(\"out_file\", \"smooth\")]),\n", - "])\n", - " \n", - "# # anat to fMRI registration inputs\n", - "# (anat_output, cleanup_wf, [\n", - "# (\"tissues_native\", \"rest_input.tissues\"),\n", - "# (\"anat_biascorr\", \"rest_input.anat\"),\n", - "# ]),\n", - "\n", - "# # clean_up_wf to datasink\n", - "# (cleanup_wf, datasink, [\n", - "# (\"rest_output.epi_brain_mask\", \"rest.@epi_brain_mask\"),\n", - "# (\"rest_output.tissues_brain_mask\", \"rest.@tissues_brain_mask\"),\n", - "# (\"rest_output.tissues\", \"rest.@tissues\"),\n", - "# (\"rest_output.anat\", \"rest.@anat\"),\n", - "# (\"rest_output.motion_regressors\", \"rest.@motion_regressors\"),\n", - "# (\"rest_output.compcor_regressors\", \"rest.@compcor_regressors\"),\n", - "# (\"rest_output.gsr_regressors\", \"rest.@gsr_regressors\"),\n", - "# (\"rest_output.motion_params\", \"rest.@motion_params\"),\n", - "# (\"rest_output.motion_corrected\", \"rest.@motion_corrected\"),\n", - "# (\"rest_output.nuis_corrected\", \"rest.@nuis_corrected\"),\n", - "# (\"rest_output.time_filtered\", \"rest.@time_filtered\"),\n", - "# (\"rest_output.smooth\", \"rest.@smooth\"),\n", - "# (\"rest_output.avg_epi\", \"rest.@avg_epi\"),\n", - "# (\"rest_output.tsnr_file\", \"rest.@tsnr\"),\n", - "# (\"rest_output.art_displacement_files\", \"rest.artifact_stats.@displacement\"),\n", - "# (\"rest_output.art_intensity_files\", 
\"rest.artifact_stats.@art_intensity\"),\n", - "# (\"rest_output.art_norm_files\", \"rest.artifact_stats.@art_norm\"),\n", - "# (\"rest_output.art_outlier_files\", \"rest.artifact_stats.@art_outlier\"),\n", - "# (\"rest_output.art_plot_files\", \"rest.artifact_stats.@art_plot\"),\n", - "# (\"rest_output.art_statistic_files\", \"rest.artifact_stats.@art_statistic\"),\n", - "# ]),\n", - "# ])\n", - "\n" + " (average, datasink, [(\"out_file\", \"rest.@avg_epi\")]),\n", + " (bandpass, datasink, [(\"out_files\", \"rest.@time_filtered\")]),\n", + " (smooth, datasink, [(\"out_file\", \"rest.@smooth\")]),\n", + "])" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, - "outputs": [], - "source": [ + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:08:16,116 nipype.workflow INFO:\n", + "\t Workflow spm_rest_preprocessing settings: ['check', 'execution', 'logging', 'monitoring']\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:Workflow spm_rest_preprocessing settings: ['check', 'execution', 'logging', 'monitoring']\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:08:16,207 nipype.workflow INFO:\n", + "\t Running serially.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:Running serially.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:08:16,210 nipype.workflow INFO:\n", + "\t [Node] Setting-up \"spm_rest_preprocessing.selectfiles\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/selectfiles\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Setting-up \"spm_rest_preprocessing.selectfiles\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/selectfiles\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:08:16,215 nipype.workflow INFO:\n", + "\t [Node] Running \"selectfiles\" (\"neuro_pypes.crumb.DataCrumb\")\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Running \"selectfiles\" (\"neuro_pypes.crumb.DataCrumb\")\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:08:16,223 nipype.workflow INFO:\n", + "\t [Node] Finished \"spm_rest_preprocessing.selectfiles\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/root/.pyenv/versions/3.6.7/envs/neuro/lib/python3.6/site-packages/nipype/pipeline/engine/utils.py:307: DeprecationWarning: use \"HasTraits.trait_set\" instead\n", + " result.outputs.set(**modify_paths(tosave, relative=True, basedir=cwd))\n", + "INFO:nipype.workflow:[Node] Finished \"spm_rest_preprocessing.selectfiles\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:08:16,224 nipype.workflow INFO:\n", + "\t [Node] Setting-up \"spm_rest_preprocessing.trim\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/trim\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Setting-up \"spm_rest_preprocessing.trim\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/trim\".\n" + ] + }, + { + "name": "stdout", + "output_type": 
"stream", + "text": [ + "181202-21:08:16,228 nipype.workflow INFO:\n", + "\t [Node] Cached \"spm_rest_preprocessing.trim\" - collecting precomputed outputs\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Cached \"spm_rest_preprocessing.trim\" - collecting precomputed outputs\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:08:16,229 nipype.workflow INFO:\n", + "\t [Node] \"spm_rest_preprocessing.trim\" found cached.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] \"spm_rest_preprocessing.trim\" found cached.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:08:16,230 nipype.workflow INFO:\n", + "\t [Node] Setting-up \"spm_rest_preprocessing.stc_params\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/stc_params\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Setting-up \"spm_rest_preprocessing.stc_params\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/stc_params\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:08:16,236 nipype.workflow INFO:\n", + "\t [Node] Running \"stc_params\" (\"neuro_pypes.preproc.slicetime_params.STCParametersInterface\")\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Running \"stc_params\" (\"neuro_pypes.preproc.slicetime_params.STCParametersInterface\")\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:08:16,246 nipype.workflow INFO:\n", + "\t [Node] Finished \"spm_rest_preprocessing.stc_params\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Finished \"spm_rest_preprocessing.stc_params\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:08:16,247 nipype.workflow INFO:\n", + "\t [Node] Setting-up \"spm_rest_preprocessing.gunzip\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/gunzip\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Setting-up \"spm_rest_preprocessing.gunzip\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/gunzip\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:08:16,251 nipype.workflow INFO:\n", + "\t [Node] Running \"gunzip\" (\"nipype.algorithms.misc.Gunzip\")\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Running \"gunzip\" (\"nipype.algorithms.misc.Gunzip\")\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:08:16,901 nipype.workflow INFO:\n", + "\t [Node] Finished \"spm_rest_preprocessing.gunzip\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Finished \"spm_rest_preprocessing.gunzip\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:08:16,902 nipype.workflow INFO:\n", + "\t [Node] Setting-up \"spm_rest_preprocessing.slice_timing\" in 
\"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/slice_timing\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Setting-up \"spm_rest_preprocessing.slice_timing\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/slice_timing\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:08:16,910 nipype.workflow INFO:\n", + "\t [Node] Running \"slice_timing\" (\"nipype.interfaces.spm.preprocess.SliceTiming\")\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Running \"slice_timing\" (\"nipype.interfaces.spm.preprocess.SliceTiming\")\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:08:45,690 nipype.workflow INFO:\n", + "\t [Node] Finished \"spm_rest_preprocessing.slice_timing\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Finished \"spm_rest_preprocessing.slice_timing\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:08:45,693 nipype.workflow INFO:\n", + "\t [Node] Setting-up \"spm_rest_preprocessing.realign\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/realign\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Setting-up \"spm_rest_preprocessing.realign\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/realign\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:08:45,702 nipype.workflow INFO:\n", + "\t [Node] Running \"realign\" (\"nipype.interfaces.nipy.preprocess.SpaceTimeRealigner\")\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Running \"realign\" (\"nipype.interfaces.nipy.preprocess.SpaceTimeRealigner\")\n", + "/root/.pyenv/versions/3.6.7/lib/python3.6/importlib/_bootstrap.py:219: ImportWarning: can't resolve package from __spec__ or __package__, falling back on __name__ and __path__\n", + " return f(*args, **kwds)\n", + "/root/.pyenv/versions/3.6.7/lib/python3.6/importlib/_bootstrap.py:219: ImportWarning: can't resolve package from __spec__ or __package__, falling back on __name__ and __path__\n", + " return f(*args, **kwds)\n", + "/root/.pyenv/versions/3.6.7/envs/neuro/lib/python3.6/site-packages/nipy/algorithms/registration/groupwise_registration.py:481: UserWarning: Minimization failed\n", + " warnings.warn('Minimization failed')\n", + "/root/.pyenv/versions/3.6.7/envs/neuro/lib/python3.6/site-packages/nipy/io/files.py:145: FutureWarning: Default `strict` currently False; this will change to True in a future version of nipy\n", + " ni_img = nipy2nifti(img, data_dtype = io_dtype)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:09,377 nipype.workflow INFO:\n", + "\t [Node] Finished \"spm_rest_preprocessing.realign\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/root/.pyenv/versions/3.6.7/envs/neuro/lib/python3.6/site-packages/nipype/pipeline/engine/utils.py:307: DeprecationWarning: use \"HasTraits.trait_set\" instead\n", + " result.outputs.set(**modify_paths(tosave, relative=True, basedir=cwd))\n", + "INFO:nipype.workflow:[Node] 
Finished \"spm_rest_preprocessing.realign\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:09,379 nipype.workflow INFO:\n", + "\t [Node] Setting-up \"spm_rest_preprocessing.motion_regressors\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/motion_regressors\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Setting-up \"spm_rest_preprocessing.motion_regressors\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/motion_regressors\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:09,383 nipype.workflow INFO:\n", + "\t [Node] Running \"motion_regressors\" (\"nipype.interfaces.utility.wrappers.Function\")\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Running \"motion_regressors\" (\"nipype.interfaces.utility.wrappers.Function\")\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:09,391 nipype.workflow INFO:\n", + "\t [Node] Finished \"spm_rest_preprocessing.motion_regressors\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Finished \"spm_rest_preprocessing.motion_regressors\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:09,393 nipype.workflow INFO:\n", + "\t [Node] Setting-up \"spm_rest_preprocessing.tsnr\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/tsnr\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Setting-up \"spm_rest_preprocessing.tsnr\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/tsnr\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:09,398 nipype.workflow INFO:\n", + "\t [Node] Running \"tsnr\" (\"nipype.algorithms.confounds.TSNR\")\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Running \"tsnr\" (\"nipype.algorithms.confounds.TSNR\")\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:28,321 nipype.workflow INFO:\n", + "\t [Node] Finished \"spm_rest_preprocessing.tsnr\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Finished \"spm_rest_preprocessing.tsnr\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:28,322 nipype.workflow INFO:\n", + "\t [Node] Setting-up \"spm_rest_preprocessing.average_epi\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/average_epi\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Setting-up \"spm_rest_preprocessing.average_epi\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/average_epi\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:28,326 nipype.workflow INFO:\n", + "\t [Node] Running \"average_epi\" (\"nipype.interfaces.utility.wrappers.Function\")\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + 
"INFO:nipype.workflow:[Node] Running \"average_epi\" (\"nipype.interfaces.utility.wrappers.Function\")\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:30,931 nipype.workflow INFO:\n", + "\t [Node] Finished \"spm_rest_preprocessing.average_epi\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Finished \"spm_rest_preprocessing.average_epi\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:30,932 nipype.workflow INFO:\n", + "\t [Node] Setting-up \"spm_rest_preprocessing.epi_mask\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/epi_mask\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Setting-up \"spm_rest_preprocessing.epi_mask\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/epi_mask\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:30,935 nipype.workflow INFO:\n", + "\t [Node] Running \"epi_mask\" (\"nipype.interfaces.nipy.preprocess.ComputeMask\")\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Running \"epi_mask\" (\"nipype.interfaces.nipy.preprocess.ComputeMask\")\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:30,972 nipype.workflow INFO:\n", + "\t [Node] Finished \"spm_rest_preprocessing.epi_mask\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Finished \"spm_rest_preprocessing.epi_mask\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:30,973 nipype.workflow INFO:\n", + "\t [Node] Setting-up \"spm_rest_preprocessing.mean_gunzip\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/mean_gunzip\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Setting-up \"spm_rest_preprocessing.mean_gunzip\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/mean_gunzip\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:30,977 nipype.workflow INFO:\n", + "\t [Node] Running \"mean_gunzip\" (\"nipype.algorithms.misc.Gunzip\")\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Running \"mean_gunzip\" (\"nipype.algorithms.misc.Gunzip\")\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:30,992 nipype.workflow INFO:\n", + "\t [Node] Finished \"spm_rest_preprocessing.mean_gunzip\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Finished \"spm_rest_preprocessing.mean_gunzip\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:30,993 nipype.workflow INFO:\n", + "\t [Node] Setting-up \"spm_rest_preprocessing.gunzip_anat\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/gunzip_anat\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Setting-up \"spm_rest_preprocessing.gunzip_anat\" in 
\"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/gunzip_anat\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:30,997 nipype.workflow INFO:\n", + "\t [Node] Cached \"spm_rest_preprocessing.gunzip_anat\" - collecting precomputed outputs\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Cached \"spm_rest_preprocessing.gunzip_anat\" - collecting precomputed outputs\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:30,999 nipype.workflow INFO:\n", + "\t [Node] \"spm_rest_preprocessing.gunzip_anat\" found cached.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] \"spm_rest_preprocessing.gunzip_anat\" found cached.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:31,0 nipype.workflow INFO:\n", + "\t [Node] Setting-up \"spm_rest_preprocessing.new_segment\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/new_segment\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Setting-up \"spm_rest_preprocessing.new_segment\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/new_segment\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:31,8 nipype.workflow INFO:\n", + "\t [Node] Cached \"spm_rest_preprocessing.new_segment\" - collecting precomputed outputs\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Cached \"spm_rest_preprocessing.new_segment\" - collecting precomputed outputs\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:31,10 nipype.workflow INFO:\n", + "\t [Node] \"spm_rest_preprocessing.new_segment\" found cached.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] \"spm_rest_preprocessing.new_segment\" found cached.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:31,11 nipype.workflow INFO:\n", + "\t [Node] Setting-up \"spm_rest_preprocessing.brain_mask\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/brain_mask\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Setting-up \"spm_rest_preprocessing.brain_mask\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/brain_mask\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:31,19 nipype.workflow INFO:\n", + "\t [Node] Cached \"spm_rest_preprocessing.brain_mask\" - collecting precomputed outputs\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Cached \"spm_rest_preprocessing.brain_mask\" - collecting precomputed outputs\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:31,21 nipype.workflow INFO:\n", + "\t [Node] \"spm_rest_preprocessing.brain_mask\" found cached.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] \"spm_rest_preprocessing.brain_mask\" found cached.\n" + ] + }, + { + 
"name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:31,23 nipype.workflow INFO:\n", + "\t [Node] Setting-up \"spm_rest_preprocessing.brain_sel\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/brain_sel\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Setting-up \"spm_rest_preprocessing.brain_sel\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/brain_sel\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:31,31 nipype.workflow INFO:\n", + "\t [Node] Cached \"spm_rest_preprocessing.brain_sel\" - collecting precomputed outputs\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Cached \"spm_rest_preprocessing.brain_sel\" - collecting precomputed outputs\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:31,33 nipype.workflow INFO:\n", + "\t [Node] \"spm_rest_preprocessing.brain_sel\" found cached.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] \"spm_rest_preprocessing.brain_sel\" found cached.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:31,34 nipype.workflow INFO:\n", + "\t [Node] Setting-up \"spm_rest_preprocessing.coregister_fmri\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/coregister_fmri\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Setting-up \"spm_rest_preprocessing.coregister_fmri\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/coregister_fmri\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:31,38 nipype.workflow ERROR:\n", + "\t Node coregister_fmri.a3 failed to run on host 3641d5e749ab.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "ERROR:nipype.workflow:Node coregister_fmri.a3 failed to run on host 3641d5e749ab.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:31,45 nipype.workflow ERROR:\n", + "\t Saving crash info to /root/projects/neuro_pypes/scripts/rest_fmri_preprocessing/crash-20181202-211231-root-coregister_fmri.a3-21e781ae-5010-4f3d-84e4-c5c9d21cf60a.pklz\n", + "Traceback (most recent call last):\n", + " File \"/root/.pyenv/versions/3.6.7/envs/neuro/lib/python3.6/site-packages/nipype/pipeline/plugins/linear.py\", line 44, in run\n", + " node.run(updatehash=updatehash)\n", + " File \"/root/.pyenv/versions/3.6.7/envs/neuro/lib/python3.6/site-packages/nipype/pipeline/engine/nodes.py\", line 408, in run\n", + " cached, updated = self.is_cached()\n", + " File \"/root/.pyenv/versions/3.6.7/envs/neuro/lib/python3.6/site-packages/nipype/pipeline/engine/nodes.py\", line 294, in is_cached\n", + " hashed_inputs, hashvalue = self._get_hashval()\n", + " File \"/root/.pyenv/versions/3.6.7/envs/neuro/lib/python3.6/site-packages/nipype/pipeline/engine/nodes.py\", line 488, in _get_hashval\n", + " self._get_inputs()\n", + " File \"/root/.pyenv/versions/3.6.7/envs/neuro/lib/python3.6/site-packages/nipype/pipeline/engine/nodes.py\", line 531, in _get_inputs\n", + " self.set_input(key, deepcopy(output_value))\n", + " File 
\"/root/.pyenv/versions/3.6.7/envs/neuro/lib/python3.6/site-packages/nipype/pipeline/engine/nodes.py\", line 276, in set_input\n", + " setattr(self.inputs, parameter, deepcopy(val))\n", + " File \"/root/.pyenv/versions/3.6.7/envs/neuro/lib/python3.6/site-packages/nipype/interfaces/base/traits_extension.py\", line 341, in validate\n", + " value = super(MultiObject, self).validate(object, name, newvalue)\n", + " File \"/root/.pyenv/versions/3.6.7/envs/neuro/lib/python3.6/site-packages/traits/trait_types.py\", line 2336, in validate\n", + " return TraitListObject( self, object, name, value )\n", + " File \"/root/.pyenv/versions/3.6.7/envs/neuro/lib/python3.6/site-packages/traits/trait_handlers.py\", line 2313, in __init__\n", + " raise excp\n", + " File \"/root/.pyenv/versions/3.6.7/envs/neuro/lib/python3.6/site-packages/traits/trait_handlers.py\", line 2305, in __init__\n", + " value = [ validate( object, name, val ) for val in value ]\n", + " File \"/root/.pyenv/versions/3.6.7/envs/neuro/lib/python3.6/site-packages/traits/trait_handlers.py\", line 2305, in \n", + " value = [ validate( object, name, val ) for val in value ]\n", + " File \"/root/.pyenv/versions/3.6.7/envs/neuro/lib/python3.6/site-packages/nipype/interfaces/base/traits_extension.py\", line 260, in validate\n", + " validated_value, ', '.join(_exts)))\n", + "traits.trait_errors.TraitError: /data/raw/subject_4/session_0/anat_hc.nii.gz is not included in allowed types: .img, .nii, .hdr\n", + "Error setting node input:\n", + "Node: coregister_fmri\n", + "input: source\n", + "results_file: /data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/selectfiles/result_selectfiles.pklz\n", + "value: /data/raw/subject_4/session_0/anat_hc.nii.gz\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "ERROR:nipype.workflow:Saving crash info to /root/projects/neuro_pypes/scripts/rest_fmri_preprocessing/crash-20181202-211231-root-coregister_fmri.a3-21e781ae-5010-4f3d-84e4-c5c9d21cf60a.pklz\n", + "Traceback (most recent call last):\n", + " File \"/root/.pyenv/versions/3.6.7/envs/neuro/lib/python3.6/site-packages/nipype/pipeline/plugins/linear.py\", line 44, in run\n", + " node.run(updatehash=updatehash)\n", + " File \"/root/.pyenv/versions/3.6.7/envs/neuro/lib/python3.6/site-packages/nipype/pipeline/engine/nodes.py\", line 408, in run\n", + " cached, updated = self.is_cached()\n", + " File \"/root/.pyenv/versions/3.6.7/envs/neuro/lib/python3.6/site-packages/nipype/pipeline/engine/nodes.py\", line 294, in is_cached\n", + " hashed_inputs, hashvalue = self._get_hashval()\n", + " File \"/root/.pyenv/versions/3.6.7/envs/neuro/lib/python3.6/site-packages/nipype/pipeline/engine/nodes.py\", line 488, in _get_hashval\n", + " self._get_inputs()\n", + " File \"/root/.pyenv/versions/3.6.7/envs/neuro/lib/python3.6/site-packages/nipype/pipeline/engine/nodes.py\", line 531, in _get_inputs\n", + " self.set_input(key, deepcopy(output_value))\n", + " File \"/root/.pyenv/versions/3.6.7/envs/neuro/lib/python3.6/site-packages/nipype/pipeline/engine/nodes.py\", line 276, in set_input\n", + " setattr(self.inputs, parameter, deepcopy(val))\n", + " File \"/root/.pyenv/versions/3.6.7/envs/neuro/lib/python3.6/site-packages/nipype/interfaces/base/traits_extension.py\", line 341, in validate\n", + " value = super(MultiObject, self).validate(object, name, newvalue)\n", + " File \"/root/.pyenv/versions/3.6.7/envs/neuro/lib/python3.6/site-packages/traits/trait_types.py\", line 2336, in validate\n", 
+ " return TraitListObject( self, object, name, value )\n", + " File \"/root/.pyenv/versions/3.6.7/envs/neuro/lib/python3.6/site-packages/traits/trait_handlers.py\", line 2313, in __init__\n", + " raise excp\n", + " File \"/root/.pyenv/versions/3.6.7/envs/neuro/lib/python3.6/site-packages/traits/trait_handlers.py\", line 2305, in __init__\n", + " value = [ validate( object, name, val ) for val in value ]\n", + " File \"/root/.pyenv/versions/3.6.7/envs/neuro/lib/python3.6/site-packages/traits/trait_handlers.py\", line 2305, in \n", + " value = [ validate( object, name, val ) for val in value ]\n", + " File \"/root/.pyenv/versions/3.6.7/envs/neuro/lib/python3.6/site-packages/nipype/interfaces/base/traits_extension.py\", line 260, in validate\n", + " validated_value, ', '.join(_exts)))\n", + "traits.trait_errors.TraitError: /data/raw/subject_4/session_0/anat_hc.nii.gz is not included in allowed types: .img, .nii, .hdr\n", + "Error setting node input:\n", + "Node: coregister_fmri\n", + "input: source\n", + "results_file: /data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/selectfiles/result_selectfiles.pklz\n", + "value: /data/raw/subject_4/session_0/anat_hc.nii.gz\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:31,48 nipype.workflow INFO:\n", + "\t [Node] Setting-up \"spm_rest_preprocessing.joinpath\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/joinpath\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Setting-up \"spm_rest_preprocessing.joinpath\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_4/joinpath\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:31,53 nipype.workflow INFO:\n", + "\t [Node] Cached \"spm_rest_preprocessing.joinpath\" - collecting precomputed outputs\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Cached \"spm_rest_preprocessing.joinpath\" - collecting precomputed outputs\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:31,55 nipype.workflow INFO:\n", + "\t [Node] \"spm_rest_preprocessing.joinpath\" found cached.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] \"spm_rest_preprocessing.joinpath\" found cached.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:31,57 nipype.workflow INFO:\n", + "\t [Node] Setting-up \"spm_rest_preprocessing.selectfiles\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_3/selectfiles\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Setting-up \"spm_rest_preprocessing.selectfiles\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_3/selectfiles\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:31,64 nipype.workflow INFO:\n", + "\t [Node] Running \"selectfiles\" (\"neuro_pypes.crumb.DataCrumb\")\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Running \"selectfiles\" (\"neuro_pypes.crumb.DataCrumb\")\n" + ] + }, + { + "name": "stdout", + "output_type": 
"stream", + "text": [ + "181202-21:12:31,72 nipype.workflow INFO:\n", + "\t [Node] Finished \"spm_rest_preprocessing.selectfiles\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Finished \"spm_rest_preprocessing.selectfiles\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:31,74 nipype.workflow INFO:\n", + "\t [Node] Setting-up \"spm_rest_preprocessing.trim\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_3/trim\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Setting-up \"spm_rest_preprocessing.trim\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_3/trim\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:31,79 nipype.workflow INFO:\n", + "\t [Node] Cached \"spm_rest_preprocessing.trim\" - collecting precomputed outputs\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Cached \"spm_rest_preprocessing.trim\" - collecting precomputed outputs\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:31,80 nipype.workflow INFO:\n", + "\t [Node] \"spm_rest_preprocessing.trim\" found cached.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] \"spm_rest_preprocessing.trim\" found cached.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:31,82 nipype.workflow INFO:\n", + "\t [Node] Setting-up \"spm_rest_preprocessing.stc_params\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_3/stc_params\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Setting-up \"spm_rest_preprocessing.stc_params\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_3/stc_params\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:31,89 nipype.workflow INFO:\n", + "\t [Node] Running \"stc_params\" (\"neuro_pypes.preproc.slicetime_params.STCParametersInterface\")\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Running \"stc_params\" (\"neuro_pypes.preproc.slicetime_params.STCParametersInterface\")\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:31,101 nipype.workflow INFO:\n", + "\t [Node] Finished \"spm_rest_preprocessing.stc_params\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Finished \"spm_rest_preprocessing.stc_params\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:31,103 nipype.workflow INFO:\n", + "\t [Node] Setting-up \"spm_rest_preprocessing.gunzip\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_3/gunzip\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Setting-up \"spm_rest_preprocessing.gunzip\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_3/gunzip\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + 
"text": [ + "181202-21:12:31,109 nipype.workflow INFO:\n", + "\t [Node] Running \"gunzip\" (\"nipype.algorithms.misc.Gunzip\")\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Running \"gunzip\" (\"nipype.algorithms.misc.Gunzip\")\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:31,851 nipype.workflow INFO:\n", + "\t [Node] Finished \"spm_rest_preprocessing.gunzip\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Finished \"spm_rest_preprocessing.gunzip\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:31,852 nipype.workflow INFO:\n", + "\t [Node] Setting-up \"spm_rest_preprocessing.slice_timing\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_3/slice_timing\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Setting-up \"spm_rest_preprocessing.slice_timing\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_3/slice_timing\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:31,859 nipype.workflow INFO:\n", + "\t [Node] Running \"slice_timing\" (\"nipype.interfaces.spm.preprocess.SliceTiming\")\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Running \"slice_timing\" (\"nipype.interfaces.spm.preprocess.SliceTiming\")\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:59,946 nipype.workflow INFO:\n", + "\t [Node] Finished \"spm_rest_preprocessing.slice_timing\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Finished \"spm_rest_preprocessing.slice_timing\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:59,948 nipype.workflow INFO:\n", + "\t [Node] Setting-up \"spm_rest_preprocessing.realign\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_3/realign\".\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Setting-up \"spm_rest_preprocessing.realign\" in \"/data/neuro_pypes/spm_rest_preprocessing/spm_rest_preprocessing/_session_session_0_subject_id_subject_3/realign\".\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "181202-21:12:59,954 nipype.workflow INFO:\n", + "\t [Node] Running \"realign\" (\"nipype.interfaces.nipy.preprocess.SpaceTimeRealigner\")\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:nipype.workflow:[Node] Running \"realign\" (\"nipype.interfaces.nipy.preprocess.SpaceTimeRealigner\")\n", + "/root/.pyenv/versions/3.6.7/envs/neuro/lib/python3.6/site-packages/nipy/algorithms/registration/groupwise_registration.py:481: UserWarning: Minimization failed\n", + " warnings.warn('Minimization failed')\n" + ] + } + ], + "source": [ + "n_cpus = 1\n", + "\n", "if n_cpus > 1:\n", " wf.run(plugin=plugin, plugin_args={\"n_procs\": n_cpus})\n", "else:\n", diff --git a/scripts/rest_fmri_preprocessing/process.py b/scripts/rest_fmri_preprocessing/process.py index 964ea48..fe52432 100644 --- a/scripts/rest_fmri_preprocessing/process.py +++ b/scripts/rest_fmri_preprocessing/process.py @@ -1,110 +1,78 @@ +#!/usr/bin/env 
python +# coding: utf-8 + +# In[1]: + + import os +from hansel import Crumb +from hansel.operations import joint_value_map, valuesmap_to_dict import nipype.pipeline.engine as pe +from nipype.algorithms.misc import Gunzip +from nipype.interfaces import spm, fsl +from nipype.interfaces.utility import IdentityInterface, Function, Select from nipype.interfaces.io import DataSink -from nipype.interfaces.utility import IdentityInterface +from nipype.interfaces.ants import N4BiasFieldCorrection +from nipype.interfaces.base import traits from neuro_pypes.crumb import DataCrumb +from neuro_pypes.preproc.slicetime_params import STCParametersInterface +from neuro_pypes.interfaces.nilearn import math_img +from neuro_pypes.preproc import get_bounding_box +from neuro_pypes._utils import flatten_list +from neuro_pypes.utils import ( + remove_ext, + joinstrings, + selectindex, + extend_trait_list +) -# from nipype.algorithms.misc import Gunzip -# from nipype.interfaces import fsl -# from nipype.interfaces.nipy.preprocess import Trim, ComputeMask -# from nipype.interfaces.utility import Function, Select, IdentityInterface - -# from neuro_pypes._utils import format_pair_list, flatten_list -# from neuro_pypes.config import setup_node, get_config_setting -# from neuro_pypes.fmri.filter import bandpass_filter -# from neuro_pypes.fmri.nuisance import rest_noise_filter_wf -# from neuro_pypes.interfaces.nilearn import mean_img, smooth_img -# from neuro_pypes.preproc import ( -# auto_spm_slicetime, -# nipy_motion_correction, -# spm_coregister -# ) -# from neuro_pypes.utils import (remove_ext, -# extend_trait_list, -# get_input_node, -# get_interface_node, -# get_datasink, -# get_input_file_name, -# extension_duplicates) - -# ------------------------------------------------------------------------------------------------ -# GLOBAL VARIABLES -# ------------------------------------------------------------------------------------------------ - -wf_name = 'rest_fmri_preprocess' - -# STDB_DIR = os.path.expanduser('~/projects/neuro/std_brains') -# SPM_DIR = os.path.expanduser('~/Software/spm_mcr') -# BASE_DIR = os.path.expanduser('~/Data/nuk/petnet') -# data_dir = os.path.join(BASE_DIR, 'raw') -# cache_dir = os.path.join(BASE_DIR, 'wd') -# output_dir = os.path.join(BASE_DIR, 'out') -# plugin = None -# n_cpus = 5 - -# HAMM_DIR = os.path.join(STDB_DIR, 'atlases', 'hammers') -# HAMM_MNI = os.path.join(HAMM_DIR, 'Hammers_mith_atlas_n30r83_SPM5.nii.gz') -# HAMM_LABELS = os.path.join(HAMM_DIR, 'labels.txt') - -# SPM_CANONICAL_BRAIN_2MM = os.path.join(STDB_DIR, 'templates', 'spm_canonical', 'single_subj_T1_brain.nii.gz') -# # template files -# PET_MNI = os.path.join(STDB_DIR, 'templates', 'spm_canonical', 'pet.nii') -# MNI_MASK = os.path.join(STDB_DIR, 'templates', 'avg152T1_brain.nii.gz') +wf_name = 'spm_rest_preprocessing' -# # data input/output os.path.dirname(__file__) means alongside with this .py file -# #settings_file = os.path.join(os.path.dirname(__file__), 'pypes_config.yml') -# settings_file = ('/home/iripp/projects/alex/nuk_experiments/MRPET_15/Preproc_30_60pi_pet_recon_NEW/pypes_config.yml') -# #nipype_cfg_file = os.path.join(os.path.dirname(__file__), 'nipype.cfg') -# nipype_cfg_file = ('/home/iripp/projects/alex/nuk_experiments/MRPET_15/Preproc_30_60pi_pet_recon_NEW/nipype.cfg') +#work_dir = os.path.expanduser(f'~/data/neuro_pypes/{wf_name}/') +work_dir = os.path.expanduser(f'/data/neuro_pypes/{wf_name}/') +#input_dir = os.path.expanduser('~/projects/neuro/multimodal_test_data/raw') +input_dir = 
os.path.expanduser('/data/raw') +output_dir = os.path.join(work_dir, 'out') +cache_dir = os.path.join(work_dir, 'wd') -# mrpet15_preproc_wf_2 = dict([ -# ("spm_anat_preproc", attach_spm_anat_preprocessing), -# ("spm_pet_preproc", attach_spm_pet_preprocessing), -# ("spm_mrpet_preproc", attach_spm_mrpet_preprocessing), -# ("spm_pet_grouptemplate", attach_spm_pet_grouptemplate), -# ]) +data_path = os.path.join(os.path.expanduser(input_dir), '{subject_id}', '{session}', '{image}') +data_crumb = Crumb(data_path, ignore_list=['.*']) +crumb_modalities = { + 'anat': [('image', 'anat_hc.nii.gz')], + 'rest': [('image', 'rest.nii.gz')] +} -# data_path = os.path.join(os.path.expanduser(data_dir), '{session}', '{subject_id}', '{scan}', '{image}') -# data_crumb = Crumb(data_path, ignore_list=['.*']) +anat_voxel_sizes = [1, 1, 1] +fmri_smoothing_kernel_fwhm = 8 -# crumb_modalities = { -# 'anat': [('scan', 'T1'), ('image', 'Head_MPRAGE_highContrast.nii.gz')], -# 'pet': [('scan', 'PET_recon_first_30min'), ('image', 'pet_recon.nii.gz')], -# } +wf = pe.Workflow(name=wf_name, base_dir=work_dir) # ------------------------------------------------------------------------------------------------ # DATA INPUT AND SINK # ------------------------------------------------------------------------------------------------ +datasource = pe.Node( + DataCrumb(crumb=data_crumb, templates=crumb_modalities, raise_on_empty=False), + name='selectfiles' +) -wf = pe.Workflow(name=wf_name, base_dir=work_dir) - -# datasink datasink = pe.Node( DataSink(parameterization=False, base_directory=output_dir, ), name="datasink" ) -# input workflow -# (work_dir, data_crumb, crumb_arg_values, files_crumb_args, wf_name="input_files"): -select_files = pe.Node( - DataCrumb(crumb=data_crumb, templates=file_templates, raise_on_empty=False), - name='selectfiles' -) - # basic file name substitutions for the datasink -undef_args = select_files.interface._infields +undef_args = datasource.interface._infields substitutions = [(name, "") for name in undef_args] substitutions.append(("__", "_")) -datasink.inputs.substitutions = extend_trait_list(datasink.inputs.substitutions, - substitutions) +# datasink.inputs.substitutions = extend_trait_list(datasink.inputs.substitutions, substitutions) # Infosource - the information source that iterates over crumb values map from the filesystem infosource = pe.Node(interface=IdentityInterface(fields=undef_args), name="infosrc") @@ -115,320 +83,535 @@ joinpath = pe.Node(joinstrings(len(undef_args)), name='joinpath') # Connect the infosrc node to the datasink -input_joins = [(name, 'arg{}'.format(arg_no + 1)) - for arg_no, name in enumerate(undef_args)] +input_joins = [(name, 'arg{}'.format(arg_no + 1)) for arg_no, name in enumerate(undef_args)] wf.connect([ - (infosource, select_files, [(field, field) for field in undef_args]), - (select_files, joinpath, input_joins), + (infosource, datasource, [(field, field) for field in undef_args]), + (datasource, joinpath, input_joins), (joinpath, datasink, [("out", "container")]), -], -) +]) + # ------------------------------------------------------------------------------------------------ # ANAT # ------------------------------------------------------------------------------------------------ - # input node - anat_input = pe.Node(IdentityInterface(fields=in_fields, mandatory_inputs=True), - name="anat_input") - - # atlas registration - if do_atlas and not isdefined(anat_input.inputs.atlas_file): - anat_input.inputs.set(atlas_file=atlas_file) - - # T1 preprocessing nodes - 
biascor = setup_node(biasfield_correct(), name="bias_correction") - gunzip_anat = setup_node(Gunzip(), name="gunzip_anat") - segment = setup_node(spm_segment(), name="new_segment") - warp_anat = setup_node(spm_apply_deformations(), name="warp_anat") - - tpm_bbox = setup_node(Function(function=get_bounding_box, - input_names=["in_file"], - output_names=["bbox"]), - name="tpm_bbox") - tpm_bbox.inputs.in_file = spm_tpm_priors_path() - - # calculate brain mask from tissue maps - tissues = setup_node(IdentityInterface(fields=["gm", "wm", "csf"], mandatory_inputs=True), - name="tissues") - - brain_mask = setup_node(Function(function=math_img, - input_names=["formula", "out_file", "gm", "wm", "csf"], - output_names=["out_file"], - imports=['from neuro_pypes.interfaces.nilearn import ni2file']), - name='brain_mask') - brain_mask.inputs.out_file = "tissues_brain_mask.nii.gz" - brain_mask.inputs.formula = "np.abs(gm + wm + csf) > 0" - - # output node - anat_output = pe.Node(IdentityInterface(fields=out_fields), name="anat_output") - - # Connect the nodes - wf.connect([ - # input to biasfieldcorrection - (anat_input, biascor , [("in_file", "input_image")]), - - # new segment - (biascor, gunzip_anat, [("output_image", "in_file")]), - (gunzip_anat, segment, [("out_file", "channel_files")]), - - # Normalize12 - (segment, warp_anat, [("forward_deformation_field", "deformation_file")]), - (segment, warp_anat, [("bias_corrected_images", "apply_to_files")]), - (tpm_bbox, warp_anat, [("bbox", "write_bounding_box")]), - - # brain mask from tissues - (segment, tissues, [(("native_class_images", selectindex, 0), "gm"), - (("native_class_images", selectindex, 1), "wm"), - (("native_class_images", selectindex, 2), "csf"), - ]), - - (tissues, brain_mask, [("gm", "gm"), ("wm", "wm"), ("csf", "csf"),]), - - # output - (warp_anat, anat_output, [("normalized_files", "anat_mni")]), - (segment, anat_output, [("modulated_class_images", "tissues_warped"), - ("native_class_images", "tissues_native"), - ("transformation_mat", "affine_transform"), - ("forward_deformation_field", "warp_forward"), - ("inverse_deformation_field", "warp_inverse"), - ("bias_corrected_images", "anat_biascorr")]), - (brain_mask, anat_output, [("out_file", "brain_mask")]), - ]) - - # atlas warping nodes - if do_atlas: - gunzip_atlas = pe.Node(Gunzip(), name="gunzip_atlas") - warp_atlas = setup_node(spm_apply_deformations(), name="warp_atlas") - anat_bbox = setup_node(Function(function=get_bounding_box, - input_names=["in_file"], - output_names=["bbox"]), - name="anat_bbox") - - # set the warping interpolation to nearest neighbour. 
- warp_atlas.inputs.write_interp = 0 - - # connect the atlas registration nodes - wf.connect([ - (anat_input, gunzip_atlas, [("atlas_file", "in_file")]), - (gunzip_anat, anat_bbox, [("out_file", "in_file")]), - (gunzip_atlas, warp_atlas, [("out_file", "apply_to_files")]), - (segment, warp_atlas, [("inverse_deformation_field", "deformation_file")]), - (anat_bbox, warp_atlas, [("bbox", "write_bounding_box")]), - (warp_atlas, anat_output, [("normalized_files", "atlas_anat")]), - ]) - -# Create the workflow object -wf = pe.Workflow(name=wf_name) - -wf.connect([(in_files, anat_wf, [("anat", "anat_input.in_file")]), - (anat_wf, datasink, [ - ("anat_output.anat_mni", "anat.@mni"), - ("anat_output.tissues_warped", "anat.tissues.warped"), - ("anat_output.tissues_native", "anat.tissues.native"), - ("anat_output.affine_transform", "anat.transform.@linear"), - ("anat_output.warp_forward", "anat.transform.@forward"), - ("anat_output.warp_inverse", "anat.transform.@inverse"), - ("anat_output.anat_biascorr", "anat.@biascor"), - ("anat_output.brain_mask", "anat.@brain_mask"), - ]), - ]) - -# check optional outputs -if do_atlas: - wf.connect([(anat_wf, datasink, [("anat_output.atlas_anat", "anat.@atlas")]),]) - -do_cortical_thickness = get_config_setting('anat_preproc.do_cortical_thickness', False) -if do_cortical_thickness: - wf.connect([(anat_wf, datasink, [("anat_output.cortical_thickness", "anat.@cortical_thickness"), - ("anat_output.warped_white_matter", "anat.@warped_white_matter"), - ]), - ]) +# T1 preprocessing nodes + +# ANTs N4 Bias field correction +# n4 = N4BiasFieldCorrection() +# n4.inputs.dimension = 3 +# n4.inputs.bspline_fitting_distance = 300 +# n4.inputs.shrink_factor = 3 +# n4.inputs.n_iterations = [50, 50, 30, 20] +# n4.inputs.convergence_threshold = 1e-6 +# n4.inputs.save_bias = True +# n4.inputs.input_image = traits.Undefined +# biascor = pe.Node(n4, name="bias_correction") + +gunzip_anat = pe.Node(Gunzip(), name="gunzip_anat") + +# SPM New Segment +spm_info = spm.Info() +priors_path = os.path.join(spm_info.path(), 'tpm', 'TPM.nii') +segment = spm.NewSegment() +tissue1 = ((priors_path, 1), 1, (True, True), (True, True)) +tissue2 = ((priors_path, 2), 1, (True, True), (True, True)) +tissue3 = ((priors_path, 3), 2, (True, True), (True, True)) +tissue4 = ((priors_path, 4), 3, (True, True), (True, True)) +tissue5 = ((priors_path, 5), 4, (True, False), (False, False)) +tissue6 = ((priors_path, 6), 2, (False, False), (False, False)) +segment.inputs.tissues = [tissue1, tissue2, tissue3, tissue4, tissue5, tissue6] +segment.inputs.channel_info = (0.0001, 60, (True, True)) +segment.inputs.write_deformation_fields = [True, True] +segment.inputs.channel_files = traits.Undefined +segment = pe.Node(segment, name="new_segment") + +# Apply deformations +normalize_anat = spm.Normalize12(jobtype='write') +normalize_anat.inputs.write_voxel_sizes = anat_voxel_sizes +normalize_anat.inputs.deformation_file = traits.Undefined +normalize_anat.inputs.image_to_align = traits.Undefined +normalize_anat.inputs.write_bounding_box = traits.Undefined +warp_anat = pe.Node(normalize_anat, name="warp_anat") + +tpm_bbox = pe.Node( + Function(function=get_bounding_box, input_names=["in_file"], output_names=["bbox"]), + name="tpm_bbox" +) +tpm_bbox.inputs.in_file = priors_path + +# calculate brain mask from tissue maps +tissues = pe.Node( + IdentityInterface(fields=["gm", "wm", "csf"], mandatory_inputs=True), + name="tissues" +) +brain_mask = pe.Node( + Function( + function=math_img, + input_names=["formula", "out_file", 
"gm", "wm", "csf"], + output_names=["out_file"], + imports=['from neuro_pypes.interfaces.nilearn import ni2file']), + name='brain_mask' +) +brain_mask.inputs.out_file = "tissues_brain_mask.nii.gz" +brain_mask.inputs.formula = "np.abs(gm + wm + csf) > 0" + +# Connect the nodes +wf.connect([ + # input to biasfieldcorrection +# (datasource, biascor, [("anat", "input_image")]), + + # new segment +# (biascor, gunzip_anat, [("output_image", "in_file")]), + (datasource, gunzip_anat, [("anat", "in_file")]), + (gunzip_anat, segment, [("out_file", "channel_files")]), + + # Normalize12 + (segment, warp_anat, [("forward_deformation_field", "deformation_file")]), + (segment, warp_anat, [("bias_corrected_images", "apply_to_files")]), + (tpm_bbox, warp_anat, [("bbox", "write_bounding_box")]), + + # brain mask from tissues + (segment, tissues,[ + (("native_class_images", selectindex, 0), "gm"), + (("native_class_images", selectindex, 1), "wm"), + (("native_class_images", selectindex, 2), "csf"), + ]), + + (tissues, brain_mask, [("gm", "gm"), ("wm", "wm"), ("csf", "csf"),]), + + # output + (warp_anat, datasink, [("normalized_files", "anat.@mni")]), + (segment, datasink, [("modulated_class_images", "anat.tissues.warped"), + ("native_class_images", "anat.tissues.native"), + ("transformation_mat", "anat.transform.@linear"), + ("forward_deformation_field", "anat.transform.@forward"), + ("inverse_deformation_field", "anat.transform.@inverse"), + ("bias_corrected_images", "anat.@biascor")]), + (brain_mask, datasink, [("out_file", "anat.@brain_mask")]), +]) + + + +def _sum_one_to_each(slice_order): # SPM starts count from 1 + return [i+1 for i in slice_order] + +def _sum_one(num): + return num + 1 + +def _pick_first(sequence): + return sequence[0] + + +from nipype.interfaces.nipy.preprocess import Trim, ComputeMask + +# ------------------------------------------------------------------------------------------------ +# FMRI Clean +# ------------------------------------------------------------------------------------------------ + +# rs-fMRI preprocessing nodes +trim = pe.Node(Trim(), name="trim") + +# slice-timing correction +params = pe.Node(STCParametersInterface(), name='stc_params') +params.inputs.time_repetition = 2 +params.inputs.slice_mode = 'alt_inc' + +gunzip = pe.Node(Gunzip(), name="gunzip") + +stc = spm.SliceTiming() +stc.inputs.in_files = traits.Undefined +stc.inputs.out_prefix = 'stc' +slice_timing = pe.Node(stc, name='slice_timing') + +wf.connect([ + # trim + (datasource, trim, [("rest", "in_file")]), + + # slice time correction + (trim, params, [("out_file", "in_files")]), + + # processing nodes + (params, gunzip, [(("in_files", _pick_first), "in_file")]), + (params, slice_timing, [ + (("slice_order", _sum_one_to_each), "slice_order"), + (("ref_slice", _sum_one), "ref_slice"), + ("num_slices", "num_slices"), + ("time_acquisition", "time_acquisition"), + ("time_repetition", "time_repetition"), + ]), + + (gunzip, slice_timing, [("out_file", "in_files")]), + +]) + + +# ------------------------------------------------------------------------------------------------ +# FMRI Warp, Align, Filtering, Smoothing +# ------------------------------------------------------------------------------------------------ +from nipype.interfaces.nipy import SpaceTimeRealigner +from nipype.algorithms.confounds import TSNR +from nipype.algorithms.rapidart import ArtifactDetect + +from neuro_pypes.fmri.nuisance import rest_noise_filter_wf +from neuro_pypes.interfaces.nilearn import mean_img, smooth_img + + +realign = 
pe.Node(SpaceTimeRealigner(), name='realign') + +# average +average = pe.Node( + Function( + function=mean_img, + input_names=["in_file"], + output_names=["out_file"], + imports=['from neuro_pypes.interfaces.nilearn import ni2file'] + ), + name='average_epi' +) + +mean_gunzip = pe.Node(Gunzip(), name="mean_gunzip") + +# co-registration nodes +coreg = spm.Coregister() +coreg.inputs.cost_function = "mi" +coreg.inputs.jobtype = 'estwrite' + +coregister = pe.Node(coreg, name="coregister_fmri") +brain_sel = pe.Node(Select(index=[0, 1, 2]), name="brain_sel") + +# brain mask made with EPI +epi_mask = pe.Node(ComputeMask(), name='epi_mask') + +# brain mask made with the merge of the tissue segmentations +tissue_mask = pe.Node(fsl.MultiImageMaths(), name='tissue_mask') +tissue_mask.inputs.op_string = "-add %s -add %s -abs -kernel gauss 4 -dilM -ero -kernel gauss 1 -dilM -bin" +tissue_mask.inputs.out_file = "tissue_brain_mask.nii.gz" + +# select tissues +gm_select = pe.Node(Select(index=[0]), name="gm_sel") +wmcsf_select = pe.Node(Select(index=[1, 2]), name="wmcsf_sel") + +# noise filter +wm_select = pe.Node(Select(index=[1]), name="wm_sel") +csf_select = pe.Node(Select(index=[2]), name="csf_sel") + + +# anat to fMRI registration inputs +wf.connect([ +# (biascorr, coregister), [("output_image", "source")], + (datasource, coregister, [("anat", "source")]), + (segment, brain_sel, [("native_class_images", "inlist")]), +]) + + +wf.connect([ + # motion correction + (slice_timing, realign, [("timecorrected_files", "in_file")]), + + # coregistration target + (realign, average, [("out_file", "in_file")]), + (average, mean_gunzip, [("out_file", "in_file")]), + (mean_gunzip, coregister, [("out_file", "target")]), + + # epi brain mask + (average, epi_mask, [("out_file", "mean_volume")]), + + # coregistration + (brain_sel, coregister, [(("out", flatten_list), "apply_to_files")]), + + # tissue brain mask + (coregister, gm_select, [("coregistered_files", "inlist")]), + (coregister, wmcsf_select, [("coregistered_files", "inlist")]), + (gm_select, tissue_mask, [(("out", flatten_list), "in_file")]), + (wmcsf_select, tissue_mask, [(("out", flatten_list), "operand_files")]), + + # nuisance correction + (coregister, wm_select, [("coregistered_files", "inlist",)]), + (coregister, csf_select, [("coregistered_files", "inlist",)]), +]) + # ------------------------------------------------------------------------------------------------ -# FMRI +# FMRI Noise removal # ------------------------------------------------------------------------------------------------ +from neuro_pypes.preproc import motion_regressors, extract_noise_components, create_regressors +from neuro_pypes.utils import selectindex, rename + +# CompCor rsfMRI filters (at least compcor_csf should be True). +filters = { + 'compcor_csf': True, + 'compcor_wm': False, + 'gsr': False +} + +# Compute TSNR on realigned data regressing polynomial up to order 2 +tsnr = pe.Node(TSNR(regress_poly=2), name='tsnr') + +# Use :class:`nipype.algorithms.rapidart` to determine which of the +# images in the functional series are outliers based on deviations in +# intensity or movement. 
+art = pe.Node(ArtifactDetect(), name="rapidart_artifacts") +# # Threshold to use to detect motion-related outliers when composite motion is being used +art.inputs.use_differences = [True, False] +art.inputs.use_norm = True +art.inputs.zintensity_threshold = 2 +art.inputs.use_norm = True +art.inputs.norm_threshold = 1 +art.inputs.mask_type = 'file' +art.inputs.parameter_source = 'NiPy' + +# Compute motion regressors +motion_regs = pe.Node( + Function( + input_names=['motion_params', 'order', 'derivatives'], + output_names=['out_files'], + function=motion_regressors + ), + name='motion_regressors' +) +# motion regressors upto given order and derivative +# motion + d(motion)/dt + d2(motion)/dt2 (linear + quadratic) +motion_regs.inputs.order = 0 +motion_regs.inputs.derivatives = 1 + +# Create a filter to remove motion and art confounds +motart_pars = pe.Node( + Function( + input_names=['motion_params', 'comp_norm', 'outliers', 'detrend_poly'], + output_names=['out_files'], + function=create_regressors + ), + name='motart_parameters' +) +# # number of polynomials to add to detrend +motart_pars.inputs.detrend_poly = 2 + +motion_filter = pe.Node( + fsl.GLM( + out_f_name='F_mcart.nii.gz', + out_pf_name='pF_mcart.nii.gz', + demean=True + ), + name='motion_filter' +) -# # specify input and output fields -# in_fields = [ -# "in_file", -# "anat", -# "atlas_anat", -# "coreg_target", -# "tissues", -# "lowpass_freq", -# "highpass_freq", -# ] - -# out_fields = [ -# "motion_corrected", -# "motion_params", -# "tissues", -# "anat", -# "avg_epi", -# "time_filtered", -# "smooth", -# "tsnr_file", -# "epi_brain_mask", -# "tissues_brain_mask", -# "motion_regressors", -# "compcor_regressors", -# "gsr_regressors", -# "nuis_corrected", -# "art_displacement_files", -# "art_intensity_files", -# "art_norm_files", -# "art_outlier_files", -# "art_plot_files", -# "art_statistic_files", -# ] - -# # input identities -# rest_input = setup_node(IdentityInterface(fields=in_fields, mandatory_inputs=True), -# name="rest_input") - -# # rs-fMRI preprocessing nodes -# trim = setup_node(Trim(), name="trim") - -# stc_wf = auto_spm_slicetime() -# realign = setup_node(nipy_motion_correction(), name='realign') - -# # average -# average = setup_node( -# Function( -# function=mean_img, -# input_names=["in_file"], -# output_names=["out_file"], -# imports=['from neuro_pypes.interfaces.nilearn import ni2file'] -# ), -# name='average_epi' -# ) - -# mean_gunzip = setup_node(Gunzip(), name="mean_gunzip") - -# # co-registration nodes -# coreg = setup_node(spm_coregister(cost_function="mi"), name="coreg_fmri") -# brain_sel = setup_node(Select(index=[0, 1, 2]), name="brain_sel") - -# # brain mask made with EPI -# epi_mask = setup_node(ComputeMask(), name='epi_mask') - -# # brain mask made with the merge of the tissue segmentations -# tissue_mask = setup_node(fsl.MultiImageMaths(), name='tissue_mask') -# tissue_mask.inputs.op_string = "-add %s -add %s -abs -kernel gauss 4 -dilM -ero -kernel gauss 1 -dilM -bin" -# tissue_mask.inputs.out_file = "tissue_brain_mask.nii.gz" - -# # select tissues -# gm_select = setup_node(Select(index=[0]), name="gm_sel") -# wmcsf_select = setup_node(Select(index=[1, 2]), name="wmcsf_sel") - -# # noise filter -# noise_wf = rest_noise_filter_wf() -# wm_select = setup_node(Select(index=[1]), name="wm_sel") -# csf_select = setup_node(Select(index=[2]), name="csf_sel") - -# # bandpass filtering -# bandpass = setup_node( -# Function( -# input_names=['files', 'lowpass_freq', 'highpass_freq', 'tr'], -# 
output_names=['out_files'], -# function=bandpass_filter -# ), -# name='bandpass' -# ) - -# # smooth -# smooth = setup_node( -# Function( -# function=smooth_img, -# input_names=["in_file", "fwhm"], -# output_names=["out_file"], -# imports=['from neuro_pypes.interfaces.nilearn import ni2file'] -# ), -# name="smooth" -# ) -# smooth.inputs.fwhm = get_config_setting('fmri_smooth.fwhm', default=8) -# smooth.inputs.out_file = "smooth_std_{}.nii.gz".format(wf_name) - -# # output identities -# rest_output = setup_node(IdentityInterface(fields=out_fields), name="rest_output") - -# # Connect the nodes -# wf.connect([ -# # trim -# (rest_input, trim, [("in_file", "in_file")]), - -# # slice time correction -# (trim, stc_wf, [("out_file", "stc_input.in_file")]), - -# # motion correction -# (stc_wf, realign, [("stc_output.timecorrected_files", "in_file")]), - -# # coregistration target -# (realign, average, [("out_file", "in_file")]), -# (average, mean_gunzip, [("out_file", "in_file")]), -# (mean_gunzip, coreg, [("out_file", "target")]), - -# # epi brain mask -# (average, epi_mask, [("out_file", "mean_volume")]), - -# # coregistration -# (rest_input, coreg, [("anat", "source")]), -# (rest_input, brain_sel, [("tissues", "inlist")]), -# (brain_sel, coreg, [(("out", flatten_list), "apply_to_files")]), - -# # tissue brain mask -# (coreg, gm_select, [("coregistered_files", "inlist")]), -# (coreg, wmcsf_select, [("coregistered_files", "inlist")]), -# (gm_select, tissue_mask, [(("out", flatten_list), "in_file")]), -# (wmcsf_select, tissue_mask, [(("out", flatten_list), "operand_files")]), - -# # nuisance correction -# (coreg, wm_select, [("coregistered_files", "inlist",)]), -# (coreg, csf_select, [("coregistered_files", "inlist",)]), -# (realign, noise_wf, [("out_file", "rest_noise_input.in_file",)]), -# (tissue_mask, noise_wf, [("out_file", "rest_noise_input.brain_mask")]), -# (wm_select, noise_wf, [(("out", flatten_list), "rest_noise_input.wm_mask")]), -# (csf_select, noise_wf, [(("out", flatten_list), "rest_noise_input.csf_mask")]), - -# (realign, noise_wf, [("par_file", "rest_noise_input.motion_params",)]), - -# # temporal filtering -# (noise_wf, bandpass, [("rest_noise_output.nuis_corrected", "files")]), -# # (realign, bandpass, [("out_file", "files")]), -# (stc_wf, bandpass, [("stc_output.time_repetition", "tr")]), -# (rest_input, bandpass, [ -# ("lowpass_freq", "lowpass_freq"), -# ("highpass_freq", "highpass_freq"), -# ]), -# (bandpass, smooth, [("out_files", "in_file")]), - -# # output -# (epi_mask, rest_output, [("brain_mask", "epi_brain_mask")]), -# (tissue_mask, rest_output, [("out_file", "tissues_brain_mask")]), -# (realign, rest_output, [ -# ("out_file", "motion_corrected"), -# ("par_file", "motion_params"), -# ]), -# (coreg, rest_output, [ -# ("coregistered_files", "tissues"), -# ("coregistered_source", "anat"), -# ]), -# (noise_wf, rest_output, [ -# ("rest_noise_output.motion_regressors", "motion_regressors"), -# ("rest_noise_output.compcor_regressors", "compcor_regressors"), -# ("rest_noise_output.gsr_regressors", "gsr_regressors"), -# ("rest_noise_output.nuis_corrected", "nuis_corrected"), -# ("rest_noise_output.tsnr_file", "tsnr_file"), -# ("rest_noise_output.art_displacement_files", "art_displacement_files"), -# ("rest_noise_output.art_intensity_files", "art_intensity_files"), -# ("rest_noise_output.art_norm_files", "art_norm_files"), -# ("rest_noise_output.art_outlier_files", "art_outlier_files"), -# ("rest_noise_output.art_plot_files", "art_plot_files"), -# 
("rest_noise_output.art_statistic_files", "art_statistic_files"), -# ]), -# (average, rest_output, [("out_file", "avg_epi")]), -# (bandpass, rest_output, [("out_files", "time_filtered")]), -# (smooth, rest_output, [("out_file", "smooth")]), -# ]) +# Noise confound regressors +compcor_pars = pe.Node( + Function( + input_names=['realigned_file', 'mask_file', 'num_components', 'extra_regressors'], + output_names=['components_file'], + function=extract_noise_components + ), + name='compcor_pars' +) +# Number of principal components to calculate when running CompCor. 5 or 6 is recommended. +compcor_pars.inputs.num_components = 6 + +compcor_filter = pe.Node( + fsl.GLM(out_f_name='F.nii.gz', out_pf_name='pF.nii.gz', demean=True), + name='compcor_filter' +) + +# Global signal regression +gsr_pars = pe.Node( + Function( + input_names=['realigned_file', 'mask_file', 'num_components', 'extra_regressors'], + output_names=['components_file'], + function=extract_noise_components + ), + name='gsr_pars' +) +# Number of principal components to calculate when running Global Signal Regression. 1 is recommended. +gsr_pars.inputs.num_components: 1 + +gsr_filter = pe.Node( + fsl.GLM(out_f_name='F_gsr.nii.gz', out_pf_name='pF_gsr.nii.gz', demean=True), + name='gsr_filter' +) + +wf.connect([ + # tsnr + (realign, tsnr, [ + ("out_file", "in_file"), + ]), + + # artifact detection + (tissue_mask, art, [("out_file", "mask_file")]), + (realign, art, [ + ("out_file", "realigned_files"), + ("par_file", "realignment_parameters") + ]), + + # calculte motion regressors + (realign, motion_regs, [ + ("par_file", "motion_params") + ]), + + # create motion and confound regressors parameters file + (art, motart_pars, [ + ("norm_files", "comp_norm"), + ("outlier_files", "outliers"), + ]), + (motion_regs, motart_pars, [ + ("out_files", "motion_params") + ]), + + # motion filtering + (realign, motion_filter, [ + ("out_file", "in_file"), + (("out_file", rename, "_filtermotart"), "out_res_name"), + ]), + (motart_pars, motion_filter, [ + (("out_files", selectindex, 0), "design") + ]), +]) + +wf.connect([ + # output + (tsnr, datasink, [("tsnr_file", "rest.@tsnr")]), + + (motart_pars, datasink, [("out_files", "rest.@motion_regressors")]), + (motion_filter, datasink, [("out_res", "rest.@motion_corrected")]), + (art, datasink, [ + ("displacement_files", "rest.artifact_stats.@displacement"), + ("intensity_files", "rest.artifact_stats.@intensity"), + ("norm_files", "rest.artifact_stats.@norm"), + ("outlier_files", "rest.artifact_stats.@outliers"), + ("plot_files", "rest.artifact_stats.@plots"), + ("statistic_files", "rest.artifact_stats.@stats"), + ]), +]) + + +last_filter = motion_filter + +# compcor filter +if filters['compcor_csf'] or filters['compcor_wm']: + wf.connect([ + # calculate compcor regressor and parameters file + (motart_pars, compcor_pars, [(("out_files", selectindex, 0), "extra_regressors"), ]), + (motion_filter, compcor_pars, [("out_res", "realigned_file"), ]), + + # the compcor filter + (motion_filter, compcor_filter, [("out_res", "in_file"), + (("out_res", rename, "_cleaned"), "out_res_name"), + ]), + (compcor_pars, compcor_filter, [("components_file", "design")]), + (tissue_mask, compcor_filter, [("out_file", "mask")]), + + # output + (compcor_pars, datasink, [("components_file", "rest.@compcor_regressors")]), + ]) + last_filter = compcor_filter + +# global signal regression +if filters['gsr']: + wf.connect([ + # calculate gsr regressors parameters file + (last_filter, gsr_pars, [("out_res", "realigned_file")]), + 
(tissue_mask, gsr_pars, [("out_file", "mask_file")]), + + # the output file name + (tissue_mask, gsr_filter, [("out_file", "mask")]), + (last_filter, gsr_filter, [ + ("out_res", "in_file"), + (("out_res", rename, "_gsr"), "out_res_name"), + ]), + (gsr_pars, gsr_filter, [("components_file", "design")]), + + # output + (gsr_pars, datasink, [("components_file", "rest.@gsr_regressors")]), + ]) + last_filter = gsr_filter + +# connect the final nuisance correction output node +wf.connect([(last_filter, datasink, [("out_res", "rest.@nuis_corrected")]), ]) + +if filters['compcor_csf'] and filters['compcor_wm']: + mask_merge = setup_node(Merge(2), name="mask_merge") + wf.connect([ + ## the mask for the compcor filter + (wm_select, mask_merge, [(("out", flatten_list), "in1")]), + (csf_select, mask_merge, [(("out", flatten_list), "in2")]), + (mask_merge, compcor_pars, [("out", "mask_file")]), + ]) + +elif filters['compcor_csf']: + wf.connect([ + ## the mask for the compcor filter + (csf_select, compcor_pars, [(("out", flatten_list), "mask_file")]), + ]) + +elif filters['compcor_wm']: + wf.connect([ + ## the mask for the compcor filter + (wm_select, compcor_pars, [(("out", flatten_list), "mask_file")]), + ]) + + +# In[ ]: + + +from neuro_pypes.fmri.filter import bandpass_filter +from neuro_pypes.interfaces.nilearn import smooth_img + +# bandpass filtering +bandpass = pe.Node( + Function( + input_names=['files', 'lowpass_freq', 'highpass_freq', 'tr'], + output_names=['out_files'], + function=bandpass_filter + ), + name='bandpass' +) +bandpass.inputs.lowpass_freq = 0.1 +bandpass.inputs.highpass_freq = 0.01 + +# smooth +smooth = pe.Node( + Function( + function=smooth_img, + input_names=["in_file", "fwhm"], + output_names=["out_file"], + imports=['from neuro_pypes.interfaces.nilearn import ni2file'] + ), + name="smooth" +) +smooth.inputs.fwhm = fmri_smoothing_kernel_fwhm +smooth.inputs.out_file = "smooth_std_{}.nii.gz".format(wf_name) + + +wf.connect([ + # temporal filtering + (last_filter, bandpass, [("out_res", "files")]), + + # (realign, bandpass, [("out_file", "files")]), + (params, bandpass, [("time_repetition", "tr")]), + (bandpass, smooth, [("out_files", "in_file")]), + + # output + (epi_mask, datasink, [("brain_mask", "rest.@epi_brain_mask")]), + (tissue_mask, datasink, [("out_file", "rest.@tissues_brain_mask")]), + (realign, datasink, [ + ("out_file", "rest.@realigned"), + ("par_file", "rest.@motion_params"), + ]), + (coregister, datasink, [ + ("coregistered_files", "rest.@tissues"), + ("coregistered_source", "rest.@anat"), + ]), + (average, datasink, [("out_file", "rest.@avg_epi")]), + (bandpass, datasink, [("out_files", "rest.@time_filtered")]), + (smooth, datasink, [("out_file", "rest.@smooth")]), +]) + + +if __name__ == '__main__': + n_cpus = 1 + + if n_cpus > 1: + wf.run(plugin=plugin, plugin_args={"n_procs": n_cpus}) + else: + wf.run(plugin=None) From 7908f00a1aa1320dec538b159f0f956c5069c61f Mon Sep 17 00:00:00 2001 From: Alexandre Savio Date: Sun, 2 Dec 2018 22:15:55 +0100 Subject: [PATCH 6/8] fix(preproc/slicetime_params.py): make sure function returns a list --- neuro_pypes/preproc/slicetime_params.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neuro_pypes/preproc/slicetime_params.py b/neuro_pypes/preproc/slicetime_params.py index d0e63cb..5d7dadf 100644 --- a/neuro_pypes/preproc/slicetime_params.py +++ b/neuro_pypes/preproc/slicetime_params.py @@ -203,7 +203,7 @@ def calculate_slice_order(n_slices, slice_mode): img = nib.load(in_file) times = 
get_nii_slice_times(img)
     if times is not None:
-        return order_from_times(times)
+        return list(order_from_times(times))
 
     # read the slice mode code from the file
     if slice_mode == 'unknown':

From 60719c2b5fc28d44304bc286ef27822678ca4aa4 Mon Sep 17 00:00:00 2001
From: Alexandre Savio
Date: Sun, 2 Dec 2018 22:16:21 +0100
Subject: [PATCH 7/8] chore(.gitignore): add ipynb checkpoints and nipype crash files

---
 .gitignore | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/.gitignore b/.gitignore
index 1533295..0186e34 100644
--- a/.gitignore
+++ b/.gitignore
@@ -23,6 +23,8 @@ var/
 .installed.cfg
 *.egg
 Pipfile.lock
+**/.ipynb_checkpoints
+**crash*
 
 # PyInstaller
 #  Usually these files are written by a python script from a template

From c5b2f8717186f44cfee49ff9f7d71aa01f303298 Mon Sep 17 00:00:00 2001
From: Alexandre Savio
Date: Sun, 2 Dec 2018 22:26:15 +0100
Subject: [PATCH 8/8] docs(README): update README

---
 scripts/rest_fmri_preprocessing/README.md | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/scripts/rest_fmri_preprocessing/README.md b/scripts/rest_fmri_preprocessing/README.md
index 2ae2d9e..b1304f5 100644
--- a/scripts/rest_fmri_preprocessing/README.md
+++ b/scripts/rest_fmri_preprocessing/README.md
@@ -1,8 +1,24 @@
-Run the neurita/neuro_docker container:
+Build the neurita/neuro_docker image. First, clone the neuro_ansible repository:
 ```
-docker run -it -p 8888:8888 --name neuro -v $PWD/../multimodal_test_data:/data -v $PWD/../neuro_pypes:/root/projects/neuro_pypes neurita/neuro_docker:0.2 /bin/bash
+git clone https://github.com/Neurita/neuro_ansible.git
+```
+
+Install the dependencies of neuro_ansible, then build and run the container:
+
+```
+cd neuro_ansible
+make docker-run
+```
+Exit the container and delete it, so it can be recreated with your own volume mounts.
+
+Run the container again, this time mounting your own data and neuro_pypes directories:
+```
+export DATA_DIR=$HOME/projects/multimodal_test_data
+export PYPES_DIR=$HOME/projects/neuro_pypes
+
+docker run -it -p 8888:8888 --name neuro -v $DATA_DIR:/data -v $PYPES_DIR:/root/projects/neuro_pypes neurita/neuro_docker:0.2 /bin/bash
 ```
 Inside the container:
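For orientation, here is a minimal sketch of how the preprocessing script added by this series might be launched once the container is up. The editable install step is an assumption; the `/root/projects/neuro_pypes` mount point, the `/data` volume and the script path come from the patches above:

```
cd /root/projects/neuro_pypes
pip install -e .   # assumed: make the neuro_pypes package importable inside the container

# process.py reads its input from /data/raw and writes its working and output
# directories under /data/neuro_pypes/spm_rest_preprocessing
python scripts/rest_fmri_preprocessing/process.py
```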