From f677c43d662a8f65d1ba17c5920bdcdbf89730ca Mon Sep 17 00:00:00 2001 From: NicolasGensollen Date: Wed, 13 Nov 2024 17:14:12 +0100 Subject: [PATCH] remove factory and adapt to GroupID new type --- .../anatomical/freesurfer/atlas/pipeline.py | 18 +- .../longitudinal/correction/pipeline.py | 12 +- .../longitudinal/template/pipeline.py | 20 +- .../anatomical/freesurfer/t1/pipeline.py | 18 +- clinica/pipelines/dwi/connectome/pipeline.py | 53 ++-- clinica/pipelines/dwi/dti/pipeline.py | 8 +- .../dwi/preprocessing/fmap/pipeline.py | 22 +- .../dwi/preprocessing/t1/pipeline.py | 19 +- .../spatial_svm_pipeline.py | 31 +-- clinica/pipelines/pet/linear/pipeline.py | 21 +- clinica/pipelines/pet/volume/pipeline.py | 28 +-- .../pet_surface/pet_surface_pipeline.py | 36 ++- .../pipelines/statistics_surface/pipeline.py | 5 +- .../statistics_volume_pipeline.py | 29 ++- .../t1_linear/anat_linear_pipeline.py | 11 +- .../t1_volume_create_dartel_pipeline.py | 6 +- .../t1_volume_dartel2mni_pipeline.py | 23 +- .../t1_volume_parcellation_pipeline.py | 8 +- .../t1_volume_register_dartel_pipeline.py | 20 +- .../t1_volume_tissue_segmentation_pipeline.py | 5 +- clinica/pydra/query.py | 55 ++--- clinica/utils/input_files.py | 233 +++++------------- clinica/utils/spm.py | 5 +- test/unittests/pydra/test_interfaces.py | 5 +- test/unittests/pydra/test_query.py | 5 +- test/unittests/utils/test_input_files.py | 160 +++++++----- 26 files changed, 381 insertions(+), 475 deletions(-) diff --git a/clinica/pipelines/anatomical/freesurfer/atlas/pipeline.py b/clinica/pipelines/anatomical/freesurfer/atlas/pipeline.py index 18c236765..29d756b25 100644 --- a/clinica/pipelines/anatomical/freesurfer/atlas/pipeline.py +++ b/clinica/pipelines/anatomical/freesurfer/atlas/pipeline.py @@ -51,7 +51,12 @@ def get_to_process_with_atlases( get_processed_images, ) from clinica.utils.filemanip import extract_image_ids - from clinica.utils.input_files import T1_FS_DESTRIEUX + from clinica.utils.image import HemiSphere + from clinica.utils.input_files import ( + Parcellation, + get_t1_freesurfer_segmentation, + get_t1_freesurfer_statistics, + ) from clinica.utils.inputs import clinica_file_reader part_ids, sess_ids, list_long_id = grab_image_ids_from_caps_directory( @@ -66,13 +71,6 @@ def get_to_process_with_atlases( if caps_directory.is_dir(): for atlas in atlas_list: - atlas_info = dict( - { - "pattern": f"t1/freesurfer_cross_sectional/sub-*_ses-*/stats/rh.{atlas}.stats", - "description": f"{atlas}-based segmentation", - "needed_pipeline": "t1-freesurfer", - } - ) t1_freesurfer_longitudinal_output = get_processed_images( caps_directory, part_ids, sess_ids, list_long_id ) @@ -87,13 +85,13 @@ def get_to_process_with_atlases( subjects, sessions, caps_directory, - T1_FS_DESTRIEUX, + get_t1_freesurfer_segmentation(Parcellation.DESTRIEUX), ) t1_freesurfer_files, _ = clinica_file_reader( subjects, sessions, caps_directory, - atlas_info, + get_t1_freesurfer_statistics(atlas, HemiSphere.RIGHT), ) image_ids = extract_image_ids(t1_freesurfer_files) image_ids_2 = extract_image_ids(t1_freesurfer_output) diff --git a/clinica/pipelines/anatomical/freesurfer/longitudinal/correction/pipeline.py b/clinica/pipelines/anatomical/freesurfer/longitudinal/correction/pipeline.py index 0767474c3..c2072ff62 100644 --- a/clinica/pipelines/anatomical/freesurfer/longitudinal/correction/pipeline.py +++ b/clinica/pipelines/anatomical/freesurfer/longitudinal/correction/pipeline.py @@ -61,8 +61,8 @@ def _build_input_node(self): from clinica.utils.exceptions import 
ClinicaException from clinica.utils.input_files import ( Parcellation, - QueryPatternName, - query_pattern_factory, + get_t1_freesurfer_segmentation, + get_t1_freesurfer_template, ) from clinica.utils.inputs import ( clinica_file_reader, @@ -123,15 +123,11 @@ def _build_input_node(self): ) = extract_subject_session_longitudinal_ids_from_filename( to_process_ids ) - pattern_segmentation = query_pattern_factory( - QueryPatternName.T1_FREESURFER_SEGMENTATION - )(Parcellation.DESTRIEUX) + pattern_segmentation = get_t1_freesurfer_segmentation(Parcellation.DESTRIEUX) _, errors_destrieux = clinica_file_reader( self.subjects, self.sessions, self.caps_directory, pattern_segmentation ) - pattern_template = query_pattern_factory( - QueryPatternName.T1_FREESURFER_TEMPLATE - )(Parcellation.DESTRIEUX) + pattern_template = get_t1_freesurfer_template(Parcellation.DESTRIEUX) _, errors_t_destrieux = clinica_file_reader( self.subjects, list_long_id, self.caps_directory, pattern_template ) diff --git a/clinica/pipelines/anatomical/freesurfer/longitudinal/template/pipeline.py b/clinica/pipelines/anatomical/freesurfer/longitudinal/template/pipeline.py index 32b0ebd94..240e4e821 100644 --- a/clinica/pipelines/anatomical/freesurfer/longitudinal/template/pipeline.py +++ b/clinica/pipelines/anatomical/freesurfer/longitudinal/template/pipeline.py @@ -4,8 +4,8 @@ from clinica.pipelines.engine import Pipeline from clinica.utils.input_files import ( Parcellation, - QueryPatternName, - query_pattern_factory, + get_t1_freesurfer_segmentation, + get_t1_freesurfer_template, ) @@ -34,11 +34,11 @@ def get_processed_images( ] image_ids: List[str] = [] if caps_directory.is_dir(): - pattern = query_pattern_factory(QueryPatternName.T1_FREESURFER_TEMPLATE)( - Parcellation.DESTRIEUX - ) t1_freesurfer_files, _ = clinica_file_reader( - list_participant_id, list_long_id, caps_directory, pattern + list_participant_id, + list_long_id, + caps_directory, + get_t1_freesurfer_template(Parcellation.DESTRIEUX), ) image_ids = [ re.search(r"(sub-[a-zA-Z0-9]+)_(long-[a-zA-Z0-9]+)", file).group() @@ -153,11 +153,11 @@ def _build_input_node(self): self.subjects, self.sessions = extract_subjects_sessions_from_filename( to_process_ids ) - pattern = query_pattern_factory(QueryPatternName.T1_FREESURFER_SEGMENTATION)( - Parcellation.DESTRIEUX - ) _, self.subjects, self.sessions = clinica_file_filter( - self.subjects, self.sessions, self.caps_directory, pattern + self.subjects, + self.sessions, + self.caps_directory, + get_t1_freesurfer_segmentation(Parcellation.DESTRIEUX), ) long_ids = get_participants_long_id(self.subjects, self.sessions) save_part_sess_long_ids_to_tsv( diff --git a/clinica/pipelines/anatomical/freesurfer/t1/pipeline.py b/clinica/pipelines/anatomical/freesurfer/t1/pipeline.py index 41ad6c597..bbb8dc5bf 100644 --- a/clinica/pipelines/anatomical/freesurfer/t1/pipeline.py +++ b/clinica/pipelines/anatomical/freesurfer/t1/pipeline.py @@ -4,7 +4,6 @@ from nipype import config from clinica.pipelines.engine import Pipeline -from clinica.utils.input_files import QueryPattern cfg = dict(execution={"parameterize_dirs": False}) config.update_config(cfg) @@ -24,18 +23,17 @@ def get_processed_images( from clinica.utils.filemanip import extract_image_ids from clinica.utils.input_files import ( Parcellation, - QueryPatternName, - query_pattern_factory, + get_t1_freesurfer_segmentation, ) from clinica.utils.inputs import clinica_file_reader image_ids: List[str] = [] if caps_directory.is_dir(): - pattern = query_pattern_factory( - 
QueryPatternName.T1_FREESURFER_SEGMENTATION - )(Parcellation.DESTRIEUX) t1_freesurfer_files, _ = clinica_file_reader( - subjects, sessions, caps_directory, pattern + subjects, + sessions, + caps_directory, + get_t1_freesurfer_segmentation(Parcellation.DESTRIEUX), ) image_ids = extract_image_ids(t1_freesurfer_files) return image_ids @@ -104,7 +102,7 @@ def _build_input_node(self): extract_subjects_sessions_from_filename, save_participants_sessions, ) - from clinica.utils.input_files import QueryPatternName, query_pattern_factory + from clinica.utils.input_files import get_t1w_mri from clinica.utils.inputs import clinica_file_filter from clinica.utils.stream import cprint from clinica.utils.ux import print_images_to_process @@ -138,11 +136,9 @@ def _build_input_node(self): to_process_ids ) - pattern = query_pattern_factory(QueryPatternName.T1W)() t1w_files, self.subjects, self.sessions = clinica_file_filter( - self.subjects, self.sessions, self.bids_directory, pattern + self.subjects, self.sessions, self.bids_directory, get_t1w_mri() ) - if not t1w_files: raise ClinicaException("Empty dataset or already processed") diff --git a/clinica/pipelines/dwi/connectome/pipeline.py b/clinica/pipelines/dwi/connectome/pipeline.py index aa7c2f02c..fea4abe8a 100644 --- a/clinica/pipelines/dwi/connectome/pipeline.py +++ b/clinica/pipelines/dwi/connectome/pipeline.py @@ -4,6 +4,7 @@ from nipype import config from clinica.pipelines.engine import Pipeline +from clinica.utils.input_files import QueryPattern cfg = dict(execution={"parameterize_dirs": False}) config.update_config(cfg) @@ -54,50 +55,54 @@ def get_output_fields(self) -> List[str]: """ return ["response", "fod", "tracts", "nodes", "connectomes"] - def _build_input_node(self): - """Build and connect an input node to the pipeline.""" - import re - - import nipype.interfaces.utility as nutil - import nipype.pipeline.engine as npe - - from clinica.utils.exceptions import ClinicaCAPSError - from clinica.utils.filemanip import save_participants_sessions + @staticmethod + def _get_input_patterns() -> list[QueryPattern]: from clinica.utils.input_files import ( DWIFileType, Parcellation, - QueryPatternName, - query_pattern_factory, + get_dwi_preprocessed_brainmask, + get_dwi_preprocessed_file, + get_t1_freesurfer_extracted_brain, + get_t1_freesurfer_segmentation, + get_t1_freesurfer_segmentation_white_matter, ) - from clinica.utils.inputs import clinica_list_of_files_reader - from clinica.utils.stream import cprint - from clinica.utils.ux import print_images_to_process - # Read CAPS files - patterns = [ - query_pattern_factory(QueryPatternName.T1_FREESURFER_WHITE_MATTER)() - ] + patterns = [get_t1_freesurfer_segmentation_white_matter()] patterns.extend( [ - query_pattern_factory(QueryPatternName.T1_FREESURFER_SEGMENTATION)(p) + get_t1_freesurfer_segmentation(p) for p in (Parcellation.DESIKAN, Parcellation.DESTRIEUX) ] ) - patterns.append(query_pattern_factory(QueryPatternName.T1_FREESURFER_BRAIN)()) + patterns.append(get_t1_freesurfer_extracted_brain()) patterns.extend( [ - query_pattern_factory(QueryPatternName.DWI_PREPROC)(file_type) + get_dwi_preprocessed_file(file_type) for file_type in (DWIFileType.NII, DWIFileType.BVEC, DWIFileType.BVAL) ] ) - patterns.append(query_pattern_factory(QueryPatternName.DWI_PREPROC_BRAINMASK)()) + patterns.append(get_dwi_preprocessed_brainmask()) + return patterns + + def _build_input_node(self): + """Build and connect an input node to the pipeline.""" + import re + + import nipype.interfaces.utility as nutil + import 
nipype.pipeline.engine as npe + + from clinica.utils.exceptions import ClinicaCAPSError + from clinica.utils.filemanip import save_participants_sessions + from clinica.utils.inputs import clinica_list_of_files_reader + from clinica.utils.stream import cprint + from clinica.utils.ux import print_images_to_process + list_caps_files = clinica_list_of_files_reader( self.subjects, self.sessions, self.caps_directory, - patterns, + self._get_input_patterns(), ) - # Check space of DWI dataset dwi_file_spaces = [ re.search( diff --git a/clinica/pipelines/dwi/dti/pipeline.py b/clinica/pipelines/dwi/dti/pipeline.py index d439b2dfa..d3bb148b5 100644 --- a/clinica/pipelines/dwi/dti/pipeline.py +++ b/clinica/pipelines/dwi/dti/pipeline.py @@ -70,18 +70,18 @@ def _build_input_node(self): from clinica.utils.filemanip import save_participants_sessions from clinica.utils.input_files import ( DWIFileType, - QueryPatternName, - query_pattern_factory, + get_dwi_preprocessed_brainmask, + get_dwi_preprocessed_file, ) from clinica.utils.inputs import clinica_list_of_files_reader from clinica.utils.stream import cprint from clinica.utils.ux import print_images_to_process patterns = [ - query_pattern_factory(QueryPatternName.DWI_PREPROC)(file_type) + get_dwi_preprocessed_file(file_type) for file_type in (DWIFileType.NII, DWIFileType.BVEC, DWIFileType.BVAL) ] - patterns.append(query_pattern_factory(QueryPatternName.DWI_PREPROC_BRAINMASK)()) + patterns.append(get_dwi_preprocessed_brainmask()) list_caps_files = clinica_list_of_files_reader( self.subjects, self.sessions, diff --git a/clinica/pipelines/dwi/preprocessing/fmap/pipeline.py b/clinica/pipelines/dwi/preprocessing/fmap/pipeline.py index dc94e7b9a..c9faf7571 100644 --- a/clinica/pipelines/dwi/preprocessing/fmap/pipeline.py +++ b/clinica/pipelines/dwi/preprocessing/fmap/pipeline.py @@ -6,8 +6,10 @@ from clinica.pipelines.dwi.preprocessing.engine import DWIPreprocessingPipeline from clinica.utils.input_files import ( DWIFileType, - QueryPatternName, - query_pattern_factory, + get_dwi_file, + get_dwi_fmap_magnitude1_file, + get_dwi_fmap_phasediff_file, + get_dwi_preprocessed_file, ) # Use hash instead of parameters for iterables folder names @@ -39,11 +41,11 @@ def get_processed_images( image_ids: List[str] = [] if caps_directory.is_dir(): - pattern = query_pattern_factory(QueryPatternName.DWI_PREPROC)( - DWIFileType.NII - ) preproc_files, _ = clinica_file_reader( - subjects, sessions, caps_directory, pattern + subjects, + sessions, + caps_directory, + get_dwi_preprocessed_file(DWIFileType.NII), ) image_ids = extract_image_ids(preproc_files) return image_ids @@ -109,7 +111,7 @@ def _build_input_node(self): from clinica.utils.ux import print_images_to_process patterns = [ - query_pattern_factory(QueryPatternName.DWI)(file_type) + get_dwi_file(file_type) for file_type in ( DWIFileType.NII, DWIFileType.BVEC, @@ -117,12 +119,10 @@ def _build_input_node(self): DWIFileType.JSON, ) ] - patterns.append( - query_pattern_factory(QueryPatternName.DWI_FMAP_MAGNITUDE1)(DWIFileType.NII) - ) + patterns.append(get_dwi_fmap_magnitude1_file(DWIFileType.NII)) patterns.extend( [ - query_pattern_factory(QueryPatternName.DWI_FMAP_PHASEDIFF)(file_type) + get_dwi_fmap_phasediff_file(file_type) for file_type in (DWIFileType.NII, DWIFileType.JSON) ] ) diff --git a/clinica/pipelines/dwi/preprocessing/t1/pipeline.py b/clinica/pipelines/dwi/preprocessing/t1/pipeline.py index 0e86a2f42..197b5c885 100644 --- a/clinica/pipelines/dwi/preprocessing/t1/pipeline.py +++ 
b/clinica/pipelines/dwi/preprocessing/t1/pipeline.py @@ -6,8 +6,9 @@ from clinica.pipelines.dwi.preprocessing.engine import DWIPreprocessingPipeline from clinica.utils.input_files import ( DWIFileType, - QueryPatternName, - query_pattern_factory, + get_dwi_file, + get_dwi_preprocessed_file, + get_t1w_mri, ) from clinica.utils.inputs import clinica_file_reader @@ -43,11 +44,11 @@ def get_processed_images( image_ids: List[str] = [] if caps_directory.is_dir(): - pattern = query_pattern_factory(QueryPatternName.DWI_PREPROC)( - DWIFileType.NII - ) preproc_files, _ = clinica_file_reader( - subjects, sessions, caps_directory, pattern + subjects, + sessions, + caps_directory, + get_dwi_preprocessed_file(DWIFileType.NII), ) image_ids = extract_image_ids(preproc_files) return image_ids @@ -103,7 +104,7 @@ def filter_qc(self) -> tuple[list[str], list[str]]: subjects = [] sessions = [] patterns = [ - query_pattern_factory(QueryPatternName.DWI)(file_type) + get_dwi_file(file_type) for file_type in (DWIFileType.NII, DWIFileType.BVEC, DWIFileType.BVAL) ] list_bids_files = clinica_list_of_files_reader( @@ -145,7 +146,7 @@ def _build_input_node(self): from clinica.utils.ux import print_images_to_process patterns = [ - query_pattern_factory(QueryPatternName.DWI)(file_type) + get_dwi_file(file_type) for file_type in ( DWIFileType.NII, DWIFileType.JSON, @@ -153,7 +154,7 @@ def _build_input_node(self): DWIFileType.BVAL, ) ] - patterns.insert(0, query_pattern_factory(QueryPatternName.T1W)()) + patterns.insert(0, get_t1w_mri()) list_bids_files = clinica_list_of_files_reader( self.subjects, self.sessions, diff --git a/clinica/pipelines/machine_learning_spatial_svm/spatial_svm_pipeline.py b/clinica/pipelines/machine_learning_spatial_svm/spatial_svm_pipeline.py index f005c6aab..e66ffab88 100644 --- a/clinica/pipelines/machine_learning_spatial_svm/spatial_svm_pipeline.py +++ b/clinica/pipelines/machine_learning_spatial_svm/spatial_svm_pipeline.py @@ -1,11 +1,10 @@ -from pathlib import Path from typing import List from clinica.pipelines.engine import GroupPipeline from clinica.utils.input_files import ( - QueryPattern, - QueryPatternName, - query_pattern_factory, + get_pet_volume_normalized_suvr, + get_t1_volume_group_template, + get_t1_volume_template_tpm_in_mni, ) @@ -64,6 +63,7 @@ def _build_input_node(self): clinica_group_reader, format_clinica_file_reader_errors, ) + from clinica.utils.spm import SPMTissue from clinica.utils.ux import print_groups_in_caps_directory if not self.group_directory.exists(): @@ -81,16 +81,8 @@ def _build_input_node(self): ) all_errors = [] if self.parameters["orig_input_data_ml"] == "t1-volume": - pattern = QueryPattern( - str( - Path("t1") - / "spm" - / "dartel" - / str(self.group_id) - / "*_T1w_segm-graymatter_space-Ixi549Space_modulated-on_probability.nii.gz" - ), - "graymatter tissue segmented in T1w MRI in Ixi549 space", - "t1-volume-tissue-segmentation", + pattern = get_t1_volume_template_tpm_in_mni( + self.group_id, SPMTissue.GRAY_MATTER, modulation=True ) elif self.parameters["orig_input_data_ml"] == "pet-volume": if not ( @@ -103,11 +95,9 @@ def _build_input_node(self): f"- suvr_reference_region: {self.parameters['suvr_reference_region']}\n" f"- use_pvc_data: {self.parameters['use_pvc_data']}\n" ) - pattern = query_pattern_factory( - QueryPatternName.PET_VOLUME_NORMALIZED_SUVR - )( + pattern = get_pet_volume_normalized_suvr( tracer=self.parameters["acq_label"], - group_label=self.parameters["group_label"], + group_id=self.group_id, 
suvr_reference_region=self.parameters["suvr_reference_region"], use_brainmasked_image=False, use_pvc_data=self.parameters["use_pvc_data"], @@ -123,10 +113,9 @@ def _build_input_node(self): if caps_error: all_errors.append(format_clinica_file_reader_errors(caps_error, pattern)) try: - pattern = query_pattern_factory(QueryPatternName.T1_VOLUME_GROUP_TEMPLATE)( - self.group_label + dartel_input = clinica_group_reader( + self.caps_directory, get_t1_volume_group_template(self.group_id) ) - dartel_input = clinica_group_reader(self.caps_directory, pattern) except ClinicaException as e: all_errors.append(e) if any(all_errors): diff --git a/clinica/pipelines/pet/linear/pipeline.py b/clinica/pipelines/pet/linear/pipeline.py index 374aef2c0..835a6a5c6 100644 --- a/clinica/pipelines/pet/linear/pipeline.py +++ b/clinica/pipelines/pet/linear/pipeline.py @@ -5,6 +5,7 @@ from nipype import config from clinica.pipelines.pet.engine import PETPipeline +from clinica.utils.input_files import get_t1w_linear cfg = dict(execution={"parameterize_dirs": False}) config.update_config(cfg) @@ -62,7 +63,11 @@ def _build_input_node(self): from clinica.pipelines.pet.utils import get_suvr_mask from clinica.utils.exceptions import ClinicaBIDSError, ClinicaCAPSError from clinica.utils.image import get_mni_template - from clinica.utils.input_files import QueryPatternName, query_pattern_factory + from clinica.utils.input_files import ( + get_t1w_linear, + get_t1w_mri, + get_t1w_to_mni_transform, + ) from clinica.utils.inputs import ( clinica_file_reader, format_clinica_file_reader_errors, @@ -86,8 +91,7 @@ def _build_input_node(self): pet_errors, self._get_pet_scans_query() ) ) - - pattern = query_pattern_factory(QueryPatternName.T1W)() + pattern = get_t1w_mri() t1w_files, t1w_errors = clinica_file_reader( self.subjects, self.sessions, self.bids_directory, pattern ) @@ -95,14 +99,11 @@ def _build_input_node(self): raise ClinicaBIDSError( format_clinica_file_reader_errors(t1w_errors, pattern) ) - - # Inputs from t1-linear pipeline - # T1w images registered - pattern = query_pattern_factory(QueryPatternName.T1W_LINEAR)( + t1w_linear_file_pattern = get_t1w_linear( cropped=not self.parameters.get("uncropped_image", False) ) t1w_linear_files, t1w_linear_errors = clinica_file_reader( - self.subjects, self.sessions, self.caps_directory, pattern + self.subjects, self.sessions, self.caps_directory, t1w_linear_file_pattern ) if t1w_linear_errors: raise ClinicaCAPSError( @@ -110,8 +111,7 @@ def _build_input_node(self): t1w_linear_errors, t1w_linear_file_pattern ) ) - # Transformation files from T1w files to MNI: - pattern = query_pattern_factory(QueryPatternName.T1W_TO_MNI_TRANSFORM)() + pattern = get_t1w_to_mni_transform() t1w_to_mni_transformation_files, t1w_to_mni_errors = clinica_file_reader( self.subjects, self.sessions, self.caps_directory, pattern ) @@ -119,7 +119,6 @@ def _build_input_node(self): raise ClinicaCAPSError( format_clinica_file_reader_errors(t1w_to_mni_errors, pattern) ) - if len(self.subjects): print_images_to_process(self.subjects, self.sessions) cprint("The pipeline will last approximately 3 minutes per image.") diff --git a/clinica/pipelines/pet/volume/pipeline.py b/clinica/pipelines/pet/volume/pipeline.py index ee57fa6cf..9cad71d4e 100644 --- a/clinica/pipelines/pet/volume/pipeline.py +++ b/clinica/pipelines/pet/volume/pipeline.py @@ -1,10 +1,15 @@ -from typing import List, Optional +from typing import List from nipype import config from clinica.pipelines.engine import GroupPipeline from 
clinica.pipelines.pet.engine import PETPipeline -from clinica.utils.input_files import QueryPatternName, query_pattern_factory +from clinica.utils.input_files import ( + get_t1_volume_deformation_to_template, + get_t1_volume_group_template, + get_t1_volume_tpm, + get_t1w_mri, +) # Use hash instead of parameters for iterables folder names # Otherwise path will be too long and generate OSError @@ -127,7 +132,7 @@ def _build_input_node(self): self.bids_directory, [ self._get_pet_scans_query(), - query_pattern_factory(QueryPatternName.T1W)(), + get_t1w_mri(), ], ) except ClinicaException as e: @@ -140,9 +145,7 @@ def _build_input_node(self): self.sessions, self.caps_directory, [ - query_pattern_factory(QueryPatternName.T1_VOLUME_TPM)( - tissue_number, modulation=False, mni_space=True - ) + get_t1_volume_tpm(tissue_number, modulation=False, mni_space=True) for tissue_number in self.parameters["mask_tissues"] ], ) @@ -157,9 +160,7 @@ def _build_input_node(self): all_errors += e # Flowfields - pattern = query_pattern_factory( - QueryPatternName.T1_VOLUME_DEFORMATION_TO_TEMPLATE - )(self.group_label) + pattern = get_t1_volume_deformation_to_template(self.group_id) flowfields_caps, flowfields_errors = clinica_file_reader( self.subjects, self.sessions, @@ -173,10 +174,9 @@ def _build_input_node(self): # Dartel Template try: - pattern = query_pattern_factory(QueryPatternName.T1_VOLUME_GROUP_TEMPLATE)( - self.group_label + final_template = clinica_group_reader( + self.caps_directory, get_t1_volume_group_template(self.group_id) ) - final_template = clinica_group_reader(self.caps_directory, pattern) except ClinicaException as e: all_errors.append(e) @@ -196,9 +196,7 @@ def _build_input_node(self): # pvc tissues input try: patterns = [ - query_pattern_factory(QueryPatternName.T1_VOLUME_TPM)( - tissue_number, modulation=False, mni_space=False - ) + get_t1_volume_tpm(tissue_number, modulation=False, mni_space=False) for tissue_number in self.parameters["pvc_mask_tissues"] ] pvc_tissues_input = clinica_list_of_files_reader( diff --git a/clinica/pipelines/pet_surface/pet_surface_pipeline.py b/clinica/pipelines/pet_surface/pet_surface_pipeline.py index b51e0599c..d040dc9eb 100644 --- a/clinica/pipelines/pet_surface/pet_surface_pipeline.py +++ b/clinica/pipelines/pet_surface/pet_surface_pipeline.py @@ -2,11 +2,7 @@ from clinica.pipelines.pet.engine import PETPipeline from clinica.utils.image import HemiSphere -from clinica.utils.input_files import ( - Parcellation, - QueryPatternName, - query_pattern_factory, -) +from clinica.utils.input_files import Parcellation class PetSurface(PETPipeline): @@ -82,6 +78,11 @@ def _build_input_node_longitudinal(self): check_relative_volume_location_in_world_coordinate_system, ) from clinica.utils.exceptions import ClinicaException + from clinica.utils.input_files import ( + get_t1_freesurfer_longitudinal_intensity_normalized_volume_after_nu, + get_t1_freesurfer_longitudinal_parcellation, + get_t1_freesurfer_longitudinal_white_matter_surface, + ) from clinica.utils.inputs import ( clinica_file_reader, clinica_list_of_files_reader, @@ -112,21 +113,17 @@ def _build_input_node_longitudinal(self): ) patterns = [ - query_pattern_factory(QueryPatternName.T1_FREESURFER_LONG_ORIG_NU)() + get_t1_freesurfer_longitudinal_intensity_normalized_volume_after_nu() ] patterns.extend( [ - query_pattern_factory(QueryPatternName.T1_FREESURFER_LONG_SURFACE)( - hemisphere - ) + get_t1_freesurfer_longitudinal_white_matter_surface(hemisphere) for hemisphere in (HemiSphere.RIGHT, HemiSphere.LEFT) ] ) 
patterns.extend( [ - query_pattern_factory(QueryPatternName.T1_FREESURFER_LONG_PARCELLATION)( - hemisphere, parcellation - ) + get_t1_freesurfer_longitudinal_parcellation(hemisphere, parcellation) for parcellation in (Parcellation.DESTRIEUX, Parcellation.DESIKAN) for hemisphere in (HemiSphere.LEFT, HemiSphere.RIGHT) ] @@ -188,6 +185,11 @@ def _build_input_node_cross_sectional(self): check_relative_volume_location_in_world_coordinate_system, ) from clinica.utils.exceptions import ClinicaException + from clinica.utils.input_files import ( + get_t1_freesurfer_intensity_normalized_volume_after_nu, + get_t1_freesurfer_parcellation, + get_t1_freesurfer_white_matter_surface, + ) from clinica.utils.inputs import ( clinica_file_reader, clinica_list_of_files_reader, @@ -212,20 +214,16 @@ def _build_input_node_cross_sectional(self): if pet_errors: all_errors.append(format_clinica_file_reader_errors(pet_errors)) - patterns = [query_pattern_factory(QueryPatternName.T1_FREESURFER_ORIG_NU)()] + patterns = [get_t1_freesurfer_intensity_normalized_volume_after_nu()] patterns.extend( [ - query_pattern_factory( - QueryPatternName.T1_FREESURFER_WHITE_MATTER_SURFACE - )(hemisphere) + get_t1_freesurfer_white_matter_surface(hemisphere) for hemisphere in (HemiSphere.RIGHT, HemiSphere.LEFT) ] ) patterns.extend( [ - query_pattern_factory(QueryPatternName.T1_FREESURFER_PARCELLATION)( - hemisphere, parcellation - ) + get_t1_freesurfer_parcellation(hemisphere, parcellation) for parcellation in (Parcellation.DESTRIEUX, Parcellation.DESIKAN) for hemisphere in (HemiSphere.LEFT, HemiSphere.RIGHT) ] diff --git a/clinica/pipelines/statistics_surface/pipeline.py b/clinica/pipelines/statistics_surface/pipeline.py index 06b90ab0c..5523aff24 100644 --- a/clinica/pipelines/statistics_surface/pipeline.py +++ b/clinica/pipelines/statistics_surface/pipeline.py @@ -119,10 +119,9 @@ def _build_input_node(self): # Note(AR): if the user wants to compare Cortical Thickness measure with PET measure # using the group_id, Clinica won't allow it. # TODO: Modify this behaviour - group_folder = self.caps_directory / "groups" / str(self.group_id) - if group_folder.exists(): + if self.group_directory.exists(): raise ClinicaException( - f"Group label {self.group_label} already exists (found in {group_folder})." + f"Group label {self.group_label} already exists (found in {self.group_directory})." "Please choose another one or delete the existing folder and " "also the working directory and rerun the pipeline." 
) diff --git a/clinica/pipelines/statistics_volume/statistics_volume_pipeline.py b/clinica/pipelines/statistics_volume/statistics_volume_pipeline.py index 0462ab237..c0458b26a 100644 --- a/clinica/pipelines/statistics_volume/statistics_volume_pipeline.py +++ b/clinica/pipelines/statistics_volume/statistics_volume_pipeline.py @@ -1,11 +1,7 @@ from typing import List from clinica.pipelines.engine import GroupPipeline -from clinica.utils.input_files import ( - QueryPattern, - QueryPatternName, - query_pattern_factory, -) +from clinica.utils.input_files import QueryPattern from clinica.utils.pet import SUVRReferenceRegion, Tracer @@ -98,10 +94,18 @@ def _build_input_node(self): import nipype.pipeline.engine as npe from clinica.utils.exceptions import ClinicaException + from clinica.utils.group import GroupID, GroupLabel + from clinica.utils.input_files import ( + get_pet_volume_normalized_suvr, + get_t1_volume_template_tpm_in_mni, + ) from clinica.utils.inputs import clinica_file_filter from clinica.utils.stream import cprint from clinica.utils.ux import print_begin_image, print_images_to_process + dartel_group_id = GroupID.from_label( + GroupLabel(self.parameters["group_label_dartel"]) + ) if self.parameters["orig_input_data_volume"] == "pet-volume": if not ( self.parameters["acq_label"] @@ -115,11 +119,9 @@ def _build_input_node(self): ) self.parameters["measure_label"] = self.parameters["acq_label"].value - pattern = query_pattern_factory( - QueryPatternName.PET_VOLUME_NORMALIZED_SUVR - )( + pattern = get_pet_volume_normalized_suvr( tracer=self.parameters["acq_label"], - group_label=self.parameters["group_label_dartel"], + group_id=dartel_group_id, suvr_reference_region=self.parameters["suvr_reference_region"], use_brainmasked_image=True, use_pvc_data=self.parameters["use_pvc_data"], @@ -127,15 +129,12 @@ def _build_input_node(self): ) elif self.parameters["orig_input_data_volume"] == "t1-volume": self.parameters["measure_label"] = "graymatter" - pattern = query_pattern_factory( - QueryPatternName.T1_VOLUME_TEMPLATE_TPM_IN_MNI - )( - group_label=self.parameters["group_label_dartel"], - tissue_number=1, + pattern = get_t1_volume_template_tpm_in_mni( + group_id=dartel_group_id, + tissue=1, modulation=True, fwhm=self.parameters["full_width_at_half_maximum"], ) - elif self.parameters["orig_input_data_volume"] == "custom-pipeline": if not self.parameters["custom_file"]: raise ClinicaException( diff --git a/clinica/pipelines/t1_linear/anat_linear_pipeline.py b/clinica/pipelines/t1_linear/anat_linear_pipeline.py index 125b3046d..08fd09a21 100644 --- a/clinica/pipelines/t1_linear/anat_linear_pipeline.py +++ b/clinica/pipelines/t1_linear/anat_linear_pipeline.py @@ -72,17 +72,16 @@ def get_processed_images( caps_directory: Path, subjects: List[str], sessions: List[str] ) -> List[str]: from clinica.utils.filemanip import extract_image_ids - from clinica.utils.input_files import QueryPatternName, query_pattern_factory + from clinica.utils.input_files import get_t1w_linear from clinica.utils.inputs import clinica_file_reader image_ids: List[str] = [] if caps_directory.is_dir(): - pattern = query_pattern_factory(QueryPatternName.T1W_LINEAR)(cropped=True) cropped_files, _ = clinica_file_reader( subjects, sessions, caps_directory, - pattern, + get_t1w_linear(cropped=True), ) image_ids = extract_image_ids(cropped_files) return image_ids @@ -122,7 +121,7 @@ def _build_input_node(self): from clinica.utils.filemanip import extract_subjects_sessions_from_filename from clinica.utils.image import get_mni_template 
- from clinica.utils.input_files import QueryPatternName, query_pattern_factory + from clinica.utils.input_files import get_t1w_mri, get_t2w_mri from clinica.utils.inputs import clinica_file_filter from clinica.utils.stream import cprint from clinica.utils.ux import print_images_to_process @@ -155,9 +154,7 @@ def _build_input_node(self): # Inputs from anat/ folder # ======================== # anat image file: - pattern = query_pattern_factory( - QueryPatternName.T1W if self.name == "t1-linear" else QueryPatternName.T2W - )() + pattern = get_t1w_mri() if self.name == "t1-linear" else get_t2w_mri() anat_files, filtered_subjects, filtered_sessions = clinica_file_filter( self.subjects, self.sessions, self.bids_directory, pattern ) diff --git a/clinica/pipelines/t1_volume_create_dartel/t1_volume_create_dartel_pipeline.py b/clinica/pipelines/t1_volume_create_dartel/t1_volume_create_dartel_pipeline.py index e594d0c7e..169cc77a1 100644 --- a/clinica/pipelines/t1_volume_create_dartel/t1_volume_create_dartel_pipeline.py +++ b/clinica/pipelines/t1_volume_create_dartel/t1_volume_create_dartel_pipeline.py @@ -46,7 +46,7 @@ def _build_input_node(self): import nipype.pipeline.engine as npe from clinica.utils.exceptions import ClinicaException - from clinica.utils.input_files import QueryPatternName, query_pattern_factory + from clinica.utils.input_files import get_t1_volume_dartel_input_tissue from clinica.utils.inputs import clinica_list_of_files_reader from clinica.utils.stream import cprint from clinica.utils.ux import ( @@ -83,9 +83,7 @@ def _build_input_node(self): ), ) patterns = [ - query_pattern_factory(QueryPatternName.T1_VOLUME_DARTEL_INPUT_TISSUE)( - tissue_number - ) + get_t1_volume_dartel_input_tissue(tissue_number) for tissue_number in self.parameters["dartel_tissues"] ] try: diff --git a/clinica/pipelines/t1_volume_dartel2mni/t1_volume_dartel2mni_pipeline.py b/clinica/pipelines/t1_volume_dartel2mni/t1_volume_dartel2mni_pipeline.py index 5aec7265b..91573398b 100644 --- a/clinica/pipelines/t1_volume_dartel2mni/t1_volume_dartel2mni_pipeline.py +++ b/clinica/pipelines/t1_volume_dartel2mni/t1_volume_dartel2mni_pipeline.py @@ -47,7 +47,11 @@ def _build_input_node(self): import nipype.pipeline.engine as npe from clinica.utils.exceptions import ClinicaCAPSError, ClinicaException - from clinica.utils.input_files import QueryPatternName, query_pattern_factory + from clinica.utils.input_files import ( + get_t1_volume_deformation_to_template, + get_t1_volume_group_template, + get_t1_volume_tpm, + ) from clinica.utils.inputs import ( clinica_file_reader, clinica_group_reader, @@ -76,9 +80,7 @@ def _build_input_node(self): # Segmented Tissues # ================= patterns = [ - query_pattern_factory(QueryPatternName.T1_VOLUME_TPM)( - tissue_number, modulation=False, mni_space=False - ) + get_t1_volume_tpm(tissue_number, modulation=False, mni_space=False) for tissue_number in self.parameters["tissues"] ] try: @@ -100,9 +102,7 @@ def _build_input_node(self): # Flow Fields # =========== - pattern = query_pattern_factory( - QueryPatternName.T1_VOLUME_DEFORMATION_TO_TEMPLATE - )(self.group_label) + pattern = get_t1_volume_deformation_to_template(self.group_id) read_input_node.inputs.flowfield_files, flowfield_errors = clinica_file_reader( self.subjects, self.sessions, @@ -110,16 +110,15 @@ def _build_input_node(self): pattern, ) if flowfield_errors: - all_errors.append(format_clinica_file_reader_errors(flowfield_errors)) + all_errors.append( + format_clinica_file_reader_errors(flowfield_errors, pattern) + ) # 
Dartel Template # ================ - pattern = query_pattern_factory(QueryPatternName.T1_VOLUME_GROUP_TEMPLATE)( - self.group_label - ) try: read_input_node.inputs.template_file = clinica_group_reader( - self.caps_directory, pattern + self.caps_directory, get_t1_volume_group_template(self.group_id) ) except ClinicaException as e: all_errors.append(e) diff --git a/clinica/pipelines/t1_volume_parcellation/t1_volume_parcellation_pipeline.py b/clinica/pipelines/t1_volume_parcellation/t1_volume_parcellation_pipeline.py index 9a177d8fb..c816f7dc9 100644 --- a/clinica/pipelines/t1_volume_parcellation/t1_volume_parcellation_pipeline.py +++ b/clinica/pipelines/t1_volume_parcellation/t1_volume_parcellation_pipeline.py @@ -47,7 +47,7 @@ def _build_input_node(self): import nipype.pipeline.engine as npe from clinica.utils.exceptions import ClinicaException - from clinica.utils.input_files import QueryPatternName, query_pattern_factory + from clinica.utils.input_files import get_t1_volume_template_tpm_in_mni from clinica.utils.inputs import clinica_file_filter from clinica.utils.stream import cprint from clinica.utils.ux import ( @@ -61,9 +61,9 @@ def _build_input_node(self): f"Group {self.group_label} does not exist. " "Did you run t1-volume or t1-volume-create-dartel pipeline?" ) - pattern = query_pattern_factory(QueryPatternName.T1_VOLUME_TEMPLATE_TPM_IN_MNI)( - group_label=self.group_label, - tissue_number=1, + pattern = get_t1_volume_template_tpm_in_mni( + group_id=self.group_id, + tissue=1, modulation=self.parameters["modulate"], ) gm_mni, self.subjects, self.sessions = clinica_file_filter( diff --git a/clinica/pipelines/t1_volume_register_dartel/t1_volume_register_dartel_pipeline.py b/clinica/pipelines/t1_volume_register_dartel/t1_volume_register_dartel_pipeline.py index 5d57f467d..2152ffea5 100644 --- a/clinica/pipelines/t1_volume_register_dartel/t1_volume_register_dartel_pipeline.py +++ b/clinica/pipelines/t1_volume_register_dartel/t1_volume_register_dartel_pipeline.py @@ -44,7 +44,10 @@ def _build_input_node(self): import nipype.pipeline.engine as npe from clinica.utils.exceptions import ClinicaCAPSError, ClinicaException - from clinica.utils.input_files import QueryPatternName, query_pattern_factory + from clinica.utils.input_files import ( + get_t1_volume_dartel_input_tissue, + get_t1_volume_i_th_iteration_group_template, + ) from clinica.utils.inputs import ( clinica_group_reader, clinica_list_of_files_reader, @@ -61,9 +64,7 @@ def _build_input_node(self): # Dartel Input Tissues # ==================== patterns = [ - query_pattern_factory(QueryPatternName.T1_VOLUME_DARTEL_INPUT_TISSUE)( - tissue_number - ) + get_t1_volume_dartel_input_tissue(tissue_number) for tissue_number in self.parameters["tissues"] ] try: @@ -79,14 +80,11 @@ def _build_input_node(self): # Dartel Templates # ================ - patterns = [ - query_pattern_factory(QueryPatternName.T1_VOLUME_ITERATION_GROUP_TEMPLATE)( - self.group_label, i - ) - for i in range(1, 7) - ] dartel_iter_templates = [] - for pattern in patterns: + for pattern in [ + get_t1_volume_i_th_iteration_group_template(self.group_id, i) + for i in range(1, 7) + ]: try: dartel_iter_templates.append( clinica_group_reader(self.caps_directory, pattern) diff --git a/clinica/pipelines/t1_volume_tissue_segmentation/t1_volume_tissue_segmentation_pipeline.py b/clinica/pipelines/t1_volume_tissue_segmentation/t1_volume_tissue_segmentation_pipeline.py index c15f8c18a..62390780d 100644 --- 
a/clinica/pipelines/t1_volume_tissue_segmentation/t1_volume_tissue_segmentation_pipeline.py +++ b/clinica/pipelines/t1_volume_tissue_segmentation/t1_volume_tissue_segmentation_pipeline.py @@ -75,7 +75,7 @@ def _build_input_node(self): from clinica.iotools.utils.data_handling import ( check_volume_location_in_world_coordinate_system, ) - from clinica.utils.input_files import QueryPatternName, query_pattern_factory + from clinica.utils.input_files import get_t1w_mri from clinica.utils.inputs import clinica_file_filter from clinica.utils.stream import cprint from clinica.utils.ux import print_images_to_process @@ -83,9 +83,8 @@ def _build_input_node(self): # Inputs from anat/ folder # ======================== # T1w file: - pattern = query_pattern_factory(QueryPatternName.T1W)() t1w_files, subjects, sessions = clinica_file_filter( - self.subjects, self.sessions, self.bids_directory, pattern + self.subjects, self.sessions, self.bids_directory, get_t1w_mri() ) self.subjects = subjects self.sessions = sessions diff --git a/clinica/pydra/query.py b/clinica/pydra/query.py index 1a0c1563a..a8c5a397f 100644 --- a/clinica/pydra/query.py +++ b/clinica/pydra/query.py @@ -1,12 +1,7 @@ import abc -from functools import partial from typing import Callable, Dict, Optional -from clinica.utils.input_files import ( - QueryPattern, - QueryPatternName, - query_pattern_factory, -) +from clinica.utils.input_files import QueryPattern class Query: @@ -269,35 +264,32 @@ class CAPSFileQuery(CAPSQuery): from functools import partial - from clinica.utils.input_files import QueryPatternName, query_pattern_factory + from clinica.utils.input_files import ( + get_pet_volume_normalized_suvr, + get_t1_volume_dartel_input_tissue, + get_t1_volume_deformation_to_template, + get_t1_volume_template_tpm_in_mni, + get_t1_volume_tpm, + get_t1w_to_mni_transform, + ) _query_makers = { "tissues": partial( - query_pattern_factory(QueryPatternName.T1_VOLUME_TPM), + get_t1_volume_tpm, mni_space=False, modulation=False, ), - "mask_tissues": partial( - query_pattern_factory(QueryPatternName.T1_VOLUME_TPM), mni_space=True - ), - "flow_fields": query_pattern_factory( - QueryPatternName.T1_VOLUME_DEFORMATION_TO_TEMPLATE - ), + "mask_tissues": partial(get_t1_volume_tpm, mni_space=True), + "flow_fields": get_t1_volume_deformation_to_template, "pvc_mask_tissues": partial( - query_pattern_factory(QueryPatternName.T1_VOLUME_TPM), + get_t1_volume_tpm, mni_space=False, modulation=False, ), - "dartel_input_tissue": query_pattern_factory( - QueryPatternName.T1_VOLUME_DARTEL_INPUT_TISSUE - ), - "t1w_to_mni": query_pattern_factory(QueryPatternName.T1W_TO_MNI_TRANSFORM), - "pet_volume": query_pattern_factory( - QueryPatternName.PET_VOLUME_NORMALIZED_SUVR - ), - "t1_volume": query_pattern_factory( - QueryPatternName.T1_VOLUME_TEMPLATE_TPM_IN_MNI - ), + "dartel_input_tissue": get_t1_volume_dartel_input_tissue, + "t1w_to_mni": get_t1w_to_mni_transform, + "pet_volume": get_pet_volume_normalized_suvr, + "t1_volume": get_t1_volume_template_tpm_in_mni, } @@ -324,15 +316,14 @@ class CAPSGroupQuery(CAPSQuery): } """ - from clinica.utils.input_files import QueryPatternName, query_pattern_factory + from clinica.utils.input_files import ( + get_t1_volume_group_template, + get_t1_volume_i_th_iteration_group_template, + ) _query_makers = { - "dartel_template": query_pattern_factory( - QueryPatternName.T1_VOLUME_GROUP_TEMPLATE - ), - "dartel_iteration_templates": query_pattern_factory( - QueryPatternName.T1_VOLUME_ITERATION_GROUP_TEMPLATE - ), + "dartel_template": 
get_t1_volume_group_template, + "dartel_iteration_templates": get_t1_volume_i_th_iteration_group_template, } diff --git a/clinica/utils/input_files.py b/clinica/utils/input_files.py index 2cec23de4..3a9535393 100644 --- a/clinica/utils/input_files.py +++ b/clinica/utils/input_files.py @@ -6,35 +6,51 @@ import functools from collections.abc import Iterable from dataclasses import dataclass -from enum import Enum, auto +from enum import Enum from pathlib import Path -from typing import Callable, Optional, Union +from typing import Optional, Union from clinica.utils.dwi import DTIBasedMeasure +from clinica.utils.group import GroupID from clinica.utils.image import HemiSphere from clinica.utils.pet import ReconstructionMethod, SUVRReferenceRegion, Tracer -from .spm import get_spm_tissue_from_index +from .spm import SPMTissue, get_spm_tissue_from_index __all__ = [ "DWIFileType", "Parcellation", "QueryPattern", - "QueryPatternName", - "query_pattern_factory", + "get_t1w_mri", + "get_t2w_mri", + "get_t1_freesurfer_segmentation_white_matter", + "get_t1_freesurfer_extracted_brain", + "get_t1_freesurfer_intensity_normalized_volume_after_nu", + "get_t1_freesurfer_longitudinal_intensity_normalized_volume_after_nu", + "get_t1w_to_mni_transform", "get_dwi_file", "get_dwi_preprocessed_file", + "get_dwi_preprocessed_brainmask", "get_dwi_fmap_phasediff_file", "get_dwi_fmap_magnitude1_file", "get_t1w_linear", "get_t1_freesurfer_white_matter_surface", "get_t1_freesurfer_longitudinal_white_matter_surface", "get_t1_freesurfer_segmentation", + "get_t1_freesurfer_statistics", "get_t1_freesurfer_parcellation", "get_t1_freesurfer_template", "get_t1_freesurfer_longitudinal_parcellation", "get_t1_volume_tpm", "get_t1_volume_dartel_input_tissue", + "get_t1_volume_template_tpm_in_mni", + "get_t1_volume_deformation_to_template", + "get_t1_volume_i_th_iteration_group_template", + "get_t1_volume_group_template", + "get_dwi_dti", + "get_pet_nifti", + "get_pet_volume_normalized_suvr", + "get_pet_linear_nifti", ] @@ -78,49 +94,6 @@ def to_dict(self) -> dict: } -class QueryPatternName(Enum): - """The different names for usual pattern in Clinica. 
- - T1W : Get T1W MRI in BIDS - T2W : Get T2W FLAIR MRI in BIDS - T1_FS_WM : GET Freesurfer segmentation of white matter - T1_FS_BRAIN : Get Freesurfer extracted brain from T1w MRI - T1_FS_ORIG_NU : Get Freesurfer intensity normalized volume after correction for non-uniformity - T1_FS_WM_SURF : Get white matter border surface files from the Freesurfer output - T1_FS_LONG_SURF : Get white matter border surface files from the Freesurfer longitudinal output - """ - - T1W = auto() - T1W_LINEAR = auto() - T1W_TO_MNI_TRANSFORM = auto() - T2W = auto() - T1_FREESURFER_WHITE_MATTER = auto() - T1_FREESURFER_BRAIN = auto() - T1_FREESURFER_ORIG_NU = auto() - T1_FREESURFER_LONG_ORIG_NU = auto() - T1_FREESURFER_WHITE_MATTER_SURFACE = auto() - T1_FREESURFER_LONG_SURFACE = auto() - T1_FREESURFER_PARCELLATION = auto() - T1_FREESURFER_LONG_PARCELLATION = auto() - T1_FREESURFER_SEGMENTATION = auto() - T1_FREESURFER_TEMPLATE = auto() - T1_VOLUME_TPM = auto() - T1_VOLUME_DARTEL_INPUT_TISSUE = auto() - T1_VOLUME_DEFORMATION_TO_TEMPLATE = auto() - T1_VOLUME_GROUP_TEMPLATE = auto() - T1_VOLUME_ITERATION_GROUP_TEMPLATE = auto() - T1_VOLUME_TEMPLATE_TPM_IN_MNI = auto() - DWI = auto() - DWI_PREPROC = auto() - DWI_PREPROC_BRAINMASK = auto() - DWI_FMAP_PHASEDIFF = auto() - DWI_FMAP_MAGNITUDE1 = auto() - DWI_DTI = auto() - PET_NII = auto() - PET_LINEAR_NII = auto() - PET_VOLUME_NORMALIZED_SUVR = auto() - - class Parcellation(str, Enum): """The possible atlas names used for deriving parcellations and segmentations.""" @@ -135,96 +108,17 @@ class DWIFileType(str, Enum): BVAL = "bval" -QueryPatternBuilderInterface = Callable[..., QueryPattern] - - -def query_pattern_factory( - name: Union[str, QueryPatternName], -) -> QueryPatternBuilderInterface: - """Return the query pattern builder corresponding to the provided name. - - Parameters - ---------- - name : str or QueryPatternName - The name of the desired pattern. - - Returns - ------- - QueryPatternBuilderInterface : - The desired query pattern builder. 
- """ - name = QueryPatternName(name) - if name == QueryPatternName.T1W: - return get_t1w_mri - if name == QueryPatternName.T2W: - return get_t2w_mri - if name == QueryPatternName.T1_FREESURFER_WHITE_MATTER: - return get_t1_freesurfer_segmentation_white_matter - if name == QueryPatternName.T1_FREESURFER_BRAIN: - return get_t1_freesurfer_extracted_brain - if name == QueryPatternName.T1_FREESURFER_ORIG_NU: - return get_t1_freesurfer_intensity_normalized_volume_after_nu - if name == QueryPatternName.T1_FREESURFER_LONG_ORIG_NU: - return get_t1_freesurfer_longitudinal_intensity_normalized_volume_after_nu - if name == QueryPatternName.T1_FREESURFER_WHITE_MATTER_SURFACE: - return get_t1_freesurfer_white_matter_surface - if name == QueryPatternName.T1_FREESURFER_LONG_SURFACE: - return get_t1_freesurfer_longitudinal_white_matter_surface - if name == QueryPatternName.T1_VOLUME_TPM: - return get_t1_volume_tpm - if name == QueryPatternName.T1_VOLUME_DARTEL_INPUT_TISSUE: - return get_t1_volume_dartel_input_tissue - if name == QueryPatternName.T1_VOLUME_DEFORMATION_TO_TEMPLATE: - return get_t1_volume_deformation_to_template - if name == QueryPatternName.T1_VOLUME_GROUP_TEMPLATE: - return get_t1_volume_group_template - if name == QueryPatternName.T1_VOLUME_ITERATION_GROUP_TEMPLATE: - return get_t1_volume_i_th_iteration_group_template - if name == QueryPatternName.T1_VOLUME_TEMPLATE_TPM_IN_MNI: - return get_t1_volume_template_tpm_in_mni - if name == QueryPatternName.T1W_LINEAR: - return get_t1w_linear - if name == QueryPatternName.T1W_TO_MNI_TRANSFORM: - return get_t1w_to_mni_transform - if name == QueryPatternName.T1_FREESURFER_PARCELLATION: - return get_t1_freesurfer_parcellation - if name == QueryPatternName.T1_FREESURFER_LONG_PARCELLATION: - return get_t1_freesurfer_longitudinal_parcellation - if name == QueryPatternName.T1_FREESURFER_SEGMENTATION: - return get_t1_freesurfer_segmentation - if name == QueryPatternName.T1_FREESURFER_TEMPLATE: - return get_t1_freesurfer_template - if name == QueryPatternName.DWI: - return get_dwi_file - if name == QueryPatternName.DWI_PREPROC: - return get_dwi_preprocessed_file - if name == QueryPatternName.DWI_PREPROC_BRAINMASK: - return get_dwi_preprocessed_brainmask - if name == QueryPatternName.DWI_FMAP_PHASEDIFF: - return get_dwi_fmap_phasediff_file - if name == QueryPatternName.DWI_FMAP_MAGNITUDE1: - return get_dwi_fmap_magnitude1_file - if name == QueryPatternName.DWI_DTI: - return get_dwi_dti - if name == QueryPatternName.PET_NII: - return get_pet_nifti - if name == QueryPatternName.PET_LINEAR_NII: - return get_pet_linear_nifti - if name == QueryPatternName.PET_VOLUME_NORMALIZED_SUVR: - return get_pet_volume_normalized_suvr - - -def get_t1w_mri(*args, **kwargs) -> QueryPattern: +def get_t1w_mri() -> QueryPattern: """Get T1W MRI in BIDS.""" return QueryPattern("sub-*_ses-*_t1w.nii*", "T1w MRI", "") -def get_t2w_mri(*args, **kwargs) -> QueryPattern: +def get_t2w_mri() -> QueryPattern: """Get T2W FLAIR MRI in BIDS.""" return QueryPattern("sub-*_ses-*_flair.nii*", "FLAIR T2w MRI", "") -def get_t1_freesurfer_segmentation_white_matter(*args, **kwargs) -> QueryPattern: +def get_t1_freesurfer_segmentation_white_matter() -> QueryPattern: """GET Freesurfer segmentation of white matter.""" return QueryPattern( "t1/freesurfer_cross_sectional/sub-*_ses-*/mri/wm.seg.mgz", @@ -233,7 +127,7 @@ def get_t1_freesurfer_segmentation_white_matter(*args, **kwargs) -> QueryPattern ) -def get_t1_freesurfer_extracted_brain(*args, **kwargs) -> QueryPattern: +def 
get_t1_freesurfer_extracted_brain() -> QueryPattern: return QueryPattern( "t1/freesurfer_cross_sectional/sub-*_ses-*/mri/brain.mgz", "extracted brain from T1w MRI (mri/brain.mgz).", @@ -241,9 +135,7 @@ def get_t1_freesurfer_extracted_brain(*args, **kwargs) -> QueryPattern: ) -def get_t1_freesurfer_intensity_normalized_volume_after_nu( - *args, **kwargs -) -> QueryPattern: +def get_t1_freesurfer_intensity_normalized_volume_after_nu() -> QueryPattern: return QueryPattern( "t1/freesurfer_cross_sectional/sub-*_ses-*/mri/orig_nu.mgz", ( @@ -254,9 +146,9 @@ def get_t1_freesurfer_intensity_normalized_volume_after_nu( ) -def get_t1_freesurfer_longitudinal_intensity_normalized_volume_after_nu( - *args, **kwargs -) -> QueryPattern: +def get_t1_freesurfer_longitudinal_intensity_normalized_volume_after_nu() -> ( + QueryPattern +): return QueryPattern( "t1/long-*/freesurfer_longitudinal/sub-*_ses-*.long.sub-*_*/mri/orig_nu.mgz", ( @@ -267,7 +159,7 @@ def get_t1_freesurfer_longitudinal_intensity_normalized_volume_after_nu( ) -def get_t1w_to_mni_transform(*args, **kwargs) -> QueryPattern: +def get_t1w_to_mni_transform() -> QueryPattern: return QueryPattern( "*space-MNI152NLin2009cSym_res-1x1x1_affine.mat", "Transformation matrix from T1W image to MNI space using t1-linear pipeline", @@ -292,7 +184,7 @@ def get_dwi_preprocessed_file(filetype: Union[str, DWIFileType]) -> QueryPattern ) -def get_dwi_preprocessed_brainmask(*args, **kwargs) -> QueryPattern: +def get_dwi_preprocessed_brainmask() -> QueryPattern: return QueryPattern( "dwi/preprocessing/sub-*_ses-*_space-*_brainmask.nii*", "b0 brainmask", @@ -401,6 +293,17 @@ def get_t1_freesurfer_segmentation(parcellation: Parcellation) -> QueryPattern: ) +def get_t1_freesurfer_statistics( + atlas: str, hemisphere: Union[str, HemiSphere] +) -> QueryPattern: + hemisphere = HemiSphere(hemisphere) + return QueryPattern( + f"t1/freesurfer_cross_sectional/sub-*_ses-*/stats/{hemisphere.value}.{atlas}.stats", + f"{atlas}-based segmentation", + "t1-freesurfer", + ) + + def get_t1_freesurfer_parcellation( hemisphere: Union[str, HemiSphere], parcellation: Union[str, Parcellation], @@ -633,18 +536,21 @@ def get_t1_volume_dartel_input_tissue(tissue_number: int) -> QueryPattern: def get_t1_volume_template_tpm_in_mni( - group_label: str, tissue_number: int, modulation: bool, fwhm: Optional[int] = None + group_id: GroupID, + tissue: Union[int, SPMTissue], + modulation: bool, + fwhm: Optional[int] = None, ) -> QueryPattern: """Build the pattern required by clinica_file_reader to get the tissue probability maps based on group template in MNI space. Parameters ---------- - group_label : str - Label used for the group of interest. + group_id : GroupID + The ID for the group of interest. - tissue_number : int - An integer defining the tissue of interest. + tissue : int or SPMTissue + Either the tissue of interest, or an integer defining the tissue of interest. modulation : {"on", "off"} Whether modulation is on or off. 
@@ -659,7 +565,7 @@ def get_t1_volume_template_tpm_in_mni( """ from .spm import get_spm_tissue_from_index - tissue = get_spm_tissue_from_index(tissue_number) + tissue = get_spm_tissue_from_index(tissue) pattern_modulation = "on" if modulation else "off" description_modulation = "with" if modulation else "without" fwhm_key_value = f"_fwhm-{fwhm}mm" if fwhm else "" @@ -670,56 +576,53 @@ def get_t1_volume_template_tpm_in_mni( Path("t1") / "spm" / "dartel" - / f"group-{group_label}" + / str(group_id) / f"*_T1w_segm-{tissue.value}_space-Ixi549Space_modulated-{pattern_modulation}{fwhm_key_value}_probability.nii*" ), ( - f"Tissue probability map {tissue.value} based on {group_label} template in MNI space " + f"Tissue probability map {tissue.value} based on {group_id.label} template in MNI space " f"(Ixi549) {description_modulation} modulation and {fwhm_description}." ), "t1-volume", ) -def get_t1_volume_deformation_to_template(group_label: str) -> QueryPattern: +def get_t1_volume_deformation_to_template(group_id: GroupID) -> QueryPattern: return QueryPattern( str( Path("t1") / "spm" / "dartel" - / f"group-{group_label}" - / f"sub-*_ses-*_T1w_target-{group_label}_transformation-forward_deformation.nii*" + / str(group_id) + / f"sub-*_ses-*_T1w_target-{group_id.label}_transformation-forward_deformation.nii*" ), - f"Deformation from native space to group template {group_label} space.", + f"Deformation from native space to group template {group_id.label} space.", "t1-volume-create-dartel", ) @aggregator def get_t1_volume_i_th_iteration_group_template( - group_label: str, i: int + group_id: GroupID, i: int ) -> QueryPattern: return QueryPattern( - str( - Path(f"group-{group_label}") - / "t1" - / f"group-{group_label}_iteration-{i}_template.nii*" - ), - f"Iteration #{i} of Dartel template {group_label}", + str(Path(str(group_id)) / "t1" / f"{group_id}_iteration-{i}_template.nii*"), + f"Iteration #{i} of Dartel template {group_id.label}", "t1-volume or t1-volume-create-dartel", ) -def get_t1_volume_group_template(group_label: str) -> QueryPattern: +def get_t1_volume_group_template(group_id: GroupID) -> QueryPattern: return QueryPattern( - str(Path(f"group-{group_label}") / "t1" / f"group-{group_label}_template.nii*"), - f"T1w template file of group {group_label}", + str(Path(str(group_id)) / "t1" / f"{group_id}_template.nii*"), + f"T1w template file of group {group_id.label}", "t1-volume or t1-volume-create-dartel", ) def get_dwi_dti( - measure: Union[str, DTIBasedMeasure], space: Optional[str] = None + measure: Union[str, DTIBasedMeasure], + space: Optional[str] = None, ) -> QueryPattern: """Return the query pattern required to capture DWI DTI images. 
@@ -793,7 +696,7 @@ def get_pet_nifti(
 
 def get_pet_volume_normalized_suvr(
     tracer: Union[str, Tracer],
-    group_label: str,
+    group_id: GroupID,
     suvr_reference_region: Union[str, SUVRReferenceRegion],
     use_brainmasked_image: bool,
     use_pvc_data: bool,
@@ -826,12 +729,12 @@ def get_pet_volume_normalized_suvr(
         str(
             Path("pet")
             / "preprocessing"
-            / f"group-{group_label}"
+            / str(group_id)
             / f"*_trc-{tracer.value}_pet_space-Ixi549Space{pvc_key_value}{suvr_key_value}{mask_key_value}{fwhm_key_value}_pet.nii*"
         ),
         (
             f"{mask_description} SUVR map (using {region.value} region) of {tracer.value}-PET "
-            f"{pvc_description} and {fwhm_description} in Ixi549Space space based on {group_label} DARTEL template"
+            f"{pvc_description} and {fwhm_description} in Ixi549Space space based on {group_id.label} DARTEL template"
         ),
         "pet-volume",
     )
diff --git a/clinica/utils/spm.py b/clinica/utils/spm.py
index 8255b86fc..08e8f1e03 100644
--- a/clinica/utils/spm.py
+++ b/clinica/utils/spm.py
@@ -3,6 +3,7 @@
 from enum import Enum
 from os import PathLike
 from pathlib import Path
+from typing import Union
 
 __all__ = [
     "SPMTissue",
@@ -23,7 +24,9 @@ class SPMTissue(str, Enum):
     BACKGROUND = "background"
 
 
-def get_spm_tissue_from_index(index: int) -> SPMTissue:
+def get_spm_tissue_from_index(index: Union[int, SPMTissue]) -> SPMTissue:
+    if isinstance(index, SPMTissue):
+        return index
     if index == 1:
         return SPMTissue.GRAY_MATTER
     if index == 2:
diff --git a/test/unittests/pydra/test_interfaces.py b/test/unittests/pydra/test_interfaces.py
index f879f6a02..38f5625f6 100644
--- a/test/unittests/pydra/test_interfaces.py
+++ b/test/unittests/pydra/test_interfaces.py
@@ -60,6 +60,7 @@ def test_caps_reader_instantiation(tmp_path, query, grabber, name):
 def test_caps_reader(tmp_path):
     from clinica.pydra.engine_utils import run
     from clinica.pydra.interfaces import caps_reader
+    from clinica.utils.group import GroupID
 
     structure = {
         "groups": ["UnitTest"],
@@ -73,7 +74,7 @@ def test_caps_reader(tmp_path):
     query = CAPSFileQuery(
         {
             "mask_tissues": {"tissue_number": (1, 2, 3), "modulation": False},
-            "flow_fields": {"group_label": "UnitTest"},
+            "flow_fields": {"group_id": GroupID("group-UnitTest")},
             "pvc_mask_tissues": {"tissue_number": (1, 2, 3)},
             "dartel_template": {"group_label": "UnitTest"},
         }
@@ -98,7 +99,7 @@ def test_caps_reader(tmp_path):
         "sub-01_ses-M00_T1w_target-UnitTest_transformation-forward_deformation.nii.gz",
         "sub-03_ses-M00_T1w_target-UnitTest_transformation-forward_deformation.nii.gz",
     }
-    query = CAPSGroupQuery({"dartel_template": {"group_label": "UnitTest"}})
+    query = CAPSGroupQuery({"dartel_template": {"group_id": GroupID("group-UnitTest")}})
     task = caps_reader(query, tmp_path)
     results = run(task)
     assert hasattr(results.output, "dartel_template")
diff --git a/test/unittests/pydra/test_query.py b/test/unittests/pydra/test_query.py
index 529172a4f..84041c58d 100644
--- a/test/unittests/pydra/test_query.py
+++ b/test/unittests/pydra/test_query.py
@@ -3,6 +3,7 @@
 import pytest
 
 from clinica.pydra.query import BIDSQuery, CAPSFileQuery, CAPSGroupQuery, Query
+from clinica.utils.group import GroupID
 from clinica.utils.input_files import QueryPattern
 
 
@@ -69,7 +70,7 @@ def test_caps_file_query():
     q = CAPSFileQuery(
         {
             "mask_tissues": {"tissue_number": (1, 2), "modulation": False},
-            "flow_fields": {"group_label": "UnitTest"},
+            "flow_fields": {"group_id": GroupID("group-UnitTest")},
         }
     )
     assert len(q) == 2
@@ -113,7 +114,7 @@ def test_caps_file_query():
 
 
 def test_caps_group_query():
-    q = CAPSGroupQuery({"dartel_template": {"group_label": "UnitTest"}})
+    q = CAPSGroupQuery({"dartel_template": {"group_id": GroupID("group-UnitTest")}})
     assert len(q) == 1
     assert q.query == {
         "dartel_template": QueryPattern(
diff --git a/test/unittests/utils/test_input_files.py b/test/unittests/utils/test_input_files.py
index b431eab7f..307b1b93f 100644
--- a/test/unittests/utils/test_input_files.py
+++ b/test/unittests/utils/test_input_files.py
@@ -3,11 +3,7 @@
 import pytest
 
 from clinica.utils.dwi import DTIBasedMeasure
-from clinica.utils.input_files import (
-    QueryPattern,
-    QueryPatternName,
-    query_pattern_factory,
-)
+from clinica.utils.input_files import QueryPattern
 from clinica.utils.pet import ReconstructionMethod, Tracer
 
 
@@ -45,63 +41,105 @@ def toy_func_3(x, y=2, z=3):
         toy_func_3((1, 2, 3), z=(4, 5))
 
 
-@pytest.mark.parametrize(
-    "query_name,expected_pattern,expected_description,expected_pipelines",
-    [
-        (QueryPatternName.T1W, "sub-*_ses-*_t1w.nii*", "T1w MRI", ""),
-        (QueryPatternName.T2W, "sub-*_ses-*_flair.nii*", "FLAIR T2w MRI", ""),
-        (
-            QueryPatternName.T1_FREESURFER_WHITE_MATTER,
-            "t1/freesurfer_cross_sectional/sub-*_ses-*/mri/wm.seg.mgz",
-            "segmentation of white matter (mri/wm.seg.mgz).",
-            "t1-freesurfer",
-        ),
-        (
-            QueryPatternName.T1_FREESURFER_BRAIN,
-            "t1/freesurfer_cross_sectional/sub-*_ses-*/mri/brain.mgz",
-            "extracted brain from T1w MRI (mri/brain.mgz).",
-            "t1-freesurfer",
-        ),
-        (
-            QueryPatternName.T1_FREESURFER_ORIG_NU,
-            "t1/freesurfer_cross_sectional/sub-*_ses-*/mri/orig_nu.mgz",
-            (
-                "intensity normalized volume generated after correction for"
-                " non-uniformity in FreeSurfer (mri/orig_nu.mgz)."
-            ),
-            "t1-freesurfer",
-        ),
-        (
-            QueryPatternName.T1_FREESURFER_LONG_ORIG_NU,
-            "t1/long-*/freesurfer_longitudinal/sub-*_ses-*.long.sub-*_*/mri/orig_nu.mgz",
-            (
-                "intensity normalized volume generated after correction for non-uniformity "
-                "in FreeSurfer (orig_nu.mgz) in longitudinal"
-            ),
-            "t1-freesurfer and t1-freesurfer longitudinal",
-        ),
-        (
-            QueryPatternName.T1W_TO_MNI_TRANSFORM,
-            "*space-MNI152NLin2009cSym_res-1x1x1_affine.mat",
-            "Transformation matrix from T1W image to MNI space using t1-linear pipeline",
-            "t1-linear",
-        ),
-        (
-            QueryPatternName.DWI_PREPROC_BRAINMASK,
-            "dwi/preprocessing/sub-*_ses-*_space-*_brainmask.nii*",
-            "b0 brainmask",
-            "dwi-preprocessing-using-t1 or dwi-preprocessing-using-fieldmap",
-        ),
-    ],
-)
-def test_query_factory(
-    query_name, expected_pattern, expected_description, expected_pipelines
-):
-    query = query_pattern_factory(query_name)()
+def test_get_t1w_mri():
+    from clinica.utils.input_files import get_t1w_mri
 
-    assert query.pattern == expected_pattern
-    assert query.description == expected_description
-    assert query.needed_pipeline == expected_pipelines
+    pattern = get_t1w_mri()
+
+    assert pattern.pattern == "sub-*_ses-*_t1w.nii*"
+    assert pattern.description == "T1w MRI"
+    assert pattern.needed_pipeline == ""
+
+
+def test_get_t2w_mri():
+    from clinica.utils.input_files import get_t2w_mri
+
+    pattern = get_t2w_mri()
+
+    assert pattern.pattern == "sub-*_ses-*_flair.nii*"
+    assert pattern.description == "FLAIR T2w MRI"
+    assert pattern.needed_pipeline == ""
+
+
+def test_get_t1_freesurfer_segmentation_white_matter():
+    from clinica.utils.input_files import get_t1_freesurfer_segmentation_white_matter
+
+    pattern = get_t1_freesurfer_segmentation_white_matter()
+
+    assert pattern.pattern == "t1/freesurfer_cross_sectional/sub-*_ses-*/mri/wm.seg.mgz"
+    assert pattern.description == "segmentation of white matter (mri/wm.seg.mgz)."
+ assert pattern.needed_pipeline == "t1-freesurfer" + + +def test_get_t1_freesurfer_extracted_brain(): + from clinica.utils.input_files import get_t1_freesurfer_extracted_brain + + pattern = get_t1_freesurfer_extracted_brain() + + assert pattern.pattern == "t1/freesurfer_cross_sectional/sub-*_ses-*/mri/brain.mgz" + assert pattern.description == "extracted brain from T1w MRI (mri/brain.mgz)." + assert pattern.needed_pipeline == "t1-freesurfer" + + +def test_get_t1_freesurfer_intensity_normalized_volume_after_nu(): + from clinica.utils.input_files import ( + get_t1_freesurfer_intensity_normalized_volume_after_nu, + ) + + pattern = get_t1_freesurfer_intensity_normalized_volume_after_nu() + + assert ( + pattern.pattern == "t1/freesurfer_cross_sectional/sub-*_ses-*/mri/orig_nu.mgz" + ) + assert pattern.description == ( + "intensity normalized volume generated after correction for" + " non-uniformity in FreeSurfer (mri/orig_nu.mgz)." + ) + assert pattern.needed_pipeline == "t1-freesurfer" + + +def test_get_t1_freesurfer_longitudinal_intensity_normalized_volume_after_nu(): + from clinica.utils.input_files import ( + get_t1_freesurfer_longitudinal_intensity_normalized_volume_after_nu, + ) + + pattern = get_t1_freesurfer_longitudinal_intensity_normalized_volume_after_nu() + + assert ( + pattern.pattern + == "t1/long-*/freesurfer_longitudinal/sub-*_ses-*.long.sub-*_*/mri/orig_nu.mgz" + ) + assert pattern.description == ( + "intensity normalized volume generated after correction for non-uniformity " + "in FreeSurfer (orig_nu.mgz) in longitudinal" + ) + assert pattern.needed_pipeline == "t1-freesurfer and t1-freesurfer longitudinal" + + +def test_get_t1w_to_mni_transform(): + from clinica.utils.input_files import get_t1w_to_mni_transform + + pattern = get_t1w_to_mni_transform() + + assert pattern.pattern == "*space-MNI152NLin2009cSym_res-1x1x1_affine.mat" + assert ( + pattern.description + == "Transformation matrix from T1W image to MNI space using t1-linear pipeline" + ) + assert pattern.needed_pipeline == "t1-linear" + + +def test_get_dwi_preprocessed_brainmask(): + from clinica.utils.input_files import get_dwi_preprocessed_brainmask + + pattern = get_dwi_preprocessed_brainmask() + + assert pattern.pattern == "dwi/preprocessing/sub-*_ses-*_space-*_brainmask.nii*" + assert pattern.description == "b0 brainmask" + assert ( + pattern.needed_pipeline + == "dwi-preprocessing-using-t1 or dwi-preprocessing-using-fieldmap" + ) @pytest.mark.parametrize(