diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index eec05e222..d9fee828e 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -3,20 +3,12 @@ repos:
     rev: v4.4.0
     hooks:
       - id: check-yaml
-  - repo: https://github.com/psf/black-pre-commit-mirror
-    rev: '23.11.0'
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.1.7
     hooks:
-      - id: black
-  - repo: https://github.com/PyCQA/isort
-    rev: '5.12.0'
-    hooks:
-      - id: isort
-  - repo: https://github.com/PyCQA/flake8
-    rev: '6.1.0'
-    hooks:
-      - id: flake8
-        additional_dependencies:
-          - flake8-pyproject
+      - id: ruff
+        args: [ --fix ]
+      - id: ruff-format
   - repo: https://github.com/codespell-project/codespell
     rev: v2.2.6
     hooks:
diff --git a/clinica/iotools/bids_utils.py b/clinica/iotools/bids_utils.py
index 9eafd4767..1c982fefb 100644
--- a/clinica/iotools/bids_utils.py
+++ b/clinica/iotools/bids_utils.py
@@ -402,7 +402,7 @@ def create_scans_dict(
     # Some flutemeta lines contain a non-coded string value at the second-to-last position. This value
     # contains a comma which adds an extra column and shifts the remaining values to the right. In this
     # case, we just remove the erroneous content and replace it with -4 which AIBL uses as n/a value.
-    on_bad_lines = (
+    on_bad_lines = (  # noqa: E731
         lambda bad_line: bad_line[:-3] + [-4, bad_line[-1]]
         if "flutemeta" in file_path and study_name == "AIBL"
         else "error"
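Note: the bids_utils.py hunk above only adds a `# noqa: E731` suppression (E731 flags assigning a lambda to a name; rewriting it as a `def` would avoid the suppression altogether). For context, a minimal sketch of how the callable form of pandas' `on_bad_lines` behaves; the file content, separator, and `fix_bad_line` helper are hypothetical, and pandas >= 1.4 with `engine="python"` is assumed:

    import io

    import pandas as pd

    # Malformed second data row: a stray separator yields four fields
    # where the header promises three.
    csv = io.StringIO("a;b;c\n1;2;3\n4;stray;5;6\n")

    def fix_bad_line(bad_line):
        # Mirrors the AIBL flutemeta repair above: drop the overflow
        # fields and substitute -4 (the study's n/a code), keeping the
        # final column.
        return bad_line[:-3] + [-4, bad_line[-1]]

    # The callable receives the split fields of each bad row and returns
    # a repaired list of fields (or None to skip the row).
    df = pd.read_csv(csv, sep=";", on_bad_lines=fix_bad_line, engine="python")
    # The repaired row comes through as 4, -4, 6.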
diff --git a/clinica/pipelines/dwi_connectome/dwi_connectome_pipeline.py b/clinica/pipelines/dwi_connectome/dwi_connectome_pipeline.py
index bde3b89ce..64f16abd7 100644
--- a/clinica/pipelines/dwi_connectome/dwi_connectome_pipeline.py
+++ b/clinica/pipelines/dwi_connectome/dwi_connectome_pipeline.py
@@ -245,11 +245,11 @@ def build_core_nodes(self):
         import nipype.interfaces.mrtrix3 as mrtrix3
         import nipype.interfaces.utility as niu
         import nipype.pipeline.engine as npe
+        from nipype.interfaces.mrtrix.preprocess import MRTransform
         from nipype.interfaces.mrtrix3 import (
             ConstrainedSphericalDeconvolution,
             Tractography,
         )
-        from nipype.interfaces.mrtrix.preprocess import MRTransform

         import clinica.pipelines.dwi_connectome.dwi_connectome_utils as utils
         from clinica.utils.exceptions import ClinicaCAPSError
diff --git a/clinica/pipelines/dwi_dti/dwi_dti_pipeline.py b/clinica/pipelines/dwi_dti/dwi_dti_pipeline.py
index 2be82c382..a7f38fa06 100644
--- a/clinica/pipelines/dwi_dti/dwi_dti_pipeline.py
+++ b/clinica/pipelines/dwi_dti/dwi_dti_pipeline.py
@@ -213,8 +213,8 @@ def build_core_nodes(self):
         import nipype.interfaces.utility as nutil
         import nipype.pipeline.engine as npe
         from nipype.interfaces.ants import ApplyTransforms, RegistrationSynQuick
-        from nipype.interfaces.mrtrix3 import TensorMetrics
         from nipype.interfaces.mrtrix.preprocess import DWI2Tensor
+        from nipype.interfaces.mrtrix3 import TensorMetrics

         from clinica.utils.check_dependency import check_environment_variable
         from clinica.utils.dwi import extract_bids_identifier_from_filename
diff --git a/clinica/pipelines/dwi_preprocessing_using_fmap/dwi_preprocessing_using_phasediff_fmap_pipeline.py b/clinica/pipelines/dwi_preprocessing_using_fmap/dwi_preprocessing_using_phasediff_fmap_pipeline.py
index f713c2030..da01f62e0 100644
--- a/clinica/pipelines/dwi_preprocessing_using_fmap/dwi_preprocessing_using_phasediff_fmap_pipeline.py
+++ b/clinica/pipelines/dwi_preprocessing_using_fmap/dwi_preprocessing_using_phasediff_fmap_pipeline.py
@@ -279,7 +279,6 @@ def build_core_nodes(self):
         import nipype.interfaces.fsl as fsl
         import nipype.interfaces.mrtrix3 as mrtrix3
         import nipype.interfaces.utility as nutil
-        import nipype.interfaces.utility as niu
         import nipype.pipeline.engine as npe

         from clinica.pipelines.dwi_preprocessing_using_t1.dwi_preprocessing_using_t1_workflows import (
@@ -341,7 +340,7 @@ def build_core_nodes(self):
         # =======================
         # Compute average b0 on corrected dataset (for brain mask extraction)
         compute_avg_b0 = npe.Node(
-            niu.Function(
+            nutil.Function(
                 input_names=["in_dwi", "in_bval"],
                 output_names=["out_b0_average"],
                 function=compute_average_b0,
diff --git a/clinica/pipelines/machine_learning/algorithm.py b/clinica/pipelines/machine_learning/algorithm.py
index cb0ec2d30..505041854 100644
--- a/clinica/pipelines/machine_learning/algorithm.py
+++ b/clinica/pipelines/machine_learning/algorithm.py
@@ -457,9 +457,9 @@ def _select_best_parameter(self, async_result):
         best_min_samples_split = int(round(np.mean([x[2] for x in params_list])))

         def max_feature_to_float(m):
-            if type(m) is float:
+            if isinstance(m, float):
                 return m
-            if type(m) is int:
+            if isinstance(m, int):
                 return float(m) / float(self._x.shape[1])
             if m == "auto" or m == "sqrt":
                 return np.sqrt(self._x.shape[1]) / float(self._x.shape[1])
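Note on the algorithm.py hunk just above: replacing `type(m) is ...` with `isinstance(m, ...)` is the idiomatic form and is behavior-preserving for plain floats and ints; the one difference is that `isinstance` also accepts subclasses. A minimal illustration (the remark about call sites is an assumption, not something this diff verifies):

    class MyFloat(float):
        pass

    m = MyFloat(0.5)
    print(type(m) is float)      # False: an exact type check rejects subclasses
    print(isinstance(m, float))  # True: isinstance accepts them

    # Corner case: bool is a subclass of int, so isinstance(True, int) is
    # True while type(True) is int is False. Harmless for
    # max_feature_to_float as long as max_features is never a bool.
    print(isinstance(True, int), type(True) is int)  # True False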
diff --git a/clinica/pipelines/machine_learning/vertex_based_io.py b/clinica/pipelines/machine_learning/vertex_based_io.py
index 06f970906..394393059 100644
--- a/clinica/pipelines/machine_learning/vertex_based_io.py
+++ b/clinica/pipelines/machine_learning/vertex_based_io.py
@@ -13,9 +13,7 @@ def load_data(mgh_list):

     # Construct 0-matrix with the good size, based on the size of the surfaces
     # provided by the first subject
-    N_vertex = (
-        []
-    )  # array containing the surface size of the different surfaces of a subject
+    N_vertex = []  # array containing the surface size of the different surfaces of a subject
     sample = mgh_list[0]
     for i in range(len(sample)):
         N_vertex.append(np.max(nib.load(sample[i]).header.get_data_shape()))
diff --git a/clinica/pipelines/machine_learning_spatial_svm/spatial_svm_utils.py b/clinica/pipelines/machine_learning_spatial_svm/spatial_svm_utils.py
index 43d5a6b16..bc79180a1 100644
--- a/clinica/pipelines/machine_learning_spatial_svm/spatial_svm_utils.py
+++ b/clinica/pipelines/machine_learning_spatial_svm/spatial_svm_utils.py
@@ -262,8 +262,8 @@ def roots_poly(C):
             ]
         )
         # two roots
-        rts1 = (-C[1, :] + delta) * (1 / ((2 * C[0, :])))
-        rts2 = (-C[1, :] - delta) * (1 / ((2 * C[0, :])))
+        rts1 = (-C[1, :] + delta) * (1 / (2 * C[0, :]))
+        rts2 = (-C[1, :] - delta) * (1 / (2 * C[0, :]))
         rts = np.array([rts1, rts2])

     elif C.shape[0] < 5:
diff --git a/pyproject.toml b/pyproject.toml
index 298a4160b..084e53520 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -93,35 +93,22 @@ clinica = "clinica.cmdline:main"
 requires = ["poetry-core>=1.0.0"]
 build-backend = "poetry.core.masonry.api"

-[tool.black]
+[tool.ruff]
+target-version = "py38"
 line-length = 88
-target-version = ['py36', 'py37', 'py38']
-include = '\.pyi?$'
-force-exclude = '''
-/(
-    \.eggs
-  | \.git
-  | \.hg
-  | \.mypy_cache
-  | \.tox
-  | \.venv
-  | \.pytest_cache
-  | _build
-  | buck-out
-  | build
-  | dist
-  | docs
-  | clinica/lib
-)/
-'''

-[tool.isort]
-profile = "black"
+[tool.ruff.lint]
+select = ["E", "W", "I001"]
+ignore = ["E203", "E501"]

-[tool.flake8]
-max-line-length = 88
-select = ["E", "W"]
-extend-ignore = ["E203", "E501", "W503"]
+[tool.ruff.lint.isort]
+known-first-party = ["clinica"]
+
+[tool.ruff.format]
+quote-style = "double"
+indent-style = "space"
+skip-magic-trailing-comma = false
+line-ending = "auto"

 [tool.codespell]
 summary = ''
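Note on the `ignore` list in the new [tool.ruff.lint] table above: E501 stays suppressed because the formatter owns line length but cannot wrap long comments or string literals (several comment-bearing lines in this very diff exceed 88 characters), and E203 stays suppressed because Black-style formatting, which ruff-format follows, spaces the colon in slices whose bounds are expressions, which pycodestyle's E203 would flag. A small illustration:

    x = list(range(10))
    n = 4

    # Black/ruff-format treat the slice colon as a binary operator when the
    # bounds are compound expressions, producing spacing that E203
    # ("whitespace before ':'") would otherwise report.
    print(x[n - 1 : n + 2])  # [3, 4, 5]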
diff --git a/test/unittests/pipelines/dwi_preprocessing_using_t1/test_dwi_preprocessing_using_t1_utils.py b/test/unittests/pipelines/dwi_preprocessing_using_t1/test_dwi_preprocessing_using_t1_utils.py
index 2f008c440..dc6c8543f 100644
--- a/test/unittests/pipelines/dwi_preprocessing_using_t1/test_dwi_preprocessing_using_t1_utils.py
+++ b/test/unittests/pipelines/dwi_preprocessing_using_t1/test_dwi_preprocessing_using_t1_utils.py
@@ -360,22 +360,25 @@ def test_prepare_reference_b0(tmp_path, mocker):
     assert reference_dataset.dwi == tmp_path / "sub-foo_ses-bar_dwi_merged.nii.gz"
     assert reference_dataset.b_values == tmp_path / "sub-foo_ses-bar_dwi_merged.bval"
     assert reference_dataset.b_vectors == tmp_path / "sub-foo_ses-bar_dwi_merged.bvec"
-    assert sorted([p.name for p in tmp_path.iterdir()]) == [
-        "reference_b0_volume.nii.gz",  # This is the 3D volume corresponding to the co-registered volumes for b<=low_b
-        "sub-foo_ses-bar_dwi.bval",  # Initial bvalue file
-        "sub-foo_ses-bar_dwi.bvec",  # Initial bvectors file
-        "sub-foo_ses-bar_dwi.nii.gz",  # Initial dwi image file
-        "sub-foo_ses-bar_dwi_large_b.bval",  # bvalue file corresponding to DWI volumes with b>low_b
-        "sub-foo_ses-bar_dwi_large_b.bvec",  # bvectors file corresponding to DWI volumes with b>low_b
-        "sub-foo_ses-bar_dwi_large_b.nii.gz",  # DWI image file holding volumes for which b>low_b
-        "sub-foo_ses-bar_dwi_merged.bval",  # bvalue file corresponding to the merged DWI dataset
-        "sub-foo_ses-bar_dwi_merged.bvec",  # bvectors file corresponding to the merged DWI dataset
-        "sub-foo_ses-bar_dwi_merged.nii.gz",  # image file holding the merged DWI volumes
-        "sub-foo_ses-bar_dwi_small_b.bval",  # bvalue file corresponding to the volumes for which b<=low_b
-        "sub-foo_ses-bar_dwi_small_b.bvec",  # bvectors file corresponding to the volumes for which b<=low_b
-        "sub-foo_ses-bar_dwi_small_b.nii.gz",  # DWI image file holding volumes for which b<=low_b
-        "tmp",  # Working directory containing all the stuff generated by b0_flirt_pipeline
-    ]
+    assert (
+        sorted([p.name for p in tmp_path.iterdir()])
+        == [
+            "reference_b0_volume.nii.gz",  # This is the 3D volume corresponding to the co-registered volumes for b<=low_b
+            "sub-foo_ses-bar_dwi.bval",  # Initial bvalue file
+            "sub-foo_ses-bar_dwi.bvec",  # Initial bvectors file
+            "sub-foo_ses-bar_dwi.nii.gz",  # Initial dwi image file
+            "sub-foo_ses-bar_dwi_large_b.bval",  # bvalue file corresponding to DWI volumes with b>low_b
+            "sub-foo_ses-bar_dwi_large_b.bvec",  # bvectors file corresponding to DWI volumes with b>low_b
+            "sub-foo_ses-bar_dwi_large_b.nii.gz",  # DWI image file holding volumes for which b>low_b
+            "sub-foo_ses-bar_dwi_merged.bval",  # bvalue file corresponding to the merged DWI dataset
+            "sub-foo_ses-bar_dwi_merged.bvec",  # bvectors file corresponding to the merged DWI dataset
+            "sub-foo_ses-bar_dwi_merged.nii.gz",  # image file holding the merged DWI volumes
+            "sub-foo_ses-bar_dwi_small_b.bval",  # bvalue file corresponding to the volumes for which b<=low_b
+            "sub-foo_ses-bar_dwi_small_b.bvec",  # bvectors file corresponding to the volumes for which b<=low_b
+            "sub-foo_ses-bar_dwi_small_b.nii.gz",  # DWI image file holding volumes for which b<=low_b
+            "tmp",  # Working directory containing all the stuff generated by b0_flirt_pipeline
+        ]
+    )
     ref_b0_volume = nib.load(tmp_path / "reference_b0_volume.nii.gz")
     assert_array_equal(ref_b0_volume.get_fdata(), 5.0 * np.ones((5, 5, 5, 1)))
     large_b_image = nib.load(tmp_path / "sub-foo_ses-bar_dwi_large_b.nii.gz")
diff --git a/test/unittests/utils/test_freesurfer.py b/test/unittests/utils/test_freesurfer.py
index f55c5dd14..11c26f3ed 100644
--- a/test/unittests/utils/test_freesurfer.py
+++ b/test/unittests/utils/test_freesurfer.py
@@ -4,10 +4,10 @@
 import pandas as pd
 import pytest

-from clinica.utils.freesurfer import _get_prefix  # noqa
 from clinica.utils.freesurfer import (
     ColumnType,
     InfoType,
+    _get_prefix,  # noqa
     extract_image_id_from_longitudinal_segmentation,
 )
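Note on the reordered nipype imports earlier in this diff (dwi_connectome and dwi_dti): ruff's import sorter, enabled above via rule I001, places `nipype.interfaces.mrtrix.preprocess` before `nipype.interfaces.mrtrix3`, plausibly because module paths compare as strings and "." sorts before "3"; plain Python string ordering reproduces the result:

    modules = ["nipype.interfaces.mrtrix3", "nipype.interfaces.mrtrix.preprocess"]
    # "." (0x2E) precedes "3" (0x33), so mrtrix.preprocess sorts first.
    print(sorted(modules))
    # ['nipype.interfaces.mrtrix.preprocess', 'nipype.interfaces.mrtrix3']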