diff --git a/.github/workflows/ci-cd.yaml b/.github/workflows/ci-cd.yaml index 7333d04..4cb8dfb 100644 --- a/.github/workflows/ci-cd.yaml +++ b/.github/workflows/ci-cd.yaml @@ -9,30 +9,41 @@ name: CI/CD on: push: branches: [ main, develop ] - tags: [ '*' ] pull_request: branches: [ main, develop ] + release: + types: [published] repository_dispatch: - types: [create-release] + types: [create-post-release] + +env: # Define environment variables + ANTS_VERSION: v2.5.1 jobs: nipype-conv: runs-on: ubuntu-latest steps: + - name: Checkout - uses: actions/checkout@v3 - - name: Revert version to most recent tag on upstream update + uses: actions/checkout@v4 + + - name: Revert version to most recent version tag on upstream update if: github.event_name == 'repository_dispatch' - run: git checkout $(git tag -l | tail -n 1 | awk -F post '{print $1}') - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + run: git checkout $(git tag -l | grep 'v.*' | tail -n 1 | awk -F post '{print $1}') + + - name: Set up Python + uses: actions/setup-python@v5 + - name: Install build dependencies run: python -m pip install --upgrade pip + - name: Install requirements run: python -m pip install ./related-packages/fileformats -r ./nipype-auto-conv/requirements.txt + - name: Run automatic Nipype > Pydra conversion run: ./nipype-auto-conv/generate + - uses: actions/upload-artifact@v3 with: name: converted-nipype @@ -50,256 +61,334 @@ jobs: - '--editable git+https://github.com/nipype/pydra.git#egg=pydra' steps: - name: Checkout - uses: actions/checkout@v3 - - name: Revert version to most recent tag on upstream update + uses: actions/checkout@v4 + + - name: Revert version to most recent version tag on upstream update if: github.event_name == 'repository_dispatch' - run: git checkout $(git tag -l | tail -n 1 | awk -F post '{print $1}') - - name: Download tasks converted from Nipype + run: git checkout $(git tag -l | grep 'v.*' | tail -n 1 | awk -F post '{print $1}') + + - name: Download tasks converted from Nipype uses: actions/download-artifact@v3 with: name: converted-nipype path: pydra/tasks/ants/auto + - name: Strip auto package from gitignore so it is included in package run: | sed -i '/\/pydra\/tasks\/ants\/auto/d' .gitignore + sed -i '/^_version.py/d' .gitignore + - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} + - name: Install build dependencies run: | python -m pip install --upgrade pip + - name: Install Pydra run: | pushd $HOME pip install ${{ matrix.pydra }} popd python -c "import pydra as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')" + - name: Install task package run: | - pip install "./related-packages/fileformats[dev]" "related-packages/fileformats-extras[dev]" + pip install ${{ matrix.pip-flags }} "./related-packages/fileformats[dev]" + pip install ${{ matrix.pip-flags }} "related-packages/fileformats-extras[dev]" pip install ${{ matrix.pip-flags }} ".[dev]" python -c "import pydra.tasks.ants as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')" python -c "import pydra as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')" python -c "import fileformats.medimage_ants as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')" python -c "import fileformats.extras.medimage_ants as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')" - - fileformats-test: - runs-on: ubuntu-latest - strategy: - matrix: - python-version: ['3.8', '3.11'] 
- steps: - - uses: actions/checkout@v3 - - name: Revert version to most recent tag on upstream update - if: github.event_name == 'repository_dispatch' - run: git checkout $(git tag -l | tail -n 1 | awk -F post '{print $1}') - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - name: Install build dependencies - run: | - python -m pip install --upgrade pip - - name: Install task package - run: | - pip install "./related-packages/fileformats[test]" "./related-packages/fileformats-extras[test]" - python -c "import fileformats.medimage_ants as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')" - - name: Test fileformats with pytest - run: | - cd ./fileformats - pytest -sv --cov fileformats.medimage_ants --cov fileformats.extras.medimage_ants --cov-report xml . test: - needs: [nipype-conv, fileformats-test] + needs: [nipype-conv] runs-on: ubuntu-22.04 strategy: matrix: - python-version: ['3.8'] # '3.11' + python-version: ['3.8', '3.11'] steps: + + - name: Install prerequisite packages + run: sudo apt install -y cmake + - name: Removed unnecessary tools to free space run: | sudo rm -rf /usr/share/dotnet - sudo rm -rf "$AGENT_TOOLSDIRECTORY" - - name: Get Download cache Key - id: cache-key - run: echo "::set-output name=key::ants-linux-ubuntu22_amd64-7.4.1" - - name: Cache FreeSurfer - uses: actions/cache@v2 + sudo rm -rf "$AGENT_TOOLSDIRECTORY" + + - name: Cache ANTs Install + id: cache-install + uses: actions/cache@v4 with: - path: $HOME/downloads/ants - key: ${{ steps.cache-key.outputs.key }} - restore-keys: | - ants-linux-ubuntu22_amd64-7.4.1 - - name: Download FreeSurfer - if: steps.cache-key.outputs.key != steps.cache-hit.outputs.key - run: | - mkdir -p $HOME/downloads/ants - curl -s -o $HOME/downloads/ants/ants-linux-ubuntu22_amd64-7.4.1.tar.gz https://surfer.nmr.mgh.harvard.edu/pub/dist/ants/7.4.1/ants-linux-ubuntu22_amd64-7.4.1.tar.gz - shell: bash - - name: Install Freesurfer - env: - FREESURFER_LICENCE: ${{ secrets.FREESURFER_LICENCE }} + path: install + key: ants-${{ env.ANTS_VERSION }}-${{ runner.os }} + + - name: Install ANTs Package + if: steps.cache-install.outputs.cache-hit != 'true' run: | - pushd $HOME/downloads/ants - tar -zxpf ants-linux-ubuntu22_amd64-7.4.1.tar.gz - mv ants $HOME/ + workingDir=${PWD} + git clone https://github.com/ANTsX/ANTs.git + pushd ./ANTs + git checkout ${{ env.ANTS_VERSION }} popd - export FREESURFER_HOME=$HOME/ants - source $FREESURFER_HOME/SetUpFreeSurfer.sh - echo $FREESURFER_LICENCE > $FREESURFER_HOME/license.txt - export PATH=$FREESURFER_HOME/bin:$PATH - - uses: actions/checkout@v3 - - name: Revert version to most recent tag on upstream update + mkdir build install + cd build + cmake -DCMAKE_INSTALL_PREFIX=${workingDir}/install ../ANTs + make -j 4 + cd ANTS-build + make install + echo "Installation completed successfully" + echo "PATH=${workingDir}/install/bin:$PATH" >> $GITHUB_ENV + echo "LD_LIBRARY_PATH=${workingDir}/install/lib:$LD_LIBRARY_PATH" >> $GITHUB_ENV + + - name: Checkout repo + uses: actions/checkout@v4 + + - name: Revert version to most recent version tag on upstream update if: github.event_name == 'repository_dispatch' - run: git checkout $(git tag -l | tail -n 1 | awk -F post '{print $1}') - - name: Download tasks converted from Nipype + run: git checkout $(git tag -l | grep 'v.*' | tail -n 1 | awk -F post '{print $1}') + + - name: Download tasks converted from Nipype uses: actions/download-artifact@v3 with: name: converted-nipype path: 
pydra/tasks/ants/auto + + - name: Show the contents of the auto-generated tasks + run: tree pydra + - name: Strip auto package from gitignore so it is included in package run: | - sed -i '/\/src\/pydra\/tasks\/ants\/auto/d' .gitignore + sed -i '/\/pydra\/tasks\/ants\/auto/d' .gitignore + - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} + - name: Install build dependencies run: | python -m pip install --upgrade pip + - name: Install task package run: | pip install "./related-packages/fileformats" "./related-packages/fileformats-extras" ".[test]" python -c "import pydra.tasks.ants as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')" python -c "import pydra as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')" + - name: Test with pytest - run: | - pytest -sv --doctest-modules ./pydra/tasks/ants \ - --cov pydra.tasks.ants --cov-report xml - - uses: codecov/codecov-action@v3 + run: >- + pytest -sv + ./pydra/tasks/ants + ./related-packages/fileformats + ./related-packages/fileformats-extras + --cov pydra.tasks.ants + --cov fileformats.medimage_ants + --cov fileformats.extras.medimage_ants + --cov-report xml + + - name: Upload to CodeCov + uses: codecov/codecov-action@v3 if: ${{ always() }} with: - files: coverage.xml,./fileformats/coverage.xml + files: coverage.xml name: pydra-ants + deploy-fileformats: needs: [devcheck, test] runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + + - uses: actions/checkout@v4 with: submodules: recursive - fetch-depth: 0 + fetch-depth: 0 + - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: '3.11' + - name: Install build tools run: python -m pip install build twine + - name: Build source and wheel distributions run: python -m build ./related-packages/fileformats + - name: Check distributions run: twine check ./related-packages/fileformats/dist/* + - name: Check for PyPI token on tag id: deployable - if: (github.event_name == 'push' && startsWith(github.ref, 'refs/tags')) || github.event_name == 'repository_dispatch' + if: github.event_name == 'release' || github.event_name == 'repository_dispatch' env: PYPI_API_TOKEN: "${{ secrets.PYPI_FILEFORMATS_API_TOKEN }}" run: if [ -n "$PYPI_API_TOKEN" ]; then echo "DEPLOY=true" >> $GITHUB_OUTPUT; fi + - name: Upload to PyPI if: steps.deployable.outputs.DEPLOY uses: pypa/gh-action-pypi-publish@release/v1 with: user: __token__ password: ${{ secrets.PYPI_FILEFORMATS_API_TOKEN }} - packages-dir: ./related-packages/fileformats/dist + packages-dir: ./related-packages/fileformats/dist deploy-fileformats-extras: needs: [deploy-fileformats] runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + + - uses: actions/checkout@v4 with: submodules: recursive - fetch-depth: 0 + fetch-depth: 0 + - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: '3.11' + - name: Install build tools run: python -m pip install build twine + - name: Build source and wheel distributions run: python -m build ./related-packages/fileformats-extras + - name: Check distributions run: twine check ./related-packages/fileformats-extras/dist/* + - name: Check for PyPI token on tag id: deployable - if: (github.event_name == 'push' && startsWith(github.ref, 'refs/tags')) || github.event_name == 'repository_dispatch' + if: github.event_name == 'release' || github.event_name == 'repository_dispatch' env: 
PYPI_API_TOKEN: "${{ secrets.PYPI_FILEFORMATS_EXTRAS_API_TOKEN }}" run: if [ -n "$PYPI_API_TOKEN" ]; then echo "DEPLOY=true" >> $GITHUB_OUTPUT; fi + - name: Upload to PyPI if: steps.deployable.outputs.DEPLOY uses: pypa/gh-action-pypi-publish@release/v1 with: user: __token__ password: ${{ secrets.PYPI_FILEFORMATS_EXTRAS_API_TOKEN }} - packages-dir: ./related-packages/fileformats-extras/dist + packages-dir: ./related-packages/fileformats-extras/dist deploy: - needs: [deploy-fileformats-extras] + needs: [nipype-conv, test, deploy-fileformats, deploy-fileformats-extras] runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + + - name: Checkout repository + uses: actions/checkout@v4 with: submodules: recursive fetch-depth: 0 - - name: Download tasks converted from Nipype + + - name: Set up Git user + run: | + git config --local user.email "action@github.com" + git config --local user.name "GitHub Action" + + - name: Get latest version tag + id: latest_tag + run: | + git fetch --tags + echo "TAG=$(git tag -l | grep 'v.*' | tail -n 1 | awk -F post '{print $1}')" >> $GITHUB_OUTPUT + + - name: Revert to latest tag + if: github.event_name == 'repository_dispatch' + run: git checkout ${{ steps.latest_tag.outputs.TAG }} + + - name: Download tasks converted from Nipype uses: actions/download-artifact@v3 with: name: converted-nipype path: pydra/tasks/ants/auto - - name: Tag release with a post-release based on Nipype and Nipype2Pydra versions - if: github.event_name == 'repository_dispatch' - run: | - TAG=$(git tag -l | tail -n 1 | awk -F post '{print $1}') - POST=$(python -c "from pydra.tasks.ants.auto._version import *; print(post_release)") - git checkout $TAG - git add -f pydra/tasks/ants/auto/_version.py - git commit -am"added auto-generated version to make new tag for package version" - git tag ${TAG}post${POST} + + - name: Show the contents of the auto-generated tasks + run: tree pydra + - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: '3.11' + - name: Install build tools run: python -m pip install build twine + - name: Strip auto package from gitignore so it is included in package run: | sed -i '/\/pydra\/tasks\/ants\/auto/d' .gitignore + cat .gitignore + + - name: Install task package to calculate post-release tag + run: | + pip install "./related-packages/fileformats" "./related-packages/fileformats-extras" ".[test]" + + - name: Generate post-release tag based on Nipype and Nipype2Pydra versions + id: post_release_tag + run: | + POST=$(python -c "from pydra.tasks.ants.auto._version import *; print(post_release)") + echo "TAG=${{ steps.latest_tag.outputs.TAG }}post${POST}" >> $GITHUB_OUTPUT + + - name: Add auto directory to git repo + if: github.event_name == 'release' || github.event_name == 'repository_dispatch' + run: | + git add pydra/tasks/ants/auto + git commit -am"added auto-generated version to make new tag for package version" + git status + + - name: Overwrite the tag of release event with latest commit (i.e. including the auto directory) + if: github.event_name == 'release' + run: | + git tag -d ${{ steps.latest_tag.outputs.TAG }}; + git tag ${{ steps.latest_tag.outputs.TAG }}; + + - name: Tag repo with the post-release + if: github.event_name == 'repository_dispatch' + run: git tag ${{ steps.post_release_tag.outputs.TAG }} + - name: Build source and wheel distributions run: python -m build . 
+ - name: Check distributions run: twine check dist/* + - uses: actions/upload-artifact@v3 with: name: distributions path: dist/ + - name: Check for PyPI token on tag id: deployable - if: (github.event_name == 'push' && startsWith(github.ref, 'refs/tags')) || github.event_name == 'repository_dispatch' + if: github.event_name == 'release' || github.event_name == 'repository_dispatch' env: PYPI_API_TOKEN: "${{ secrets.PYPI_API_TOKEN }}" run: if [ -n "$PYPI_API_TOKEN" ]; then echo "DEPLOY=true" >> $GITHUB_OUTPUT; fi + - name: Upload to PyPI if: steps.deployable.outputs.DEPLOY uses: pypa/gh-action-pypi-publish@release/v1 with: user: __token__ - password: ${{ secrets.PYPI_API_TOKEN }} + password: ${{ secrets.PYPI_API_TOKEN }} + + - name: Create post-release release for releases triggered by nipype2pydra dispatches + if: steps.deployable.outputs.DEPLOY && github.event_name == 'repository_dispatch' + uses: actions/create-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # This token is provided by Actions, you do not need to create your own token + with: + tag_name: ${{ steps.post_release_tag.outputs.TAG }} + release_name: Release ${{ steps.post_release_tag.outputs.TAG }} + draft: false + prerelease: false # Deploy on tags if PYPI_API_TOKEN is defined in the repository secrets. # Secrets are not accessible in the if: condition [0], so set an output variable [1] # [0] https://github.community/t/16928 -# [1] https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-output-parameter \ No newline at end of file +# [1] https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-output-parameter diff --git a/.github/workflows/pythonpackage.yml b/.github/workflows/pythonpackage.yml deleted file mode 100644 index 5cdf094..0000000 --- a/.github/workflows/pythonpackage.yml +++ /dev/null @@ -1,32 +0,0 @@ -#This workflow will install Python dependencies, run tests and lint with a variety of Python versions -# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions - -name: Python package - -on: - push: - branches: [ master ] - pull_request: - branches: [ master ] - -jobs: - build: - - runs-on: ubuntu-latest - strategy: - matrix: - python-version: [3.7, 3.8, 3.9, '3.10'] - - steps: - - uses: actions/checkout@v3 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install ".[test]" - - name: Test with pytest - run: | - pytest -sv --doctest-modules pydra/tasks/ants diff --git a/.gitignore b/.gitignore index 9e8d1aa..00ee882 100644 --- a/.gitignore +++ b/.gitignore @@ -131,11 +131,18 @@ dmypy.json # Pycharm .idea +# Vim +.*.sw[op] + # VS Code .vscode # Mac garbarge .DS_store +# Generated files +/pydra/tasks/ants/_version.py +/related-packages/fileformats/fileformats/medimage_ants/_version.py +/related-packages/fileformats-extras/fileformats/extras/medimage_ants/_version.py /pydra/tasks/ants/auto /pydra/tasks/ants/_version.py diff --git a/README.md b/README.md deleted file mode 100644 index 67ba7e3..0000000 --- a/README.md +++ /dev/null @@ -1 +0,0 @@ -# Pydra tasks for ANTs commands diff --git a/README.rst b/README.rst index c52dfd7..9e28777 100644 --- a/README.rst +++ b/README.rst @@ -1,11 +1,11 @@ -=============================== +=========================== Pydra task package for ants 
-===============================
+===========================
 
-.. image:: https://github.com/nipype/pydra-ants/actions/workflows/pythonpackage.yaml/badge.svg
-   :target: https://github.com/nipype/pydra-ants/actions/workflows/pythonpackage.yaml
-.. .. image:: https://codecov.io/gh/nipype/pydra-ants/branch/main/graph/badge.svg?token=UIS0OGPST7
-..   :target: https://codecov.io/gh/nipype/pydra-ants
+.. image:: https://github.com/nipype/pydra-ants/actions/workflows/ci-cd.yaml/badge.svg
+   :target: https://github.com/nipype/pydra-ants/actions/workflows/ci-cd.yaml
+.. image:: https://codecov.io/gh/nipype/pydra-ants/branch/main/graph/badge.svg?token=UIS0OGPST7
+   :target: https://codecov.io/gh/nipype/pydra-ants
 .. image:: https://img.shields.io/pypi/pyversions/pydra-ants.svg
    :target: https://pypi.python.org/pypi/pydra-ants/
    :alt: Supported Python versions
@@ -27,7 +27,7 @@ Automatically generated tasks can be found in the `pydra.tasks.ants.auto` packag
 These packages should be treated with extreme caution as they likely do not pass testing.
 Generated tasks that have been edited and pass testing are imported into one or more of
 the `pydra.tasks.ants.v*` packages, corresponding to the version of the ants toolkit
-they are designed for. 
+they are designed for.
 
 Tests
 -----
@@ -71,6 +71,14 @@ Contributing to this package
 Developer installation
 ~~~~~~~~~~~~~~~~~~~~~~
 
+Install the `fileformats `__ packages
+corresponding to ANTs specific file formats
+
+
+.. code-block::
+
+    $ pip install -e ./related-packages/fileformats[dev]
+    $ pip install -e ./related-packages/fileformats-extras[dev]
 
 Install repo in developer mode from the source directory and install pre-commit to
 ensure consistent code-style and quality.
@@ -78,7 +86,7 @@ ensure consistent code-style and quality.
 .. code-block::
 
     $ pip install -e .[test,dev]
-$ pre-commit install
+    $ pre-commit install
 
 Next install the requirements for running the auto-conversion script and generate the
 Pydra task interfaces from their Nipype counterparts
@@ -93,7 +101,8 @@ The run the conversion script to convert Nipype interfaces to Pydra
 
    $ nipype-auto-conv/generate
 
-## Methodology
+Methodology
+~~~~~~~~~~~
 
 The development of this package is expected to have two phases
 
@@ -149,6 +158,6 @@ in the ``inputs > types`` and ``outputs > types`` dicts of the YAML spec.
 If the required file-type is not found implemented within fileformats, please see
 the `fileformats docs `__ for instructions on how to define
-new fileformat types, and see 
+new fileformat types, and see
 `fileformats-medimage-extras `__
 for an example on how to implement methods to generate sample data for them.
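For reference, the developer-installation and conversion steps added to the README above can be chained into a single session. The following is a minimal sketch assembled from the commands that appear in the README and in the CI workflow in this diff; it assumes a local clone of pydra-ants with the related-packages directories present, and that an ANTs installation is already on the PATH for the test step.

.. code-block::

    # editable installs of the ANTs-specific fileformats packages and the task package itself
    $ pip install -e ./related-packages/fileformats[dev]
    $ pip install -e ./related-packages/fileformats-extras[dev]
    $ pip install -e .[test,dev]
    $ pre-commit install
    # regenerate the auto-converted interfaces from the Nipype specs
    $ pip install -r ./nipype-auto-conv/requirements.txt
    $ ./nipype-auto-conv/generate
    # run the test suite with coverage, as the CI test job does
    $ pytest -sv ./pydra/tasks/ants --cov pydra.tasks.ants --cov-report xml

The CI workflow above follows the same sequence, except that the converted interfaces are produced once in the nipype-conv job and downloaded as a build artifact by the later jobs rather than being regenerated locally.
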
diff --git a/nipype-auto-conv/generate b/nipype-auto-conv/generate index a3729e8..42f5fcb 100755 --- a/nipype-auto-conv/generate +++ b/nipype-auto-conv/generate @@ -5,10 +5,11 @@ from warnings import warn from pathlib import Path import shutil from importlib import import_module +from tqdm import tqdm import yaml import nipype import nipype2pydra.utils -from nipype2pydra.task import TaskConverter +from nipype2pydra.task import get_converter SPECS_DIR = Path(__file__).parent / "specs" @@ -35,7 +36,10 @@ auto_dir = PKG_ROOT / "pydra" / "tasks" / PKG_NAME / "auto" if auto_dir.exists(): shutil.rmtree(auto_dir) -for fspath in sorted(SPECS_DIR.glob("**/*.yaml")): +all_interfaces = [] +for fspath in tqdm( + sorted(SPECS_DIR.glob("**/*.yaml")), "converting interfaces from Nipype to Pydra" +): with open(fspath) as f: spec = yaml.load(f, Loader=yaml.SafeLoader) @@ -49,13 +53,14 @@ for fspath in sorted(SPECS_DIR.glob("**/*.yaml")): module_name = nipype2pydra.utils.to_snake_case(spec["task_name"]) - converter = TaskConverter( + converter = get_converter( output_module=f"pydra.tasks.{PKG_NAME}.auto.{module_name}", callables_module=callables, # type: ignore **spec, ) converter.generate(PKG_ROOT) auto_init += f"from .{module_name} import {converter.task_name}\n" + all_interfaces.append(converter.task_name) with open(PKG_ROOT / "pydra" / "tasks" / PKG_NAME / "auto" / "_version.py", "w") as f: @@ -68,5 +73,9 @@ post_release = (nipype_version + nipype2pydra_version).replace(".", "") """ ) +auto_init += ( + "\n\n__all__ = [\n" + "\n".join(f' "{i}",' for i in all_interfaces) + "\n]\n" +) + with open(PKG_ROOT / "pydra" / "tasks" / PKG_NAME / "auto" / "__init__.py", "w") as f: f.write(auto_init) diff --git a/nipype-auto-conv/requirements.txt b/nipype-auto-conv/requirements.txt index 06ac987..5fbe6aa 100644 --- a/nipype-auto-conv/requirements.txt +++ b/nipype-auto-conv/requirements.txt @@ -2,10 +2,11 @@ black attrs>=22.1.0 nipype pydra +tqdm PyYAML>=6.0 fileformats >=0.8 fileformats-medimage >=0.4 fileformats-datascience >= 0.1 fileformats-medimage-ants traits -nipype2pydra \ No newline at end of file +nipype2pydra diff --git a/nipype-auto-conv/specs/affine_initializer.yaml b/nipype-auto-conv/specs/affine_initializer.yaml index 65c7ba1..b54afde 100644 --- a/nipype-auto-conv/specs/affine_initializer.yaml +++ b/nipype-auto-conv/specs/affine_initializer.yaml @@ -5,17 +5,17 @@ # # Docs # ---- -# +# # Initialize an affine transform (as in antsBrainExtraction.sh) -# +# # >>> from nipype.interfaces.ants import AffineInitializer # >>> init = AffineInitializer() # >>> init.inputs.fixed_image = 'fixed1.nii' # >>> init.inputs.moving_image = 'moving1.nii' # >>> init.cmdline # 'antsAffineInitializer 3 fixed1.nii moving1.nii transform.mat 15.000000 0.100000 0 10' -# -# +# +# task_name: AffineInitializer nipype_name: AffineInitializer nipype_module: nipype.interfaces.ants.utils @@ -34,9 +34,12 @@ inputs: # type=file|default=: reference image moving_image: medimage/nifti1 # type=file|default=: moving image - out_file: generic/file + out_file: Path # type=file: output transform file # type=file|default='transform.mat': output transform file + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. 
out_file: position: 1) outputs: @@ -88,15 +91,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -110,15 +113,15 @@ tests: moving_image: # type=file|default=: moving image imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -131,12 +134,12 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - fixed_image: + fixed_image: '"fixed1.nii"' # type=file|default=: reference image - moving_image: + moving_image: '"moving1.nii"' # type=file|default=: moving image imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/affine_initializer_callables.py b/nipype-auto-conv/specs/affine_initializer_callables.py index 47d5ce0..309afda 100644 --- a/nipype-auto-conv/specs/affine_initializer_callables.py +++ b/nipype-auto-conv/specs/affine_initializer_callables.py @@ -1 +1,20 @@ -"""Module to put any functions that are referred to in AffineInitializer.yaml""" +"""Module to put any functions that are referred to in the "callables" section of AffineInitializer.yaml""" + +import os + + +def out_file_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_file"] + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError + + +# Original source at L834 of /interfaces/ants/utils.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return {"out_file": os.path.abspath(inputs.out_file)} diff --git a/nipype-auto-conv/specs/ai.yaml b/nipype-auto-conv/specs/ai.yaml index e3b217a..5fda363 100644 --- a/nipype-auto-conv/specs/ai.yaml +++ b/nipype-auto-conv/specs/ai.yaml @@ -5,9 +5,9 @@ # # Docs # ---- -# +# # Calculate the optimal linear transform parameters for aligning two images. -# +# # Examples # -------- # >>> AI( @@ -17,7 +17,7 @@ # ... ).cmdline # 'antsAI -c [10,1e-06,10] -d 3 -m Mattes[structural.nii,epi.nii,32,Regular,1] # -o initialization.mat -p 0 -s [20,0.12] -t Affine[0.1] -v 0' -# +# # >>> AI(fixed_image='structural.nii', # ... moving_image='epi.nii', # ... metric=('Mattes', 32, 'Regular', 1), @@ -25,8 +25,8 @@ # ... ).cmdline # 'antsAI -c [10,1e-06,10] -d 3 -m Mattes[structural.nii,epi.nii,32,Regular,1] # -o initialization.mat -p 0 -s [20,0.12] -g [12.0,1x1x1] -t Affine[0.1] -v 0' -# -# +# +# task_name: AI nipype_name: AI nipype_module: nipype.interfaces.ants.utils @@ -43,15 +43,18 @@ inputs: # passed to the field in the automatically generated unittests. fixed_image: generic/file # type=file|default=: Image to which the moving_image should be transformed - moving_image: generic/file - # type=file|default=: Image that will be transformed to fixed_image fixed_image_mask: generic/file # type=file|default=: fixed mage mask + moving_image: generic/file + # type=file|default=: Image that will be transformed to fixed_image moving_image_mask: generic/file # type=file|default=: moving mage mask - output_transform: generic/file + output_transform: Path # type=file: output file name # type=file|default='initialization.mat': output file name + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. 
out_file: position: 1) outputs: @@ -113,15 +116,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true diff --git a/nipype-auto-conv/specs/ai_callables.py b/nipype-auto-conv/specs/ai_callables.py index dafef82..071a285 100644 --- a/nipype-auto-conv/specs/ai_callables.py +++ b/nipype-auto-conv/specs/ai_callables.py @@ -1 +1,18 @@ -"""Module to put any functions that are referred to in AI.yaml""" +"""Module to put any functions that are referred to in the "callables" section of AI.yaml""" + + +def output_transform_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["output_transform"] + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError + + +# Original source at L539 of /interfaces/ants/utils.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + return getattr(self, "_output") diff --git a/nipype-auto-conv/specs/ants.yaml b/nipype-auto-conv/specs/ants.yaml index 8108e61..4a00c3a 100644 --- a/nipype-auto-conv/specs/ants.yaml +++ b/nipype-auto-conv/specs/ants.yaml @@ -7,10 +7,10 @@ # ---- # ANTS wrapper for registration of images # (old, use Registration instead) -# +# # Examples # -------- -# +# # >>> from nipype.interfaces.ants import ANTS # >>> ants = ANTS() # >>> ants.inputs.dimension = 3 @@ -31,7 +31,7 @@ # >>> ants.inputs.number_of_affine_iterations = [10000,10000,10000,10000,10000] # >>> ants.cmdline # 'ANTS 3 --MI-option 32x16000 --image-metric CC[ T1.nii, resting.nii, 1, 5 ] --number-of-affine-iterations 10000x10000x10000x10000x10000 --number-of-iterations 50x35x15 --output-naming MY --regularization Gauss[3.0,0.0] --transformation-model SyN[0.25] --use-Histogram-Matching 1' -# +# task_name: ANTS nipype_name: ANTS nipype_module: nipype.interfaces.ants.registration @@ -50,6 +50,9 @@ inputs: # type=inputmultiobject|default=[]: image to which the moving image is warped moving_image: medimage/nifti1+list-of # type=inputmultiobject|default=[]: image to apply transformation to (generally a coregisteredfunctional) + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. 
out_file: position: 1) outputs: @@ -65,14 +68,14 @@ outputs: # passed to the field in the automatically generated unittests. affine_transform: generic/file # type=file: Affine transform file - warp_transform: generic/file - # type=file: Warping deformation field inverse_warp_transform: generic/file # type=file: Inverse warping deformation field metaheader: generic/file # type=file: VTK metaheader .mhd file metaheader_raw: generic/file # type=file: VTK metaheader .raw file + warp_transform: generic/file + # type=file: Warping deformation field callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields @@ -91,43 +94,43 @@ tests: moving_image: # type=inputmultiobject|default=[]: image to apply transformation to (generally a coregisteredfunctional) metric: - # type=list|default=[]: + # type=list|default=[]: metric_weight: # type=list|default=[1.0]: the metric weight(s) for each stage. The weights must sum to 1 per stage. radius: # type=list|default=[]: radius of the region (i.e. number of layers around a voxel/pixel) that is used for computing cross correlation output_transform_prefix: - # type=str|default='out': + # type=str|default='out': transformation_model: - # type=enum|default='Diff'|allowed['Diff','Elast','Exp','Greedy Exp','SyN']: + # type=enum|default='Diff'|allowed['Diff','Elast','Exp','Greedy Exp','SyN']: gradient_step_length: - # type=float|default=0.0: + # type=float|default=0.0: number_of_time_steps: - # type=int|default=0: + # type=int|default=0: delta_time: - # type=float|default=0.0: + # type=float|default=0.0: symmetry_type: - # type=float|default=0.0: + # type=float|default=0.0: use_histogram_matching: - # type=bool|default=True: + # type=bool|default=True: number_of_iterations: - # type=list|default=[]: + # type=list|default=[]: smoothing_sigmas: - # type=list|default=[]: + # type=list|default=[]: subsampling_factors: - # type=list|default=[]: + # type=list|default=[]: affine_gradient_descent_option: - # type=list|default=[]: + # type=list|default=[]: mi_option: - # type=list|default=[]: + # type=list|default=[]: regularization: - # type=enum|default='Gauss'|allowed['DMFFD','Gauss']: + # type=enum|default='Gauss'|allowed['DMFFD','Gauss']: regularization_gradient_field_sigma: - # type=float|default=0.0: + # type=float|default=0.0: regularization_deformation_field_sigma: - # type=float|default=0.0: + # type=float|default=0.0: number_of_affine_iterations: - # type=list|default=[]: + # type=list|default=[]: num_threads: # type=int|default=1: Number of ITK threads to use args: @@ -135,15 +138,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. 
Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -155,9 +158,9 @@ tests: dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) output_transform_prefix: '"MY"' - # type=str|default='out': + # type=str|default='out': metric: '["CC"]' - # type=list|default=[]: + # type=list|default=[]: fixed_image: # type=inputmultiobject|default=[]: image to which the moving image is warped moving_image: @@ -167,33 +170,33 @@ tests: radius: '[5]' # type=list|default=[]: radius of the region (i.e. number of layers around a voxel/pixel) that is used for computing cross correlation transformation_model: '"SyN"' - # type=enum|default='Diff'|allowed['Diff','Elast','Exp','Greedy Exp','SyN']: + # type=enum|default='Diff'|allowed['Diff','Elast','Exp','Greedy Exp','SyN']: gradient_step_length: '0.25' - # type=float|default=0.0: + # type=float|default=0.0: number_of_iterations: '[50, 35, 15]' - # type=list|default=[]: + # type=list|default=[]: use_histogram_matching: 'True' - # type=bool|default=True: + # type=bool|default=True: mi_option: '[32, 16000]' - # type=list|default=[]: + # type=list|default=[]: regularization: '"Gauss"' - # type=enum|default='Gauss'|allowed['DMFFD','Gauss']: + # type=enum|default='Gauss'|allowed['DMFFD','Gauss']: regularization_gradient_field_sigma: '3' - # type=float|default=0.0: + # type=float|default=0.0: regularization_deformation_field_sigma: '0' - # type=float|default=0.0: + # type=float|default=0.0: number_of_affine_iterations: '[10000,10000,10000,10000,10000]' - # type=list|default=[]: + # type=list|default=[]: imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -209,37 +212,37 @@ doctests: dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) output_transform_prefix: '"MY"' - # type=str|default='out': + # type=str|default='out': metric: '["CC"]' - # type=list|default=[]: - fixed_image: + # type=list|default=[]: + fixed_image: '["T1.nii"]' # type=inputmultiobject|default=[]: image to which the moving image is warped - moving_image: + moving_image: '["resting.nii"]' # type=inputmultiobject|default=[]: image to apply transformation to (generally a coregisteredfunctional) metric_weight: '[1.0]' # type=list|default=[1.0]: the metric weight(s) for each stage. The weights must sum to 1 per stage. radius: '[5]' # type=list|default=[]: radius of the region (i.e. 
number of layers around a voxel/pixel) that is used for computing cross correlation transformation_model: '"SyN"' - # type=enum|default='Diff'|allowed['Diff','Elast','Exp','Greedy Exp','SyN']: + # type=enum|default='Diff'|allowed['Diff','Elast','Exp','Greedy Exp','SyN']: gradient_step_length: '0.25' - # type=float|default=0.0: + # type=float|default=0.0: number_of_iterations: '[50, 35, 15]' - # type=list|default=[]: + # type=list|default=[]: use_histogram_matching: 'True' - # type=bool|default=True: + # type=bool|default=True: mi_option: '[32, 16000]' - # type=list|default=[]: + # type=list|default=[]: regularization: '"Gauss"' - # type=enum|default='Gauss'|allowed['DMFFD','Gauss']: + # type=enum|default='Gauss'|allowed['DMFFD','Gauss']: regularization_gradient_field_sigma: '3' - # type=float|default=0.0: + # type=float|default=0.0: regularization_deformation_field_sigma: '0' - # type=float|default=0.0: + # type=float|default=0.0: number_of_affine_iterations: '[10000,10000,10000,10000,10000]' - # type=list|default=[]: + # type=list|default=[]: imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/ants_callables.py b/nipype-auto-conv/specs/ants_callables.py index dc14cf0..93306b7 100644 --- a/nipype-auto-conv/specs/ants_callables.py +++ b/nipype-auto-conv/specs/ants_callables.py @@ -1 +1,60 @@ -"""Module to put any functions that are referred to in ANTS.yaml""" +"""Module to put any functions that are referred to in the "callables" section of ANTS.yaml""" + +import os + + +def affine_transform_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["affine_transform"] + + +def inverse_warp_transform_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["inverse_warp_transform"] + + +def metaheader_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["metaheader"] + + +def metaheader_raw_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["metaheader_raw"] + + +def warp_transform_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["warp_transform"] + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError + + +# Original source at L242 of /interfaces/ants/registration.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + outputs = {} + outputs["affine_transform"] = os.path.abspath( + inputs.output_transform_prefix + "Affine.txt" + ) + outputs["warp_transform"] = os.path.abspath( + inputs.output_transform_prefix + "Warp.nii.gz" + ) + outputs["inverse_warp_transform"] = os.path.abspath( + inputs.output_transform_prefix + "InverseWarp.nii.gz" + ) + # 
outputs['metaheader'] = os.path.abspath(inputs.output_transform_prefix + 'velocity.mhd') + # outputs['metaheader_raw'] = os.path.abspath(inputs.output_transform_prefix + 'velocity.raw') + return outputs diff --git a/nipype-auto-conv/specs/ants_introduction.yaml b/nipype-auto-conv/specs/ants_introduction.yaml index eff4899..f016eb4 100644 --- a/nipype-auto-conv/specs/ants_introduction.yaml +++ b/nipype-auto-conv/specs/ants_introduction.yaml @@ -6,10 +6,10 @@ # Docs # ---- # Uses ANTS to generate matrices to warp data from one space to another. -# +# # Examples # -------- -# +# # >>> from nipype.interfaces.ants.legacy import antsIntroduction # >>> warp = antsIntroduction() # >>> warp.inputs.reference_image = 'Template_6.nii' @@ -17,8 +17,8 @@ # >>> warp.inputs.max_iterations = [30,90,20] # >>> warp.cmdline # 'antsIntroduction.sh -d 3 -i structural.nii -m 30x90x20 -o ants_ -r Template_6.nii -t GR' -# -# +# +# task_name: antsIntroduction nipype_name: antsIntroduction nipype_module: nipype.interfaces.ants.legacy @@ -33,10 +33,13 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - reference_image: medimage/nifti1 - # type=file|default=: template file to warp to input_image: medimage/nifti1 # type=file|default=: input image to warp to template + reference_image: medimage/nifti1 + # type=file|default=: template file to warp to + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -52,14 +55,14 @@ outputs: # passed to the field in the automatically generated unittests. affine_transformation: generic/file # type=file: affine (prefix_Affine.txt) - warp_field: generic/file - # type=file: warp field (prefix_Warp.nii) - inverse_warp_field: generic/file - # type=file: inverse warp field (prefix_InverseWarp.nii) input_file: generic/file # type=file: input image (prefix_repaired.nii) + inverse_warp_field: generic/file + # type=file: inverse warp field (prefix_InverseWarp.nii) output_file: generic/file # type=file: output image (prefix_deformed.nii) + warp_field: generic/file + # type=file: warp field (prefix_Warp.nii) callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields @@ -100,15 +103,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. 
Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -124,15 +127,15 @@ tests: max_iterations: '[30,90,20]' # type=list|default=[]: maximum number of iterations (must be list of integers in the form [J,K,L...]: J = coarsest resolution iterations, K = middle resolution iterations, L = fine resolution iterations imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -145,14 +148,14 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - reference_image: + reference_image: '"Template_6.nii"' # type=file|default=: template file to warp to - input_image: + input_image: '"structural.nii"' # type=file|default=: input image to warp to template max_iterations: '[30,90,20]' # type=list|default=[]: maximum number of iterations (must be list of integers in the form [J,K,L...]: J = coarsest resolution iterations, K = middle resolution iterations, L = fine resolution iterations imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/ants_introduction_callables.py b/nipype-auto-conv/specs/ants_introduction_callables.py index d6e4f62..a68abea 100644 --- a/nipype-auto-conv/specs/ants_introduction_callables.py +++ b/nipype-auto-conv/specs/ants_introduction_callables.py @@ -1 +1,74 @@ -"""Module to put any functions that are referred to in antsIntroduction.yaml""" +"""Module to put any functions that are referred to in the "callables" section of antsIntroduction.yaml""" + +import attrs +import os + + +def affine_transformation_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["affine_transformation"] + + +def input_file_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["input_file"] + + +def inverse_warp_field_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["inverse_warp_field"] + + +def output_file_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["output_file"] + + +def warp_field_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["warp_field"] + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError + + +# Original source at L141 of /interfaces/ants/legacy.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + outputs = {} + transmodel = inputs.transformation_model + + # When transform is set as 'RI'/'RA', wrap fields should not be expected + # The default transformation is GR, which outputs the wrap fields + if (transmodel is attrs.NOTHING) or ( + (transmodel is not attrs.NOTHING) and transmodel not in ["RI", "RA"] + ): + outputs["warp_field"] = os.path.join( + output_dir, inputs.out_prefix + "Warp.nii.gz" + ) + outputs["inverse_warp_field"] = os.path.join( + output_dir, inputs.out_prefix + "InverseWarp.nii.gz" + ) + + outputs["affine_transformation"] = os.path.join( + output_dir, inputs.out_prefix + "Affine.txt" + ) + outputs["input_file"] = os.path.join( + output_dir, inputs.out_prefix + "repaired.nii.gz" + ) + outputs["output_file"] = os.path.join( + output_dir, inputs.out_prefix + "deformed.nii.gz" + ) + + return outputs diff --git a/nipype-auto-conv/specs/apply_transforms.yaml b/nipype-auto-conv/specs/apply_transforms.yaml index f3e8d35..fdcb2a7 100644 --- a/nipype-auto-conv/specs/apply_transforms.yaml +++ b/nipype-auto-conv/specs/apply_transforms.yaml @@ -7,10 +7,10 @@ # ---- # ApplyTransforms, applied to an input image, transforms it according to a # reference image and a transform (or a set of transforms). 
-# +# # Examples # -------- -# +# # >>> from nipype.interfaces.ants import ApplyTransforms # >>> at = ApplyTransforms() # >>> at.inputs.input_image = 'moving1.nii' @@ -18,7 +18,7 @@ # >>> at.inputs.transforms = 'identity' # >>> at.cmdline # 'antsApplyTransforms --default-value 0 --float 0 --input moving1.nii --interpolation Linear --output moving1_trans.nii --reference-image fixed1.nii --transform identity' -# +# # >>> at = ApplyTransforms() # >>> at.inputs.dimension = 3 # >>> at.inputs.input_image = 'moving1.nii' @@ -30,7 +30,7 @@ # >>> at.inputs.invert_transform_flags = [False, True] # >>> at.cmdline # 'antsApplyTransforms --default-value 0 --dimensionality 3 --float 0 --input moving1.nii --interpolation Linear --output deformed_moving1.nii --reference-image fixed1.nii --transform ants_Warp.nii.gz --transform [ trans.mat, 1 ]' -# +# # >>> at1 = ApplyTransforms() # >>> at1.inputs.dimension = 3 # >>> at1.inputs.input_image = 'moving1.nii' @@ -43,9 +43,9 @@ # >>> at1.inputs.invert_transform_flags = [False, False] # >>> at1.cmdline # 'antsApplyTransforms --default-value 0 --dimensionality 3 --float 0 --input moving1.nii --interpolation BSpline[ 5 ] --output deformed_moving1.nii --reference-image fixed1.nii --transform ants_Warp.nii.gz --transform trans.mat' -# +# # Identity transforms may be used as part of a chain: -# +# # >>> at2 = ApplyTransforms() # >>> at2.inputs.dimension = 3 # >>> at2.inputs.input_image = 'moving1.nii' @@ -57,7 +57,7 @@ # >>> at2.inputs.transforms = ['identity', 'ants_Warp.nii.gz', 'trans.mat'] # >>> at2.cmdline # 'antsApplyTransforms --default-value 0 --dimensionality 3 --float 0 --input moving1.nii --interpolation BSpline[ 5 ] --output deformed_moving1.nii --reference-image fixed1.nii --transform identity --transform ants_Warp.nii.gz --transform trans.mat' -# +# task_name: ApplyTransforms nipype_name: ApplyTransforms nipype_module: nipype.interfaces.ants.resampling @@ -76,10 +76,9 @@ inputs: # type=file|default=: image to apply transformation to (generally a coregistered functional) reference_image: medimage/nifti1 # type=file|default=: reference image space that you wish to warp INTO - transforms: medimage/nifti-gz+list-of - # type=inputmultiobject|default=[]: transform files: will be applied in reverse order. For example, the last specified transform will be applied first. - invert_transform_flags: generic/file+list-of - # type=inputmultiobject|default=[]: + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -124,15 +123,15 @@ tests: reference_image: # type=file|default=: reference image space that you wish to warp INTO interpolation: - # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: + # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: interpolation_parameters: - # type=traitcompound|default=None: + # type=traitcompound|default=None: transforms: # type=inputmultiobject|default=[]: transform files: will be applied in reverse order. For example, the last specified transform will be applied first. 
invert_transform_flags: - # type=inputmultiobject|default=[]: + # type=inputmultiobject|default=[]: default_value: - # type=float|default=0.0: + # type=float|default=0.0: print_out_composite_warp_file: # type=bool|default=False: output a composite warp file instead of a transformed image float: @@ -144,15 +143,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -165,18 +164,18 @@ tests: # type=file|default=: image to apply transformation to (generally a coregistered functional) reference_image: # type=file|default=: reference image space that you wish to warp INTO - transforms: + transforms: '"identity"' # type=inputmultiobject|default=[]: transform files: will be applied in reverse order. For example, the last specified transform will be applied first. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -195,23 +194,23 @@ tests: # type=file: Warped image # type=str|default='': output file name interpolation: '"Linear"' - # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: + # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: default_value: '0' - # type=float|default=0.0: - transforms: + # type=float|default=0.0: + transforms: '["ants_Warp.nii.gz", "trans.mat"]' # type=inputmultiobject|default=[]: transform files: will be applied in reverse order. For example, the last specified transform will be applied first. 
- invert_transform_flags: - # type=inputmultiobject|default=[]: + invert_transform_flags: '[False, True]' + # type=inputmultiobject|default=[]: imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -230,25 +229,25 @@ tests: # type=file: Warped image # type=str|default='': output file name interpolation: '"BSpline"' - # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: + # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: interpolation_parameters: (5,) - # type=traitcompound|default=None: + # type=traitcompound|default=None: default_value: '0' - # type=float|default=0.0: - transforms: + # type=float|default=0.0: + transforms: '["ants_Warp.nii.gz", "trans.mat"]' # type=inputmultiobject|default=[]: transform files: will be applied in reverse order. For example, the last specified transform will be applied first. - invert_transform_flags: - # type=inputmultiobject|default=[]: + invert_transform_flags: '[False, False]' + # type=inputmultiobject|default=[]: imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. 
Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -267,23 +266,23 @@ tests: # type=file: Warped image # type=str|default='': output file name interpolation: '"BSpline"' - # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: + # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: interpolation_parameters: (5,) - # type=traitcompound|default=None: + # type=traitcompound|default=None: default_value: '0' - # type=float|default=0.0: - transforms: + # type=float|default=0.0: + transforms: '["identity", "ants_Warp.nii.gz", "trans.mat"]' # type=inputmultiobject|default=[]: transform files: will be applied in reverse order. For example, the last specified transform will be applied first. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -296,14 +295,14 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - input_image: + input_image: '"moving1.nii"' # type=file|default=: image to apply transformation to (generally a coregistered functional) - reference_image: + reference_image: '"fixed1.nii"' # type=file|default=: reference image space that you wish to warp INTO - transforms: + transforms: '"identity"' # type=inputmultiobject|default=[]: transform files: will be applied in reverse order. For example, the last specified transform will be applied first. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -315,23 +314,23 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=2|allowed[2,3,4]: This option forces the image to be treated as a specified-dimensional image. If not specified, antsWarp tries to infer the dimensionality from the input image. 
- input_image: + input_image: '"moving1.nii"' # type=file|default=: image to apply transformation to (generally a coregistered functional) - reference_image: + reference_image: '"fixed1.nii"' # type=file|default=: reference image space that you wish to warp INTO output_image: '"deformed_moving1.nii"' # type=file: Warped image # type=str|default='': output file name interpolation: '"Linear"' - # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: + # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: default_value: '0' - # type=float|default=0.0: - transforms: + # type=float|default=0.0: + transforms: '["ants_Warp.nii.gz", "trans.mat"]' # type=inputmultiobject|default=[]: transform files: will be applied in reverse order. For example, the last specified transform will be applied first. - invert_transform_flags: - # type=inputmultiobject|default=[]: + invert_transform_flags: '[False, True]' + # type=inputmultiobject|default=[]: imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -343,25 +342,25 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=2|allowed[2,3,4]: This option forces the image to be treated as a specified-dimensional image. If not specified, antsWarp tries to infer the dimensionality from the input image. - input_image: + input_image: '"moving1.nii"' # type=file|default=: image to apply transformation to (generally a coregistered functional) - reference_image: + reference_image: '"fixed1.nii"' # type=file|default=: reference image space that you wish to warp INTO output_image: '"deformed_moving1.nii"' # type=file: Warped image # type=str|default='': output file name interpolation: '"BSpline"' - # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: + # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: interpolation_parameters: (5,) - # type=traitcompound|default=None: + # type=traitcompound|default=None: default_value: '0' - # type=float|default=0.0: - transforms: + # type=float|default=0.0: + transforms: '["ants_Warp.nii.gz", "trans.mat"]' # type=inputmultiobject|default=[]: transform files: will be applied in reverse order. For example, the last specified transform will be applied first. 
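When output_image is left unset, the interface generates one from input_image plus the out_postfix default ('_trans'), which is where the 'moving1_trans.nii' in the first cmdline example at the top of this spec comes from. A standalone sketch of that derivation, mirroring the _gen_filename/split_filename helpers in the adjacent apply_transforms_callables.py shown further below:

    import os.path as op

    input_image = "moving1.nii"   # placeholder
    out_postfix = "_trans"        # nipype default for ApplyTransforms
    stem, ext = op.splitext(op.basename(input_image))
    print(stem + out_postfix + ext)  # moving1_trans.nii
    # the real split_filename additionally treats double extensions such as .nii.gz as one unit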
- invert_transform_flags: - # type=inputmultiobject|default=[]: + invert_transform_flags: '[False, False]' + # type=inputmultiobject|default=[]: imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -373,23 +372,23 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=2|allowed[2,3,4]: This option forces the image to be treated as a specified-dimensional image. If not specified, antsWarp tries to infer the dimensionality from the input image. - input_image: + input_image: '"moving1.nii"' # type=file|default=: image to apply transformation to (generally a coregistered functional) - reference_image: + reference_image: '"fixed1.nii"' # type=file|default=: reference image space that you wish to warp INTO output_image: '"deformed_moving1.nii"' # type=file: Warped image # type=str|default='': output file name interpolation: '"BSpline"' - # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: + # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: interpolation_parameters: (5,) - # type=traitcompound|default=None: + # type=traitcompound|default=None: default_value: '0' - # type=float|default=0.0: - transforms: + # type=float|default=0.0: + transforms: '["identity", "ants_Warp.nii.gz", "trans.mat"]' # type=inputmultiobject|default=[]: transform files: will be applied in reverse order. For example, the last specified transform will be applied first. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/apply_transforms_callables.py b/nipype-auto-conv/specs/apply_transforms_callables.py index eaafa83..303b094 100644 --- a/nipype-auto-conv/specs/apply_transforms_callables.py +++ b/nipype-auto-conv/specs/apply_transforms_callables.py @@ -1 +1,93 @@ -"""Module to put any functions that are referred to in ApplyTransforms.yaml""" +"""Module to put any functions that are referred to in the "callables" section of ApplyTransforms.yaml""" + +import attrs +import os +import os.path as op + + +def output_image_default(inputs): + return _gen_filename("output_image", inputs=inputs) + + +def output_image_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["output_image"] + + +# Original source at L465 of /interfaces/ants/resampling.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + if name == "output_image": + output = inputs.output_image + if output is attrs.NOTHING: + _, name, ext = split_filename(inputs.input_image) + output = name + inputs.out_postfix + ext + return output + return None + + +# Original source at L522 of /interfaces/ants/resampling.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + outputs = {} + outputs["output_image"] = os.path.abspath( + _gen_filename( + "output_image", + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + ) + return outputs + + +# Original source at L58 of /utils/filemanip.py +def split_filename(fname): + """Split a filename into parts: path, base filename and extension. + + Parameters + ---------- + fname : str + file or path name + + Returns + ------- + pth : str + base path from fname + fname : str + filename from fname, without extension + ext : str + file extension from fname + + Examples + -------- + >>> from nipype.utils.filemanip import split_filename + >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz') + >>> pth + '/home/data' + + >>> fname + 'subject' + + >>> ext + '.nii.gz' + + """ + + special_extensions = [".nii.gz", ".tar.gz", ".niml.dset"] + + pth = op.dirname(fname) + fname = op.basename(fname) + + ext = None + for special_ext in special_extensions: + ext_len = len(special_ext) + if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()): + ext = fname[-ext_len:] + fname = fname[:-ext_len] + break + if not ext: + fname, ext = op.splitext(fname) + + return pth, fname, ext diff --git a/nipype-auto-conv/specs/apply_transforms_to_points.yaml b/nipype-auto-conv/specs/apply_transforms_to_points.yaml index da0206f..f9e8c51 100644 --- a/nipype-auto-conv/specs/apply_transforms_to_points.yaml +++ b/nipype-auto-conv/specs/apply_transforms_to_points.yaml @@ -7,10 +7,10 @@ # ---- # ApplyTransformsToPoints, applied to an CSV file, transforms coordinates # using provided transform (or a set of transforms). 
-# +# # Examples # -------- -# +# # >>> from nipype.interfaces.ants import ApplyTransforms # >>> at = ApplyTransformsToPoints() # >>> at.inputs.dimension = 3 @@ -19,9 +19,9 @@ # >>> at.inputs.invert_transform_flags = [False, False] # >>> at.cmdline # 'antsApplyTransformsToPoints --dimensionality 3 --input moving.csv --output moving_transformed.csv --transform [ trans.mat, 0 ] --transform [ ants_Warp.nii.gz, 0 ]' -# -# -# +# +# +# task_name: ApplyTransformsToPoints nipype_name: ApplyTransformsToPoints nipype_module: nipype.interfaces.ants.resampling @@ -40,6 +40,9 @@ inputs: # type=file|default=: Currently, the only input supported is a csv file with columns including x,y (2D), x,y,z (3D) or x,y,z,t,label (4D) column headers. The points should be defined in physical space. If in doubt how to convert coordinates from your files to the space required by antsApplyTransformsToPoints try creating/drawing a simple label volume with only one voxel set to 1 and all others set to 0. Write down the voxel coordinates. Then use ImageMaths LabelStats to find out what coordinates for this voxel antsApplyTransformsToPoints is expecting. transforms: datascience/text-matrix+list-of # type=list|default=[]: transforms that will be applied to the points + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -85,15 +88,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. 
Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -111,15 +114,15 @@ tests: invert_transform_flags: '[False, False]' # type=list|default=[]: list indicating if a transform should be reversed imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -134,14 +137,14 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=2|allowed[2,3,4]: This option forces the image to be treated as a specified-dimensional image. If not specified, antsWarp tries to infer the dimensionality from the input image. - input_file: + input_file: '"moving.csv"' # type=file|default=: Currently, the only input supported is a csv file with columns including x,y (2D), x,y,z (3D) or x,y,z,t,label (4D) column headers. The points should be defined in physical space. If in doubt how to convert coordinates from your files to the space required by antsApplyTransformsToPoints try creating/drawing a simple label volume with only one voxel set to 1 and all others set to 0. Write down the voxel coordinates. Then use ImageMaths LabelStats to find out what coordinates for this voxel antsApplyTransformsToPoints is expecting. - transforms: + transforms: '["trans.mat", "ants_Warp.nii.gz"]' # type=list|default=[]: transforms that will be applied to the points invert_transform_flags: '[False, False]' # type=list|default=[]: list indicating if a transform should be reversed imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/apply_transforms_to_points_callables.py b/nipype-auto-conv/specs/apply_transforms_to_points_callables.py index de371e7..78f8d13 100644 --- a/nipype-auto-conv/specs/apply_transforms_to_points_callables.py +++ b/nipype-auto-conv/specs/apply_transforms_to_points_callables.py @@ -1 +1,203 @@ -"""Module to put any functions that are referred to in ApplyTransformsToPoints.yaml""" +"""Module to put any functions that are referred to in the "callables" section of ApplyTransformsToPoints.yaml""" + +import attrs +import logging +import os +import os.path as op + + +def output_file_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["output_file"] + + +iflogger = logging.getLogger("nipype.interface") + + +# Original source at L809 of /interfaces/base/core.py +def _filename_from_source( + name, chain=None, inputs=None, stdout=None, stderr=None, output_dir=None +): + if chain is None: + chain = [] + + trait_spec = inputs.trait(name) + retval = getattr(inputs, name) + source_ext = None + if (retval is attrs.NOTHING) or "%s" in retval: + if not trait_spec.name_source: + return retval + + # Do not generate filename when excluded by other inputs + if any( + (getattr(inputs, field) is not attrs.NOTHING) + for field in trait_spec.xor or () + ): + return retval + + # Do not generate filename when required fields are missing + if not all( + (getattr(inputs, field) is not attrs.NOTHING) + for field in trait_spec.requires or () + ): + return retval + + if (retval is not attrs.NOTHING) and "%s" in retval: + name_template = retval + else: + name_template = trait_spec.name_template + if not name_template: + name_template = "%s_generated" + + ns = trait_spec.name_source + while isinstance(ns, (list, tuple)): + if len(ns) > 1: + iflogger.warning("Only one name_source per trait is allowed") + ns = ns[0] + + if not isinstance(ns, (str, bytes)): + raise ValueError( + "name_source of '{}' trait should be an input trait " + "name, but a type {} object was found".format(name, type(ns)) + ) + + if getattr(inputs, ns) is not attrs.NOTHING: + name_source = ns + source = getattr(inputs, name_source) + while isinstance(source, list): + source = source[0] + + # special treatment for files + try: + _, base, source_ext = split_filename(source) + except (AttributeError, TypeError): + base = source + else: + if name in chain: + raise NipypeInterfaceError("Mutually pointing name_sources") + + chain.append(name) + base = _filename_from_source( + ns, + chain, + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + if base is not attrs.NOTHING: + _, _, source_ext = split_filename(base) + else: + # Do not generate filename when required fields are missing + return retval + + chain = None + retval = name_template % base + _, _, ext = split_filename(retval) + if trait_spec.keep_extension and (ext or source_ext): + if (ext is None or not ext) and source_ext: + retval = retval + source_ext + else: + retval = _overload_extension( + retval, + name, + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + return retval + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError + + +# Original source at L891 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + metadata = 
dict(name_source=lambda t: t is not None) + traits = inputs.traits(**metadata) + if traits: + outputs = {} + for name, trait_spec in list(traits.items()): + out_name = name + if trait_spec.output_name is not None: + out_name = trait_spec.output_name + fname = _filename_from_source( + name, inputs=inputs, stdout=stdout, stderr=stderr, output_dir=output_dir + ) + if fname is not attrs.NOTHING: + outputs[out_name] = os.path.abspath(fname) + return outputs + + +# Original source at L888 of /interfaces/base/core.py +def _overload_extension( + value, name=None, inputs=None, stdout=None, stderr=None, output_dir=None +): + return value + + +# Original source at L58 of /utils/filemanip.py +def split_filename(fname): + """Split a filename into parts: path, base filename and extension. + + Parameters + ---------- + fname : str + file or path name + + Returns + ------- + pth : str + base path from fname + fname : str + filename from fname, without extension + ext : str + file extension from fname + + Examples + -------- + >>> from nipype.utils.filemanip import split_filename + >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz') + >>> pth + '/home/data' + + >>> fname + 'subject' + + >>> ext + '.nii.gz' + + """ + + special_extensions = [".nii.gz", ".tar.gz", ".niml.dset"] + + pth = op.dirname(fname) + fname = op.basename(fname) + + ext = None + for special_ext in special_extensions: + ext_len = len(special_ext) + if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()): + ext = fname[-ext_len:] + fname = fname[:-ext_len] + break + if not ext: + fname, ext = op.splitext(fname) + + return pth, fname, ext + + +# Original source at L125 of /interfaces/base/support.py +class NipypeInterfaceError(Exception): + """Custom error for interfaces""" + + def __init__(self, value): + self.value = value + + def __str__(self): + return "{}".format(self.value) diff --git a/nipype-auto-conv/specs/atropos.yaml b/nipype-auto-conv/specs/atropos.yaml index 5c596a5..1dd546e 100644 --- a/nipype-auto-conv/specs/atropos.yaml +++ b/nipype-auto-conv/specs/atropos.yaml @@ -5,15 +5,15 @@ # # Docs # ---- -# +# # A multivariate n-class segmentation algorithm. -# +# # A finite mixture modeling (FMM) segmentation approach with possibilities for # specifying prior constraints. These prior constraints include the specification # of a prior label image, prior probability images (one for each class), and/or an # MRF prior to enforce spatial smoothing of the labels. Similar algorithms include # FAST and SPM. -# +# # Examples # -------- # >>> from nipype.interfaces.ants import Atropos @@ -30,7 +30,7 @@ # --likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06] # --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1] # --use-random-seed 1' -# +# # >>> at = Atropos( # ... dimension=3, intensity_images='structural.nii', mask_image='mask.nii', # ... number_of_tissue_classes=2, likelihood_model='Gaussian', save_posteriors=True, @@ -45,7 +45,7 @@ # --likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06] # --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1] # --use-random-seed 1' -# +# # >>> at = Atropos( # ... dimension=3, intensity_images='structural.nii', mask_image='mask.nii', # ... 
number_of_tissue_classes=2, likelihood_model='Gaussian', save_posteriors=True, @@ -63,7 +63,7 @@ # --mrf [0.2,1x1x1] --convergence [5,1e-06] # --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] # --posterior-formulation Socrates[1] --use-random-seed 1' -# +# # >>> at = Atropos( # ... dimension=3, intensity_images='structural.nii', mask_image='mask.nii', # ... number_of_tissue_classes=2, likelihood_model='Gaussian', save_posteriors=True, @@ -80,8 +80,8 @@ # --likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06] # --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1] # --use-random-seed 1' -# -# +# +# task_name: Atropos nipype_name: Atropos nipype_module: nipype.interfaces.ants.segmentation @@ -97,9 +97,16 @@ inputs: # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. intensity_images: medimage/nifti1+list-of - # type=inputmultiobject|default=[]: + # type=inputmultiobject|default=[]: mask_image: medimage/nifti1 - # type=file|default=: + # type=file|default=: + out_classified_image_name: Path + # type=file|default=: + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields + out_classified_image_name: out_classified_image_name_default + # type=file|default=: metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -114,14 +121,14 @@ outputs: # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. classified_image: generic/file - # type=file: + # type=file: + posteriors: generic/file+list-of + # type=outputmultiobject: callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: # dict[str, str] - `output_file_template` values to be provided to output fields - out_classified_image_name: out_classified_image_name - # type=file|default=: requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -131,47 +138,47 @@ tests: dimension: # type=enum|default=3|allowed[2,3,4]: image dimension (2, 3, or 4) intensity_images: - # type=inputmultiobject|default=[]: + # type=inputmultiobject|default=[]: mask_image: - # type=file|default=: + # type=file|default=: initialization: - # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']: + # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']: kmeans_init_centers: - # type=list|default=[]: + # type=list|default=[]: prior_image: # type=traitcompound|default=None: either a string pattern (e.g., 'prior%02d.nii') or an existing vector-image file. 
number_of_tissue_classes: - # type=int|default=0: + # type=int|default=0: prior_weighting: - # type=float|default=0.0: + # type=float|default=0.0: prior_probability_threshold: - # type=float|default=0.0: + # type=float|default=0.0: likelihood_model: - # type=str|default='': + # type=str|default='': mrf_smoothing_factor: - # type=float|default=0.0: + # type=float|default=0.0: mrf_radius: - # type=list|default=[]: + # type=list|default=[]: icm_use_synchronous_update: - # type=bool|default=False: + # type=bool|default=False: maximum_number_of_icm_terations: - # type=int|default=0: + # type=int|default=0: n_iterations: - # type=int|default=0: + # type=int|default=0: convergence_threshold: - # type=float|default=0.0: + # type=float|default=0.0: posterior_formulation: - # type=str|default='': + # type=str|default='': use_random_seed: # type=bool|default=True: use random seed value over constant use_mixture_model_proportions: - # type=bool|default=False: + # type=bool|default=False: out_classified_image_name: - # type=file|default=: + # type=file|default=: save_posteriors: - # type=bool|default=False: + # type=bool|default=False: output_posteriors_name_template: - # type=str|default='POSTERIOR_%02d.nii.gz': + # type=str|default='POSTERIOR_%02d.nii.gz': num_threads: # type=int|default=1: Number of ITK threads to use args: @@ -179,15 +186,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. 
Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -197,45 +204,45 @@ tests: # dict[str, str] - values to provide to inputs fields in the task initialisation # (if not specified, will try to choose a sensible value) initialization: '"Random"' - # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']: + # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']: dimension: '3' # type=enum|default=3|allowed[2,3,4]: image dimension (2, 3, or 4) intensity_images: - # type=inputmultiobject|default=[]: + # type=inputmultiobject|default=[]: mask_image: - # type=file|default=: + # type=file|default=: number_of_tissue_classes: '2' - # type=int|default=0: + # type=int|default=0: likelihood_model: '"Gaussian"' - # type=str|default='': + # type=str|default='': save_posteriors: 'True' - # type=bool|default=False: + # type=bool|default=False: mrf_smoothing_factor: '0.2' - # type=float|default=0.0: + # type=float|default=0.0: mrf_radius: '[1, 1, 1]' - # type=list|default=[]: + # type=list|default=[]: icm_use_synchronous_update: 'True' - # type=bool|default=False: + # type=bool|default=False: maximum_number_of_icm_terations: '1' - # type=int|default=0: + # type=int|default=0: n_iterations: '5' - # type=int|default=0: + # type=int|default=0: convergence_threshold: '0.000001' - # type=float|default=0.0: + # type=float|default=0.0: posterior_formulation: '"Socrates"' - # type=str|default='': + # type=str|default='': use_mixture_model_proportions: 'True' - # type=bool|default=False: + # type=bool|default=False: imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. 
Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -245,47 +252,47 @@ tests: # dict[str, str] - values to provide to inputs fields in the task initialisation # (if not specified, will try to choose a sensible value) initialization: '"KMeans"' - # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']: + # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']: kmeans_init_centers: '[100, 200]' - # type=list|default=[]: + # type=list|default=[]: dimension: '3' # type=enum|default=3|allowed[2,3,4]: image dimension (2, 3, or 4) intensity_images: - # type=inputmultiobject|default=[]: + # type=inputmultiobject|default=[]: mask_image: - # type=file|default=: + # type=file|default=: number_of_tissue_classes: '2' - # type=int|default=0: + # type=int|default=0: likelihood_model: '"Gaussian"' - # type=str|default='': + # type=str|default='': save_posteriors: 'True' - # type=bool|default=False: + # type=bool|default=False: mrf_smoothing_factor: '0.2' - # type=float|default=0.0: + # type=float|default=0.0: mrf_radius: '[1, 1, 1]' - # type=list|default=[]: + # type=list|default=[]: icm_use_synchronous_update: 'True' - # type=bool|default=False: + # type=bool|default=False: maximum_number_of_icm_terations: '1' - # type=int|default=0: + # type=int|default=0: n_iterations: '5' - # type=int|default=0: + # type=int|default=0: convergence_threshold: '0.000001' - # type=float|default=0.0: + # type=float|default=0.0: posterior_formulation: '"Socrates"' - # type=str|default='': + # type=str|default='': use_mixture_model_proportions: 'True' - # type=bool|default=False: + # type=bool|default=False: imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -295,51 +302,51 @@ tests: # dict[str, str] - values to provide to inputs fields in the task initialisation # (if not specified, will try to choose a sensible value) initialization: '"PriorProbabilityImages"' - # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']: + # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']: prior_image: '"BrainSegmentationPrior%02d.nii.gz"' # type=traitcompound|default=None: either a string pattern (e.g., 'prior%02d.nii') or an existing vector-image file. 
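The prior_image value just above is a printf-style pattern. Like output_posteriors_name_template ('POSTERIOR_%02d.nii.gz'), it is expanded once per tissue class; the posteriors expansion with indices starting at 1 appears in atropos_callables.py further down this diff, and the per-class prior expansion is assumed here to follow the same 1-based convention. A quick illustration:

    n_classes = 2  # number_of_tissue_classes used in these tests
    priors = ["BrainSegmentationPrior%02d.nii.gz" % (i + 1) for i in range(n_classes)]
    posteriors = ["POSTERIOR_%02d.nii.gz" % (i + 1) for i in range(n_classes)]
    print(priors)      # ['BrainSegmentationPrior01.nii.gz', 'BrainSegmentationPrior02.nii.gz']
    print(posteriors)  # ['POSTERIOR_01.nii.gz', 'POSTERIOR_02.nii.gz']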
prior_weighting: '0.8' - # type=float|default=0.0: + # type=float|default=0.0: prior_probability_threshold: '0.0000001' - # type=float|default=0.0: + # type=float|default=0.0: dimension: '3' # type=enum|default=3|allowed[2,3,4]: image dimension (2, 3, or 4) intensity_images: - # type=inputmultiobject|default=[]: + # type=inputmultiobject|default=[]: mask_image: - # type=file|default=: + # type=file|default=: number_of_tissue_classes: '2' - # type=int|default=0: + # type=int|default=0: likelihood_model: '"Gaussian"' - # type=str|default='': + # type=str|default='': save_posteriors: 'True' - # type=bool|default=False: + # type=bool|default=False: mrf_smoothing_factor: '0.2' - # type=float|default=0.0: + # type=float|default=0.0: mrf_radius: '[1, 1, 1]' - # type=list|default=[]: + # type=list|default=[]: icm_use_synchronous_update: 'True' - # type=bool|default=False: + # type=bool|default=False: maximum_number_of_icm_terations: '1' - # type=int|default=0: + # type=int|default=0: n_iterations: '5' - # type=int|default=0: + # type=int|default=0: convergence_threshold: '0.000001' - # type=float|default=0.0: + # type=float|default=0.0: posterior_formulation: '"Socrates"' - # type=str|default='': + # type=str|default='': use_mixture_model_proportions: 'True' - # type=bool|default=False: + # type=bool|default=False: imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -349,49 +356,49 @@ tests: # dict[str, str] - values to provide to inputs fields in the task initialisation # (if not specified, will try to choose a sensible value) initialization: '"PriorLabelImage"' - # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']: + # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']: prior_image: '"segmentation0.nii.gz"' # type=traitcompound|default=None: either a string pattern (e.g., 'prior%02d.nii') or an existing vector-image file. 
number_of_tissue_classes: '2' - # type=int|default=0: + # type=int|default=0: prior_weighting: '0.8' - # type=float|default=0.0: + # type=float|default=0.0: dimension: '3' # type=enum|default=3|allowed[2,3,4]: image dimension (2, 3, or 4) intensity_images: - # type=inputmultiobject|default=[]: + # type=inputmultiobject|default=[]: mask_image: - # type=file|default=: + # type=file|default=: likelihood_model: '"Gaussian"' - # type=str|default='': + # type=str|default='': save_posteriors: 'True' - # type=bool|default=False: + # type=bool|default=False: mrf_smoothing_factor: '0.2' - # type=float|default=0.0: + # type=float|default=0.0: mrf_radius: '[1, 1, 1]' - # type=list|default=[]: + # type=list|default=[]: icm_use_synchronous_update: 'True' - # type=bool|default=False: + # type=bool|default=False: maximum_number_of_icm_terations: '1' - # type=int|default=0: + # type=int|default=0: n_iterations: '5' - # type=int|default=0: + # type=int|default=0: convergence_threshold: '0.000001' - # type=float|default=0.0: + # type=float|default=0.0: posterior_formulation: '"Socrates"' - # type=str|default='': + # type=str|default='': use_mixture_model_proportions: 'True' - # type=bool|default=False: + # type=bool|default=False: imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -405,37 +412,37 @@ doctests: # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. 
initialization: '"Random"' - # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']: + # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']: dimension: '3' # type=enum|default=3|allowed[2,3,4]: image dimension (2, 3, or 4) - intensity_images: - # type=inputmultiobject|default=[]: - mask_image: - # type=file|default=: + intensity_images: '"structural.nii"' + # type=inputmultiobject|default=[]: + mask_image: '"mask.nii"' + # type=file|default=: number_of_tissue_classes: '2' - # type=int|default=0: + # type=int|default=0: likelihood_model: '"Gaussian"' - # type=str|default='': + # type=str|default='': save_posteriors: 'True' - # type=bool|default=False: + # type=bool|default=False: mrf_smoothing_factor: '0.2' - # type=float|default=0.0: + # type=float|default=0.0: mrf_radius: '[1, 1, 1]' - # type=list|default=[]: + # type=list|default=[]: icm_use_synchronous_update: 'True' - # type=bool|default=False: + # type=bool|default=False: maximum_number_of_icm_terations: '1' - # type=int|default=0: + # type=int|default=0: n_iterations: '5' - # type=int|default=0: + # type=int|default=0: convergence_threshold: '0.000001' - # type=float|default=0.0: + # type=float|default=0.0: posterior_formulation: '"Socrates"' - # type=str|default='': + # type=str|default='': use_mixture_model_proportions: 'True' - # type=bool|default=False: + # type=bool|default=False: imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -446,39 +453,39 @@ doctests: # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. 
initialization: '"KMeans"' - # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']: + # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']: kmeans_init_centers: '[100, 200]' - # type=list|default=[]: + # type=list|default=[]: dimension: '3' # type=enum|default=3|allowed[2,3,4]: image dimension (2, 3, or 4) - intensity_images: - # type=inputmultiobject|default=[]: - mask_image: - # type=file|default=: + intensity_images: '"structural.nii"' + # type=inputmultiobject|default=[]: + mask_image: '"mask.nii"' + # type=file|default=: number_of_tissue_classes: '2' - # type=int|default=0: + # type=int|default=0: likelihood_model: '"Gaussian"' - # type=str|default='': + # type=str|default='': save_posteriors: 'True' - # type=bool|default=False: + # type=bool|default=False: mrf_smoothing_factor: '0.2' - # type=float|default=0.0: + # type=float|default=0.0: mrf_radius: '[1, 1, 1]' - # type=list|default=[]: + # type=list|default=[]: icm_use_synchronous_update: 'True' - # type=bool|default=False: + # type=bool|default=False: maximum_number_of_icm_terations: '1' - # type=int|default=0: + # type=int|default=0: n_iterations: '5' - # type=int|default=0: + # type=int|default=0: convergence_threshold: '0.000001' - # type=float|default=0.0: + # type=float|default=0.0: posterior_formulation: '"Socrates"' - # type=str|default='': + # type=str|default='': use_mixture_model_proportions: 'True' - # type=bool|default=False: + # type=bool|default=False: imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -489,43 +496,43 @@ doctests: # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. initialization: '"PriorProbabilityImages"' - # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']: + # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']: prior_image: '"BrainSegmentationPrior%02d.nii.gz"' # type=traitcompound|default=None: either a string pattern (e.g., 'prior%02d.nii') or an existing vector-image file. 
prior_weighting: '0.8' - # type=float|default=0.0: + # type=float|default=0.0: prior_probability_threshold: '0.0000001' - # type=float|default=0.0: + # type=float|default=0.0: dimension: '3' # type=enum|default=3|allowed[2,3,4]: image dimension (2, 3, or 4) - intensity_images: - # type=inputmultiobject|default=[]: - mask_image: - # type=file|default=: + intensity_images: '"structural.nii"' + # type=inputmultiobject|default=[]: + mask_image: '"mask.nii"' + # type=file|default=: number_of_tissue_classes: '2' - # type=int|default=0: + # type=int|default=0: likelihood_model: '"Gaussian"' - # type=str|default='': + # type=str|default='': save_posteriors: 'True' - # type=bool|default=False: + # type=bool|default=False: mrf_smoothing_factor: '0.2' - # type=float|default=0.0: + # type=float|default=0.0: mrf_radius: '[1, 1, 1]' - # type=list|default=[]: + # type=list|default=[]: icm_use_synchronous_update: 'True' - # type=bool|default=False: + # type=bool|default=False: maximum_number_of_icm_terations: '1' - # type=int|default=0: + # type=int|default=0: n_iterations: '5' - # type=int|default=0: + # type=int|default=0: convergence_threshold: '0.000001' - # type=float|default=0.0: + # type=float|default=0.0: posterior_formulation: '"Socrates"' - # type=str|default='': + # type=str|default='': use_mixture_model_proportions: 'True' - # type=bool|default=False: + # type=bool|default=False: imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -536,41 +543,41 @@ doctests: # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. initialization: '"PriorLabelImage"' - # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']: + # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']: prior_image: '"segmentation0.nii.gz"' # type=traitcompound|default=None: either a string pattern (e.g., 'prior%02d.nii') or an existing vector-image file. 
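Worth noting here: classified_image defaults to the stem of the first intensity image plus '_labeled', which is why the cmdline examples near the top of this spec show '--output [structural_labeled.nii,...]'; the derivation lives in _gen_filename in atropos_callables.py just below. A standalone sketch:

    import os.path as op

    intensity_image = "structural.nii"  # placeholder, first entry of intensity_images
    stem, ext = op.splitext(op.basename(intensity_image))
    print(stem + "_labeled" + ext)  # structural_labeled.nii
    # the real helper uses split_filename, which also treats .nii.gz as a single extension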
number_of_tissue_classes: '2' - # type=int|default=0: + # type=int|default=0: prior_weighting: '0.8' - # type=float|default=0.0: + # type=float|default=0.0: dimension: '3' # type=enum|default=3|allowed[2,3,4]: image dimension (2, 3, or 4) - intensity_images: - # type=inputmultiobject|default=[]: - mask_image: - # type=file|default=: + intensity_images: '"structural.nii"' + # type=inputmultiobject|default=[]: + mask_image: '"mask.nii"' + # type=file|default=: likelihood_model: '"Gaussian"' - # type=str|default='': + # type=str|default='': save_posteriors: 'True' - # type=bool|default=False: + # type=bool|default=False: mrf_smoothing_factor: '0.2' - # type=float|default=0.0: + # type=float|default=0.0: mrf_radius: '[1, 1, 1]' - # type=list|default=[]: + # type=list|default=[]: icm_use_synchronous_update: 'True' - # type=bool|default=False: + # type=bool|default=False: maximum_number_of_icm_terations: '1' - # type=int|default=0: + # type=int|default=0: n_iterations: '5' - # type=int|default=0: + # type=int|default=0: convergence_threshold: '0.000001' - # type=float|default=0.0: + # type=float|default=0.0: posterior_formulation: '"Socrates"' - # type=str|default='': + # type=str|default='': use_mixture_model_proportions: 'True' - # type=bool|default=False: + # type=bool|default=False: imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/atropos_callables.py b/nipype-auto-conv/specs/atropos_callables.py index 93f87cd..e2b8327 100644 --- a/nipype-auto-conv/specs/atropos_callables.py +++ b/nipype-auto-conv/specs/atropos_callables.py @@ -1 +1,105 @@ -"""Module to put any functions that are referred to in Atropos.yaml""" +"""Module to put any functions that are referred to in the "callables" section of Atropos.yaml""" + +import attrs +import os +import os.path as op + + +def out_classified_image_name_default(inputs): + return _gen_filename("out_classified_image_name", inputs=inputs) + + +def classified_image_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["classified_image"] + + +def posteriors_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["posteriors"] + + +# Original source at L232 of /interfaces/ants/segmentation.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + if name == "out_classified_image_name": + output = inputs.out_classified_image_name + if output is attrs.NOTHING: + _, name, ext = split_filename(inputs.intensity_images[0]) + output = name + "_labeled" + ext + return output + + +# Original source at L240 of /interfaces/ants/segmentation.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + outputs = {} + outputs["classified_image"] = os.path.abspath( + _gen_filename( + "out_classified_image_name", + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + ) + if (inputs.save_posteriors is not attrs.NOTHING) and inputs.save_posteriors: + outputs["posteriors"] = [] + for i in 
range(inputs.number_of_tissue_classes): + outputs["posteriors"].append( + os.path.abspath(inputs.output_posteriors_name_template % (i + 1)) + ) + return outputs + + +# Original source at L58 of /utils/filemanip.py +def split_filename(fname): + """Split a filename into parts: path, base filename and extension. + + Parameters + ---------- + fname : str + file or path name + + Returns + ------- + pth : str + base path from fname + fname : str + filename from fname, without extension + ext : str + file extension from fname + + Examples + -------- + >>> from nipype.utils.filemanip import split_filename + >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz') + >>> pth + '/home/data' + + >>> fname + 'subject' + + >>> ext + '.nii.gz' + + """ + + special_extensions = [".nii.gz", ".tar.gz", ".niml.dset"] + + pth = op.dirname(fname) + fname = op.basename(fname) + + ext = None + for special_ext in special_extensions: + ext_len = len(special_ext) + if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()): + ext = fname[-ext_len:] + fname = fname[:-ext_len] + break + if not ext: + fname, ext = op.splitext(fname) + + return pth, fname, ext diff --git a/nipype-auto-conv/specs/average_affine_transform.yaml b/nipype-auto-conv/specs/average_affine_transform.yaml index 914aac8..5f9b016 100644 --- a/nipype-auto-conv/specs/average_affine_transform.yaml +++ b/nipype-auto-conv/specs/average_affine_transform.yaml @@ -5,7 +5,7 @@ # # Docs # ---- -# +# # Examples # -------- # >>> from nipype.interfaces.ants import AverageAffineTransform @@ -15,8 +15,8 @@ # >>> avg.inputs.output_affine_transform = 'MYtemplatewarp.mat' # >>> avg.cmdline # 'AverageAffineTransform 3 MYtemplatewarp.mat trans.mat func_to_struct.mat' -# -# +# +# task_name: AverageAffineTransform nipype_name: AverageAffineTransform nipype_module: nipype.interfaces.ants.utils @@ -31,10 +31,13 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - output_affine_transform: datascience/text-matrix + output_affine_transform: Path # type=file|default=: Outputfname.txt: the name of the resulting transform. transforms: datascience/text-matrix+list-of # type=inputmultiobject|default=[]: transforms to average + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. 
out_file: position: 1) outputs: @@ -74,15 +77,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -95,18 +98,18 @@ tests: # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) transforms: # type=inputmultiobject|default=[]: transforms to average - output_affine_transform: + output_affine_transform: '"MYtemplatewarp.mat"' # type=file|default=: Outputfname.txt: the name of the resulting transform. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -121,12 +124,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - transforms: + transforms: '["trans.mat", "func_to_struct.mat"]' # type=inputmultiobject|default=[]: transforms to average - output_affine_transform: + output_affine_transform: '"MYtemplatewarp.mat"' # type=file|default=: Outputfname.txt: the name of the resulting transform. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/average_affine_transform_callables.py b/nipype-auto-conv/specs/average_affine_transform_callables.py index 5ee0257..ed4730a 100644 --- a/nipype-auto-conv/specs/average_affine_transform_callables.py +++ b/nipype-auto-conv/specs/average_affine_transform_callables.py @@ -1 +1,22 @@ -"""Module to put any functions that are referred to in AverageAffineTransform.yaml""" +"""Module to put any functions that are referred to in the "callables" section of AverageAffineTransform.yaml""" + +import os + + +def affine_transform_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["affine_transform"] + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError + + +# Original source at L587 of /interfaces/ants/utils.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + outputs = {} + outputs["affine_transform"] = os.path.abspath(inputs.output_affine_transform) + return outputs diff --git a/nipype-auto-conv/specs/average_images.yaml b/nipype-auto-conv/specs/average_images.yaml index 0169511..58f077a 100644 --- a/nipype-auto-conv/specs/average_images.yaml +++ b/nipype-auto-conv/specs/average_images.yaml @@ -5,7 +5,7 @@ # # Docs # ---- -# +# # Examples # -------- # >>> from nipype.interfaces.ants import AverageImages @@ -16,7 +16,7 @@ # >>> avg.inputs.images = ['rc1s1.nii', 'rc1s1.nii'] # >>> avg.cmdline # 'AverageImages 3 average.nii.gz 1 rc1s1.nii rc1s1.nii' -# +# task_name: AverageImages nipype_name: AverageImages nipype_module: nipype.interfaces.ants.utils @@ -31,11 +31,14 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - output_average_image: medimage/nifti-gz - # type=file: average image file - # type=file|default='average.nii': the name of the resulting image. images: medimage/nifti1+list-of # type=inputmultiobject|default=[]: image to apply transformation to (generally a coregistered functional) + output_average_image: Path + # type=file: average image file + # type=file|default='average.nii': the name of the resulting image. + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. 
out_file: position: 1) outputs: @@ -79,15 +82,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -98,7 +101,7 @@ tests: # (if not specified, will try to choose a sensible value) dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - output_average_image: + output_average_image: '"average.nii.gz"' # type=file: average image file # type=file|default='average.nii': the name of the resulting image. normalize: 'True' @@ -106,15 +109,15 @@ tests: images: # type=inputmultiobject|default=[]: image to apply transformation to (generally a coregistered functional) imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -129,15 +132,15 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - output_average_image: + output_average_image: '"average.nii.gz"' # type=file: average image file # type=file|default='average.nii': the name of the resulting image. normalize: 'True' # type=bool|default=False: Normalize: if true, the 2nd image is divided by its mean. This will select the largest image to average into. - images: + images: '["rc1s1.nii", "rc1s1.nii"]' # type=inputmultiobject|default=[]: image to apply transformation to (generally a coregistered functional) imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/average_images_callables.py b/nipype-auto-conv/specs/average_images_callables.py index 8d6e9a0..c0d588d 100644 --- a/nipype-auto-conv/specs/average_images_callables.py +++ b/nipype-auto-conv/specs/average_images_callables.py @@ -1 +1,22 @@ -"""Module to put any functions that are referred to in AverageImages.yaml""" +"""Module to put any functions that are referred to in the "callables" section of AverageImages.yaml""" + +import os + + +def output_average_image_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["output_average_image"] + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError + + +# Original source at L648 of /interfaces/ants/utils.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + outputs = {} + outputs["output_average_image"] = os.path.realpath(inputs.output_average_image) + return outputs diff --git a/nipype-auto-conv/specs/brain_extraction.yaml b/nipype-auto-conv/specs/brain_extraction.yaml index f4b8d8a..48d94c0 100644 --- a/nipype-auto-conv/specs/brain_extraction.yaml +++ b/nipype-auto-conv/specs/brain_extraction.yaml @@ -5,9 +5,9 @@ # # Docs # ---- -# +# # Atlas-based brain extraction. -# +# # Examples # -------- # >>> from nipype.interfaces.ants.segmentation import BrainExtraction @@ -19,8 +19,8 @@ # >>> brainextraction.cmdline # 'antsBrainExtraction.sh -a T1.nii.gz -m ProbabilityMaskOfStudyTemplate.nii.gz # -e study_template.nii.gz -d 3 -s nii.gz -o highres001_' -# -# +# +# task_name: BrainExtraction nipype_name: BrainExtraction nipype_module: nipype.interfaces.ants.segmentation @@ -37,12 +37,15 @@ inputs: # passed to the field in the automatically generated unittests. anatomical_image: medimage/nifti-gz # type=file|default=: Structural image, typically T1. If more than one anatomical image is specified, subsequently specified images are used during the segmentation process. However, only the first image is used in the registration of priors. Our suggestion would be to specify the T1 as the first image. Anatomical template created using e.g. LPBA40 data set with buildtemplateparallel.sh in ANTs. - brain_template: medimage/nifti-gz - # type=file|default=: Anatomical template created using e.g. LPBA40 data set with buildtemplateparallel.sh in ANTs. brain_probability_mask: medimage/nifti-gz # type=file|default=: Brain probability mask created using e.g. LPBA40 data set which have brain masks defined, and warped to anatomical template and averaged resulting in a probability image. + brain_template: medimage/nifti-gz + # type=file|default=: Anatomical template created using e.g. LPBA40 data set with buildtemplateparallel.sh in ANTs. extraction_registration_mask: generic/file # type=file|default=: Mask (defined in the template space) used during registration for brain extraction. To limit the metric computation to a specific region. + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. 
out_file: position: 1) outputs: @@ -56,8 +59,6 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - BrainExtractionMask: generic/file - # type=file: brain extraction mask BrainExtractionBrain: generic/file # type=file: brain extraction image BrainExtractionCSF: generic/file @@ -65,33 +66,35 @@ BrainExtractionGM: generic/file # type=file: segmentation mask with only grey matter BrainExtractionInitialAffine: generic/file - # type=file: + # type=file: BrainExtractionInitialAffineFixed: generic/file - # type=file: + # type=file: BrainExtractionInitialAffineMoving: generic/file - # type=file: + # type=file: BrainExtractionLaplacian: generic/file - # type=file: + # type=file: + BrainExtractionMask: generic/file + # type=file: brain extraction mask BrainExtractionPrior0GenericAffine: generic/file - # type=file: + # type=file: BrainExtractionPrior1InverseWarp: generic/file - # type=file: + # type=file: BrainExtractionPrior1Warp: generic/file - # type=file: + # type=file: BrainExtractionPriorWarped: generic/file - # type=file: + # type=file: BrainExtractionSegmentation: generic/file # type=file: segmentation mask with CSF, GM, and WM BrainExtractionTemplateLaplacian: generic/file - # type=file: + # type=file: BrainExtractionTmp: generic/file - # type=file: + # type=file: BrainExtractionWM: generic/file # type=file: segmentation mask with only white matter N4Corrected0: generic/file # type=file: N4 bias field corrected image N4Truncated0: generic/file - # type=file: + # type=file: callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields @@ -132,15 +135,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -158,15 +161,15 @@ tests: brain_probability_mask: # type=file|default=: Brain probability mask created using e.g. LPBA40 data set which have brain masks defined, and warped to anatomical template and averaged resulting in a probability image.
imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -181,14 +184,14 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - anatomical_image: + anatomical_image: '"T1.nii.gz"' # type=file|default=: Structural image, typically T1. If more than one anatomical image is specified, subsequently specified images are used during the segmentation process. However, only the first image is used in the registration of priors. Our suggestion would be to specify the T1 as the first image. Anatomical template created using e.g. LPBA40 data set with buildtemplateparallel.sh in ANTs. - brain_template: + brain_template: '"study_template.nii.gz"' # type=file|default=: Anatomical template created using e.g. LPBA40 data set with buildtemplateparallel.sh in ANTs. - brain_probability_mask: + brain_probability_mask: '"ProbabilityMaskOfStudyTemplate.nii.gz"' # type=file|default=: Brain probability mask created using e.g. LPBA40 data set which have brain masks defined, and warped to anatomical template and averaged resulting in a probability image. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/brain_extraction_callables.py b/nipype-auto-conv/specs/brain_extraction_callables.py index 88a7a0b..157af1e 100644 --- a/nipype-auto-conv/specs/brain_extraction_callables.py +++ b/nipype-auto-conv/specs/brain_extraction_callables.py @@ -1 +1,224 @@ -"""Module to put any functions that are referred to in BrainExtraction.yaml""" +"""Module to put any functions that are referred to in the "callables" section of BrainExtraction.yaml""" + +import attrs +import os + + +def BrainExtractionBrain_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["BrainExtractionBrain"] + + +def BrainExtractionCSF_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["BrainExtractionCSF"] + + +def BrainExtractionGM_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["BrainExtractionGM"] + + +def BrainExtractionInitialAffine_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["BrainExtractionInitialAffine"] + + +def BrainExtractionInitialAffineFixed_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["BrainExtractionInitialAffineFixed"] + + +def BrainExtractionInitialAffineMoving_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["BrainExtractionInitialAffineMoving"] + + +def BrainExtractionLaplacian_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["BrainExtractionLaplacian"] + + +def BrainExtractionMask_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["BrainExtractionMask"] + + +def BrainExtractionPrior0GenericAffine_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["BrainExtractionPrior0GenericAffine"] + + +def BrainExtractionPrior1InverseWarp_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["BrainExtractionPrior1InverseWarp"] + + +def BrainExtractionPrior1Warp_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["BrainExtractionPrior1Warp"] + + +def BrainExtractionPriorWarped_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["BrainExtractionPriorWarped"] + + +def BrainExtractionSegmentation_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["BrainExtractionSegmentation"] + + +def 
BrainExtractionTemplateLaplacian_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["BrainExtractionTemplateLaplacian"] + + +def BrainExtractionTmp_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["BrainExtractionTmp"] + + +def BrainExtractionWM_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["BrainExtractionWM"] + + +def N4Corrected0_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["N4Corrected0"] + + +def N4Truncated0_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["N4Truncated0"] + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError + + +# Original source at L1031 of /interfaces/ants/segmentation.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + outputs = {} + outputs["BrainExtractionMask"] = os.path.join( + output_dir, + inputs.out_prefix + "BrainExtractionMask." + inputs.image_suffix, + ) + outputs["BrainExtractionBrain"] = os.path.join( + output_dir, + inputs.out_prefix + "BrainExtractionBrain." + inputs.image_suffix, + ) + if ( + inputs.keep_temporary_files is not attrs.NOTHING + ) and inputs.keep_temporary_files != 0: + outputs["BrainExtractionCSF"] = os.path.join( + output_dir, + inputs.out_prefix + "BrainExtractionCSF." + inputs.image_suffix, + ) + outputs["BrainExtractionGM"] = os.path.join( + output_dir, + inputs.out_prefix + "BrainExtractionGM." + inputs.image_suffix, + ) + outputs["BrainExtractionInitialAffine"] = os.path.join( + output_dir, inputs.out_prefix + "BrainExtractionInitialAffine.mat" + ) + outputs["BrainExtractionInitialAffineFixed"] = os.path.join( + output_dir, + inputs.out_prefix + + "BrainExtractionInitialAffineFixed." + + inputs.image_suffix, + ) + outputs["BrainExtractionInitialAffineMoving"] = os.path.join( + output_dir, + inputs.out_prefix + + "BrainExtractionInitialAffineMoving." + + inputs.image_suffix, + ) + outputs["BrainExtractionLaplacian"] = os.path.join( + output_dir, + inputs.out_prefix + "BrainExtractionLaplacian." + inputs.image_suffix, + ) + outputs["BrainExtractionPrior0GenericAffine"] = os.path.join( + output_dir, + inputs.out_prefix + "BrainExtractionPrior0GenericAffine.mat", + ) + outputs["BrainExtractionPrior1InverseWarp"] = os.path.join( + output_dir, + inputs.out_prefix + + "BrainExtractionPrior1InverseWarp." + + inputs.image_suffix, + ) + outputs["BrainExtractionPrior1Warp"] = os.path.join( + output_dir, + inputs.out_prefix + "BrainExtractionPrior1Warp." + inputs.image_suffix, + ) + outputs["BrainExtractionPriorWarped"] = os.path.join( + output_dir, + inputs.out_prefix + "BrainExtractionPriorWarped." + inputs.image_suffix, + ) + outputs["BrainExtractionSegmentation"] = os.path.join( + output_dir, + inputs.out_prefix + "BrainExtractionSegmentation." + inputs.image_suffix, + ) + outputs["BrainExtractionTemplateLaplacian"] = os.path.join( + output_dir, + inputs.out_prefix + + "BrainExtractionTemplateLaplacian." 
+ + inputs.image_suffix, + ) + outputs["BrainExtractionTmp"] = os.path.join( + output_dir, + inputs.out_prefix + "BrainExtractionTmp." + inputs.image_suffix, + ) + outputs["BrainExtractionWM"] = os.path.join( + output_dir, + inputs.out_prefix + "BrainExtractionWM." + inputs.image_suffix, + ) + outputs["N4Corrected0"] = os.path.join( + output_dir, + inputs.out_prefix + "N4Corrected0." + inputs.image_suffix, + ) + outputs["N4Truncated0"] = os.path.join( + output_dir, + inputs.out_prefix + "N4Truncated0." + inputs.image_suffix, + ) + + return outputs diff --git a/nipype-auto-conv/specs/buildtemplateparallel.yaml b/nipype-auto-conv/specs/buildtemplateparallel.yaml index af036f8..e03adaf 100644 --- a/nipype-auto-conv/specs/buildtemplateparallel.yaml +++ b/nipype-auto-conv/specs/buildtemplateparallel.yaml @@ -6,22 +6,22 @@ # Docs # ---- # Generate a optimal average template -# +# # .. warning:: -# +# # This can take a VERY long time to complete -# +# # Examples # -------- -# +# # >>> from nipype.interfaces.ants.legacy import buildtemplateparallel # >>> tmpl = buildtemplateparallel() # >>> tmpl.inputs.in_files = ['T1.nii', 'structural.nii'] # >>> tmpl.inputs.max_iterations = [30, 90, 20] # >>> tmpl.cmdline # 'buildtemplateparallel.sh -d 3 -i 4 -m 30x90x20 -o antsTMPL_ -c 0 -t GR T1.nii structural.nii' -# -# +# +# task_name: buildtemplateparallel nipype_name: buildtemplateparallel nipype_module: nipype.interfaces.ants.legacy @@ -38,6 +38,9 @@ inputs: # passed to the field in the automatically generated unittests. in_files: medimage/nifti1+list-of # type=list|default=[]: list of images to generate template from + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -53,6 +56,10 @@ outputs: # passed to the field in the automatically generated unittests. final_template_file: generic/file # type=file: final ANTS template + subject_outfiles: generic/file+list-of + # type=outputmultiobject: Outputs for each input image. Includes warp field, inverse warp, Affine, original image (repaired) and warped image (deformed) + template_files: generic/file+list-of + # type=outputmultiobject: Templates from different stages of iteration callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields @@ -97,15 +104,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. 
Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -119,15 +126,15 @@ tests: max_iterations: '[30, 90, 20]' # type=list|default=[]: maximum number of iterations (must be list of integers in the form [J,K,L...]: J = coarsest resolution iterations, K = middle resolution iterations, L = fine resolution iterations imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -140,12 +147,12 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - in_files: + in_files: '["T1.nii", "structural.nii"]' # type=list|default=[]: list of images to generate template from max_iterations: '[30, 90, 20]' # type=list|default=[]: maximum number of iterations (must be list of integers in the form [J,K,L...]: J = coarsest resolution iterations, K = middle resolution iterations, L = fine resolution iterations imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/buildtemplateparallel_callables.py b/nipype-auto-conv/specs/buildtemplateparallel_callables.py index 7ba280c..17f849e 100644 --- a/nipype-auto-conv/specs/buildtemplateparallel_callables.py +++ b/nipype-auto-conv/specs/buildtemplateparallel_callables.py @@ -1 +1,119 @@ -"""Module to put any functions that are referred to in buildtemplateparallel.yaml""" +"""Module to put any functions that are referred to in the "callables" section of buildtemplateparallel.yaml""" + +import os +import os.path as op +from builtins import range +from glob import glob + + +def final_template_file_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["final_template_file"] + + +def subject_outfiles_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["subject_outfiles"] + + +def template_files_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["template_files"] + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError + + +# Original source at L340 of /interfaces/ants/legacy.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + outputs = {} + outputs["template_files"] = [] + for i in range(len(glob(os.path.realpath("*iteration*")))): + temp = os.path.realpath( + "%s_iteration_%d/%stemplate.nii.gz" + % (inputs.transformation_model, i, inputs.out_prefix) + ) + os.rename( + temp, + os.path.realpath( + "%s_iteration_%d/%stemplate_i%d.nii.gz" + % (inputs.transformation_model, i, inputs.out_prefix, i) + ), + ) + file_ = "%s_iteration_%d/%stemplate_i%d.nii.gz" % ( + inputs.transformation_model, + i, + inputs.out_prefix, + i, + ) + + outputs["template_files"].append(os.path.realpath(file_)) + outputs["final_template_file"] = os.path.realpath( + "%stemplate.nii.gz" % inputs.out_prefix + ) + outputs["subject_outfiles"] = [] + for filename in inputs.in_files: + _, base, _ = split_filename(filename) + temp = glob(os.path.realpath("%s%s*" % (inputs.out_prefix, base))) + for file_ in temp: + outputs["subject_outfiles"].append(file_) + return outputs + + +# Original source at L58 of /utils/filemanip.py +def split_filename(fname): + """Split a filename into parts: path, base filename and extension. 
+ + Parameters + ---------- + fname : str + file or path name + + Returns + ------- + pth : str + base path from fname + fname : str + filename from fname, without extension + ext : str + file extension from fname + + Examples + -------- + >>> from nipype.utils.filemanip import split_filename + >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz') + >>> pth + '/home/data' + + >>> fname + 'subject' + + >>> ext + '.nii.gz' + + """ + + special_extensions = [".nii.gz", ".tar.gz", ".niml.dset"] + + pth = op.dirname(fname) + fname = op.basename(fname) + + ext = None + for special_ext in special_extensions: + ext_len = len(special_ext) + if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()): + ext = fname[-ext_len:] + fname = fname[:-ext_len] + break + if not ext: + fname, ext = op.splitext(fname) + + return pth, fname, ext diff --git a/nipype-auto-conv/specs/compose_multi_transform.yaml b/nipype-auto-conv/specs/compose_multi_transform.yaml index e195d16..5f20948 100644 --- a/nipype-auto-conv/specs/compose_multi_transform.yaml +++ b/nipype-auto-conv/specs/compose_multi_transform.yaml @@ -5,9 +5,9 @@ # # Docs # ---- -# +# # Take a set of transformations and convert them to a single transformation matrix/warpfield. -# +# # Examples # -------- # >>> from nipype.interfaces.ants import ComposeMultiTransform @@ -17,8 +17,8 @@ # >>> compose_transform.cmdline # 'ComposeMultiTransform 3 struct_to_template_composed.mat # struct_to_template.mat func_to_struct.mat' -# -# +# +# task_name: ComposeMultiTransform nipype_name: ComposeMultiTransform nipype_module: nipype.interfaces.ants.utils @@ -33,13 +33,16 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - output_transform: generic/file + output_transform: Path # type=file: Composed transform file # type=file|default=: the name of the resulting transform. reference_image: generic/file # type=file|default=: Reference image (only necessary when output is warpfield) transforms: datascience/text-matrix+list-of # type=inputmultiobject|default=[]: transforms to average + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -83,15 +86,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. 
Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -105,15 +108,15 @@ tests: transforms: # type=inputmultiobject|default=[]: transforms to average imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -128,10 +131,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - transforms: + transforms: '["struct_to_template.mat", "func_to_struct.mat"]' # type=inputmultiobject|default=[]: transforms to average imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/compose_multi_transform_callables.py b/nipype-auto-conv/specs/compose_multi_transform_callables.py index 88052b5..0fff187 100644 --- a/nipype-auto-conv/specs/compose_multi_transform_callables.py +++ b/nipype-auto-conv/specs/compose_multi_transform_callables.py @@ -1 +1,203 @@ -"""Module to put any functions that are referred to in ComposeMultiTransform.yaml""" +"""Module to put any functions that are referred to in the "callables" section of ComposeMultiTransform.yaml""" + +import attrs +import logging +import os +import os.path as op + + +def output_transform_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["output_transform"] + + +iflogger = logging.getLogger("nipype.interface") + + +# Original source at L809 of /interfaces/base/core.py +def _filename_from_source( + name, chain=None, inputs=None, stdout=None, stderr=None, output_dir=None +): + if chain is None: + chain = [] + + trait_spec = inputs.trait(name) + retval = getattr(inputs, name) + source_ext = None + if (retval is attrs.NOTHING) or "%s" in retval: + if not trait_spec.name_source: + return retval + + # Do not generate filename when excluded by other inputs + if any( + (getattr(inputs, field) is not attrs.NOTHING) + for field in trait_spec.xor or () + ): + return retval + + # Do not generate filename when required fields are missing + if not all( + (getattr(inputs, field) is not attrs.NOTHING) + for field in trait_spec.requires or () + ): + return retval + + if (retval is not attrs.NOTHING) and "%s" in retval: + name_template = retval + else: + name_template = trait_spec.name_template + if not name_template: + name_template = "%s_generated" + + ns = trait_spec.name_source + while isinstance(ns, (list, tuple)): + if len(ns) > 1: + iflogger.warning("Only one name_source per trait is allowed") + ns = ns[0] + + if not isinstance(ns, (str, bytes)): + raise ValueError( + "name_source of '{}' trait should be an input trait " + "name, but a type {} object was found".format(name, type(ns)) + ) + + if getattr(inputs, ns) is not attrs.NOTHING: + name_source = ns + source = getattr(inputs, name_source) + while isinstance(source, list): + source = source[0] + + # special treatment for files + try: + _, base, source_ext = split_filename(source) + except (AttributeError, TypeError): + base = source + else: + if name in chain: + raise NipypeInterfaceError("Mutually pointing name_sources") + + chain.append(name) + base = _filename_from_source( + ns, + chain, + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + if base is not attrs.NOTHING: + _, _, source_ext = split_filename(base) + else: + # Do not generate filename when required fields are missing + return retval + + chain = None + retval = name_template % base + _, _, ext = split_filename(retval) + if trait_spec.keep_extension and (ext or source_ext): + if (ext is None or not ext) and source_ext: + retval = retval + source_ext + else: + retval = _overload_extension( + retval, + name, + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + return retval + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError + + +# Original source at L891 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + metadata = 
dict(name_source=lambda t: t is not None) + traits = inputs.traits(**metadata) + if traits: + outputs = {} + for name, trait_spec in list(traits.items()): + out_name = name + if trait_spec.output_name is not None: + out_name = trait_spec.output_name + fname = _filename_from_source( + name, inputs=inputs, stdout=stdout, stderr=stderr, output_dir=output_dir + ) + if fname is not attrs.NOTHING: + outputs[out_name] = os.path.abspath(fname) + return outputs + + +# Original source at L888 of /interfaces/base/core.py +def _overload_extension( + value, name=None, inputs=None, stdout=None, stderr=None, output_dir=None +): + return value + + +# Original source at L58 of /utils/filemanip.py +def split_filename(fname): + """Split a filename into parts: path, base filename and extension. + + Parameters + ---------- + fname : str + file or path name + + Returns + ------- + pth : str + base path from fname + fname : str + filename from fname, without extension + ext : str + file extension from fname + + Examples + -------- + >>> from nipype.utils.filemanip import split_filename + >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz') + >>> pth + '/home/data' + + >>> fname + 'subject' + + >>> ext + '.nii.gz' + + """ + + special_extensions = [".nii.gz", ".tar.gz", ".niml.dset"] + + pth = op.dirname(fname) + fname = op.basename(fname) + + ext = None + for special_ext in special_extensions: + ext_len = len(special_ext) + if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()): + ext = fname[-ext_len:] + fname = fname[:-ext_len] + break + if not ext: + fname, ext = op.splitext(fname) + + return pth, fname, ext + + +# Original source at L125 of /interfaces/base/support.py +class NipypeInterfaceError(Exception): + """Custom error for interfaces""" + + def __init__(self, value): + self.value = value + + def __str__(self): + return "{}".format(self.value) diff --git a/nipype-auto-conv/specs/composite_transform_util.yaml b/nipype-auto-conv/specs/composite_transform_util.yaml index 1848ac9..44085fb 100644 --- a/nipype-auto-conv/specs/composite_transform_util.yaml +++ b/nipype-auto-conv/specs/composite_transform_util.yaml @@ -5,13 +5,13 @@ # # Docs # ---- -# +# # ANTs utility which can combine or break apart transform files into their individual # constituent components. -# +# # Examples # -------- -# +# # >>> from nipype.interfaces.ants import CompositeTransformUtil # >>> tran = CompositeTransformUtil() # >>> tran.inputs.process = 'disassemble' @@ -19,9 +19,9 @@ # >>> tran.cmdline # 'CompositeTransformUtil --disassemble output_Composite.h5 transform' # >>> tran.run() # doctest: +SKIP -# +# # example for assembling transformation files -# +# # >>> from nipype.interfaces.ants import CompositeTransformUtil # >>> tran = CompositeTransformUtil() # >>> tran.inputs.process = 'assemble' @@ -30,7 +30,7 @@ # >>> tran.cmdline # 'CompositeTransformUtil --assemble my.h5 AffineTransform.mat DisplacementFieldTransform.nii.gz ' # >>> tran.run() # doctest: +SKIP -# +# task_name: CompositeTransformUtil nipype_name: CompositeTransformUtil nipype_module: nipype.interfaces.ants.registration @@ -45,11 +45,14 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - out_file: datascience/hdf5 - # type=file: Compound transformation file - # type=file|default=: Output file path (only used for disassembly). 
in_file: '[datascience/text-matrix,datascience/hdf5]+list-of' # type=inputmultiobject|default=[]: Input transform file(s) + out_file: Path + # type=file: Compound transformation file + # type=file|default=: Output file path (only used for disassembly). + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -97,15 +100,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -119,15 +122,15 @@ tests: in_file: # type=inputmultiobject|default=[]: Input transform file(s) imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -138,21 +141,21 @@ tests: # (if not specified, will try to choose a sensible value) process: '"assemble"' # type=enum|default='assemble'|allowed['assemble','disassemble']: What to do with the transform inputs (assemble or disassemble) - out_file: + out_file: '"my.h5"' # type=file: Compound transformation file # type=file|default=: Output file path (only used for disassembly). 
in_file: # type=inputmultiobject|default=[]: Input transform file(s) imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -167,10 +170,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. process: '"disassemble"' # type=enum|default='assemble'|allowed['assemble','disassemble']: What to do with the transform inputs (assemble or disassemble) - in_file: + in_file: '"output_Composite.h5"' # type=inputmultiobject|default=[]: Input transform file(s) imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -182,13 +185,13 @@ doctests: # '.mock()' method of the corresponding class is used instead. process: '"assemble"' # type=enum|default='assemble'|allowed['assemble','disassemble']: What to do with the transform inputs (assemble or disassemble) - out_file: + out_file: '"my.h5"' # type=file: Compound transformation file # type=file|default=: Output file path (only used for disassembly). - in_file: + in_file: '["AffineTransform.mat", "DisplacementFieldTransform.nii.gz"]' # type=inputmultiobject|default=[]: Input transform file(s) imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/composite_transform_util_callables.py b/nipype-auto-conv/specs/composite_transform_util_callables.py index 4983be4..0c092e5 100644 --- a/nipype-auto-conv/specs/composite_transform_util_callables.py +++ b/nipype-auto-conv/specs/composite_transform_util_callables.py @@ -1 +1,44 @@ -"""Module to put any functions that are referred to in CompositeTransformUtil.yaml""" +"""Module to put any functions that are referred to in the "callables" section of CompositeTransformUtil.yaml""" + +import os + + +def affine_transform_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["affine_transform"] + + +def displacement_field_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["displacement_field"] + + +def out_file_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_file"] + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError + + +# Original source at L1873 of /interfaces/ants/registration.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + outputs = {} + if inputs.process == "disassemble": + outputs["affine_transform"] = os.path.abspath( + "00_{}_AffineTransform.mat".format(inputs.output_prefix) + ) + outputs["displacement_field"] = os.path.abspath( + "01_{}_DisplacementFieldTransform.nii.gz".format(inputs.output_prefix) + ) + if inputs.process == "assemble": + outputs["out_file"] = os.path.abspath(inputs.out_file) + return outputs diff --git a/nipype-auto-conv/specs/convert_scalar_image_to_rgb.yaml b/nipype-auto-conv/specs/convert_scalar_image_to_rgb.yaml index a2d2dcd..9676fe6 100644 --- a/nipype-auto-conv/specs/convert_scalar_image_to_rgb.yaml +++ b/nipype-auto-conv/specs/convert_scalar_image_to_rgb.yaml @@ -5,9 +5,9 @@ # # Docs # ---- -# +# # Convert scalar images to RGB. -# +# # Examples # -------- # >>> from nipype.interfaces.ants.visualization import ConvertScalarImageToRGB @@ -19,8 +19,8 @@ # >>> converter.inputs.maximum_input = 6 # >>> converter.cmdline # 'ConvertScalarImageToRGB 3 T1.nii.gz rgb.nii.gz none jet none 0 6 0 255' -# -# +# +# task_name: ConvertScalarImageToRGB nipype_name: ConvertScalarImageToRGB nipype_module: nipype.interfaces.ants.visualization @@ -37,6 +37,9 @@ inputs: # passed to the field in the automatically generated unittests. input_image: medimage/nifti-gz # type=file|default=: Main input is a 3-D grayscale image. + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. 
out_file: position: 1) outputs: @@ -82,9 +85,9 @@ tests: maximum_input: # type=int|default=0: maximum input minimum_RGB_output: - # type=int|default=0: + # type=int|default=0: maximum_RGB_output: - # type=int|default=255: + # type=int|default=255: num_threads: # type=int|default=1: Number of ITK threads to use args: @@ -92,15 +95,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -120,15 +123,15 @@ tests: maximum_input: '6' # type=int|default=0: maximum input imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -143,7 +146,7 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - input_image: + input_image: '"T1.nii.gz"' # type=file|default=: Main input is a 3-D grayscale image. colormap: '"jet"' # type=enum|default='grey'|allowed['autumn','blue','cool','copper','custom','green','grey','hot','hsv','jet','overunder','red','spring','summer','winter']: Select a colormap @@ -152,7 +155,7 @@ doctests: maximum_input: '6' # type=int|default=0: maximum input imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/convert_scalar_image_to_rgb_callables.py b/nipype-auto-conv/specs/convert_scalar_image_to_rgb_callables.py index f4c7836..bf7c5d1 100644 --- a/nipype-auto-conv/specs/convert_scalar_image_to_rgb_callables.py +++ b/nipype-auto-conv/specs/convert_scalar_image_to_rgb_callables.py @@ -1 +1,22 @@ -"""Module to put any functions that are referred to in ConvertScalarImageToRGB.yaml""" +"""Module to put any functions that are referred to in the "callables" section of ConvertScalarImageToRGB.yaml""" + +import os + + +def output_image_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["output_image"] + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError + + +# Original source at L103 of /interfaces/ants/visualization.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + outputs = {} + outputs["output_image"] = os.path.join(output_dir, inputs.output_image) + return outputs diff --git a/nipype-auto-conv/specs/cortical_thickness.yaml b/nipype-auto-conv/specs/cortical_thickness.yaml index d237b9e..71761ea 100644 --- a/nipype-auto-conv/specs/cortical_thickness.yaml +++ b/nipype-auto-conv/specs/cortical_thickness.yaml @@ -5,7 +5,7 @@ # # Docs # ---- -# +# # Examples # -------- # >>> from nipype.interfaces.ants.segmentation import CorticalThickness @@ -23,8 +23,8 @@ # 'antsCorticalThickness.sh -a T1.nii.gz -m ProbabilityMaskOfStudyTemplate.nii.gz # -e study_template.nii.gz -d 3 -s nii.gz -o antsCT_ # -p nipype_priors/BrainSegmentationPrior%02d.nii.gz -t brain_study_template.nii.gz' -# -# +# +# task_name: CorticalThickness nipype_name: CorticalThickness nipype_module: nipype.interfaces.ants.segmentation @@ -41,18 +41,21 @@ inputs: # passed to the field in the automatically generated unittests. anatomical_image: medimage/nifti-gz # type=file|default=: Structural *intensity* image, typically T1. If more than one anatomical image is specified, subsequently specified images are used during the segmentation process. However, only the first image is used in the registration of priors. Our suggestion would be to specify the T1 as the first image. - brain_template: medimage/nifti-gz - # type=file|default=: Anatomical *intensity* template (possibly created using a population data set with buildtemplateparallel.sh in ANTs). This template is *not* skull-stripped. brain_probability_mask: medimage/nifti-gz # type=file|default=: brain probability mask in template space + brain_template: medimage/nifti-gz + # type=file|default=: Anatomical *intensity* template (possibly created using a population data set with buildtemplateparallel.sh in ANTs). This template is *not* skull-stripped. + cortical_label_image: generic/file + # type=file|default=: Cortical ROI labels to use as a prior for ATITH. + extraction_registration_mask: generic/file + # type=file|default=: Mask (defined in the template space) used during registration for brain extraction. segmentation_priors: medimage/nifti-gz+list-of - # type=inputmultiobject|default=[]: + # type=inputmultiobject|default=[]: t1_registration_template: medimage/nifti-gz # type=file|default=: Anatomical *intensity* template (assumed to be skull-stripped). A common case would be where this would be the same template as specified in the -e option which is not skull stripped. 
- extraction_registration_mask: generic/file - # type=file|default=: Mask (defined in the template space) used during registration for brain extraction. - cortical_label_image: generic/file - # type=file|default=: Cortical ROI labels to use as a prior for ATITH. + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -68,28 +71,30 @@ outputs: # passed to the field in the automatically generated unittests. BrainExtractionMask: generic/file # type=file: brain extraction mask - ExtractedBrainN4: generic/file - # type=file: extracted brain from N4 image BrainSegmentation: generic/file # type=file: brain segmentation image BrainSegmentationN4: generic/file # type=file: N4 corrected image + BrainSegmentationPosteriors: generic/file+list-of + # type=outputmultiobject: Posterior probability images + BrainVolumes: generic/file + # type=file: Brain volumes as text CorticalThickness: generic/file # type=file: cortical thickness file - TemplateToSubject1GenericAffine: generic/file - # type=file: Template to subject affine - TemplateToSubject0Warp: generic/file - # type=file: Template to subject warp - SubjectToTemplate1Warp: generic/file - # type=file: Template to subject inverse warp + CorticalThicknessNormedToTemplate: generic/file + # type=file: Normalized cortical thickness + ExtractedBrainN4: generic/file + # type=file: extracted brain from N4 image SubjectToTemplate0GenericAffine: generic/file # type=file: Template to subject inverse affine + SubjectToTemplate1Warp: generic/file + # type=file: Template to subject inverse warp SubjectToTemplateLogJacobian: generic/file # type=file: Template to subject log jacobian - CorticalThicknessNormedToTemplate: generic/file - # type=file: Normalized cortical thickness - BrainVolumes: generic/file - # type=file: Brain volumes as text + TemplateToSubject0Warp: generic/file + # type=file: Template to subject warp + TemplateToSubject1GenericAffine: generic/file + # type=file: Template to subject affine callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields @@ -110,7 +115,7 @@ tests: brain_probability_mask: # type=file|default=: brain probability mask in template space segmentation_priors: - # type=inputmultiobject|default=[]: + # type=inputmultiobject|default=[]: out_prefix: # type=str|default='antsCT_': Prefix that is prepended to all output files image_suffix: @@ -150,15 +155,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be 
considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -176,19 +181,19 @@ tests: brain_probability_mask: # type=file|default=: brain probability mask in template space segmentation_priors: - # type=inputmultiobject|default=[]: + # type=inputmultiobject|default=[]: t1_registration_template: # type=file|default=: Anatomical *intensity* template (assumed to be skull-stripped). A common case would be where this would be the same template as specified in the -e option which is not skull stripped. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -203,18 +208,18 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - anatomical_image: + anatomical_image: '"T1.nii.gz"' # type=file|default=: Structural *intensity* image, typically T1. If more than one anatomical image is specified, subsequently specified images are used during the segmentation process. However, only the first image is used in the registration of priors. Our suggestion would be to specify the T1 as the first image. - brain_template: + brain_template: '"study_template.nii.gz"' # type=file|default=: Anatomical *intensity* template (possibly created using a population data set with buildtemplateparallel.sh in ANTs). This template is *not* skull-stripped. - brain_probability_mask: + brain_probability_mask: '"ProbabilityMaskOfStudyTemplate.nii.gz"' # type=file|default=: brain probability mask in template space - segmentation_priors: - # type=inputmultiobject|default=[]: - t1_registration_template: + segmentation_priors: '["BrainSegmentationPrior01.nii.gz","BrainSegmentationPrior02.nii.gz","BrainSegmentationPrior03.nii.gz","BrainSegmentationPrior04.nii.gz"]' + # type=inputmultiobject|default=[]: + t1_registration_template: '"brain_study_template.nii.gz"' # type=file|default=: Anatomical *intensity* template (assumed to be skull-stripped). A common case would be where this would be the same template as specified in the -e option which is not skull stripped. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
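# The doctest values pinned above mirror the antsCorticalThickness.sh example quoted in the
# Docs section of this spec. A hedged reproduction with the original nipype interface; the
# sample NIfTI files are assumed to be present in the working directory.
from nipype.interfaces.ants.segmentation import CorticalThickness

ct = CorticalThickness()
ct.inputs.dimension = 3
ct.inputs.anatomical_image = "T1.nii.gz"
ct.inputs.brain_template = "study_template.nii.gz"
ct.inputs.brain_probability_mask = "ProbabilityMaskOfStudyTemplate.nii.gz"
ct.inputs.segmentation_priors = [
    "BrainSegmentationPrior01.nii.gz",
    "BrainSegmentationPrior02.nii.gz",
    "BrainSegmentationPrior03.nii.gz",
    "BrainSegmentationPrior04.nii.gz",
]
ct.inputs.t1_registration_template = "brain_study_template.nii.gz"
# Expected to match the antsCorticalThickness.sh command quoted in the Docs section,
# modulo the directory holding the priors.
print(ct.cmdline)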
# doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/cortical_thickness_callables.py b/nipype-auto-conv/specs/cortical_thickness_callables.py index 847ddf0..2113f52 100644 --- a/nipype-auto-conv/specs/cortical_thickness_callables.py +++ b/nipype-auto-conv/specs/cortical_thickness_callables.py @@ -1 +1,161 @@ -"""Module to put any functions that are referred to in CorticalThickness.yaml""" +"""Module to put any functions that are referred to in the "callables" section of CorticalThickness.yaml""" + +import os + + +def BrainExtractionMask_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["BrainExtractionMask"] + + +def BrainSegmentation_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["BrainSegmentation"] + + +def BrainSegmentationN4_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["BrainSegmentationN4"] + + +def BrainSegmentationPosteriors_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["BrainSegmentationPosteriors"] + + +def BrainVolumes_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["BrainVolumes"] + + +def CorticalThickness_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["CorticalThickness"] + + +def CorticalThicknessNormedToTemplate_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["CorticalThicknessNormedToTemplate"] + + +def ExtractedBrainN4_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["ExtractedBrainN4"] + + +def SubjectToTemplate0GenericAffine_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["SubjectToTemplate0GenericAffine"] + + +def SubjectToTemplate1Warp_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["SubjectToTemplate1Warp"] + + +def SubjectToTemplateLogJacobian_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["SubjectToTemplateLogJacobian"] + + +def TemplateToSubject0Warp_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["TemplateToSubject0Warp"] + + +def TemplateToSubject1GenericAffine_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["TemplateToSubject1GenericAffine"] + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + 
raise NotImplementedError + + +# Original source at L789 of /interfaces/ants/segmentation.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + outputs = {} + outputs["BrainExtractionMask"] = os.path.join( + output_dir, + inputs.out_prefix + "BrainExtractionMask." + inputs.image_suffix, + ) + outputs["ExtractedBrainN4"] = os.path.join( + output_dir, + inputs.out_prefix + "ExtractedBrain0N4." + inputs.image_suffix, + ) + outputs["BrainSegmentation"] = os.path.join( + output_dir, + inputs.out_prefix + "BrainSegmentation." + inputs.image_suffix, + ) + outputs["BrainSegmentationN4"] = os.path.join( + output_dir, + inputs.out_prefix + "BrainSegmentation0N4." + inputs.image_suffix, + ) + posteriors = [] + for i in range(len(inputs.segmentation_priors)): + posteriors.append( + os.path.join( + output_dir, + inputs.out_prefix + + "BrainSegmentationPosteriors%02d." % (i + 1) + + inputs.image_suffix, + ) + ) + outputs["BrainSegmentationPosteriors"] = posteriors + outputs["CorticalThickness"] = os.path.join( + output_dir, + inputs.out_prefix + "CorticalThickness." + inputs.image_suffix, + ) + outputs["TemplateToSubject1GenericAffine"] = os.path.join( + output_dir, inputs.out_prefix + "TemplateToSubject1GenericAffine.mat" + ) + outputs["TemplateToSubject0Warp"] = os.path.join( + output_dir, + inputs.out_prefix + "TemplateToSubject0Warp." + inputs.image_suffix, + ) + outputs["SubjectToTemplate1Warp"] = os.path.join( + output_dir, + inputs.out_prefix + "SubjectToTemplate1Warp." + inputs.image_suffix, + ) + outputs["SubjectToTemplate0GenericAffine"] = os.path.join( + output_dir, inputs.out_prefix + "SubjectToTemplate0GenericAffine.mat" + ) + outputs["SubjectToTemplateLogJacobian"] = os.path.join( + output_dir, + inputs.out_prefix + "SubjectToTemplateLogJacobian." + inputs.image_suffix, + ) + outputs["CorticalThicknessNormedToTemplate"] = os.path.join( + output_dir, + inputs.out_prefix + "CorticalThickness." + inputs.image_suffix, + ) + outputs["BrainVolumes"] = os.path.join( + output_dir, inputs.out_prefix + "brainvols.csv" + ) + return outputs diff --git a/nipype-auto-conv/specs/create_jacobian_determinant_image.yaml b/nipype-auto-conv/specs/create_jacobian_determinant_image.yaml index 06655ca..8dc42b0 100644 --- a/nipype-auto-conv/specs/create_jacobian_determinant_image.yaml +++ b/nipype-auto-conv/specs/create_jacobian_determinant_image.yaml @@ -5,7 +5,7 @@ # # Docs # ---- -# +# # Examples # -------- # >>> from nipype.interfaces.ants import CreateJacobianDeterminantImage @@ -15,7 +15,7 @@ # >>> jacobian.inputs.outputImage = 'out_name.nii.gz' # >>> jacobian.cmdline # 'CreateJacobianDeterminantImage 3 ants_Warp.nii.gz out_name.nii.gz' -# +# task_name: CreateJacobianDeterminantImage nipype_name: CreateJacobianDeterminantImage nipype_module: nipype.interfaces.ants.utils @@ -34,6 +34,9 @@ inputs: # type=file|default=: deformation transformation file outputImage: medimage/nifti-gz # type=file|default=: output filename + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. 
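# The _list_outputs above builds every CorticalThickness output name from out_prefix and
# image_suffix. With the spec's example prefix ('antsCT_', the trait default) and the
# 'nii.gz' suffix from the quoted command, the construction works out as below; the output
# directory is a hypothetical example.
import os

output_dir, out_prefix, image_suffix = "/tmp/antsct", "antsCT_", "nii.gz"
print(os.path.join(output_dir, out_prefix + "BrainExtractionMask." + image_suffix))
# /tmp/antsct/antsCT_BrainExtractionMask.nii.gz
print(os.path.join(output_dir, out_prefix + "BrainSegmentationPosteriors%02d." % 1 + image_suffix))
# /tmp/antsct/antsCT_BrainSegmentationPosteriors01.nii.gz
print(os.path.join(output_dir, out_prefix + "brainvols.csv"))
# /tmp/antsct/antsCT_brainvols.csv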
out_file: position: 1) outputs: @@ -77,15 +80,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -101,15 +104,15 @@ tests: outputImage: # type=file|default=: output filename imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -124,12 +127,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. imageDimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - deformationField: + deformationField: '"ants_Warp.nii.gz"' # type=file|default=: deformation transformation file - outputImage: + outputImage: '"out_name.nii.gz"' # type=file|default=: output filename imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
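# Reproduction of the CreateJacobianDeterminantImage doctest that the tests above exercise,
# taken from the Docs section of this spec; ants_Warp.nii.gz is assumed to exist locally.
from nipype.interfaces.ants import CreateJacobianDeterminantImage

jacobian = CreateJacobianDeterminantImage()
jacobian.inputs.imageDimension = 3
jacobian.inputs.deformationField = "ants_Warp.nii.gz"
jacobian.inputs.outputImage = "out_name.nii.gz"
print(jacobian.cmdline)
# CreateJacobianDeterminantImage 3 ants_Warp.nii.gz out_name.nii.gz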
# doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/create_jacobian_determinant_image_callables.py b/nipype-auto-conv/specs/create_jacobian_determinant_image_callables.py index ed5d927..0bd6e4c 100644 --- a/nipype-auto-conv/specs/create_jacobian_determinant_image_callables.py +++ b/nipype-auto-conv/specs/create_jacobian_determinant_image_callables.py @@ -1 +1,22 @@ -"""Module to put any functions that are referred to in CreateJacobianDeterminantImage.yaml""" +"""Module to put any functions that are referred to in the "callables" section of CreateJacobianDeterminantImage.yaml""" + +import os + + +def jacobian_image_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["jacobian_image"] + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError + + +# Original source at L756 of /interfaces/ants/utils.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + outputs = {} + outputs["jacobian_image"] = os.path.abspath(inputs.outputImage) + return outputs diff --git a/nipype-auto-conv/specs/create_tiled_mosaic.yaml b/nipype-auto-conv/specs/create_tiled_mosaic.yaml index 5924ea0..e5452ee 100644 --- a/nipype-auto-conv/specs/create_tiled_mosaic.yaml +++ b/nipype-auto-conv/specs/create_tiled_mosaic.yaml @@ -9,10 +9,10 @@ # provides useful functionality for common image analysis tasks. The basic # usage of CreateTiledMosaic is to tile a 3-D image volume slice-wise into # a 2-D image. -# +# # Examples # -------- -# +# # >>> from nipype.interfaces.ants.visualization import CreateTiledMosaic # >>> mosaic_slicer = CreateTiledMosaic() # >>> mosaic_slicer.inputs.input_image = 'T1.nii.gz' @@ -25,7 +25,7 @@ # >>> mosaic_slicer.inputs.slices = '[2 ,100 ,160]' # >>> mosaic_slicer.cmdline # 'CreateTiledMosaic -a 0.50 -d 2 -i T1.nii.gz -x mask.nii.gz -o output.png -p [ -15x -50 , -15x -30 ,0] -r rgb.nii.gz -s [2 ,100 ,160]' -# +# task_name: CreateTiledMosaic nipype_name: CreateTiledMosaic nipype_module: nipype.interfaces.ants.visualization @@ -42,10 +42,13 @@ inputs: # passed to the field in the automatically generated unittests. input_image: medimage/nifti-gz # type=file|default=: Main input is a 3-D grayscale image. - rgb_image: medimage/nifti-gz - # type=file|default=: An optional Rgb image can be added as an overlay.It must have the same imagegeometry as the input grayscale image. mask_image: medimage/nifti-gz # type=file|default=: Specifies the ROI of the RGB voxels used. + rgb_image: medimage/nifti-gz + # type=file|default=: An optional Rgb image can be added as an overlay.It must have the same imagegeometry as the input grayscale image. + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. 
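# Unlike the visualization callables, the jacobian_image callable above resolves the output
# with os.path.abspath(inputs.outputImage) rather than joining onto output_dir. A toy check;
# the flat import path and the working directory are illustrative assumptions.
import os
from types import SimpleNamespace

from create_jacobian_determinant_image_callables import jacobian_image_callable

os.chdir("/tmp")
inputs = SimpleNamespace(outputImage="out_name.nii.gz")
print(jacobian_image_callable(output_dir="/somewhere/else", inputs=inputs, stdout="", stderr=""))
# /tmp/out_name.nii.gz  (relative to the current directory, not output_dir)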
out_file: position: 1) outputs: @@ -103,15 +106,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -138,15 +141,15 @@ tests: slices: '"[2 ,100 ,160]"' # type=str|default='': Number of slices to increment Slice1xSlice2xSlice3[numberOfSlicesToIncrement,,] imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -159,11 +162,11 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - input_image: + input_image: '"T1.nii.gz"' # type=file|default=: Main input is a 3-D grayscale image. - rgb_image: + rgb_image: '"rgb.nii.gz"' # type=file|default=: An optional Rgb image can be added as an overlay.It must have the same imagegeometry as the input grayscale image. - mask_image: + mask_image: '"mask.nii.gz"' # type=file|default=: Specifies the ROI of the RGB voxels used. output_image: '"output.png"' # type=file: image file @@ -177,7 +180,7 @@ doctests: slices: '"[2 ,100 ,160]"' # type=str|default='': Number of slices to increment Slice1xSlice2xSlice3[numberOfSlicesToIncrement,,] imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
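# Sketch of the CreateTiledMosaic doctest inputs pinned above, using the original nipype
# interface. Only the inputs named in this spec are set; the alpha/direction/padding options
# from the Docs example are omitted, so the printed command is shorter than the one quoted
# there. The sample images are assumed to exist.
from nipype.interfaces.ants.visualization import CreateTiledMosaic

mosaic_slicer = CreateTiledMosaic()
mosaic_slicer.inputs.input_image = "T1.nii.gz"
mosaic_slicer.inputs.rgb_image = "rgb.nii.gz"
mosaic_slicer.inputs.mask_image = "mask.nii.gz"
mosaic_slicer.inputs.output_image = "output.png"
mosaic_slicer.inputs.slices = "[2 ,100 ,160]"
print(mosaic_slicer.cmdline)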
# doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/create_tiled_mosaic_callables.py b/nipype-auto-conv/specs/create_tiled_mosaic_callables.py index b65faa6..ffb423b 100644 --- a/nipype-auto-conv/specs/create_tiled_mosaic_callables.py +++ b/nipype-auto-conv/specs/create_tiled_mosaic_callables.py @@ -1 +1,22 @@ -"""Module to put any functions that are referred to in CreateTiledMosaic.yaml""" +"""Module to put any functions that are referred to in the "callables" section of CreateTiledMosaic.yaml""" + +import os + + +def output_image_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["output_image"] + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError + + +# Original source at L217 of /interfaces/ants/visualization.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + outputs = {} + outputs["output_image"] = os.path.join(output_dir, inputs.output_image) + return outputs diff --git a/nipype-auto-conv/specs/denoise_image.yaml b/nipype-auto-conv/specs/denoise_image.yaml index b2cf222..bc578fd 100644 --- a/nipype-auto-conv/specs/denoise_image.yaml +++ b/nipype-auto-conv/specs/denoise_image.yaml @@ -5,7 +5,7 @@ # # Docs # ---- -# +# # Examples # -------- # >>> import copy @@ -15,21 +15,21 @@ # >>> denoise.inputs.input_image = 'im1.nii' # >>> denoise.cmdline # 'DenoiseImage -d 3 -i im1.nii -n Gaussian -o im1_noise_corrected.nii -s 1' -# +# # >>> denoise_2 = copy.deepcopy(denoise) # >>> denoise_2.inputs.output_image = 'output_corrected_image.nii.gz' # >>> denoise_2.inputs.noise_model = 'Rician' # >>> denoise_2.inputs.shrink_factor = 2 # >>> denoise_2.cmdline # 'DenoiseImage -d 3 -i im1.nii -n Rician -o output_corrected_image.nii.gz -s 2' -# +# # >>> denoise_3 = DenoiseImage() # >>> denoise_3.inputs.input_image = 'im1.nii' # >>> denoise_3.inputs.save_noise = True # >>> denoise_3.cmdline # 'DenoiseImage -i im1.nii -n Gaussian -o [ im1_noise_corrected.nii, im1_noise.nii ] -s 1' -# -# +# +# task_name: DenoiseImage nipype_name: DenoiseImage nipype_module: nipype.interfaces.ants.segmentation @@ -46,12 +46,15 @@ inputs: # passed to the field in the automatically generated unittests. input_image: medimage/nifti1 # type=file|default=: A scalar image is expected as input for noise correction. - output_image: medimage/nifti-gz - # type=file: - # type=file|default=: The output consists of the noise corrected version of the input image. - noise_image: generic/file - # type=file: + noise_image: Path + # type=file: # type=file|default=: Filename for the estimated noise. + output_image: Path + # type=file: + # type=file|default=: The output consists of the noise corrected version of the input image. + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -65,12 +68,12 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- output_image: medimage/nifti-gz - # type=file: - # type=file|default=: The output consists of the noise corrected version of the input image. noise_image: generic/file - # type=file: + # type=file: # type=file|default=: Filename for the estimated noise. + output_image: medimage/nifti-gz + # type=file: + # type=file|default=: The output consists of the noise corrected version of the input image. callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields @@ -91,12 +94,12 @@ tests: shrink_factor: # type=int|default=1: Running noise correction on large images can be time consuming. To lessen computation time, the input image can be resampled. The shrink factor, specified as a single integer, describes this resampling. Shrink factor = 1 is the default. output_image: - # type=file: + # type=file: # type=file|default=: The output consists of the noise corrected version of the input image. save_noise: # type=bool|default=False: True if the estimated noise should be saved to file. noise_image: - # type=file: + # type=file: # type=file|default=: Filename for the estimated noise. verbose: # type=bool|default=False: Verbose output. @@ -107,15 +110,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -129,7 +132,7 @@ tests: input_image: # type=file|default=: A scalar image is expected as input for noise correction. imports: &id001 - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys - module: copy expected_outputs: @@ -137,8 +140,8 @@ tests: # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. 
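# The '&id001' anchor above records the `copy` import needed by the second DenoiseImage
# example from this spec's Docs section. A hedged reproduction with the original nipype
# interface (im1.nii assumed to exist):
import copy

from nipype.interfaces.ants.segmentation import DenoiseImage

denoise = DenoiseImage()
denoise.inputs.dimension = 3
denoise.inputs.input_image = "im1.nii"
print(denoise.cmdline)
# DenoiseImage -d 3 -i im1.nii -n Gaussian -o im1_noise_corrected.nii -s 1

denoise_2 = copy.deepcopy(denoise)
denoise_2.inputs.output_image = "output_corrected_image.nii.gz"
denoise_2.inputs.noise_model = "Rician"
denoise_2.inputs.shrink_factor = 2
print(denoise_2.cmdline)
# DenoiseImage -d 3 -i im1.nii -n Rician -o output_corrected_image.nii.gz -s 2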
Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -147,23 +150,23 @@ tests: - inputs: # dict[str, str] - values to provide to inputs fields in the task initialisation # (if not specified, will try to choose a sensible value) - output_image: - # type=file: + output_image: '"output_corrected_image.nii.gz"' + # type=file: # type=file|default=: The output consists of the noise corrected version of the input image. noise_model: '"Rician"' # type=enum|default='Gaussian'|allowed['Gaussian','Rician']: Employ a Rician or Gaussian noise model. shrink_factor: '2' # type=int|default=1: Running noise correction on large images can be time consuming. To lessen computation time, the input image can be resampled. The shrink factor, specified as a single integer, describes this resampling. Shrink factor = 1 is the default. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -177,15 +180,15 @@ tests: save_noise: 'True' # type=bool|default=False: True if the estimated noise should be saved to file. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -200,10 +203,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=2|allowed[2,3,4]: This option forces the image to be treated as a specified-dimensional image. If not specified, the program tries to infer the dimensionality from the input image. - input_image: + input_image: '"im1.nii"' # type=file|default=: A scalar image is expected as input for noise correction. 
imports: *id001 - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -213,15 +216,15 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - output_image: - # type=file: + output_image: '"output_corrected_image.nii.gz"' + # type=file: # type=file|default=: The output consists of the noise corrected version of the input image. noise_model: '"Rician"' # type=enum|default='Gaussian'|allowed['Gaussian','Rician']: Employ a Rician or Gaussian noise model. shrink_factor: '2' # type=int|default=1: Running noise correction on large images can be time consuming. To lessen computation time, the input image can be resampled. The shrink factor, specified as a single integer, describes this resampling. Shrink factor = 1 is the default. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -231,12 +234,12 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - input_image: + input_image: '"im1.nii"' # type=file|default=: A scalar image is expected as input for noise correction. save_noise: 'True' # type=bool|default=False: True if the estimated noise should be saved to file. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
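# The save_noise doctest pinned above switches DenoiseImage to the bracketed -o form that
# also writes the estimated noise image; a short reproduction from the Docs section of this
# spec (im1.nii assumed to exist):
from nipype.interfaces.ants.segmentation import DenoiseImage

denoise_3 = DenoiseImage()
denoise_3.inputs.input_image = "im1.nii"
denoise_3.inputs.save_noise = True
print(denoise_3.cmdline)
# DenoiseImage -i im1.nii -n Gaussian -o [ im1_noise_corrected.nii, im1_noise.nii ] -s 1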
# doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/denoise_image_callables.py b/nipype-auto-conv/specs/denoise_image_callables.py index 1f1bd27..b1656c0 100644 --- a/nipype-auto-conv/specs/denoise_image_callables.py +++ b/nipype-auto-conv/specs/denoise_image_callables.py @@ -1 +1,210 @@ -"""Module to put any functions that are referred to in DenoiseImage.yaml""" +"""Module to put any functions that are referred to in the "callables" section of DenoiseImage.yaml""" + +import attrs +import logging +import os +import os.path as op + + +def noise_image_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["noise_image"] + + +def output_image_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["output_image"] + + +iflogger = logging.getLogger("nipype.interface") + + +# Original source at L809 of /interfaces/base/core.py +def _filename_from_source( + name, chain=None, inputs=None, stdout=None, stderr=None, output_dir=None +): + if chain is None: + chain = [] + + trait_spec = inputs.trait(name) + retval = getattr(inputs, name) + source_ext = None + if (retval is attrs.NOTHING) or "%s" in retval: + if not trait_spec.name_source: + return retval + + # Do not generate filename when excluded by other inputs + if any( + (getattr(inputs, field) is not attrs.NOTHING) + for field in trait_spec.xor or () + ): + return retval + + # Do not generate filename when required fields are missing + if not all( + (getattr(inputs, field) is not attrs.NOTHING) + for field in trait_spec.requires or () + ): + return retval + + if (retval is not attrs.NOTHING) and "%s" in retval: + name_template = retval + else: + name_template = trait_spec.name_template + if not name_template: + name_template = "%s_generated" + + ns = trait_spec.name_source + while isinstance(ns, (list, tuple)): + if len(ns) > 1: + iflogger.warning("Only one name_source per trait is allowed") + ns = ns[0] + + if not isinstance(ns, (str, bytes)): + raise ValueError( + "name_source of '{}' trait should be an input trait " + "name, but a type {} object was found".format(name, type(ns)) + ) + + if getattr(inputs, ns) is not attrs.NOTHING: + name_source = ns + source = getattr(inputs, name_source) + while isinstance(source, list): + source = source[0] + + # special treatment for files + try: + _, base, source_ext = split_filename(source) + except (AttributeError, TypeError): + base = source + else: + if name in chain: + raise NipypeInterfaceError("Mutually pointing name_sources") + + chain.append(name) + base = _filename_from_source( + ns, + chain, + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + if base is not attrs.NOTHING: + _, _, source_ext = split_filename(base) + else: + # Do not generate filename when required fields are missing + return retval + + chain = None + retval = name_template % base + _, _, ext = split_filename(retval) + if trait_spec.keep_extension and (ext or source_ext): + if (ext is None or not ext) and source_ext: + retval = retval + source_ext + else: + retval = _overload_extension( + retval, + name, + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + return retval + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError + + +# Original 
source at L891 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + metadata = dict(name_source=lambda t: t is not None) + traits = inputs.traits(**metadata) + if traits: + outputs = {} + for name, trait_spec in list(traits.items()): + out_name = name + if trait_spec.output_name is not None: + out_name = trait_spec.output_name + fname = _filename_from_source( + name, inputs=inputs, stdout=stdout, stderr=stderr, output_dir=output_dir + ) + if fname is not attrs.NOTHING: + outputs[out_name] = os.path.abspath(fname) + return outputs + + +# Original source at L888 of /interfaces/base/core.py +def _overload_extension( + value, name=None, inputs=None, stdout=None, stderr=None, output_dir=None +): + return value + + +# Original source at L58 of /utils/filemanip.py +def split_filename(fname): + """Split a filename into parts: path, base filename and extension. + + Parameters + ---------- + fname : str + file or path name + + Returns + ------- + pth : str + base path from fname + fname : str + filename from fname, without extension + ext : str + file extension from fname + + Examples + -------- + >>> from nipype.utils.filemanip import split_filename + >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz') + >>> pth + '/home/data' + + >>> fname + 'subject' + + >>> ext + '.nii.gz' + + """ + + special_extensions = [".nii.gz", ".tar.gz", ".niml.dset"] + + pth = op.dirname(fname) + fname = op.basename(fname) + + ext = None + for special_ext in special_extensions: + ext_len = len(special_ext) + if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()): + ext = fname[-ext_len:] + fname = fname[:-ext_len] + break + if not ext: + fname, ext = op.splitext(fname) + + return pth, fname, ext + + +# Original source at L125 of /interfaces/base/support.py +class NipypeInterfaceError(Exception): + """Custom error for interfaces""" + + def __init__(self, value): + self.value = value + + def __str__(self): + return "{}".format(self.value) diff --git a/nipype-auto-conv/specs/gen_warp_fields.yaml b/nipype-auto-conv/specs/gen_warp_fields.yaml index 0519ffd..9c15214 100644 --- a/nipype-auto-conv/specs/gen_warp_fields.yaml +++ b/nipype-auto-conv/specs/gen_warp_fields.yaml @@ -5,7 +5,7 @@ # # Docs # ---- -# +# task_name: GenWarpFields nipype_name: GenWarpFields nipype_module: nipype.interfaces.ants.legacy @@ -20,10 +20,13 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - reference_image: generic/file - # type=file|default=: template file to warp to input_image: generic/file # type=file|default=: input image to warp to template + reference_image: generic/file + # type=file|default=: template file to warp to + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -39,14 +42,14 @@ outputs: # passed to the field in the automatically generated unittests. 
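# The _filename_from_source/split_filename helpers vendored above are what produce the
# default output name seen in the DenoiseImage doctests (im1.nii -> im1_noise_corrected.nii).
# A rough sketch of the final steps; the '%s_noise_corrected' name_template is an assumption
# inferred from that doctest and is not shown in this diff.
from nipype.utils.filemanip import split_filename

pth, base, ext = split_filename("im1.nii")  # ('', 'im1', '.nii')
name_template = "%s_noise_corrected"        # assumed
print(name_template % base + ext)           # im1_noise_corrected.nii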
affine_transformation: generic/file # type=file: affine (prefix_Affine.txt) - warp_field: generic/file - # type=file: warp field (prefix_Warp.nii) - inverse_warp_field: generic/file - # type=file: inverse warp field (prefix_InverseWarp.nii) input_file: generic/file # type=file: input image (prefix_repaired.nii) + inverse_warp_field: generic/file + # type=file: inverse warp field (prefix_InverseWarp.nii) output_file: generic/file # type=file: output image (prefix_deformed.nii) + warp_field: generic/file + # type=file: warp field (prefix_Warp.nii) callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields @@ -87,15 +90,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true diff --git a/nipype-auto-conv/specs/gen_warp_fields_callables.py b/nipype-auto-conv/specs/gen_warp_fields_callables.py index f08cf2c..a3b80a6 100644 --- a/nipype-auto-conv/specs/gen_warp_fields_callables.py +++ b/nipype-auto-conv/specs/gen_warp_fields_callables.py @@ -1 +1,74 @@ -"""Module to put any functions that are referred to in GenWarpFields.yaml""" +"""Module to put any functions that are referred to in the "callables" section of GenWarpFields.yaml""" + +import attrs +import os + + +def affine_transformation_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["affine_transformation"] + + +def input_file_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["input_file"] + + +def inverse_warp_field_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["inverse_warp_field"] + + +def output_file_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["output_file"] + + +def warp_field_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["warp_field"] + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError + + +# Original source at L141 of /interfaces/ants/legacy.py +def _list_outputs(inputs=None, stdout=None, stderr=None, 
output_dir=None): + outputs = {} + transmodel = inputs.transformation_model + + # When transform is set as 'RI'/'RA', wrap fields should not be expected + # The default transformation is GR, which outputs the wrap fields + if (transmodel is attrs.NOTHING) or ( + (transmodel is not attrs.NOTHING) and transmodel not in ["RI", "RA"] + ): + outputs["warp_field"] = os.path.join( + output_dir, inputs.out_prefix + "Warp.nii.gz" + ) + outputs["inverse_warp_field"] = os.path.join( + output_dir, inputs.out_prefix + "InverseWarp.nii.gz" + ) + + outputs["affine_transformation"] = os.path.join( + output_dir, inputs.out_prefix + "Affine.txt" + ) + outputs["input_file"] = os.path.join( + output_dir, inputs.out_prefix + "repaired.nii.gz" + ) + outputs["output_file"] = os.path.join( + output_dir, inputs.out_prefix + "deformed.nii.gz" + ) + + return outputs diff --git a/nipype-auto-conv/specs/image_math.yaml b/nipype-auto-conv/specs/image_math.yaml index aac8fdd..eb9bac5 100644 --- a/nipype-auto-conv/specs/image_math.yaml +++ b/nipype-auto-conv/specs/image_math.yaml @@ -5,9 +5,9 @@ # # Docs # ---- -# +# # Operations over images. -# +# # Examples # -------- # >>> ImageMath( @@ -15,51 +15,51 @@ # ... operation='+', # ... op2='2').cmdline # 'ImageMath 3 structural_maths.nii + structural.nii 2' -# +# # >>> ImageMath( # ... op1='structural.nii', # ... operation='Project', # ... op2='1 2').cmdline # 'ImageMath 3 structural_maths.nii Project structural.nii 1 2' -# +# # >>> ImageMath( # ... op1='structural.nii', # ... operation='G', # ... op2='4').cmdline # 'ImageMath 3 structural_maths.nii G structural.nii 4' -# +# # >>> ImageMath( # ... op1='structural.nii', # ... operation='TruncateImageIntensity', # ... op2='0.005 0.999 256').cmdline # 'ImageMath 3 structural_maths.nii TruncateImageIntensity structural.nii 0.005 0.999 256' -# +# # By default, Nipype copies headers from the first input image (``op1``) # to the output image. # For some operations, as the ``PadImage`` operation, the header cannot be copied from inputs to # outputs, and so ``copy_header`` option is automatically set to ``False``. -# +# # >>> pad = ImageMath( # ... op1='structural.nii', # ... operation='PadImage') # >>> pad.inputs.copy_header # False -# +# # While the operation is set to ``PadImage``, # setting ``copy_header = True`` will have no effect. -# +# # >>> pad.inputs.copy_header = True # >>> pad.inputs.copy_header # False -# +# # For any other operation, ``copy_header`` can be enabled/disabled normally: -# +# # >>> pad.inputs.operation = "ME" # >>> pad.inputs.copy_header = True # >>> pad.inputs.copy_header # True -# -# +# +# task_name: ImageMath nipype_name: ImageMath nipype_module: nipype.interfaces.ants.utils @@ -74,11 +74,14 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - output_image: generic/file - # type=file: output image file - # type=file|default=: output image file op1: medimage/nifti1 # type=file|default=: first operator + output_image: Path + # type=file: output image file + # type=file|default=: output image file + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. 
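# The GenWarpFields callables above only report warp fields when transformation_model is not
# 'RI'/'RA'. A toy check; the flat import path, the 'GR' model, and the 'ants_' prefix are
# illustrative assumptions (the real default comes from the interface's out_prefix trait).
from types import SimpleNamespace

from gen_warp_fields_callables import affine_transformation_callable, warp_field_callable

inputs = SimpleNamespace(transformation_model="GR", out_prefix="ants_")
print(warp_field_callable("/tmp/reg", inputs, "", ""))             # /tmp/reg/ants_Warp.nii.gz
print(affine_transformation_callable("/tmp/reg", inputs, "", ""))  # /tmp/reg/ants_Affine.txt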
out_file: position: 1) outputs: @@ -126,15 +129,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -150,15 +153,15 @@ tests: op2: '"2"' # type=traitcompound|default=None: second operator imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -174,15 +177,15 @@ tests: op2: '"1 2"' # type=traitcompound|default=None: second operator imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. 
Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -198,15 +201,15 @@ tests: op2: '"4"' # type=traitcompound|default=None: second operator imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -222,15 +225,15 @@ tests: op2: '"0.005 0.999 256"' # type=traitcompound|default=None: second operator imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -243,14 +246,14 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. 
- op1: + op1: '"structural.nii"' # type=file|default=: first operator operation: '"+"' # type=enum|default='m'|allowed['+','-','/','4DTensorTo3DTensor','Byte','Canny','Convolve','CorruptImage','D','Decision','ExtractContours','ExtractSlice','ExtractVectorComponent','FillHoles','Finite','FlattenImage','G','GC','GD','GE','GO','GetLargestComponent','Grad','LabelStats','Laplacian','Lipschitz','MC','MD','ME','MO','MTR','MaurerDistance','Neg','NeighborhoodStats','Normalize','PValueImage','PadImage','Project','ReplaceVoxelValue','ReplicateDisplacement','ReplicateImage','RescaleImage','SetTimeSpacing','SetTimeSpacingWarp','Sharpen','SigmoidImage','TensorAxialDiffusion','TensorColor','TensorEigenvalue','TensorFA','TensorFADenominator','TensorFANumerator','TensorMask','TensorMeanDiffusion','TensorRadialDiffusion','TensorToVector','TensorToVectorComponent','ThresholdAtMean','Translate','TriPlanarView','TruncateImageIntensity','UnsharpMask','WindowImage','^','abs','addtozero','exp','m','max','mean','overadd','stack','total','v+','v-','vm','vtotal']: mathematical operations op2: '"2"' # type=traitcompound|default=None: second operator imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -260,14 +263,14 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - op1: + op1: '"structural.nii"' # type=file|default=: first operator operation: '"Project"' # type=enum|default='m'|allowed['+','-','/','4DTensorTo3DTensor','Byte','Canny','Convolve','CorruptImage','D','Decision','ExtractContours','ExtractSlice','ExtractVectorComponent','FillHoles','Finite','FlattenImage','G','GC','GD','GE','GO','GetLargestComponent','Grad','LabelStats','Laplacian','Lipschitz','MC','MD','ME','MO','MTR','MaurerDistance','Neg','NeighborhoodStats','Normalize','PValueImage','PadImage','Project','ReplaceVoxelValue','ReplicateDisplacement','ReplicateImage','RescaleImage','SetTimeSpacing','SetTimeSpacingWarp','Sharpen','SigmoidImage','TensorAxialDiffusion','TensorColor','TensorEigenvalue','TensorFA','TensorFADenominator','TensorFANumerator','TensorMask','TensorMeanDiffusion','TensorRadialDiffusion','TensorToVector','TensorToVectorComponent','ThresholdAtMean','Translate','TriPlanarView','TruncateImageIntensity','UnsharpMask','WindowImage','^','abs','addtozero','exp','m','max','mean','overadd','stack','total','v+','v-','vm','vtotal']: mathematical operations op2: '"1 2"' # type=traitcompound|default=None: second operator imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -277,14 +280,14 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. 
# If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - op1: + op1: '"structural.nii"' # type=file|default=: first operator operation: '"G"' # type=enum|default='m'|allowed['+','-','/','4DTensorTo3DTensor','Byte','Canny','Convolve','CorruptImage','D','Decision','ExtractContours','ExtractSlice','ExtractVectorComponent','FillHoles','Finite','FlattenImage','G','GC','GD','GE','GO','GetLargestComponent','Grad','LabelStats','Laplacian','Lipschitz','MC','MD','ME','MO','MTR','MaurerDistance','Neg','NeighborhoodStats','Normalize','PValueImage','PadImage','Project','ReplaceVoxelValue','ReplicateDisplacement','ReplicateImage','RescaleImage','SetTimeSpacing','SetTimeSpacingWarp','Sharpen','SigmoidImage','TensorAxialDiffusion','TensorColor','TensorEigenvalue','TensorFA','TensorFADenominator','TensorFANumerator','TensorMask','TensorMeanDiffusion','TensorRadialDiffusion','TensorToVector','TensorToVectorComponent','ThresholdAtMean','Translate','TriPlanarView','TruncateImageIntensity','UnsharpMask','WindowImage','^','abs','addtozero','exp','m','max','mean','overadd','stack','total','v+','v-','vm','vtotal']: mathematical operations op2: '"4"' # type=traitcompound|default=None: second operator imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -294,14 +297,14 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - op1: + op1: '"structural.nii"' # type=file|default=: first operator operation: '"TruncateImageIntensity"' # type=enum|default='m'|allowed['+','-','/','4DTensorTo3DTensor','Byte','Canny','Convolve','CorruptImage','D','Decision','ExtractContours','ExtractSlice','ExtractVectorComponent','FillHoles','Finite','FlattenImage','G','GC','GD','GE','GO','GetLargestComponent','Grad','LabelStats','Laplacian','Lipschitz','MC','MD','ME','MO','MTR','MaurerDistance','Neg','NeighborhoodStats','Normalize','PValueImage','PadImage','Project','ReplaceVoxelValue','ReplicateDisplacement','ReplicateImage','RescaleImage','SetTimeSpacing','SetTimeSpacingWarp','Sharpen','SigmoidImage','TensorAxialDiffusion','TensorColor','TensorEigenvalue','TensorFA','TensorFADenominator','TensorFANumerator','TensorMask','TensorMeanDiffusion','TensorRadialDiffusion','TensorToVector','TensorToVectorComponent','ThresholdAtMean','Translate','TriPlanarView','TruncateImageIntensity','UnsharpMask','WindowImage','^','abs','addtozero','exp','m','max','mean','overadd','stack','total','v+','v-','vm','vtotal']: mathematical operations op2: '"0.005 0.999 256"' # type=traitcompound|default=None: second operator imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/image_math_callables.py b/nipype-auto-conv/specs/image_math_callables.py index 49800dd..e845128 100644 --- a/nipype-auto-conv/specs/image_math_callables.py +++ b/nipype-auto-conv/specs/image_math_callables.py @@ -1 +1,203 @@ -"""Module to put any functions that are referred to in ImageMath.yaml""" +"""Module to put any functions that are referred to in the "callables" section of ImageMath.yaml""" + +import attrs +import logging +import os +import os.path as op + + +def output_image_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["output_image"] + + +iflogger = logging.getLogger("nipype.interface") + + +# Original source at L809 of /interfaces/base/core.py +def _filename_from_source( + name, chain=None, inputs=None, stdout=None, stderr=None, output_dir=None +): + if chain is None: + chain = [] + + trait_spec = inputs.trait(name) + retval = getattr(inputs, name) + source_ext = None + if (retval is attrs.NOTHING) or "%s" in retval: + if not trait_spec.name_source: + return retval + + # Do not generate filename when excluded by other inputs + if any( + (getattr(inputs, field) is not attrs.NOTHING) + for field in trait_spec.xor or () + ): + return retval + + # Do not generate filename when required fields are missing + if not all( + (getattr(inputs, field) is not attrs.NOTHING) + for field in trait_spec.requires or () + ): + return retval + + if (retval is not attrs.NOTHING) and "%s" in retval: + name_template = retval + else: + name_template = trait_spec.name_template + if not name_template: + name_template = "%s_generated" + + ns = trait_spec.name_source + while isinstance(ns, (list, tuple)): + if len(ns) > 1: + iflogger.warning("Only one name_source per trait is allowed") + ns = ns[0] + + if not isinstance(ns, (str, bytes)): + raise ValueError( + "name_source of '{}' trait should be an input trait " + "name, but a type {} object was found".format(name, type(ns)) + ) + + if getattr(inputs, ns) is not attrs.NOTHING: + name_source = ns + source = getattr(inputs, name_source) + while isinstance(source, list): + source = source[0] + + # special treatment for files + try: + _, base, source_ext = split_filename(source) + except (AttributeError, TypeError): + base = source + else: + if name in chain: + raise NipypeInterfaceError("Mutually pointing name_sources") + + chain.append(name) + base = _filename_from_source( + ns, + chain, + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + if base is not attrs.NOTHING: + _, _, source_ext = split_filename(base) + else: + # Do not generate filename when required fields are missing + return retval + + chain = None + retval = name_template % base + _, _, ext = split_filename(retval) + if trait_spec.keep_extension and (ext or source_ext): + if (ext is None or not ext) and source_ext: + retval = retval + source_ext + else: + retval = _overload_extension( + retval, + name, + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + return retval + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError + + +# Original source at L891 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + metadata = dict(name_source=lambda t: t is not None) + traits = inputs.traits(**metadata) + if 
traits: + outputs = {} + for name, trait_spec in list(traits.items()): + out_name = name + if trait_spec.output_name is not None: + out_name = trait_spec.output_name + fname = _filename_from_source( + name, inputs=inputs, stdout=stdout, stderr=stderr, output_dir=output_dir + ) + if fname is not attrs.NOTHING: + outputs[out_name] = os.path.abspath(fname) + return outputs + + +# Original source at L888 of /interfaces/base/core.py +def _overload_extension( + value, name=None, inputs=None, stdout=None, stderr=None, output_dir=None +): + return value + + +# Original source at L58 of /utils/filemanip.py +def split_filename(fname): + """Split a filename into parts: path, base filename and extension. + + Parameters + ---------- + fname : str + file or path name + + Returns + ------- + pth : str + base path from fname + fname : str + filename from fname, without extension + ext : str + file extension from fname + + Examples + -------- + >>> from nipype.utils.filemanip import split_filename + >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz') + >>> pth + '/home/data' + + >>> fname + 'subject' + + >>> ext + '.nii.gz' + + """ + + special_extensions = [".nii.gz", ".tar.gz", ".niml.dset"] + + pth = op.dirname(fname) + fname = op.basename(fname) + + ext = None + for special_ext in special_extensions: + ext_len = len(special_ext) + if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()): + ext = fname[-ext_len:] + fname = fname[:-ext_len] + break + if not ext: + fname, ext = op.splitext(fname) + + return pth, fname, ext + + +# Original source at L125 of /interfaces/base/support.py +class NipypeInterfaceError(Exception): + """Custom error for interfaces""" + + def __init__(self, value): + self.value = value + + def __str__(self): + return "{}".format(self.value) diff --git a/nipype-auto-conv/specs/joint_fusion.yaml b/nipype-auto-conv/specs/joint_fusion.yaml index 81c5971..5b7d94e 100644 --- a/nipype-auto-conv/specs/joint_fusion.yaml +++ b/nipype-auto-conv/specs/joint_fusion.yaml @@ -5,23 +5,23 @@ # # Docs # ---- -# +# # An image fusion algorithm. -# +# # Developed by Hongzhi Wang and Paul Yushkevich, and it won segmentation challenges # at MICCAI 2012 and MICCAI 2013. # The original label fusion framework was extended to accommodate intensities by Brian # Avants. # This implementation is based on Paul's original ITK-style implementation # and Brian's ANTsR implementation. -# +# # References include 1) H. Wang, J. W. Suh, S. # Das, J. Pluta, C. Craige, P. Yushkevich, Multi-atlas segmentation with joint # label fusion IEEE Trans. on Pattern Analysis and Machine Intelligence, 35(3), # 611-623, 2013. and 2) H. Wang and P. A. Yushkevich, Multi-atlas segmentation # with joint label fusion and corrective learning--an open source implementation, # Front. Neuroinform., 2013. -# +# # Examples # -------- # >>> from nipype.interfaces.ants import JointFusion @@ -33,12 +33,12 @@ # >>> jf.cmdline # "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -l segmentation0.nii.gz # -b 2.0 -o ants_fusion_label_output.nii -s 3x3x3 -t ['im1.nii']" -# +# # >>> jf.inputs.target_image = [ ['im1.nii', 'im2.nii'] ] # >>> jf.cmdline # "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -l segmentation0.nii.gz # -b 2.0 -o ants_fusion_label_output.nii -s 3x3x3 -t ['im1.nii', 'im2.nii']" -# +# # >>> jf.inputs.atlas_image = [ ['rc1s1.nii','rc1s2.nii'], # ... 
['rc2s1.nii','rc2s2.nii'] ] # >>> jf.inputs.atlas_segmentation_image = ['segmentation0.nii.gz', @@ -47,7 +47,7 @@ # "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] # -l segmentation0.nii.gz -l segmentation1.nii.gz -b 2.0 -o ants_fusion_label_output.nii # -s 3x3x3 -t ['im1.nii', 'im2.nii']" -# +# # >>> jf.inputs.dimension = 3 # >>> jf.inputs.alpha = 0.5 # >>> jf.inputs.beta = 1.0 @@ -57,7 +57,7 @@ # "antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] # -l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -o ants_fusion_label_output.nii # -p 3x2x1 -s 3 -t ['im1.nii', 'im2.nii']" -# +# # >>> jf.inputs.search_radius = ['mask.nii'] # >>> jf.inputs.verbose = True # >>> jf.inputs.exclusion_image = ['roi01.nii', 'roi02.nii'] @@ -66,7 +66,7 @@ # "antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] # -l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -e 1[roi01.nii] -e 2[roi02.nii] # -o ants_fusion_label_output.nii -p 3x2x1 -s mask.nii -t ['im1.nii', 'im2.nii'] -v" -# +# # >>> jf.inputs.out_label_fusion = 'ants_fusion_label_output.nii' # >>> jf.inputs.out_intensity_fusion_name_format = 'ants_joint_fusion_intensity_%d.nii.gz' # >>> jf.inputs.out_label_post_prob_name_format = 'ants_joint_fusion_posterior_%d.nii.gz' @@ -77,8 +77,8 @@ # -o [ants_fusion_label_output.nii, ants_joint_fusion_intensity_%d.nii.gz, # ants_joint_fusion_posterior_%d.nii.gz, ants_joint_fusion_voting_weight_%d.nii.gz] # -p 3x2x1 -s mask.nii -t ['im1.nii', 'im2.nii'] -v" -# -# +# +# task_name: JointFusion nipype_name: JointFusion nipype_module: nipype.interfaces.ants.segmentation @@ -99,9 +99,12 @@ inputs: # type=list|default=[]: Specify an exclusion region for the given label. mask_image: generic/file # type=file|default=: If a mask image is specified, fusion is only performed in the mask region. - out_label_fusion: medimage/nifti1 - # type=file: + out_label_fusion: Path + # type=file: # type=file|default=: The output label fusion image. + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -115,9 +118,15 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. + out_atlas_voting_weight: generic/file+list-of + # type=outputmultiobject: + out_intensity_fusion: generic/file+list-of + # type=outputmultiobject: out_label_fusion: medimage/nifti1 - # type=file: + # type=file: # type=file|default=: The output label fusion image. + out_label_post_prob: generic/file+list-of + # type=outputmultiobject: callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields @@ -160,7 +169,7 @@ tests: mask_image: # type=file|default=: If a mask image is specified, fusion is only performed in the mask region. out_label_fusion: - # type=file: + # type=file: # type=file|default=: The output label fusion image. out_intensity_fusion_name_format: # type=str|default='': Optional intensity fusion image file name format. (e.g. 
"antsJointFusionIntensity_%d.nii.gz") @@ -177,15 +186,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -194,8 +203,8 @@ tests: - inputs: # dict[str, str] - values to provide to inputs fields in the task initialisation # (if not specified, will try to choose a sensible value) - out_label_fusion: - # type=file: + out_label_fusion: '"ants_fusion_label_output.nii"' + # type=file: # type=file|default=: The output label fusion image. atlas_image: '[ ["rc1s1.nii","rc1s2.nii"] ]' # type=list|default=[]: The atlas image (or multimodal atlas images) assumed to be aligned to a common image domain. @@ -204,15 +213,15 @@ tests: target_image: '["im1.nii"]' # type=list|default=[]: The target image (or multimodal target images) assumed to be aligned to a common image domain. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -224,15 +233,15 @@ tests: target_image: '[ ["im1.nii", "im2.nii"] ]' # type=list|default=[]: The target image (or multimodal target images) assumed to be aligned to a common image domain. 
imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -246,15 +255,15 @@ tests: atlas_segmentation_image: # type=inputmultiobject|default=[]: The atlas segmentation images. For performing label fusion the number of specified segmentations should be identical to the number of atlas image sets. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -274,15 +283,15 @@ tests: search_radius: '[3]' # type=list|default=[3, 3, 3]: Search radius for similarity measures. Default = 3x3x3. One can also specify an image where the value at the voxel specifies the isotropic search radius at that voxel. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -300,15 +309,15 @@ tests: exclusion_image_label: '["1","2"]' # type=list|default=[]: Specify a label for the exclusion region. 
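A note on the `callables` mapping in the outputs section above: each entry names a function defined in the adjacent `*_callables.py` module, and those functions all share the `(output_dir, inputs, stdout, stderr)` signature used by `out_label_fusion_callable` and friends later in this diff. A minimal, purely illustrative sketch of that contract, using a `SimpleNamespace` stand-in for the real inputs object (the stand-in and the inlined body are assumptions for illustration, not part of the spec):

    import os
    from types import SimpleNamespace

    # Hypothetical stand-in for the interface inputs; only the attribute the
    # callable reads needs to exist for this sketch.
    fake_inputs = SimpleNamespace(out_label_fusion="ants_fusion_label_output.nii")

    def out_label_fusion_callable(output_dir, inputs, stdout, stderr):
        # Simplified body mirroring the pattern in the *_callables.py modules:
        # a _list_outputs()-style helper resolves the value, here inlined.
        return os.path.abspath(inputs.out_label_fusion)

    print(out_label_fusion_callable("/tmp/work", fake_inputs, stdout="", stderr=""))
    # prints an absolute path ending in ants_fusion_label_output.nii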
imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -317,8 +326,8 @@ tests: - inputs: # dict[str, str] - values to provide to inputs fields in the task initialisation # (if not specified, will try to choose a sensible value) - out_label_fusion: - # type=file: + out_label_fusion: '"ants_fusion_label_output.nii"' + # type=file: # type=file|default=: The output label fusion image. out_intensity_fusion_name_format: '"ants_joint_fusion_intensity_%d.nii.gz"' # type=str|default='': Optional intensity fusion image file name format. (e.g. "antsJointFusionIntensity_%d.nii.gz") @@ -327,15 +336,15 @@ tests: out_atlas_voting_weight_name_format: '"ants_joint_fusion_voting_weight_%d.nii.gz"' # type=str|default='antsJointFusionVotingWeight_%d.nii.gz': Optional atlas voting weight image file name format. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -348,17 +357,17 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - out_label_fusion: - # type=file: + out_label_fusion: '"ants_fusion_label_output.nii"' + # type=file: # type=file|default=: The output label fusion image. atlas_image: '[ ["rc1s1.nii","rc1s2.nii"] ]' # type=list|default=[]: The atlas image (or multimodal atlas images) assumed to be aligned to a common image domain. - atlas_segmentation_image: + atlas_segmentation_image: '["segmentation0.nii.gz"]' # type=inputmultiobject|default=[]: The atlas segmentation images. For performing label fusion the number of specified segmentations should be identical to the number of atlas image sets. 
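The `imports` field that recurs in these test and doctest stanzas holds items with 'module', 'name' and optional 'alias' keys. As a purely illustrative sketch (the entries below are hypothetical, not taken from this spec), each item corresponds to an ordinary Python import along these lines:

    # Hypothetical entries using the documented 'module'/'name'/'alias' keys.
    import_specs = [
        {"module": "nipype.interfaces.ants", "name": "JointFusion"},
        {"module": "nipype.interfaces.ants", "name": "JointFusion", "alias": "JF"},
    ]

    def as_import_statement(spec):
        # Render one entry as the equivalent Python import statement.
        stmt = f"from {spec['module']} import {spec['name']}"
        if "alias" in spec:
            stmt += f" as {spec['alias']}"
        return stmt

    for spec in import_specs:
        print(as_import_statement(spec))
    # from nipype.interfaces.ants import JointFusion
    # from nipype.interfaces.ants import JointFusion as JF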
target_image: '["im1.nii"]' # type=list|default=[]: The target image (or multimodal target images) assumed to be aligned to a common image domain. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -371,7 +380,7 @@ doctests: target_image: '[ ["im1.nii", "im2.nii"] ]' # type=list|default=[]: The target image (or multimodal target images) assumed to be aligned to a common image domain. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -383,10 +392,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. atlas_image: '[ ["rc1s1.nii","rc1s2.nii"],["rc2s1.nii","rc2s2.nii"] ]' # type=list|default=[]: The atlas image (or multimodal atlas images) assumed to be aligned to a common image domain. - atlas_segmentation_image: + atlas_segmentation_image: '["segmentation0.nii.gz","segmentation1.nii.gz"]' # type=inputmultiobject|default=[]: The atlas segmentation images. For performing label fusion the number of specified segmentations should be identical to the number of atlas image sets. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -407,7 +416,7 @@ doctests: search_radius: '[3]' # type=list|default=[3, 3, 3]: Search radius for similarity measures. Default = 3x3x3. One can also specify an image where the value at the voxel specifies the isotropic search radius at that voxel. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -421,12 +430,12 @@ doctests: # type=list|default=[3, 3, 3]: Search radius for similarity measures. Default = 3x3x3. One can also specify an image where the value at the voxel specifies the isotropic search radius at that voxel. verbose: 'True' # type=bool|default=False: Verbose output. - exclusion_image: + exclusion_image: '["roi01.nii", "roi02.nii"]' # type=list|default=[]: Specify an exclusion region for the given label. exclusion_image_label: '["1","2"]' # type=list|default=[]: Specify a label for the exclusion region. 
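The `out_intensity_fusion`, `out_label_post_prob` and `out_atlas_voting_weight` outputs declared earlier in this spec are collected by the `_list_outputs` helper in `joint_fusion_callables.py` (further down in this diff), which turns the `%d` name formats into glob patterns. A minimal sketch of that resolution, assuming antsJointFusion has already written the files into the current working directory:

    import os
    from glob import glob

    # Format string used in the doctests above for out_intensity_fusion_name_format.
    name_format = "ants_joint_fusion_intensity_%d.nii.gz"

    # As in _list_outputs: swap the printf-style index for a wildcard and glob
    # for whatever files the command actually produced.
    pattern = os.path.abspath(name_format.replace("%d", "*"))
    out_intensity_fusion = glob(pattern)
    # e.g. ['.../ants_joint_fusion_intensity_0.nii.gz', '.../ants_joint_fusion_intensity_1.nii.gz']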
imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -436,8 +445,8 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - out_label_fusion: - # type=file: + out_label_fusion: '"ants_fusion_label_output.nii"' + # type=file: # type=file|default=: The output label fusion image. out_intensity_fusion_name_format: '"ants_joint_fusion_intensity_%d.nii.gz"' # type=str|default='': Optional intensity fusion image file name format. (e.g. "antsJointFusionIntensity_%d.nii.gz") @@ -446,7 +455,7 @@ doctests: out_atlas_voting_weight_name_format: '"ants_joint_fusion_voting_weight_%d.nii.gz"' # type=str|default='antsJointFusionVotingWeight_%d.nii.gz': Optional atlas voting weight image file name format. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/joint_fusion_callables.py b/nipype-auto-conv/specs/joint_fusion_callables.py index f0bf25d..57ba462 100644 --- a/nipype-auto-conv/specs/joint_fusion_callables.py +++ b/nipype-auto-conv/specs/joint_fusion_callables.py @@ -1 +1,60 @@ -"""Module to put any functions that are referred to in JointFusion.yaml""" +"""Module to put any functions that are referred to in the "callables" section of JointFusion.yaml""" + +import attrs +import os +from glob import glob + + +def out_atlas_voting_weight_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_atlas_voting_weight"] + + +def out_intensity_fusion_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_intensity_fusion"] + + +def out_label_fusion_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_label_fusion"] + + +def out_label_post_prob_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_label_post_prob"] + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError + + +# Original source at L1541 of /interfaces/ants/segmentation.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + outputs = {} + if inputs.out_label_fusion is not attrs.NOTHING: + outputs["out_label_fusion"] = os.path.abspath(inputs.out_label_fusion) + if inputs.out_intensity_fusion_name_format is not attrs.NOTHING: + outputs["out_intensity_fusion"] = glob( + 
os.path.abspath(inputs.out_intensity_fusion_name_format.replace("%d", "*")) + ) + if inputs.out_label_post_prob_name_format is not attrs.NOTHING: + outputs["out_label_post_prob"] = glob( + os.path.abspath(inputs.out_label_post_prob_name_format.replace("%d", "*")) + ) + if inputs.out_atlas_voting_weight_name_format is not attrs.NOTHING: + outputs["out_atlas_voting_weight"] = glob( + os.path.abspath( + inputs.out_atlas_voting_weight_name_format.replace("%d", "*") + ) + ) + return outputs diff --git a/nipype-auto-conv/specs/kelly_kapowski.yaml b/nipype-auto-conv/specs/kelly_kapowski.yaml index fd76247..7966dc9 100644 --- a/nipype-auto-conv/specs/kelly_kapowski.yaml +++ b/nipype-auto-conv/specs/kelly_kapowski.yaml @@ -5,13 +5,13 @@ # # Docs # ---- -# +# # Nipype Interface to ANTs' KellyKapowski, also known as DiReCT. -# +# # DiReCT is a registration based estimate of cortical thickness. It was published # in S. R. Das, B. B. Avants, M. Grossman, and J. C. Gee, Registration based # cortical thickness measurement, Neuroimage 2009, 45:867--879. -# +# # Examples # -------- # >>> from nipype.interfaces.ants.segmentation import KellyKapowski @@ -27,8 +27,8 @@ # --maximum-number-of-invert-displacement-field-iterations 20 --number-of-integration-points 10 # --segmentation-image "[segmentation0.nii.gz,2,3]" --smoothing-variance 1.000000 # --smoothing-velocity-field-parameter 1.500000 --thickness-prior-estimate 10.000000' -# -# +# +# task_name: KellyKapowski nipype_name: KellyKapowski nipype_module: nipype.interfaces.ants.segmentation @@ -43,20 +43,23 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - segmentation_image: medimage/nifti-gz - # type=file|default=: A segmentation image must be supplied labeling the gray and white matters. Default values = 2 and 3, respectively. + cortical_thickness: Path + # type=file: A thickness map defined in the segmented gray matter. + # type=file|default=: Filename for the cortical thickness. gray_matter_prob_image: generic/file # type=file|default=: In addition to the segmentation image, a gray matter probability image can be used. If no such image is supplied, one is created using the segmentation image and a variance of 1.0 mm. - white_matter_prob_image: generic/file - # type=file|default=: In addition to the segmentation image, a white matter probability image can be used. If no such image is supplied, one is created using the segmentation image and a variance of 1.0 mm. + segmentation_image: medimage/nifti-gz + # type=file|default=: A segmentation image must be supplied labeling the gray and white matters. Default values = 2 and 3, respectively. thickness_prior_image: generic/file # type=file|default=: An image containing spatially varying prior thickness values. - cortical_thickness: generic/file - # type=file: A thickness map defined in the segmented gray matter. - # type=file|default=: Filename for the cortical thickness. - warped_white_matter: generic/file + warped_white_matter: Path # type=file: A warped white matter image. # type=file|default=: Filename for the warped white matter file. + white_matter_prob_image: generic/file + # type=file|default=: In addition to the segmentation image, a white matter probability image can be used. If no such image is supplied, one is created using the segmentation image and a variance of 1.0 mm. 
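When `cortical_thickness` or `warped_white_matter` is left unset, the `_gen_filename` helper in `kelly_kapowski_callables.py` (later in this diff) derives a default name from `segmentation_image`. A minimal self-contained sketch of that naming rule, with `split_filename` trimmed down to the double-extension handling it needs here:

    import os.path as op

    def split_filename(fname):
        # Trimmed-down version of the helper in the callables module: handle
        # the special .nii.gz double extension, otherwise use op.splitext.
        pth, base = op.dirname(fname), op.basename(fname)
        if base.lower().endswith(".nii.gz"):
            return pth, base[: -len(".nii.gz")], ".nii.gz"
        stem, ext = op.splitext(base)
        return pth, stem, ext

    def default_cortical_thickness(segmentation_image):
        # Mirrors _gen_filename("cortical_thickness", ...) when no value is set.
        _, name, ext = split_filename(segmentation_image)
        return name + "_cortical_thickness" + ext

    print(default_cortical_thickness("segmentation0.nii.gz"))
    # segmentation0_cortical_thickness.nii.gz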
+ callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -130,15 +133,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -156,15 +159,15 @@ tests: thickness_prior_estimate: '10' # type=float|default=10: Provides a prior constraint on the final thickness measurement in mm. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -179,14 +182,14 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - segmentation_image: + segmentation_image: '"segmentation0.nii.gz"' # type=file|default=: A segmentation image must be supplied labeling the gray and white matters. Default values = 2 and 3, respectively. convergence: '"[45,0.0,10]"' # type=str|default='[50,0.001,10]': Convergence is determined by fitting a line to the normalized energy profile of the last N iterations (where N is specified by the window size) and determining the slope which is then compared with the convergence threshold. thickness_prior_estimate: '10' # type=float|default=10: Provides a prior constraint on the final thickness measurement in mm. 
imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/kelly_kapowski_callables.py b/nipype-auto-conv/specs/kelly_kapowski_callables.py index 9073c2b..543950d 100644 --- a/nipype-auto-conv/specs/kelly_kapowski_callables.py +++ b/nipype-auto-conv/specs/kelly_kapowski_callables.py @@ -1 +1,222 @@ -"""Module to put any functions that are referred to in KellyKapowski.yaml""" +"""Module to put any functions that are referred to in the "callables" section of KellyKapowski.yaml""" + +import attrs +import logging +import os +import os.path as op + + +def cortical_thickness_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["cortical_thickness"] + + +def warped_white_matter_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["warped_white_matter"] + + +iflogger = logging.getLogger("nipype.interface") + + +# Original source at L809 of /interfaces/base/core.py +def _filename_from_source( + name, chain=None, inputs=None, stdout=None, stderr=None, output_dir=None +): + if chain is None: + chain = [] + + trait_spec = inputs.trait(name) + retval = getattr(inputs, name) + source_ext = None + if (retval is attrs.NOTHING) or "%s" in retval: + if not trait_spec.name_source: + return retval + + # Do not generate filename when excluded by other inputs + if any( + (getattr(inputs, field) is not attrs.NOTHING) + for field in trait_spec.xor or () + ): + return retval + + # Do not generate filename when required fields are missing + if not all( + (getattr(inputs, field) is not attrs.NOTHING) + for field in trait_spec.requires or () + ): + return retval + + if (retval is not attrs.NOTHING) and "%s" in retval: + name_template = retval + else: + name_template = trait_spec.name_template + if not name_template: + name_template = "%s_generated" + + ns = trait_spec.name_source + while isinstance(ns, (list, tuple)): + if len(ns) > 1: + iflogger.warning("Only one name_source per trait is allowed") + ns = ns[0] + + if not isinstance(ns, (str, bytes)): + raise ValueError( + "name_source of '{}' trait should be an input trait " + "name, but a type {} object was found".format(name, type(ns)) + ) + + if getattr(inputs, ns) is not attrs.NOTHING: + name_source = ns + source = getattr(inputs, name_source) + while isinstance(source, list): + source = source[0] + + # special treatment for files + try: + _, base, source_ext = split_filename(source) + except (AttributeError, TypeError): + base = source + else: + if name in chain: + raise NipypeInterfaceError("Mutually pointing name_sources") + + chain.append(name) + base = _filename_from_source( + ns, + chain, + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + if base is not attrs.NOTHING: + _, _, source_ext = split_filename(base) + else: + # Do not generate filename when required fields are missing + return retval + + chain = None + retval = name_template % base + _, _, ext = split_filename(retval) + if trait_spec.keep_extension and (ext or source_ext): + if (ext is 
None or not ext) and source_ext: + retval = retval + source_ext + else: + retval = _overload_extension( + retval, + name, + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + return retval + + +# Original source at L1765 of /interfaces/ants/segmentation.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + if name == "cortical_thickness": + output = inputs.cortical_thickness + if output is attrs.NOTHING: + _, name, ext = split_filename(inputs.segmentation_image) + output = name + "_cortical_thickness" + ext + return output + + if name == "warped_white_matter": + output = inputs.warped_white_matter + if output is attrs.NOTHING: + _, name, ext = split_filename(inputs.segmentation_image) + output = name + "_warped_white_matter" + ext + return output + + +# Original source at L891 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + metadata = dict(name_source=lambda t: t is not None) + traits = inputs.traits(**metadata) + if traits: + outputs = {} + for name, trait_spec in list(traits.items()): + out_name = name + if trait_spec.output_name is not None: + out_name = trait_spec.output_name + fname = _filename_from_source( + name, inputs=inputs, stdout=stdout, stderr=stderr, output_dir=output_dir + ) + if fname is not attrs.NOTHING: + outputs[out_name] = os.path.abspath(fname) + return outputs + + +# Original source at L888 of /interfaces/base/core.py +def _overload_extension( + value, name=None, inputs=None, stdout=None, stderr=None, output_dir=None +): + return value + + +# Original source at L58 of /utils/filemanip.py +def split_filename(fname): + """Split a filename into parts: path, base filename and extension. + + Parameters + ---------- + fname : str + file or path name + + Returns + ------- + pth : str + base path from fname + fname : str + filename from fname, without extension + ext : str + file extension from fname + + Examples + -------- + >>> from nipype.utils.filemanip import split_filename + >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz') + >>> pth + '/home/data' + + >>> fname + 'subject' + + >>> ext + '.nii.gz' + + """ + + special_extensions = [".nii.gz", ".tar.gz", ".niml.dset"] + + pth = op.dirname(fname) + fname = op.basename(fname) + + ext = None + for special_ext in special_extensions: + ext_len = len(special_ext) + if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()): + ext = fname[-ext_len:] + fname = fname[:-ext_len] + break + if not ext: + fname, ext = op.splitext(fname) + + return pth, fname, ext + + +# Original source at L125 of /interfaces/base/support.py +class NipypeInterfaceError(Exception): + """Custom error for interfaces""" + + def __init__(self, value): + self.value = value + + def __str__(self): + return "{}".format(self.value) diff --git a/nipype-auto-conv/specs/label_geometry.yaml b/nipype-auto-conv/specs/label_geometry.yaml index 220c058..8c34c50 100644 --- a/nipype-auto-conv/specs/label_geometry.yaml +++ b/nipype-auto-conv/specs/label_geometry.yaml @@ -5,9 +5,9 @@ # # Docs # ---- -# +# # Extracts geometry measures using a label file and an optional image file -# +# # Examples # -------- # >>> from nipype.interfaces.ants import LabelGeometry @@ -16,12 +16,12 @@ # >>> label_extract.inputs.label_image = 'atlas.nii.gz' # >>> label_extract.cmdline # 'LabelGeometryMeasures 3 atlas.nii.gz [] atlas.csv' -# +# # >>> label_extract.inputs.intensity_image = 'ants_Warp.nii.gz' # >>> label_extract.cmdline # 
'LabelGeometryMeasures 3 atlas.nii.gz ants_Warp.nii.gz atlas.csv' -# -# +# +# task_name: LabelGeometry nipype_name: LabelGeometry nipype_module: nipype.interfaces.ants.utils @@ -36,10 +36,13 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - label_image: medimage/nifti-gz - # type=file|default=: label image to use for extracting geometry measures intensity_image: medimage/nifti-gz # type=file|default='[]': Intensity image to extract values from. This is an optional input + label_image: medimage/nifti-gz + # type=file|default=: label image to use for extracting geometry measures + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -83,15 +86,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -105,15 +108,15 @@ tests: label_image: # type=file|default=: label image to use for extracting geometry measures imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -125,15 +128,15 @@ tests: intensity_image: # type=file|default='[]': Intensity image to extract values from. 
This is an optional input imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -148,10 +151,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - label_image: + label_image: '"atlas.nii.gz"' # type=file|default=: label image to use for extracting geometry measures imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -161,10 +164,10 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - intensity_image: + intensity_image: '"ants_Warp.nii.gz"' # type=file|default='[]': Intensity image to extract values from. This is an optional input imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/label_geometry_callables.py b/nipype-auto-conv/specs/label_geometry_callables.py index 2ec8a91..8df4bb8 100644 --- a/nipype-auto-conv/specs/label_geometry_callables.py +++ b/nipype-auto-conv/specs/label_geometry_callables.py @@ -1 +1,203 @@ -"""Module to put any functions that are referred to in LabelGeometry.yaml""" +"""Module to put any functions that are referred to in the "callables" section of LabelGeometry.yaml""" + +import attrs +import logging +import os +import os.path as op + + +def output_file_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["output_file"] + + +iflogger = logging.getLogger("nipype.interface") + + +# Original source at L809 of /interfaces/base/core.py +def _filename_from_source( + name, chain=None, inputs=None, stdout=None, stderr=None, output_dir=None +): + if chain is None: + chain = [] + + trait_spec = inputs.trait(name) + retval = getattr(inputs, name) + source_ext = None + if (retval is attrs.NOTHING) or "%s" in retval: + if not trait_spec.name_source: + return retval + + # Do not generate filename when excluded by other inputs + if any( + (getattr(inputs, field) is not attrs.NOTHING) + for field in trait_spec.xor or () + ): + return retval + + # Do not generate filename when required fields are missing + if not all( + (getattr(inputs, field) is not attrs.NOTHING) + for field in trait_spec.requires or () + ): + return retval + + if (retval is not attrs.NOTHING) and "%s" in retval: + name_template = retval + else: + name_template = trait_spec.name_template + if not name_template: + name_template = "%s_generated" + + ns = trait_spec.name_source + while isinstance(ns, (list, tuple)): + if len(ns) > 1: + iflogger.warning("Only one name_source per trait is allowed") + ns = ns[0] + + if not isinstance(ns, (str, bytes)): + raise ValueError( + "name_source of '{}' trait should be an input trait " + "name, but a type {} object was found".format(name, type(ns)) + ) + + if getattr(inputs, ns) is not attrs.NOTHING: + name_source = ns + source = getattr(inputs, name_source) + while isinstance(source, list): + source = source[0] + + # special treatment for files + try: + _, base, source_ext = split_filename(source) + except (AttributeError, TypeError): + base = source + else: + if name in chain: + raise NipypeInterfaceError("Mutually pointing name_sources") + + chain.append(name) + base = _filename_from_source( + ns, + chain, + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + if base is not attrs.NOTHING: + _, _, source_ext = split_filename(base) + else: + # Do not generate filename when required fields are missing + return retval + + chain = None + retval = name_template % base + _, _, ext = split_filename(retval) + if trait_spec.keep_extension and (ext or source_ext): + if (ext is None or not ext) and source_ext: + retval = retval + source_ext + else: + retval = _overload_extension( + retval, + name, + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + return retval + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError + + +# Original source at L891 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + metadata = dict(name_source=lambda t: t is not None) + traits = 
inputs.traits(**metadata) + if traits: + outputs = {} + for name, trait_spec in list(traits.items()): + out_name = name + if trait_spec.output_name is not None: + out_name = trait_spec.output_name + fname = _filename_from_source( + name, inputs=inputs, stdout=stdout, stderr=stderr, output_dir=output_dir + ) + if fname is not attrs.NOTHING: + outputs[out_name] = os.path.abspath(fname) + return outputs + + +# Original source at L888 of /interfaces/base/core.py +def _overload_extension( + value, name=None, inputs=None, stdout=None, stderr=None, output_dir=None +): + return value + + +# Original source at L58 of /utils/filemanip.py +def split_filename(fname): + """Split a filename into parts: path, base filename and extension. + + Parameters + ---------- + fname : str + file or path name + + Returns + ------- + pth : str + base path from fname + fname : str + filename from fname, without extension + ext : str + file extension from fname + + Examples + -------- + >>> from nipype.utils.filemanip import split_filename + >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz') + >>> pth + '/home/data' + + >>> fname + 'subject' + + >>> ext + '.nii.gz' + + """ + + special_extensions = [".nii.gz", ".tar.gz", ".niml.dset"] + + pth = op.dirname(fname) + fname = op.basename(fname) + + ext = None + for special_ext in special_extensions: + ext_len = len(special_ext) + if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()): + ext = fname[-ext_len:] + fname = fname[:-ext_len] + break + if not ext: + fname, ext = op.splitext(fname) + + return pth, fname, ext + + +# Original source at L125 of /interfaces/base/support.py +class NipypeInterfaceError(Exception): + """Custom error for interfaces""" + + def __init__(self, value): + self.value = value + + def __str__(self): + return "{}".format(self.value) diff --git a/nipype-auto-conv/specs/laplacian_thickness.yaml b/nipype-auto-conv/specs/laplacian_thickness.yaml index 5b815a3..47dff88 100644 --- a/nipype-auto-conv/specs/laplacian_thickness.yaml +++ b/nipype-auto-conv/specs/laplacian_thickness.yaml @@ -6,22 +6,22 @@ # Docs # ---- # Calculates the cortical thickness from an anatomical image -# +# # Examples # -------- -# +# # >>> from nipype.interfaces.ants import LaplacianThickness # >>> cort_thick = LaplacianThickness() # >>> cort_thick.inputs.input_wm = 'white_matter.nii.gz' # >>> cort_thick.inputs.input_gm = 'gray_matter.nii.gz' # >>> cort_thick.cmdline # 'LaplacianThickness white_matter.nii.gz gray_matter.nii.gz white_matter_thickness.nii.gz' -# +# # >>> cort_thick.inputs.output_image = 'output_thickness.nii.gz' # >>> cort_thick.cmdline # 'LaplacianThickness white_matter.nii.gz gray_matter.nii.gz output_thickness.nii.gz' -# -# +# +# task_name: LaplacianThickness nipype_name: LaplacianThickness nipype_module: nipype.interfaces.ants.segmentation @@ -36,10 +36,13 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
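The doctest above shows that with `output_image` unset the command falls back to `white_matter_thickness.nii.gz`, a name derived from `input_wm`. That is the `name_source`/`name_template` machinery implemented by `_filename_from_source` in the callables modules throughout this diff. A rough sketch of just the naming step, assuming a "%s_thickness" template and keep-extension behaviour (both are assumptions for illustration; the real trait metadata lives on the nipype interface):

    import os.path as op

    def derive_output_name(source, name_template="%s_thickness"):
        # _filename_from_source applies name_template % base and, when
        # keep_extension applies, re-attaches the source extension.
        base = op.basename(source)
        if base.lower().endswith(".nii.gz"):  # special-cased double extension
            base, ext = base[: -len(".nii.gz")], ".nii.gz"
        else:
            base, ext = op.splitext(base)
        return (name_template % base) + ext

    print(derive_output_name("white_matter.nii.gz"))
    # white_matter_thickness.nii.gz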
- input_wm: medimage/nifti-gz - # type=file|default=: white matter segmentation image input_gm: medimage/nifti-gz # type=file|default=: gray matter segmentation image + input_wm: medimage/nifti-gz + # type=file|default=: white matter segmentation image + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -91,15 +94,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -113,15 +116,15 @@ tests: input_gm: # type=file|default=: gray matter segmentation image imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. 
Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -134,15 +137,15 @@ tests: # type=file: Cortical thickness # type=str|default='': name of output file imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -155,12 +158,12 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - input_wm: + input_wm: '"white_matter.nii.gz"' # type=file|default=: white matter segmentation image - input_gm: + input_gm: '"gray_matter.nii.gz"' # type=file|default=: gray matter segmentation image imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -174,7 +177,7 @@ doctests: # type=file: Cortical thickness # type=str|default='': name of output file imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/laplacian_thickness_callables.py b/nipype-auto-conv/specs/laplacian_thickness_callables.py index c361c6b..0d43117 100644 --- a/nipype-auto-conv/specs/laplacian_thickness_callables.py +++ b/nipype-auto-conv/specs/laplacian_thickness_callables.py @@ -1 +1,203 @@ -"""Module to put any functions that are referred to in LaplacianThickness.yaml""" +"""Module to put any functions that are referred to in the "callables" section of LaplacianThickness.yaml""" + +import attrs +import logging +import os +import os.path as op + + +def output_image_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["output_image"] + + +iflogger = logging.getLogger("nipype.interface") + + +# Original source at L809 of /interfaces/base/core.py +def _filename_from_source( + name, chain=None, inputs=None, stdout=None, stderr=None, output_dir=None +): + if chain is None: + chain = [] + + trait_spec = inputs.trait(name) + retval = getattr(inputs, name) + source_ext = None + if (retval is attrs.NOTHING) or "%s" in retval: + if not trait_spec.name_source: + return retval + + # Do not generate filename when excluded by other inputs + if any( + (getattr(inputs, field) is not attrs.NOTHING) + for field in trait_spec.xor or () + ): + return retval + + # Do not generate filename when required fields are missing + if not all( + (getattr(inputs, field) is not attrs.NOTHING) + for field in trait_spec.requires or () + ): + return retval + + if (retval is not attrs.NOTHING) and "%s" in retval: + name_template = retval + else: + name_template = trait_spec.name_template + if not name_template: + name_template = "%s_generated" + + ns = trait_spec.name_source + while isinstance(ns, (list, tuple)): + if len(ns) > 1: + iflogger.warning("Only one name_source per trait is allowed") + ns = ns[0] + + if not isinstance(ns, (str, bytes)): + raise ValueError( + "name_source of '{}' trait should be an input trait " + "name, but a type {} object was found".format(name, type(ns)) + ) + + if getattr(inputs, ns) is not attrs.NOTHING: + name_source = ns + source = getattr(inputs, name_source) + while isinstance(source, list): + source = source[0] + + # special treatment for files + try: + _, base, source_ext = split_filename(source) + except (AttributeError, TypeError): + base = source + else: + if name in chain: + raise NipypeInterfaceError("Mutually pointing name_sources") + + chain.append(name) + base = _filename_from_source( + ns, + chain, + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + if base is not attrs.NOTHING: + _, _, source_ext = split_filename(base) + else: + # Do not generate filename when required fields are missing + return retval + + chain = None + retval = name_template % base + _, _, ext = split_filename(retval) + if trait_spec.keep_extension and (ext or source_ext): + if (ext is None or not ext) and source_ext: + retval = retval + source_ext + else: + retval = _overload_extension( + retval, + name, + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + return retval + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError + + +# Original source at L891 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + metadata = dict(name_source=lambda t: t is not 
None) + traits = inputs.traits(**metadata) + if traits: + outputs = {} + for name, trait_spec in list(traits.items()): + out_name = name + if trait_spec.output_name is not None: + out_name = trait_spec.output_name + fname = _filename_from_source( + name, inputs=inputs, stdout=stdout, stderr=stderr, output_dir=output_dir + ) + if fname is not attrs.NOTHING: + outputs[out_name] = os.path.abspath(fname) + return outputs + + +# Original source at L888 of /interfaces/base/core.py +def _overload_extension( + value, name=None, inputs=None, stdout=None, stderr=None, output_dir=None +): + return value + + +# Original source at L58 of /utils/filemanip.py +def split_filename(fname): + """Split a filename into parts: path, base filename and extension. + + Parameters + ---------- + fname : str + file or path name + + Returns + ------- + pth : str + base path from fname + fname : str + filename from fname, without extension + ext : str + file extension from fname + + Examples + -------- + >>> from nipype.utils.filemanip import split_filename + >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz') + >>> pth + '/home/data' + + >>> fname + 'subject' + + >>> ext + '.nii.gz' + + """ + + special_extensions = [".nii.gz", ".tar.gz", ".niml.dset"] + + pth = op.dirname(fname) + fname = op.basename(fname) + + ext = None + for special_ext in special_extensions: + ext_len = len(special_ext) + if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()): + ext = fname[-ext_len:] + fname = fname[:-ext_len] + break + if not ext: + fname, ext = op.splitext(fname) + + return pth, fname, ext + + +# Original source at L125 of /interfaces/base/support.py +class NipypeInterfaceError(Exception): + """Custom error for interfaces""" + + def __init__(self, value): + self.value = value + + def __str__(self): + return "{}".format(self.value) diff --git a/nipype-auto-conv/specs/measure_image_similarity.yaml b/nipype-auto-conv/specs/measure_image_similarity.yaml index 2d2dae7..ca820bb 100644 --- a/nipype-auto-conv/specs/measure_image_similarity.yaml +++ b/nipype-auto-conv/specs/measure_image_similarity.yaml @@ -5,12 +5,12 @@ # # Docs # ---- -# -# -# +# +# +# # Examples # -------- -# +# # >>> from nipype.interfaces.ants import MeasureImageSimilarity # >>> sim = MeasureImageSimilarity() # >>> sim.inputs.dimension = 3 @@ -25,7 +25,7 @@ # >>> sim.inputs.moving_image_mask = 'mask.nii.gz' # >>> sim.cmdline # 'MeasureImageSimilarity --dimensionality 3 --masks ["mask.nii","mask.nii.gz"] --metric MI["T1.nii","resting.nii",1.0,5,Regular,1.0]' -# +# task_name: MeasureImageSimilarity nipype_name: MeasureImageSimilarity nipype_module: nipype.interfaces.ants.registration @@ -42,12 +42,15 @@ inputs: # passed to the field in the automatically generated unittests. 
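For intuition, the `name_source`/`name_template` machinery in `_filename_from_source` above boils down to the following naming rule (a condensed sketch under the default `keep_extension` behaviour, ignoring the `xor`/`requires` guards and chained sources):

import os.path as op

SPECIAL_EXTS = (".nii.gz", ".tar.gz", ".niml.dset")


def _split_ext(fname):
    # same multi-part extension handling as split_filename above
    for special in SPECIAL_EXTS:
        if fname.lower().endswith(special):
            return fname[: -len(special)], fname[-len(special):]
    return op.splitext(fname)


def generated_filename(source, name_template="%s_generated"):
    # e.g. generated_filename("white_matter.nii.gz", "%s_thickness")
    # -> "white_matter_thickness.nii.gz"
    base, source_ext = _split_ext(op.basename(source))
    retval = name_template % base
    _, ext = _split_ext(retval)
    if not ext and source_ext:
        # keep_extension: carry the source extension over to the generated name
        retval += source_ext
    return retval

This is consistent with the default 'white_matter_thickness.nii.gz' output shown in the LaplacianThickness doctest above.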
fixed_image: medimage/nifti1 # type=file|default=: Image to which the moving image is warped - moving_image: medimage/nifti1 - # type=file|default=: Image to apply transformation to (generally a coregistered functional) fixed_image_mask: medimage/nifti1 # type=file|default=: mask used to limit metric sampling region of the fixed image + moving_image: medimage/nifti1 + # type=file|default=: Image to apply transformation to (generally a coregistered functional) moving_image_mask: medimage/nifti-gz # type=file|default=: mask used to limit metric sampling region of the moving image + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -64,6 +67,8 @@ outputs: callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields + similarity: similarity_callable + # type=float: templates: # dict[str, str] - `output_file_template` values to be provided to output fields requirements: @@ -79,7 +84,7 @@ tests: moving_image: # type=file|default=: Image to apply transformation to (generally a coregistered functional) metric: - # type=enum|default='CC'|allowed['CC','Demons','GC','MI','Mattes','MeanSquares']: + # type=enum|default='CC'|allowed['CC','Demons','GC','MI','Mattes','MeanSquares']: metric_weight: # type=float|default=1.0: The "metricWeight" variable is not used. radius_or_number_of_bins: @@ -99,15 +104,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. 
Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -119,7 +124,7 @@ tests: dimension: '3' # type=enum|default=2|allowed[2,3,4]: Dimensionality of the fixed/moving image pair metric: '"MI"' - # type=enum|default='CC'|allowed['CC','Demons','GC','MI','Mattes','MeanSquares']: + # type=enum|default='CC'|allowed['CC','Demons','GC','MI','Mattes','MeanSquares']: fixed_image: # type=file|default=: Image to which the moving image is warped moving_image: @@ -137,15 +142,15 @@ tests: moving_image_mask: # type=file|default=: mask used to limit metric sampling region of the moving image imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -161,10 +166,10 @@ doctests: dimension: '3' # type=enum|default=2|allowed[2,3,4]: Dimensionality of the fixed/moving image pair metric: '"MI"' - # type=enum|default='CC'|allowed['CC','Demons','GC','MI','Mattes','MeanSquares']: - fixed_image: + # type=enum|default='CC'|allowed['CC','Demons','GC','MI','Mattes','MeanSquares']: + fixed_image: '"T1.nii"' # type=file|default=: Image to which the moving image is warped - moving_image: + moving_image: '"resting.nii"' # type=file|default=: Image to apply transformation to (generally a coregistered functional) metric_weight: '1.0' # type=float|default=1.0: The "metricWeight" variable is not used. @@ -174,12 +179,12 @@ doctests: # type=enum|default='None'|allowed['None','Random','Regular']: Manner of choosing point set over which to optimize the metric. Defaults to "None" (i.e. a dense sampling of one sample per voxel). sampling_percentage: '1.0' # type=range|default=None: Percentage of points accessible to the sampling strategy over which to optimize the metric. - fixed_image_mask: + fixed_image_mask: '"mask.nii"' # type=file|default=: mask used to limit metric sampling region of the fixed image - moving_image_mask: + moving_image_mask: '"mask.nii.gz"' # type=file|default=: mask used to limit metric sampling region of the moving image imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/measure_image_similarity_callables.py b/nipype-auto-conv/specs/measure_image_similarity_callables.py index 7379ba2..758e235 100644 --- a/nipype-auto-conv/specs/measure_image_similarity_callables.py +++ b/nipype-auto-conv/specs/measure_image_similarity_callables.py @@ -1 +1,203 @@ -"""Module to put any functions that are referred to in MeasureImageSimilarity.yaml""" +"""Module to put any functions that are referred to in the "callables" section of MeasureImageSimilarity.yaml""" + +import attrs +import logging +import os +import os.path as op + + +def similarity_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["similarity"] + + +iflogger = logging.getLogger("nipype.interface") + + +# Original source at L809 of /interfaces/base/core.py +def _filename_from_source( + name, chain=None, inputs=None, stdout=None, stderr=None, output_dir=None +): + if chain is None: + chain = [] + + trait_spec = inputs.trait(name) + retval = getattr(inputs, name) + source_ext = None + if (retval is attrs.NOTHING) or "%s" in retval: + if not trait_spec.name_source: + return retval + + # Do not generate filename when excluded by other inputs + if any( + (getattr(inputs, field) is not attrs.NOTHING) + for field in trait_spec.xor or () + ): + return retval + + # Do not generate filename when required fields are missing + if not all( + (getattr(inputs, field) is not attrs.NOTHING) + for field in trait_spec.requires or () + ): + return retval + + if (retval is not attrs.NOTHING) and "%s" in retval: + name_template = retval + else: + name_template = trait_spec.name_template + if not name_template: + name_template = "%s_generated" + + ns = trait_spec.name_source + while isinstance(ns, (list, tuple)): + if len(ns) > 1: + iflogger.warning("Only one name_source per trait is allowed") + ns = ns[0] + + if not isinstance(ns, (str, bytes)): + raise ValueError( + "name_source of '{}' trait should be an input trait " + "name, but a type {} object was found".format(name, type(ns)) + ) + + if getattr(inputs, ns) is not attrs.NOTHING: + name_source = ns + source = getattr(inputs, name_source) + while isinstance(source, list): + source = source[0] + + # special treatment for files + try: + _, base, source_ext = split_filename(source) + except (AttributeError, TypeError): + base = source + else: + if name in chain: + raise NipypeInterfaceError("Mutually pointing name_sources") + + chain.append(name) + base = _filename_from_source( + ns, + chain, + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + if base is not attrs.NOTHING: + _, _, source_ext = split_filename(base) + else: + # Do not generate filename when required fields are missing + return retval + + chain = None + retval = name_template % base + _, _, ext = split_filename(retval) + if trait_spec.keep_extension and (ext or source_ext): + if (ext is None or not ext) and source_ext: + retval = retval + source_ext + else: + retval = _overload_extension( + retval, + name, + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + return retval + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError + + +# Original source at L891 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + metadata = 
dict(name_source=lambda t: t is not None) + traits = inputs.traits(**metadata) + if traits: + outputs = {} + for name, trait_spec in list(traits.items()): + out_name = name + if trait_spec.output_name is not None: + out_name = trait_spec.output_name + fname = _filename_from_source( + name, inputs=inputs, stdout=stdout, stderr=stderr, output_dir=output_dir + ) + if fname is not attrs.NOTHING: + outputs[out_name] = os.path.abspath(fname) + return outputs + + +# Original source at L888 of /interfaces/base/core.py +def _overload_extension( + value, name=None, inputs=None, stdout=None, stderr=None, output_dir=None +): + return value + + +# Original source at L58 of /utils/filemanip.py +def split_filename(fname): + """Split a filename into parts: path, base filename and extension. + + Parameters + ---------- + fname : str + file or path name + + Returns + ------- + pth : str + base path from fname + fname : str + filename from fname, without extension + ext : str + file extension from fname + + Examples + -------- + >>> from nipype.utils.filemanip import split_filename + >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz') + >>> pth + '/home/data' + + >>> fname + 'subject' + + >>> ext + '.nii.gz' + + """ + + special_extensions = [".nii.gz", ".tar.gz", ".niml.dset"] + + pth = op.dirname(fname) + fname = op.basename(fname) + + ext = None + for special_ext in special_extensions: + ext_len = len(special_ext) + if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()): + ext = fname[-ext_len:] + fname = fname[:-ext_len] + break + if not ext: + fname, ext = op.splitext(fname) + + return pth, fname, ext + + +# Original source at L125 of /interfaces/base/support.py +class NipypeInterfaceError(Exception): + """Custom error for interfaces""" + + def __init__(self, value): + self.value = value + + def __str__(self): + return "{}".format(self.value) diff --git a/nipype-auto-conv/specs/multiply_images.yaml b/nipype-auto-conv/specs/multiply_images.yaml index 0fe41e2..976836f 100644 --- a/nipype-auto-conv/specs/multiply_images.yaml +++ b/nipype-auto-conv/specs/multiply_images.yaml @@ -5,7 +5,7 @@ # # Docs # ---- -# +# # Examples # -------- # >>> from nipype.interfaces.ants import MultiplyImages @@ -16,7 +16,7 @@ # >>> test.inputs.output_product_image = "out.nii" # >>> test.cmdline # 'MultiplyImages 3 moving2.nii 0.25 out.nii' -# +# task_name: MultiplyImages nipype_name: MultiplyImages nipype_module: nipype.interfaces.ants.utils @@ -33,9 +33,12 @@ inputs: # passed to the field in the automatically generated unittests. first_input: medimage/nifti1 # type=file|default=: image 1 - output_product_image: medimage/nifti1 + output_product_image: Path # type=file: average image file # type=file|default=: Outputfname.nii.gz: the name of the resulting image. + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. 
out_file: position: 1) outputs: @@ -79,15 +82,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -102,19 +105,19 @@ tests: # type=file|default=: image 1 second_input: '0.25' # type=traitcompound|default=None: image 2 or multiplication weight - output_product_image: + output_product_image: '"out.nii"' # type=file: average image file # type=file|default=: Outputfname.nii.gz: the name of the resulting image. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -129,15 +132,15 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - first_input: + first_input: '"moving2.nii"' # type=file|default=: image 1 second_input: '0.25' # type=traitcompound|default=None: image 2 or multiplication weight - output_product_image: + output_product_image: '"out.nii"' # type=file: average image file # type=file|default=: Outputfname.nii.gz: the name of the resulting image. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/multiply_images_callables.py b/nipype-auto-conv/specs/multiply_images_callables.py index 47cb95b..acf302e 100644 --- a/nipype-auto-conv/specs/multiply_images_callables.py +++ b/nipype-auto-conv/specs/multiply_images_callables.py @@ -1 +1,22 @@ -"""Module to put any functions that are referred to in MultiplyImages.yaml""" +"""Module to put any functions that are referred to in the "callables" section of MultiplyImages.yaml""" + +import os + + +def output_product_image_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["output_product_image"] + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError + + +# Original source at L704 of /interfaces/ants/utils.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + outputs = {} + outputs["output_product_image"] = os.path.abspath(inputs.output_product_image) + return outputs diff --git a/nipype-auto-conv/specs/n4_bias_field_correction.yaml b/nipype-auto-conv/specs/n4_bias_field_correction.yaml index e49fa63..25ed45b 100644 --- a/nipype-auto-conv/specs/n4_bias_field_correction.yaml +++ b/nipype-auto-conv/specs/n4_bias_field_correction.yaml @@ -5,9 +5,9 @@ # # Docs # ---- -# +# # Bias field correction. -# +# # N4 is a variant of the popular N3 (nonparameteric nonuniform normalization) # retrospective bias correction algorithm. Based on the assumption that the # corruption of the low frequency bias field can be modeled as a convolution of @@ -16,14 +16,14 @@ # the intensities, and then spatially smoothing this result by a B-spline modeling # of the bias field itself. The modifications from and improvements obtained over # the original N3 algorithm are described in [Tustison2010]_. -# +# # .. [Tustison2010] N. Tustison et al., # N4ITK: Improved N3 Bias Correction, IEEE Transactions on Medical Imaging, # 29(6):1310-1320, June 2010. 
-# +# # Examples # -------- -# +# # >>> import copy # >>> from nipype.interfaces.ants import N4BiasFieldCorrection # >>> n4 = N4BiasFieldCorrection() @@ -37,7 +37,7 @@ # -d 3 --input-image structural.nii # --convergence [ 50x50x30x20 ] --output structural_corrected.nii # --shrink-factor 3' -# +# # >>> n4_2 = copy.deepcopy(n4) # >>> n4_2.inputs.convergence_threshold = 1e-6 # >>> n4_2.cmdline @@ -45,7 +45,7 @@ # -d 3 --input-image structural.nii # --convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii # --shrink-factor 3' -# +# # >>> n4_3 = copy.deepcopy(n4_2) # >>> n4_3.inputs.bspline_order = 5 # >>> n4_3.cmdline @@ -53,7 +53,7 @@ # -d 3 --input-image structural.nii # --convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii # --shrink-factor 3' -# +# # >>> n4_4 = N4BiasFieldCorrection() # >>> n4_4.inputs.input_image = 'structural.nii' # >>> n4_4.inputs.save_bias = True @@ -61,7 +61,7 @@ # >>> n4_4.cmdline # 'N4BiasFieldCorrection -d 3 --input-image structural.nii # --output [ structural_corrected.nii, structural_bias.nii ]' -# +# # >>> n4_5 = N4BiasFieldCorrection() # >>> n4_5.inputs.input_image = 'structural.nii' # >>> n4_5.inputs.dimension = 3 @@ -69,8 +69,8 @@ # >>> n4_5.cmdline # 'N4BiasFieldCorrection -d 3 --histogram-sharpening [0.12,0.02,200] # --input-image structural.nii --output structural_corrected.nii' -# -# +# +# task_name: N4BiasFieldCorrection nipype_name: N4BiasFieldCorrection nipype_module: nipype.interfaces.ants.segmentation @@ -85,15 +85,18 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. + bias_image: Path + # type=file: Estimated bias + # type=file|default=: Filename for the estimated bias. input_image: medimage/nifti1 # type=file|default=: input for bias correction. Negative values or values close to zero should be processed prior to correction mask_image: generic/file # type=file|default=: image to specify region to perform final bias correction in weight_image: generic/file - # type=file|default=: image for relative weighting (e.g. probability map of the white matter) of voxels during the B-spline fitting. - bias_image: generic/file - # type=file: Estimated bias - # type=file|default=: Filename for the estimated bias. + # type=file|default=: image for relative weighting (e.g. probability map of the white matter) of voxels during the B-spline fitting. + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -107,12 +110,12 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - output_image: generic/file - # type=file: Warped image - # type=str|default='': output file name bias_image: generic/file # type=file: Estimated bias # type=file|default=: Filename for the estimated bias. 
+ output_image: generic/file + # type=file: Warped image + # type=str|default='': output file name callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields @@ -131,20 +134,20 @@ tests: mask_image: # type=file|default=: image to specify region to perform final bias correction in weight_image: - # type=file|default=: image for relative weighting (e.g. probability map of the white matter) of voxels during the B-spline fitting. + # type=file|default=: image for relative weighting (e.g. probability map of the white matter) of voxels during the B-spline fitting. output_image: # type=file: Warped image # type=str|default='': output file name bspline_fitting_distance: - # type=float|default=0.0: + # type=float|default=0.0: bspline_order: - # type=int|default=0: + # type=int|default=0: shrink_factor: - # type=int|default=0: + # type=int|default=0: n_iterations: - # type=list|default=[]: + # type=list|default=[]: convergence_threshold: - # type=float|default=0.0: + # type=float|default=0.0: save_bias: # type=bool|default=False: True if the estimated bias should be saved to file. bias_image: @@ -163,15 +166,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -185,13 +188,13 @@ tests: input_image: # type=file|default=: input for bias correction. Negative values or values close to zero should be processed prior to correction bspline_fitting_distance: '300' - # type=float|default=0.0: + # type=float|default=0.0: shrink_factor: '3' - # type=int|default=0: + # type=int|default=0: n_iterations: '[50,50,30,20]' - # type=list|default=[]: + # type=list|default=[]: imports: &id001 - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys - module: copy expected_outputs: @@ -199,8 +202,8 @@ tests: # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. 
Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -210,17 +213,17 @@ tests: # dict[str, str] - values to provide to inputs fields in the task initialisation # (if not specified, will try to choose a sensible value) convergence_threshold: 1e-6 - # type=float|default=0.0: + # type=float|default=0.0: imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -230,17 +233,17 @@ tests: # dict[str, str] - values to provide to inputs fields in the task initialisation # (if not specified, will try to choose a sensible value) bspline_order: '5' - # type=int|default=0: + # type=int|default=0: imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -256,15 +259,15 @@ tests: dimension: '3' # type=enum|default=3|allowed[2,3,4]: image dimension (2, 3 or 4) imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. 
Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -280,15 +283,15 @@ tests: histogram_sharpening: (0.12, 0.02, 200) # type=tuple|default=(0.15, 0.01, 200): Three-values tuple of histogram sharpening parameters (FWHM, wienerNose, numberOfHistogramBins). These options describe the histogram sharpening parameters, i.e. the deconvolution step parameters described in the original N3 algorithm. The default values have been shown to work fairly well. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -303,16 +306,16 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=3|allowed[2,3,4]: image dimension (2, 3 or 4) - input_image: + input_image: '"structural.nii"' # type=file|default=: input for bias correction. Negative values or values close to zero should be processed prior to correction bspline_fitting_distance: '300' - # type=float|default=0.0: + # type=float|default=0.0: shrink_factor: '3' - # type=int|default=0: + # type=int|default=0: n_iterations: '[50,50,30,20]' - # type=list|default=[]: + # type=list|default=[]: imports: *id001 - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -323,9 +326,9 @@ doctests: # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. convergence_threshold: 1e-6 - # type=float|default=0.0: + # type=float|default=0.0: imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -336,9 +339,9 @@ doctests: # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. 
bspline_order: '5' - # type=int|default=0: + # type=int|default=0: imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -348,14 +351,14 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - input_image: + input_image: '"structural.nii"' # type=file|default=: input for bias correction. Negative values or values close to zero should be processed prior to correction save_bias: 'True' # type=bool|default=False: True if the estimated bias should be saved to file. dimension: '3' # type=enum|default=3|allowed[2,3,4]: image dimension (2, 3 or 4) imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -365,14 +368,14 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - input_image: + input_image: '"structural.nii"' # type=file|default=: input for bias correction. Negative values or values close to zero should be processed prior to correction dimension: '3' # type=enum|default=3|allowed[2,3,4]: image dimension (2, 3 or 4) histogram_sharpening: (0.12, 0.02, 200) # type=tuple|default=(0.15, 0.01, 200): Three-values tuple of histogram sharpening parameters (FWHM, wienerNose, numberOfHistogramBins). These options describe the histogram sharpening parameters, i.e. the deconvolution step parameters described in the original N3 algorithm. The default values have been shown to work fairly well. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/n4_bias_field_correction_callables.py b/nipype-auto-conv/specs/n4_bias_field_correction_callables.py index 7c58464..7bd3dae 100644 --- a/nipype-auto-conv/specs/n4_bias_field_correction_callables.py +++ b/nipype-auto-conv/specs/n4_bias_field_correction_callables.py @@ -1 +1,220 @@ -"""Module to put any functions that are referred to in N4BiasFieldCorrection.yaml""" +"""Module to put any functions that are referred to in the "callables" section of N4BiasFieldCorrection.yaml""" + +import attrs +import logging +import os +import os.path as op + + +def bias_image_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["bias_image"] + + +def output_image_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["output_image"] + + +iflogger = logging.getLogger("nipype.interface") + + +# Original source at L809 of /interfaces/base/core.py +def _filename_from_source( + name, chain=None, inputs=None, stdout=None, stderr=None, output_dir=None +): + if chain is None: + chain = [] + + trait_spec = inputs.trait(name) + retval = getattr(inputs, name) + source_ext = None + if (retval is attrs.NOTHING) or "%s" in retval: + if not trait_spec.name_source: + return retval + + # Do not generate filename when excluded by other inputs + if any( + (getattr(inputs, field) is not attrs.NOTHING) + for field in trait_spec.xor or () + ): + return retval + + # Do not generate filename when required fields are missing + if not all( + (getattr(inputs, field) is not attrs.NOTHING) + for field in trait_spec.requires or () + ): + return retval + + if (retval is not attrs.NOTHING) and "%s" in retval: + name_template = retval + else: + name_template = trait_spec.name_template + if not name_template: + name_template = "%s_generated" + + ns = trait_spec.name_source + while isinstance(ns, (list, tuple)): + if len(ns) > 1: + iflogger.warning("Only one name_source per trait is allowed") + ns = ns[0] + + if not isinstance(ns, (str, bytes)): + raise ValueError( + "name_source of '{}' trait should be an input trait " + "name, but a type {} object was found".format(name, type(ns)) + ) + + if getattr(inputs, ns) is not attrs.NOTHING: + name_source = ns + source = getattr(inputs, name_source) + while isinstance(source, list): + source = source[0] + + # special treatment for files + try: + _, base, source_ext = split_filename(source) + except (AttributeError, TypeError): + base = source + else: + if name in chain: + raise NipypeInterfaceError("Mutually pointing name_sources") + + chain.append(name) + base = _filename_from_source( + ns, + chain, + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + if base is not attrs.NOTHING: + _, _, source_ext = split_filename(base) + else: + # Do not generate filename when required fields are missing + return retval + + chain = None + retval = name_template % base + _, _, ext = split_filename(retval) + if trait_spec.keep_extension and (ext or source_ext): + if (ext is None or not ext) and source_ext: + retval = retval + source_ext + else: + retval = _overload_extension( + retval, + name, + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + return retval + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, 
output_dir=None): + raise NotImplementedError + + +# Original source at L540 of /interfaces/ants/segmentation.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + outputs = nipype_interfaces_ants__ANTSCommand___list_outputs() + if _out_bias_file: + outputs["bias_image"] = os.path.abspath(_out_bias_file) + return outputs + + +# Original source at L888 of /interfaces/base/core.py +def _overload_extension( + value, name=None, inputs=None, stdout=None, stderr=None, output_dir=None +): + return value + + +# Original source at L891 of /interfaces/base/core.py +def nipype_interfaces_ants__ANTSCommand___list_outputs( + inputs=None, stdout=None, stderr=None, output_dir=None +): + metadata = dict(name_source=lambda t: t is not None) + traits = inputs.traits(**metadata) + if traits: + outputs = {} + for name, trait_spec in list(traits.items()): + out_name = name + if trait_spec.output_name is not None: + out_name = trait_spec.output_name + fname = _filename_from_source( + name, inputs=inputs, stdout=stdout, stderr=stderr, output_dir=output_dir + ) + if fname is not attrs.NOTHING: + outputs[out_name] = os.path.abspath(fname) + return outputs + + +# Original source at L58 of /utils/filemanip.py +def split_filename(fname): + """Split a filename into parts: path, base filename and extension. + + Parameters + ---------- + fname : str + file or path name + + Returns + ------- + pth : str + base path from fname + fname : str + filename from fname, without extension + ext : str + file extension from fname + + Examples + -------- + >>> from nipype.utils.filemanip import split_filename + >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz') + >>> pth + '/home/data' + + >>> fname + 'subject' + + >>> ext + '.nii.gz' + + """ + + special_extensions = [".nii.gz", ".tar.gz", ".niml.dset"] + + pth = op.dirname(fname) + fname = op.basename(fname) + + ext = None + for special_ext in special_extensions: + ext_len = len(special_ext) + if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()): + ext = fname[-ext_len:] + fname = fname[:-ext_len] + break + if not ext: + fname, ext = op.splitext(fname) + + return pth, fname, ext + + +# Original source at L125 of /interfaces/base/support.py +class NipypeInterfaceError(Exception): + """Custom error for interfaces""" + + def __init__(self, value): + self.value = value + + def __str__(self): + return "{}".format(self.value) diff --git a/nipype-auto-conv/specs/registration.yaml b/nipype-auto-conv/specs/registration.yaml index 892d26f..a02d798 100644 --- a/nipype-auto-conv/specs/registration.yaml +++ b/nipype-auto-conv/specs/registration.yaml @@ -6,33 +6,33 @@ # Docs # ---- # ANTs Registration command for registration of images -# +# # `antsRegistration `_ registers a ``moving_image`` to a ``fixed_image``, # using a predefined (sequence of) cost function(s) and transformation operations. # The cost function is defined using one or more 'metrics', specifically # local cross-correlation (``CC``), Mean Squares (``MeanSquares``), Demons (``Demons``), # global correlation (``GC``), or Mutual Information (``Mattes`` or ``MI``). -# +# # ANTS can use both linear (``Translation``, ``Rigid``, ``Affine``, ``CompositeAffine``, # or ``Translation``) and non-linear transformations (``BSpline``, ``GaussianDisplacementField``, # ``TimeVaryingVelocityField``, ``TimeVaryingBSplineVelocityField``, ``SyN``, ``BSplineSyN``, # ``Exponential``, or ``BSplineExponential``). Usually, registration is done in multiple # *stages*. 
For example first an Affine, then a Rigid, and ultimately a non-linear # (Syn)-transformation. -# +# # antsRegistration can be initialized using one or more transforms from moving_image # to fixed_image with the ``initial_moving_transform``-input. For example, when you # already have a warpfield that corrects for geometrical distortions in an EPI (functional) image, # that you want to apply before an Affine registration to a structural image. # You could put this transform into 'intial_moving_transform'. -# +# # The Registration-interface can output the resulting transform(s) that map moving_image to # fixed_image in a single file as a ``composite_transform`` (if ``write_composite_transform`` # is set to ``True``), or a list of transforms as ``forwards_transforms``. It can also output # inverse transforms (from ``fixed_image`` to ``moving_image``) in a similar fashion using # ``inverse_composite_transform``. Note that the order of ``forward_transforms`` is in 'natural' # order: the first element should be applied first, the last element should be applied last. -# +# # Note, however, that ANTS tools always apply lists of transformations in reverse order (the last # transformation in the list is applied first). Therefore, if the output forward_transforms # is a list, one can not directly feed it into, for example, ``ants.ApplyTransforms``. To @@ -41,23 +41,23 @@ # ``reverse_forward_transforms`` outputs ``forward_transforms`` in reverse order and can be used for # this purpose. Note also that, because ``composite_transform`` is always a single file, this # output is preferred for most use-cases. -# +# # More information can be found in the `ANTS # manual `_. -# +# # See below for some useful examples. -# +# # Examples # -------- -# +# # Set up a Registration node with some default settings. This Node registers # 'fixed1.nii' to 'moving1.nii' by first fitting a linear 'Affine' transformation, and # then a non-linear 'SyN' transformation, both using the Mutual Information-cost # metric. -# +# # The registration is initialized by first applying the (linear) transform # trans.mat. -# +# # >>> import copy, pprint # >>> from nipype.interfaces.ants import Registration # >>> reg = Registration() @@ -88,51 +88,51 @@ # >>> reg.cmdline # 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 0 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' # >>> reg.run() # doctest: +SKIP -# +# # Same as reg1, but first invert the initial transform ('trans.mat') before applying it. 
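A small illustration of the ordering caveat described above: `forward_transforms` is in 'natural' (first-applied-first) order, while ANTs tools consume transform lists last-to-first, so the list must be reversed (or the `reverse_forward_transforms` output used) before handing it to something like `ApplyTransforms`. The file names below are hypothetical:

# 'natural' order: the first element should be applied first
forward_transforms = ["initial_trans.mat", "affine_0.mat", "syn_warp_1.nii.gz"]

# ANTs applies transform lists in reverse order, so flip the list before
# passing it on (equivalent to using the reverse_forward_transforms output)
transforms_for_ants = list(reversed(forward_transforms))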
-# +# # >>> reg.inputs.invert_initial_moving_transform = True # >>> reg1 = copy.deepcopy(reg) # >>> reg1.inputs.winsorize_lower_quantile = 0.025 # >>> reg1.cmdline # 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.025, 1.0 ] --write-composite-transform 1' # >>> reg1.run() # doctest: +SKIP -# +# # Clip extremely high intensity data points using winsorize_upper_quantile. All data points # higher than the 0.975 quantile are set to the value of the 0.975 quantile. -# +# # >>> reg2 = copy.deepcopy(reg) # >>> reg2.inputs.winsorize_upper_quantile = 0.975 # >>> reg2.cmdline # 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 0.975 ] --write-composite-transform 1' -# +# # Clip extremely low intensity data points using winsorize_lower_quantile. All data points # lower than the 0.025 quantile are set to the original value at the 0.025 quantile. 
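To make the winsorizing described above concrete: clipping at the 0.025 and 0.975 quantiles replaces every intensity outside that range with the corresponding quantile value. A standalone numpy sketch of the idea, not the interface's own implementation (the clipping happens inside antsRegistration itself):

import numpy as np


def winsorize(data, lower_q=0.025, upper_q=0.975):
    # clip intensities to the requested quantiles
    lo, hi = np.quantile(data, [lower_q, upper_q])
    return np.clip(data, lo, hi)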
-# -# +# +# # >>> reg3 = copy.deepcopy(reg) # >>> reg3.inputs.winsorize_lower_quantile = 0.025 # >>> reg3.inputs.winsorize_upper_quantile = 0.975 # >>> reg3.cmdline # 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.025, 0.975 ] --write-composite-transform 1' -# +# # Use float instead of double for computations (saves memory usage) -# +# # >>> reg3a = copy.deepcopy(reg) # >>> reg3a.inputs.float = True # >>> reg3a.cmdline # 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --float 1 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' -# +# # Force to use double instead of float for computations (more precision and memory usage). -# +# # >>> reg3b = copy.deepcopy(reg) # >>> reg3b.inputs.float = False # >>> reg3b.cmdline # 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --float 0 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' -# +# # 'collapse_output_transforms' can be used to put all transformation in a single 'composite_transform'- # file. Note that forward_transforms will now be an empty list. 
-# +# # >>> # Test collapse transforms flag # >>> reg4 = copy.deepcopy(reg) # >>> reg4.inputs.save_state = 'trans.mat' @@ -156,8 +156,8 @@ # 'warped_image': '...data/output_warped_image.nii.gz'} # >>> reg4.cmdline # 'antsRegistration --collapse-output-transforms 1 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 1 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --restore-state trans.mat --save-state trans.mat --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' -# -# +# +# # >>> # Test collapse transforms flag # >>> reg4b = copy.deepcopy(reg4) # >>> reg4b.inputs.write_composite_transform = False @@ -181,7 +181,7 @@ # >>> reg4b.aggregate_outputs() # doctest: +SKIP # >>> reg4b.cmdline # 'antsRegistration --collapse-output-transforms 1 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 1 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --restore-state trans.mat --save-state trans.mat --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 0' -# +# # One can use multiple similarity metrics in a single registration stage.The Node below first # performs a linear registation using only the Mutual Information ('Mattes')-metric. # In a second stage, it performs a non-linear registration ('Syn') using both a @@ -189,7 +189,7 @@ # equally ('metric_weight' is .5 for both). The Mutual Information- metric uses 32 bins. # The local cross-correlations (correlations between every voxel's neighborhoods) is computed # with a radius of 4. 
-# +# # >>> # Test multiple metrics per stage # >>> reg5 = copy.deepcopy(reg) # >>> reg5.inputs.fixed_image = 'fixed1.nii' @@ -201,64 +201,64 @@ # >>> reg5.inputs.sampling_percentage = [0.05, [0.05, 0.10]] # >>> reg5.cmdline # 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 0.5, 32, None, 0.05 ] --metric CC[ fixed1.nii, moving1.nii, 0.5, 4, None, 0.1 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' -# +# # ANTS Registration can also use multiple modalities to perform the registration. Here it is assumed # that fixed1.nii and fixed2.nii are in the same space, and so are moving1.nii and # moving2.nii. First, a linear registration is performed matching fixed1.nii to moving1.nii, # then a non-linear registration is performed to match fixed2.nii to moving2.nii, starting from # the transformation of the first step. -# +# # >>> # Test multiple inputS # >>> reg6 = copy.deepcopy(reg5) # >>> reg6.inputs.fixed_image = ['fixed1.nii', 'fixed2.nii'] # >>> reg6.inputs.moving_image = ['moving1.nii', 'moving2.nii'] # >>> reg6.cmdline # 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 0.5, 32, None, 0.05 ] --metric CC[ fixed2.nii, moving2.nii, 0.5, 4, None, 0.1 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' -# +# # Different methods can be used for the interpolation when applying transformations. 
-# +# # >>> # Test Interpolation Parameters (BSpline) # >>> reg7a = copy.deepcopy(reg) # >>> reg7a.inputs.interpolation = 'BSpline' # >>> reg7a.inputs.interpolation_parameters = (3,) # >>> reg7a.cmdline # 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation BSpline[ 3 ] --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' -# +# # >>> # Test Interpolation Parameters (MultiLabel/Gaussian) # >>> reg7b = copy.deepcopy(reg) # >>> reg7b.inputs.interpolation = 'Gaussian' # >>> reg7b.inputs.interpolation_parameters = (1.0, 1.0) # >>> reg7b.cmdline # 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Gaussian[ 1.0, 1.0 ] --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' -# +# # BSplineSyN non-linear registration with custom parameters. -# +# # >>> # Test Extended Transform Parameters # >>> reg8 = copy.deepcopy(reg) # >>> reg8.inputs.transforms = ['Affine', 'BSplineSyN'] # >>> reg8.inputs.transform_parameters = [(2.0,), (0.25, 26, 0, 3)] # >>> reg8.cmdline # 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform BSplineSyN[ 0.25, 26, 0, 3 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' -# +# # Mask the fixed image in the second stage of the registration (but not the first). 
-# +# # >>> # Test masking # >>> reg9 = copy.deepcopy(reg) # >>> reg9.inputs.fixed_image_masks = ['NULL', 'fixed1.nii'] # >>> reg9.cmdline # 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --masks [ NULL, NULL ] --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --masks [ fixed1.nii, NULL ] --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' -# +# # Here we use both a warpfield and a linear transformation, before registration commences. Note that # the first transformation that needs to be applied ('ants_Warp.nii.gz') is last in the list of # 'initial_moving_transform'. -# +# # >>> # Test initialization with multiple transforms matrices (e.g., unwarp and affine transform) # >>> reg10 = copy.deepcopy(reg) # >>> reg10.inputs.initial_moving_transform = ['func_to_struct.mat', 'ants_Warp.nii.gz'] # >>> reg10.inputs.invert_initial_moving_transform = [False, False] # >>> reg10.cmdline # 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ func_to_struct.mat, 0 ] [ ants_Warp.nii.gz, 0 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' -# +# task_name: Registration nipype_name: Registration nipype_module: nipype.interfaces.ants.registration @@ -277,23 +277,20 @@ inputs: # type=inputmultiobject|default=[]: Image to which the moving_image should be transformed(usually a structural image) fixed_image_mask: generic/file # type=file|default=: Mask used to limit metric sampling region of the fixed imagein all stages - fixed_image_masks: generic/file+list-of - # type=inputmultiobject|default=[]: Masks used to limit metric sampling region of the fixed image, defined per registration stage(Use "NULL" to omit a mask at a given stage) + initial_moving_transform: datascience/text-matrix+list-of + # type=inputmultiobject|default=[]: A transform or a list of transforms that should be applied before the registration begins. Note that, when a list is given, the transformations are applied in reverse order. moving_image: medimage/nifti1+list-of # type=inputmultiobject|default=[]: Image that will be registered to the space of fixed_image. 
This is theimage on which the transformations will be applied to moving_image_mask: generic/file # type=file|default=: mask used to limit metric sampling region of the moving imagein all stages - moving_image_masks: generic/file+list-of - # type=inputmultiobject|default=[]: Masks used to limit metric sampling region of the moving image, defined per registration stage(Use "NULL" to omit a mask at a given stage) - save_state: datascience/text-matrix - # type=file: The saved registration state to be restored - # type=file|default=: Filename for saving the internal restorable state of the registration restore_state: datascience/text-matrix # type=file|default=: Filename for restoring the internal restorable state of the registration - initial_moving_transform: datascience/text-matrix+list-of - # type=inputmultiobject|default=[]: A transform or a list of transforms that should be applied before the registration begins. Note that, when a list is given, the transformations are applied in reverse order. - invert_initial_moving_transform: generic/file+list-of - # type=inputmultiobject|default=[]: One boolean or a list of booleans that indicatewhether the inverse(s) of the transform(s) definedin initial_moving_transform should be used. + save_state: Path + # type=file: The saved registration state to be restored + # type=file|default=: Filename for saving the internal restorable state of the registration + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -309,18 +306,28 @@ outputs: # passed to the field in the automatically generated unittests. composite_transform: generic/file # type=file: Composite transform file + forward_transforms: generic/file+list-of + # type=list: List of output transforms for forward registration inverse_composite_transform: generic/file # type=file: Inverse composite transform file - warped_image: generic/file - # type=file: Outputs warped image inverse_warped_image: generic/file # type=file: Outputs the inverse of the warped image + reverse_forward_transforms: generic/file+list-of + # type=list: List of output transforms for forward registration reversed for antsApplyTransform + reverse_transforms: generic/file+list-of + # type=list: List of output transforms for reverse registration save_state: datascience/text-matrix # type=file: The saved registration state to be restored # type=file|default=: Filename for saving the internal restorable state of the registration + warped_image: generic/file + # type=file: Outputs warped image callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields + elapsed_time: elapsed_time_callable + # type=float: the total elapsed time as reported by ANTs + metric_value: metric_value_callable + # type=float: the final value of metric templates: # dict[str, str] - `output_file_template` values to be provided to output fields requirements: @@ -355,75 +362,75 @@ tests: initial_moving_transform_com: # type=enum|default=0|allowed[0,1,2]: Align the moving_image and fixed_image before registration using the geometric center of the images (=0), the image intensities (=1), or the origin of the images (=2). 
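The `elapsed_time` and `metric_value` entries listed under `callables` above are resolved from functions in the adjacent `*_callables.py` module rather than from output files. A minimal sketch of what such callables could look like is given below; the `(output_dir, inputs, stdout, stderr)` signature, the module/function layout, and the exact strings parsed from the captured antsRegistration terminal output ("Total elapsed time:" and the comma-separated "DIAGNOSTIC" convergence lines) are assumptions modelled on how the Nipype interface recovers these values, not the generated code itself.

    # sketch of registration_callables.py (names and signature are assumed)

    def elapsed_time_callable(output_dir, inputs, stdout, stderr):
        """Return the total elapsed time (seconds) reported by antsRegistration,
        assuming it prints a trailing 'Total elapsed time:' line."""
        for line in reversed((stdout or "").splitlines()):
            line = line.strip()
            if line.startswith("Total elapsed time:"):
                return float(line.replace("Total elapsed time:", "").strip())
        return None

    def metric_value_callable(output_dir, inputs, stdout, stderr):
        """Return the final metric value, assuming it is the third field of the
        last comma-separated 'DIAGNOSTIC' convergence line in the output."""
        for line in reversed((stdout or "").splitlines()):
            if "DIAGNOSTIC" in line:
                return float(line.split(",")[2])
        return None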
metric_item_trait: - # type=enum|default='CC'|allowed['CC','Demons','GC','MI','Mattes','MeanSquares']: + # type=enum|default='CC'|allowed['CC','Demons','GC','MI','Mattes','MeanSquares']: metric_stage_trait: - # type=traitcompound|default=None: + # type=traitcompound|default=None: metric: # type=list|default=[]: the metric(s) to use for each stage. Note that multiple metrics per stage are not supported in ANTS 1.9.1 and earlier. metric_weight_item_trait: - # type=float|default=1.0: + # type=float|default=1.0: metric_weight_stage_trait: - # type=traitcompound|default=None: + # type=traitcompound|default=None: metric_weight: # type=list|default=[1.0]: the metric weight(s) for each stage. The weights must sum to 1 per stage. radius_bins_item_trait: - # type=int|default=5: + # type=int|default=5: radius_bins_stage_trait: - # type=traitcompound|default=None: + # type=traitcompound|default=None: radius_or_number_of_bins: # type=list|default=[5]: the number of bins in each stage for the MI and Mattes metric, the radius for other metrics sampling_strategy_item_trait: - # type=enum|default='None'|allowed['None','Random','Regular',None]: + # type=enum|default='None'|allowed['None','Random','Regular',None]: sampling_strategy_stage_trait: - # type=traitcompound|default=None: + # type=traitcompound|default=None: sampling_strategy: # type=list|default=[]: the metric sampling strategy (strategies) for each stage sampling_percentage_item_trait: - # type=traitcompound|default=None: + # type=traitcompound|default=None: sampling_percentage_stage_trait: - # type=traitcompound|default=None: + # type=traitcompound|default=None: sampling_percentage: # type=list|default=[]: the metric sampling percentage(s) to use for each stage use_estimate_learning_rate_once: - # type=list|default=[]: + # type=list|default=[]: use_histogram_matching: # type=traitcompound|default=True: Histogram match the images before registration. interpolation: - # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','GenericLabel','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: + # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','GenericLabel','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: interpolation_parameters: - # type=traitcompound|default=None: + # type=traitcompound|default=None: write_composite_transform: - # type=bool|default=False: + # type=bool|default=False: collapse_output_transforms: # type=bool|default=True: Collapse output transforms. Specifically, enabling this option combines all adjacent linear transforms and composes all adjacent displacement field transforms before writing the results to disk. initialize_transforms_per_stage: - # type=bool|default=False: Initialize linear transforms from the previous stage. By enabling this option, the current linear stage transform is directly initialized from the previous stages linear transform; this allows multiple linear stages to be run where each stage directly updates the estimated linear transform from the previous stage. (e.g. Translation -> Rigid -> Affine). + # type=bool|default=False: Initialize linear transforms from the previous stage. By enabling this option, the current linear stage transform is directly initialized from the previous stages linear transform; this allows multiple linear stages to be run where each stage directly updates the estimated linear transform from the previous stage. (e.g. 
Translation -> Rigid -> Affine). float: # type=bool|default=False: Use float instead of double for computations. transforms: - # type=list|default=[]: + # type=list|default=[]: transform_parameters: - # type=list|default=[]: + # type=list|default=[]: restrict_deformation: # type=list|default=[]: This option allows the user to restrict the optimization of the displacement field, translation, rigid or affine transform on a per-component basis. For example, if one wants to limit the deformation or rotation of 3-D volume to the first two dimensions, this is possible by specifying a weight vector of '1x1x0' for a deformation field or '1x1x0x1x1x0' for a rigid transformation. Low-dimensional restriction only works if there are no preceding transformations. number_of_iterations: - # type=list|default=[]: + # type=list|default=[]: smoothing_sigmas: - # type=list|default=[]: + # type=list|default=[]: sigma_units: # type=list|default=[]: units for smoothing sigmas shrink_factors: - # type=list|default=[]: + # type=list|default=[]: convergence_threshold: - # type=list|default=[1e-06]: + # type=list|default=[1e-06]: convergence_window_size: - # type=list|default=[10]: + # type=list|default=[10]: output_transform_prefix: - # type=str|default='transform': + # type=str|default='transform': output_warped_image: - # type=traitcompound|default=None: + # type=traitcompound|default=None: output_inverse_warped_image: - # type=traitcompound|default=None: + # type=traitcompound|default=None: winsorize_upper_quantile: # type=range|default=1.0: The Upper quantile to clip image ranges winsorize_lower_quantile: @@ -431,7 +438,7 @@ tests: random_seed: # type=int|default=0: Fixed seed for random number generation verbose: - # type=bool|default=False: + # type=bool|default=False: num_threads: # type=int|default=1: Number of ITK threads to use args: @@ -439,15 +446,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -461,23 +468,23 @@ tests: moving_image: # type=inputmultiobject|default=[]: Image that will be registered to the space of fixed_image. This is theimage on which the transformations will be applied to output_transform_prefix: '"output_"' - # type=str|default='transform': + # type=str|default='transform': initial_moving_transform: # type=inputmultiobject|default=[]: A transform or a list of transforms that should be applied before the registration begins. Note that, when a list is given, the transformations are applied in reverse order. 
transforms: '["Affine", "SyN"]' - # type=list|default=[]: + # type=list|default=[]: transform_parameters: '[(2.0,), (0.25, 3.0, 0.0)]' - # type=list|default=[]: + # type=list|default=[]: number_of_iterations: '[[1500, 200], [100, 50, 30]]' - # type=list|default=[]: + # type=list|default=[]: dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) write_composite_transform: 'True' - # type=bool|default=False: + # type=bool|default=False: collapse_output_transforms: 'False' # type=bool|default=True: Collapse output transforms. Specifically, enabling this option combines all adjacent linear transforms and composes all adjacent displacement field transforms before writing the results to disk. initialize_transforms_per_stage: 'False' - # type=bool|default=False: Initialize linear transforms from the previous stage. By enabling this option, the current linear stage transform is directly initialized from the previous stages linear transform; this allows multiple linear stages to be run where each stage directly updates the estimated linear transform from the previous stage. (e.g. Translation -> Rigid -> Affine). + # type=bool|default=False: Initialize linear transforms from the previous stage. By enabling this option, the current linear stage transform is directly initialized from the previous stages linear transform; this allows multiple linear stages to be run where each stage directly updates the estimated linear transform from the previous stage. (e.g. Translation -> Rigid -> Affine). metric: '["Mattes"]*2' # type=list|default=[]: the metric(s) to use for each stage. Note that multiple metrics per stage are not supported in ANTS 1.9.1 and earlier. metric_weight: '[1]*2 # Default (value ignored currently by ANTs)' @@ -489,23 +496,23 @@ tests: sampling_percentage: '[0.05, None]' # type=list|default=[]: the metric sampling percentage(s) to use for each stage convergence_threshold: '[1.e-8, 1.e-9]' - # type=list|default=[1e-06]: + # type=list|default=[1e-06]: convergence_window_size: '[20]*2' - # type=list|default=[10]: + # type=list|default=[10]: smoothing_sigmas: '[[1,0], [2,1,0]]' - # type=list|default=[]: + # type=list|default=[]: sigma_units: '["vox"] * 2' # type=list|default=[]: units for smoothing sigmas shrink_factors: '[[2,1], [3,2,1]]' - # type=list|default=[]: + # type=list|default=[]: use_estimate_learning_rate_once: '[True, True]' - # type=list|default=[]: + # type=list|default=[]: use_histogram_matching: '[True, True] # This is the default' # type=traitcompound|default=True: Histogram match the images before registration. output_warped_image: '"output_warped_image.nii.gz"' - # type=traitcompound|default=None: + # type=traitcompound|default=None: imports: &id001 - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys - module: copy - module: pprint @@ -514,8 +521,8 @@ tests: # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. 
Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -524,20 +531,20 @@ tests: - inputs: # dict[str, str] - values to provide to inputs fields in the task initialisation # (if not specified, will try to choose a sensible value) - invert_initial_moving_transform: + invert_initial_moving_transform: 'True' # type=inputmultiobject|default=[]: One boolean or a list of booleans that indicatewhether the inverse(s) of the transform(s) definedin initial_moving_transform should be used. winsorize_lower_quantile: '0.025' # type=range|default=0.0: The Lower quantile to clip image ranges imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -549,15 +556,15 @@ tests: winsorize_upper_quantile: '0.975' # type=range|default=1.0: The Upper quantile to clip image ranges imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. 
Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -571,15 +578,15 @@ tests: winsorize_upper_quantile: '0.975' # type=range|default=1.0: The Upper quantile to clip image ranges imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -591,15 +598,15 @@ tests: float: 'True' # type=bool|default=False: Use float instead of double for computations. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -611,15 +618,15 @@ tests: float: 'False' # type=bool|default=False: Use float instead of double for computations. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. 
Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -628,25 +635,25 @@ tests: - inputs: # dict[str, str] - values to provide to inputs fields in the task initialisation # (if not specified, will try to choose a sensible value) - save_state: + save_state: '"trans.mat"' # type=file: The saved registration state to be restored # type=file|default=: Filename for saving the internal restorable state of the registration restore_state: # type=file|default=: Filename for restoring the internal restorable state of the registration initialize_transforms_per_stage: 'True' - # type=bool|default=False: Initialize linear transforms from the previous stage. By enabling this option, the current linear stage transform is directly initialized from the previous stages linear transform; this allows multiple linear stages to be run where each stage directly updates the estimated linear transform from the previous stage. (e.g. Translation -> Rigid -> Affine). + # type=bool|default=False: Initialize linear transforms from the previous stage. By enabling this option, the current linear stage transform is directly initialized from the previous stages linear transform; this allows multiple linear stages to be run where each stage directly updates the estimated linear transform from the previous stage. (e.g. Translation -> Rigid -> Affine). collapse_output_transforms: 'True' # type=bool|default=True: Collapse output transforms. Specifically, enabling this option combines all adjacent linear transforms and composes all adjacent displacement field transforms before writing the results to disk. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. 
Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -656,17 +663,17 @@ tests: # dict[str, str] - values to provide to inputs fields in the task initialisation # (if not specified, will try to choose a sensible value) write_composite_transform: 'False' - # type=bool|default=False: + # type=bool|default=False: imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -690,15 +697,15 @@ tests: sampling_percentage: '[0.05, [0.05, 0.10]]' # type=list|default=[]: the metric sampling percentage(s) to use for each stage imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -712,15 +719,15 @@ tests: moving_image: # type=inputmultiobject|default=[]: Image that will be registered to the space of fixed_image. This is theimage on which the transformations will be applied to imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. 
Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -730,19 +737,19 @@ tests: # dict[str, str] - values to provide to inputs fields in the task initialisation # (if not specified, will try to choose a sensible value) interpolation: '"BSpline"' - # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','GenericLabel','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: + # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','GenericLabel','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: interpolation_parameters: (3,) - # type=traitcompound|default=None: + # type=traitcompound|default=None: imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -752,19 +759,19 @@ tests: # dict[str, str] - values to provide to inputs fields in the task initialisation # (if not specified, will try to choose a sensible value) interpolation: '"Gaussian"' - # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','GenericLabel','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: + # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','GenericLabel','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: interpolation_parameters: (1.0, 1.0) - # type=traitcompound|default=None: + # type=traitcompound|default=None: imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. 
Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -774,19 +781,19 @@ tests: # dict[str, str] - values to provide to inputs fields in the task initialisation # (if not specified, will try to choose a sensible value) transforms: '["Affine", "BSplineSyN"]' - # type=list|default=[]: + # type=list|default=[]: transform_parameters: '[(2.0,), (0.25, 26, 0, 3)]' - # type=list|default=[]: + # type=list|default=[]: imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -795,18 +802,18 @@ tests: - inputs: # dict[str, str] - values to provide to inputs fields in the task initialisation # (if not specified, will try to choose a sensible value) - fixed_image_masks: + fixed_image_masks: '["NULL", "fixed1.nii"]' # type=inputmultiobject|default=[]: Masks used to limit metric sampling region of the fixed image, defined per registration stage(Use "NULL" to omit a mask at a given stage) imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -817,18 +824,18 @@ tests: # (if not specified, will try to choose a sensible value) initial_moving_transform: # type=inputmultiobject|default=[]: A transform or a list of transforms that should be applied before the registration begins. Note that, when a list is given, the transformations are applied in reverse order. - invert_initial_moving_transform: + invert_initial_moving_transform: '[False, False]' # type=inputmultiobject|default=[]: One boolean or a list of booleans that indicatewhether the inverse(s) of the transform(s) definedin initial_moving_transform should be used. 
imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -841,28 +848,28 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - fixed_image: + fixed_image: '"fixed1.nii"' # type=inputmultiobject|default=[]: Image to which the moving_image should be transformed(usually a structural image) - moving_image: + moving_image: '"moving1.nii"' # type=inputmultiobject|default=[]: Image that will be registered to the space of fixed_image. This is theimage on which the transformations will be applied to output_transform_prefix: '"output_"' - # type=str|default='transform': - initial_moving_transform: + # type=str|default='transform': + initial_moving_transform: '"trans.mat"' # type=inputmultiobject|default=[]: A transform or a list of transforms that should be applied before the registration begins. Note that, when a list is given, the transformations are applied in reverse order. transforms: '["Affine", "SyN"]' - # type=list|default=[]: + # type=list|default=[]: transform_parameters: '[(2.0,), (0.25, 3.0, 0.0)]' - # type=list|default=[]: + # type=list|default=[]: number_of_iterations: '[[1500, 200], [100, 50, 30]]' - # type=list|default=[]: + # type=list|default=[]: dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) write_composite_transform: 'True' - # type=bool|default=False: + # type=bool|default=False: collapse_output_transforms: 'False' # type=bool|default=True: Collapse output transforms. Specifically, enabling this option combines all adjacent linear transforms and composes all adjacent displacement field transforms before writing the results to disk. initialize_transforms_per_stage: 'False' - # type=bool|default=False: Initialize linear transforms from the previous stage. By enabling this option, the current linear stage transform is directly initialized from the previous stages linear transform; this allows multiple linear stages to be run where each stage directly updates the estimated linear transform from the previous stage. (e.g. Translation -> Rigid -> Affine). + # type=bool|default=False: Initialize linear transforms from the previous stage. By enabling this option, the current linear stage transform is directly initialized from the previous stages linear transform; this allows multiple linear stages to be run where each stage directly updates the estimated linear transform from the previous stage. (e.g. Translation -> Rigid -> Affine). 
metric: '["Mattes"]*2' # type=list|default=[]: the metric(s) to use for each stage. Note that multiple metrics per stage are not supported in ANTS 1.9.1 and earlier. metric_weight: '[1]*2 # Default (value ignored currently by ANTs)' @@ -874,23 +881,23 @@ doctests: sampling_percentage: '[0.05, None]' # type=list|default=[]: the metric sampling percentage(s) to use for each stage convergence_threshold: '[1.e-8, 1.e-9]' - # type=list|default=[1e-06]: + # type=list|default=[1e-06]: convergence_window_size: '[20]*2' - # type=list|default=[10]: + # type=list|default=[10]: smoothing_sigmas: '[[1,0], [2,1,0]]' - # type=list|default=[]: + # type=list|default=[]: sigma_units: '["vox"] * 2' # type=list|default=[]: units for smoothing sigmas shrink_factors: '[[2,1], [3,2,1]]' - # type=list|default=[]: + # type=list|default=[]: use_estimate_learning_rate_once: '[True, True]' - # type=list|default=[]: + # type=list|default=[]: use_histogram_matching: '[True, True] # This is the default' # type=traitcompound|default=True: Histogram match the images before registration. output_warped_image: '"output_warped_image.nii.gz"' - # type=traitcompound|default=None: + # type=traitcompound|default=None: imports: *id001 - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -900,12 +907,12 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - invert_initial_moving_transform: + invert_initial_moving_transform: 'True' # type=inputmultiobject|default=[]: One boolean or a list of booleans that indicatewhether the inverse(s) of the transform(s) definedin initial_moving_transform should be used. winsorize_lower_quantile: '0.025' # type=range|default=0.0: The Lower quantile to clip image ranges imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -918,7 +925,7 @@ doctests: winsorize_upper_quantile: '0.975' # type=range|default=1.0: The Upper quantile to clip image ranges imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS @@ -933,7 +940,7 @@ doctests: winsorize_upper_quantile: '0.975' # type=range|default=1.0: The Upper quantile to clip image ranges imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -946,7 +953,7 @@ doctests: float: 'True' # type=bool|default=False: Use float instead of double for computations. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -959,7 +966,7 @@ doctests: float: 'False' # type=bool|default=False: Use float instead of double for computations. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -969,17 +976,17 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - save_state: + save_state: '"trans.mat"' # type=file: The saved registration state to be restored # type=file|default=: Filename for saving the internal restorable state of the registration - restore_state: + restore_state: '"trans.mat"' # type=file|default=: Filename for restoring the internal restorable state of the registration initialize_transforms_per_stage: 'True' - # type=bool|default=False: Initialize linear transforms from the previous stage. By enabling this option, the current linear stage transform is directly initialized from the previous stages linear transform; this allows multiple linear stages to be run where each stage directly updates the estimated linear transform from the previous stage. (e.g. Translation -> Rigid -> Affine). + # type=bool|default=False: Initialize linear transforms from the previous stage. By enabling this option, the current linear stage transform is directly initialized from the previous stages linear transform; this allows multiple linear stages to be run where each stage directly updates the estimated linear transform from the previous stage. (e.g. Translation -> Rigid -> Affine). collapse_output_transforms: 'True' # type=bool|default=True: Collapse output transforms. Specifically, enabling this option combines all adjacent linear transforms and composes all adjacent displacement field transforms before writing the results to disk. 
imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -990,9 +997,9 @@ doctests: # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. write_composite_transform: 'False' - # type=bool|default=False: + # type=bool|default=False: imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -1002,9 +1009,9 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - fixed_image: + fixed_image: '"fixed1.nii"' # type=inputmultiobject|default=[]: Image to which the moving_image should be transformed(usually a structural image) - moving_image: + moving_image: '"moving1.nii"' # type=inputmultiobject|default=[]: Image that will be registered to the space of fixed_image. This is theimage on which the transformations will be applied to metric: '["Mattes", ["Mattes", "CC"]]' # type=list|default=[]: the metric(s) to use for each stage. Note that multiple metrics per stage are not supported in ANTS 1.9.1 and earlier. @@ -1017,7 +1024,7 @@ doctests: sampling_percentage: '[0.05, [0.05, 0.10]]' # type=list|default=[]: the metric sampling percentage(s) to use for each stage imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -1027,12 +1034,12 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - fixed_image: + fixed_image: '["fixed1.nii", "fixed2.nii"]' # type=inputmultiobject|default=[]: Image to which the moving_image should be transformed(usually a structural image) - moving_image: + moving_image: '["moving1.nii", "moving2.nii"]' # type=inputmultiobject|default=[]: Image that will be registered to the space of fixed_image. This is theimage on which the transformations will be applied to imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS @@ -1043,11 +1050,11 @@ doctests: # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. interpolation: '"BSpline"' - # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','GenericLabel','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: + # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','GenericLabel','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: interpolation_parameters: (3,) - # type=traitcompound|default=None: + # type=traitcompound|default=None: imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -1058,11 +1065,11 @@ doctests: # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. interpolation: '"Gaussian"' - # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','GenericLabel','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: + # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','GenericLabel','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: interpolation_parameters: (1.0, 1.0) - # type=traitcompound|default=None: + # type=traitcompound|default=None: imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -1073,11 +1080,11 @@ doctests: # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. transforms: '["Affine", "BSplineSyN"]' - # type=list|default=[]: + # type=list|default=[]: transform_parameters: '[(2.0,), (0.25, 26, 0, 3)]' - # type=list|default=[]: + # type=list|default=[]: imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -1087,10 +1094,10 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. 
- fixed_image_masks: + fixed_image_masks: '["NULL", "fixed1.nii"]' # type=inputmultiobject|default=[]: Masks used to limit metric sampling region of the fixed image, defined per registration stage(Use "NULL" to omit a mask at a given stage) imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -1100,12 +1107,12 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - initial_moving_transform: + initial_moving_transform: '["func_to_struct.mat", "ants_Warp.nii.gz"]' # type=inputmultiobject|default=[]: A transform or a list of transforms that should be applied before the registration begins. Note that, when a list is given, the transformations are applied in reverse order. - invert_initial_moving_transform: + invert_initial_moving_transform: '[False, False]' # type=inputmultiobject|default=[]: One boolean or a list of booleans that indicatewhether the inverse(s) of the transform(s) definedin initial_moving_transform should be used. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/registration_callables.py b/nipype-auto-conv/specs/registration_callables.py index 5191c22..2f2420c 100644 --- a/nipype-auto-conv/specs/registration_callables.py +++ b/nipype-auto-conv/specs/registration_callables.py @@ -1 +1,318 @@ -"""Module to put any functions that are referred to in Registration.yaml""" +"""Module to put any functions that are referred to in the "callables" section of Registration.yaml""" + +import attrs +import os + + +def composite_transform_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["composite_transform"] + + +def elapsed_time_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["elapsed_time"] + + +def forward_invert_flags_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["forward_invert_flags"] + + +def forward_transforms_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["forward_transforms"] + + +def inverse_composite_transform_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["inverse_composite_transform"] + + +def inverse_warped_image_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["inverse_warped_image"] + + +def metric_value_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["metric_value"] + + +def reverse_forward_invert_flags_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["reverse_forward_invert_flags"] + + +def reverse_forward_transforms_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["reverse_forward_transforms"] + + +def reverse_invert_flags_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["reverse_invert_flags"] + + +def reverse_transforms_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["reverse_transforms"] + + +def save_state_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["save_state"] + + +def warped_image_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["warped_image"] + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError + + +# Original source at L1201 of /interfaces/ants/registration.py +def _get_outputfilenames( + 
inverse=False, inputs=None, stdout=None, stderr=None, output_dir=None +): + output_filename = None + if not inverse: + if ( + inputs.output_warped_image is not attrs.NOTHING + ) and inputs.output_warped_image: + output_filename = inputs.output_warped_image + if isinstance(output_filename, bool): + output_filename = "%s_Warped.nii.gz" % inputs.output_transform_prefix + return output_filename + inv_output_filename = None + if ( + inputs.output_inverse_warped_image is not attrs.NOTHING + ) and inputs.output_inverse_warped_image: + inv_output_filename = inputs.output_inverse_warped_image + if isinstance(inv_output_filename, bool): + inv_output_filename = ( + "%s_InverseWarped.nii.gz" % inputs.output_transform_prefix + ) + return inv_output_filename + + +# Original source at L1363 of /interfaces/ants/registration.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + outputs = {} + outputs["forward_transforms"] = [] + outputs["forward_invert_flags"] = [] + outputs["reverse_transforms"] = [] + outputs["reverse_invert_flags"] = [] + + # invert_initial_moving_transform should be always defined, even if + # there's no initial transform + invert_initial_moving_transform = [False] * len(inputs.initial_moving_transform) + if inputs.invert_initial_moving_transform is not attrs.NOTHING: + invert_initial_moving_transform = inputs.invert_initial_moving_transform + + if inputs.write_composite_transform: + filename = inputs.output_transform_prefix + "Composite.h5" + outputs["composite_transform"] = os.path.abspath(filename) + filename = inputs.output_transform_prefix + "InverseComposite.h5" + outputs["inverse_composite_transform"] = os.path.abspath(filename) + # If composite transforms are written, then individuals are not written (as of 2014-10-26 + else: + if not inputs.collapse_output_transforms: + transform_count = 0 + if inputs.initial_moving_transform is not attrs.NOTHING: + outputs["forward_transforms"] += inputs.initial_moving_transform + outputs["forward_invert_flags"] += invert_initial_moving_transform + outputs["reverse_transforms"] = ( + inputs.initial_moving_transform + outputs["reverse_transforms"] + ) + outputs["reverse_invert_flags"] = [ + not e for e in invert_initial_moving_transform + ] + outputs[ + "reverse_invert_flags" + ] # Prepend + transform_count += len(inputs.initial_moving_transform) + elif inputs.initial_moving_transform_com is not attrs.NOTHING: + forward_filename, forward_inversemode = _output_filenames( + inputs.output_transform_prefix, + transform_count, + "Initial", + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + reverse_filename, reverse_inversemode = _output_filenames( + inputs.output_transform_prefix, + transform_count, + "Initial", + True, + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + outputs["forward_transforms"].append(os.path.abspath(forward_filename)) + outputs["forward_invert_flags"].append(False) + outputs["reverse_transforms"].insert( + 0, os.path.abspath(reverse_filename) + ) + outputs["reverse_invert_flags"].insert(0, True) + transform_count += 1 + + for count in range(len(inputs.transforms)): + forward_filename, forward_inversemode = _output_filenames( + inputs.output_transform_prefix, + transform_count, + inputs.transforms[count], + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + reverse_filename, reverse_inversemode = _output_filenames( + inputs.output_transform_prefix, + transform_count, + inputs.transforms[count], + True, + 
inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + outputs["forward_transforms"].append(os.path.abspath(forward_filename)) + outputs["forward_invert_flags"].append(forward_inversemode) + outputs["reverse_transforms"].insert( + 0, os.path.abspath(reverse_filename) + ) + outputs["reverse_invert_flags"].insert(0, reverse_inversemode) + transform_count += 1 + else: + transform_count = 0 + is_linear = [t in _linear_transform_names for t in inputs.transforms] + collapse_list = [] + + if (inputs.initial_moving_transform is not attrs.NOTHING) or ( + inputs.initial_moving_transform_com is not attrs.NOTHING + ): + is_linear.insert(0, True) + + # Only files returned by collapse_output_transforms + if any(is_linear): + collapse_list.append("GenericAffine") + if not all(is_linear): + collapse_list.append("SyN") + + for transform in collapse_list: + forward_filename, forward_inversemode = _output_filenames( + inputs.output_transform_prefix, + transform_count, + transform, + inverse=False, + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + reverse_filename, reverse_inversemode = _output_filenames( + inputs.output_transform_prefix, + transform_count, + transform, + inverse=True, + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + outputs["forward_transforms"].append(os.path.abspath(forward_filename)) + outputs["forward_invert_flags"].append(forward_inversemode) + outputs["reverse_transforms"].append(os.path.abspath(reverse_filename)) + outputs["reverse_invert_flags"].append(reverse_inversemode) + transform_count += 1 + + out_filename = _get_outputfilenames( + inverse=False, + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + inv_out_filename = _get_outputfilenames( + inverse=True, inputs=inputs, stdout=stdout, stderr=stderr, output_dir=output_dir + ) + if out_filename: + outputs["warped_image"] = os.path.abspath(out_filename) + if inv_out_filename: + outputs["inverse_warped_image"] = os.path.abspath(inv_out_filename) + if len(inputs.save_state): + outputs["save_state"] = os.path.abspath(inputs.save_state) + if _metric_value: + outputs["metric_value"] = _metric_value + if _elapsed_time: + outputs["elapsed_time"] = _elapsed_time + + outputs["reverse_forward_transforms"] = outputs["forward_transforms"][::-1] + outputs["reverse_forward_invert_flags"] = outputs["forward_invert_flags"][::-1] + + return outputs + + +# Original source at L1341 of /interfaces/ants/registration.py +def _output_filenames( + prefix, + count, + transform, + inverse=False, + inputs=None, + stdout=None, + stderr=None, + output_dir=None, +): + low_dimensional_transform_map = { + "Rigid": "Rigid.mat", + "Affine": "Affine.mat", + "GenericAffine": "GenericAffine.mat", + "CompositeAffine": "Affine.mat", + "Similarity": "Similarity.mat", + "Translation": "Translation.mat", + "BSpline": "BSpline.txt", + "Initial": "DerivedInitialMovingTranslation.mat", + } + if transform in list(low_dimensional_transform_map.keys()): + suffix = low_dimensional_transform_map[transform] + inverse_mode = inverse + else: + inverse_mode = False # These are not analytically invertable + if inverse: + suffix = "InverseWarp.nii.gz" + else: + suffix = "Warp.nii.gz" + return "%s%d%s" % (prefix, count, suffix), inverse_mode diff --git a/nipype-auto-conv/specs/registration_syn_quick.yaml b/nipype-auto-conv/specs/registration_syn_quick.yaml index 5feb7e1..deb3a09 100644 --- a/nipype-auto-conv/specs/registration_syn_quick.yaml +++ 
b/nipype-auto-conv/specs/registration_syn_quick.yaml @@ -5,14 +5,14 @@ # # Docs # ---- -# +# # Registration using a symmetric image normalization method (SyN). # You can read more in Avants et al.; Med Image Anal., 2008 # (https://www.ncbi.nlm.nih.gov/pubmed/17659998). -# +# # Examples # -------- -# +# # >>> from nipype.interfaces.ants import RegistrationSynQuick # >>> reg = RegistrationSynQuick() # >>> reg.inputs.fixed_image = 'fixed1.nii' @@ -21,9 +21,9 @@ # >>> reg.cmdline # 'antsRegistrationSyNQuick.sh -d 3 -f fixed1.nii -r 32 -m moving1.nii -n 2 -o transform -p d -s 26 -t s' # >>> reg.run() # doctest: +SKIP -# +# # example for multiple images -# +# # >>> from nipype.interfaces.ants import RegistrationSynQuick # >>> reg = RegistrationSynQuick() # >>> reg.inputs.fixed_image = ['fixed1.nii', 'fixed2.nii'] @@ -32,7 +32,7 @@ # >>> reg.cmdline # 'antsRegistrationSyNQuick.sh -d 3 -f fixed1.nii -f fixed2.nii -r 32 -m moving1.nii -m moving2.nii -n 2 -o transform -p d -s 26 -t s' # >>> reg.run() # doctest: +SKIP -# +# task_name: RegistrationSynQuick nipype_name: RegistrationSynQuick nipype_module: nipype.interfaces.ants.registration @@ -51,6 +51,9 @@ inputs: # type=inputmultiobject|default=[]: Fixed image or source image or reference image moving_image: medimage/nifti1+list-of # type=inputmultiobject|default=[]: Moving image or target image + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -64,16 +67,16 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
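For reference, the interactive usage that the Docs block above reproduces from the upstream nipype docstring is the following; this is a restatement of that example, not generated code from this repository.

    from nipype.interfaces.ants import RegistrationSynQuick

    reg = RegistrationSynQuick()
    reg.inputs.fixed_image = "fixed1.nii"
    reg.inputs.moving_image = "moving1.nii"
    reg.inputs.num_threads = 2
    print(reg.cmdline)
    # antsRegistrationSyNQuick.sh -d 3 -f fixed1.nii -r 32 -m moving1.nii -n 2 -o transform -p d -s 26 -t s

The reordered output fields listed next are the files this command writes under the 'transform' output prefix.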
- warped_image: generic/file - # type=file: Warped image - inverse_warped_image: generic/file - # type=file: Inverse warped image - out_matrix: generic/file - # type=file: Affine matrix forward_warp_field: generic/file # type=file: Forward warp field inverse_warp_field: generic/file # type=file: Inverse warp field + inverse_warped_image: generic/file + # type=file: Inverse warped image + out_matrix: generic/file + # type=file: Affine matrix + warped_image: generic/file + # type=file: Warped image callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields @@ -96,7 +99,7 @@ tests: num_threads: # type=int|default=1: Number of threads (default = 1) transform_type: - # type=enum|default='s'|allowed['a','b','br','r','s','sr','t']: Transform type * t: translation * r: rigid * a: rigid + affine * s: rigid + affine + deformable syn (default) * sr: rigid + deformable syn * b: rigid + affine + deformable b-spline syn * br: rigid + deformable b-spline syn + # type=enum|default='s'|allowed['a','b','br','r','s','sr','t']: Transform type * t: translation * r: rigid * a: rigid + affine * s: rigid + affine + deformable syn (default) * sr: rigid + deformable syn * b: rigid + affine + deformable b-spline syn * br: rigid + deformable b-spline syn use_histogram_matching: # type=bool|default=False: use histogram matching histogram_bins: @@ -112,15 +115,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -136,15 +139,15 @@ tests: num_threads: '2' # type=int|default=1: Number of threads (default = 1) imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. 
Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -160,15 +163,15 @@ tests: num_threads: '2' # type=int|default=1: Number of threads (default = 1) imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -181,14 +184,14 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - fixed_image: + fixed_image: '"fixed1.nii"' # type=inputmultiobject|default=[]: Fixed image or source image or reference image - moving_image: + moving_image: '"moving1.nii"' # type=inputmultiobject|default=[]: Moving image or target image num_threads: '2' # type=int|default=1: Number of threads (default = 1) imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -198,14 +201,14 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - fixed_image: + fixed_image: '["fixed1.nii", "fixed2.nii"]' # type=inputmultiobject|default=[]: Fixed image or source image or reference image - moving_image: + moving_image: '["moving1.nii", "moving2.nii"]' # type=inputmultiobject|default=[]: Moving image or target image num_threads: '2' # type=int|default=1: Number of threads (default = 1) imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/registration_syn_quick_callables.py b/nipype-auto-conv/specs/registration_syn_quick_callables.py index d009c67..2fb5730 100644 --- a/nipype-auto-conv/specs/registration_syn_quick_callables.py +++ b/nipype-auto-conv/specs/registration_syn_quick_callables.py @@ -1 +1,57 @@ -"""Module to put any functions that are referred to in RegistrationSynQuick.yaml""" +"""Module to put any functions that are referred to in the "callables" section of RegistrationSynQuick.yaml""" + +import os + + +def forward_warp_field_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["forward_warp_field"] + + +def inverse_warp_field_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["inverse_warp_field"] + + +def inverse_warped_image_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["inverse_warped_image"] + + +def out_matrix_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["out_matrix"] + + +def warped_image_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["warped_image"] + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError + + +# Original source at L1777 of /interfaces/ants/registration.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + outputs = {} + out_base = os.path.abspath(inputs.output_prefix) + outputs["warped_image"] = out_base + "Warped.nii.gz" + outputs["inverse_warped_image"] = out_base + "InverseWarped.nii.gz" + outputs["out_matrix"] = out_base + "0GenericAffine.mat" + + if inputs.transform_type not in ("t", "r", "a"): + outputs["forward_warp_field"] = out_base + "1Warp.nii.gz" + outputs["inverse_warp_field"] = out_base + "1InverseWarp.nii.gz" + return outputs diff --git a/nipype-auto-conv/specs/resample_image_by_spacing.yaml b/nipype-auto-conv/specs/resample_image_by_spacing.yaml index 8e70c36..8700041 100644 --- a/nipype-auto-conv/specs/resample_image_by_spacing.yaml +++ b/nipype-auto-conv/specs/resample_image_by_spacing.yaml @@ -5,9 +5,9 @@ # # Docs # ---- -# +# # Resample an image with a given spacing. 
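Before moving on to ResampleImageBySpacing, a short note on the RegistrationSynQuick callables just above: _list_outputs derives every output path from inputs.output_prefix, and the warp fields are only reported for deformable transform types. The trace below is illustrative; SimpleNamespace is a stand-in for the real inputs object, and 'transform'/'s' are the defaults shown in the docstring example.

    import os
    from types import SimpleNamespace

    inputs = SimpleNamespace(output_prefix="transform", transform_type="s")  # mock inputs
    out_base = os.path.abspath(inputs.output_prefix)
    outputs = {
        "warped_image": out_base + "Warped.nii.gz",
        "inverse_warped_image": out_base + "InverseWarped.nii.gz",
        "out_matrix": out_base + "0GenericAffine.mat",
    }
    if inputs.transform_type not in ("t", "r", "a"):  # warp fields only for non-linear types
        outputs["forward_warp_field"] = out_base + "1Warp.nii.gz"
        outputs["inverse_warp_field"] = out_base + "1InverseWarp.nii.gz"
    print(outputs["warped_image"])  # <cwd>/transformWarped.nii.gz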
-# +# # Examples # -------- # >>> res = ResampleImageBySpacing(dimension=3) @@ -16,7 +16,7 @@ # >>> res.inputs.out_spacing = (4, 4, 4) # >>> res.cmdline #doctest: +ELLIPSIS # 'ResampleImageBySpacing 3 structural.nii output.nii.gz 4 4 4' -# +# # >>> res = ResampleImageBySpacing(dimension=3) # >>> res.inputs.input_image = 'structural.nii' # >>> res.inputs.output_image = 'output.nii.gz' @@ -24,7 +24,7 @@ # >>> res.inputs.apply_smoothing = True # >>> res.cmdline #doctest: +ELLIPSIS # 'ResampleImageBySpacing 3 structural.nii output.nii.gz 4 4 4 1' -# +# # >>> res = ResampleImageBySpacing(dimension=3) # >>> res.inputs.input_image = 'structural.nii' # >>> res.inputs.output_image = 'output.nii.gz' @@ -34,8 +34,8 @@ # >>> res.inputs.nn_interp = False # >>> res.cmdline #doctest: +ELLIPSIS # 'ResampleImageBySpacing 3 structural.nii output.nii.gz 0.4 0.4 0.4 1 2 0' -# -# +# +# task_name: ResampleImageBySpacing nipype_name: ResampleImageBySpacing nipype_module: nipype.interfaces.ants.utils @@ -52,9 +52,12 @@ inputs: # passed to the field in the automatically generated unittests. input_image: medimage/nifti1 # type=file|default=: input image file - output_image: medimage/nifti-gz + output_image: Path # type=file: resampled file # type=file|default=: output image file + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -104,15 +107,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. 
Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -123,7 +126,7 @@ tests: # (if not specified, will try to choose a sensible value) input_image: # type=file|default=: input image file - output_image: + output_image: '"output.nii.gz"' # type=file: resampled file # type=file|default=: output image file out_spacing: (4, 4, 4) @@ -131,15 +134,15 @@ tests: dimension: '3' # type=int|default=3: dimension of output image imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -150,7 +153,7 @@ tests: # (if not specified, will try to choose a sensible value) input_image: # type=file|default=: input image file - output_image: + output_image: '"output.nii.gz"' # type=file: resampled file # type=file|default=: output image file out_spacing: (4, 4, 4) @@ -160,15 +163,15 @@ tests: dimension: '3' # type=int|default=3: dimension of output image imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. 
Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -179,7 +182,7 @@ tests: # (if not specified, will try to choose a sensible value) input_image: # type=file|default=: input image file - output_image: + output_image: '"output.nii.gz"' # type=file: resampled file # type=file|default=: output image file out_spacing: (0.4, 0.4, 0.4) @@ -193,15 +196,15 @@ tests: dimension: '3' # type=int|default=3: dimension of output image imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -214,9 +217,9 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - input_image: + input_image: '"structural.nii"' # type=file|default=: input image file - output_image: + output_image: '"output.nii.gz"' # type=file: resampled file # type=file|default=: output image file out_spacing: (4, 4, 4) @@ -224,7 +227,7 @@ doctests: dimension: '3' # type=int|default=3: dimension of output image imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -234,9 +237,9 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - input_image: + input_image: '"structural.nii"' # type=file|default=: input image file - output_image: + output_image: '"output.nii.gz"' # type=file: resampled file # type=file|default=: output image file out_spacing: (4, 4, 4) @@ -246,7 +249,7 @@ doctests: dimension: '3' # type=int|default=3: dimension of output image imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -256,9 +259,9 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. 
# If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - input_image: + input_image: '"structural.nii"' # type=file|default=: input image file - output_image: + output_image: '"output.nii.gz"' # type=file: resampled file # type=file|default=: output image file out_spacing: (0.4, 0.4, 0.4) @@ -272,7 +275,7 @@ doctests: dimension: '3' # type=int|default=3: dimension of output image imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/resample_image_by_spacing_callables.py b/nipype-auto-conv/specs/resample_image_by_spacing_callables.py index e84e078..ae2b321 100644 --- a/nipype-auto-conv/specs/resample_image_by_spacing_callables.py +++ b/nipype-auto-conv/specs/resample_image_by_spacing_callables.py @@ -1 +1,203 @@ -"""Module to put any functions that are referred to in ResampleImageBySpacing.yaml""" +"""Module to put any functions that are referred to in the "callables" section of ResampleImageBySpacing.yaml""" + +import attrs +import logging +import os +import os.path as op + + +def output_image_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["output_image"] + + +iflogger = logging.getLogger("nipype.interface") + + +# Original source at L809 of /interfaces/base/core.py +def _filename_from_source( + name, chain=None, inputs=None, stdout=None, stderr=None, output_dir=None +): + if chain is None: + chain = [] + + trait_spec = inputs.trait(name) + retval = getattr(inputs, name) + source_ext = None + if (retval is attrs.NOTHING) or "%s" in retval: + if not trait_spec.name_source: + return retval + + # Do not generate filename when excluded by other inputs + if any( + (getattr(inputs, field) is not attrs.NOTHING) + for field in trait_spec.xor or () + ): + return retval + + # Do not generate filename when required fields are missing + if not all( + (getattr(inputs, field) is not attrs.NOTHING) + for field in trait_spec.requires or () + ): + return retval + + if (retval is not attrs.NOTHING) and "%s" in retval: + name_template = retval + else: + name_template = trait_spec.name_template + if not name_template: + name_template = "%s_generated" + + ns = trait_spec.name_source + while isinstance(ns, (list, tuple)): + if len(ns) > 1: + iflogger.warning("Only one name_source per trait is allowed") + ns = ns[0] + + if not isinstance(ns, (str, bytes)): + raise ValueError( + "name_source of '{}' trait should be an input trait " + "name, but a type {} object was found".format(name, type(ns)) + ) + + if getattr(inputs, ns) is not attrs.NOTHING: + name_source = ns + source = getattr(inputs, name_source) + while isinstance(source, list): + source = source[0] + + # special treatment for files + try: + _, base, source_ext = split_filename(source) + except (AttributeError, TypeError): + base = source + else: + if name in chain: + raise NipypeInterfaceError("Mutually pointing name_sources") + + chain.append(name) + base = _filename_from_source( + ns, + chain, + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + if base is not 
attrs.NOTHING: + _, _, source_ext = split_filename(base) + else: + # Do not generate filename when required fields are missing + return retval + + chain = None + retval = name_template % base + _, _, ext = split_filename(retval) + if trait_spec.keep_extension and (ext or source_ext): + if (ext is None or not ext) and source_ext: + retval = retval + source_ext + else: + retval = _overload_extension( + retval, + name, + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + return retval + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError + + +# Original source at L891 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + metadata = dict(name_source=lambda t: t is not None) + traits = inputs.traits(**metadata) + if traits: + outputs = {} + for name, trait_spec in list(traits.items()): + out_name = name + if trait_spec.output_name is not None: + out_name = trait_spec.output_name + fname = _filename_from_source( + name, inputs=inputs, stdout=stdout, stderr=stderr, output_dir=output_dir + ) + if fname is not attrs.NOTHING: + outputs[out_name] = os.path.abspath(fname) + return outputs + + +# Original source at L888 of /interfaces/base/core.py +def _overload_extension( + value, name=None, inputs=None, stdout=None, stderr=None, output_dir=None +): + return value + + +# Original source at L58 of /utils/filemanip.py +def split_filename(fname): + """Split a filename into parts: path, base filename and extension. + + Parameters + ---------- + fname : str + file or path name + + Returns + ------- + pth : str + base path from fname + fname : str + filename from fname, without extension + ext : str + file extension from fname + + Examples + -------- + >>> from nipype.utils.filemanip import split_filename + >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz') + >>> pth + '/home/data' + + >>> fname + 'subject' + + >>> ext + '.nii.gz' + + """ + + special_extensions = [".nii.gz", ".tar.gz", ".niml.dset"] + + pth = op.dirname(fname) + fname = op.basename(fname) + + ext = None + for special_ext in special_extensions: + ext_len = len(special_ext) + if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()): + ext = fname[-ext_len:] + fname = fname[:-ext_len] + break + if not ext: + fname, ext = op.splitext(fname) + + return pth, fname, ext + + +# Original source at L125 of /interfaces/base/support.py +class NipypeInterfaceError(Exception): + """Custom error for interfaces""" + + def __init__(self, value): + self.value = value + + def __str__(self): + return "{}".format(self.value) diff --git a/nipype-auto-conv/specs/threshold_image.yaml b/nipype-auto-conv/specs/threshold_image.yaml index de7136b..1e6ed70 100644 --- a/nipype-auto-conv/specs/threshold_image.yaml +++ b/nipype-auto-conv/specs/threshold_image.yaml @@ -5,9 +5,9 @@ # # Docs # ---- -# +# # Apply thresholds on images. 
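The resample callables above vendor split_filename because compound extensions such as '.nii.gz' must be kept together when output names are generated from name_source templates. The condensed re-statement below is for illustration only (trimmed to the essentials, not the code that ships in the module).

    import os.path as op

    def split_compound(fname):
        # keep '.nii.gz'-style extensions whole instead of splitting at the last dot
        special_extensions = (".nii.gz", ".tar.gz", ".niml.dset")
        pth, base = op.dirname(fname), op.basename(fname)
        for special_ext in special_extensions:
            if base.lower().endswith(special_ext):
                return pth, base[: -len(special_ext)], base[-len(special_ext):]
        stem, ext = op.splitext(base)
        return pth, stem, ext

    print(split_compound("/home/data/subject.nii.gz"))  # ('/home/data', 'subject', '.nii.gz')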
-# +# # Examples # -------- # >>> thres = ThresholdImage(dimension=3) @@ -19,7 +19,7 @@ # >>> thres.inputs.outside_value = 0.0 # >>> thres.cmdline #doctest: +ELLIPSIS # 'ThresholdImage 3 structural.nii output.nii.gz 0.500000 1.000000 1.000000 0.000000' -# +# # >>> thres = ThresholdImage(dimension=3) # >>> thres.inputs.input_image = 'structural.nii' # >>> thres.inputs.output_image = 'output.nii.gz' @@ -27,8 +27,8 @@ # >>> thres.inputs.num_thresholds = 4 # >>> thres.cmdline #doctest: +ELLIPSIS # 'ThresholdImage 3 structural.nii output.nii.gz Kmeans 4' -# -# +# +# task_name: ThresholdImage nipype_name: ThresholdImage nipype_module: nipype.interfaces.ants.utils @@ -45,11 +45,14 @@ inputs: # passed to the field in the automatically generated unittests. input_image: medimage/nifti1 # type=file|default=: input image file - output_image: medimage/nifti-gz - # type=file: resampled file - # type=file|default=: output image file input_mask: generic/file # type=file|default=: input mask for Otsu, Kmeans + output_image: Path + # type=file: resampled file + # type=file|default=: output image file + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) outputs: @@ -107,15 +110,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. 
Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -126,7 +129,7 @@ tests: # (if not specified, will try to choose a sensible value) input_image: # type=file|default=: input image file - output_image: + output_image: '"output.nii.gz"' # type=file: resampled file # type=file|default=: output image file th_low: '0.5' @@ -140,15 +143,15 @@ tests: dimension: '3' # type=int|default=3: dimension of output image imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -159,7 +162,7 @@ tests: # (if not specified, will try to choose a sensible value) input_image: # type=file|default=: input image file - output_image: + output_image: '"output.nii.gz"' # type=file: resampled file # type=file|default=: output image file mode: '"Kmeans"' @@ -169,15 +172,15 @@ tests: dimension: '3' # type=int|default=3: dimension of output image imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -190,9 +193,9 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. 
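The doctest entry that continues below mirrors the first upstream example reproduced in the Docs block of this spec; for orientation, the equivalent interactive usage is sketched here (example filenames only, not defaults of the generated task).

    from nipype.interfaces.ants.utils import ThresholdImage

    thres = ThresholdImage(dimension=3)
    thres.inputs.input_image = "structural.nii"
    thres.inputs.output_image = "output.nii.gz"
    thres.inputs.th_low = 0.5
    thres.inputs.th_high = 1.0
    thres.inputs.inside_value = 1.0
    thres.inputs.outside_value = 0.0
    print(thres.cmdline)
    # ThresholdImage 3 structural.nii output.nii.gz 0.500000 1.000000 1.000000 0.000000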
- input_image: + input_image: '"structural.nii"' # type=file|default=: input image file - output_image: + output_image: '"output.nii.gz"' # type=file: resampled file # type=file|default=: output image file th_low: '0.5' @@ -206,7 +209,7 @@ doctests: dimension: '3' # type=int|default=3: dimension of output image imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -216,9 +219,9 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - input_image: + input_image: '"structural.nii"' # type=file|default=: input image file - output_image: + output_image: '"output.nii.gz"' # type=file: resampled file # type=file|default=: output image file mode: '"Kmeans"' @@ -228,7 +231,7 @@ doctests: dimension: '3' # type=int|default=3: dimension of output image imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/threshold_image_callables.py b/nipype-auto-conv/specs/threshold_image_callables.py index 91e21ef..fce8554 100644 --- a/nipype-auto-conv/specs/threshold_image_callables.py +++ b/nipype-auto-conv/specs/threshold_image_callables.py @@ -1 +1,203 @@ -"""Module to put any functions that are referred to in ThresholdImage.yaml""" +"""Module to put any functions that are referred to in the "callables" section of ThresholdImage.yaml""" + +import attrs +import logging +import os +import os.path as op + + +def output_image_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["output_image"] + + +iflogger = logging.getLogger("nipype.interface") + + +# Original source at L809 of /interfaces/base/core.py +def _filename_from_source( + name, chain=None, inputs=None, stdout=None, stderr=None, output_dir=None +): + if chain is None: + chain = [] + + trait_spec = inputs.trait(name) + retval = getattr(inputs, name) + source_ext = None + if (retval is attrs.NOTHING) or "%s" in retval: + if not trait_spec.name_source: + return retval + + # Do not generate filename when excluded by other inputs + if any( + (getattr(inputs, field) is not attrs.NOTHING) + for field in trait_spec.xor or () + ): + return retval + + # Do not generate filename when required fields are missing + if not all( + (getattr(inputs, field) is not attrs.NOTHING) + for field in trait_spec.requires or () + ): + return retval + + if (retval is not attrs.NOTHING) and "%s" in retval: + name_template = retval + else: + name_template = trait_spec.name_template + if not name_template: + name_template = "%s_generated" + + ns = trait_spec.name_source + while isinstance(ns, (list, tuple)): + if len(ns) > 1: + iflogger.warning("Only one name_source per trait is allowed") + ns = ns[0] + 
+ if not isinstance(ns, (str, bytes)): + raise ValueError( + "name_source of '{}' trait should be an input trait " + "name, but a type {} object was found".format(name, type(ns)) + ) + + if getattr(inputs, ns) is not attrs.NOTHING: + name_source = ns + source = getattr(inputs, name_source) + while isinstance(source, list): + source = source[0] + + # special treatment for files + try: + _, base, source_ext = split_filename(source) + except (AttributeError, TypeError): + base = source + else: + if name in chain: + raise NipypeInterfaceError("Mutually pointing name_sources") + + chain.append(name) + base = _filename_from_source( + ns, + chain, + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + if base is not attrs.NOTHING: + _, _, source_ext = split_filename(base) + else: + # Do not generate filename when required fields are missing + return retval + + chain = None + retval = name_template % base + _, _, ext = split_filename(retval) + if trait_spec.keep_extension and (ext or source_ext): + if (ext is None or not ext) and source_ext: + retval = retval + source_ext + else: + retval = _overload_extension( + retval, + name, + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + return retval + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError + + +# Original source at L891 of /interfaces/base/core.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + metadata = dict(name_source=lambda t: t is not None) + traits = inputs.traits(**metadata) + if traits: + outputs = {} + for name, trait_spec in list(traits.items()): + out_name = name + if trait_spec.output_name is not None: + out_name = trait_spec.output_name + fname = _filename_from_source( + name, inputs=inputs, stdout=stdout, stderr=stderr, output_dir=output_dir + ) + if fname is not attrs.NOTHING: + outputs[out_name] = os.path.abspath(fname) + return outputs + + +# Original source at L888 of /interfaces/base/core.py +def _overload_extension( + value, name=None, inputs=None, stdout=None, stderr=None, output_dir=None +): + return value + + +# Original source at L58 of /utils/filemanip.py +def split_filename(fname): + """Split a filename into parts: path, base filename and extension. 
+ + Parameters + ---------- + fname : str + file or path name + + Returns + ------- + pth : str + base path from fname + fname : str + filename from fname, without extension + ext : str + file extension from fname + + Examples + -------- + >>> from nipype.utils.filemanip import split_filename + >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz') + >>> pth + '/home/data' + + >>> fname + 'subject' + + >>> ext + '.nii.gz' + + """ + + special_extensions = [".nii.gz", ".tar.gz", ".niml.dset"] + + pth = op.dirname(fname) + fname = op.basename(fname) + + ext = None + for special_ext in special_extensions: + ext_len = len(special_ext) + if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()): + ext = fname[-ext_len:] + fname = fname[:-ext_len] + break + if not ext: + fname, ext = op.splitext(fname) + + return pth, fname, ext + + +# Original source at L125 of /interfaces/base/support.py +class NipypeInterfaceError(Exception): + """Custom error for interfaces""" + + def __init__(self, value): + self.value = value + + def __str__(self): + return "{}".format(self.value) diff --git a/nipype-auto-conv/specs/warp_image_multi_transform.yaml b/nipype-auto-conv/specs/warp_image_multi_transform.yaml index 6f7c350..915c3c8 100644 --- a/nipype-auto-conv/specs/warp_image_multi_transform.yaml +++ b/nipype-auto-conv/specs/warp_image_multi_transform.yaml @@ -6,10 +6,10 @@ # Docs # ---- # Warps an image from one space to another -# +# # Examples # -------- -# +# # >>> from nipype.interfaces.ants import WarpImageMultiTransform # >>> wimt = WarpImageMultiTransform() # >>> wimt.inputs.input_image = 'structural.nii' @@ -17,7 +17,7 @@ # >>> wimt.inputs.transformation_series = ['ants_Warp.nii.gz','ants_Affine.txt'] # >>> wimt.cmdline # 'WarpImageMultiTransform 3 structural.nii structural_wimt.nii -R ants_deformed.nii.gz ants_Warp.nii.gz ants_Affine.txt' -# +# # >>> wimt = WarpImageMultiTransform() # >>> wimt.inputs.input_image = 'diffusion_weighted.nii' # >>> wimt.inputs.reference_image = 'functional.nii' @@ -25,8 +25,8 @@ # >>> wimt.inputs.invert_affine = [1] # this will invert the 1st Affine file: 'func2anat_coreg_Affine.txt' # >>> wimt.cmdline # 'WarpImageMultiTransform 3 diffusion_weighted.nii diffusion_weighted_wimt.nii -R functional.nii -i func2anat_coreg_Affine.txt func2anat_InverseWarp.nii.gz dwi2anat_Warp.nii.gz dwi2anat_coreg_Affine.txt' -# -# +# +# task_name: WarpImageMultiTransform nipype_name: WarpImageMultiTransform nipype_module: nipype.interfaces.ants.resampling @@ -43,12 +43,18 @@ inputs: # passed to the field in the automatically generated unittests. input_image: medimage/nifti1 # type=file|default=: image to apply transformation to (generally a coregistered functional) - out_postfix: generic/file + out_postfix: str # type=file|default='_wimt': Postfix that is prepended to all output files (default = _wimt) + output_image: Path + # type=file: Warped image + # type=file|default=: name of the output warped image reference_image: medimage/nifti1,medimage/nifti-gz # type=file|default=: reference image space that you wish to warp INTO transformation_series: '[text/text-file,medimage/nifti-gz]+list-of' # type=inputmultiobject|default=[]: transformation file(s) to be applied + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. 
out_file: position: 1) outputs: @@ -109,15 +115,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -133,15 +139,15 @@ tests: transformation_series: # type=inputmultiobject|default=[]: transformation file(s) to be applied imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -159,15 +165,15 @@ tests: invert_affine: '[1] # this will invert the 1st Affine file: "func2anat_coreg_Affine.txt"' # type=list|default=[]: List of Affine transformations to invert.E.g.: [1,4,5] inverts the 1st, 4th, and 5th Affines found in transformation_series. Note that indexing starts with 1 and does not include warp fields. Affine transformations are distinguished from warp fields by the word "affine" included in their filenames. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. 
Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -180,14 +186,14 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - input_image: + input_image: '"structural.nii"' # type=file|default=: image to apply transformation to (generally a coregistered functional) - reference_image: + reference_image: '"ants_deformed.nii.gz"' # type=file|default=: reference image space that you wish to warp INTO - transformation_series: + transformation_series: '["ants_Warp.nii.gz","ants_Affine.txt"]' # type=inputmultiobject|default=[]: transformation file(s) to be applied imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -197,16 +203,16 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - input_image: + input_image: '"diffusion_weighted.nii"' # type=file|default=: image to apply transformation to (generally a coregistered functional) - reference_image: + reference_image: '"functional.nii"' # type=file|default=: reference image space that you wish to warp INTO - transformation_series: + transformation_series: '["func2anat_coreg_Affine.txt","func2anat_InverseWarp.nii.gz", "dwi2anat_Warp.nii.gz","dwi2anat_coreg_Affine.txt"]' # type=inputmultiobject|default=[]: transformation file(s) to be applied invert_affine: '[1] # this will invert the 1st Affine file: "func2anat_coreg_Affine.txt"' # type=list|default=[]: List of Affine transformations to invert.E.g.: [1,4,5] inverts the 1st, 4th, and 5th Affines found in transformation_series. Note that indexing starts with 1 and does not include warp fields. Affine transformations are distinguished from warp fields by the word "affine" included in their filenames. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/warp_image_multi_transform_callables.py b/nipype-auto-conv/specs/warp_image_multi_transform_callables.py index 6982677..fd0ce2b 100644 --- a/nipype-auto-conv/specs/warp_image_multi_transform_callables.py +++ b/nipype-auto-conv/specs/warp_image_multi_transform_callables.py @@ -1 +1,93 @@ -"""Module to put any functions that are referred to in WarpImageMultiTransform.yaml""" +"""Module to put any functions that are referred to in the "callables" section of WarpImageMultiTransform.yaml""" + +import attrs +import os +import os.path as op + + +def output_image_default(inputs): + return _gen_filename("output_image", inputs=inputs) + + +def output_image_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["output_image"] + + +# Original source at L262 of /interfaces/ants/resampling.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + if name == "output_image": + _, name, ext = split_filename(os.path.abspath(inputs.input_image)) + return "".join((name, inputs.out_postfix, ext)) + return None + + +# Original source at L295 of /interfaces/ants/resampling.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + outputs = {} + if inputs.output_image is not attrs.NOTHING: + outputs["output_image"] = os.path.abspath(inputs.output_image) + else: + outputs["output_image"] = os.path.abspath( + _gen_filename( + "output_image", + inputs=inputs, + stdout=stdout, + stderr=stderr, + output_dir=output_dir, + ) + ) + return outputs + + +# Original source at L58 of /utils/filemanip.py +def split_filename(fname): + """Split a filename into parts: path, base filename and extension. 
+ + Parameters + ---------- + fname : str + file or path name + + Returns + ------- + pth : str + base path from fname + fname : str + filename from fname, without extension + ext : str + file extension from fname + + Examples + -------- + >>> from nipype.utils.filemanip import split_filename + >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz') + >>> pth + '/home/data' + + >>> fname + 'subject' + + >>> ext + '.nii.gz' + + """ + + special_extensions = [".nii.gz", ".tar.gz", ".niml.dset"] + + pth = op.dirname(fname) + fname = op.basename(fname) + + ext = None + for special_ext in special_extensions: + ext_len = len(special_ext) + if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()): + ext = fname[-ext_len:] + fname = fname[:-ext_len] + break + if not ext: + fname, ext = op.splitext(fname) + + return pth, fname, ext diff --git a/nipype-auto-conv/specs/warp_time_series_image_multi_transform.yaml b/nipype-auto-conv/specs/warp_time_series_image_multi_transform.yaml index 10c84aa..b9813e6 100644 --- a/nipype-auto-conv/specs/warp_time_series_image_multi_transform.yaml +++ b/nipype-auto-conv/specs/warp_time_series_image_multi_transform.yaml @@ -6,10 +6,10 @@ # Docs # ---- # Warps a time-series from one space to another -# +# # Examples # -------- -# +# # >>> from nipype.interfaces.ants import WarpTimeSeriesImageMultiTransform # >>> wtsimt = WarpTimeSeriesImageMultiTransform() # >>> wtsimt.inputs.input_image = 'resting.nii' @@ -17,7 +17,7 @@ # >>> wtsimt.inputs.transformation_series = ['ants_Warp.nii.gz','ants_Affine.txt'] # >>> wtsimt.cmdline # 'WarpTimeSeriesImageMultiTransform 4 resting.nii resting_wtsimt.nii -R ants_deformed.nii.gz ants_Warp.nii.gz ants_Affine.txt' -# +# # >>> wtsimt = WarpTimeSeriesImageMultiTransform() # >>> wtsimt.inputs.input_image = 'resting.nii' # >>> wtsimt.inputs.reference_image = 'ants_deformed.nii.gz' @@ -25,7 +25,7 @@ # >>> wtsimt.inputs.invert_affine = [1] # # this will invert the 1st Affine file: ants_Affine.txt # >>> wtsimt.cmdline # 'WarpTimeSeriesImageMultiTransform 4 resting.nii resting_wtsimt.nii -R ants_deformed.nii.gz ants_Warp.nii.gz -i ants_Affine.txt' -# +# task_name: WarpTimeSeriesImageMultiTransform nipype_name: WarpTimeSeriesImageMultiTransform nipype_module: nipype.interfaces.ants.resampling @@ -46,6 +46,9 @@ inputs: # type=file|default=: reference image space that you wish to warp INTO transformation_series: medimage/nifti-gz+list-of # type=inputmultiobject|default=[]: transformation file(s) to be applied + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields metadata: # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. 
out_file: position: 1) outputs: @@ -99,15 +102,15 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -123,15 +126,15 @@ tests: transformation_series: # type=inputmultiobject|default=[]: transformation file(s) to be applied imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -149,15 +152,15 @@ tests: invert_affine: '[1] # # this will invert the 1st Affine file: ants_Affine.txt' # type=list|default=[]: List of Affine transformations to invert.E.g.: [1,4,5] inverts the 1st, 4th, and 5th Affines found in transformation_series. Note that indexing starts with 1 and does not include warp fields. Affine transformations are distinguished from warp fields by the word "affine" included in their filenames. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically # be terminated before they complete for time-saving reasons, and therefore # these values will be ignored, when running in CI timeout: 10 - # int - the value to set for the timeout in the generated test, - # after which the test will be considered to have been initialised + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised # successfully. 
Set to 0 to disable the timeout (warning, this could # lead to the unittests taking a very long time to complete) xfail: true @@ -170,14 +173,14 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - input_image: + input_image: '"resting.nii"' # type=file|default=: image to apply transformation to (generally a coregistered functional) - reference_image: + reference_image: '"ants_deformed.nii.gz"' # type=file|default=: reference image space that you wish to warp INTO - transformation_series: + transformation_series: '["ants_Warp.nii.gz","ants_Affine.txt"]' # type=inputmultiobject|default=[]: transformation file(s) to be applied imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -187,16 +190,16 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - input_image: + input_image: '"resting.nii"' # type=file|default=: image to apply transformation to (generally a coregistered functional) - reference_image: + reference_image: '"ants_deformed.nii.gz"' # type=file|default=: reference image space that you wish to warp INTO - transformation_series: + transformation_series: '["ants_Warp.nii.gz","ants_Affine.txt"]' # type=inputmultiobject|default=[]: transformation file(s) to be applied invert_affine: '[1] # # this will invert the 1st Affine file: ants_Affine.txt' # type=list|default=[]: List of Affine transformations to invert.E.g.: [1,4,5] inverts the 1st, 4th, and 5th Affines found in transformation_series. Note that indexing starts with 1 and does not include warp fields. Affine transformations are distinguished from warp fields by the word "affine" included in their filenames. imports: - # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/nipype-auto-conv/specs/warp_time_series_image_multi_transform_callables.py b/nipype-auto-conv/specs/warp_time_series_image_multi_transform_callables.py index 7eef52d..3a7dcc9 100644 --- a/nipype-auto-conv/specs/warp_time_series_image_multi_transform_callables.py +++ b/nipype-auto-conv/specs/warp_time_series_image_multi_transform_callables.py @@ -1 +1,77 @@ -"""Module to put any functions that are referred to in WarpTimeSeriesImageMultiTransform.yaml""" +"""Module to put any functions that are referred to in the "callables" section of WarpTimeSeriesImageMultiTransform.yaml""" + +import os +import os.path as op + + +def output_image_callable(output_dir, inputs, stdout, stderr): + outputs = _list_outputs( + output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr + ) + return outputs["output_image"] + + +# Original source at L885 of /interfaces/base/core.py +def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None): + raise NotImplementedError + + +# Original source at L137 of /interfaces/ants/resampling.py +def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None): + outputs = {} + _, name, ext = split_filename(os.path.abspath(inputs.input_image)) + outputs["output_image"] = os.path.join( + output_dir, "".join((name, inputs.out_postfix, ext)) + ) + return outputs + + +# Original source at L58 of /utils/filemanip.py +def split_filename(fname): + """Split a filename into parts: path, base filename and extension. + + Parameters + ---------- + fname : str + file or path name + + Returns + ------- + pth : str + base path from fname + fname : str + filename from fname, without extension + ext : str + file extension from fname + + Examples + -------- + >>> from nipype.utils.filemanip import split_filename + >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz') + >>> pth + '/home/data' + + >>> fname + 'subject' + + >>> ext + '.nii.gz' + + """ + + special_extensions = [".nii.gz", ".tar.gz", ".niml.dset"] + + pth = op.dirname(fname) + fname = op.basename(fname) + + ext = None + for special_ext in special_extensions: + ext_len = len(special_ext) + if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()): + ext = fname[-ext_len:] + fname = fname[:-ext_len] + break + if not ext: + fname, ext = op.splitext(fname) + + return pth, fname, ext diff --git a/pydra/tasks/ants/v1/__init__.py b/pydra/tasks/ants/v1/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/pydra/tasks/ants/v2_5/__init__.py b/pydra/tasks/ants/v2_5/__init__.py new file mode 100644 index 0000000..030759b --- /dev/null +++ b/pydra/tasks/ants/v2_5/__init__.py @@ -0,0 +1,11 @@ +""" +ANTs +==== + +>>> from pydra.tasks import ants +""" + +from .apply_transforms import ApplyTransforms +from .bias_correction import N4BiasFieldCorrection +from .create_jacobian_determinant_image import CreateJacobianDeterminantImage +from .registration import Registration, registration_syn, registration_syn_quick diff --git a/pydra/tasks/ants/v2_5/apply_transforms.py b/pydra/tasks/ants/v2_5/apply_transforms.py new file mode 100644 index 0000000..770098e --- /dev/null +++ b/pydra/tasks/ants/v2_5/apply_transforms.py @@ -0,0 +1,244 @@ +__all__ = ["ApplyTransforms"] + +from os import PathLike +from typing import Sequence + +from attrs import define, field +from pydra.engine.specs import ShellSpec, SpecInfo +from pydra.engine.task import ShellCommandTask + + +def _format_output( + output_image: PathLike, + save_warp_field: bool, + 
output_warp_field: PathLike, + save_transform: bool, + output_transform: PathLike, + invert_transform: bool, +) -> str: + return "-o {}".format( + "Linear[{},{:d}]".format(output_transform, invert_transform) + if save_transform + else "[{},{:d}]".format(output_warp_field, save_warp_field) + if save_warp_field + else f"{output_image}" + ) + + +def _format_interpolation( + interpolator: str, sigma: float, alpha: float, order: int +) -> str: + return "-n {}{}".format( + interpolator, + f"[{order}]" + if interpolator == "BSpline" + else f"[{sigma},{alpha}]" + if interpolator in ("MultiLabel", "Gaussian") + else "", + ) + + +class ApplyTransforms(ShellCommandTask): + """Task definition for antsApplyTransforms. + + Examples + -------- + >>> task = ApplyTransforms(moving_image="moving.nii", fixed_image="fixed.nii") + >>> task.cmdline # doctest: +ELLIPSIS + 'antsApplyTransforms -e scalar -i moving.nii -r fixed.nii -o .../moving_warped.nii -n Linear ...' + + >>> task = ApplyTransforms( + ... moving_image="moving.nii", + ... fixed_image="fixed.nii", + ... interpolator="BSpline", + ... input_transforms=["affine.mat"], + ... ) + >>> task.cmdline # doctest: +ELLIPSIS + 'antsApplyTransforms ... -n BSpline[3] -t affine.mat ...' + + >>> task = ApplyTransforms( + ... moving_image="moving.nii", + ... fixed_image="fixed.nii", + ... interpolator="Gaussian", + ... sigma=4.0, + ... alpha=1.0, + ... input_transforms=["affine.mat", "warp_field.nii.gz"], + ... invert_transforms=[True, False], + ... ) + >>> task.cmdline # doctest: +ELLIPSIS + 'antsApplyTransforms ... -n Gaussian[4.0,1.0] -t [affine.mat,1] -t [warp_field.nii.gz,0] ...' + """ + + @define(kw_only=True) + class InputSpec(ShellSpec): + dimensionality: int = field( + metadata={ + "help_string": "image dimensionality", + "argstr": "-d", + "allowed_values": {2, 3, 4}, + } + ) + + image_type: str = field( + default="scalar", + metadata={ + "help_string": ( + "specify the image type (0: scalar, 1: vector, 2: tensor, 3: time-series, 4: multichannel," + " 5: five-dimensional)" + ), + "argstr": "-e", + "allowed_values": {0, 1, 2, 3, 4, 5}, + }, + ) + + moving_image: PathLike = field( + metadata={"help_string": "moving image", "mandatory": True, "argstr": "-i"} + ) + + fixed_image: PathLike = field( + metadata={"help_string": "fixed image", "mandatory": True, "argstr": "-r"} + ) + + output_: str = field( + metadata={ + "help_string": "output parameter", + "readonly": True, + "formatter": _format_output, + } + ) + + output_image: str = field( + metadata={ + "help_string": "output image", + "output_file_template": "{moving_image}_warped", + } + ) + + save_warp_field: bool = field( + metadata={"help_string": "save composite warp field"} + ) + + output_warp_field: str = field( + metadata={ + "help_string": "output warp field", + "output_file_template": "{moving_image}_warpfield", + "requires": {"save_warp_field"}, + } + ) + + save_transform: bool = field( + metadata={"help_string": "save composite transform"} + ) + + output_transform: str = field( + metadata={ + "help_string": "output transform", + "output_file_template": "{moving_image}_affine.mat", + "keep_extension": False, + "requires": {"save_transform"}, + } + ) + + invert_transform: bool = field( + default=False, metadata={"help_string": "invert composite transform"} + ) + + interpolation_: str = field( + metadata={ + "help_string": "interpolation parameter", + "readonly": True, + "formatter": _format_interpolation, + } + ) + + interpolator: str = field( + default="Linear", + metadata={ + "help_string":
"interpolation method", + "allowed_values": { + "Linear", + "NearestNeighbor", + "Gaussian", + "BSpline", + "CosineWindowedSinc", + "WelchWindowedSinc", + "HammingWindowedSinc", + "LanczosWindowedSinc", + }, + }, + ) + + sigma: float = field( + default=1.0, metadata={"help_string": "sigma parameter interpolation"} + ) + + alpha: float = field( + default=1.0, metadata={"help_string": "alpha parameter for interpolation"} + ) + + order: int = field( + default=3, metadata={"help_string": "order parameter for interpolation"} + ) + + output_datatype: str = field( + metadata={ + "help_string": "force output image datatype", + "argstr": "-u", + "allowed_values": { + "char", + "uchar", + "short", + "int", + "float", + "double", + "default", + }, + } + ) + + input_transforms: Sequence[PathLike] = field( + metadata={ + "help_string": "input transforms to apply", + "formatter": lambda input_transforms, invert_transforms: ( + "" + if not input_transforms + else " ".join(f"-t {f}" for f in input_transforms) + if not invert_transforms + else " ".join( + f"-t [{f},{int(i)}]" + for f, i in zip(input_transforms, invert_transforms) + ) + ), + } + ) + + invert_transforms: Sequence[bool] = field( + metadata={ + "help_string": "which transforms to invert", + "requires": {"input_transforms"}, + } + ) + + default_value: float = field( + metadata={"help_string": "default voxel value", "argstr": "-f"} + ) + + use_float_precision: bool = field( + default=False, + metadata={ + "help_string": "use float precision instead of double", + "formatter": lambda use_float_precision: f"--float {use_float_precision:d}", + }, + ) + + verbose: bool = field( + default=False, + metadata={ + "help_string": "enable verbose output", + "formatter": lambda verbose: f"--verbose {verbose:d}", + }, + ) + + input_spec = SpecInfo(name="Input", bases=(InputSpec,)) + + executable = "antsApplyTransforms" diff --git a/pydra/tasks/ants/v2_5/bias_correction.py b/pydra/tasks/ants/v2_5/bias_correction.py new file mode 100644 index 0000000..ddbd3b4 --- /dev/null +++ b/pydra/tasks/ants/v2_5/bias_correction.py @@ -0,0 +1,142 @@ +__all__ = ["N4BiasFieldCorrection"] + +from os import PathLike +from typing import Sequence + +from attrs import define, field +from pydra.engine.specs import ShellSpec, SpecInfo +from pydra.engine.task import ShellCommandTask + + +class N4BiasFieldCorrection(ShellCommandTask): + """Task definition for N4BiasFieldCorrection. 
+ + Examples + -------- + >>> task = N4BiasFieldCorrection(input_image="input.nii") + >>> task.cmdline # doctest: +ELLIPSIS + 'N4BiasFieldCorrection -i input.nii -r 1 -s 4 -b [200,3] -c [50x50x50x50,0.0] -t [0.15,0.01,200] \ +-o .../input_corrected.nii' + """ + + @define(kw_only=True) + class InputSpec(ShellSpec): + dimensionality: int = field( + metadata={ + "help_string": "image dimensionality", + "argstr": "-d", + "allowed_values": {2, 3, 4}, + } + ) + + input_image: PathLike = field( + metadata={"help_string": "input image", "mandatory": True, "argstr": "-i"} + ) + + mask_image: PathLike = field( + metadata={"help_string": "mask image", "argstr": "-x"} + ) + + rescale_intensities: bool = field( + default=True, + metadata={ + "help_string": "rescale intensities", + "formatter": lambda rescale_intensities: f"-r {rescale_intensities:d}", + }, + ) + + weight_image: PathLike = field( + metadata={"help_string": "weight image", "argstr": "-w"} + ) + + shrink_factor: int = field( + default=4, metadata={"help_string": "shrink factor", "argstr": "-s"} + ) + + bspline_fitting_: str = field( + metadata={ + "help_string": "b-spline fitting parameter", + "argstr": "-b [{spline_distance},{spline_order}]", + "readonly": True, + } + ) + + spline_distance: float = field( + default=200, metadata={"help_string": "spline distance"} + ) + + spline_order: int = field(default=3, metadata={"help_string": "spline order"}) + + convergence_: str = field( + metadata={ + "help_string": "convergence parameters", + "readonly": True, + "formatter": lambda num_iterations, threshold: ( + "-c [{},{}]".format( + "x".join(str(i) for i in num_iterations), threshold + ) + ), + } + ) + + num_iterations: Sequence[int] = field( + default=(50, 50, 50, 50), metadata={"help_string": "number of iterations"} + ) + + threshold: float = field( + default=0.0, metadata={"help_string": "convergence threshold"} + ) + + histogram_sharpening_: str = field( + metadata={ + "help_string": "histogram sharpening parameter", + "argstr": "-t [{bias_field_fwhm},{wiener_filter_noise},{num_histogram_bins}]", + "readonly": True, + } + ) + + bias_field_fwhm: float = field( + default=0.15, metadata={"help_string": "Bias field FWHM"} + ) + + wiener_filter_noise: float = field( + default=0.01, metadata={"help_string": "Wiener filter noise"} + ) + + num_histogram_bins: int = field( + default=200, metadata={"help_string": "number of histogram bins"} + ) + + output_ = field( + metadata={ + "help_string": "output parameters", + "readonly": True, + "formatter": lambda output_image, save_bias_field, output_bias_field: ( + f"-o [{output_image},{output_bias_field}]" + if save_bias_field + else f"-o {output_image}" + ), + } + ) + + output_image: str = field( + metadata={ + "help_string": "output image", + "output_file_template": "{input_image}_corrected", + } + ) + + save_bias_field: bool = field( + default=False, metadata={"help_string": "save bias field"} + ) + + output_bias_field: str = field( + metadata={ + "help_string": "output bias field", + "output_file_template": "{input_image}_biasfield", + } + ) + + input_spec = SpecInfo(name="Input", bases=(InputSpec,)) + + executable = "N4BiasFieldCorrection" diff --git a/pydra/tasks/ants/v2_5/create_jacobian_determinant_image.py b/pydra/tasks/ants/v2_5/create_jacobian_determinant_image.py new file mode 100644 index 0000000..e70bc11 --- /dev/null +++ b/pydra/tasks/ants/v2_5/create_jacobian_determinant_image.py @@ -0,0 +1,65 @@ +__all__ = ["CreateJacobianDeterminantImage"] + +from os import PathLike + +from attrs 
import define, field +from pydra.engine.specs import ShellSpec, SpecInfo +from pydra.engine.task import ShellCommandTask + + +class CreateJacobianDeterminantImage(ShellCommandTask): + """Task definition for CreateJacobianDeterminantImage. + + Examples + -------- + >>> task = CreateJacobianDeterminantImage(dimensionality=3, warp_field="warp.nii.gz") + >>> task.cmdline # doctest: +ELLIPSIS + 'CreateJacobianDeterminantImage 3 warp.nii.gz .../warp_jac.nii.gz 0 0' + """ + + @define(kw_only=True) + class InputSpec(ShellSpec): + dimensionality: int = field( + metadata={ + "help_string": "image dimensionality", + "mandatory": True, + "argstr": "", + "allowed_values": {2, 3}, + } + ) + + warp_field: PathLike = field( + metadata={ + "help_string": "displacement field", + "mandatory": True, + "argstr": "", + } + ) + + output_image: str = field( + metadata={ + "help_string": "output image", + "argstr": "", + "output_file_template": "{warp_field}_jac", + } + ) + + calculate_log_jacobian: bool = field( + default=False, + metadata={ + "help_string": "calculate log jacobian", + "formatter": lambda calculate_log_jacobian: f"{calculate_log_jacobian:d}", + }, + ) + + calculate_geometric_jacobian: bool = field( + default=False, + metadata={ + "help_string": "calculate geometric jacobian", + "formatter": lambda calculate_geometric_jacobian: f"{calculate_geometric_jacobian:d}", + }, + ) + + input_spec = SpecInfo(name="Input", bases=(InputSpec,)) + + executable = "CreateJacobianDeterminantImage" diff --git a/pydra/tasks/ants/v2_5/registration.py b/pydra/tasks/ants/v2_5/registration.py new file mode 100644 index 0000000..cb8f276 --- /dev/null +++ b/pydra/tasks/ants/v2_5/registration.py @@ -0,0 +1,878 @@ +__all__ = ["Registration", "registration_syn", "registration_syn_quick"] + +from functools import partial +from os import PathLike +from pathlib import Path +from typing import Optional, Sequence + +from attrs import NOTHING, define, field +from pydra.engine.specs import File, ShellOutSpec, ShellSpec, SpecInfo +from pydra.engine.task import ShellCommandTask + + +class Registration(ShellCommandTask): + """Task definition for antsRegistration.""" + + @define(kw_only=True) + class InputSpec(ShellSpec): + dimensionality: int = field( + default=3, + metadata={ + "help_string": "image dimensionality", + "argstr": "-d", + "allowed_values": {2, 3, 4}, + }, + ) + + fixed_image: PathLike = field( + metadata={"help_string": "fixed image", "mandatory": True} + ) + + moving_image: PathLike = field( + metadata={"help_string": "moving image", "mandatory": True} + ) + + output_: str = field( + metadata={ + "help_string": "output parameter", + "readonly": True, + "formatter": lambda output_transform_prefix, warped_image, inverse_warped_image: ( + "-o {}".format( + f"[{output_transform_prefix},{warped_image},{inverse_warped_image}]" + if all([warped_image, inverse_warped_image]) + else output_transform_prefix + ) + ), + } + ) + + output_transform_prefix: str = field( + default="output", metadata={"help_string": "output transform prefix"} + ) + + warped_image: str = field( + metadata={ + "help_string": "warped moving image to fixed image space", + "output_file_template": "{moving_image}_warped", + } + ) + + inverse_warped_image: str = field( + metadata={ + "help_string": "warped fixed image to moving image space", + "output_file_template": "{fixed_image}_warped", + } + ) + + initialize_transforms_per_stage: bool = field( + default=False, + metadata={ + "help_string": "initialize linear transforms from the previous stage", + 
"formatter": lambda initialize_transforms_per_stage: f"-i {initialize_transforms_per_stage:d}", + }, + ) + + interpolation_: str = field( + metadata={ + "help_string": "interpolation parameter", + "readonly": True, + "formatter": lambda interpolator, sigma, alpha, order: ( + "-n {}{}".format( + interpolator, + f"[{sigma},{alpha}]" + if interpolator == "Gaussian" + else f"[{order}]" + if interpolator == "BSpline" + else "", + ) + ), + } + ) + + interpolator: str = field( + default="Linear", + metadata={ + "help_string": "choice of interpolator", + "allowed_values": { + "Linear", + "NearestNeighbor", + "Gaussian", + "BSpline", + "CosineWindowedSinc", + "WelchWindowedSinc", + "HammingWindowedSinc", + "LanczosWindowedSinc", + }, + }, + ) + + sigma: float = field( + default=1.0, metadata={"help_string": "sigma parameter for interpolation"} + ) + + alpha: float = field( + default=1.0, metadata={"help_string": "alpha parameter for interpolation"} + ) + + order: int = field( + default=3, metadata={"help_string": "order parameter for interpolation"} + ) + + masks_: str = field( + metadata={ + "help_string": "masks parameter", + "readonly": True, + "formatter": lambda fixed_mask, moving_mask: ( + f"-x [{fixed_mask or 'NULL'},{moving_mask or 'NULL'}]" + if any([fixed_mask, moving_mask]) + else "" + ), + } + ) + + fixed_mask: PathLike = field( + metadata={"help_string": "mask applied to the fixed image"} + ) + + moving_mask: PathLike = field( + metadata={"help_string": "mask applied to the moving image"} + ) + + use_histogram_matching: bool = field( + default=False, + metadata={ + "help_string": "use histogram matching", + "formatter": lambda use_histogram_matching: f"-u {use_histogram_matching:d}", + }, + ) + + winsorize_image_intensities: bool = field( + default=False, + metadata={ + "help_string": "winsorize image intensities", + "formatter": lambda winsorize_image_intensities, lower_quantile, upper_quantile: ( + f"-w [{lower_quantile},{upper_quantile}]" + if winsorize_image_intensities + else "" + ), + }, + ) + + lower_quantile: float = field( + default=0.0, metadata={"help_string": "lower quantile"} + ) + + upper_quantile: float = field( + default=1.0, metadata={"help_string": "upper quantile"} + ) + + initial_fixed_transforms: Sequence[PathLike] = field( + metadata={ + "help_string": "initialize composite fixed transform with these transforms", + "formatter": lambda initial_fixed_transforms, invert_fixed_transforms: ( + "" + if not initial_fixed_transforms + else " ".join(f"-q {x}" for x in initial_fixed_transforms) + if not invert_fixed_transforms + else " ".join( + f"-q [{x},{y:d}]" + for x, y in zip( + initial_fixed_transforms, invert_fixed_transforms + ) + ) + ), + } + ) + + invert_fixed_transforms: Sequence[bool] = field( + metadata={ + "help_string": "specify which initial fixed transforms to invert", + "requires": {"initial_fixed_transforms"}, + } + ) + + initial_moving_transforms: Sequence[PathLike] = field( + metadata={ + "help_string": "initialize composite moving transform with these transforms", + "formatter": lambda initial_moving_transforms, invert_moving_transforms, fixed_image, moving_image: ( + f"-r [{fixed_image},{moving_image},1]" + if not initial_moving_transforms + else " ".join(f"-r {x}" for x in initial_moving_transforms) + if not invert_moving_transforms + else " ".join( + f"-r [{x},{y:d}]" + for x, y in zip( + initial_moving_transforms, invert_moving_transforms + ) + ) + ), + } + ) + + invert_moving_transforms: Sequence[bool] = field( + metadata={ + "help_string": 
"specify which initial moving transforms to invert", + "requires": {"initial_moving_transforms"}, + } + ) + + enable_rigid_stage = field( + default=True, metadata={"help_string": "enable rigid registration stage"} + ) + + rigid_transform_type: str = field( + default="Rigid", + metadata={ + "help_string": "transform type for rigid stage", + "allowed_values": {"Rigid", "Translation"}, + "formatter": lambda enable_rigid_stage, rigid_transform_type, rigid_gradient_step: ( + f"-t {rigid_transform_type}[{rigid_gradient_step}]" + if enable_rigid_stage + else "" + ), + }, + ) + + rigid_gradient_step: bool = field( + default=0.1, metadata={"help_string": "gradient step for rigid stage"} + ) + + rigid_metric: str = field( + metadata={ + "help_string": "rigid metric parameter", + "allowed_values": {"CC", "MI", "Mattes", "MeanSquares", "Demons", "GC"}, + "formatter": lambda enable_rigid_stage, rigid_metric, fixed_image, moving_image, rigid_radius, rigid_num_bins, rigid_sampling_strategy, rigid_sampling_rate: ( + "-m {}[{},{},1,{},{},{}]".format( + rigid_metric, + fixed_image, + moving_image, + rigid_num_bins + if rigid_metric in {"MI", "Mattes"} + else rigid_radius, + rigid_sampling_strategy, + rigid_sampling_rate, + ) + if enable_rigid_stage + else "" + ), + } + ) + + rigid_radius: int = field( + default=4, metadata={"help_string": "radius for rigid stage"} + ) + + rigid_num_bins: int = field( + default=32, metadata={"help_string": "number of bins for rigid stage"} + ) + + rigid_sampling_strategy: str = field( + default="None", + metadata={ + "help_string": "sampling strategy for rigid stage", + "allowed_values": {"None", "Regular", "Random"}, + }, + ) + + rigid_sampling_rate: float = field( + default=1.0, metadata={"help_string": "sampling rate for rigid stage"} + ) + + rigid_convergence_: Sequence[int] = field( + metadata={ + "help_string": "convergence parameter for rigid stage", + "readonly": True, + "formatter": lambda enable_rigid_stage, rigid_num_iterations, rigid_threshold, rigid_window_size: ( + "-c [{},{},{}]".format( + "x".join(str(c) for c in rigid_num_iterations), + rigid_threshold, + rigid_window_size, + ) + if enable_rigid_stage + else "" + ), + }, + ) + + rigid_num_iterations: Sequence[int] = field( + default=(1000, 500, 250, 0), + metadata={"help_string": "number of iterations for rigid stage"}, + ) + + rigid_threshold: float = field( + default=1e-6, + metadata={"help_string": "convergence threshold for rigid stage"}, + ) + + rigid_window_size: int = field( + default=10, + metadata={"help_string": "convergence window size for rigid stage"}, + ) + + rigid_shrink_factors: Sequence[int] = field( + default=(8, 4, 2, 1), + metadata={ + "help_string": "shrink factors for rigid stage", + "formatter": lambda enable_rigid_stage, rigid_shrink_factors: ( + "-f {}".format("x".join(str(f) for f in rigid_shrink_factors)) + if enable_rigid_stage + else "" + ), + }, + ) + + rigid_smoothing_sigmas: Sequence[int] = field( + default=(3, 2, 1, 0), + metadata={ + "help_string": "smoothing sigmas for rigid stage", + "formatter": lambda enable_rigid_stage, rigid_smoothing_sigmas, rigid_smoothing_units: ( + "-s {}{}".format( + "x".join(str(s) for s in rigid_smoothing_sigmas), + rigid_smoothing_units, + ) + if enable_rigid_stage + else "" + ), + }, + ) + + rigid_smoothing_units: str = field( + default="vox", + metadata={ + "help_string": "smoothing units for rigid stage", + "allowed_values": {"vox", "mm"}, + }, + ) + + enable_affine_stage: bool = field( + default=True, metadata={"help_string": "enable affine 
registration stage"} + ) + + affine_transform_type: str = field( + default="Affine", + metadata={ + "help_string": "transform type for affine stage", + "allowed_values": {"Affine", "CompositeAffine", "Similarity"}, + "formatter": lambda enable_affine_stage, affine_transform_type, affine_gradient_step: ( + f"-t {affine_transform_type}[{affine_gradient_step}]" + if enable_affine_stage + else "" + ), + }, + ) + + affine_gradient_step: bool = field( + default=0.1, metadata={"help_string": "gradient step for affine stage"} + ) + + affine_metric: str = field( + metadata={ + "help_string": "metric parameter for affine stage", + "allowed_values": {"CC", "MI", "Mattes", "MeanSquares", "Demons", "GC"}, + "formatter": lambda enable_affine_stage, affine_metric, fixed_image, moving_image, affine_radius, affine_num_bins, affine_sampling_strategy, affine_sampling_rate: ( + "-m {}[{},{},1,{},{},{}]".format( + affine_metric, + fixed_image, + moving_image, + affine_num_bins + if affine_metric in {"MI", "Mattes"} + else affine_radius, + affine_sampling_strategy, + affine_sampling_rate, + ) + if enable_affine_stage + else "" + ), + } + ) + + affine_radius: int = field( + default=4, metadata={"help_string": "radius for affine stage"} + ) + + affine_num_bins: int = field( + default=32, metadata={"help_string": "number of bins for affine stage"} + ) + + affine_sampling_strategy: str = field( + default="None", + metadata={ + "help_string": "sampling strategy for affine stage", + "allowed_values": {"None", "Regular", "Random"}, + }, + ) + + affine_sampling_rate: float = field( + default=1.0, metadata={"help_string": "sampling rate for affine stage"} + ) + + affine_convergence_: Sequence[int] = field( + metadata={ + "help_string": "convergence parameter for affine stage", + "readonly": True, + "formatter": lambda enable_affine_stage, affine_num_iterations, affine_threshold, affine_window_size: ( + "-c [{},{},{}]".format( + "x".join(str(c) for c in affine_num_iterations), + affine_threshold, + affine_window_size, + ) + if enable_affine_stage + else "" + ), + }, + ) + + affine_num_iterations: Sequence[int] = field( + default=(1000, 500, 250, 0), + metadata={"help_string": "number of iterations for affine stage"}, + ) + + affine_threshold: float = field( + default=1e-6, + metadata={"help_string": "convergence threshold for affine stage"}, + ) + + affine_window_size: int = field( + default=10, + metadata={"help_string": "convergence window size for affine stage"}, + ) + + affine_shrink_factors: Sequence[int] = field( + default=(8, 4, 2, 1), + metadata={ + "help_string": "shrink factors for affine stage", + "formatter": lambda enable_affine_stage, affine_shrink_factors: ( + "-f {}".format("x".join(str(f) for f in affine_shrink_factors)) + if enable_affine_stage + else "" + ), + }, + ) + + affine_smoothing_sigmas: Sequence[int] = field( + default=(3, 2, 1, 0), + metadata={ + "help_string": "smoothing sigmas for affine stage", + "formatter": lambda enable_affine_stage, affine_smoothing_sigmas, affine_smoothing_units: ( + "-s {}{}".format( + "x".join(str(s) for s in affine_smoothing_sigmas), + affine_smoothing_units, + ) + if enable_affine_stage + else "" + ), + }, + ) + + affine_smoothing_units: str = field( + default="vox", + metadata={ + "help_string": "smoothing units for affine stage", + "allowed_values": {"vox", "mm"}, + }, + ) + + enable_syn_stage: str = field( + default=True, metadata={"help_string": "enable SyN registration stage"} + ) + + syn_transform_type: str = field( + default="Syn", + metadata={ + 
"help_string": "transform type for SyN stage", + "allowed_values": {"GaussianDisplacementField", "SyN", "BSplineSyN"}, + "formatter": lambda enable_syn_stage, syn_transform_type, syn_gradient_step, syn_flow_sigma, syn_total_sigma, syn_spline_distance, syn_spline_order: ( + "-t {}[{}]".format( + syn_transform_type, + f"{syn_gradient_step},{syn_spline_distance},0,{syn_spline_order}" + if syn_transform_type == "BSplineSyn" + else f"{syn_gradient_step},{syn_flow_sigma},{syn_total_sigma}", + ) + if enable_syn_stage + else "" + ), + }, + ) + + syn_gradient_step: bool = field( + default=0.1, metadata={"help_string": "gradient step for SyN stage"} + ) + + syn_flow_sigma: float = field( + default=3, metadata={"help_string": "sigma for flow field in SyN stage"} + ) + + syn_total_sigma: float = field( + default=0, metadata={"help_string": "sigma for total field in SyN stage"} + ) + + syn_spline_distance: int = field( + default=26, metadata={"help_string": "spline distance for SyN stage"} + ) + + syn_spline_order: int = field( + default=3, metadata={"help_string": "spline order for SyN stage"} + ) + + syn_metric: str = field( + default="MI", + metadata={ + "help_string": "metric for SyN stage", + "allowed_values": {"CC", "MI", "Mattes", "MeanSquares", "Demons", "GC"}, + "formatter": lambda enable_syn_stage, syn_metric, fixed_image, moving_image, syn_radius, syn_num_bins, syn_sampling_strategy, syn_sampling_rate: ( + "-m {}[{},{},1,{},{},{}]".format( + syn_metric, + fixed_image, + moving_image, + syn_num_bins if syn_metric in {"MI", "Mattes"} else syn_radius, + syn_sampling_strategy, + syn_sampling_rate, + ) + if enable_syn_stage + else "" + ), + }, + ) + + syn_radius: int = field( + default=4, metadata={"help_string": "radius for SyN stage"} + ) + + syn_num_bins: int = field( + default=32, metadata={"help_string": "number of bins for SyN stage"} + ) + + syn_sampling_strategy: str = field( + default="None", + metadata={ + "help_string": "sampling strategy for SyN stage", + "allowed_values": {"None", "Regular", "Random"}, + }, + ) + + syn_sampling_rate: float = field( + default=1.0, metadata={"help_string": "sampling rate for SyN stage"} + ) + + syn_convergence_: str = field( + metadata={ + "help_string": "convergence parameter for SyN stage", + "readonly": True, + "formatter": lambda enable_syn_stage, syn_num_iterations, syn_threshold, syn_window_size: ( + "-c [{},{},{}]".format( + "x".join(str(c) for c in syn_num_iterations), + syn_threshold, + syn_window_size, + ) + if enable_syn_stage + else "" + ), + }, + ) + + syn_num_iterations: Sequence[int] = field( + default=(100, 70, 50, 20), + metadata={"help_string": "number of iterations for SyN stage"}, + ) + + syn_threshold: float = field( + default=1e-6, + metadata={"help_string": "convergence threshold for SyN stage"}, + ) + + syn_window_size: int = field( + default=10, + metadata={"help_string": "convergence window size for SyN stage"}, + ) + + syn_shrink_factors: Sequence[int] = field( + default=(8, 4, 2, 1), + metadata={ + "help_string": "shrink factors for SyN stage", + "formatter": lambda enable_syn_stage, syn_shrink_factors: ( + "-f {}".format("x".join(str(f) for f in syn_shrink_factors)) + if enable_syn_stage + else "" + ), + }, + ) + + syn_smoothing_sigmas: Sequence[int] = field( + default=(3, 2, 1, 0), + metadata={ + "help_string": "smoothing sigmas for SyN stage", + "formatter": lambda enable_syn_stage, syn_smoothing_sigmas, syn_smoothing_units: ( + "-s {}{}".format( + "x".join(str(s) for s in syn_smoothing_sigmas), + syn_smoothing_units, + ) 
+ if enable_syn_stage + else "" + ), + }, + ) + + syn_smoothing_units: str = field( + default="vox", + metadata={ + "help_string": "smoothing units for SyN stage", + "allowed_values": {"vox", "mm"}, + }, + ) + + use_float_precision: bool = field( + default=False, + metadata={ + "help_string": "use float precision instead of double", + "formatter": lambda use_float_precision: f"--float {use_float_precision:d}", + }, + ) + + use_minc_format: bool = field( + default=False, + metadata={ + "help_string": "save output transforms to MINC format", + "formatter": lambda use_minc_format: f"--minc {use_minc_format:d}", + }, + ) + + random_seed: int = field( + metadata={"help_string": "random seed", "argstr": "--random-seed"} + ) + + verbose: bool = field( + default=False, + metadata={ + "help_string": "enable verbose output", + "formatter": lambda verbose: f"--verbose {verbose:d}", + }, + ) + + input_spec = SpecInfo(name="Input", bases=(InputSpec,)) + + @define(kw_only=True) + class OutputSpec(ShellOutSpec): + affine_transform: File = field( + metadata={ + "help_string": "affine transform", + "callable": lambda output_transform_prefix, use_minc_format: ( + Path.cwd() + / "{}0GenericAffine{}".format( + output_transform_prefix, ".xfm" if use_minc_format else ".mat" + ) + ), + } + ) + + warp_field: File = field( + metadata={ + "help_string": "warp field from moving to fixed image space", + "output_file_template": "{output_transform_prefix}1Warp.nii.gz", + } + ) + + inverse_warp_field: File = field( + metadata={ + "help_string": "warp field from fixed to moving image space", + "output_file_template": "{output_transform_prefix}1InverseWarp.nii.gz", + } + ) + + output_spec = SpecInfo(name="Output", bases=(OutputSpec,)) + + executable = "antsRegistration" + + +def registration_syn( + dimensionality: int, + fixed_image: PathLike, + moving_image: PathLike, + output_prefix: str = "output", + transform_type: str = "s", + num_bins: int = 32, + gradient_step: float = 0.1, + radius: int = 4, + spline_distance: int = 26, + fixed_mask: Optional[PathLike] = None, + moving_mask: Optional[PathLike] = None, + use_float_precision: bool = False, + use_minc_format: bool = False, + use_histogram_matching: bool = False, + reproducible: bool = False, + random_seed: Optional[int] = None, + verbose: bool = False, + large: bool = False, + quick: bool = False, + **kwargs, +) -> Registration: + """Returns a task for SyN registration. + + This function instantiates a SyN registration task with parameters mimicking the `antsRegistrationSyn` scripts + provided by ANTs. + + Parameters + ---------- + dimensionality : {2, 3, 4} + Image dimensionality. + fixed_image : path_like + Fixed image, also referred to as source image. + moving_image : path_like + Moving image, also referred to as target image. + output_prefix : str, default="output" + Prefix prepended to all output files. + transform_type : {"t", "r", "a", "s", "sr", "so", "b", "br", "bo"}, default="s" + Type of transform for the registration: + * t: Translation only + * r: Rigid only + * a: Rigid + Affine + * s: Rigid + Affine + SyN + * sr: Rigid + SyN + * so: SyN only + * b: Rigid + Affine + BSplineSyn + * br: Rigid + BSplineSyn + * bo: BSplineSyn only + num_bins : int, default=32 + Number of histogram bins for the MI metric in SyN stage. + gradient_step : float, default=0.1 + Gradient step size for the CC metric in SyN stage. + radius : int, default=4 + Radius for the CC metric used in SyN stage. 
+ spline_distance : int, default=26 + Spline distance for deformable B-splines in SyN stage. + fixed_mask : path_like, optional + Mask applied to the fixed image space. + moving_mask : path_like, optional + Mask applied to the moving image space. + use_float_precision : bool, default=False + Use float precision for computation instead of double. + use_minc_format: bool, default=False + Save output transforms to MINC format. + use_histogram_matching : bool, default=True + Perform histogram matching prior to registration. + reproducible : bool, default=False + Use a reproducible set of parameters, + i.e. `GC` metric for linear stages and `CC` for SyN. + Random seed should be specified or else a fixed value of 1 is used. + random_seed : int, optional + Specify a custom random seed for reproducibility. + verbose : bool, default=False + Enable verbose logging. + large : bool, default=False + Use a set of parameters optimized for large images. + ANTs considers input images to be "large" if any dimension is over 256. + quick : bool, default=False + Use a set of parameters optimized for faster convergence. + **kwargs : dict, optional + Extra arguments passed to the task constructor. + + Returns + ------- + Registration + The configured registration task. + + See Also + -------- + pydra.tasks.ants.registration.registration_syn_quick : + Same as `registration_syn` with `quick` enabled. + + Examples + -------- + >>> task = registration_syn( + ... dimensionality=3, + ... fixed_image="reference.nii.gz", + ... moving_image="structural.nii.gz", + ... ) + >>> task.cmdline + 'antsRegistration -d 3 -o [output,outputWarped.nii.gz,outputInverseWarped.nii.gz] -i 0 -n Linear \ +-u 0 -w [0.005,0.995] -r [reference.nii.gz,structural.nii.gz,1] -t Rigid[0.1] \ +-m MI[reference.nii.gz,structural.nii.gz,1,32,Regular,0.25] -c [1000x500x250x100,1e-06,10] -f 8x4x2x1 \ +-s 3x2x1x0vox -t Affine[0.1] -m MI[reference.nii.gz,structural.nii.gz,1,32,Regular,0.25] \ +-c [1000x500x250x100,1e-06,10] -f 8x4x2x1 -s 3x2x1x0vox -t Syn[0.1,3,0] \ +-m MI[reference.nii.gz,structural.nii.gz,1,32,None,1.0] -c [100x70x50x20,1e-06,10] -f 8x4x2x1 \ +-s 3x2x1x0vox --float 0 --minc 0 --verbose 0' + + >>> task = registration_syn( + ... dimensionality=3, + ... fixed_image="reference.nii.gz", + ... moving_image="structural.nii.gz", + ... large=True, + ... ) + >>> task.cmdline # doctest: +ELLIPSIS + 'antsRegistration ... -c [1000x500x250x100,1e-06,10] -f 12x8x4x2 -s 4x3x2x1vox ... \ +-c [1000x500x250x100,1e-06,10] -f 12x8x4x2 -s 4x3x2x1vox ... \ +-c [100x100x70x50x20,1e-06,10] -f 10x6x4x2x1 -s 5x3x2x1x0vox ...' + + >>> task = registration_syn( + ... dimensionality=3, + ... fixed_image="reference.nii.gz", + ... moving_image="structural.nii.gz", + ... reproducible=True, + ... ) + >>> task.cmdline # doctest: +ELLIPSIS + 'antsRegistration ... -m GC[...] ... -m GC[...] ... -m CC[...] ...' + + >>> task = registration_syn( + ... dimensionality=3, + ... fixed_image="reference.nii.gz", + ... moving_image="structural.nii.gz", + ... quick=True, + ... ) + >>> task.cmdline # doctest: +ELLIPSIS + 'antsRegistration ... -c [1000x500x250x0,...] ... -c [1000x500x250x0,...] ... -c [100x70x50x0,...] ...' 
+ """ + return Registration( + dimensionality=dimensionality, + fixed_image=fixed_image, + moving_image=moving_image, + output_transform_prefix=output_prefix, + warped_image=f"{output_prefix}Warped.nii.gz", + inverse_warped_image=f"{output_prefix}InverseWarped.nii.gz", + fixed_mask=fixed_mask or NOTHING, + moving_mask=moving_mask or NOTHING, + winsorize_image_intensities=True, + lower_quantile=0.005, + upper_quantile=0.995, + enable_rigid_stage=transform_type not in {"bo", "so"}, + rigid_transform_type="Translation" if transform_type == "t" else "Rigid", + rigid_metric="GC" if reproducible else "MI", + rigid_radius=1, + rigid_num_bins=32, + rigid_sampling_strategy="Regular", + rigid_sampling_rate=0.25, + rigid_num_iterations=(1000, 500, 250, 0 if quick else 100), + rigid_shrink_factors=(12, 8, 4, 2) if large else (8, 4, 2, 1), + rigid_smoothing_sigmas=(4, 3, 2, 1) if large else (3, 2, 1, 0), + enable_affine_stage=transform_type in {"a", "b", "s"}, + affine_transform_type="Affine", + affine_metric="GC" if reproducible else "MI", + affine_radius=1, + affine_num_bins=32, + affine_sampling_strategy="Regular", + affine_sampling_rate=0.25, + affine_num_iterations=(1000, 500, 250, 0 if quick else 100), + affine_shrink_factors=(12, 8, 4, 2) if large else (8, 4, 2, 1), + affine_smoothing_sigmas=(4, 3, 2, 1) if large else (3, 2, 1, 0), + enable_syn_stage=transform_type[0] in {"b", "s"}, + syn_transform_type="BSplineSyn" if transform_type[0] == "b" else "Syn", + syn_gradient_step=gradient_step, + syn_spline_distance=spline_distance, + syn_metric="CC" if reproducible else "MI", + syn_radius=radius, + syn_num_bins=num_bins, + syn_num_iterations=( + (100, 100, 70, 50, 0 if quick else 20) + if large + else (100, 70, 50, 0 if quick else 20) + ), + syn_shrink_factors=(10, 6, 4, 2, 1) if large else (8, 4, 2, 1), + syn_smoothing_sigmas=(5, 3, 2, 1, 0) if large else (3, 2, 1, 0), + use_histogram_matching=use_histogram_matching, + use_float_precision=use_float_precision, + use_minc_format=use_minc_format, + random_seed=random_seed or (1 if reproducible else NOTHING), + verbose=verbose, + **kwargs, + ) + + +registration_syn_quick = partial(registration_syn, quick=True) diff --git a/pyproject.toml b/pyproject.toml index 2db0ba8..ebf2a10 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -44,6 +44,7 @@ doc = [ "sphinxcontrib-versioning", ] test = [ + "nipype2pydra", "pytest >= 4.4.0", "pytest-cov", "pytest-env", diff --git a/related-packages/fileformats-extras/README.rst b/related-packages/fileformats-extras/README.rst index d293494..d875e79 100644 --- a/related-packages/fileformats-extras/README.rst +++ b/related-packages/fileformats-extras/README.rst @@ -1,12 +1,14 @@ -FileFormats-ants Extras -====================================== +FileFormats-medimage-ants Extras +================================ + .. image:: https://github.com/nipype/pydra-freesurfer/actions/workflows/ci-cd.yaml/badge.svg :target: https://github.com/nipype/pydra-freesurfer/actions/workflows/ci-cd.yaml -This is a extras module for the `fileformats-ants `__ +This is a extras module for the `fileformats-medimage-ants `__ fileformats extension package, which provides additional functionality to format classes (i.e. aside -from basic identification and validation), such as conversion tools, metadata parsers, test data generators, etc... +from basic identification and validation), such as conversion tools, metadata parsers, +sample data generators, etc... 
Quick Installation @@ -14,9 +16,10 @@ Quick Installation This extension can be installed for Python 3 using *pip*:: - $ pip3 install fileformats-ants-extras + $ pip3 install fileformats-medimage-ants-extras -This will install the core package and any other dependencies +This will install the package, base packages, and any other dependencies required to +implement the extra functionality. License ------- diff --git a/related-packages/fileformats-extras/fileformats/extras/medimage_ants/__init__.py b/related-packages/fileformats-extras/fileformats/extras/medimage_ants/__init__.py index 5b445d8..9328bb7 100644 --- a/related-packages/fileformats-extras/fileformats/extras/medimage_ants/__init__.py +++ b/related-packages/fileformats-extras/fileformats/extras/medimage_ants/__init__.py @@ -1,7 +1,5 @@ +from ._version import __version__ # noqa: F401 from pathlib import Path import typing as ty from random import Random -from fileformats.core import FileSet -from fileformats.medimage_ants import ( -) - +from fileformats.core import FileSet, SampleFileGenerator diff --git a/related-packages/fileformats-extras/pyproject.toml b/related-packages/fileformats-extras/pyproject.toml index 9f2e11a..c9f4d75 100644 --- a/related-packages/fileformats-extras/pyproject.toml +++ b/related-packages/fileformats-extras/pyproject.toml @@ -7,22 +7,11 @@ name = "fileformats-medimage-ants-extras" description = "Extensions to add functionality to tool-specific *fileformats* classes" readme = "README.rst" requires-python = ">=3.8" -dependencies = [ - "fileformats >= 0.7", - "fileformats-medimage-ants", - "pydra >= 0.22.0" -] -license = {file = "LICENSE"} -authors = [ - {name = "Thomas G. Close", email = "tom.g.close@gmail.com"}, -] -maintainers = [ - {name = "Thomas G. Close", email = "tom.g.close@gmail.com"}, -] -keywords = [ - "file formats", - "data", -] +dependencies = ["fileformats", "fileformats-medimage-ants", "pydra >= 0.23.0a"] +license = { file = "LICENSE" } +authors = [{ name = "Thomas G. Close", email = "tom.g.close@gmail.com" }] +maintainers = [{ name = "Thomas G. Close", email = "tom.g.close@gmail.com" }] +keywords = ["file formats", "data"] classifiers = [ "Development Status :: 3 - Alpha", "Environment :: Console", @@ -40,22 +29,10 @@ classifiers = [ dynamic = ["version"] [project.optional-dependencies] -dev = [ - "black", - "pre-commit", - "codespell", - "flake8", - "flake8-pyproject", -] -test = [ - "pytest >=6.2.5", - "pytest-env>=0.6.2", - "pytest-cov>=2.12.1", - "codecov", -] +dev = ["black", "pre-commit", "codespell", "flake8", "flake8-pyproject"] +test = ["pytest >=6.2.5", "pytest-env>=0.6.2", "pytest-cov>=2.12.1", "codecov"] -converters = [ -] +converters = [] [project.urls] repository = "https://github.com/nipype/pydra-ants" @@ -79,9 +56,7 @@ ignore-words = ".codespell-ignorewords" [tool.flake8] doctests = true -per-file-ignores = [ - "__init__.py:F401" -] +per-file-ignores = ["__init__.py:F401"] max-line-length = 88 select = "C,E,F,W,B,B950" extend-ignore = ['E203', 'E501', 'E129'] diff --git a/related-packages/fileformats/README.rst b/related-packages/fileformats/README.rst index 4d94531..92a3c8f 100644 --- a/related-packages/fileformats/README.rst +++ b/related-packages/fileformats/README.rst @@ -1,22 +1,11 @@ -How to customise this template -============================== +FileFormats-medimage-ants +========================= -#. Rename the `related-packages/fileformats/ants` directory to the name of the fileformats subpackage (e.g. `medimage_fsl`) -#. 
Search and replace "ants" with the name of the fileformats subpackage the extras are to be added -#. Replace name + email placeholders in `pyproject.toml` for developers and maintainers -#. Add the extension file-format classes -#. Ensure that all the extension file-format classes are imported into the extras package root, i.e. `fileformats/ants` -#. Delete these instructions - -... - -FileFormats Extension - ants -==================================== .. image:: https://github.com/nipype/pydra-ants/actions/workflows/ci-cd.yml/badge.svg :target: https://github.com/nipype/pydra-ants/actions/workflows/ci-cd.yml -This is the "ants" extension module for the -`fileformats `__ package +This is an extension module of the `fileformats `__ +package for defining file formats that are specific to the ANTs software toolkit. Quick Installation @@ -24,9 +13,9 @@ Quick Installation This extension can be installed for Python 3 using *pip*:: - $ pip3 install fileformats-ants + $ pip3 install fileformats-medimage-ants -This will install the core package and any other dependencies +This will install the format extensions and dependent base packages. License ------- diff --git a/related-packages/fileformats/fileformats/medimage_ants/__init__.py b/related-packages/fileformats/fileformats/medimage_ants/__init__.py index 10bd9c0..3a8d6d5 100644 --- a/related-packages/fileformats/fileformats/medimage_ants/__init__.py +++ b/related-packages/fileformats/fileformats/medimage_ants/__init__.py @@ -1 +1 @@ -from fileformats.generic import File \ No newline at end of file +from ._version import __version__ # noqa: F401 diff --git a/related-packages/fileformats/pyproject.toml b/related-packages/fileformats/pyproject.toml index fc24a31..8a05608 100644 --- a/related-packages/fileformats/pyproject.toml +++ b/related-packages/fileformats/pyproject.toml @@ -7,21 +7,11 @@ name = "fileformats-medimage-ants" description = "Classes for representing different file formats in Python classes for use in type hinting in data workflows" readme = "README.rst" requires-python = ">=3.8" -dependencies = [ - "fileformats >= 0.4", - "fileformats-medimage > = 0.2" -] -license = {file = "LICENSE"} -authors = [ - {name = "Thomas G. Close", email = "tom.g.close@gmail.com"}, -] -maintainers = [ - {name = "Thomas G. Close", email = "tom.g.close@gmail.com"}, -] -keywords = [ - "file formats", - "data", -] +dependencies = ["fileformats", "fileformats-medimage"] +license = { file = "LICENSE" } +authors = [{ name = "Thomas G. Close", email = "tom.g.close@gmail.com" }] +maintainers = [{ name = "Thomas G. Close", email = "tom.g.close@gmail.com" }] +keywords = ["file formats", "data"] classifiers = [ "Development Status :: 3 - Alpha", "Environment :: Console", @@ -39,13 +29,7 @@ classifiers = [ dynamic = ["version"] [project.optional-dependencies] -dev = [ - "black", - "pre-commit", - "codespell", - "flake8", - "flake8-pyproject", -] +dev = ["black", "pre-commit", "codespell", "flake8", "flake8-pyproject"] test = [ "pytest >=6.2.5", "pytest-env>=0.6.2", @@ -76,9 +60,7 @@ ignore-words = ".codespell-ignorewords" [tool.flake8] doctests = true -per-file-ignores = [ - "__init__.py:F401" -] +per-file-ignores = ["__init__.py:F401"] max-line-length = 88 select = "C,E,F,W,B,B950" extend-ignore = ['E203', 'E501', 'E129']
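
As a minimal usage sketch of the new `pydra.tasks.ants.v2_5` task definitions added in this patch (assuming this branch and pydra are installed; the image and transform file names are placeholders), the tasks can be constructed and their command lines inspected without ANTs being present, since `cmdline` only renders the command:

# Construct tasks from the handwritten v2_5 module and print the rendered commands.
from pydra.tasks.ants.v2_5 import ApplyTransforms, N4BiasFieldCorrection, registration_syn

# antsApplyTransforms with a B-spline interpolator and a single affine transform
xfm = ApplyTransforms(
    moving_image="moving.nii",
    fixed_image="fixed.nii",
    interpolator="BSpline",
    input_transforms=["affine.mat"],
)
print(xfm.cmdline)

# N4 bias-field correction with the spec defaults (shrink factor 4, 50x50x50x50 iterations)
n4 = N4BiasFieldCorrection(input_image="input.nii")
print(n4.cmdline)

# Helper mirroring antsRegistrationSyN.sh: rigid + affine + SyN stages by default
reg = registration_syn(
    dimensionality=3,
    fixed_image="reference.nii.gz",
    moving_image="structural.nii.gz",
)
print(reg.cmdline)

# Actually running any of these tasks, e.g. result = xfm(), requires the
# corresponding ANTs executables (antsApplyTransforms, N4BiasFieldCorrection,
# antsRegistration) to be available on PATH.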