diff --git a/.github/workflows/ci-cd.yaml b/.github/workflows/ci-cd.yaml
index 7333d04..4cb8dfb 100644
--- a/.github/workflows/ci-cd.yaml
+++ b/.github/workflows/ci-cd.yaml
@@ -9,30 +9,41 @@ name: CI/CD
on:
push:
branches: [ main, develop ]
- tags: [ '*' ]
pull_request:
branches: [ main, develop ]
+ release:
+ types: [published]
repository_dispatch:
- types: [create-release]
+ types: [create-post-release]
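+    # (dispatched by upstream nipype2pydra updates to cut a new post-release)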
+
+env: # Workflow-wide environment variables
+ ANTS_VERSION: v2.5.1
jobs:
nipype-conv:
runs-on: ubuntu-latest
steps:
+
- name: Checkout
- uses: actions/checkout@v3
- - name: Revert version to most recent tag on upstream update
+ uses: actions/checkout@v4
+
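+      # Select the latest v* tag and strip any trailing "post<N>" suffix (awk -F post)
+      # so the conversion runs against the most recent proper release.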
+ - name: Revert version to most recent version tag on upstream update
if: github.event_name == 'repository_dispatch'
- run: git checkout $(git tag -l | tail -n 1 | awk -F post '{print $1}')
- - name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v4
+ run: git checkout $(git tag -l | grep 'v.*' | tail -n 1 | awk -F post '{print $1}')
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+
- name: Install build dependencies
run: python -m pip install --upgrade pip
+
- name: Install requirements
run: python -m pip install ./related-packages/fileformats -r ./nipype-auto-conv/requirements.txt
+
- name: Run automatic Nipype > Pydra conversion
run: ./nipype-auto-conv/generate
+
- uses: actions/upload-artifact@v3
with:
name: converted-nipype
@@ -50,256 +61,334 @@ jobs:
- '--editable git+https://github.com/nipype/pydra.git#egg=pydra'
steps:
- name: Checkout
- uses: actions/checkout@v3
- - name: Revert version to most recent tag on upstream update
+ uses: actions/checkout@v4
+
+ - name: Revert version to most recent version tag on upstream update
if: github.event_name == 'repository_dispatch'
- run: git checkout $(git tag -l | tail -n 1 | awk -F post '{print $1}')
- - name: Download tasks converted from Nipype
+ run: git checkout $(git tag -l | grep 'v.*' | tail -n 1 | awk -F post '{print $1}')
+
+ - name: Download tasks converted from Nipype
uses: actions/download-artifact@v3
with:
name: converted-nipype
path: pydra/tasks/ants/auto
+
- name: Strip auto package from gitignore so it is included in package
run: |
sed -i '/\/pydra\/tasks\/ants\/auto/d' .gitignore
+ sed -i '/^_version.py/d' .gitignore
+
- name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v4
+ uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
+
- name: Install build dependencies
run: |
python -m pip install --upgrade pip
+
- name: Install Pydra
run: |
pushd $HOME
pip install ${{ matrix.pydra }}
popd
python -c "import pydra as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')"
+
- name: Install task package
run: |
- pip install "./related-packages/fileformats[dev]" "related-packages/fileformats-extras[dev]"
+ pip install ${{ matrix.pip-flags }} "./related-packages/fileformats[dev]"
+ pip install ${{ matrix.pip-flags }} "related-packages/fileformats-extras[dev]"
pip install ${{ matrix.pip-flags }} ".[dev]"
python -c "import pydra.tasks.ants as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')"
python -c "import pydra as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')"
python -c "import fileformats.medimage_ants as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')"
python -c "import fileformats.extras.medimage_ants as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')"
-
- fileformats-test:
- runs-on: ubuntu-latest
- strategy:
- matrix:
- python-version: ['3.8', '3.11']
- steps:
- - uses: actions/checkout@v3
- - name: Revert version to most recent tag on upstream update
- if: github.event_name == 'repository_dispatch'
- run: git checkout $(git tag -l | tail -n 1 | awk -F post '{print $1}')
- - name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v4
- with:
- python-version: ${{ matrix.python-version }}
- - name: Install build dependencies
- run: |
- python -m pip install --upgrade pip
- - name: Install task package
- run: |
- pip install "./related-packages/fileformats[test]" "./related-packages/fileformats-extras[test]"
- python -c "import fileformats.medimage_ants as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')"
- - name: Test fileformats with pytest
- run: |
- cd ./fileformats
- pytest -sv --cov fileformats.medimage_ants --cov fileformats.extras.medimage_ants --cov-report xml .
test:
- needs: [nipype-conv, fileformats-test]
+ needs: [nipype-conv]
runs-on: ubuntu-22.04
strategy:
matrix:
- python-version: ['3.8'] # '3.11'
+ python-version: ['3.8', '3.11']
steps:
+
+ - name: Install prerequisite packages
+ run: sudo apt install -y cmake
+
- name: Removed unnecessary tools to free space
run: |
sudo rm -rf /usr/share/dotnet
- sudo rm -rf "$AGENT_TOOLSDIRECTORY"
- - name: Get Download cache Key
- id: cache-key
- run: echo "::set-output name=key::ants-linux-ubuntu22_amd64-7.4.1"
- - name: Cache FreeSurfer
- uses: actions/cache@v2
+ sudo rm -rf "$AGENT_TOOLSDIRECTORY"
+
+ - name: Cache ANTs Install
+ id: cache-install
+ uses: actions/cache@v4
with:
- path: $HOME/downloads/ants
- key: ${{ steps.cache-key.outputs.key }}
- restore-keys: |
- ants-linux-ubuntu22_amd64-7.4.1
- - name: Download FreeSurfer
- if: steps.cache-key.outputs.key != steps.cache-hit.outputs.key
- run: |
- mkdir -p $HOME/downloads/ants
- curl -s -o $HOME/downloads/ants/ants-linux-ubuntu22_amd64-7.4.1.tar.gz https://surfer.nmr.mgh.harvard.edu/pub/dist/ants/7.4.1/ants-linux-ubuntu22_amd64-7.4.1.tar.gz
- shell: bash
- - name: Install Freesurfer
- env:
- FREESURFER_LICENCE: ${{ secrets.FREESURFER_LICENCE }}
+ path: install
+ key: ants-${{ env.ANTS_VERSION }}-${{ runner.os }}
+
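+      # On a cache miss, build ANTs from source at the pinned ANTS_VERSION and expose
+      # its bin/ and lib/ directories to later steps via GITHUB_ENV.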
+ - name: Install ANTs Package
+ if: steps.cache-install.outputs.cache-hit != 'true'
run: |
- pushd $HOME/downloads/ants
- tar -zxpf ants-linux-ubuntu22_amd64-7.4.1.tar.gz
- mv ants $HOME/
+ workingDir=${PWD}
+ git clone https://github.com/ANTsX/ANTs.git
+ pushd ./ANTs
+ git checkout ${{ env.ANTS_VERSION }}
popd
- export FREESURFER_HOME=$HOME/ants
- source $FREESURFER_HOME/SetUpFreeSurfer.sh
- echo $FREESURFER_LICENCE > $FREESURFER_HOME/license.txt
- export PATH=$FREESURFER_HOME/bin:$PATH
- - uses: actions/checkout@v3
- - name: Revert version to most recent tag on upstream update
+ mkdir build install
+ cd build
+ cmake -DCMAKE_INSTALL_PREFIX=${workingDir}/install ../ANTs
+ make -j 4
+ cd ANTS-build
+ make install
+ echo "Installation completed successfully"
+ echo "PATH=${workingDir}/install/bin:$PATH" >> $GITHUB_ENV
+ echo "LD_LIBRARY_PATH=${workingDir}/install/lib:$LD_LIBRARY_PATH" >> $GITHUB_ENV
+
+ - name: Checkout repo
+ uses: actions/checkout@v4
+
+ - name: Revert version to most recent version tag on upstream update
if: github.event_name == 'repository_dispatch'
- run: git checkout $(git tag -l | tail -n 1 | awk -F post '{print $1}')
- - name: Download tasks converted from Nipype
+ run: git checkout $(git tag -l | grep 'v.*' | tail -n 1 | awk -F post '{print $1}')
+
+ - name: Download tasks converted from Nipype
uses: actions/download-artifact@v3
with:
name: converted-nipype
path: pydra/tasks/ants/auto
+
+ - name: Show the contents of the auto-generated tasks
+ run: tree pydra
+
- name: Strip auto package from gitignore so it is included in package
run: |
- sed -i '/\/src\/pydra\/tasks\/ants\/auto/d' .gitignore
+ sed -i '/\/pydra\/tasks\/ants\/auto/d' .gitignore
+
- name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v4
+ uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
+
- name: Install build dependencies
run: |
python -m pip install --upgrade pip
+
- name: Install task package
run: |
pip install "./related-packages/fileformats" "./related-packages/fileformats-extras" ".[test]"
python -c "import pydra.tasks.ants as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')"
python -c "import pydra as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')"
+
- name: Test with pytest
- run: |
- pytest -sv --doctest-modules ./pydra/tasks/ants \
- --cov pydra.tasks.ants --cov-report xml
- - uses: codecov/codecov-action@v3
+ run: >-
+ pytest -sv
+ ./pydra/tasks/ants
+ ./related-packages/fileformats
+ ./related-packages/fileformats-extras
+ --cov pydra.tasks.ants
+ --cov fileformats.medimage_ants
+ --cov fileformats.extras.medimage_ants
+ --cov-report xml
+
+ - name: Upload to CodeCov
+ uses: codecov/codecov-action@v3
if: ${{ always() }}
with:
- files: coverage.xml,./fileformats/coverage.xml
+ files: coverage.xml
name: pydra-ants
+
deploy-fileformats:
needs: [devcheck, test]
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
+
+ - uses: actions/checkout@v4
with:
submodules: recursive
- fetch-depth: 0
+ fetch-depth: 0
+
- name: Set up Python
- uses: actions/setup-python@v4
+ uses: actions/setup-python@v5
with:
python-version: '3.11'
+
- name: Install build tools
run: python -m pip install build twine
+
- name: Build source and wheel distributions
run: python -m build ./related-packages/fileformats
+
- name: Check distributions
run: twine check ./related-packages/fileformats/dist/*
+
- name: Check for PyPI token on tag
id: deployable
- if: (github.event_name == 'push' && startsWith(github.ref, 'refs/tags')) || github.event_name == 'repository_dispatch'
+ if: github.event_name == 'release' || github.event_name == 'repository_dispatch'
env:
PYPI_API_TOKEN: "${{ secrets.PYPI_FILEFORMATS_API_TOKEN }}"
run: if [ -n "$PYPI_API_TOKEN" ]; then echo "DEPLOY=true" >> $GITHUB_OUTPUT; fi
+
- name: Upload to PyPI
if: steps.deployable.outputs.DEPLOY
uses: pypa/gh-action-pypi-publish@release/v1
with:
user: __token__
password: ${{ secrets.PYPI_FILEFORMATS_API_TOKEN }}
- packages-dir: ./related-packages/fileformats/dist
+ packages-dir: ./related-packages/fileformats/dist
deploy-fileformats-extras:
needs: [deploy-fileformats]
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
+
+ - uses: actions/checkout@v4
with:
submodules: recursive
- fetch-depth: 0
+ fetch-depth: 0
+
- name: Set up Python
- uses: actions/setup-python@v4
+ uses: actions/setup-python@v5
with:
python-version: '3.11'
+
- name: Install build tools
run: python -m pip install build twine
+
- name: Build source and wheel distributions
run: python -m build ./related-packages/fileformats-extras
+
- name: Check distributions
run: twine check ./related-packages/fileformats-extras/dist/*
+
- name: Check for PyPI token on tag
id: deployable
- if: (github.event_name == 'push' && startsWith(github.ref, 'refs/tags')) || github.event_name == 'repository_dispatch'
+ if: github.event_name == 'release' || github.event_name == 'repository_dispatch'
env:
PYPI_API_TOKEN: "${{ secrets.PYPI_FILEFORMATS_EXTRAS_API_TOKEN }}"
run: if [ -n "$PYPI_API_TOKEN" ]; then echo "DEPLOY=true" >> $GITHUB_OUTPUT; fi
+
- name: Upload to PyPI
if: steps.deployable.outputs.DEPLOY
uses: pypa/gh-action-pypi-publish@release/v1
with:
user: __token__
password: ${{ secrets.PYPI_FILEFORMATS_EXTRAS_API_TOKEN }}
- packages-dir: ./related-packages/fileformats-extras/dist
+ packages-dir: ./related-packages/fileformats-extras/dist
deploy:
- needs: [deploy-fileformats-extras]
+ needs: [nipype-conv, test, deploy-fileformats, deploy-fileformats-extras]
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
+
+ - name: Checkout repository
+ uses: actions/checkout@v4
with:
submodules: recursive
fetch-depth: 0
- - name: Download tasks converted from Nipype
+
+ - name: Set up Git user
+ run: |
+ git config --local user.email "action@github.com"
+ git config --local user.name "GitHub Action"
+
+ - name: Get latest version tag
+ id: latest_tag
+ run: |
+ git fetch --tags
+ echo "TAG=$(git tag -l | grep 'v.*' | tail -n 1 | awk -F post '{print $1}')" >> $GITHUB_OUTPUT
+
+ - name: Revert to latest tag
+ if: github.event_name == 'repository_dispatch'
+ run: git checkout ${{ steps.latest_tag.outputs.TAG }}
+
+ - name: Download tasks converted from Nipype
uses: actions/download-artifact@v3
with:
name: converted-nipype
path: pydra/tasks/ants/auto
- - name: Tag release with a post-release based on Nipype and Nipype2Pydra versions
- if: github.event_name == 'repository_dispatch'
- run: |
- TAG=$(git tag -l | tail -n 1 | awk -F post '{print $1}')
- POST=$(python -c "from pydra.tasks.ants.auto._version import *; print(post_release)")
- git checkout $TAG
- git add -f pydra/tasks/ants/auto/_version.py
- git commit -am"added auto-generated version to make new tag for package version"
- git tag ${TAG}post${POST}
+
+ - name: Show the contents of the auto-generated tasks
+ run: tree pydra
+
- name: Set up Python
- uses: actions/setup-python@v4
+ uses: actions/setup-python@v5
with:
python-version: '3.11'
+
- name: Install build tools
run: python -m pip install build twine
+
- name: Strip auto package from gitignore so it is included in package
run: |
sed -i '/\/pydra\/tasks\/ants\/auto/d' .gitignore
+ cat .gitignore
+
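+      # Installing the package makes pydra.tasks.ants.auto._version (written by the
+      # conversion step) importable in the following step.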
+ - name: Install task package to calculate post-release tag
+ run: |
+ pip install "./related-packages/fileformats" "./related-packages/fileformats-extras" ".[test]"
+
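+      # `post_release` is written into pydra/tasks/ants/auto/_version.py by the
+      # auto-conversion script as the concatenated Nipype and nipype2pydra versions.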
+ - name: Generate post-release tag based on Nipype and Nipype2Pydra versions
+ id: post_release_tag
+ run: |
+ POST=$(python -c "from pydra.tasks.ants.auto._version import *; print(post_release)")
+ echo "TAG=${{ steps.latest_tag.outputs.TAG }}post${POST}" >> $GITHUB_OUTPUT
+
+ - name: Add auto directory to git repo
+ if: github.event_name == 'release' || github.event_name == 'repository_dispatch'
+ run: |
+ git add pydra/tasks/ants/auto
+ git commit -am"added auto-generated version to make new tag for package version"
+ git status
+
+      - name: Overwrite the release tag so it points at the latest commit (i.e. including the auto directory)
+ if: github.event_name == 'release'
+ run: |
+ git tag -d ${{ steps.latest_tag.outputs.TAG }};
+ git tag ${{ steps.latest_tag.outputs.TAG }};
+
+ - name: Tag repo with the post-release
+ if: github.event_name == 'repository_dispatch'
+ run: git tag ${{ steps.post_release_tag.outputs.TAG }}
+
- name: Build source and wheel distributions
run: python -m build .
+
- name: Check distributions
run: twine check dist/*
+
- uses: actions/upload-artifact@v3
with:
name: distributions
path: dist/
+
- name: Check for PyPI token on tag
id: deployable
- if: (github.event_name == 'push' && startsWith(github.ref, 'refs/tags')) || github.event_name == 'repository_dispatch'
+ if: github.event_name == 'release' || github.event_name == 'repository_dispatch'
env:
PYPI_API_TOKEN: "${{ secrets.PYPI_API_TOKEN }}"
run: if [ -n "$PYPI_API_TOKEN" ]; then echo "DEPLOY=true" >> $GITHUB_OUTPUT; fi
+
- name: Upload to PyPI
if: steps.deployable.outputs.DEPLOY
uses: pypa/gh-action-pypi-publish@release/v1
with:
user: __token__
- password: ${{ secrets.PYPI_API_TOKEN }}
+ password: ${{ secrets.PYPI_API_TOKEN }}
+
+      - name: Create a GitHub release for post-release tags triggered by nipype2pydra dispatches
+ if: steps.deployable.outputs.DEPLOY && github.event_name == 'repository_dispatch'
+ uses: actions/create-release@v1
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # This token is provided by Actions, you do not need to create your own token
+ with:
+ tag_name: ${{ steps.post_release_tag.outputs.TAG }}
+ release_name: Release ${{ steps.post_release_tag.outputs.TAG }}
+ draft: false
+ prerelease: false
# Deploy on tags if PYPI_API_TOKEN is defined in the repository secrets.
# Secrets are not accessible in the if: condition [0], so set an output variable [1]
# [0] https://github.community/t/16928
-# [1] https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-output-parameter
\ No newline at end of file
+# [1] https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-output-parameter
diff --git a/.github/workflows/pythonpackage.yml b/.github/workflows/pythonpackage.yml
deleted file mode 100644
index 5cdf094..0000000
--- a/.github/workflows/pythonpackage.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-#This workflow will install Python dependencies, run tests and lint with a variety of Python versions
-# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
-
-name: Python package
-
-on:
- push:
- branches: [ master ]
- pull_request:
- branches: [ master ]
-
-jobs:
- build:
-
- runs-on: ubuntu-latest
- strategy:
- matrix:
- python-version: [3.7, 3.8, 3.9, '3.10']
-
- steps:
- - uses: actions/checkout@v3
- - name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v4
- with:
- python-version: ${{ matrix.python-version }}
- - name: Install dependencies
- run: |
- python -m pip install --upgrade pip
- pip install ".[test]"
- - name: Test with pytest
- run: |
- pytest -sv --doctest-modules pydra/tasks/ants
diff --git a/.gitignore b/.gitignore
index 9e8d1aa..00ee882 100644
--- a/.gitignore
+++ b/.gitignore
@@ -131,11 +131,18 @@ dmypy.json
# Pycharm
.idea
+# Vim
+.*.sw[op]
+
# VS Code
.vscode
# Mac garbarge
.DS_store
+# Generated files
+/pydra/tasks/ants/_version.py
+/related-packages/fileformats/fileformats/medimage_ants/_version.py
+/related-packages/fileformats-extras/fileformats/extras/medimage_ants/_version.py
/pydra/tasks/ants/auto
/pydra/tasks/ants/_version.py
diff --git a/README.md b/README.md
deleted file mode 100644
index 67ba7e3..0000000
--- a/README.md
+++ /dev/null
@@ -1 +0,0 @@
-# Pydra tasks for ANTs commands
diff --git a/README.rst b/README.rst
index c52dfd7..9e28777 100644
--- a/README.rst
+++ b/README.rst
@@ -1,11 +1,11 @@
-===============================
+===========================
Pydra task package for ants
-===============================
+===========================
-.. image:: https://github.com/nipype/pydra-ants/actions/workflows/pythonpackage.yaml/badge.svg
- :target: https://github.com/nipype/pydra-ants/actions/workflows/pythonpackage.yaml
-.. .. image:: https://codecov.io/gh/nipype/pydra-ants/branch/main/graph/badge.svg?token=UIS0OGPST7
-.. :target: https://codecov.io/gh/nipype/pydra-ants
+.. image:: https://github.com/nipype/pydra-ants/actions/workflows/ci-cd.yaml/badge.svg
+ :target: https://github.com/nipype/pydra-ants/actions/workflows/ci-cd.yaml
+.. image:: https://codecov.io/gh/nipype/pydra-ants/branch/main/graph/badge.svg?token=UIS0OGPST7
+ :target: https://codecov.io/gh/nipype/pydra-ants
.. image:: https://img.shields.io/pypi/pyversions/pydra-ants.svg
:target: https://pypi.python.org/pypi/pydra-ants/
:alt: Supported Python versions
@@ -27,7 +27,7 @@ Automatically generated tasks can be found in the `pydra.tasks.ants.auto` packag
These packages should be treated with extreme caution as they likely do not pass testing.
Generated tasks that have been edited and pass testing are imported into one or more of the
`pydra.tasks.ants.v*` packages, corresponding to the version of the ants toolkit
-they are designed for.
+they are designed for.
Tests
-----
@@ -71,6 +71,14 @@ Contributing to this package
Developer installation
~~~~~~~~~~~~~~~~~~~~~~
+Install the `fileformats `__ packages
+corresponding to ANTs-specific file formats
+
+
+.. code-block::
+
+ $ pip install -e ./related-packages/fileformats[dev]
+ $ pip install -e ./related-packages/fileformats-extras[dev]
Install repo in developer mode from the source directory and install pre-commit to
ensure consistent code-style and quality.
@@ -78,7 +86,7 @@ ensure consistent code-style and quality.
.. code-block::
$ pip install -e .[test,dev]
-$ pre-commit install
+ $ pre-commit install
Next install the requirements for running the auto-conversion script and generate the
Pydra task interfaces from their Nipype counterparts
@@ -93,7 +101,8 @@ The run the conversion script to convert Nipype interfaces to Pydra
$ nipype-auto-conv/generate
-## Methodology
+Methodology
+~~~~~~~~~~~
The development of this package is expected to have two phases
@@ -149,6 +158,6 @@ in the ``inputs > types`` and ``outputs > types`` dicts of the YAML spec.
If the required file-type is not found implemented within fileformats, please see the `fileformats
docs `__ for instructions on how to define
-new fileformat types, and see
+new fileformat types, and see
`fileformats-medimage-extras `__
for an example on how to implement methods to generate sample data for them.
diff --git a/nipype-auto-conv/generate b/nipype-auto-conv/generate
index a3729e8..42f5fcb 100755
--- a/nipype-auto-conv/generate
+++ b/nipype-auto-conv/generate
@@ -5,10 +5,11 @@ from warnings import warn
from pathlib import Path
import shutil
from importlib import import_module
+from tqdm import tqdm
import yaml
import nipype
import nipype2pydra.utils
-from nipype2pydra.task import TaskConverter
+from nipype2pydra.task import get_converter
SPECS_DIR = Path(__file__).parent / "specs"
@@ -35,7 +36,10 @@ auto_dir = PKG_ROOT / "pydra" / "tasks" / PKG_NAME / "auto"
if auto_dir.exists():
shutil.rmtree(auto_dir)
-for fspath in sorted(SPECS_DIR.glob("**/*.yaml")):
+all_interfaces = []
+for fspath in tqdm(
+ sorted(SPECS_DIR.glob("**/*.yaml")), "converting interfaces from Nipype to Pydra"
+):
with open(fspath) as f:
spec = yaml.load(f, Loader=yaml.SafeLoader)
@@ -49,13 +53,14 @@ for fspath in sorted(SPECS_DIR.glob("**/*.yaml")):
module_name = nipype2pydra.utils.to_snake_case(spec["task_name"])
- converter = TaskConverter(
+ converter = get_converter(
output_module=f"pydra.tasks.{PKG_NAME}.auto.{module_name}",
callables_module=callables, # type: ignore
**spec,
)
converter.generate(PKG_ROOT)
auto_init += f"from .{module_name} import {converter.task_name}\n"
+ all_interfaces.append(converter.task_name)
with open(PKG_ROOT / "pydra" / "tasks" / PKG_NAME / "auto" / "_version.py", "w") as f:
@@ -68,5 +73,9 @@ post_release = (nipype_version + nipype2pydra_version).replace(".", "")
"""
)
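+# Build an explicit __all__ of the converted interfaces for the generated auto/__init__.py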
+auto_init += (
+ "\n\n__all__ = [\n" + "\n".join(f' "{i}",' for i in all_interfaces) + "\n]\n"
+)
+
with open(PKG_ROOT / "pydra" / "tasks" / PKG_NAME / "auto" / "__init__.py", "w") as f:
f.write(auto_init)
diff --git a/nipype-auto-conv/requirements.txt b/nipype-auto-conv/requirements.txt
index 06ac987..5fbe6aa 100644
--- a/nipype-auto-conv/requirements.txt
+++ b/nipype-auto-conv/requirements.txt
@@ -2,10 +2,11 @@ black
attrs>=22.1.0
nipype
pydra
+tqdm
PyYAML>=6.0
fileformats >=0.8
fileformats-medimage >=0.4
fileformats-datascience >= 0.1
fileformats-medimage-ants
traits
-nipype2pydra
\ No newline at end of file
+nipype2pydra
diff --git a/nipype-auto-conv/specs/affine_initializer.yaml b/nipype-auto-conv/specs/affine_initializer.yaml
index 65c7ba1..b54afde 100644
--- a/nipype-auto-conv/specs/affine_initializer.yaml
+++ b/nipype-auto-conv/specs/affine_initializer.yaml
@@ -5,17 +5,17 @@
#
# Docs
# ----
-#
+#
# Initialize an affine transform (as in antsBrainExtraction.sh)
-#
+#
# >>> from nipype.interfaces.ants import AffineInitializer
# >>> init = AffineInitializer()
# >>> init.inputs.fixed_image = 'fixed1.nii'
# >>> init.inputs.moving_image = 'moving1.nii'
# >>> init.cmdline
# 'antsAffineInitializer 3 fixed1.nii moving1.nii transform.mat 15.000000 0.100000 0 10'
-#
-#
+#
+#
task_name: AffineInitializer
nipype_name: AffineInitializer
nipype_module: nipype.interfaces.ants.utils
@@ -34,9 +34,12 @@ inputs:
# type=file|default=: reference image
moving_image: medimage/nifti1
# type=file|default=: moving image
- out_file: generic/file
+ out_file: Path
# type=file: output transform file
# type=file|default='transform.mat': output transform file
+ callable_defaults:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set as the `default` method of input fields
metadata:
# dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
outputs:
@@ -88,15 +91,15 @@ tests:
environ:
# type=dict|default={}: Environment variables
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -110,15 +113,15 @@ tests:
moving_image:
# type=file|default=: moving image
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -131,12 +134,12 @@ doctests:
# dict[str, str] - name-value pairs for inputs to be provided to the doctest.
# If the field is of file-format type and the value is None, then the
# '.mock()' method of the corresponding class is used instead.
- fixed_image:
+ fixed_image: '"fixed1.nii"'
# type=file|default=: reference image
- moving_image:
+ moving_image: '"moving1.nii"'
# type=file|default=: moving image
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
diff --git a/nipype-auto-conv/specs/affine_initializer_callables.py b/nipype-auto-conv/specs/affine_initializer_callables.py
index 47d5ce0..309afda 100644
--- a/nipype-auto-conv/specs/affine_initializer_callables.py
+++ b/nipype-auto-conv/specs/affine_initializer_callables.py
@@ -1 +1,20 @@
-"""Module to put any functions that are referred to in AffineInitializer.yaml"""
+"""Module to put any functions that are referred to in the "callables" section of AffineInitializer.yaml"""
+
+import os
+
+
+def out_file_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["out_file"]
+
+
+# Original source at L885 of /interfaces/base/core.py
+def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None):
+ raise NotImplementedError
+
+
+# Original source at L834 of /interfaces/ants/utils.py
+def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None):
+ return {"out_file": os.path.abspath(inputs.out_file)}
diff --git a/nipype-auto-conv/specs/ai.yaml b/nipype-auto-conv/specs/ai.yaml
index e3b217a..5fda363 100644
--- a/nipype-auto-conv/specs/ai.yaml
+++ b/nipype-auto-conv/specs/ai.yaml
@@ -5,9 +5,9 @@
#
# Docs
# ----
-#
+#
# Calculate the optimal linear transform parameters for aligning two images.
-#
+#
# Examples
# --------
# >>> AI(
@@ -17,7 +17,7 @@
# ... ).cmdline
# 'antsAI -c [10,1e-06,10] -d 3 -m Mattes[structural.nii,epi.nii,32,Regular,1]
# -o initialization.mat -p 0 -s [20,0.12] -t Affine[0.1] -v 0'
-#
+#
# >>> AI(fixed_image='structural.nii',
# ... moving_image='epi.nii',
# ... metric=('Mattes', 32, 'Regular', 1),
@@ -25,8 +25,8 @@
# ... ).cmdline
# 'antsAI -c [10,1e-06,10] -d 3 -m Mattes[structural.nii,epi.nii,32,Regular,1]
# -o initialization.mat -p 0 -s [20,0.12] -g [12.0,1x1x1] -t Affine[0.1] -v 0'
-#
-#
+#
+#
task_name: AI
nipype_name: AI
nipype_module: nipype.interfaces.ants.utils
@@ -43,15 +43,18 @@ inputs:
# passed to the field in the automatically generated unittests.
fixed_image: generic/file
# type=file|default=: Image to which the moving_image should be transformed
- moving_image: generic/file
- # type=file|default=: Image that will be transformed to fixed_image
fixed_image_mask: generic/file
# type=file|default=: fixed mage mask
+ moving_image: generic/file
+ # type=file|default=: Image that will be transformed to fixed_image
moving_image_mask: generic/file
# type=file|default=: moving mage mask
- output_transform: generic/file
+ output_transform: Path
# type=file: output file name
# type=file|default='initialization.mat': output file name
+ callable_defaults:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set as the `default` method of input fields
metadata:
# dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
outputs:
@@ -113,15 +116,15 @@ tests:
environ:
# type=dict|default={}: Environment variables
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
diff --git a/nipype-auto-conv/specs/ai_callables.py b/nipype-auto-conv/specs/ai_callables.py
index dafef82..071a285 100644
--- a/nipype-auto-conv/specs/ai_callables.py
+++ b/nipype-auto-conv/specs/ai_callables.py
@@ -1 +1,18 @@
-"""Module to put any functions that are referred to in AI.yaml"""
+"""Module to put any functions that are referred to in the "callables" section of AI.yaml"""
+
+
+def output_transform_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["output_transform"]
+
+
+# Original source at L885 of /interfaces/base/core.py
+def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None):
+ raise NotImplementedError
+
+
+# Original source at L539 of /interfaces/ants/utils.py
+def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None):
+    # In the Nipype interface this value is stored on the instance as `self._output`
+    # during `_run_interface`; it is reconstructed here from the `output_transform`
+    # input so the standalone function does not reference an undefined `self`.
+    return {"output_transform": os.path.abspath(inputs.output_transform)}
diff --git a/nipype-auto-conv/specs/ants.yaml b/nipype-auto-conv/specs/ants.yaml
index 8108e61..4a00c3a 100644
--- a/nipype-auto-conv/specs/ants.yaml
+++ b/nipype-auto-conv/specs/ants.yaml
@@ -7,10 +7,10 @@
# ----
# ANTS wrapper for registration of images
# (old, use Registration instead)
-#
+#
# Examples
# --------
-#
+#
# >>> from nipype.interfaces.ants import ANTS
# >>> ants = ANTS()
# >>> ants.inputs.dimension = 3
@@ -31,7 +31,7 @@
# >>> ants.inputs.number_of_affine_iterations = [10000,10000,10000,10000,10000]
# >>> ants.cmdline
# 'ANTS 3 --MI-option 32x16000 --image-metric CC[ T1.nii, resting.nii, 1, 5 ] --number-of-affine-iterations 10000x10000x10000x10000x10000 --number-of-iterations 50x35x15 --output-naming MY --regularization Gauss[3.0,0.0] --transformation-model SyN[0.25] --use-Histogram-Matching 1'
-#
+#
task_name: ANTS
nipype_name: ANTS
nipype_module: nipype.interfaces.ants.registration
@@ -50,6 +50,9 @@ inputs:
# type=inputmultiobject|default=[]: image to which the moving image is warped
moving_image: medimage/nifti1+list-of
# type=inputmultiobject|default=[]: image to apply transformation to (generally a coregisteredfunctional)
+ callable_defaults:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set as the `default` method of input fields
metadata:
# dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
outputs:
@@ -65,14 +68,14 @@ outputs:
# passed to the field in the automatically generated unittests.
affine_transform: generic/file
# type=file: Affine transform file
- warp_transform: generic/file
- # type=file: Warping deformation field
inverse_warp_transform: generic/file
# type=file: Inverse warping deformation field
metaheader: generic/file
# type=file: VTK metaheader .mhd file
metaheader_raw: generic/file
# type=file: VTK metaheader .raw file
+ warp_transform: generic/file
+ # type=file: Warping deformation field
callables:
# dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
# to set to the `callable` attribute of output fields
@@ -91,43 +94,43 @@ tests:
moving_image:
# type=inputmultiobject|default=[]: image to apply transformation to (generally a coregisteredfunctional)
metric:
- # type=list|default=[]:
+ # type=list|default=[]:
metric_weight:
# type=list|default=[1.0]: the metric weight(s) for each stage. The weights must sum to 1 per stage.
radius:
# type=list|default=[]: radius of the region (i.e. number of layers around a voxel/pixel) that is used for computing cross correlation
output_transform_prefix:
- # type=str|default='out':
+ # type=str|default='out':
transformation_model:
- # type=enum|default='Diff'|allowed['Diff','Elast','Exp','Greedy Exp','SyN']:
+ # type=enum|default='Diff'|allowed['Diff','Elast','Exp','Greedy Exp','SyN']:
gradient_step_length:
- # type=float|default=0.0:
+ # type=float|default=0.0:
number_of_time_steps:
- # type=int|default=0:
+ # type=int|default=0:
delta_time:
- # type=float|default=0.0:
+ # type=float|default=0.0:
symmetry_type:
- # type=float|default=0.0:
+ # type=float|default=0.0:
use_histogram_matching:
- # type=bool|default=True:
+ # type=bool|default=True:
number_of_iterations:
- # type=list|default=[]:
+ # type=list|default=[]:
smoothing_sigmas:
- # type=list|default=[]:
+ # type=list|default=[]:
subsampling_factors:
- # type=list|default=[]:
+ # type=list|default=[]:
affine_gradient_descent_option:
- # type=list|default=[]:
+ # type=list|default=[]:
mi_option:
- # type=list|default=[]:
+ # type=list|default=[]:
regularization:
- # type=enum|default='Gauss'|allowed['DMFFD','Gauss']:
+ # type=enum|default='Gauss'|allowed['DMFFD','Gauss']:
regularization_gradient_field_sigma:
- # type=float|default=0.0:
+ # type=float|default=0.0:
regularization_deformation_field_sigma:
- # type=float|default=0.0:
+ # type=float|default=0.0:
number_of_affine_iterations:
- # type=list|default=[]:
+ # type=list|default=[]:
num_threads:
# type=int|default=1: Number of ITK threads to use
args:
@@ -135,15 +138,15 @@ tests:
environ:
# type=dict|default={}: Environment variables
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -155,9 +158,9 @@ tests:
dimension: '3'
# type=enum|default=3|allowed[2,3]: image dimension (2 or 3)
output_transform_prefix: '"MY"'
- # type=str|default='out':
+ # type=str|default='out':
metric: '["CC"]'
- # type=list|default=[]:
+ # type=list|default=[]:
fixed_image:
# type=inputmultiobject|default=[]: image to which the moving image is warped
moving_image:
@@ -167,33 +170,33 @@ tests:
radius: '[5]'
# type=list|default=[]: radius of the region (i.e. number of layers around a voxel/pixel) that is used for computing cross correlation
transformation_model: '"SyN"'
- # type=enum|default='Diff'|allowed['Diff','Elast','Exp','Greedy Exp','SyN']:
+ # type=enum|default='Diff'|allowed['Diff','Elast','Exp','Greedy Exp','SyN']:
gradient_step_length: '0.25'
- # type=float|default=0.0:
+ # type=float|default=0.0:
number_of_iterations: '[50, 35, 15]'
- # type=list|default=[]:
+ # type=list|default=[]:
use_histogram_matching: 'True'
- # type=bool|default=True:
+ # type=bool|default=True:
mi_option: '[32, 16000]'
- # type=list|default=[]:
+ # type=list|default=[]:
regularization: '"Gauss"'
- # type=enum|default='Gauss'|allowed['DMFFD','Gauss']:
+ # type=enum|default='Gauss'|allowed['DMFFD','Gauss']:
regularization_gradient_field_sigma: '3'
- # type=float|default=0.0:
+ # type=float|default=0.0:
regularization_deformation_field_sigma: '0'
- # type=float|default=0.0:
+ # type=float|default=0.0:
number_of_affine_iterations: '[10000,10000,10000,10000,10000]'
- # type=list|default=[]:
+ # type=list|default=[]:
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -209,37 +212,37 @@ doctests:
dimension: '3'
# type=enum|default=3|allowed[2,3]: image dimension (2 or 3)
output_transform_prefix: '"MY"'
- # type=str|default='out':
+ # type=str|default='out':
metric: '["CC"]'
- # type=list|default=[]:
- fixed_image:
+ # type=list|default=[]:
+ fixed_image: '["T1.nii"]'
# type=inputmultiobject|default=[]: image to which the moving image is warped
- moving_image:
+ moving_image: '["resting.nii"]'
# type=inputmultiobject|default=[]: image to apply transformation to (generally a coregisteredfunctional)
metric_weight: '[1.0]'
# type=list|default=[1.0]: the metric weight(s) for each stage. The weights must sum to 1 per stage.
radius: '[5]'
# type=list|default=[]: radius of the region (i.e. number of layers around a voxel/pixel) that is used for computing cross correlation
transformation_model: '"SyN"'
- # type=enum|default='Diff'|allowed['Diff','Elast','Exp','Greedy Exp','SyN']:
+ # type=enum|default='Diff'|allowed['Diff','Elast','Exp','Greedy Exp','SyN']:
gradient_step_length: '0.25'
- # type=float|default=0.0:
+ # type=float|default=0.0:
number_of_iterations: '[50, 35, 15]'
- # type=list|default=[]:
+ # type=list|default=[]:
use_histogram_matching: 'True'
- # type=bool|default=True:
+ # type=bool|default=True:
mi_option: '[32, 16000]'
- # type=list|default=[]:
+ # type=list|default=[]:
regularization: '"Gauss"'
- # type=enum|default='Gauss'|allowed['DMFFD','Gauss']:
+ # type=enum|default='Gauss'|allowed['DMFFD','Gauss']:
regularization_gradient_field_sigma: '3'
- # type=float|default=0.0:
+ # type=float|default=0.0:
regularization_deformation_field_sigma: '0'
- # type=float|default=0.0:
+ # type=float|default=0.0:
number_of_affine_iterations: '[10000,10000,10000,10000,10000]'
- # type=list|default=[]:
+ # type=list|default=[]:
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
diff --git a/nipype-auto-conv/specs/ants_callables.py b/nipype-auto-conv/specs/ants_callables.py
index dc14cf0..93306b7 100644
--- a/nipype-auto-conv/specs/ants_callables.py
+++ b/nipype-auto-conv/specs/ants_callables.py
@@ -1 +1,60 @@
-"""Module to put any functions that are referred to in ANTS.yaml"""
+"""Module to put any functions that are referred to in the "callables" section of ANTS.yaml"""
+
+import os
+
+
+def affine_transform_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["affine_transform"]
+
+
+def inverse_warp_transform_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["inverse_warp_transform"]
+
+
+def metaheader_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["metaheader"]
+
+
+def metaheader_raw_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["metaheader_raw"]
+
+
+def warp_transform_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["warp_transform"]
+
+
+# Original source at L885 of /interfaces/base/core.py
+def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None):
+ raise NotImplementedError
+
+
+# Original source at L242 of /interfaces/ants/registration.py
+def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None):
+ outputs = {}
+ outputs["affine_transform"] = os.path.abspath(
+ inputs.output_transform_prefix + "Affine.txt"
+ )
+ outputs["warp_transform"] = os.path.abspath(
+ inputs.output_transform_prefix + "Warp.nii.gz"
+ )
+ outputs["inverse_warp_transform"] = os.path.abspath(
+ inputs.output_transform_prefix + "InverseWarp.nii.gz"
+ )
+ # outputs['metaheader'] = os.path.abspath(inputs.output_transform_prefix + 'velocity.mhd')
+ # outputs['metaheader_raw'] = os.path.abspath(inputs.output_transform_prefix + 'velocity.raw')
+ return outputs
diff --git a/nipype-auto-conv/specs/ants_introduction.yaml b/nipype-auto-conv/specs/ants_introduction.yaml
index eff4899..f016eb4 100644
--- a/nipype-auto-conv/specs/ants_introduction.yaml
+++ b/nipype-auto-conv/specs/ants_introduction.yaml
@@ -6,10 +6,10 @@
# Docs
# ----
# Uses ANTS to generate matrices to warp data from one space to another.
-#
+#
# Examples
# --------
-#
+#
# >>> from nipype.interfaces.ants.legacy import antsIntroduction
# >>> warp = antsIntroduction()
# >>> warp.inputs.reference_image = 'Template_6.nii'
@@ -17,8 +17,8 @@
# >>> warp.inputs.max_iterations = [30,90,20]
# >>> warp.cmdline
# 'antsIntroduction.sh -d 3 -i structural.nii -m 30x90x20 -o ants_ -r Template_6.nii -t GR'
-#
-#
+#
+#
task_name: antsIntroduction
nipype_name: antsIntroduction
nipype_module: nipype.interfaces.ants.legacy
@@ -33,10 +33,13 @@ inputs:
# from the nipype interface, but you may want to be more specific, particularly
# for file types, where specifying the format also specifies the file that will be
# passed to the field in the automatically generated unittests.
- reference_image: medimage/nifti1
- # type=file|default=: template file to warp to
input_image: medimage/nifti1
# type=file|default=: input image to warp to template
+ reference_image: medimage/nifti1
+ # type=file|default=: template file to warp to
+ callable_defaults:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set as the `default` method of input fields
metadata:
# dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
outputs:
@@ -52,14 +55,14 @@ outputs:
# passed to the field in the automatically generated unittests.
affine_transformation: generic/file
# type=file: affine (prefix_Affine.txt)
- warp_field: generic/file
- # type=file: warp field (prefix_Warp.nii)
- inverse_warp_field: generic/file
- # type=file: inverse warp field (prefix_InverseWarp.nii)
input_file: generic/file
# type=file: input image (prefix_repaired.nii)
+ inverse_warp_field: generic/file
+ # type=file: inverse warp field (prefix_InverseWarp.nii)
output_file: generic/file
# type=file: output image (prefix_deformed.nii)
+ warp_field: generic/file
+ # type=file: warp field (prefix_Warp.nii)
callables:
# dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
# to set to the `callable` attribute of output fields
@@ -100,15 +103,15 @@ tests:
environ:
# type=dict|default={}: Environment variables
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -124,15 +127,15 @@ tests:
max_iterations: '[30,90,20]'
# type=list|default=[]: maximum number of iterations (must be list of integers in the form [J,K,L...]: J = coarsest resolution iterations, K = middle resolution iterations, L = fine resolution iterations
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -145,14 +148,14 @@ doctests:
# dict[str, str] - name-value pairs for inputs to be provided to the doctest.
# If the field is of file-format type and the value is None, then the
# '.mock()' method of the corresponding class is used instead.
- reference_image:
+ reference_image: '"Template_6.nii"'
# type=file|default=: template file to warp to
- input_image:
+ input_image: '"structural.nii"'
# type=file|default=: input image to warp to template
max_iterations: '[30,90,20]'
# type=list|default=[]: maximum number of iterations (must be list of integers in the form [J,K,L...]: J = coarsest resolution iterations, K = middle resolution iterations, L = fine resolution iterations
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
diff --git a/nipype-auto-conv/specs/ants_introduction_callables.py b/nipype-auto-conv/specs/ants_introduction_callables.py
index d6e4f62..a68abea 100644
--- a/nipype-auto-conv/specs/ants_introduction_callables.py
+++ b/nipype-auto-conv/specs/ants_introduction_callables.py
@@ -1 +1,74 @@
-"""Module to put any functions that are referred to in antsIntroduction.yaml"""
+"""Module to put any functions that are referred to in the "callables" section of antsIntroduction.yaml"""
+
+import attrs
+import os
+
+
+def affine_transformation_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["affine_transformation"]
+
+
+def input_file_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["input_file"]
+
+
+def inverse_warp_field_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["inverse_warp_field"]
+
+
+def output_file_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["output_file"]
+
+
+def warp_field_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["warp_field"]
+
+
+# Original source at L885 of /interfaces/base/core.py
+def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None):
+ raise NotImplementedError
+
+
+# Original source at L141 of /interfaces/ants/legacy.py
+def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None):
+ outputs = {}
+ transmodel = inputs.transformation_model
+
+ # When transform is set as 'RI'/'RA', wrap fields should not be expected
+ # The default transformation is GR, which outputs the wrap fields
+ if (transmodel is attrs.NOTHING) or (
+ (transmodel is not attrs.NOTHING) and transmodel not in ["RI", "RA"]
+ ):
+ outputs["warp_field"] = os.path.join(
+ output_dir, inputs.out_prefix + "Warp.nii.gz"
+ )
+ outputs["inverse_warp_field"] = os.path.join(
+ output_dir, inputs.out_prefix + "InverseWarp.nii.gz"
+ )
+
+ outputs["affine_transformation"] = os.path.join(
+ output_dir, inputs.out_prefix + "Affine.txt"
+ )
+ outputs["input_file"] = os.path.join(
+ output_dir, inputs.out_prefix + "repaired.nii.gz"
+ )
+ outputs["output_file"] = os.path.join(
+ output_dir, inputs.out_prefix + "deformed.nii.gz"
+ )
+
+ return outputs
diff --git a/nipype-auto-conv/specs/apply_transforms.yaml b/nipype-auto-conv/specs/apply_transforms.yaml
index f3e8d35..fdcb2a7 100644
--- a/nipype-auto-conv/specs/apply_transforms.yaml
+++ b/nipype-auto-conv/specs/apply_transforms.yaml
@@ -7,10 +7,10 @@
# ----
# ApplyTransforms, applied to an input image, transforms it according to a
# reference image and a transform (or a set of transforms).
-#
+#
# Examples
# --------
-#
+#
# >>> from nipype.interfaces.ants import ApplyTransforms
# >>> at = ApplyTransforms()
# >>> at.inputs.input_image = 'moving1.nii'
@@ -18,7 +18,7 @@
# >>> at.inputs.transforms = 'identity'
# >>> at.cmdline
# 'antsApplyTransforms --default-value 0 --float 0 --input moving1.nii --interpolation Linear --output moving1_trans.nii --reference-image fixed1.nii --transform identity'
-#
+#
# >>> at = ApplyTransforms()
# >>> at.inputs.dimension = 3
# >>> at.inputs.input_image = 'moving1.nii'
@@ -30,7 +30,7 @@
# >>> at.inputs.invert_transform_flags = [False, True]
# >>> at.cmdline
# 'antsApplyTransforms --default-value 0 --dimensionality 3 --float 0 --input moving1.nii --interpolation Linear --output deformed_moving1.nii --reference-image fixed1.nii --transform ants_Warp.nii.gz --transform [ trans.mat, 1 ]'
-#
+#
# >>> at1 = ApplyTransforms()
# >>> at1.inputs.dimension = 3
# >>> at1.inputs.input_image = 'moving1.nii'
@@ -43,9 +43,9 @@
# >>> at1.inputs.invert_transform_flags = [False, False]
# >>> at1.cmdline
# 'antsApplyTransforms --default-value 0 --dimensionality 3 --float 0 --input moving1.nii --interpolation BSpline[ 5 ] --output deformed_moving1.nii --reference-image fixed1.nii --transform ants_Warp.nii.gz --transform trans.mat'
-#
+#
# Identity transforms may be used as part of a chain:
-#
+#
# >>> at2 = ApplyTransforms()
# >>> at2.inputs.dimension = 3
# >>> at2.inputs.input_image = 'moving1.nii'
@@ -57,7 +57,7 @@
# >>> at2.inputs.transforms = ['identity', 'ants_Warp.nii.gz', 'trans.mat']
# >>> at2.cmdline
# 'antsApplyTransforms --default-value 0 --dimensionality 3 --float 0 --input moving1.nii --interpolation BSpline[ 5 ] --output deformed_moving1.nii --reference-image fixed1.nii --transform identity --transform ants_Warp.nii.gz --transform trans.mat'
-#
+#
task_name: ApplyTransforms
nipype_name: ApplyTransforms
nipype_module: nipype.interfaces.ants.resampling
@@ -76,10 +76,9 @@ inputs:
# type=file|default=: image to apply transformation to (generally a coregistered functional)
reference_image: medimage/nifti1
# type=file|default=: reference image space that you wish to warp INTO
- transforms: medimage/nifti-gz+list-of
- # type=inputmultiobject|default=[]: transform files: will be applied in reverse order. For example, the last specified transform will be applied first.
- invert_transform_flags: generic/file+list-of
- # type=inputmultiobject|default=[]:
+ callable_defaults:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set as the `default` method of input fields
metadata:
# dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
outputs:
@@ -124,15 +123,15 @@ tests:
reference_image:
# type=file|default=: reference image space that you wish to warp INTO
interpolation:
- # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']:
+ # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']:
interpolation_parameters:
- # type=traitcompound|default=None:
+ # type=traitcompound|default=None:
transforms:
# type=inputmultiobject|default=[]: transform files: will be applied in reverse order. For example, the last specified transform will be applied first.
invert_transform_flags:
- # type=inputmultiobject|default=[]:
+ # type=inputmultiobject|default=[]:
default_value:
- # type=float|default=0.0:
+ # type=float|default=0.0:
print_out_composite_warp_file:
# type=bool|default=False: output a composite warp file instead of a transformed image
float:
@@ -144,15 +143,15 @@ tests:
environ:
# type=dict|default={}: Environment variables
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -165,18 +164,18 @@ tests:
# type=file|default=: image to apply transformation to (generally a coregistered functional)
reference_image:
# type=file|default=: reference image space that you wish to warp INTO
- transforms:
+ transforms: '"identity"'
# type=inputmultiobject|default=[]: transform files: will be applied in reverse order. For example, the last specified transform will be applied first.
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -195,23 +194,23 @@ tests:
# type=file: Warped image
# type=str|default='': output file name
interpolation: '"Linear"'
- # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']:
+ # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']:
default_value: '0'
- # type=float|default=0.0:
- transforms:
+ # type=float|default=0.0:
+ transforms: '["ants_Warp.nii.gz", "trans.mat"]'
# type=inputmultiobject|default=[]: transform files: will be applied in reverse order. For example, the last specified transform will be applied first.
- invert_transform_flags:
- # type=inputmultiobject|default=[]:
+ invert_transform_flags: '[False, True]'
+ # type=inputmultiobject|default=[]:
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -230,25 +229,25 @@ tests:
# type=file: Warped image
# type=str|default='': output file name
interpolation: '"BSpline"'
- # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']:
+ # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']:
interpolation_parameters: (5,)
- # type=traitcompound|default=None:
+ # type=traitcompound|default=None:
default_value: '0'
- # type=float|default=0.0:
- transforms:
+ # type=float|default=0.0:
+ transforms: '["ants_Warp.nii.gz", "trans.mat"]'
# type=inputmultiobject|default=[]: transform files: will be applied in reverse order. For example, the last specified transform will be applied first.
- invert_transform_flags:
- # type=inputmultiobject|default=[]:
+ invert_transform_flags: '[False, False]'
+ # type=inputmultiobject|default=[]:
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -267,23 +266,23 @@ tests:
# type=file: Warped image
# type=str|default='': output file name
interpolation: '"BSpline"'
- # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']:
+ # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']:
interpolation_parameters: (5,)
- # type=traitcompound|default=None:
+ # type=traitcompound|default=None:
default_value: '0'
- # type=float|default=0.0:
- transforms:
+ # type=float|default=0.0:
+ transforms: '["identity", "ants_Warp.nii.gz", "trans.mat"]'
# type=inputmultiobject|default=[]: transform files: will be applied in reverse order. For example, the last specified transform will be applied first.
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -296,14 +295,14 @@ doctests:
# dict[str, str] - name-value pairs for inputs to be provided to the doctest.
# If the field is of file-format type and the value is None, then the
# '.mock()' method of the corresponding class is used instead.
- input_image:
+ input_image: '"moving1.nii"'
# type=file|default=: image to apply transformation to (generally a coregistered functional)
- reference_image:
+ reference_image: '"fixed1.nii"'
# type=file|default=: reference image space that you wish to warp INTO
- transforms:
+ transforms: '"identity"'
# type=inputmultiobject|default=[]: transform files: will be applied in reverse order. For example, the last specified transform will be applied first.
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
@@ -315,23 +314,23 @@ doctests:
# '.mock()' method of the corresponding class is used instead.
dimension: '3'
# type=enum|default=2|allowed[2,3,4]: This option forces the image to be treated as a specified-dimensional image. If not specified, antsWarp tries to infer the dimensionality from the input image.
- input_image:
+ input_image: '"moving1.nii"'
# type=file|default=: image to apply transformation to (generally a coregistered functional)
- reference_image:
+ reference_image: '"fixed1.nii"'
# type=file|default=: reference image space that you wish to warp INTO
output_image: '"deformed_moving1.nii"'
# type=file: Warped image
# type=str|default='': output file name
interpolation: '"Linear"'
- # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']:
+ # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']:
default_value: '0'
- # type=float|default=0.0:
- transforms:
+ # type=float|default=0.0:
+ transforms: '["ants_Warp.nii.gz", "trans.mat"]'
# type=inputmultiobject|default=[]: transform files: will be applied in reverse order. For example, the last specified transform will be applied first.
- invert_transform_flags:
- # type=inputmultiobject|default=[]:
+ invert_transform_flags: '[False, True]'
+ # type=inputmultiobject|default=[]:
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
@@ -343,25 +342,25 @@ doctests:
# '.mock()' method of the corresponding class is used instead.
dimension: '3'
# type=enum|default=2|allowed[2,3,4]: This option forces the image to be treated as a specified-dimensional image. If not specified, antsWarp tries to infer the dimensionality from the input image.
- input_image:
+ input_image: '"moving1.nii"'
# type=file|default=: image to apply transformation to (generally a coregistered functional)
- reference_image:
+ reference_image: '"fixed1.nii"'
# type=file|default=: reference image space that you wish to warp INTO
output_image: '"deformed_moving1.nii"'
# type=file: Warped image
# type=str|default='': output file name
interpolation: '"BSpline"'
- # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']:
+ # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']:
interpolation_parameters: (5,)
- # type=traitcompound|default=None:
+ # type=traitcompound|default=None:
default_value: '0'
- # type=float|default=0.0:
- transforms:
+ # type=float|default=0.0:
+ transforms: '["ants_Warp.nii.gz", "trans.mat"]'
# type=inputmultiobject|default=[]: transform files: will be applied in reverse order. For example, the last specified transform will be applied first.
- invert_transform_flags:
- # type=inputmultiobject|default=[]:
+ invert_transform_flags: '[False, False]'
+ # type=inputmultiobject|default=[]:
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
@@ -373,23 +372,23 @@ doctests:
# '.mock()' method of the corresponding class is used instead.
dimension: '3'
# type=enum|default=2|allowed[2,3,4]: This option forces the image to be treated as a specified-dimensional image. If not specified, antsWarp tries to infer the dimensionality from the input image.
- input_image:
+ input_image: '"moving1.nii"'
# type=file|default=: image to apply transformation to (generally a coregistered functional)
- reference_image:
+ reference_image: '"fixed1.nii"'
# type=file|default=: reference image space that you wish to warp INTO
output_image: '"deformed_moving1.nii"'
# type=file: Warped image
# type=str|default='': output file name
interpolation: '"BSpline"'
- # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']:
+ # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']:
interpolation_parameters: (5,)
- # type=traitcompound|default=None:
+ # type=traitcompound|default=None:
default_value: '0'
- # type=float|default=0.0:
- transforms:
+ # type=float|default=0.0:
+ transforms: '["identity", "ants_Warp.nii.gz", "trans.mat"]'
# type=inputmultiobject|default=[]: transform files: will be applied in reverse order. For example, the last specified transform will be applied first.
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
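The doctest values recorded in this spec restate the third example from the upstream Nipype docstring quoted near the top of the file; for reference, that usage reads roughly as follows (file names are Nipype's doctest fixtures, and the printed command line is the one quoted in the spec):

from nipype.interfaces.ants import ApplyTransforms

at = ApplyTransforms()
at.inputs.dimension = 3
at.inputs.input_image = "moving1.nii"
at.inputs.reference_image = "fixed1.nii"
at.inputs.output_image = "deformed_moving1.nii"
at.inputs.interpolation = "BSpline"
at.inputs.interpolation_parameters = (5,)
at.inputs.default_value = 0
at.inputs.transforms = ["identity", "ants_Warp.nii.gz", "trans.mat"]
print(at.cmdline)
# 'antsApplyTransforms --default-value 0 --dimensionality 3 --float 0 --input moving1.nii
#  --interpolation BSpline[ 5 ] --output deformed_moving1.nii --reference-image fixed1.nii
#  --transform identity --transform ants_Warp.nii.gz --transform trans.mat'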
diff --git a/nipype-auto-conv/specs/apply_transforms_callables.py b/nipype-auto-conv/specs/apply_transforms_callables.py
index eaafa83..303b094 100644
--- a/nipype-auto-conv/specs/apply_transforms_callables.py
+++ b/nipype-auto-conv/specs/apply_transforms_callables.py
@@ -1 +1,93 @@
-"""Module to put any functions that are referred to in ApplyTransforms.yaml"""
+"""Module to put any functions that are referred to in the "callables" section of ApplyTransforms.yaml"""
+
+import attrs
+import os
+import os.path as op
+
+
+def output_image_default(inputs):
+ return _gen_filename("output_image", inputs=inputs)
+
+
+def output_image_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["output_image"]
+
+
+# Original source at L465 of /interfaces/ants/resampling.py
+def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None):
+ if name == "output_image":
+ output = inputs.output_image
+ if output is attrs.NOTHING:
+ _, name, ext = split_filename(inputs.input_image)
+ output = name + inputs.out_postfix + ext
+ return output
+ return None
+
+
+# Original source at L522 of /interfaces/ants/resampling.py
+def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None):
+ outputs = {}
+ outputs["output_image"] = os.path.abspath(
+ _gen_filename(
+ "output_image",
+ inputs=inputs,
+ stdout=stdout,
+ stderr=stderr,
+ output_dir=output_dir,
+ )
+ )
+ return outputs
+
+
+# Original source at L58 of /utils/filemanip.py
+def split_filename(fname):
+ """Split a filename into parts: path, base filename and extension.
+
+ Parameters
+ ----------
+ fname : str
+ file or path name
+
+ Returns
+ -------
+ pth : str
+ base path from fname
+ fname : str
+ filename from fname, without extension
+ ext : str
+ file extension from fname
+
+ Examples
+ --------
+ >>> from nipype.utils.filemanip import split_filename
+ >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz')
+ >>> pth
+ '/home/data'
+
+ >>> fname
+ 'subject'
+
+ >>> ext
+ '.nii.gz'
+
+ """
+
+ special_extensions = [".nii.gz", ".tar.gz", ".niml.dset"]
+
+ pth = op.dirname(fname)
+ fname = op.basename(fname)
+
+ ext = None
+ for special_ext in special_extensions:
+ ext_len = len(special_ext)
+ if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()):
+ ext = fname[-ext_len:]
+ fname = fname[:-ext_len]
+ break
+ if not ext:
+ fname, ext = op.splitext(fname)
+
+ return pth, fname, ext
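As a quick sanity check on the helpers vendored above: the generated default for `output_image` concatenates the input's base name, Nipype's `out_postfix` (assumed here to carry its usual `'_trans'` default, which is not shown in this spec), and the original extension. A minimal sketch, with a `SimpleNamespace` standing in for the real inputs object and the functions above assumed to be in scope:

from types import SimpleNamespace

import attrs

# Hypothetical stand-in for the task's inputs spec, used only to exercise
# the helpers defined above.
inputs = SimpleNamespace(
    output_image=attrs.NOTHING,   # unset, so a default name is generated
    input_image="moving1.nii",
    out_postfix="_trans",         # assumed Nipype default postfix for ApplyTransforms
)
print(output_image_default(inputs))  # -> 'moving1_trans.nii'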
diff --git a/nipype-auto-conv/specs/apply_transforms_to_points.yaml b/nipype-auto-conv/specs/apply_transforms_to_points.yaml
index da0206f..f9e8c51 100644
--- a/nipype-auto-conv/specs/apply_transforms_to_points.yaml
+++ b/nipype-auto-conv/specs/apply_transforms_to_points.yaml
@@ -7,10 +7,10 @@
# ----
# ApplyTransformsToPoints, applied to an CSV file, transforms coordinates
# using provided transform (or a set of transforms).
-#
+#
# Examples
# --------
-#
+#
# >>> from nipype.interfaces.ants import ApplyTransforms
# >>> at = ApplyTransformsToPoints()
# >>> at.inputs.dimension = 3
@@ -19,9 +19,9 @@
# >>> at.inputs.invert_transform_flags = [False, False]
# >>> at.cmdline
# 'antsApplyTransformsToPoints --dimensionality 3 --input moving.csv --output moving_transformed.csv --transform [ trans.mat, 0 ] --transform [ ants_Warp.nii.gz, 0 ]'
-#
-#
-#
+#
+#
+#
task_name: ApplyTransformsToPoints
nipype_name: ApplyTransformsToPoints
nipype_module: nipype.interfaces.ants.resampling
@@ -40,6 +40,9 @@ inputs:
# type=file|default=: Currently, the only input supported is a csv file with columns including x,y (2D), x,y,z (3D) or x,y,z,t,label (4D) column headers. The points should be defined in physical space. If in doubt how to convert coordinates from your files to the space required by antsApplyTransformsToPoints try creating/drawing a simple label volume with only one voxel set to 1 and all others set to 0. Write down the voxel coordinates. Then use ImageMaths LabelStats to find out what coordinates for this voxel antsApplyTransformsToPoints is expecting.
transforms: datascience/text-matrix+list-of
# type=list|default=[]: transforms that will be applied to the points
+ callable_defaults:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set as the `default` method of input fields
metadata:
# dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
outputs:
@@ -85,15 +88,15 @@ tests:
environ:
# type=dict|default={}: Environment variables
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -111,15 +114,15 @@ tests:
invert_transform_flags: '[False, False]'
# type=list|default=[]: list indicating if a transform should be reversed
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -134,14 +137,14 @@ doctests:
# '.mock()' method of the corresponding class is used instead.
dimension: '3'
# type=enum|default=2|allowed[2,3,4]: This option forces the image to be treated as a specified-dimensional image. If not specified, antsWarp tries to infer the dimensionality from the input image.
- input_file:
+ input_file: '"moving.csv"'
# type=file|default=: Currently, the only input supported is a csv file with columns including x,y (2D), x,y,z (3D) or x,y,z,t,label (4D) column headers. The points should be defined in physical space. If in doubt how to convert coordinates from your files to the space required by antsApplyTransformsToPoints try creating/drawing a simple label volume with only one voxel set to 1 and all others set to 0. Write down the voxel coordinates. Then use ImageMaths LabelStats to find out what coordinates for this voxel antsApplyTransformsToPoints is expecting.
- transforms:
+ transforms: '["trans.mat", "ants_Warp.nii.gz"]'
# type=list|default=[]: transforms that will be applied to the points
invert_transform_flags: '[False, False]'
# type=list|default=[]: list indicating if a transform should be reversed
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
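For reference, the doctest values above correspond to the single example in the upstream Nipype docstring (which, as an aside, imports `ApplyTransforms` but instantiates `ApplyTransformsToPoints`); restated as a plain script:

from nipype.interfaces.ants import ApplyTransformsToPoints

at = ApplyTransformsToPoints()
at.inputs.dimension = 3
at.inputs.input_file = "moving.csv"
at.inputs.transforms = ["trans.mat", "ants_Warp.nii.gz"]
at.inputs.invert_transform_flags = [False, False]
print(at.cmdline)
# 'antsApplyTransformsToPoints --dimensionality 3 --input moving.csv --output moving_transformed.csv
#  --transform [ trans.mat, 0 ] --transform [ ants_Warp.nii.gz, 0 ]'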
diff --git a/nipype-auto-conv/specs/apply_transforms_to_points_callables.py b/nipype-auto-conv/specs/apply_transforms_to_points_callables.py
index de371e7..78f8d13 100644
--- a/nipype-auto-conv/specs/apply_transforms_to_points_callables.py
+++ b/nipype-auto-conv/specs/apply_transforms_to_points_callables.py
@@ -1 +1,203 @@
-"""Module to put any functions that are referred to in ApplyTransformsToPoints.yaml"""
+"""Module to put any functions that are referred to in the "callables" section of ApplyTransformsToPoints.yaml"""
+
+import attrs
+import logging
+import os
+import os.path as op
+
+
+def output_file_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["output_file"]
+
+
+iflogger = logging.getLogger("nipype.interface")
+
+
+# Original source at L809 of /interfaces/base/core.py
+def _filename_from_source(
+ name, chain=None, inputs=None, stdout=None, stderr=None, output_dir=None
+):
+ if chain is None:
+ chain = []
+
+ trait_spec = inputs.trait(name)
+ retval = getattr(inputs, name)
+ source_ext = None
+ if (retval is attrs.NOTHING) or "%s" in retval:
+ if not trait_spec.name_source:
+ return retval
+
+ # Do not generate filename when excluded by other inputs
+ if any(
+ (getattr(inputs, field) is not attrs.NOTHING)
+ for field in trait_spec.xor or ()
+ ):
+ return retval
+
+ # Do not generate filename when required fields are missing
+ if not all(
+ (getattr(inputs, field) is not attrs.NOTHING)
+ for field in trait_spec.requires or ()
+ ):
+ return retval
+
+ if (retval is not attrs.NOTHING) and "%s" in retval:
+ name_template = retval
+ else:
+ name_template = trait_spec.name_template
+ if not name_template:
+ name_template = "%s_generated"
+
+ ns = trait_spec.name_source
+ while isinstance(ns, (list, tuple)):
+ if len(ns) > 1:
+ iflogger.warning("Only one name_source per trait is allowed")
+ ns = ns[0]
+
+ if not isinstance(ns, (str, bytes)):
+ raise ValueError(
+ "name_source of '{}' trait should be an input trait "
+ "name, but a type {} object was found".format(name, type(ns))
+ )
+
+ if getattr(inputs, ns) is not attrs.NOTHING:
+ name_source = ns
+ source = getattr(inputs, name_source)
+ while isinstance(source, list):
+ source = source[0]
+
+ # special treatment for files
+ try:
+ _, base, source_ext = split_filename(source)
+ except (AttributeError, TypeError):
+ base = source
+ else:
+ if name in chain:
+ raise NipypeInterfaceError("Mutually pointing name_sources")
+
+ chain.append(name)
+ base = _filename_from_source(
+ ns,
+ chain,
+ inputs=inputs,
+ stdout=stdout,
+ stderr=stderr,
+ output_dir=output_dir,
+ )
+ if base is not attrs.NOTHING:
+ _, _, source_ext = split_filename(base)
+ else:
+ # Do not generate filename when required fields are missing
+ return retval
+
+ chain = None
+ retval = name_template % base
+ _, _, ext = split_filename(retval)
+ if trait_spec.keep_extension and (ext or source_ext):
+ if (ext is None or not ext) and source_ext:
+ retval = retval + source_ext
+ else:
+ retval = _overload_extension(
+ retval,
+ name,
+ inputs=inputs,
+ stdout=stdout,
+ stderr=stderr,
+ output_dir=output_dir,
+ )
+ return retval
+
+
+# Original source at L885 of /interfaces/base/core.py
+def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None):
+ raise NotImplementedError
+
+
+# Original source at L891 of /interfaces/base/core.py
+def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None):
+ metadata = dict(name_source=lambda t: t is not None)
+ traits = inputs.traits(**metadata)
+ if traits:
+ outputs = {}
+ for name, trait_spec in list(traits.items()):
+ out_name = name
+ if trait_spec.output_name is not None:
+ out_name = trait_spec.output_name
+ fname = _filename_from_source(
+ name, inputs=inputs, stdout=stdout, stderr=stderr, output_dir=output_dir
+ )
+ if fname is not attrs.NOTHING:
+ outputs[out_name] = os.path.abspath(fname)
+ return outputs
+
+
+# Original source at L888 of /interfaces/base/core.py
+def _overload_extension(
+ value, name=None, inputs=None, stdout=None, stderr=None, output_dir=None
+):
+ return value
+
+
+# Original source at L58 of /utils/filemanip.py
+def split_filename(fname):
+ """Split a filename into parts: path, base filename and extension.
+
+ Parameters
+ ----------
+ fname : str
+ file or path name
+
+ Returns
+ -------
+ pth : str
+ base path from fname
+ fname : str
+ filename from fname, without extension
+ ext : str
+ file extension from fname
+
+ Examples
+ --------
+ >>> from nipype.utils.filemanip import split_filename
+ >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz')
+ >>> pth
+ '/home/data'
+
+ >>> fname
+ 'subject'
+
+ >>> ext
+ '.nii.gz'
+
+ """
+
+ special_extensions = [".nii.gz", ".tar.gz", ".niml.dset"]
+
+ pth = op.dirname(fname)
+ fname = op.basename(fname)
+
+ ext = None
+ for special_ext in special_extensions:
+ ext_len = len(special_ext)
+ if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()):
+ ext = fname[-ext_len:]
+ fname = fname[:-ext_len]
+ break
+ if not ext:
+ fname, ext = op.splitext(fname)
+
+ return pth, fname, ext
+
+
+# Original source at L125 of /interfaces/base/support.py
+class NipypeInterfaceError(Exception):
+ """Custom error for interfaces"""
+
+ def __init__(self, value):
+ self.value = value
+
+ def __str__(self):
+ return "{}".format(self.value)
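The `_filename_from_source` machinery above reproduces Nipype's `name_source`/`name_template` rule: when `output_file` is unset, it is derived from `input_file` by substituting that file's base name into a template. A toy restatement of just that rule, independent of the traits plumbing (the template string is inferred from the doctest command line, not taken from this spec):

# Toy restatement of the name_source/name_template rule: drop the source
# file's extension and substitute its base name into the template.
def derive_output_name(input_file, name_template="%s_transformed.csv"):
    _, base, _ = split_filename(input_file)  # helper vendored above
    return name_template % base

print(derive_output_name("moving.csv"))  # -> 'moving_transformed.csv'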
diff --git a/nipype-auto-conv/specs/atropos.yaml b/nipype-auto-conv/specs/atropos.yaml
index 5c596a5..1dd546e 100644
--- a/nipype-auto-conv/specs/atropos.yaml
+++ b/nipype-auto-conv/specs/atropos.yaml
@@ -5,15 +5,15 @@
#
# Docs
# ----
-#
+#
# A multivariate n-class segmentation algorithm.
-#
+#
# A finite mixture modeling (FMM) segmentation approach with possibilities for
# specifying prior constraints. These prior constraints include the specification
# of a prior label image, prior probability images (one for each class), and/or an
# MRF prior to enforce spatial smoothing of the labels. Similar algorithms include
# FAST and SPM.
-#
+#
# Examples
# --------
# >>> from nipype.interfaces.ants import Atropos
@@ -30,7 +30,7 @@
# --likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06]
# --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1]
# --use-random-seed 1'
-#
+#
# >>> at = Atropos(
# ... dimension=3, intensity_images='structural.nii', mask_image='mask.nii',
# ... number_of_tissue_classes=2, likelihood_model='Gaussian', save_posteriors=True,
@@ -45,7 +45,7 @@
# --likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06]
# --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1]
# --use-random-seed 1'
-#
+#
# >>> at = Atropos(
# ... dimension=3, intensity_images='structural.nii', mask_image='mask.nii',
# ... number_of_tissue_classes=2, likelihood_model='Gaussian', save_posteriors=True,
@@ -63,7 +63,7 @@
# --mrf [0.2,1x1x1] --convergence [5,1e-06]
# --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz]
# --posterior-formulation Socrates[1] --use-random-seed 1'
-#
+#
# >>> at = Atropos(
# ... dimension=3, intensity_images='structural.nii', mask_image='mask.nii',
# ... number_of_tissue_classes=2, likelihood_model='Gaussian', save_posteriors=True,
@@ -80,8 +80,8 @@
# --likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06]
# --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1]
# --use-random-seed 1'
-#
-#
+#
+#
task_name: Atropos
nipype_name: Atropos
nipype_module: nipype.interfaces.ants.segmentation
@@ -97,9 +97,16 @@ inputs:
# for file types, where specifying the format also specifies the file that will be
# passed to the field in the automatically generated unittests.
intensity_images: medimage/nifti1+list-of
- # type=inputmultiobject|default=[]:
+ # type=inputmultiobject|default=[]:
mask_image: medimage/nifti1
- # type=file|default=:
+ # type=file|default=:
+ out_classified_image_name: Path
+ # type=file|default=:
+ callable_defaults:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set as the `default` method of input fields
+ out_classified_image_name: out_classified_image_name_default
+ # type=file|default=:
metadata:
# dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
outputs:
@@ -114,14 +121,14 @@ outputs:
# for file types, where specifying the format also specifies the file that will be
# passed to the field in the automatically generated unittests.
classified_image: generic/file
- # type=file:
+ # type=file:
+ posteriors: generic/file+list-of
+ # type=outputmultiobject:
callables:
# dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
# to set to the `callable` attribute of output fields
templates:
# dict[str, str] - `output_file_template` values to be provided to output fields
- out_classified_image_name: out_classified_image_name
- # type=file|default=:
requirements:
# dict[str, list[str]] - input fields that are required to be provided for the output field to be present
tests:
@@ -131,47 +138,47 @@ tests:
dimension:
# type=enum|default=3|allowed[2,3,4]: image dimension (2, 3, or 4)
intensity_images:
- # type=inputmultiobject|default=[]:
+ # type=inputmultiobject|default=[]:
mask_image:
- # type=file|default=:
+ # type=file|default=:
initialization:
- # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']:
+ # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']:
kmeans_init_centers:
- # type=list|default=[]:
+ # type=list|default=[]:
prior_image:
# type=traitcompound|default=None: either a string pattern (e.g., 'prior%02d.nii') or an existing vector-image file.
number_of_tissue_classes:
- # type=int|default=0:
+ # type=int|default=0:
prior_weighting:
- # type=float|default=0.0:
+ # type=float|default=0.0:
prior_probability_threshold:
- # type=float|default=0.0:
+ # type=float|default=0.0:
likelihood_model:
- # type=str|default='':
+ # type=str|default='':
mrf_smoothing_factor:
- # type=float|default=0.0:
+ # type=float|default=0.0:
mrf_radius:
- # type=list|default=[]:
+ # type=list|default=[]:
icm_use_synchronous_update:
- # type=bool|default=False:
+ # type=bool|default=False:
maximum_number_of_icm_terations:
- # type=int|default=0:
+ # type=int|default=0:
n_iterations:
- # type=int|default=0:
+ # type=int|default=0:
convergence_threshold:
- # type=float|default=0.0:
+ # type=float|default=0.0:
posterior_formulation:
- # type=str|default='':
+ # type=str|default='':
use_random_seed:
# type=bool|default=True: use random seed value over constant
use_mixture_model_proportions:
- # type=bool|default=False:
+ # type=bool|default=False:
out_classified_image_name:
- # type=file|default=:
+ # type=file|default=:
save_posteriors:
- # type=bool|default=False:
+ # type=bool|default=False:
output_posteriors_name_template:
- # type=str|default='POSTERIOR_%02d.nii.gz':
+ # type=str|default='POSTERIOR_%02d.nii.gz':
num_threads:
# type=int|default=1: Number of ITK threads to use
args:
@@ -179,15 +186,15 @@ tests:
environ:
# type=dict|default={}: Environment variables
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -197,45 +204,45 @@ tests:
# dict[str, str] - values to provide to inputs fields in the task initialisation
# (if not specified, will try to choose a sensible value)
initialization: '"Random"'
- # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']:
+ # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']:
dimension: '3'
# type=enum|default=3|allowed[2,3,4]: image dimension (2, 3, or 4)
intensity_images:
- # type=inputmultiobject|default=[]:
+ # type=inputmultiobject|default=[]:
mask_image:
- # type=file|default=:
+ # type=file|default=:
number_of_tissue_classes: '2'
- # type=int|default=0:
+ # type=int|default=0:
likelihood_model: '"Gaussian"'
- # type=str|default='':
+ # type=str|default='':
save_posteriors: 'True'
- # type=bool|default=False:
+ # type=bool|default=False:
mrf_smoothing_factor: '0.2'
- # type=float|default=0.0:
+ # type=float|default=0.0:
mrf_radius: '[1, 1, 1]'
- # type=list|default=[]:
+ # type=list|default=[]:
icm_use_synchronous_update: 'True'
- # type=bool|default=False:
+ # type=bool|default=False:
maximum_number_of_icm_terations: '1'
- # type=int|default=0:
+ # type=int|default=0:
n_iterations: '5'
- # type=int|default=0:
+ # type=int|default=0:
convergence_threshold: '0.000001'
- # type=float|default=0.0:
+ # type=float|default=0.0:
posterior_formulation: '"Socrates"'
- # type=str|default='':
+ # type=str|default='':
use_mixture_model_proportions: 'True'
- # type=bool|default=False:
+ # type=bool|default=False:
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -245,47 +252,47 @@ tests:
# dict[str, str] - values to provide to inputs fields in the task initialisation
# (if not specified, will try to choose a sensible value)
initialization: '"KMeans"'
- # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']:
+ # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']:
kmeans_init_centers: '[100, 200]'
- # type=list|default=[]:
+ # type=list|default=[]:
dimension: '3'
# type=enum|default=3|allowed[2,3,4]: image dimension (2, 3, or 4)
intensity_images:
- # type=inputmultiobject|default=[]:
+ # type=inputmultiobject|default=[]:
mask_image:
- # type=file|default=:
+ # type=file|default=:
number_of_tissue_classes: '2'
- # type=int|default=0:
+ # type=int|default=0:
likelihood_model: '"Gaussian"'
- # type=str|default='':
+ # type=str|default='':
save_posteriors: 'True'
- # type=bool|default=False:
+ # type=bool|default=False:
mrf_smoothing_factor: '0.2'
- # type=float|default=0.0:
+ # type=float|default=0.0:
mrf_radius: '[1, 1, 1]'
- # type=list|default=[]:
+ # type=list|default=[]:
icm_use_synchronous_update: 'True'
- # type=bool|default=False:
+ # type=bool|default=False:
maximum_number_of_icm_terations: '1'
- # type=int|default=0:
+ # type=int|default=0:
n_iterations: '5'
- # type=int|default=0:
+ # type=int|default=0:
convergence_threshold: '0.000001'
- # type=float|default=0.0:
+ # type=float|default=0.0:
posterior_formulation: '"Socrates"'
- # type=str|default='':
+ # type=str|default='':
use_mixture_model_proportions: 'True'
- # type=bool|default=False:
+ # type=bool|default=False:
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -295,51 +302,51 @@ tests:
# dict[str, str] - values to provide to inputs fields in the task initialisation
# (if not specified, will try to choose a sensible value)
initialization: '"PriorProbabilityImages"'
- # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']:
+ # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']:
prior_image: '"BrainSegmentationPrior%02d.nii.gz"'
# type=traitcompound|default=None: either a string pattern (e.g., 'prior%02d.nii') or an existing vector-image file.
prior_weighting: '0.8'
- # type=float|default=0.0:
+ # type=float|default=0.0:
prior_probability_threshold: '0.0000001'
- # type=float|default=0.0:
+ # type=float|default=0.0:
dimension: '3'
# type=enum|default=3|allowed[2,3,4]: image dimension (2, 3, or 4)
intensity_images:
- # type=inputmultiobject|default=[]:
+ # type=inputmultiobject|default=[]:
mask_image:
- # type=file|default=:
+ # type=file|default=:
number_of_tissue_classes: '2'
- # type=int|default=0:
+ # type=int|default=0:
likelihood_model: '"Gaussian"'
- # type=str|default='':
+ # type=str|default='':
save_posteriors: 'True'
- # type=bool|default=False:
+ # type=bool|default=False:
mrf_smoothing_factor: '0.2'
- # type=float|default=0.0:
+ # type=float|default=0.0:
mrf_radius: '[1, 1, 1]'
- # type=list|default=[]:
+ # type=list|default=[]:
icm_use_synchronous_update: 'True'
- # type=bool|default=False:
+ # type=bool|default=False:
maximum_number_of_icm_terations: '1'
- # type=int|default=0:
+ # type=int|default=0:
n_iterations: '5'
- # type=int|default=0:
+ # type=int|default=0:
convergence_threshold: '0.000001'
- # type=float|default=0.0:
+ # type=float|default=0.0:
posterior_formulation: '"Socrates"'
- # type=str|default='':
+ # type=str|default='':
use_mixture_model_proportions: 'True'
- # type=bool|default=False:
+ # type=bool|default=False:
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -349,49 +356,49 @@ tests:
# dict[str, str] - values to provide to inputs fields in the task initialisation
# (if not specified, will try to choose a sensible value)
initialization: '"PriorLabelImage"'
- # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']:
+ # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']:
prior_image: '"segmentation0.nii.gz"'
# type=traitcompound|default=None: either a string pattern (e.g., 'prior%02d.nii') or an existing vector-image file.
number_of_tissue_classes: '2'
- # type=int|default=0:
+ # type=int|default=0:
prior_weighting: '0.8'
- # type=float|default=0.0:
+ # type=float|default=0.0:
dimension: '3'
# type=enum|default=3|allowed[2,3,4]: image dimension (2, 3, or 4)
intensity_images:
- # type=inputmultiobject|default=[]:
+ # type=inputmultiobject|default=[]:
mask_image:
- # type=file|default=:
+ # type=file|default=:
likelihood_model: '"Gaussian"'
- # type=str|default='':
+ # type=str|default='':
save_posteriors: 'True'
- # type=bool|default=False:
+ # type=bool|default=False:
mrf_smoothing_factor: '0.2'
- # type=float|default=0.0:
+ # type=float|default=0.0:
mrf_radius: '[1, 1, 1]'
- # type=list|default=[]:
+ # type=list|default=[]:
icm_use_synchronous_update: 'True'
- # type=bool|default=False:
+ # type=bool|default=False:
maximum_number_of_icm_terations: '1'
- # type=int|default=0:
+ # type=int|default=0:
n_iterations: '5'
- # type=int|default=0:
+ # type=int|default=0:
convergence_threshold: '0.000001'
- # type=float|default=0.0:
+ # type=float|default=0.0:
posterior_formulation: '"Socrates"'
- # type=str|default='':
+ # type=str|default='':
use_mixture_model_proportions: 'True'
- # type=bool|default=False:
+ # type=bool|default=False:
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -405,37 +412,37 @@ doctests:
# If the field is of file-format type and the value is None, then the
# '.mock()' method of the corresponding class is used instead.
initialization: '"Random"'
- # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']:
+ # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']:
dimension: '3'
# type=enum|default=3|allowed[2,3,4]: image dimension (2, 3, or 4)
- intensity_images:
- # type=inputmultiobject|default=[]:
- mask_image:
- # type=file|default=:
+ intensity_images: '"structural.nii"'
+ # type=inputmultiobject|default=[]:
+ mask_image: '"mask.nii"'
+ # type=file|default=:
number_of_tissue_classes: '2'
- # type=int|default=0:
+ # type=int|default=0:
likelihood_model: '"Gaussian"'
- # type=str|default='':
+ # type=str|default='':
save_posteriors: 'True'
- # type=bool|default=False:
+ # type=bool|default=False:
mrf_smoothing_factor: '0.2'
- # type=float|default=0.0:
+ # type=float|default=0.0:
mrf_radius: '[1, 1, 1]'
- # type=list|default=[]:
+ # type=list|default=[]:
icm_use_synchronous_update: 'True'
- # type=bool|default=False:
+ # type=bool|default=False:
maximum_number_of_icm_terations: '1'
- # type=int|default=0:
+ # type=int|default=0:
n_iterations: '5'
- # type=int|default=0:
+ # type=int|default=0:
convergence_threshold: '0.000001'
- # type=float|default=0.0:
+ # type=float|default=0.0:
posterior_formulation: '"Socrates"'
- # type=str|default='':
+ # type=str|default='':
use_mixture_model_proportions: 'True'
- # type=bool|default=False:
+ # type=bool|default=False:
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
@@ -446,39 +453,39 @@ doctests:
# If the field is of file-format type and the value is None, then the
# '.mock()' method of the corresponding class is used instead.
initialization: '"KMeans"'
- # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']:
+ # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']:
kmeans_init_centers: '[100, 200]'
- # type=list|default=[]:
+ # type=list|default=[]:
dimension: '3'
# type=enum|default=3|allowed[2,3,4]: image dimension (2, 3, or 4)
- intensity_images:
- # type=inputmultiobject|default=[]:
- mask_image:
- # type=file|default=:
+ intensity_images: '"structural.nii"'
+ # type=inputmultiobject|default=[]:
+ mask_image: '"mask.nii"'
+ # type=file|default=:
number_of_tissue_classes: '2'
- # type=int|default=0:
+ # type=int|default=0:
likelihood_model: '"Gaussian"'
- # type=str|default='':
+ # type=str|default='':
save_posteriors: 'True'
- # type=bool|default=False:
+ # type=bool|default=False:
mrf_smoothing_factor: '0.2'
- # type=float|default=0.0:
+ # type=float|default=0.0:
mrf_radius: '[1, 1, 1]'
- # type=list|default=[]:
+ # type=list|default=[]:
icm_use_synchronous_update: 'True'
- # type=bool|default=False:
+ # type=bool|default=False:
maximum_number_of_icm_terations: '1'
- # type=int|default=0:
+ # type=int|default=0:
n_iterations: '5'
- # type=int|default=0:
+ # type=int|default=0:
convergence_threshold: '0.000001'
- # type=float|default=0.0:
+ # type=float|default=0.0:
posterior_formulation: '"Socrates"'
- # type=str|default='':
+ # type=str|default='':
use_mixture_model_proportions: 'True'
- # type=bool|default=False:
+ # type=bool|default=False:
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
@@ -489,43 +496,43 @@ doctests:
# If the field is of file-format type and the value is None, then the
# '.mock()' method of the corresponding class is used instead.
initialization: '"PriorProbabilityImages"'
- # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']:
+ # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']:
prior_image: '"BrainSegmentationPrior%02d.nii.gz"'
# type=traitcompound|default=None: either a string pattern (e.g., 'prior%02d.nii') or an existing vector-image file.
prior_weighting: '0.8'
- # type=float|default=0.0:
+ # type=float|default=0.0:
prior_probability_threshold: '0.0000001'
- # type=float|default=0.0:
+ # type=float|default=0.0:
dimension: '3'
# type=enum|default=3|allowed[2,3,4]: image dimension (2, 3, or 4)
- intensity_images:
- # type=inputmultiobject|default=[]:
- mask_image:
- # type=file|default=:
+ intensity_images: '"structural.nii"'
+ # type=inputmultiobject|default=[]:
+ mask_image: '"mask.nii"'
+ # type=file|default=:
number_of_tissue_classes: '2'
- # type=int|default=0:
+ # type=int|default=0:
likelihood_model: '"Gaussian"'
- # type=str|default='':
+ # type=str|default='':
save_posteriors: 'True'
- # type=bool|default=False:
+ # type=bool|default=False:
mrf_smoothing_factor: '0.2'
- # type=float|default=0.0:
+ # type=float|default=0.0:
mrf_radius: '[1, 1, 1]'
- # type=list|default=[]:
+ # type=list|default=[]:
icm_use_synchronous_update: 'True'
- # type=bool|default=False:
+ # type=bool|default=False:
maximum_number_of_icm_terations: '1'
- # type=int|default=0:
+ # type=int|default=0:
n_iterations: '5'
- # type=int|default=0:
+ # type=int|default=0:
convergence_threshold: '0.000001'
- # type=float|default=0.0:
+ # type=float|default=0.0:
posterior_formulation: '"Socrates"'
- # type=str|default='':
+ # type=str|default='':
use_mixture_model_proportions: 'True'
- # type=bool|default=False:
+ # type=bool|default=False:
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
@@ -536,41 +543,41 @@ doctests:
# If the field is of file-format type and the value is None, then the
# '.mock()' method of the corresponding class is used instead.
initialization: '"PriorLabelImage"'
- # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']:
+ # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']:
prior_image: '"segmentation0.nii.gz"'
# type=traitcompound|default=None: either a string pattern (e.g., 'prior%02d.nii') or an existing vector-image file.
number_of_tissue_classes: '2'
- # type=int|default=0:
+ # type=int|default=0:
prior_weighting: '0.8'
- # type=float|default=0.0:
+ # type=float|default=0.0:
dimension: '3'
# type=enum|default=3|allowed[2,3,4]: image dimension (2, 3, or 4)
- intensity_images:
- # type=inputmultiobject|default=[]:
- mask_image:
- # type=file|default=:
+ intensity_images: '"structural.nii"'
+ # type=inputmultiobject|default=[]:
+ mask_image: '"mask.nii"'
+ # type=file|default=:
likelihood_model: '"Gaussian"'
- # type=str|default='':
+ # type=str|default='':
save_posteriors: 'True'
- # type=bool|default=False:
+ # type=bool|default=False:
mrf_smoothing_factor: '0.2'
- # type=float|default=0.0:
+ # type=float|default=0.0:
mrf_radius: '[1, 1, 1]'
- # type=list|default=[]:
+ # type=list|default=[]:
icm_use_synchronous_update: 'True'
- # type=bool|default=False:
+ # type=bool|default=False:
maximum_number_of_icm_terations: '1'
- # type=int|default=0:
+ # type=int|default=0:
n_iterations: '5'
- # type=int|default=0:
+ # type=int|default=0:
convergence_threshold: '0.000001'
- # type=float|default=0.0:
+ # type=float|default=0.0:
posterior_formulation: '"Socrates"'
- # type=str|default='':
+ # type=str|default='':
use_mixture_model_proportions: 'True'
- # type=bool|default=False:
+ # type=bool|default=False:
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
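The first set of test/doctest values above mirrors the upstream Nipype example; spelled out below, with trait names kept exactly as Nipype defines them (including `maximum_number_of_icm_terations`) and the command line abbreviated to the fragments quoted in the docstring:

from nipype.interfaces.ants import Atropos

at = Atropos(
    dimension=3,
    intensity_images="structural.nii",
    mask_image="mask.nii",
    initialization="Random",
    number_of_tissue_classes=2,
    likelihood_model="Gaussian",
    mrf_smoothing_factor=0.2,
    mrf_radius=[1, 1, 1],
    icm_use_synchronous_update=True,
    maximum_number_of_icm_terations=1,  # spelling follows the Nipype trait name
    n_iterations=5,
    convergence_threshold=0.000001,
    posterior_formulation="Socrates",
    use_mixture_model_proportions=True,
    save_posteriors=True,
)
print(at.cmdline)
# '... --likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06]
#  --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1]
#  --use-random-seed 1'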
diff --git a/nipype-auto-conv/specs/atropos_callables.py b/nipype-auto-conv/specs/atropos_callables.py
index 93f87cd..e2b8327 100644
--- a/nipype-auto-conv/specs/atropos_callables.py
+++ b/nipype-auto-conv/specs/atropos_callables.py
@@ -1 +1,105 @@
-"""Module to put any functions that are referred to in Atropos.yaml"""
+"""Module to put any functions that are referred to in the "callables" section of Atropos.yaml"""
+
+import attrs
+import os
+import os.path as op
+
+
+def out_classified_image_name_default(inputs):
+ return _gen_filename("out_classified_image_name", inputs=inputs)
+
+
+def classified_image_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["classified_image"]
+
+
+def posteriors_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["posteriors"]
+
+
+# Original source at L232 of /interfaces/ants/segmentation.py
+def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None):
+ if name == "out_classified_image_name":
+ output = inputs.out_classified_image_name
+ if output is attrs.NOTHING:
+ _, name, ext = split_filename(inputs.intensity_images[0])
+ output = name + "_labeled" + ext
+ return output
+
+
+# Original source at L240 of /interfaces/ants/segmentation.py
+def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None):
+ outputs = {}
+ outputs["classified_image"] = os.path.abspath(
+ _gen_filename(
+ "out_classified_image_name",
+ inputs=inputs,
+ stdout=stdout,
+ stderr=stderr,
+ output_dir=output_dir,
+ )
+ )
+ if (inputs.save_posteriors is not attrs.NOTHING) and inputs.save_posteriors:
+ outputs["posteriors"] = []
+ for i in range(inputs.number_of_tissue_classes):
+ outputs["posteriors"].append(
+ os.path.abspath(inputs.output_posteriors_name_template % (i + 1))
+ )
+ return outputs
+
+
+# Original source at L58 of /utils/filemanip.py
+def split_filename(fname):
+ """Split a filename into parts: path, base filename and extension.
+
+ Parameters
+ ----------
+ fname : str
+ file or path name
+
+ Returns
+ -------
+ pth : str
+ base path from fname
+ fname : str
+ filename from fname, without extension
+ ext : str
+ file extension from fname
+
+ Examples
+ --------
+ >>> from nipype.utils.filemanip import split_filename
+ >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz')
+ >>> pth
+ '/home/data'
+
+ >>> fname
+ 'subject'
+
+ >>> ext
+ '.nii.gz'
+
+ """
+
+ special_extensions = [".nii.gz", ".tar.gz", ".niml.dset"]
+
+ pth = op.dirname(fname)
+ fname = op.basename(fname)
+
+ ext = None
+ for special_ext in special_extensions:
+ ext_len = len(special_ext)
+ if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()):
+ ext = fname[-ext_len:]
+ fname = fname[:-ext_len]
+ break
+ if not ext:
+ fname, ext = op.splitext(fname)
+
+ return pth, fname, ext
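The callables above expect a nipype/attrs-style inputs object, but for a quick sanity check a plain namespace with the same attribute names is enough. This is a minimal sketch, assuming the functions above are in scope; the image name and the namespace contents are made up for illustration and are not part of the generated module.

```python
# Illustrative only: SimpleNamespace stands in for the attrs-based inputs object.
from types import SimpleNamespace
import attrs  # provides the attrs.NOTHING sentinel checked in _gen_filename

inputs = SimpleNamespace(
    out_classified_image_name=attrs.NOTHING,   # unset, so the name is derived
    intensity_images=["structural.nii.gz"],    # hypothetical input image
    save_posteriors=False,
    number_of_tissue_classes=2,
    output_posteriors_name_template="POSTERIOR_%02d.nii.gz",
)

# Resolves to an absolute path ending in "structural_labeled.nii.gz",
# mirroring nipype's _gen_filename behaviour reproduced above.
print(classified_image_callable(output_dir=".", inputs=inputs, stdout="", stderr=""))
```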
diff --git a/nipype-auto-conv/specs/average_affine_transform.yaml b/nipype-auto-conv/specs/average_affine_transform.yaml
index 914aac8..5f9b016 100644
--- a/nipype-auto-conv/specs/average_affine_transform.yaml
+++ b/nipype-auto-conv/specs/average_affine_transform.yaml
@@ -5,7 +5,7 @@
#
# Docs
# ----
-#
+#
# Examples
# --------
# >>> from nipype.interfaces.ants import AverageAffineTransform
@@ -15,8 +15,8 @@
# >>> avg.inputs.output_affine_transform = 'MYtemplatewarp.mat'
# >>> avg.cmdline
# 'AverageAffineTransform 3 MYtemplatewarp.mat trans.mat func_to_struct.mat'
-#
-#
+#
+#
task_name: AverageAffineTransform
nipype_name: AverageAffineTransform
nipype_module: nipype.interfaces.ants.utils
@@ -31,10 +31,13 @@ inputs:
# from the nipype interface, but you may want to be more specific, particularly
# for file types, where specifying the format also specifies the file that will be
# passed to the field in the automatically generated unittests.
- output_affine_transform: datascience/text-matrix
+ output_affine_transform: Path
# type=file|default=: Outputfname.txt: the name of the resulting transform.
transforms: datascience/text-matrix+list-of
# type=inputmultiobject|default=[]: transforms to average
+ callable_defaults:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set as the `default` method of input fields
metadata:
# dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
outputs:
@@ -74,15 +77,15 @@ tests:
environ:
# type=dict|default={}: Environment variables
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -95,18 +98,18 @@ tests:
# type=enum|default=3|allowed[2,3]: image dimension (2 or 3)
transforms:
# type=inputmultiobject|default=[]: transforms to average
- output_affine_transform:
+ output_affine_transform: '"MYtemplatewarp.mat"'
# type=file|default=: Outputfname.txt: the name of the resulting transform.
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -121,12 +124,12 @@ doctests:
# '.mock()' method of the corresponding class is used instead.
dimension: '3'
# type=enum|default=3|allowed[2,3]: image dimension (2 or 3)
- transforms:
+ transforms: '["trans.mat", "func_to_struct.mat"]'
# type=inputmultiobject|default=[]: transforms to average
- output_affine_transform:
+ output_affine_transform: '"MYtemplatewarp.mat"'
# type=file|default=: Outputfname.txt: the name of the resulting transform.
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
diff --git a/nipype-auto-conv/specs/average_affine_transform_callables.py b/nipype-auto-conv/specs/average_affine_transform_callables.py
index 5ee0257..ed4730a 100644
--- a/nipype-auto-conv/specs/average_affine_transform_callables.py
+++ b/nipype-auto-conv/specs/average_affine_transform_callables.py
@@ -1 +1,22 @@
-"""Module to put any functions that are referred to in AverageAffineTransform.yaml"""
+"""Module to put any functions that are referred to in the "callables" section of AverageAffineTransform.yaml"""
+
+import os
+
+
+def affine_transform_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["affine_transform"]
+
+
+# Original source at L885 of /interfaces/base/core.py
+def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None):
+ raise NotImplementedError
+
+
+# Original source at L587 of /interfaces/ants/utils.py
+def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None):
+ outputs = {}
+ outputs["affine_transform"] = os.path.abspath(inputs.output_affine_transform)
+ return outputs
diff --git a/nipype-auto-conv/specs/average_images.yaml b/nipype-auto-conv/specs/average_images.yaml
index 0169511..58f077a 100644
--- a/nipype-auto-conv/specs/average_images.yaml
+++ b/nipype-auto-conv/specs/average_images.yaml
@@ -5,7 +5,7 @@
#
# Docs
# ----
-#
+#
# Examples
# --------
# >>> from nipype.interfaces.ants import AverageImages
@@ -16,7 +16,7 @@
# >>> avg.inputs.images = ['rc1s1.nii', 'rc1s1.nii']
# >>> avg.cmdline
# 'AverageImages 3 average.nii.gz 1 rc1s1.nii rc1s1.nii'
-#
+#
task_name: AverageImages
nipype_name: AverageImages
nipype_module: nipype.interfaces.ants.utils
@@ -31,11 +31,14 @@ inputs:
# from the nipype interface, but you may want to be more specific, particularly
# for file types, where specifying the format also specifies the file that will be
# passed to the field in the automatically generated unittests.
- output_average_image: medimage/nifti-gz
- # type=file: average image file
- # type=file|default='average.nii': the name of the resulting image.
images: medimage/nifti1+list-of
# type=inputmultiobject|default=[]: image to apply transformation to (generally a coregistered functional)
+ output_average_image: Path
+ # type=file: average image file
+ # type=file|default='average.nii': the name of the resulting image.
+ callable_defaults:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set as the `default` method of input fields
metadata:
# dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
outputs:
@@ -79,15 +82,15 @@ tests:
environ:
# type=dict|default={}: Environment variables
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -98,7 +101,7 @@ tests:
# (if not specified, will try to choose a sensible value)
dimension: '3'
# type=enum|default=3|allowed[2,3]: image dimension (2 or 3)
- output_average_image:
+ output_average_image: '"average.nii.gz"'
# type=file: average image file
# type=file|default='average.nii': the name of the resulting image.
normalize: 'True'
@@ -106,15 +109,15 @@ tests:
images:
# type=inputmultiobject|default=[]: image to apply transformation to (generally a coregistered functional)
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -129,15 +132,15 @@ doctests:
# '.mock()' method of the corresponding class is used instead.
dimension: '3'
# type=enum|default=3|allowed[2,3]: image dimension (2 or 3)
- output_average_image:
+ output_average_image: '"average.nii.gz"'
# type=file: average image file
# type=file|default='average.nii': the name of the resulting image.
normalize: 'True'
# type=bool|default=False: Normalize: if true, the 2nd image is divided by its mean. This will select the largest image to average into.
- images:
+ images: '["rc1s1.nii", "rc1s1.nii"]'
# type=inputmultiobject|default=[]: image to apply transformation to (generally a coregistered functional)
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
diff --git a/nipype-auto-conv/specs/average_images_callables.py b/nipype-auto-conv/specs/average_images_callables.py
index 8d6e9a0..c0d588d 100644
--- a/nipype-auto-conv/specs/average_images_callables.py
+++ b/nipype-auto-conv/specs/average_images_callables.py
@@ -1 +1,22 @@
-"""Module to put any functions that are referred to in AverageImages.yaml"""
+"""Module to put any functions that are referred to in the "callables" section of AverageImages.yaml"""
+
+import os
+
+
+def output_average_image_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["output_average_image"]
+
+
+# Original source at L885 of /interfaces/base/core.py
+def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None):
+ raise NotImplementedError
+
+
+# Original source at L648 of /interfaces/ants/utils.py
+def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None):
+ outputs = {}
+ outputs["output_average_image"] = os.path.realpath(inputs.output_average_image)
+ return outputs
diff --git a/nipype-auto-conv/specs/brain_extraction.yaml b/nipype-auto-conv/specs/brain_extraction.yaml
index f4b8d8a..48d94c0 100644
--- a/nipype-auto-conv/specs/brain_extraction.yaml
+++ b/nipype-auto-conv/specs/brain_extraction.yaml
@@ -5,9 +5,9 @@
#
# Docs
# ----
-#
+#
# Atlas-based brain extraction.
-#
+#
# Examples
# --------
# >>> from nipype.interfaces.ants.segmentation import BrainExtraction
@@ -19,8 +19,8 @@
# >>> brainextraction.cmdline
# 'antsBrainExtraction.sh -a T1.nii.gz -m ProbabilityMaskOfStudyTemplate.nii.gz
# -e study_template.nii.gz -d 3 -s nii.gz -o highres001_'
-#
-#
+#
+#
task_name: BrainExtraction
nipype_name: BrainExtraction
nipype_module: nipype.interfaces.ants.segmentation
@@ -37,12 +37,15 @@ inputs:
# passed to the field in the automatically generated unittests.
anatomical_image: medimage/nifti-gz
# type=file|default=: Structural image, typically T1. If more than one anatomical image is specified, subsequently specified images are used during the segmentation process. However, only the first image is used in the registration of priors. Our suggestion would be to specify the T1 as the first image. Anatomical template created using e.g. LPBA40 data set with buildtemplateparallel.sh in ANTs.
- brain_template: medimage/nifti-gz
- # type=file|default=: Anatomical template created using e.g. LPBA40 data set with buildtemplateparallel.sh in ANTs.
brain_probability_mask: medimage/nifti-gz
# type=file|default=: Brain probability mask created using e.g. LPBA40 data set which have brain masks defined, and warped to anatomical template and averaged resulting in a probability image.
+ brain_template: medimage/nifti-gz
+ # type=file|default=: Anatomical template created using e.g. LPBA40 data set with buildtemplateparallel.sh in ANTs.
extraction_registration_mask: generic/file
# type=file|default=: Mask (defined in the template space) used during registration for brain extraction. To limit the metric computation to a specific region.
+ callable_defaults:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set as the `default` method of input fields
metadata:
# dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
outputs:
@@ -56,8 +59,6 @@ outputs:
# from the nipype interface, but you may want to be more specific, particularly
# for file types, where specifying the format also specifies the file that will be
# passed to the field in the automatically generated unittests.
- BrainExtractionMask: generic/file
- # type=file: brain extraction mask
BrainExtractionBrain: generic/file
# type=file: brain extraction image
BrainExtractionCSF: generic/file
@@ -65,33 +66,35 @@ outputs:
BrainExtractionGM: generic/file
# type=file: segmentation mask with only grey matter
BrainExtractionInitialAffine: generic/file
- # type=file:
+ # type=file:
BrainExtractionInitialAffineFixed: generic/file
- # type=file:
+ # type=file:
BrainExtractionInitialAffineMoving: generic/file
- # type=file:
+ # type=file:
BrainExtractionLaplacian: generic/file
- # type=file:
+ # type=file:
+ BrainExtractionMask: generic/file
+ # type=file: brain extraction mask
BrainExtractionPrior0GenericAffine: generic/file
- # type=file:
+ # type=file:
BrainExtractionPrior1InverseWarp: generic/file
- # type=file:
+ # type=file:
BrainExtractionPrior1Warp: generic/file
- # type=file:
+ # type=file:
BrainExtractionPriorWarped: generic/file
- # type=file:
+ # type=file:
BrainExtractionSegmentation: generic/file
# type=file: segmentation mask with CSF, GM, and WM
BrainExtractionTemplateLaplacian: generic/file
- # type=file:
+ # type=file:
BrainExtractionTmp: generic/file
- # type=file:
+ # type=file:
BrainExtractionWM: generic/file
# type=file: segmenration mask with only white matter
N4Corrected0: generic/file
# type=file: N4 bias field corrected image
N4Truncated0: generic/file
- # type=file:
+ # type=file:
callables:
# dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
# to set to the `callable` attribute of output fields
@@ -132,15 +135,15 @@ tests:
environ:
# type=dict|default={}: Environment variables
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -158,15 +161,15 @@ tests:
brain_probability_mask:
# type=file|default=: Brain probability mask created using e.g. LPBA40 data set which have brain masks defined, and warped to anatomical template and averaged resulting in a probability image.
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -181,14 +184,14 @@ doctests:
# '.mock()' method of the corresponding class is used instead.
dimension: '3'
# type=enum|default=3|allowed[2,3]: image dimension (2 or 3)
- anatomical_image:
+ anatomical_image: '"T1.nii.gz"'
# type=file|default=: Structural image, typically T1. If more than one anatomical image is specified, subsequently specified images are used during the segmentation process. However, only the first image is used in the registration of priors. Our suggestion would be to specify the T1 as the first image. Anatomical template created using e.g. LPBA40 data set with buildtemplateparallel.sh in ANTs.
- brain_template:
+ brain_template: '"study_template.nii.gz"'
# type=file|default=: Anatomical template created using e.g. LPBA40 data set with buildtemplateparallel.sh in ANTs.
- brain_probability_mask:
+ brain_probability_mask: '"ProbabilityMaskOfStudyTemplate.nii.gz"'
# type=file|default=: Brain probability mask created using e.g. LPBA40 data set which have brain masks defined, and warped to anatomical template and averaged resulting in a probability image.
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
diff --git a/nipype-auto-conv/specs/brain_extraction_callables.py b/nipype-auto-conv/specs/brain_extraction_callables.py
index 88a7a0b..157af1e 100644
--- a/nipype-auto-conv/specs/brain_extraction_callables.py
+++ b/nipype-auto-conv/specs/brain_extraction_callables.py
@@ -1 +1,224 @@
-"""Module to put any functions that are referred to in BrainExtraction.yaml"""
+"""Module to put any functions that are referred to in the "callables" section of BrainExtraction.yaml"""
+
+import attrs
+import os
+
+
+def BrainExtractionBrain_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["BrainExtractionBrain"]
+
+
+def BrainExtractionCSF_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["BrainExtractionCSF"]
+
+
+def BrainExtractionGM_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["BrainExtractionGM"]
+
+
+def BrainExtractionInitialAffine_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["BrainExtractionInitialAffine"]
+
+
+def BrainExtractionInitialAffineFixed_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["BrainExtractionInitialAffineFixed"]
+
+
+def BrainExtractionInitialAffineMoving_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["BrainExtractionInitialAffineMoving"]
+
+
+def BrainExtractionLaplacian_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["BrainExtractionLaplacian"]
+
+
+def BrainExtractionMask_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["BrainExtractionMask"]
+
+
+def BrainExtractionPrior0GenericAffine_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["BrainExtractionPrior0GenericAffine"]
+
+
+def BrainExtractionPrior1InverseWarp_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["BrainExtractionPrior1InverseWarp"]
+
+
+def BrainExtractionPrior1Warp_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["BrainExtractionPrior1Warp"]
+
+
+def BrainExtractionPriorWarped_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["BrainExtractionPriorWarped"]
+
+
+def BrainExtractionSegmentation_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["BrainExtractionSegmentation"]
+
+
+def BrainExtractionTemplateLaplacian_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["BrainExtractionTemplateLaplacian"]
+
+
+def BrainExtractionTmp_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["BrainExtractionTmp"]
+
+
+def BrainExtractionWM_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["BrainExtractionWM"]
+
+
+def N4Corrected0_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["N4Corrected0"]
+
+
+def N4Truncated0_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["N4Truncated0"]
+
+
+# Original source at L885 of /interfaces/base/core.py
+def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None):
+ raise NotImplementedError
+
+
+# Original source at L1031 of /interfaces/ants/segmentation.py
+def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None):
+ outputs = {}
+ outputs["BrainExtractionMask"] = os.path.join(
+ output_dir,
+ inputs.out_prefix + "BrainExtractionMask." + inputs.image_suffix,
+ )
+ outputs["BrainExtractionBrain"] = os.path.join(
+ output_dir,
+ inputs.out_prefix + "BrainExtractionBrain." + inputs.image_suffix,
+ )
+ if (
+ inputs.keep_temporary_files is not attrs.NOTHING
+ ) and inputs.keep_temporary_files != 0:
+ outputs["BrainExtractionCSF"] = os.path.join(
+ output_dir,
+ inputs.out_prefix + "BrainExtractionCSF." + inputs.image_suffix,
+ )
+ outputs["BrainExtractionGM"] = os.path.join(
+ output_dir,
+ inputs.out_prefix + "BrainExtractionGM." + inputs.image_suffix,
+ )
+ outputs["BrainExtractionInitialAffine"] = os.path.join(
+ output_dir, inputs.out_prefix + "BrainExtractionInitialAffine.mat"
+ )
+ outputs["BrainExtractionInitialAffineFixed"] = os.path.join(
+ output_dir,
+ inputs.out_prefix
+ + "BrainExtractionInitialAffineFixed."
+ + inputs.image_suffix,
+ )
+ outputs["BrainExtractionInitialAffineMoving"] = os.path.join(
+ output_dir,
+ inputs.out_prefix
+ + "BrainExtractionInitialAffineMoving."
+ + inputs.image_suffix,
+ )
+ outputs["BrainExtractionLaplacian"] = os.path.join(
+ output_dir,
+ inputs.out_prefix + "BrainExtractionLaplacian." + inputs.image_suffix,
+ )
+ outputs["BrainExtractionPrior0GenericAffine"] = os.path.join(
+ output_dir,
+ inputs.out_prefix + "BrainExtractionPrior0GenericAffine.mat",
+ )
+ outputs["BrainExtractionPrior1InverseWarp"] = os.path.join(
+ output_dir,
+ inputs.out_prefix
+ + "BrainExtractionPrior1InverseWarp."
+ + inputs.image_suffix,
+ )
+ outputs["BrainExtractionPrior1Warp"] = os.path.join(
+ output_dir,
+ inputs.out_prefix + "BrainExtractionPrior1Warp." + inputs.image_suffix,
+ )
+ outputs["BrainExtractionPriorWarped"] = os.path.join(
+ output_dir,
+ inputs.out_prefix + "BrainExtractionPriorWarped." + inputs.image_suffix,
+ )
+ outputs["BrainExtractionSegmentation"] = os.path.join(
+ output_dir,
+ inputs.out_prefix + "BrainExtractionSegmentation." + inputs.image_suffix,
+ )
+ outputs["BrainExtractionTemplateLaplacian"] = os.path.join(
+ output_dir,
+ inputs.out_prefix
+ + "BrainExtractionTemplateLaplacian."
+ + inputs.image_suffix,
+ )
+ outputs["BrainExtractionTmp"] = os.path.join(
+ output_dir,
+ inputs.out_prefix + "BrainExtractionTmp." + inputs.image_suffix,
+ )
+ outputs["BrainExtractionWM"] = os.path.join(
+ output_dir,
+ inputs.out_prefix + "BrainExtractionWM." + inputs.image_suffix,
+ )
+ outputs["N4Corrected0"] = os.path.join(
+ output_dir,
+ inputs.out_prefix + "N4Corrected0." + inputs.image_suffix,
+ )
+ outputs["N4Truncated0"] = os.path.join(
+ output_dir,
+ inputs.out_prefix + "N4Truncated0." + inputs.image_suffix,
+ )
+
+ return outputs
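As a rough check of the `out_prefix + <output name> + image_suffix` convention rebuilt in `_list_outputs` above, the sketch below feeds it a bare stand-in for the inputs object. The prefix and suffix come from the BrainExtraction doctest; everything else is hypothetical and assumes the helpers above are in scope.

```python
# Illustrative only: exercises the output-naming convention with a stand-in inputs object.
from types import SimpleNamespace

inputs = SimpleNamespace(
    out_prefix="highres001_",  # prefix used in the brain_extraction.yaml doctest
    image_suffix="nii.gz",
    keep_temporary_files=0,    # temporary outputs are skipped when this is 0
)

outputs = _list_outputs(inputs=inputs, output_dir="/tmp/antsBrainExtraction")
print(outputs["BrainExtractionMask"])   # /tmp/antsBrainExtraction/highres001_BrainExtractionMask.nii.gz
print(outputs["BrainExtractionBrain"])  # /tmp/antsBrainExtraction/highres001_BrainExtractionBrain.nii.gz
```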
diff --git a/nipype-auto-conv/specs/buildtemplateparallel.yaml b/nipype-auto-conv/specs/buildtemplateparallel.yaml
index af036f8..e03adaf 100644
--- a/nipype-auto-conv/specs/buildtemplateparallel.yaml
+++ b/nipype-auto-conv/specs/buildtemplateparallel.yaml
@@ -6,22 +6,22 @@
# Docs
# ----
# Generate a optimal average template
-#
+#
# .. warning::
-#
+#
# This can take a VERY long time to complete
-#
+#
# Examples
# --------
-#
+#
# >>> from nipype.interfaces.ants.legacy import buildtemplateparallel
# >>> tmpl = buildtemplateparallel()
# >>> tmpl.inputs.in_files = ['T1.nii', 'structural.nii']
# >>> tmpl.inputs.max_iterations = [30, 90, 20]
# >>> tmpl.cmdline
# 'buildtemplateparallel.sh -d 3 -i 4 -m 30x90x20 -o antsTMPL_ -c 0 -t GR T1.nii structural.nii'
-#
-#
+#
+#
task_name: buildtemplateparallel
nipype_name: buildtemplateparallel
nipype_module: nipype.interfaces.ants.legacy
@@ -38,6 +38,9 @@ inputs:
# passed to the field in the automatically generated unittests.
in_files: medimage/nifti1+list-of
# type=list|default=[]: list of images to generate template from
+ callable_defaults:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set as the `default` method of input fields
metadata:
# dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
outputs:
@@ -53,6 +56,10 @@ outputs:
# passed to the field in the automatically generated unittests.
final_template_file: generic/file
# type=file: final ANTS template
+ subject_outfiles: generic/file+list-of
+ # type=outputmultiobject: Outputs for each input image. Includes warp field, inverse warp, Affine, original image (repaired) and warped image (deformed)
+ template_files: generic/file+list-of
+ # type=outputmultiobject: Templates from different stages of iteration
callables:
# dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
# to set to the `callable` attribute of output fields
@@ -97,15 +104,15 @@ tests:
environ:
# type=dict|default={}: Environment variables
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -119,15 +126,15 @@ tests:
max_iterations: '[30, 90, 20]'
# type=list|default=[]: maximum number of iterations (must be list of integers in the form [J,K,L...]: J = coarsest resolution iterations, K = middle resolution iterations, L = fine resolution iterations
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -140,12 +147,12 @@ doctests:
# dict[str, str] - name-value pairs for inputs to be provided to the doctest.
# If the field is of file-format type and the value is None, then the
# '.mock()' method of the corresponding class is used instead.
- in_files:
+ in_files: '["T1.nii", "structural.nii"]'
# type=list|default=[]: list of images to generate template from
max_iterations: '[30, 90, 20]'
# type=list|default=[]: maximum number of iterations (must be list of integers in the form [J,K,L...]: J = coarsest resolution iterations, K = middle resolution iterations, L = fine resolution iterations
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
diff --git a/nipype-auto-conv/specs/buildtemplateparallel_callables.py b/nipype-auto-conv/specs/buildtemplateparallel_callables.py
index 7ba280c..17f849e 100644
--- a/nipype-auto-conv/specs/buildtemplateparallel_callables.py
+++ b/nipype-auto-conv/specs/buildtemplateparallel_callables.py
@@ -1 +1,119 @@
-"""Module to put any functions that are referred to in buildtemplateparallel.yaml"""
+"""Module to put any functions that are referred to in the "callables" section of buildtemplateparallel.yaml"""
+
+import os
+import os.path as op
+from builtins import range
+from glob import glob
+
+
+def final_template_file_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["final_template_file"]
+
+
+def subject_outfiles_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["subject_outfiles"]
+
+
+def template_files_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["template_files"]
+
+
+# Original source at L885 of /interfaces/base/core.py
+def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None):
+ raise NotImplementedError
+
+
+# Original source at L340 of /interfaces/ants/legacy.py
+def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None):
+ outputs = {}
+ outputs["template_files"] = []
+ for i in range(len(glob(os.path.realpath("*iteration*")))):
+ temp = os.path.realpath(
+ "%s_iteration_%d/%stemplate.nii.gz"
+ % (inputs.transformation_model, i, inputs.out_prefix)
+ )
+ os.rename(
+ temp,
+ os.path.realpath(
+ "%s_iteration_%d/%stemplate_i%d.nii.gz"
+ % (inputs.transformation_model, i, inputs.out_prefix, i)
+ ),
+ )
+ file_ = "%s_iteration_%d/%stemplate_i%d.nii.gz" % (
+ inputs.transformation_model,
+ i,
+ inputs.out_prefix,
+ i,
+ )
+
+ outputs["template_files"].append(os.path.realpath(file_))
+ outputs["final_template_file"] = os.path.realpath(
+ "%stemplate.nii.gz" % inputs.out_prefix
+ )
+ outputs["subject_outfiles"] = []
+ for filename in inputs.in_files:
+ _, base, _ = split_filename(filename)
+ temp = glob(os.path.realpath("%s%s*" % (inputs.out_prefix, base)))
+ for file_ in temp:
+ outputs["subject_outfiles"].append(file_)
+ return outputs
+
+
+# Original source at L58 of /utils/filemanip.py
+def split_filename(fname):
+ """Split a filename into parts: path, base filename and extension.
+
+ Parameters
+ ----------
+ fname : str
+ file or path name
+
+ Returns
+ -------
+ pth : str
+ base path from fname
+ fname : str
+ filename from fname, without extension
+ ext : str
+ file extension from fname
+
+ Examples
+ --------
+ >>> from nipype.utils.filemanip import split_filename
+ >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz')
+ >>> pth
+ '/home/data'
+
+ >>> fname
+ 'subject'
+
+ >>> ext
+ '.nii.gz'
+
+ """
+
+ special_extensions = [".nii.gz", ".tar.gz", ".niml.dset"]
+
+ pth = op.dirname(fname)
+ fname = op.basename(fname)
+
+ ext = None
+ for special_ext in special_extensions:
+ ext_len = len(special_ext)
+ if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()):
+ ext = fname[-ext_len:]
+ fname = fname[:-ext_len]
+ break
+ if not ext:
+ fname, ext = op.splitext(fname)
+
+ return pth, fname, ext
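The renaming loop in `_list_outputs` above follows a fixed per-iteration template pattern. The helper below only reproduces that pattern as a string and touches no files; the `GR` and `antsTMPL_` values are taken from the buildtemplateparallel doctest, and the function name is made up for illustration.

```python
# Illustrative only: the per-iteration template naming used by _list_outputs above.
def iteration_template_name(transformation_model, out_prefix, i):
    return "%s_iteration_%d/%stemplate_i%d.nii.gz" % (transformation_model, i, out_prefix, i)

print(iteration_template_name("GR", "antsTMPL_", 0))
# -> GR_iteration_0/antsTMPL_template_i0.nii.gz
```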
diff --git a/nipype-auto-conv/specs/compose_multi_transform.yaml b/nipype-auto-conv/specs/compose_multi_transform.yaml
index e195d16..5f20948 100644
--- a/nipype-auto-conv/specs/compose_multi_transform.yaml
+++ b/nipype-auto-conv/specs/compose_multi_transform.yaml
@@ -5,9 +5,9 @@
#
# Docs
# ----
-#
+#
# Take a set of transformations and convert them to a single transformation matrix/warpfield.
-#
+#
# Examples
# --------
# >>> from nipype.interfaces.ants import ComposeMultiTransform
@@ -17,8 +17,8 @@
# >>> compose_transform.cmdline
# 'ComposeMultiTransform 3 struct_to_template_composed.mat
# struct_to_template.mat func_to_struct.mat'
-#
-#
+#
+#
task_name: ComposeMultiTransform
nipype_name: ComposeMultiTransform
nipype_module: nipype.interfaces.ants.utils
@@ -33,13 +33,16 @@ inputs:
# from the nipype interface, but you may want to be more specific, particularly
# for file types, where specifying the format also specifies the file that will be
# passed to the field in the automatically generated unittests.
- output_transform: generic/file
+ output_transform: Path
# type=file: Composed transform file
# type=file|default=: the name of the resulting transform.
reference_image: generic/file
# type=file|default=: Reference image (only necessary when output is warpfield)
transforms: datascience/text-matrix+list-of
# type=inputmultiobject|default=[]: transforms to average
+ callable_defaults:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set as the `default` method of input fields
metadata:
# dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
outputs:
@@ -83,15 +86,15 @@ tests:
environ:
# type=dict|default={}: Environment variables
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -105,15 +108,15 @@ tests:
transforms:
# type=inputmultiobject|default=[]: transforms to average
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -128,10 +131,10 @@ doctests:
# '.mock()' method of the corresponding class is used instead.
dimension: '3'
# type=enum|default=3|allowed[2,3]: image dimension (2 or 3)
- transforms:
+ transforms: '["struct_to_template.mat", "func_to_struct.mat"]'
# type=inputmultiobject|default=[]: transforms to average
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
diff --git a/nipype-auto-conv/specs/compose_multi_transform_callables.py b/nipype-auto-conv/specs/compose_multi_transform_callables.py
index 88052b5..0fff187 100644
--- a/nipype-auto-conv/specs/compose_multi_transform_callables.py
+++ b/nipype-auto-conv/specs/compose_multi_transform_callables.py
@@ -1 +1,203 @@
-"""Module to put any functions that are referred to in ComposeMultiTransform.yaml"""
+"""Module to put any functions that are referred to in the "callables" section of ComposeMultiTransform.yaml"""
+
+import attrs
+import logging
+import os
+import os.path as op
+
+
+def output_transform_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["output_transform"]
+
+
+iflogger = logging.getLogger("nipype.interface")
+
+
+# Original source at L809 of /interfaces/base/core.py
+def _filename_from_source(
+ name, chain=None, inputs=None, stdout=None, stderr=None, output_dir=None
+):
+ if chain is None:
+ chain = []
+
+ trait_spec = inputs.trait(name)
+ retval = getattr(inputs, name)
+ source_ext = None
+ if (retval is attrs.NOTHING) or "%s" in retval:
+ if not trait_spec.name_source:
+ return retval
+
+ # Do not generate filename when excluded by other inputs
+ if any(
+ (getattr(inputs, field) is not attrs.NOTHING)
+ for field in trait_spec.xor or ()
+ ):
+ return retval
+
+ # Do not generate filename when required fields are missing
+ if not all(
+ (getattr(inputs, field) is not attrs.NOTHING)
+ for field in trait_spec.requires or ()
+ ):
+ return retval
+
+ if (retval is not attrs.NOTHING) and "%s" in retval:
+ name_template = retval
+ else:
+ name_template = trait_spec.name_template
+ if not name_template:
+ name_template = "%s_generated"
+
+ ns = trait_spec.name_source
+ while isinstance(ns, (list, tuple)):
+ if len(ns) > 1:
+ iflogger.warning("Only one name_source per trait is allowed")
+ ns = ns[0]
+
+ if not isinstance(ns, (str, bytes)):
+ raise ValueError(
+ "name_source of '{}' trait should be an input trait "
+ "name, but a type {} object was found".format(name, type(ns))
+ )
+
+ if getattr(inputs, ns) is not attrs.NOTHING:
+ name_source = ns
+ source = getattr(inputs, name_source)
+ while isinstance(source, list):
+ source = source[0]
+
+ # special treatment for files
+ try:
+ _, base, source_ext = split_filename(source)
+ except (AttributeError, TypeError):
+ base = source
+ else:
+ if name in chain:
+ raise NipypeInterfaceError("Mutually pointing name_sources")
+
+ chain.append(name)
+ base = _filename_from_source(
+ ns,
+ chain,
+ inputs=inputs,
+ stdout=stdout,
+ stderr=stderr,
+ output_dir=output_dir,
+ )
+ if base is not attrs.NOTHING:
+ _, _, source_ext = split_filename(base)
+ else:
+ # Do not generate filename when required fields are missing
+ return retval
+
+ chain = None
+ retval = name_template % base
+ _, _, ext = split_filename(retval)
+ if trait_spec.keep_extension and (ext or source_ext):
+ if (ext is None or not ext) and source_ext:
+ retval = retval + source_ext
+ else:
+ retval = _overload_extension(
+ retval,
+ name,
+ inputs=inputs,
+ stdout=stdout,
+ stderr=stderr,
+ output_dir=output_dir,
+ )
+ return retval
+
+
+# Original source at L885 of /interfaces/base/core.py
+def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None):
+ raise NotImplementedError
+
+
+# Original source at L891 of /interfaces/base/core.py
+def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None):
+ metadata = dict(name_source=lambda t: t is not None)
+ traits = inputs.traits(**metadata)
+ if traits:
+ outputs = {}
+ for name, trait_spec in list(traits.items()):
+ out_name = name
+ if trait_spec.output_name is not None:
+ out_name = trait_spec.output_name
+ fname = _filename_from_source(
+ name, inputs=inputs, stdout=stdout, stderr=stderr, output_dir=output_dir
+ )
+ if fname is not attrs.NOTHING:
+ outputs[out_name] = os.path.abspath(fname)
+ return outputs
+
+
+# Original source at L888 of /interfaces/base/core.py
+def _overload_extension(
+ value, name=None, inputs=None, stdout=None, stderr=None, output_dir=None
+):
+ return value
+
+
+# Original source at L58 of /utils/filemanip.py
+def split_filename(fname):
+ """Split a filename into parts: path, base filename and extension.
+
+ Parameters
+ ----------
+ fname : str
+ file or path name
+
+ Returns
+ -------
+ pth : str
+ base path from fname
+ fname : str
+ filename from fname, without extension
+ ext : str
+ file extension from fname
+
+ Examples
+ --------
+ >>> from nipype.utils.filemanip import split_filename
+ >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz')
+ >>> pth
+ '/home/data'
+
+ >>> fname
+ 'subject'
+
+ >>> ext
+ '.nii.gz'
+
+ """
+
+ special_extensions = [".nii.gz", ".tar.gz", ".niml.dset"]
+
+ pth = op.dirname(fname)
+ fname = op.basename(fname)
+
+ ext = None
+ for special_ext in special_extensions:
+ ext_len = len(special_ext)
+ if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()):
+ ext = fname[-ext_len:]
+ fname = fname[:-ext_len]
+ break
+ if not ext:
+ fname, ext = op.splitext(fname)
+
+ return pth, fname, ext
+
+
+# Original source at L125 of /interfaces/base/support.py
+class NipypeInterfaceError(Exception):
+ """Custom error for interfaces"""
+
+ def __init__(self, value):
+ self.value = value
+
+ def __str__(self):
+ return "{}".format(self.value)
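Most of the machinery above exists to fill in `output_transform` from its name source when it is left unset. Judging from the ComposeMultiTransform doctest ('struct_to_template_composed.mat'), the net effect is a '%s_composed' template applied to the first transform with its extension kept. The helper below is a simplified, hypothetical rendering of that substitution, not the interface's actual trait metadata.

```python
# Simplified sketch of the name_template substitution in _filename_from_source above;
# the '%s_composed' template is inferred from the doctest, not read from the interface.
def default_output_transform(first_transform, name_template="%s_composed"):
    pth, base, ext = split_filename(first_transform)  # helper defined above
    return name_template % base + ext                 # keep_extension-style behaviour

print(default_output_transform("struct_to_template.mat"))
# -> struct_to_template_composed.mat
```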
diff --git a/nipype-auto-conv/specs/composite_transform_util.yaml b/nipype-auto-conv/specs/composite_transform_util.yaml
index 1848ac9..44085fb 100644
--- a/nipype-auto-conv/specs/composite_transform_util.yaml
+++ b/nipype-auto-conv/specs/composite_transform_util.yaml
@@ -5,13 +5,13 @@
#
# Docs
# ----
-#
+#
# ANTs utility which can combine or break apart transform files into their individual
# constituent components.
-#
+#
# Examples
# --------
-#
+#
# >>> from nipype.interfaces.ants import CompositeTransformUtil
# >>> tran = CompositeTransformUtil()
# >>> tran.inputs.process = 'disassemble'
@@ -19,9 +19,9 @@
# >>> tran.cmdline
# 'CompositeTransformUtil --disassemble output_Composite.h5 transform'
# >>> tran.run() # doctest: +SKIP
-#
+#
# example for assembling transformation files
-#
+#
# >>> from nipype.interfaces.ants import CompositeTransformUtil
# >>> tran = CompositeTransformUtil()
# >>> tran.inputs.process = 'assemble'
@@ -30,7 +30,7 @@
# >>> tran.cmdline
# 'CompositeTransformUtil --assemble my.h5 AffineTransform.mat DisplacementFieldTransform.nii.gz '
# >>> tran.run() # doctest: +SKIP
-#
+#
task_name: CompositeTransformUtil
nipype_name: CompositeTransformUtil
nipype_module: nipype.interfaces.ants.registration
@@ -45,11 +45,14 @@ inputs:
# from the nipype interface, but you may want to be more specific, particularly
# for file types, where specifying the format also specifies the file that will be
# passed to the field in the automatically generated unittests.
- out_file: datascience/hdf5
- # type=file: Compound transformation file
- # type=file|default=: Output file path (only used for disassembly).
in_file: '[datascience/text-matrix,datascience/hdf5]+list-of'
# type=inputmultiobject|default=[]: Input transform file(s)
+ out_file: Path
+ # type=file: Compound transformation file
+ # type=file|default=: Output file path (only used for disassembly).
+ callable_defaults:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set as the `default` method of input fields
metadata:
# dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
outputs:
@@ -97,15 +100,15 @@ tests:
environ:
# type=dict|default={}: Environment variables
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -119,15 +122,15 @@ tests:
in_file:
# type=inputmultiobject|default=[]: Input transform file(s)
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -138,21 +141,21 @@ tests:
# (if not specified, will try to choose a sensible value)
process: '"assemble"'
# type=enum|default='assemble'|allowed['assemble','disassemble']: What to do with the transform inputs (assemble or disassemble)
- out_file:
+ out_file: '"my.h5"'
# type=file: Compound transformation file
# type=file|default=: Output file path (only used for disassembly).
in_file:
# type=inputmultiobject|default=[]: Input transform file(s)
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -167,10 +170,10 @@ doctests:
# '.mock()' method of the corresponding class is used instead.
process: '"disassemble"'
# type=enum|default='assemble'|allowed['assemble','disassemble']: What to do with the transform inputs (assemble or disassemble)
- in_file:
+ in_file: '"output_Composite.h5"'
# type=inputmultiobject|default=[]: Input transform file(s)
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
@@ -182,13 +185,13 @@ doctests:
# '.mock()' method of the corresponding class is used instead.
process: '"assemble"'
# type=enum|default='assemble'|allowed['assemble','disassemble']: What to do with the transform inputs (assemble or disassemble)
- out_file:
+ out_file: '"my.h5"'
# type=file: Compound transformation file
# type=file|default=: Output file path (only used for disassembly).
- in_file:
+ in_file: '["AffineTransform.mat", "DisplacementFieldTransform.nii.gz"]'
# type=inputmultiobject|default=[]: Input transform file(s)
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
diff --git a/nipype-auto-conv/specs/composite_transform_util_callables.py b/nipype-auto-conv/specs/composite_transform_util_callables.py
index 4983be4..0c092e5 100644
--- a/nipype-auto-conv/specs/composite_transform_util_callables.py
+++ b/nipype-auto-conv/specs/composite_transform_util_callables.py
@@ -1 +1,44 @@
-"""Module to put any functions that are referred to in CompositeTransformUtil.yaml"""
+"""Module to put any functions that are referred to in the "callables" section of CompositeTransformUtil.yaml"""
+
+import os
+
+
+def affine_transform_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["affine_transform"]
+
+
+def displacement_field_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["displacement_field"]
+
+
+def out_file_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["out_file"]
+
+
+# Original source at L885 of /interfaces/base/core.py
+def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None):
+ raise NotImplementedError
+
+
+# Original source at L1873 of /interfaces/ants/registration.py
+def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None):
+ outputs = {}
+ if inputs.process == "disassemble":
+ outputs["affine_transform"] = os.path.abspath(
+ "00_{}_AffineTransform.mat".format(inputs.output_prefix)
+ )
+ outputs["displacement_field"] = os.path.abspath(
+ "01_{}_DisplacementFieldTransform.nii.gz".format(inputs.output_prefix)
+ )
+ if inputs.process == "assemble":
+ outputs["out_file"] = os.path.abspath(inputs.out_file)
+ return outputs
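A quick way to see the disassemble naming produced by `_list_outputs` above is to pass it a bare namespace with the `output_prefix` from the doctest ('transform'). This is an illustration only, with `SimpleNamespace` standing in for the real inputs object and the function assumed to be in scope.

```python
# Illustrative only: disassemble output names produced by _list_outputs above.
from types import SimpleNamespace

inputs = SimpleNamespace(process="disassemble", output_prefix="transform")
outputs = _list_outputs(inputs=inputs)
print(outputs["affine_transform"])    # .../00_transform_AffineTransform.mat
print(outputs["displacement_field"])  # .../01_transform_DisplacementFieldTransform.nii.gz
```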
diff --git a/nipype-auto-conv/specs/convert_scalar_image_to_rgb.yaml b/nipype-auto-conv/specs/convert_scalar_image_to_rgb.yaml
index a2d2dcd..9676fe6 100644
--- a/nipype-auto-conv/specs/convert_scalar_image_to_rgb.yaml
+++ b/nipype-auto-conv/specs/convert_scalar_image_to_rgb.yaml
@@ -5,9 +5,9 @@
#
# Docs
# ----
-#
+#
# Convert scalar images to RGB.
-#
+#
# Examples
# --------
# >>> from nipype.interfaces.ants.visualization import ConvertScalarImageToRGB
@@ -19,8 +19,8 @@
# >>> converter.inputs.maximum_input = 6
# >>> converter.cmdline
# 'ConvertScalarImageToRGB 3 T1.nii.gz rgb.nii.gz none jet none 0 6 0 255'
-#
-#
+#
+#
task_name: ConvertScalarImageToRGB
nipype_name: ConvertScalarImageToRGB
nipype_module: nipype.interfaces.ants.visualization
@@ -37,6 +37,9 @@ inputs:
# passed to the field in the automatically generated unittests.
input_image: medimage/nifti-gz
# type=file|default=: Main input is a 3-D grayscale image.
+ callable_defaults:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set as the `default` method of input fields
metadata:
# dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
outputs:
@@ -82,9 +85,9 @@ tests:
maximum_input:
# type=int|default=0: maximum input
minimum_RGB_output:
- # type=int|default=0:
+ # type=int|default=0:
maximum_RGB_output:
- # type=int|default=255:
+ # type=int|default=255:
num_threads:
# type=int|default=1: Number of ITK threads to use
args:
@@ -92,15 +95,15 @@ tests:
environ:
# type=dict|default={}: Environment variables
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -120,15 +123,15 @@ tests:
maximum_input: '6'
# type=int|default=0: maximum input
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -143,7 +146,7 @@ doctests:
# '.mock()' method of the corresponding class is used instead.
dimension: '3'
# type=enum|default=3|allowed[2,3]: image dimension (2 or 3)
- input_image:
+ input_image: '"T1.nii.gz"'
# type=file|default=: Main input is a 3-D grayscale image.
colormap: '"jet"'
# type=enum|default='grey'|allowed['autumn','blue','cool','copper','custom','green','grey','hot','hsv','jet','overunder','red','spring','summer','winter']: Select a colormap
@@ -152,7 +155,7 @@ doctests:
maximum_input: '6'
# type=int|default=0: maximum input
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
diff --git a/nipype-auto-conv/specs/convert_scalar_image_to_rgb_callables.py b/nipype-auto-conv/specs/convert_scalar_image_to_rgb_callables.py
index f4c7836..bf7c5d1 100644
--- a/nipype-auto-conv/specs/convert_scalar_image_to_rgb_callables.py
+++ b/nipype-auto-conv/specs/convert_scalar_image_to_rgb_callables.py
@@ -1 +1,22 @@
-"""Module to put any functions that are referred to in ConvertScalarImageToRGB.yaml"""
+"""Module to put any functions that are referred to in the "callables" section of ConvertScalarImageToRGB.yaml"""
+
+import os
+
+
+def output_image_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["output_image"]
+
+
+# Original source at L885 of /interfaces/base/core.py
+def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None):
+ raise NotImplementedError
+
+
+# Original source at L103 of /interfaces/ants/visualization.py
+def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None):
+ outputs = {}
+ outputs["output_image"] = os.path.join(output_dir, inputs.output_image)
+ return outputs
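
The callable above simply joins the working directory onto the user-supplied output name. A short sketch of that behaviour, with the "rgb.nii.gz" value taken from the doctest in the spec and "/tmp/work" as a hypothetical working directory:

import os
from types import SimpleNamespace

def output_image_path(output_dir, inputs):
    # Same join performed by _list_outputs in the callables module above.
    return os.path.join(output_dir, inputs.output_image)

inputs = SimpleNamespace(output_image="rgb.nii.gz")
print(output_image_path("/tmp/work", inputs))  # -> /tmp/work/rgb.nii.gz
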
diff --git a/nipype-auto-conv/specs/cortical_thickness.yaml b/nipype-auto-conv/specs/cortical_thickness.yaml
index d237b9e..71761ea 100644
--- a/nipype-auto-conv/specs/cortical_thickness.yaml
+++ b/nipype-auto-conv/specs/cortical_thickness.yaml
@@ -5,7 +5,7 @@
#
# Docs
# ----
-#
+#
# Examples
# --------
# >>> from nipype.interfaces.ants.segmentation import CorticalThickness
@@ -23,8 +23,8 @@
# 'antsCorticalThickness.sh -a T1.nii.gz -m ProbabilityMaskOfStudyTemplate.nii.gz
# -e study_template.nii.gz -d 3 -s nii.gz -o antsCT_
# -p nipype_priors/BrainSegmentationPrior%02d.nii.gz -t brain_study_template.nii.gz'
-#
-#
+#
+#
task_name: CorticalThickness
nipype_name: CorticalThickness
nipype_module: nipype.interfaces.ants.segmentation
@@ -41,18 +41,21 @@ inputs:
# passed to the field in the automatically generated unittests.
anatomical_image: medimage/nifti-gz
# type=file|default=: Structural *intensity* image, typically T1. If more than one anatomical image is specified, subsequently specified images are used during the segmentation process. However, only the first image is used in the registration of priors. Our suggestion would be to specify the T1 as the first image.
- brain_template: medimage/nifti-gz
- # type=file|default=: Anatomical *intensity* template (possibly created using a population data set with buildtemplateparallel.sh in ANTs). This template is *not* skull-stripped.
brain_probability_mask: medimage/nifti-gz
# type=file|default=: brain probability mask in template space
+ brain_template: medimage/nifti-gz
+ # type=file|default=: Anatomical *intensity* template (possibly created using a population data set with buildtemplateparallel.sh in ANTs). This template is *not* skull-stripped.
+ cortical_label_image: generic/file
+ # type=file|default=: Cortical ROI labels to use as a prior for ATITH.
+ extraction_registration_mask: generic/file
+ # type=file|default=: Mask (defined in the template space) used during registration for brain extraction.
segmentation_priors: medimage/nifti-gz+list-of
- # type=inputmultiobject|default=[]:
+ # type=inputmultiobject|default=[]:
t1_registration_template: medimage/nifti-gz
# type=file|default=: Anatomical *intensity* template (assumed to be skull-stripped). A common case would be where this would be the same template as specified in the -e option which is not skull stripped.
- extraction_registration_mask: generic/file
- # type=file|default=: Mask (defined in the template space) used during registration for brain extraction.
- cortical_label_image: generic/file
- # type=file|default=: Cortical ROI labels to use as a prior for ATITH.
+ callable_defaults:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set as the `default` method of input fields
metadata:
# dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
outputs:
@@ -68,28 +71,30 @@ outputs:
# passed to the field in the automatically generated unittests.
BrainExtractionMask: generic/file
# type=file: brain extraction mask
- ExtractedBrainN4: generic/file
- # type=file: extracted brain from N4 image
BrainSegmentation: generic/file
# type=file: brain segmentation image
BrainSegmentationN4: generic/file
# type=file: N4 corrected image
+ BrainSegmentationPosteriors: generic/file+list-of
+ # type=outputmultiobject: Posterior probability images
+ BrainVolumes: generic/file
+ # type=file: Brain volumes as text
CorticalThickness: generic/file
# type=file: cortical thickness file
- TemplateToSubject1GenericAffine: generic/file
- # type=file: Template to subject affine
- TemplateToSubject0Warp: generic/file
- # type=file: Template to subject warp
- SubjectToTemplate1Warp: generic/file
- # type=file: Template to subject inverse warp
+ CorticalThicknessNormedToTemplate: generic/file
+ # type=file: Normalized cortical thickness
+ ExtractedBrainN4: generic/file
+ # type=file: extracted brain from N4 image
SubjectToTemplate0GenericAffine: generic/file
# type=file: Template to subject inverse affine
+ SubjectToTemplate1Warp: generic/file
+ # type=file: Template to subject inverse warp
SubjectToTemplateLogJacobian: generic/file
# type=file: Template to subject log jacobian
- CorticalThicknessNormedToTemplate: generic/file
- # type=file: Normalized cortical thickness
- BrainVolumes: generic/file
- # type=file: Brain volumes as text
+ TemplateToSubject0Warp: generic/file
+ # type=file: Template to subject warp
+ TemplateToSubject1GenericAffine: generic/file
+ # type=file: Template to subject affine
callables:
# dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
# to set to the `callable` attribute of output fields
@@ -110,7 +115,7 @@ tests:
brain_probability_mask:
# type=file|default=: brain probability mask in template space
segmentation_priors:
- # type=inputmultiobject|default=[]:
+ # type=inputmultiobject|default=[]:
out_prefix:
# type=str|default='antsCT_': Prefix that is prepended to all output files
image_suffix:
@@ -150,15 +155,15 @@ tests:
environ:
# type=dict|default={}: Environment variables
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -176,19 +181,19 @@ tests:
brain_probability_mask:
# type=file|default=: brain probability mask in template space
segmentation_priors:
- # type=inputmultiobject|default=[]:
+ # type=inputmultiobject|default=[]:
t1_registration_template:
# type=file|default=: Anatomical *intensity* template (assumed to be skull-stripped). A common case would be where this would be the same template as specified in the -e option which is not skull stripped.
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -203,18 +208,18 @@ doctests:
# '.mock()' method of the corresponding class is used instead.
dimension: '3'
# type=enum|default=3|allowed[2,3]: image dimension (2 or 3)
- anatomical_image:
+ anatomical_image: '"T1.nii.gz"'
# type=file|default=: Structural *intensity* image, typically T1. If more than one anatomical image is specified, subsequently specified images are used during the segmentation process. However, only the first image is used in the registration of priors. Our suggestion would be to specify the T1 as the first image.
- brain_template:
+ brain_template: '"study_template.nii.gz"'
# type=file|default=: Anatomical *intensity* template (possibly created using a population data set with buildtemplateparallel.sh in ANTs). This template is *not* skull-stripped.
- brain_probability_mask:
+ brain_probability_mask: '"ProbabilityMaskOfStudyTemplate.nii.gz"'
# type=file|default=: brain probability mask in template space
- segmentation_priors:
- # type=inputmultiobject|default=[]:
- t1_registration_template:
+ segmentation_priors: '["BrainSegmentationPrior01.nii.gz","BrainSegmentationPrior02.nii.gz","BrainSegmentationPrior03.nii.gz","BrainSegmentationPrior04.nii.gz"]'
+ # type=inputmultiobject|default=[]:
+ t1_registration_template: '"brain_study_template.nii.gz"'
# type=file|default=: Anatomical *intensity* template (assumed to be skull-stripped). A common case would be where this would be the same template as specified in the -e option which is not skull stripped.
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
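
For orientation, the doctest inputs filled in above line up with the flags in the antsCorticalThickness.sh command quoted in the Docs section of this spec. The mapping below is inferred from that quoted command line alone and is an illustration, not part of the spec:

# Flag-to-input mapping inferred from the quoted antsCorticalThickness.sh cmdline.
doctest_inputs_to_flags = {
    "dimension": "-d",                # 3
    "anatomical_image": "-a",         # T1.nii.gz
    "brain_probability_mask": "-m",   # ProbabilityMaskOfStudyTemplate.nii.gz
    "brain_template": "-e",           # study_template.nii.gz
    "segmentation_priors": "-p",      # BrainSegmentationPrior%02d.nii.gz pattern
    "t1_registration_template": "-t", # brain_study_template.nii.gz
}
for field, flag in doctest_inputs_to_flags.items():
    print(f"{flag:3s} <- {field}")
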
diff --git a/nipype-auto-conv/specs/cortical_thickness_callables.py b/nipype-auto-conv/specs/cortical_thickness_callables.py
index 847ddf0..2113f52 100644
--- a/nipype-auto-conv/specs/cortical_thickness_callables.py
+++ b/nipype-auto-conv/specs/cortical_thickness_callables.py
@@ -1 +1,161 @@
-"""Module to put any functions that are referred to in CorticalThickness.yaml"""
+"""Module to put any functions that are referred to in the "callables" section of CorticalThickness.yaml"""
+
+import os
+
+
+def BrainExtractionMask_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["BrainExtractionMask"]
+
+
+def BrainSegmentation_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["BrainSegmentation"]
+
+
+def BrainSegmentationN4_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["BrainSegmentationN4"]
+
+
+def BrainSegmentationPosteriors_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["BrainSegmentationPosteriors"]
+
+
+def BrainVolumes_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["BrainVolumes"]
+
+
+def CorticalThickness_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["CorticalThickness"]
+
+
+def CorticalThicknessNormedToTemplate_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["CorticalThicknessNormedToTemplate"]
+
+
+def ExtractedBrainN4_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["ExtractedBrainN4"]
+
+
+def SubjectToTemplate0GenericAffine_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["SubjectToTemplate0GenericAffine"]
+
+
+def SubjectToTemplate1Warp_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["SubjectToTemplate1Warp"]
+
+
+def SubjectToTemplateLogJacobian_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["SubjectToTemplateLogJacobian"]
+
+
+def TemplateToSubject0Warp_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["TemplateToSubject0Warp"]
+
+
+def TemplateToSubject1GenericAffine_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["TemplateToSubject1GenericAffine"]
+
+
+# Original source at L885 of /interfaces/base/core.py
+def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None):
+ raise NotImplementedError
+
+
+# Original source at L789 of /interfaces/ants/segmentation.py
+def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None):
+ outputs = {}
+ outputs["BrainExtractionMask"] = os.path.join(
+ output_dir,
+ inputs.out_prefix + "BrainExtractionMask." + inputs.image_suffix,
+ )
+ outputs["ExtractedBrainN4"] = os.path.join(
+ output_dir,
+ inputs.out_prefix + "ExtractedBrain0N4." + inputs.image_suffix,
+ )
+ outputs["BrainSegmentation"] = os.path.join(
+ output_dir,
+ inputs.out_prefix + "BrainSegmentation." + inputs.image_suffix,
+ )
+ outputs["BrainSegmentationN4"] = os.path.join(
+ output_dir,
+ inputs.out_prefix + "BrainSegmentation0N4." + inputs.image_suffix,
+ )
+ posteriors = []
+ for i in range(len(inputs.segmentation_priors)):
+ posteriors.append(
+ os.path.join(
+ output_dir,
+ inputs.out_prefix
+ + "BrainSegmentationPosteriors%02d." % (i + 1)
+ + inputs.image_suffix,
+ )
+ )
+ outputs["BrainSegmentationPosteriors"] = posteriors
+ outputs["CorticalThickness"] = os.path.join(
+ output_dir,
+ inputs.out_prefix + "CorticalThickness." + inputs.image_suffix,
+ )
+ outputs["TemplateToSubject1GenericAffine"] = os.path.join(
+ output_dir, inputs.out_prefix + "TemplateToSubject1GenericAffine.mat"
+ )
+ outputs["TemplateToSubject0Warp"] = os.path.join(
+ output_dir,
+ inputs.out_prefix + "TemplateToSubject0Warp." + inputs.image_suffix,
+ )
+ outputs["SubjectToTemplate1Warp"] = os.path.join(
+ output_dir,
+ inputs.out_prefix + "SubjectToTemplate1Warp." + inputs.image_suffix,
+ )
+ outputs["SubjectToTemplate0GenericAffine"] = os.path.join(
+ output_dir, inputs.out_prefix + "SubjectToTemplate0GenericAffine.mat"
+ )
+ outputs["SubjectToTemplateLogJacobian"] = os.path.join(
+ output_dir,
+ inputs.out_prefix + "SubjectToTemplateLogJacobian." + inputs.image_suffix,
+ )
+ outputs["CorticalThicknessNormedToTemplate"] = os.path.join(
+ output_dir,
+ inputs.out_prefix + "CorticalThickness." + inputs.image_suffix,
+ )
+ outputs["BrainVolumes"] = os.path.join(
+ output_dir, inputs.out_prefix + "brainvols.csv"
+ )
+ return outputs
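
The filenames assembled by _list_outputs above all follow the out_prefix + name + image_suffix pattern, with the posteriors numbered from the priors list. A standalone illustration of that string assembly, using the "antsCT_" prefix and "nii.gz" suffix shown in the spec's docstring and the four priors from the doctest:

import os

output_dir = os.getcwd()
out_prefix, image_suffix = "antsCT_", "nii.gz"
n_priors = 4  # matches the four BrainSegmentationPrior files in the doctest

posteriors = [
    os.path.join(output_dir, f"{out_prefix}BrainSegmentationPosteriors{i + 1:02d}.{image_suffix}")
    for i in range(n_priors)
]
cortical_thickness = os.path.join(output_dir, f"{out_prefix}CorticalThickness.{image_suffix}")
brain_volumes = os.path.join(output_dir, f"{out_prefix}brainvols.csv")

print(posteriors[0])       # .../antsCT_BrainSegmentationPosteriors01.nii.gz
print(cortical_thickness)  # .../antsCT_CorticalThickness.nii.gz
print(brain_volumes)       # .../antsCT_brainvols.csv
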
diff --git a/nipype-auto-conv/specs/create_jacobian_determinant_image.yaml b/nipype-auto-conv/specs/create_jacobian_determinant_image.yaml
index 06655ca..8dc42b0 100644
--- a/nipype-auto-conv/specs/create_jacobian_determinant_image.yaml
+++ b/nipype-auto-conv/specs/create_jacobian_determinant_image.yaml
@@ -5,7 +5,7 @@
#
# Docs
# ----
-#
+#
# Examples
# --------
# >>> from nipype.interfaces.ants import CreateJacobianDeterminantImage
@@ -15,7 +15,7 @@
# >>> jacobian.inputs.outputImage = 'out_name.nii.gz'
# >>> jacobian.cmdline
# 'CreateJacobianDeterminantImage 3 ants_Warp.nii.gz out_name.nii.gz'
-#
+#
task_name: CreateJacobianDeterminantImage
nipype_name: CreateJacobianDeterminantImage
nipype_module: nipype.interfaces.ants.utils
@@ -34,6 +34,9 @@ inputs:
# type=file|default=: deformation transformation file
outputImage: medimage/nifti-gz
# type=file|default=: output filename
+ callable_defaults:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set as the `default` method of input fields
metadata:
# dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
outputs:
@@ -77,15 +80,15 @@ tests:
environ:
# type=dict|default={}: Environment variables
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -101,15 +104,15 @@ tests:
outputImage:
# type=file|default=: output filename
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -124,12 +127,12 @@ doctests:
# '.mock()' method of the corresponding class is used instead.
imageDimension: '3'
# type=enum|default=3|allowed[2,3]: image dimension (2 or 3)
- deformationField:
+ deformationField: '"ants_Warp.nii.gz"'
# type=file|default=: deformation transformation file
- outputImage:
+ outputImage: '"out_name.nii.gz"'
# type=file|default=: output filename
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
diff --git a/nipype-auto-conv/specs/create_jacobian_determinant_image_callables.py b/nipype-auto-conv/specs/create_jacobian_determinant_image_callables.py
index ed5d927..0bd6e4c 100644
--- a/nipype-auto-conv/specs/create_jacobian_determinant_image_callables.py
+++ b/nipype-auto-conv/specs/create_jacobian_determinant_image_callables.py
@@ -1 +1,22 @@
-"""Module to put any functions that are referred to in CreateJacobianDeterminantImage.yaml"""
+"""Module to put any functions that are referred to in the "callables" section of CreateJacobianDeterminantImage.yaml"""
+
+import os
+
+
+def jacobian_image_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["jacobian_image"]
+
+
+# Original source at L885 of /interfaces/base/core.py
+def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None):
+ raise NotImplementedError
+
+
+# Original source at L756 of /interfaces/ants/utils.py
+def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None):
+ outputs = {}
+ outputs["jacobian_image"] = os.path.abspath(inputs.outputImage)
+ return outputs
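
Note the path handling here: the jacobian image is resolved with os.path.abspath (relative to the current working directory) rather than joined onto output_dir as in the visualization callables. A two-line sketch, with "out_name.nii.gz" taken from the doctest above:

import os
from types import SimpleNamespace

inputs = SimpleNamespace(outputImage="out_name.nii.gz")
print(os.path.abspath(inputs.outputImage))  # resolved against the current working directory
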
diff --git a/nipype-auto-conv/specs/create_tiled_mosaic.yaml b/nipype-auto-conv/specs/create_tiled_mosaic.yaml
index 5924ea0..e5452ee 100644
--- a/nipype-auto-conv/specs/create_tiled_mosaic.yaml
+++ b/nipype-auto-conv/specs/create_tiled_mosaic.yaml
@@ -9,10 +9,10 @@
# provides useful functionality for common image analysis tasks. The basic
# usage of CreateTiledMosaic is to tile a 3-D image volume slice-wise into
# a 2-D image.
-#
+#
# Examples
# --------
-#
+#
# >>> from nipype.interfaces.ants.visualization import CreateTiledMosaic
# >>> mosaic_slicer = CreateTiledMosaic()
# >>> mosaic_slicer.inputs.input_image = 'T1.nii.gz'
@@ -25,7 +25,7 @@
# >>> mosaic_slicer.inputs.slices = '[2 ,100 ,160]'
# >>> mosaic_slicer.cmdline
# 'CreateTiledMosaic -a 0.50 -d 2 -i T1.nii.gz -x mask.nii.gz -o output.png -p [ -15x -50 , -15x -30 ,0] -r rgb.nii.gz -s [2 ,100 ,160]'
-#
+#
task_name: CreateTiledMosaic
nipype_name: CreateTiledMosaic
nipype_module: nipype.interfaces.ants.visualization
@@ -42,10 +42,13 @@ inputs:
# passed to the field in the automatically generated unittests.
input_image: medimage/nifti-gz
# type=file|default=: Main input is a 3-D grayscale image.
- rgb_image: medimage/nifti-gz
- # type=file|default=: An optional Rgb image can be added as an overlay.It must have the same imagegeometry as the input grayscale image.
mask_image: medimage/nifti-gz
# type=file|default=: Specifies the ROI of the RGB voxels used.
+ rgb_image: medimage/nifti-gz
+ # type=file|default=: An optional Rgb image can be added as an overlay.It must have the same imagegeometry as the input grayscale image.
+ callable_defaults:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set as the `default` method of input fields
metadata:
# dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
outputs:
@@ -103,15 +106,15 @@ tests:
environ:
# type=dict|default={}: Environment variables
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -138,15 +141,15 @@ tests:
slices: '"[2 ,100 ,160]"'
# type=str|default='': Number of slices to increment Slice1xSlice2xSlice3[numberOfSlicesToIncrement,,]
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -159,11 +162,11 @@ doctests:
# dict[str, str] - name-value pairs for inputs to be provided to the doctest.
# If the field is of file-format type and the value is None, then the
# '.mock()' method of the corresponding class is used instead.
- input_image:
+ input_image: '"T1.nii.gz"'
# type=file|default=: Main input is a 3-D grayscale image.
- rgb_image:
+ rgb_image: '"rgb.nii.gz"'
# type=file|default=: An optional Rgb image can be added as an overlay.It must have the same imagegeometry as the input grayscale image.
- mask_image:
+ mask_image: '"mask.nii.gz"'
# type=file|default=: Specifies the ROI of the RGB voxels used.
output_image: '"output.png"'
# type=file: image file
@@ -177,7 +180,7 @@ doctests:
slices: '"[2 ,100 ,160]"'
# type=str|default='': Number of slices to increment Slice1xSlice2xSlice3[numberOfSlicesToIncrement,,]
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
diff --git a/nipype-auto-conv/specs/create_tiled_mosaic_callables.py b/nipype-auto-conv/specs/create_tiled_mosaic_callables.py
index b65faa6..ffb423b 100644
--- a/nipype-auto-conv/specs/create_tiled_mosaic_callables.py
+++ b/nipype-auto-conv/specs/create_tiled_mosaic_callables.py
@@ -1 +1,22 @@
-"""Module to put any functions that are referred to in CreateTiledMosaic.yaml"""
+"""Module to put any functions that are referred to in the "callables" section of CreateTiledMosaic.yaml"""
+
+import os
+
+
+def output_image_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["output_image"]
+
+
+# Original source at L885 of /interfaces/base/core.py
+def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None):
+ raise NotImplementedError
+
+
+# Original source at L217 of /interfaces/ants/visualization.py
+def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None):
+ outputs = {}
+ outputs["output_image"] = os.path.join(output_dir, inputs.output_image)
+ return outputs
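
All of these generated callables share the (output_dir, inputs, stdout, stderr) signature. A local, self-contained copy showing how one might exercise it, with "output.png" taken from the doctest and "/tmp/work" as a hypothetical working directory:

import os
from types import SimpleNamespace

def output_image_callable(output_dir, inputs, stdout, stderr):
    # Local copy of the callable above, with the common
    # (output_dir, inputs, stdout, stderr) signature used throughout these modules.
    return os.path.join(output_dir, inputs.output_image)

inputs = SimpleNamespace(output_image="output.png")
print(output_image_callable("/tmp/work", inputs, stdout="", stderr=""))  # -> /tmp/work/output.png
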
diff --git a/nipype-auto-conv/specs/denoise_image.yaml b/nipype-auto-conv/specs/denoise_image.yaml
index b2cf222..bc578fd 100644
--- a/nipype-auto-conv/specs/denoise_image.yaml
+++ b/nipype-auto-conv/specs/denoise_image.yaml
@@ -5,7 +5,7 @@
#
# Docs
# ----
-#
+#
# Examples
# --------
# >>> import copy
@@ -15,21 +15,21 @@
# >>> denoise.inputs.input_image = 'im1.nii'
# >>> denoise.cmdline
# 'DenoiseImage -d 3 -i im1.nii -n Gaussian -o im1_noise_corrected.nii -s 1'
-#
+#
# >>> denoise_2 = copy.deepcopy(denoise)
# >>> denoise_2.inputs.output_image = 'output_corrected_image.nii.gz'
# >>> denoise_2.inputs.noise_model = 'Rician'
# >>> denoise_2.inputs.shrink_factor = 2
# >>> denoise_2.cmdline
# 'DenoiseImage -d 3 -i im1.nii -n Rician -o output_corrected_image.nii.gz -s 2'
-#
+#
# >>> denoise_3 = DenoiseImage()
# >>> denoise_3.inputs.input_image = 'im1.nii'
# >>> denoise_3.inputs.save_noise = True
# >>> denoise_3.cmdline
# 'DenoiseImage -i im1.nii -n Gaussian -o [ im1_noise_corrected.nii, im1_noise.nii ] -s 1'
-#
-#
+#
+#
task_name: DenoiseImage
nipype_name: DenoiseImage
nipype_module: nipype.interfaces.ants.segmentation
@@ -46,12 +46,15 @@ inputs:
# passed to the field in the automatically generated unittests.
input_image: medimage/nifti1
# type=file|default=: A scalar image is expected as input for noise correction.
- output_image: medimage/nifti-gz
- # type=file:
- # type=file|default=: The output consists of the noise corrected version of the input image.
- noise_image: generic/file
- # type=file:
+ noise_image: Path
+ # type=file:
# type=file|default=: Filename for the estimated noise.
+ output_image: Path
+ # type=file:
+ # type=file|default=: The output consists of the noise corrected version of the input image.
+ callable_defaults:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set as the `default` method of input fields
metadata:
# dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
outputs:
@@ -65,12 +68,12 @@ outputs:
# from the nipype interface, but you may want to be more specific, particularly
# for file types, where specifying the format also specifies the file that will be
# passed to the field in the automatically generated unittests.
- output_image: medimage/nifti-gz
- # type=file:
- # type=file|default=: The output consists of the noise corrected version of the input image.
noise_image: generic/file
- # type=file:
+ # type=file:
# type=file|default=: Filename for the estimated noise.
+ output_image: medimage/nifti-gz
+ # type=file:
+ # type=file|default=: The output consists of the noise corrected version of the input image.
callables:
# dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
# to set to the `callable` attribute of output fields
@@ -91,12 +94,12 @@ tests:
shrink_factor:
# type=int|default=1: Running noise correction on large images can be time consuming. To lessen computation time, the input image can be resampled. The shrink factor, specified as a single integer, describes this resampling. Shrink factor = 1 is the default.
output_image:
- # type=file:
+ # type=file:
# type=file|default=: The output consists of the noise corrected version of the input image.
save_noise:
# type=bool|default=False: True if the estimated noise should be saved to file.
noise_image:
- # type=file:
+ # type=file:
# type=file|default=: Filename for the estimated noise.
verbose:
# type=bool|default=False: Verbose output.
@@ -107,15 +110,15 @@ tests:
environ:
# type=dict|default={}: Environment variables
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -129,7 +132,7 @@ tests:
input_image:
# type=file|default=: A scalar image is expected as input for noise correction.
imports: &id001
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
- module: copy
expected_outputs:
@@ -137,8 +140,8 @@ tests:
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -147,23 +150,23 @@ tests:
- inputs:
# dict[str, str] - values to provide to inputs fields in the task initialisation
# (if not specified, will try to choose a sensible value)
- output_image:
- # type=file:
+ output_image: '"output_corrected_image.nii.gz"'
+ # type=file:
# type=file|default=: The output consists of the noise corrected version of the input image.
noise_model: '"Rician"'
# type=enum|default='Gaussian'|allowed['Gaussian','Rician']: Employ a Rician or Gaussian noise model.
shrink_factor: '2'
# type=int|default=1: Running noise correction on large images can be time consuming. To lessen computation time, the input image can be resampled. The shrink factor, specified as a single integer, describes this resampling. Shrink factor = 1 is the default.
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -177,15 +180,15 @@ tests:
save_noise: 'True'
# type=bool|default=False: True if the estimated noise should be saved to file.
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -200,10 +203,10 @@ doctests:
# '.mock()' method of the corresponding class is used instead.
dimension: '3'
# type=enum|default=2|allowed[2,3,4]: This option forces the image to be treated as a specified-dimensional image. If not specified, the program tries to infer the dimensionality from the input image.
- input_image:
+ input_image: '"im1.nii"'
# type=file|default=: A scalar image is expected as input for noise correction.
imports: *id001
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
@@ -213,15 +216,15 @@ doctests:
# dict[str, str] - name-value pairs for inputs to be provided to the doctest.
# If the field is of file-format type and the value is None, then the
# '.mock()' method of the corresponding class is used instead.
- output_image:
- # type=file:
+ output_image: '"output_corrected_image.nii.gz"'
+ # type=file:
# type=file|default=: The output consists of the noise corrected version of the input image.
noise_model: '"Rician"'
# type=enum|default='Gaussian'|allowed['Gaussian','Rician']: Employ a Rician or Gaussian noise model.
shrink_factor: '2'
# type=int|default=1: Running noise correction on large images can be time consuming. To lessen computation time, the input image can be resampled. The shrink factor, specified as a single integer, describes this resampling. Shrink factor = 1 is the default.
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
@@ -231,12 +234,12 @@ doctests:
# dict[str, str] - name-value pairs for inputs to be provided to the doctest.
# If the field is of file-format type and the value is None, then the
# '.mock()' method of the corresponding class is used instead.
- input_image:
+ input_image: '"im1.nii"'
# type=file|default=: A scalar image is expected as input for noise correction.
save_noise: 'True'
# type=bool|default=False: True if the estimated noise should be saved to file.
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
diff --git a/nipype-auto-conv/specs/denoise_image_callables.py b/nipype-auto-conv/specs/denoise_image_callables.py
index 1f1bd27..b1656c0 100644
--- a/nipype-auto-conv/specs/denoise_image_callables.py
+++ b/nipype-auto-conv/specs/denoise_image_callables.py
@@ -1 +1,210 @@
-"""Module to put any functions that are referred to in DenoiseImage.yaml"""
+"""Module to put any functions that are referred to in the "callables" section of DenoiseImage.yaml"""
+
+import attrs
+import logging
+import os
+import os.path as op
+
+
+def noise_image_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["noise_image"]
+
+
+def output_image_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["output_image"]
+
+
+iflogger = logging.getLogger("nipype.interface")
+
+
+# Original source at L809 of /interfaces/base/core.py
+def _filename_from_source(
+ name, chain=None, inputs=None, stdout=None, stderr=None, output_dir=None
+):
+ if chain is None:
+ chain = []
+
+ trait_spec = inputs.trait(name)
+ retval = getattr(inputs, name)
+ source_ext = None
+ if (retval is attrs.NOTHING) or "%s" in retval:
+ if not trait_spec.name_source:
+ return retval
+
+ # Do not generate filename when excluded by other inputs
+ if any(
+ (getattr(inputs, field) is not attrs.NOTHING)
+ for field in trait_spec.xor or ()
+ ):
+ return retval
+
+ # Do not generate filename when required fields are missing
+ if not all(
+ (getattr(inputs, field) is not attrs.NOTHING)
+ for field in trait_spec.requires or ()
+ ):
+ return retval
+
+ if (retval is not attrs.NOTHING) and "%s" in retval:
+ name_template = retval
+ else:
+ name_template = trait_spec.name_template
+ if not name_template:
+ name_template = "%s_generated"
+
+ ns = trait_spec.name_source
+ while isinstance(ns, (list, tuple)):
+ if len(ns) > 1:
+ iflogger.warning("Only one name_source per trait is allowed")
+ ns = ns[0]
+
+ if not isinstance(ns, (str, bytes)):
+ raise ValueError(
+ "name_source of '{}' trait should be an input trait "
+ "name, but a type {} object was found".format(name, type(ns))
+ )
+
+ if getattr(inputs, ns) is not attrs.NOTHING:
+ name_source = ns
+ source = getattr(inputs, name_source)
+ while isinstance(source, list):
+ source = source[0]
+
+ # special treatment for files
+ try:
+ _, base, source_ext = split_filename(source)
+ except (AttributeError, TypeError):
+ base = source
+ else:
+ if name in chain:
+ raise NipypeInterfaceError("Mutually pointing name_sources")
+
+ chain.append(name)
+ base = _filename_from_source(
+ ns,
+ chain,
+ inputs=inputs,
+ stdout=stdout,
+ stderr=stderr,
+ output_dir=output_dir,
+ )
+ if base is not attrs.NOTHING:
+ _, _, source_ext = split_filename(base)
+ else:
+ # Do not generate filename when required fields are missing
+ return retval
+
+ chain = None
+ retval = name_template % base
+ _, _, ext = split_filename(retval)
+ if trait_spec.keep_extension and (ext or source_ext):
+ if (ext is None or not ext) and source_ext:
+ retval = retval + source_ext
+ else:
+ retval = _overload_extension(
+ retval,
+ name,
+ inputs=inputs,
+ stdout=stdout,
+ stderr=stderr,
+ output_dir=output_dir,
+ )
+ return retval
+
+
+# Original source at L885 of /interfaces/base/core.py
+def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None):
+ raise NotImplementedError
+
+
+# Original source at L891 of /interfaces/base/core.py
+def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None):
+ metadata = dict(name_source=lambda t: t is not None)
+ traits = inputs.traits(**metadata)
+ if traits:
+ outputs = {}
+ for name, trait_spec in list(traits.items()):
+ out_name = name
+ if trait_spec.output_name is not None:
+ out_name = trait_spec.output_name
+ fname = _filename_from_source(
+ name, inputs=inputs, stdout=stdout, stderr=stderr, output_dir=output_dir
+ )
+ if fname is not attrs.NOTHING:
+ outputs[out_name] = os.path.abspath(fname)
+ return outputs
+
+
+# Original source at L888 of /interfaces/base/core.py
+def _overload_extension(
+ value, name=None, inputs=None, stdout=None, stderr=None, output_dir=None
+):
+ return value
+
+
+# Original source at L58 of /utils/filemanip.py
+def split_filename(fname):
+ """Split a filename into parts: path, base filename and extension.
+
+ Parameters
+ ----------
+ fname : str
+ file or path name
+
+ Returns
+ -------
+ pth : str
+ base path from fname
+ fname : str
+ filename from fname, without extension
+ ext : str
+ file extension from fname
+
+ Examples
+ --------
+ >>> from nipype.utils.filemanip import split_filename
+ >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz')
+ >>> pth
+ '/home/data'
+
+ >>> fname
+ 'subject'
+
+ >>> ext
+ '.nii.gz'
+
+ """
+
+ special_extensions = [".nii.gz", ".tar.gz", ".niml.dset"]
+
+ pth = op.dirname(fname)
+ fname = op.basename(fname)
+
+ ext = None
+ for special_ext in special_extensions:
+ ext_len = len(special_ext)
+ if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()):
+ ext = fname[-ext_len:]
+ fname = fname[:-ext_len]
+ break
+ if not ext:
+ fname, ext = op.splitext(fname)
+
+ return pth, fname, ext
+
+
+# Original source at L125 of /interfaces/base/support.py
+class NipypeInterfaceError(Exception):
+ """Custom error for interfaces"""
+
+ def __init__(self, value):
+ self.value = value
+
+ def __str__(self):
+ return "{}".format(self.value)
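
The DenoiseImage outputs are derived from name_source templating rather than fixed names, so this module carries the full _filename_from_source/split_filename machinery. A simplified sketch of the two pieces that matter most: multi-part extension handling and template substitution. The "%s_noise_corrected" template is an assumption for illustration, chosen to be consistent with the 'im1.nii' -> 'im1_noise_corrected.nii' default shown in the spec's docstring; the real template lives in the nipype spec:

import os.path as op

def split_filename(fname):
    # Simplified copy of the helper above, keeping the special multi-part extensions.
    special_extensions = [".nii.gz", ".tar.gz", ".niml.dset"]
    pth, base = op.dirname(fname), op.basename(fname)
    for special_ext in special_extensions:
        if base.lower().endswith(special_ext):
            return pth, base[: -len(special_ext)], base[-len(special_ext):]
    base, ext = op.splitext(base)
    return pth, base, ext

name_template = "%s_noise_corrected"  # hypothetical, consistent with the docstring default
pth, base, ext = split_filename("im1.nii")
print(name_template % base + ext)                   # im1_noise_corrected.nii
print(split_filename("/home/data/subject.nii.gz"))  # ('/home/data', 'subject', '.nii.gz')
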
diff --git a/nipype-auto-conv/specs/gen_warp_fields.yaml b/nipype-auto-conv/specs/gen_warp_fields.yaml
index 0519ffd..9c15214 100644
--- a/nipype-auto-conv/specs/gen_warp_fields.yaml
+++ b/nipype-auto-conv/specs/gen_warp_fields.yaml
@@ -5,7 +5,7 @@
#
# Docs
# ----
-#
+#
task_name: GenWarpFields
nipype_name: GenWarpFields
nipype_module: nipype.interfaces.ants.legacy
@@ -20,10 +20,13 @@ inputs:
# from the nipype interface, but you may want to be more specific, particularly
# for file types, where specifying the format also specifies the file that will be
# passed to the field in the automatically generated unittests.
- reference_image: generic/file
- # type=file|default=: template file to warp to
input_image: generic/file
# type=file|default=: input image to warp to template
+ reference_image: generic/file
+ # type=file|default=: template file to warp to
+ callable_defaults:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set as the `default` method of input fields
metadata:
# dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
outputs:
@@ -39,14 +42,14 @@ outputs:
# passed to the field in the automatically generated unittests.
affine_transformation: generic/file
# type=file: affine (prefix_Affine.txt)
- warp_field: generic/file
- # type=file: warp field (prefix_Warp.nii)
- inverse_warp_field: generic/file
- # type=file: inverse warp field (prefix_InverseWarp.nii)
input_file: generic/file
# type=file: input image (prefix_repaired.nii)
+ inverse_warp_field: generic/file
+ # type=file: inverse warp field (prefix_InverseWarp.nii)
output_file: generic/file
# type=file: output image (prefix_deformed.nii)
+ warp_field: generic/file
+ # type=file: warp field (prefix_Warp.nii)
callables:
# dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
# to set to the `callable` attribute of output fields
@@ -87,15 +90,15 @@ tests:
environ:
# type=dict|default={}: Environment variables
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
diff --git a/nipype-auto-conv/specs/gen_warp_fields_callables.py b/nipype-auto-conv/specs/gen_warp_fields_callables.py
index f08cf2c..a3b80a6 100644
--- a/nipype-auto-conv/specs/gen_warp_fields_callables.py
+++ b/nipype-auto-conv/specs/gen_warp_fields_callables.py
@@ -1 +1,74 @@
-"""Module to put any functions that are referred to in GenWarpFields.yaml"""
+"""Module to put any functions that are referred to in the "callables" section of GenWarpFields.yaml"""
+
+import attrs
+import os
+
+
+def affine_transformation_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["affine_transformation"]
+
+
+def input_file_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["input_file"]
+
+
+def inverse_warp_field_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["inverse_warp_field"]
+
+
+def output_file_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["output_file"]
+
+
+def warp_field_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["warp_field"]
+
+
+# Original source at L885 of /interfaces/base/core.py
+def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None):
+ raise NotImplementedError
+
+
+# Original source at L141 of /interfaces/ants/legacy.py
+def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None):
+ outputs = {}
+ transmodel = inputs.transformation_model
+
+ # When transform is set as 'RI'/'RA', wrap fields should not be expected
+ # The default transformation is GR, which outputs the wrap fields
+ if (transmodel is attrs.NOTHING) or (
+ (transmodel is not attrs.NOTHING) and transmodel not in ["RI", "RA"]
+ ):
+ outputs["warp_field"] = os.path.join(
+ output_dir, inputs.out_prefix + "Warp.nii.gz"
+ )
+ outputs["inverse_warp_field"] = os.path.join(
+ output_dir, inputs.out_prefix + "InverseWarp.nii.gz"
+ )
+
+ outputs["affine_transformation"] = os.path.join(
+ output_dir, inputs.out_prefix + "Affine.txt"
+ )
+ outputs["input_file"] = os.path.join(
+ output_dir, inputs.out_prefix + "repaired.nii.gz"
+ )
+ outputs["output_file"] = os.path.join(
+ output_dir, inputs.out_prefix + "deformed.nii.gz"
+ )
+
+ return outputs
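
A condensed version of the branching in _list_outputs above: 'RI'/'RA' transformation models suppress the forward and inverse warp fields, while anything else (including the 'GR' default mentioned in the comment) keeps them. The 'ants_' prefix and '/tmp/work' directory are hypothetical values for illustration:

import os
from types import SimpleNamespace

def expected_outputs(inputs, output_dir):
    # Condensed sketch of _list_outputs above (assumes transformation_model is set).
    outputs = {
        "affine_transformation": os.path.join(output_dir, inputs.out_prefix + "Affine.txt"),
        "input_file": os.path.join(output_dir, inputs.out_prefix + "repaired.nii.gz"),
        "output_file": os.path.join(output_dir, inputs.out_prefix + "deformed.nii.gz"),
    }
    if inputs.transformation_model not in ("RI", "RA"):
        outputs["warp_field"] = os.path.join(output_dir, inputs.out_prefix + "Warp.nii.gz")
        outputs["inverse_warp_field"] = os.path.join(
            output_dir, inputs.out_prefix + "InverseWarp.nii.gz"
        )
    return outputs

# Sorted output names produced for the default-like 'GR' model vs the rigid/affine-only 'RI' model.
print(sorted(expected_outputs(SimpleNamespace(transformation_model="GR", out_prefix="ants_"), "/tmp/work")))
print(sorted(expected_outputs(SimpleNamespace(transformation_model="RI", out_prefix="ants_"), "/tmp/work")))
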
diff --git a/nipype-auto-conv/specs/image_math.yaml b/nipype-auto-conv/specs/image_math.yaml
index aac8fdd..eb9bac5 100644
--- a/nipype-auto-conv/specs/image_math.yaml
+++ b/nipype-auto-conv/specs/image_math.yaml
@@ -5,9 +5,9 @@
#
# Docs
# ----
-#
+#
# Operations over images.
-#
+#
# Examples
# --------
# >>> ImageMath(
@@ -15,51 +15,51 @@
# ... operation='+',
# ... op2='2').cmdline
# 'ImageMath 3 structural_maths.nii + structural.nii 2'
-#
+#
# >>> ImageMath(
# ... op1='structural.nii',
# ... operation='Project',
# ... op2='1 2').cmdline
# 'ImageMath 3 structural_maths.nii Project structural.nii 1 2'
-#
+#
# >>> ImageMath(
# ... op1='structural.nii',
# ... operation='G',
# ... op2='4').cmdline
# 'ImageMath 3 structural_maths.nii G structural.nii 4'
-#
+#
# >>> ImageMath(
# ... op1='structural.nii',
# ... operation='TruncateImageIntensity',
# ... op2='0.005 0.999 256').cmdline
# 'ImageMath 3 structural_maths.nii TruncateImageIntensity structural.nii 0.005 0.999 256'
-#
+#
# By default, Nipype copies headers from the first input image (``op1``)
# to the output image.
# For some operations, as the ``PadImage`` operation, the header cannot be copied from inputs to
# outputs, and so ``copy_header`` option is automatically set to ``False``.
-#
+#
# >>> pad = ImageMath(
# ... op1='structural.nii',
# ... operation='PadImage')
# >>> pad.inputs.copy_header
# False
-#
+#
# While the operation is set to ``PadImage``,
# setting ``copy_header = True`` will have no effect.
-#
+#
# >>> pad.inputs.copy_header = True
# >>> pad.inputs.copy_header
# False
-#
+#
# For any other operation, ``copy_header`` can be enabled/disabled normally:
-#
+#
# >>> pad.inputs.operation = "ME"
# >>> pad.inputs.copy_header = True
# >>> pad.inputs.copy_header
# True
-#
-#
+#
+#
task_name: ImageMath
nipype_name: ImageMath
nipype_module: nipype.interfaces.ants.utils
@@ -74,11 +74,14 @@ inputs:
# from the nipype interface, but you may want to be more specific, particularly
# for file types, where specifying the format also specifies the file that will be
# passed to the field in the automatically generated unittests.
- output_image: generic/file
- # type=file: output image file
- # type=file|default=: output image file
op1: medimage/nifti1
# type=file|default=: first operator
+ output_image: Path
+ # type=file: output image file
+ # type=file|default=: output image file
+ callable_defaults:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set as the `default` method of input fields
metadata:
# dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
outputs:
@@ -126,15 +129,15 @@ tests:
environ:
# type=dict|default={}: Environment variables
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -150,15 +153,15 @@ tests:
op2: '"2"'
# type=traitcompound|default=None: second operator
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -174,15 +177,15 @@ tests:
op2: '"1 2"'
# type=traitcompound|default=None: second operator
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -198,15 +201,15 @@ tests:
op2: '"4"'
# type=traitcompound|default=None: second operator
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -222,15 +225,15 @@ tests:
op2: '"0.005 0.999 256"'
# type=traitcompound|default=None: second operator
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -243,14 +246,14 @@ doctests:
# dict[str, str] - name-value pairs for inputs to be provided to the doctest.
# If the field is of file-format type and the value is None, then the
# '.mock()' method of the corresponding class is used instead.
- op1:
+ op1: '"structural.nii"'
# type=file|default=: first operator
operation: '"+"'
# type=enum|default='m'|allowed['+','-','/','4DTensorTo3DTensor','Byte','Canny','Convolve','CorruptImage','D','Decision','ExtractContours','ExtractSlice','ExtractVectorComponent','FillHoles','Finite','FlattenImage','G','GC','GD','GE','GO','GetLargestComponent','Grad','LabelStats','Laplacian','Lipschitz','MC','MD','ME','MO','MTR','MaurerDistance','Neg','NeighborhoodStats','Normalize','PValueImage','PadImage','Project','ReplaceVoxelValue','ReplicateDisplacement','ReplicateImage','RescaleImage','SetTimeSpacing','SetTimeSpacingWarp','Sharpen','SigmoidImage','TensorAxialDiffusion','TensorColor','TensorEigenvalue','TensorFA','TensorFADenominator','TensorFANumerator','TensorMask','TensorMeanDiffusion','TensorRadialDiffusion','TensorToVector','TensorToVectorComponent','ThresholdAtMean','Translate','TriPlanarView','TruncateImageIntensity','UnsharpMask','WindowImage','^','abs','addtozero','exp','m','max','mean','overadd','stack','total','v+','v-','vm','vtotal']: mathematical operations
op2: '"2"'
# type=traitcompound|default=None: second operator
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
@@ -260,14 +263,14 @@ doctests:
# dict[str, str] - name-value pairs for inputs to be provided to the doctest.
# If the field is of file-format type and the value is None, then the
# '.mock()' method of the corresponding class is used instead.
- op1:
+ op1: '"structural.nii"'
# type=file|default=: first operator
operation: '"Project"'
# type=enum|default='m'|allowed['+','-','/','4DTensorTo3DTensor','Byte','Canny','Convolve','CorruptImage','D','Decision','ExtractContours','ExtractSlice','ExtractVectorComponent','FillHoles','Finite','FlattenImage','G','GC','GD','GE','GO','GetLargestComponent','Grad','LabelStats','Laplacian','Lipschitz','MC','MD','ME','MO','MTR','MaurerDistance','Neg','NeighborhoodStats','Normalize','PValueImage','PadImage','Project','ReplaceVoxelValue','ReplicateDisplacement','ReplicateImage','RescaleImage','SetTimeSpacing','SetTimeSpacingWarp','Sharpen','SigmoidImage','TensorAxialDiffusion','TensorColor','TensorEigenvalue','TensorFA','TensorFADenominator','TensorFANumerator','TensorMask','TensorMeanDiffusion','TensorRadialDiffusion','TensorToVector','TensorToVectorComponent','ThresholdAtMean','Translate','TriPlanarView','TruncateImageIntensity','UnsharpMask','WindowImage','^','abs','addtozero','exp','m','max','mean','overadd','stack','total','v+','v-','vm','vtotal']: mathematical operations
op2: '"1 2"'
# type=traitcompound|default=None: second operator
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
@@ -277,14 +280,14 @@ doctests:
# dict[str, str] - name-value pairs for inputs to be provided to the doctest.
# If the field is of file-format type and the value is None, then the
# '.mock()' method of the corresponding class is used instead.
- op1:
+ op1: '"structural.nii"'
# type=file|default=: first operator
operation: '"G"'
# type=enum|default='m'|allowed['+','-','/','4DTensorTo3DTensor','Byte','Canny','Convolve','CorruptImage','D','Decision','ExtractContours','ExtractSlice','ExtractVectorComponent','FillHoles','Finite','FlattenImage','G','GC','GD','GE','GO','GetLargestComponent','Grad','LabelStats','Laplacian','Lipschitz','MC','MD','ME','MO','MTR','MaurerDistance','Neg','NeighborhoodStats','Normalize','PValueImage','PadImage','Project','ReplaceVoxelValue','ReplicateDisplacement','ReplicateImage','RescaleImage','SetTimeSpacing','SetTimeSpacingWarp','Sharpen','SigmoidImage','TensorAxialDiffusion','TensorColor','TensorEigenvalue','TensorFA','TensorFADenominator','TensorFANumerator','TensorMask','TensorMeanDiffusion','TensorRadialDiffusion','TensorToVector','TensorToVectorComponent','ThresholdAtMean','Translate','TriPlanarView','TruncateImageIntensity','UnsharpMask','WindowImage','^','abs','addtozero','exp','m','max','mean','overadd','stack','total','v+','v-','vm','vtotal']: mathematical operations
op2: '"4"'
# type=traitcompound|default=None: second operator
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
@@ -294,14 +297,14 @@ doctests:
# dict[str, str] - name-value pairs for inputs to be provided to the doctest.
# If the field is of file-format type and the value is None, then the
# '.mock()' method of the corresponding class is used instead.
- op1:
+ op1: '"structural.nii"'
# type=file|default=: first operator
operation: '"TruncateImageIntensity"'
# type=enum|default='m'|allowed['+','-','/','4DTensorTo3DTensor','Byte','Canny','Convolve','CorruptImage','D','Decision','ExtractContours','ExtractSlice','ExtractVectorComponent','FillHoles','Finite','FlattenImage','G','GC','GD','GE','GO','GetLargestComponent','Grad','LabelStats','Laplacian','Lipschitz','MC','MD','ME','MO','MTR','MaurerDistance','Neg','NeighborhoodStats','Normalize','PValueImage','PadImage','Project','ReplaceVoxelValue','ReplicateDisplacement','ReplicateImage','RescaleImage','SetTimeSpacing','SetTimeSpacingWarp','Sharpen','SigmoidImage','TensorAxialDiffusion','TensorColor','TensorEigenvalue','TensorFA','TensorFADenominator','TensorFANumerator','TensorMask','TensorMeanDiffusion','TensorRadialDiffusion','TensorToVector','TensorToVectorComponent','ThresholdAtMean','Translate','TriPlanarView','TruncateImageIntensity','UnsharpMask','WindowImage','^','abs','addtozero','exp','m','max','mean','overadd','stack','total','v+','v-','vm','vtotal']: mathematical operations
op2: '"0.005 0.999 256"'
# type=traitcompound|default=None: second operator
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
diff --git a/nipype-auto-conv/specs/image_math_callables.py b/nipype-auto-conv/specs/image_math_callables.py
index 49800dd..e845128 100644
--- a/nipype-auto-conv/specs/image_math_callables.py
+++ b/nipype-auto-conv/specs/image_math_callables.py
@@ -1 +1,203 @@
-"""Module to put any functions that are referred to in ImageMath.yaml"""
+"""Module to put any functions that are referred to in the "callables" section of ImageMath.yaml"""
+
+import attrs
+import logging
+import os
+import os.path as op
+
+
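+# `output_image_callable` is the hook referenced from the "callables" section
+# of the adjacent YAML spec. It is expected to be called with the task's
+# output directory, the resolved inputs and the captured stdout/stderr, and it
+# returns the path of the image that ImageMath should have produced.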
+def output_image_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["output_image"]
+
+
+iflogger = logging.getLogger("nipype.interface")
+
+
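+# The helpers below are vendored copies of nipype internals (see the
+# "Original source" markers), presumably so output paths can be computed
+# without importing nipype at runtime. `_filename_from_source` reproduces
+# nipype's name_source/name_template logic: it derives an output filename from
+# another input field's value, optionally keeping that field's extension.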
+# Original source at L809 of /interfaces/base/core.py
+def _filename_from_source(
+ name, chain=None, inputs=None, stdout=None, stderr=None, output_dir=None
+):
+ if chain is None:
+ chain = []
+
+ trait_spec = inputs.trait(name)
+ retval = getattr(inputs, name)
+ source_ext = None
+ if (retval is attrs.NOTHING) or "%s" in retval:
+ if not trait_spec.name_source:
+ return retval
+
+ # Do not generate filename when excluded by other inputs
+ if any(
+ (getattr(inputs, field) is not attrs.NOTHING)
+ for field in trait_spec.xor or ()
+ ):
+ return retval
+
+ # Do not generate filename when required fields are missing
+ if not all(
+ (getattr(inputs, field) is not attrs.NOTHING)
+ for field in trait_spec.requires or ()
+ ):
+ return retval
+
+ if (retval is not attrs.NOTHING) and "%s" in retval:
+ name_template = retval
+ else:
+ name_template = trait_spec.name_template
+ if not name_template:
+ name_template = "%s_generated"
+
+ ns = trait_spec.name_source
+ while isinstance(ns, (list, tuple)):
+ if len(ns) > 1:
+ iflogger.warning("Only one name_source per trait is allowed")
+ ns = ns[0]
+
+ if not isinstance(ns, (str, bytes)):
+ raise ValueError(
+ "name_source of '{}' trait should be an input trait "
+ "name, but a type {} object was found".format(name, type(ns))
+ )
+
+ if getattr(inputs, ns) is not attrs.NOTHING:
+ name_source = ns
+ source = getattr(inputs, name_source)
+ while isinstance(source, list):
+ source = source[0]
+
+ # special treatment for files
+ try:
+ _, base, source_ext = split_filename(source)
+ except (AttributeError, TypeError):
+ base = source
+ else:
+ if name in chain:
+ raise NipypeInterfaceError("Mutually pointing name_sources")
+
+ chain.append(name)
+ base = _filename_from_source(
+ ns,
+ chain,
+ inputs=inputs,
+ stdout=stdout,
+ stderr=stderr,
+ output_dir=output_dir,
+ )
+ if base is not attrs.NOTHING:
+ _, _, source_ext = split_filename(base)
+ else:
+ # Do not generate filename when required fields are missing
+ return retval
+
+ chain = None
+ retval = name_template % base
+ _, _, ext = split_filename(retval)
+ if trait_spec.keep_extension and (ext or source_ext):
+ if (ext is None or not ext) and source_ext:
+ retval = retval + source_ext
+ else:
+ retval = _overload_extension(
+ retval,
+ name,
+ inputs=inputs,
+ stdout=stdout,
+ stderr=stderr,
+ output_dir=output_dir,
+ )
+ return retval
+
+
+# Original source at L885 of /interfaces/base/core.py
+def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None):
+ raise NotImplementedError
+
+
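+# `_list_outputs` walks every input trait that declares a `name_source`,
+# resolves it through `_filename_from_source`, and returns a dict of absolute
+# paths; this is where the `output_image` value returned by the callable above
+# comes from.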
+# Original source at L891 of /interfaces/base/core.py
+def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None):
+ metadata = dict(name_source=lambda t: t is not None)
+ traits = inputs.traits(**metadata)
+ if traits:
+ outputs = {}
+ for name, trait_spec in list(traits.items()):
+ out_name = name
+ if trait_spec.output_name is not None:
+ out_name = trait_spec.output_name
+ fname = _filename_from_source(
+ name, inputs=inputs, stdout=stdout, stderr=stderr, output_dir=output_dir
+ )
+ if fname is not attrs.NOTHING:
+ outputs[out_name] = os.path.abspath(fname)
+ return outputs
+
+
+# Original source at L888 of /interfaces/base/core.py
+def _overload_extension(
+ value, name=None, inputs=None, stdout=None, stderr=None, output_dir=None
+):
+ return value
+
+
+# Original source at L58 of /utils/filemanip.py
+def split_filename(fname):
+ """Split a filename into parts: path, base filename and extension.
+
+ Parameters
+ ----------
+ fname : str
+ file or path name
+
+ Returns
+ -------
+ pth : str
+ base path from fname
+ fname : str
+ filename from fname, without extension
+ ext : str
+ file extension from fname
+
+ Examples
+ --------
+ >>> from nipype.utils.filemanip import split_filename
+ >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz')
+ >>> pth
+ '/home/data'
+
+ >>> fname
+ 'subject'
+
+ >>> ext
+ '.nii.gz'
+
+ """
+
+ special_extensions = [".nii.gz", ".tar.gz", ".niml.dset"]
+
+ pth = op.dirname(fname)
+ fname = op.basename(fname)
+
+ ext = None
+ for special_ext in special_extensions:
+ ext_len = len(special_ext)
+ if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()):
+ ext = fname[-ext_len:]
+ fname = fname[:-ext_len]
+ break
+ if not ext:
+ fname, ext = op.splitext(fname)
+
+ return pth, fname, ext
+
+
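+# Raised by `_filename_from_source` when two inputs point at each other via
+# `name_source`, detected through the `chain` bookkeeping above.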
+# Original source at L125 of /interfaces/base/support.py
+class NipypeInterfaceError(Exception):
+ """Custom error for interfaces"""
+
+ def __init__(self, value):
+ self.value = value
+
+ def __str__(self):
+ return "{}".format(self.value)
diff --git a/nipype-auto-conv/specs/joint_fusion.yaml b/nipype-auto-conv/specs/joint_fusion.yaml
index 81c5971..5b7d94e 100644
--- a/nipype-auto-conv/specs/joint_fusion.yaml
+++ b/nipype-auto-conv/specs/joint_fusion.yaml
@@ -5,23 +5,23 @@
#
# Docs
# ----
-#
+#
# An image fusion algorithm.
-#
+#
# Developed by Hongzhi Wang and Paul Yushkevich, and it won segmentation challenges
# at MICCAI 2012 and MICCAI 2013.
# The original label fusion framework was extended to accommodate intensities by Brian
# Avants.
# This implementation is based on Paul's original ITK-style implementation
# and Brian's ANTsR implementation.
-#
+#
# References include 1) H. Wang, J. W. Suh, S.
# Das, J. Pluta, C. Craige, P. Yushkevich, Multi-atlas segmentation with joint
# label fusion IEEE Trans. on Pattern Analysis and Machine Intelligence, 35(3),
# 611-623, 2013. and 2) H. Wang and P. A. Yushkevich, Multi-atlas segmentation
# with joint label fusion and corrective learning--an open source implementation,
# Front. Neuroinform., 2013.
-#
+#
# Examples
# --------
# >>> from nipype.interfaces.ants import JointFusion
@@ -33,12 +33,12 @@
# >>> jf.cmdline
# "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -l segmentation0.nii.gz
# -b 2.0 -o ants_fusion_label_output.nii -s 3x3x3 -t ['im1.nii']"
-#
+#
# >>> jf.inputs.target_image = [ ['im1.nii', 'im2.nii'] ]
# >>> jf.cmdline
# "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -l segmentation0.nii.gz
# -b 2.0 -o ants_fusion_label_output.nii -s 3x3x3 -t ['im1.nii', 'im2.nii']"
-#
+#
# >>> jf.inputs.atlas_image = [ ['rc1s1.nii','rc1s2.nii'],
# ... ['rc2s1.nii','rc2s2.nii'] ]
# >>> jf.inputs.atlas_segmentation_image = ['segmentation0.nii.gz',
@@ -47,7 +47,7 @@
# "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii']
# -l segmentation0.nii.gz -l segmentation1.nii.gz -b 2.0 -o ants_fusion_label_output.nii
# -s 3x3x3 -t ['im1.nii', 'im2.nii']"
-#
+#
# >>> jf.inputs.dimension = 3
# >>> jf.inputs.alpha = 0.5
# >>> jf.inputs.beta = 1.0
@@ -57,7 +57,7 @@
# "antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii']
# -l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -o ants_fusion_label_output.nii
# -p 3x2x1 -s 3 -t ['im1.nii', 'im2.nii']"
-#
+#
# >>> jf.inputs.search_radius = ['mask.nii']
# >>> jf.inputs.verbose = True
# >>> jf.inputs.exclusion_image = ['roi01.nii', 'roi02.nii']
@@ -66,7 +66,7 @@
# "antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii']
# -l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -e 1[roi01.nii] -e 2[roi02.nii]
# -o ants_fusion_label_output.nii -p 3x2x1 -s mask.nii -t ['im1.nii', 'im2.nii'] -v"
-#
+#
# >>> jf.inputs.out_label_fusion = 'ants_fusion_label_output.nii'
# >>> jf.inputs.out_intensity_fusion_name_format = 'ants_joint_fusion_intensity_%d.nii.gz'
# >>> jf.inputs.out_label_post_prob_name_format = 'ants_joint_fusion_posterior_%d.nii.gz'
@@ -77,8 +77,8 @@
# -o [ants_fusion_label_output.nii, ants_joint_fusion_intensity_%d.nii.gz,
# ants_joint_fusion_posterior_%d.nii.gz, ants_joint_fusion_voting_weight_%d.nii.gz]
# -p 3x2x1 -s mask.nii -t ['im1.nii', 'im2.nii'] -v"
-#
-#
+#
+#
task_name: JointFusion
nipype_name: JointFusion
nipype_module: nipype.interfaces.ants.segmentation
@@ -99,9 +99,12 @@ inputs:
# type=list|default=[]: Specify an exclusion region for the given label.
mask_image: generic/file
# type=file|default=: If a mask image is specified, fusion is only performed in the mask region.
- out_label_fusion: medimage/nifti1
- # type=file:
+ out_label_fusion: Path
+ # type=file:
# type=file|default=: The output label fusion image.
+ callable_defaults:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set as the `default` method of input fields
metadata:
# dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
outputs:
@@ -115,9 +118,15 @@ outputs:
# from the nipype interface, but you may want to be more specific, particularly
# for file types, where specifying the format also specifies the file that will be
# passed to the field in the automatically generated unittests.
+ out_atlas_voting_weight: generic/file+list-of
+ # type=outputmultiobject:
+ out_intensity_fusion: generic/file+list-of
+ # type=outputmultiobject:
out_label_fusion: medimage/nifti1
- # type=file:
+ # type=file:
# type=file|default=: The output label fusion image.
+ out_label_post_prob: generic/file+list-of
+ # type=outputmultiobject:
callables:
# dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
# to set to the `callable` attribute of output fields
@@ -160,7 +169,7 @@ tests:
mask_image:
# type=file|default=: If a mask image is specified, fusion is only performed in the mask region.
out_label_fusion:
- # type=file:
+ # type=file:
# type=file|default=: The output label fusion image.
out_intensity_fusion_name_format:
# type=str|default='': Optional intensity fusion image file name format. (e.g. "antsJointFusionIntensity_%d.nii.gz")
@@ -177,15 +186,15 @@ tests:
environ:
# type=dict|default={}: Environment variables
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -194,8 +203,8 @@ tests:
- inputs:
# dict[str, str] - values to provide to inputs fields in the task initialisation
# (if not specified, will try to choose a sensible value)
- out_label_fusion:
- # type=file:
+ out_label_fusion: '"ants_fusion_label_output.nii"'
+ # type=file:
# type=file|default=: The output label fusion image.
atlas_image: '[ ["rc1s1.nii","rc1s2.nii"] ]'
# type=list|default=[]: The atlas image (or multimodal atlas images) assumed to be aligned to a common image domain.
@@ -204,15 +213,15 @@ tests:
target_image: '["im1.nii"]'
# type=list|default=[]: The target image (or multimodal target images) assumed to be aligned to a common image domain.
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -224,15 +233,15 @@ tests:
target_image: '[ ["im1.nii", "im2.nii"] ]'
# type=list|default=[]: The target image (or multimodal target images) assumed to be aligned to a common image domain.
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -246,15 +255,15 @@ tests:
atlas_segmentation_image:
# type=inputmultiobject|default=[]: The atlas segmentation images. For performing label fusion the number of specified segmentations should be identical to the number of atlas image sets.
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -274,15 +283,15 @@ tests:
search_radius: '[3]'
# type=list|default=[3, 3, 3]: Search radius for similarity measures. Default = 3x3x3. One can also specify an image where the value at the voxel specifies the isotropic search radius at that voxel.
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -300,15 +309,15 @@ tests:
exclusion_image_label: '["1","2"]'
# type=list|default=[]: Specify a label for the exclusion region.
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -317,8 +326,8 @@ tests:
- inputs:
# dict[str, str] - values to provide to inputs fields in the task initialisation
# (if not specified, will try to choose a sensible value)
- out_label_fusion:
- # type=file:
+ out_label_fusion: '"ants_fusion_label_output.nii"'
+ # type=file:
# type=file|default=: The output label fusion image.
out_intensity_fusion_name_format: '"ants_joint_fusion_intensity_%d.nii.gz"'
# type=str|default='': Optional intensity fusion image file name format. (e.g. "antsJointFusionIntensity_%d.nii.gz")
@@ -327,15 +336,15 @@ tests:
out_atlas_voting_weight_name_format: '"ants_joint_fusion_voting_weight_%d.nii.gz"'
# type=str|default='antsJointFusionVotingWeight_%d.nii.gz': Optional atlas voting weight image file name format.
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -348,17 +357,17 @@ doctests:
# dict[str, str] - name-value pairs for inputs to be provided to the doctest.
# If the field is of file-format type and the value is None, then the
# '.mock()' method of the corresponding class is used instead.
- out_label_fusion:
- # type=file:
+ out_label_fusion: '"ants_fusion_label_output.nii"'
+ # type=file:
# type=file|default=: The output label fusion image.
atlas_image: '[ ["rc1s1.nii","rc1s2.nii"] ]'
# type=list|default=[]: The atlas image (or multimodal atlas images) assumed to be aligned to a common image domain.
- atlas_segmentation_image:
+ atlas_segmentation_image: '["segmentation0.nii.gz"]'
# type=inputmultiobject|default=[]: The atlas segmentation images. For performing label fusion the number of specified segmentations should be identical to the number of atlas image sets.
target_image: '["im1.nii"]'
# type=list|default=[]: The target image (or multimodal target images) assumed to be aligned to a common image domain.
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
@@ -371,7 +380,7 @@ doctests:
target_image: '[ ["im1.nii", "im2.nii"] ]'
# type=list|default=[]: The target image (or multimodal target images) assumed to be aligned to a common image domain.
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
@@ -383,10 +392,10 @@ doctests:
# '.mock()' method of the corresponding class is used instead.
atlas_image: '[ ["rc1s1.nii","rc1s2.nii"],["rc2s1.nii","rc2s2.nii"] ]'
# type=list|default=[]: The atlas image (or multimodal atlas images) assumed to be aligned to a common image domain.
- atlas_segmentation_image:
+ atlas_segmentation_image: '["segmentation0.nii.gz","segmentation1.nii.gz"]'
# type=inputmultiobject|default=[]: The atlas segmentation images. For performing label fusion the number of specified segmentations should be identical to the number of atlas image sets.
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
@@ -407,7 +416,7 @@ doctests:
search_radius: '[3]'
# type=list|default=[3, 3, 3]: Search radius for similarity measures. Default = 3x3x3. One can also specify an image where the value at the voxel specifies the isotropic search radius at that voxel.
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
@@ -421,12 +430,12 @@ doctests:
# type=list|default=[3, 3, 3]: Search radius for similarity measures. Default = 3x3x3. One can also specify an image where the value at the voxel specifies the isotropic search radius at that voxel.
verbose: 'True'
# type=bool|default=False: Verbose output.
- exclusion_image:
+ exclusion_image: '["roi01.nii", "roi02.nii"]'
# type=list|default=[]: Specify an exclusion region for the given label.
exclusion_image_label: '["1","2"]'
# type=list|default=[]: Specify a label for the exclusion region.
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
@@ -436,8 +445,8 @@ doctests:
# dict[str, str] - name-value pairs for inputs to be provided to the doctest.
# If the field is of file-format type and the value is None, then the
# '.mock()' method of the corresponding class is used instead.
- out_label_fusion:
- # type=file:
+ out_label_fusion: '"ants_fusion_label_output.nii"'
+ # type=file:
# type=file|default=: The output label fusion image.
out_intensity_fusion_name_format: '"ants_joint_fusion_intensity_%d.nii.gz"'
# type=str|default='': Optional intensity fusion image file name format. (e.g. "antsJointFusionIntensity_%d.nii.gz")
@@ -446,7 +455,7 @@ doctests:
out_atlas_voting_weight_name_format: '"ants_joint_fusion_voting_weight_%d.nii.gz"'
# type=str|default='antsJointFusionVotingWeight_%d.nii.gz': Optional atlas voting weight image file name format.
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
diff --git a/nipype-auto-conv/specs/joint_fusion_callables.py b/nipype-auto-conv/specs/joint_fusion_callables.py
index f0bf25d..57ba462 100644
--- a/nipype-auto-conv/specs/joint_fusion_callables.py
+++ b/nipype-auto-conv/specs/joint_fusion_callables.py
@@ -1 +1,60 @@
-"""Module to put any functions that are referred to in JointFusion.yaml"""
+"""Module to put any functions that are referred to in the "callables" section of JointFusion.yaml"""
+
+import attrs
+import os
+from glob import glob
+
+
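+# The four callables below are referenced from the "callables" section of the
+# adjacent YAML spec; each one simply picks a single entry out of the dict
+# built by `_list_outputs` at the bottom of this module.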
+def out_atlas_voting_weight_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["out_atlas_voting_weight"]
+
+
+def out_intensity_fusion_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["out_intensity_fusion"]
+
+
+def out_label_fusion_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["out_label_fusion"]
+
+
+def out_label_post_prob_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["out_label_post_prob"]
+
+
+# Original source at L885 of /interfaces/base/core.py
+def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None):
+ raise NotImplementedError
+
+
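+# Unlike the generic core implementation vendored into the other *_callables
+# modules, this `_list_outputs` comes from the ANTs JointFusion interface
+# itself: the single label-fusion image is returned as an absolute path, while
+# the "%d"-templated intensity, posterior and voting-weight outputs are
+# collected by globbing for the matching numbered files.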
+# Original source at L1541 of /interfaces/ants/segmentation.py
+def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None):
+ outputs = {}
+ if inputs.out_label_fusion is not attrs.NOTHING:
+ outputs["out_label_fusion"] = os.path.abspath(inputs.out_label_fusion)
+ if inputs.out_intensity_fusion_name_format is not attrs.NOTHING:
+ outputs["out_intensity_fusion"] = glob(
+ os.path.abspath(inputs.out_intensity_fusion_name_format.replace("%d", "*"))
+ )
+ if inputs.out_label_post_prob_name_format is not attrs.NOTHING:
+ outputs["out_label_post_prob"] = glob(
+ os.path.abspath(inputs.out_label_post_prob_name_format.replace("%d", "*"))
+ )
+ if inputs.out_atlas_voting_weight_name_format is not attrs.NOTHING:
+ outputs["out_atlas_voting_weight"] = glob(
+ os.path.abspath(
+ inputs.out_atlas_voting_weight_name_format.replace("%d", "*")
+ )
+ )
+ return outputs
diff --git a/nipype-auto-conv/specs/kelly_kapowski.yaml b/nipype-auto-conv/specs/kelly_kapowski.yaml
index fd76247..7966dc9 100644
--- a/nipype-auto-conv/specs/kelly_kapowski.yaml
+++ b/nipype-auto-conv/specs/kelly_kapowski.yaml
@@ -5,13 +5,13 @@
#
# Docs
# ----
-#
+#
# Nipype Interface to ANTs' KellyKapowski, also known as DiReCT.
-#
+#
# DiReCT is a registration based estimate of cortical thickness. It was published
# in S. R. Das, B. B. Avants, M. Grossman, and J. C. Gee, Registration based
# cortical thickness measurement, Neuroimage 2009, 45:867--879.
-#
+#
# Examples
# --------
# >>> from nipype.interfaces.ants.segmentation import KellyKapowski
@@ -27,8 +27,8 @@
# --maximum-number-of-invert-displacement-field-iterations 20 --number-of-integration-points 10
# --segmentation-image "[segmentation0.nii.gz,2,3]" --smoothing-variance 1.000000
# --smoothing-velocity-field-parameter 1.500000 --thickness-prior-estimate 10.000000'
-#
-#
+#
+#
task_name: KellyKapowski
nipype_name: KellyKapowski
nipype_module: nipype.interfaces.ants.segmentation
@@ -43,20 +43,23 @@ inputs:
# from the nipype interface, but you may want to be more specific, particularly
# for file types, where specifying the format also specifies the file that will be
# passed to the field in the automatically generated unittests.
- segmentation_image: medimage/nifti-gz
- # type=file|default=: A segmentation image must be supplied labeling the gray and white matters. Default values = 2 and 3, respectively.
+ cortical_thickness: Path
+ # type=file: A thickness map defined in the segmented gray matter.
+ # type=file|default=: Filename for the cortical thickness.
gray_matter_prob_image: generic/file
# type=file|default=: In addition to the segmentation image, a gray matter probability image can be used. If no such image is supplied, one is created using the segmentation image and a variance of 1.0 mm.
- white_matter_prob_image: generic/file
- # type=file|default=: In addition to the segmentation image, a white matter probability image can be used. If no such image is supplied, one is created using the segmentation image and a variance of 1.0 mm.
+ segmentation_image: medimage/nifti-gz
+ # type=file|default=: A segmentation image must be supplied labeling the gray and white matters. Default values = 2 and 3, respectively.
thickness_prior_image: generic/file
# type=file|default=: An image containing spatially varying prior thickness values.
- cortical_thickness: generic/file
- # type=file: A thickness map defined in the segmented gray matter.
- # type=file|default=: Filename for the cortical thickness.
- warped_white_matter: generic/file
+ warped_white_matter: Path
# type=file: A warped white matter image.
# type=file|default=: Filename for the warped white matter file.
+ white_matter_prob_image: generic/file
+ # type=file|default=: In addition to the segmentation image, a white matter probability image can be used. If no such image is supplied, one is created using the segmentation image and a variance of 1.0 mm.
+ callable_defaults:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set as the `default` method of input fields
metadata:
# dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
outputs:
@@ -130,15 +133,15 @@ tests:
environ:
# type=dict|default={}: Environment variables
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -156,15 +159,15 @@ tests:
thickness_prior_estimate: '10'
# type=float|default=10: Provides a prior constraint on the final thickness measurement in mm.
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -179,14 +182,14 @@ doctests:
# '.mock()' method of the corresponding class is used instead.
dimension: '3'
# type=enum|default=3|allowed[2,3]: image dimension (2 or 3)
- segmentation_image:
+ segmentation_image: '"segmentation0.nii.gz"'
# type=file|default=: A segmentation image must be supplied labeling the gray and white matters. Default values = 2 and 3, respectively.
convergence: '"[45,0.0,10]"'
# type=str|default='[50,0.001,10]': Convergence is determined by fitting a line to the normalized energy profile of the last N iterations (where N is specified by the window size) and determining the slope which is then compared with the convergence threshold.
thickness_prior_estimate: '10'
# type=float|default=10: Provides a prior constraint on the final thickness measurement in mm.
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
diff --git a/nipype-auto-conv/specs/kelly_kapowski_callables.py b/nipype-auto-conv/specs/kelly_kapowski_callables.py
index 9073c2b..543950d 100644
--- a/nipype-auto-conv/specs/kelly_kapowski_callables.py
+++ b/nipype-auto-conv/specs/kelly_kapowski_callables.py
@@ -1 +1,222 @@
-"""Module to put any functions that are referred to in KellyKapowski.yaml"""
+"""Module to put any functions that are referred to in the "callables" section of KellyKapowski.yaml"""
+
+import attrs
+import logging
+import os
+import os.path as op
+
+
+def cortical_thickness_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["cortical_thickness"]
+
+
+def warped_white_matter_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["warped_white_matter"]
+
+
+iflogger = logging.getLogger("nipype.interface")
+
+
+# Original source at L809 of /interfaces/base/core.py
+def _filename_from_source(
+ name, chain=None, inputs=None, stdout=None, stderr=None, output_dir=None
+):
+ if chain is None:
+ chain = []
+
+ trait_spec = inputs.trait(name)
+ retval = getattr(inputs, name)
+ source_ext = None
+ if (retval is attrs.NOTHING) or "%s" in retval:
+ if not trait_spec.name_source:
+ return retval
+
+ # Do not generate filename when excluded by other inputs
+ if any(
+ (getattr(inputs, field) is not attrs.NOTHING)
+ for field in trait_spec.xor or ()
+ ):
+ return retval
+
+ # Do not generate filename when required fields are missing
+ if not all(
+ (getattr(inputs, field) is not attrs.NOTHING)
+ for field in trait_spec.requires or ()
+ ):
+ return retval
+
+ if (retval is not attrs.NOTHING) and "%s" in retval:
+ name_template = retval
+ else:
+ name_template = trait_spec.name_template
+ if not name_template:
+ name_template = "%s_generated"
+
+ ns = trait_spec.name_source
+ while isinstance(ns, (list, tuple)):
+ if len(ns) > 1:
+ iflogger.warning("Only one name_source per trait is allowed")
+ ns = ns[0]
+
+ if not isinstance(ns, (str, bytes)):
+ raise ValueError(
+ "name_source of '{}' trait should be an input trait "
+ "name, but a type {} object was found".format(name, type(ns))
+ )
+
+ if getattr(inputs, ns) is not attrs.NOTHING:
+ name_source = ns
+ source = getattr(inputs, name_source)
+ while isinstance(source, list):
+ source = source[0]
+
+ # special treatment for files
+ try:
+ _, base, source_ext = split_filename(source)
+ except (AttributeError, TypeError):
+ base = source
+ else:
+ if name in chain:
+ raise NipypeInterfaceError("Mutually pointing name_sources")
+
+ chain.append(name)
+ base = _filename_from_source(
+ ns,
+ chain,
+ inputs=inputs,
+ stdout=stdout,
+ stderr=stderr,
+ output_dir=output_dir,
+ )
+ if base is not attrs.NOTHING:
+ _, _, source_ext = split_filename(base)
+ else:
+ # Do not generate filename when required fields are missing
+ return retval
+
+ chain = None
+ retval = name_template % base
+ _, _, ext = split_filename(retval)
+ if trait_spec.keep_extension and (ext or source_ext):
+ if (ext is None or not ext) and source_ext:
+ retval = retval + source_ext
+ else:
+ retval = _overload_extension(
+ retval,
+ name,
+ inputs=inputs,
+ stdout=stdout,
+ stderr=stderr,
+ output_dir=output_dir,
+ )
+ return retval
+
+
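+# KellyKapowski-specific fallback naming: when `cortical_thickness` or
+# `warped_white_matter` is left unset, `_gen_filename` derives a default from
+# the segmentation image, e.g. a "segmentation0.nii.gz" input yields
+# "segmentation0_cortical_thickness.nii.gz".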
+# Original source at L1765 of /interfaces/ants/segmentation.py
+def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None):
+ if name == "cortical_thickness":
+ output = inputs.cortical_thickness
+ if output is attrs.NOTHING:
+ _, name, ext = split_filename(inputs.segmentation_image)
+ output = name + "_cortical_thickness" + ext
+ return output
+
+ if name == "warped_white_matter":
+ output = inputs.warped_white_matter
+ if output is attrs.NOTHING:
+ _, name, ext = split_filename(inputs.segmentation_image)
+ output = name + "_warped_white_matter" + ext
+ return output
+
+
+# Original source at L891 of /interfaces/base/core.py
+def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None):
+ metadata = dict(name_source=lambda t: t is not None)
+ traits = inputs.traits(**metadata)
+ if traits:
+ outputs = {}
+ for name, trait_spec in list(traits.items()):
+ out_name = name
+ if trait_spec.output_name is not None:
+ out_name = trait_spec.output_name
+ fname = _filename_from_source(
+ name, inputs=inputs, stdout=stdout, stderr=stderr, output_dir=output_dir
+ )
+ if fname is not attrs.NOTHING:
+ outputs[out_name] = os.path.abspath(fname)
+ return outputs
+
+
+# Original source at L888 of /interfaces/base/core.py
+def _overload_extension(
+ value, name=None, inputs=None, stdout=None, stderr=None, output_dir=None
+):
+ return value
+
+
+# Original source at L58 of /utils/filemanip.py
+def split_filename(fname):
+ """Split a filename into parts: path, base filename and extension.
+
+ Parameters
+ ----------
+ fname : str
+ file or path name
+
+ Returns
+ -------
+ pth : str
+ base path from fname
+ fname : str
+ filename from fname, without extension
+ ext : str
+ file extension from fname
+
+ Examples
+ --------
+ >>> from nipype.utils.filemanip import split_filename
+ >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz')
+ >>> pth
+ '/home/data'
+
+ >>> fname
+ 'subject'
+
+ >>> ext
+ '.nii.gz'
+
+ """
+
+ special_extensions = [".nii.gz", ".tar.gz", ".niml.dset"]
+
+ pth = op.dirname(fname)
+ fname = op.basename(fname)
+
+ ext = None
+ for special_ext in special_extensions:
+ ext_len = len(special_ext)
+ if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()):
+ ext = fname[-ext_len:]
+ fname = fname[:-ext_len]
+ break
+ if not ext:
+ fname, ext = op.splitext(fname)
+
+ return pth, fname, ext
+
+
+# Original source at L125 of /interfaces/base/support.py
+class NipypeInterfaceError(Exception):
+ """Custom error for interfaces"""
+
+ def __init__(self, value):
+ self.value = value
+
+ def __str__(self):
+ return "{}".format(self.value)
diff --git a/nipype-auto-conv/specs/label_geometry.yaml b/nipype-auto-conv/specs/label_geometry.yaml
index 220c058..8c34c50 100644
--- a/nipype-auto-conv/specs/label_geometry.yaml
+++ b/nipype-auto-conv/specs/label_geometry.yaml
@@ -5,9 +5,9 @@
#
# Docs
# ----
-#
+#
# Extracts geometry measures using a label file and an optional image file
-#
+#
# Examples
# --------
# >>> from nipype.interfaces.ants import LabelGeometry
@@ -16,12 +16,12 @@
# >>> label_extract.inputs.label_image = 'atlas.nii.gz'
# >>> label_extract.cmdline
# 'LabelGeometryMeasures 3 atlas.nii.gz [] atlas.csv'
-#
+#
# >>> label_extract.inputs.intensity_image = 'ants_Warp.nii.gz'
# >>> label_extract.cmdline
# 'LabelGeometryMeasures 3 atlas.nii.gz ants_Warp.nii.gz atlas.csv'
-#
-#
+#
+#
task_name: LabelGeometry
nipype_name: LabelGeometry
nipype_module: nipype.interfaces.ants.utils
@@ -36,10 +36,13 @@ inputs:
# from the nipype interface, but you may want to be more specific, particularly
# for file types, where specifying the format also specifies the file that will be
# passed to the field in the automatically generated unittests.
- label_image: medimage/nifti-gz
- # type=file|default=: label image to use for extracting geometry measures
intensity_image: medimage/nifti-gz
# type=file|default='[]': Intensity image to extract values from. This is an optional input
+ label_image: medimage/nifti-gz
+ # type=file|default=: label image to use for extracting geometry measures
+ callable_defaults:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set as the `default` method of input fields
metadata:
# dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
outputs:
@@ -83,15 +86,15 @@ tests:
environ:
# type=dict|default={}: Environment variables
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -105,15 +108,15 @@ tests:
label_image:
# type=file|default=: label image to use for extracting geometry measures
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -125,15 +128,15 @@ tests:
intensity_image:
# type=file|default='[]': Intensity image to extract values from. This is an optional input
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -148,10 +151,10 @@ doctests:
# '.mock()' method of the corresponding class is used instead.
dimension: '3'
# type=enum|default=3|allowed[2,3]: image dimension (2 or 3)
- label_image:
+ label_image: '"atlas.nii.gz"'
# type=file|default=: label image to use for extracting geometry measures
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
@@ -161,10 +164,10 @@ doctests:
# dict[str, str] - name-value pairs for inputs to be provided to the doctest.
# If the field is of file-format type and the value is None, then the
# '.mock()' method of the corresponding class is used instead.
- intensity_image:
+ intensity_image: '"ants_Warp.nii.gz"'
# type=file|default='[]': Intensity image to extract values from. This is an optional input
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
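Side note on the doctest inputs above: they now carry explicit double quotes inside a single-quoted YAML scalar (e.g. label_image: '"atlas.nii.gz"'). A minimal sketch, assuming PyYAML, of how such a value parses -- the double quotes become part of the string itself, which is presumably what lets the conversion emit the value verbatim as a quoted Python literal in the generated doctest:

import yaml  # PyYAML, assumed available

spec = yaml.safe_load("""
doctests:
- inputs:
    label_image: '"atlas.nii.gz"'
""")
value = spec["doctests"][0]["inputs"]["label_image"]
print(value)        # "atlas.nii.gz"   (the quotes are part of the parsed string)
print(repr(value))  # '"atlas.nii.gz"'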
diff --git a/nipype-auto-conv/specs/label_geometry_callables.py b/nipype-auto-conv/specs/label_geometry_callables.py
index 2ec8a91..8df4bb8 100644
--- a/nipype-auto-conv/specs/label_geometry_callables.py
+++ b/nipype-auto-conv/specs/label_geometry_callables.py
@@ -1 +1,203 @@
-"""Module to put any functions that are referred to in LabelGeometry.yaml"""
+"""Module to put any functions that are referred to in the "callables" section of LabelGeometry.yaml"""
+
+import attrs
+import logging
+import os
+import os.path as op
+
+
+def output_file_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["output_file"]
+
+
+iflogger = logging.getLogger("nipype.interface")
+
+
+# Original source at L809 of /interfaces/base/core.py
+def _filename_from_source(
+ name, chain=None, inputs=None, stdout=None, stderr=None, output_dir=None
+):
+ if chain is None:
+ chain = []
+
+ trait_spec = inputs.trait(name)
+ retval = getattr(inputs, name)
+ source_ext = None
+ if (retval is attrs.NOTHING) or "%s" in retval:
+ if not trait_spec.name_source:
+ return retval
+
+ # Do not generate filename when excluded by other inputs
+ if any(
+ (getattr(inputs, field) is not attrs.NOTHING)
+ for field in trait_spec.xor or ()
+ ):
+ return retval
+
+ # Do not generate filename when required fields are missing
+ if not all(
+ (getattr(inputs, field) is not attrs.NOTHING)
+ for field in trait_spec.requires or ()
+ ):
+ return retval
+
+ if (retval is not attrs.NOTHING) and "%s" in retval:
+ name_template = retval
+ else:
+ name_template = trait_spec.name_template
+ if not name_template:
+ name_template = "%s_generated"
+
+ ns = trait_spec.name_source
+ while isinstance(ns, (list, tuple)):
+ if len(ns) > 1:
+ iflogger.warning("Only one name_source per trait is allowed")
+ ns = ns[0]
+
+ if not isinstance(ns, (str, bytes)):
+ raise ValueError(
+ "name_source of '{}' trait should be an input trait "
+ "name, but a type {} object was found".format(name, type(ns))
+ )
+
+ if getattr(inputs, ns) is not attrs.NOTHING:
+ name_source = ns
+ source = getattr(inputs, name_source)
+ while isinstance(source, list):
+ source = source[0]
+
+ # special treatment for files
+ try:
+ _, base, source_ext = split_filename(source)
+ except (AttributeError, TypeError):
+ base = source
+ else:
+ if name in chain:
+ raise NipypeInterfaceError("Mutually pointing name_sources")
+
+ chain.append(name)
+ base = _filename_from_source(
+ ns,
+ chain,
+ inputs=inputs,
+ stdout=stdout,
+ stderr=stderr,
+ output_dir=output_dir,
+ )
+ if base is not attrs.NOTHING:
+ _, _, source_ext = split_filename(base)
+ else:
+ # Do not generate filename when required fields are missing
+ return retval
+
+ chain = None
+ retval = name_template % base
+ _, _, ext = split_filename(retval)
+ if trait_spec.keep_extension and (ext or source_ext):
+ if (ext is None or not ext) and source_ext:
+ retval = retval + source_ext
+ else:
+ retval = _overload_extension(
+ retval,
+ name,
+ inputs=inputs,
+ stdout=stdout,
+ stderr=stderr,
+ output_dir=output_dir,
+ )
+ return retval
+
+
+# Original source at L885 of /interfaces/base/core.py
+def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None):
+ raise NotImplementedError
+
+
+# Original source at L891 of /interfaces/base/core.py
+def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None):
+ metadata = dict(name_source=lambda t: t is not None)
+ traits = inputs.traits(**metadata)
+ if traits:
+ outputs = {}
+ for name, trait_spec in list(traits.items()):
+ out_name = name
+ if trait_spec.output_name is not None:
+ out_name = trait_spec.output_name
+ fname = _filename_from_source(
+ name, inputs=inputs, stdout=stdout, stderr=stderr, output_dir=output_dir
+ )
+ if fname is not attrs.NOTHING:
+ outputs[out_name] = os.path.abspath(fname)
+ return outputs
+
+
+# Original source at L888 of /interfaces/base/core.py
+def _overload_extension(
+ value, name=None, inputs=None, stdout=None, stderr=None, output_dir=None
+):
+ return value
+
+
+# Original source at L58 of /utils/filemanip.py
+def split_filename(fname):
+ """Split a filename into parts: path, base filename and extension.
+
+ Parameters
+ ----------
+ fname : str
+ file or path name
+
+ Returns
+ -------
+ pth : str
+ base path from fname
+ fname : str
+ filename from fname, without extension
+ ext : str
+ file extension from fname
+
+ Examples
+ --------
+ >>> from nipype.utils.filemanip import split_filename
+ >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz')
+ >>> pth
+ '/home/data'
+
+ >>> fname
+ 'subject'
+
+ >>> ext
+ '.nii.gz'
+
+ """
+
+ special_extensions = [".nii.gz", ".tar.gz", ".niml.dset"]
+
+ pth = op.dirname(fname)
+ fname = op.basename(fname)
+
+ ext = None
+ for special_ext in special_extensions:
+ ext_len = len(special_ext)
+ if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()):
+ ext = fname[-ext_len:]
+ fname = fname[:-ext_len]
+ break
+ if not ext:
+ fname, ext = op.splitext(fname)
+
+ return pth, fname, ext
+
+
+# Original source at L125 of /interfaces/base/support.py
+class NipypeInterfaceError(Exception):
+ """Custom error for interfaces"""
+
+ def __init__(self, value):
+ self.value = value
+
+ def __str__(self):
+ return "{}".format(self.value)
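Usage note (not part of the patch itself): the split_filename helper vendored into each of these *_callables.py modules exists to keep compound neuroimaging extensions such as ".nii.gz" together, which a plain os.path.splitext would cut in the wrong place. A quick sketch, assuming label_geometry_callables.py (added above) is on the Python path, e.g. run from within nipype-auto-conv/specs:

import os.path as op

from label_geometry_callables import split_filename  # module added above

pth, base, ext = split_filename("/data/atlas.nii.gz")
print(pth, base, ext)                      # /data atlas .nii.gz
print(op.splitext("/data/atlas.nii.gz"))   # ('/data/atlas.nii', '.gz') -- compound extension lost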
diff --git a/nipype-auto-conv/specs/laplacian_thickness.yaml b/nipype-auto-conv/specs/laplacian_thickness.yaml
index 5b815a3..47dff88 100644
--- a/nipype-auto-conv/specs/laplacian_thickness.yaml
+++ b/nipype-auto-conv/specs/laplacian_thickness.yaml
@@ -6,22 +6,22 @@
# Docs
# ----
# Calculates the cortical thickness from an anatomical image
-#
+#
# Examples
# --------
-#
+#
# >>> from nipype.interfaces.ants import LaplacianThickness
# >>> cort_thick = LaplacianThickness()
# >>> cort_thick.inputs.input_wm = 'white_matter.nii.gz'
# >>> cort_thick.inputs.input_gm = 'gray_matter.nii.gz'
# >>> cort_thick.cmdline
# 'LaplacianThickness white_matter.nii.gz gray_matter.nii.gz white_matter_thickness.nii.gz'
-#
+#
# >>> cort_thick.inputs.output_image = 'output_thickness.nii.gz'
# >>> cort_thick.cmdline
# 'LaplacianThickness white_matter.nii.gz gray_matter.nii.gz output_thickness.nii.gz'
-#
-#
+#
+#
task_name: LaplacianThickness
nipype_name: LaplacianThickness
nipype_module: nipype.interfaces.ants.segmentation
@@ -36,10 +36,13 @@ inputs:
# from the nipype interface, but you may want to be more specific, particularly
# for file types, where specifying the format also specifies the file that will be
# passed to the field in the automatically generated unittests.
- input_wm: medimage/nifti-gz
- # type=file|default=: white matter segmentation image
input_gm: medimage/nifti-gz
# type=file|default=: gray matter segmentation image
+ input_wm: medimage/nifti-gz
+ # type=file|default=: white matter segmentation image
+ callable_defaults:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set as the `default` method of input fields
metadata:
# dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
outputs:
@@ -91,15 +94,15 @@ tests:
environ:
# type=dict|default={}: Environment variables
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -113,15 +116,15 @@ tests:
input_gm:
# type=file|default=: gray matter segmentation image
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -134,15 +137,15 @@ tests:
# type=file: Cortical thickness
# type=str|default='': name of output file
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -155,12 +158,12 @@ doctests:
# dict[str, str] - name-value pairs for inputs to be provided to the doctest.
# If the field is of file-format type and the value is None, then the
# '.mock()' method of the corresponding class is used instead.
- input_wm:
+ input_wm: '"white_matter.nii.gz"'
# type=file|default=: white matter segmentation image
- input_gm:
+ input_gm: '"gray_matter.nii.gz"'
# type=file|default=: gray matter segmentation image
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
@@ -174,7 +177,7 @@ doctests:
# type=file: Cortical thickness
# type=str|default='': name of output file
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
diff --git a/nipype-auto-conv/specs/laplacian_thickness_callables.py b/nipype-auto-conv/specs/laplacian_thickness_callables.py
index c361c6b..0d43117 100644
--- a/nipype-auto-conv/specs/laplacian_thickness_callables.py
+++ b/nipype-auto-conv/specs/laplacian_thickness_callables.py
@@ -1 +1,203 @@
-"""Module to put any functions that are referred to in LaplacianThickness.yaml"""
+"""Module to put any functions that are referred to in the "callables" section of LaplacianThickness.yaml"""
+
+import attrs
+import logging
+import os
+import os.path as op
+
+
+def output_image_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["output_image"]
+
+
+iflogger = logging.getLogger("nipype.interface")
+
+
+# Original source at L809 of /interfaces/base/core.py
+def _filename_from_source(
+ name, chain=None, inputs=None, stdout=None, stderr=None, output_dir=None
+):
+ if chain is None:
+ chain = []
+
+ trait_spec = inputs.trait(name)
+ retval = getattr(inputs, name)
+ source_ext = None
+ if (retval is attrs.NOTHING) or "%s" in retval:
+ if not trait_spec.name_source:
+ return retval
+
+ # Do not generate filename when excluded by other inputs
+ if any(
+ (getattr(inputs, field) is not attrs.NOTHING)
+ for field in trait_spec.xor or ()
+ ):
+ return retval
+
+ # Do not generate filename when required fields are missing
+ if not all(
+ (getattr(inputs, field) is not attrs.NOTHING)
+ for field in trait_spec.requires or ()
+ ):
+ return retval
+
+ if (retval is not attrs.NOTHING) and "%s" in retval:
+ name_template = retval
+ else:
+ name_template = trait_spec.name_template
+ if not name_template:
+ name_template = "%s_generated"
+
+ ns = trait_spec.name_source
+ while isinstance(ns, (list, tuple)):
+ if len(ns) > 1:
+ iflogger.warning("Only one name_source per trait is allowed")
+ ns = ns[0]
+
+ if not isinstance(ns, (str, bytes)):
+ raise ValueError(
+ "name_source of '{}' trait should be an input trait "
+ "name, but a type {} object was found".format(name, type(ns))
+ )
+
+ if getattr(inputs, ns) is not attrs.NOTHING:
+ name_source = ns
+ source = getattr(inputs, name_source)
+ while isinstance(source, list):
+ source = source[0]
+
+ # special treatment for files
+ try:
+ _, base, source_ext = split_filename(source)
+ except (AttributeError, TypeError):
+ base = source
+ else:
+ if name in chain:
+ raise NipypeInterfaceError("Mutually pointing name_sources")
+
+ chain.append(name)
+ base = _filename_from_source(
+ ns,
+ chain,
+ inputs=inputs,
+ stdout=stdout,
+ stderr=stderr,
+ output_dir=output_dir,
+ )
+ if base is not attrs.NOTHING:
+ _, _, source_ext = split_filename(base)
+ else:
+ # Do not generate filename when required fields are missing
+ return retval
+
+ chain = None
+ retval = name_template % base
+ _, _, ext = split_filename(retval)
+ if trait_spec.keep_extension and (ext or source_ext):
+ if (ext is None or not ext) and source_ext:
+ retval = retval + source_ext
+ else:
+ retval = _overload_extension(
+ retval,
+ name,
+ inputs=inputs,
+ stdout=stdout,
+ stderr=stderr,
+ output_dir=output_dir,
+ )
+ return retval
+
+
+# Original source at L885 of /interfaces/base/core.py
+def _gen_filename(name, inputs=None, stdout=None, stderr=None, output_dir=None):
+ raise NotImplementedError
+
+
+# Original source at L891 of /interfaces/base/core.py
+def _list_outputs(inputs=None, stdout=None, stderr=None, output_dir=None):
+ metadata = dict(name_source=lambda t: t is not None)
+ traits = inputs.traits(**metadata)
+ if traits:
+ outputs = {}
+ for name, trait_spec in list(traits.items()):
+ out_name = name
+ if trait_spec.output_name is not None:
+ out_name = trait_spec.output_name
+ fname = _filename_from_source(
+ name, inputs=inputs, stdout=stdout, stderr=stderr, output_dir=output_dir
+ )
+ if fname is not attrs.NOTHING:
+ outputs[out_name] = os.path.abspath(fname)
+ return outputs
+
+
+# Original source at L888 of /interfaces/base/core.py
+def _overload_extension(
+ value, name=None, inputs=None, stdout=None, stderr=None, output_dir=None
+):
+ return value
+
+
+# Original source at L58 of /utils/filemanip.py
+def split_filename(fname):
+ """Split a filename into parts: path, base filename and extension.
+
+ Parameters
+ ----------
+ fname : str
+ file or path name
+
+ Returns
+ -------
+ pth : str
+ base path from fname
+ fname : str
+ filename from fname, without extension
+ ext : str
+ file extension from fname
+
+ Examples
+ --------
+ >>> from nipype.utils.filemanip import split_filename
+ >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz')
+ >>> pth
+ '/home/data'
+
+ >>> fname
+ 'subject'
+
+ >>> ext
+ '.nii.gz'
+
+ """
+
+ special_extensions = [".nii.gz", ".tar.gz", ".niml.dset"]
+
+ pth = op.dirname(fname)
+ fname = op.basename(fname)
+
+ ext = None
+ for special_ext in special_extensions:
+ ext_len = len(special_ext)
+ if (len(fname) > ext_len) and (fname[-ext_len:].lower() == special_ext.lower()):
+ ext = fname[-ext_len:]
+ fname = fname[:-ext_len]
+ break
+ if not ext:
+ fname, ext = op.splitext(fname)
+
+ return pth, fname, ext
+
+
+# Original source at L125 of /interfaces/base/support.py
+class NipypeInterfaceError(Exception):
+ """Custom error for interfaces"""
+
+ def __init__(self, value):
+ self.value = value
+
+ def __str__(self):
+ return "{}".format(self.value)
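A minimal sketch of how output_image_callable above resolves a filename through the name_source/name_template machinery in _filename_from_source. The _FakeSpec/_FakeInputs classes are hypothetical stand-ins for nipype's traited inputs object (only the attributes the functions above actually read are provided), and the trait metadata used here (name_source="input_wm", name_template="%s_thickness") is an assumption chosen to mirror the white_matter_thickness.nii.gz default visible in the doctest cmdline earlier in this spec -- the real values live in nipype, not in this patch. Assumes laplacian_thickness_callables.py is on the Python path:

import attrs

from laplacian_thickness_callables import output_image_callable


class _FakeSpec:
    """Hypothetical stand-in for a nipype trait spec; only the attributes read above."""

    def __init__(self, name_source=None, name_template=None, keep_extension=True):
        self.name_source = name_source
        self.name_template = name_template
        self.keep_extension = keep_extension
        self.xor = None
        self.requires = None
        self.output_name = None


class _FakeInputs:
    """Hypothetical stand-in for a traited inputs object: attribute access plus trait()/traits()."""

    def __init__(self, values, specs):
        self._values = values
        self._specs = specs

    def trait(self, name):
        return self._specs[name]

    def traits(self, **metadata):
        # mirror the filter used in _list_outputs: only traits with a name_source set
        return {n: s for n, s in self._specs.items() if s.name_source is not None}

    def __getattr__(self, name):
        # unset inputs read back as attrs.NOTHING, like an empty nipype trait
        return self._values.get(name, attrs.NOTHING)


inputs = _FakeInputs(
    values={"input_wm": "white_matter.nii.gz"},
    specs={
        "input_wm": _FakeSpec(),
        "output_image": _FakeSpec(name_source="input_wm", name_template="%s_thickness"),
    },
)
print(output_image_callable(output_dir=".", inputs=inputs, stdout="", stderr=""))
# -> <cwd>/white_matter_thickness.nii.gz  (keep_extension re-attaches the .nii.gz)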
diff --git a/nipype-auto-conv/specs/measure_image_similarity.yaml b/nipype-auto-conv/specs/measure_image_similarity.yaml
index 2d2dae7..ca820bb 100644
--- a/nipype-auto-conv/specs/measure_image_similarity.yaml
+++ b/nipype-auto-conv/specs/measure_image_similarity.yaml
@@ -5,12 +5,12 @@
#
# Docs
# ----
-#
-#
-#
+#
+#
+#
# Examples
# --------
-#
+#
# >>> from nipype.interfaces.ants import MeasureImageSimilarity
# >>> sim = MeasureImageSimilarity()
# >>> sim.inputs.dimension = 3
@@ -25,7 +25,7 @@
# >>> sim.inputs.moving_image_mask = 'mask.nii.gz'
# >>> sim.cmdline
# 'MeasureImageSimilarity --dimensionality 3 --masks ["mask.nii","mask.nii.gz"] --metric MI["T1.nii","resting.nii",1.0,5,Regular,1.0]'
-#
+#
task_name: MeasureImageSimilarity
nipype_name: MeasureImageSimilarity
nipype_module: nipype.interfaces.ants.registration
@@ -42,12 +42,15 @@ inputs:
# passed to the field in the automatically generated unittests.
fixed_image: medimage/nifti1
# type=file|default=: Image to which the moving image is warped
- moving_image: medimage/nifti1
- # type=file|default=: Image to apply transformation to (generally a coregistered functional)
fixed_image_mask: medimage/nifti1
# type=file|default=: mask used to limit metric sampling region of the fixed image
+ moving_image: medimage/nifti1
+ # type=file|default=: Image to apply transformation to (generally a coregistered functional)
moving_image_mask: medimage/nifti-gz
# type=file|default=: mask used to limit metric sampling region of the moving image
+ callable_defaults:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set as the `default` method of input fields
metadata:
# dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
outputs:
@@ -64,6 +67,8 @@ outputs:
callables:
# dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
# to set to the `callable` attribute of output fields
+ similarity: similarity_callable
+ # type=float:
templates:
# dict[str, str] - `output_file_template` values to be provided to output fields
requirements:
@@ -79,7 +84,7 @@ tests:
moving_image:
# type=file|default=: Image to apply transformation to (generally a coregistered functional)
metric:
- # type=enum|default='CC'|allowed['CC','Demons','GC','MI','Mattes','MeanSquares']:
+ # type=enum|default='CC'|allowed['CC','Demons','GC','MI','Mattes','MeanSquares']:
metric_weight:
# type=float|default=1.0: The "metricWeight" variable is not used.
radius_or_number_of_bins:
@@ -99,15 +104,15 @@ tests:
environ:
# type=dict|default={}: Environment variables
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -119,7 +124,7 @@ tests:
dimension: '3'
# type=enum|default=2|allowed[2,3,4]: Dimensionality of the fixed/moving image pair
metric: '"MI"'
- # type=enum|default='CC'|allowed['CC','Demons','GC','MI','Mattes','MeanSquares']:
+ # type=enum|default='CC'|allowed['CC','Demons','GC','MI','Mattes','MeanSquares']:
fixed_image:
# type=file|default=: Image to which the moving image is warped
moving_image:
@@ -137,15 +142,15 @@ tests:
moving_image_mask:
# type=file|default=: mask used to limit metric sampling region of the moving image
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
expected_outputs:
# dict[str, str] - expected values for selected outputs, noting that tests will typically
# be terminated before they complete for time-saving reasons, and therefore
# these values will be ignored, when running in CI
timeout: 10
- # int - the value to set for the timeout in the generated test,
- # after which the test will be considered to have been initialised
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
# successfully. Set to 0 to disable the timeout (warning, this could
# lead to the unittests taking a very long time to complete)
xfail: true
@@ -161,10 +166,10 @@ doctests:
dimension: '3'
# type=enum|default=2|allowed[2,3,4]: Dimensionality of the fixed/moving image pair
metric: '"MI"'
- # type=enum|default='CC'|allowed['CC','Demons','GC','MI','Mattes','MeanSquares']:
- fixed_image:
+ # type=enum|default='CC'|allowed['CC','Demons','GC','MI','Mattes','MeanSquares']:
+ fixed_image: '"T1.nii"'
# type=file|default=: Image to which the moving image is warped
- moving_image:
+ moving_image: '"resting.nii"'
# type=file|default=: Image to apply transformation to (generally a coregistered functional)
metric_weight: '1.0'
# type=float|default=1.0: The "metricWeight" variable is not used.
@@ -174,12 +179,12 @@ doctests:
# type=enum|default='None'|allowed['None','Random','Regular']: Manner of choosing point set over which to optimize the metric. Defaults to "None" (i.e. a dense sampling of one sample per voxel).
sampling_percentage: '1.0'
# type=range|default=None: Percentage of points accessible to the sampling strategy over which to optimize the metric.
- fixed_image_mask:
+ fixed_image_mask: '"mask.nii"'
# type=file|default=: mask used to limit metric sampling region of the fixed image
- moving_image_mask:
+ moving_image_mask: '"mask.nii.gz"'
# type=file|default=: mask used to limit metric sampling region of the moving image
imports:
- # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item
# consisting of 'module', 'name', and optionally 'alias' keys
directive:
# str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
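The spec above wires a non-file output (similarity, a float) to a callable. For scalar outputs like this, a hand-written callable with the same (output_dir, inputs, stdout, stderr) signature could recover the value from the tool's captured stdout rather than from a generated filename. The function below is purely illustrative -- its name and parsing strategy are assumptions, not how nipype or the auto-converter actually computes similarity:

import re


def similarity_from_stdout(output_dir, inputs, stdout, stderr):
    """Illustrative only: return the last floating-point number the tool printed."""
    matches = re.findall(r"[-+]?\d+\.\d+(?:[eE][-+]?\d+)?", stdout or "")
    if not matches:
        raise ValueError("no similarity value found in stdout")
    return float(matches[-1])


# e.g. similarity_from_stdout(".", None, "-0.6001", "") -> -0.6001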
diff --git a/nipype-auto-conv/specs/measure_image_similarity_callables.py b/nipype-auto-conv/specs/measure_image_similarity_callables.py
index 7379ba2..758e235 100644
--- a/nipype-auto-conv/specs/measure_image_similarity_callables.py
+++ b/nipype-auto-conv/specs/measure_image_similarity_callables.py
@@ -1 +1,203 @@
-"""Module to put any functions that are referred to in MeasureImageSimilarity.yaml"""
+"""Module to put any functions that are referred to in the "callables" section of MeasureImageSimilarity.yaml"""
+
+import attrs
+import logging
+import os
+import os.path as op
+
+
+def similarity_callable(output_dir, inputs, stdout, stderr):
+ outputs = _list_outputs(
+ output_dir=output_dir, inputs=inputs, stdout=stdout, stderr=stderr
+ )
+ return outputs["similarity"]
+
+
+iflogger = logging.getLogger("nipype.interface")
+
+
+# Original source at L809 of /interfaces/base/core.py
+def _filename_from_source(
+ name, chain=None, inputs=None, stdout=None, stderr=None, output_dir=None
+):
+ if chain is None:
+ chain = []
+
+ trait_spec = inputs.trait(name)
+ retval = getattr(inputs, name)
+ source_ext = None
+ if (retval is attrs.NOTHING) or "%s" in retval:
+ if not trait_spec.name_source:
+ return retval
+
+ # Do not generate filename when excluded by other inputs
+ if any(
+ (getattr(inputs, field) is not attrs.NOTHING)
+ for field in trait_spec.xor or ()
+ ):
+ return retval
+
+ # Do not generate filename when required fields are missing
+ if not all(
+ (getattr(inputs, field) is not attrs.NOTHING)
+ for field in trait_spec.requires or ()
+ ):
+ return retval
+
+ if (retval is not attrs.NOTHING) and "%s" in retval:
+ name_template = retval
+ else:
+ name_template = trait_spec.name_template
+ if not name_template:
+ name_template = "%s_generated"
+
+ ns = trait_spec.name_source
+ while isinstance(ns, (list, tuple)):
+ if len(ns) > 1:
+ iflogger.warning("Only one name_source per trait is allowed")
+ ns = ns[0]
+
+ if not isinstance(ns, (str, bytes)):
+ raise ValueError(
+ "name_source of '{}' trait should be an input trait "
+ "name, but a type {} object was found".format(name, type(ns))
+ )
+
+ if getattr(inputs, ns) is not attrs.NOTHING:
+ name_source = ns
+ source = getattr(inputs, name_source)
+ while isinstance(source, list):
+ source = source[0]
+
+ # special treatment for files
+ try:
+ _, base, source_ext = split_filename(source)
+ except (AttributeError, TypeError):
+ base = source
+ else:
+ if name in chain:
+ raise NipypeInterfaceError("Mutually pointing name_sources")
+
+ chain.append(name)
+ base = _filename_from_source(
+ ns,
+ chain,
+ inputs=inputs,
+ stdout=stdout,
+ stderr=stderr,
+ output_dir=output_dir,
+ )
+ if base is not attrs.NOTHING:
+ _, _, source_ext = split_filename(base)
+ else:
+ # Do not generate filename when required fields are missing
+ return retval
+
+ chain = None
+ retval = name_template % base
+ _, _, ext = split_filename(retval)
+ if trait_spec.keep_extension and (ext or source_ext):
+ if (ext is None or not ext) and source_ext:
+ retval = retval + source_ext
+ else:
+ retval = _overload_extension(
+ retval,
+ name,
+ inputs=inputs,
+ stdout=stdout,
+ stderr=stderr,
+ output_dir=output_dir,
+ )
+ return retval
+
+
+# Original source at L885 of /interfaces/base/core.py