diff --git a/bids/__init__.py b/bids/__init__.py
index f8e285c1f..fb96d2d4b 100644
--- a/bids/__init__.py
+++ b/bids/__init__.py
@@ -13,7 +13,7 @@
     "layout",
     "reports",
     "utils",
-    "variables"
+    "variables",
 ]
 
 due.cite(Doi("10.1038/sdata.2016.44"),
diff --git a/bids/cli.py b/bids/cli.py
index 4cdc86872..4e1ba1248 100644
--- a/bids/cli.py
+++ b/bids/cli.py
@@ -4,6 +4,7 @@
 from . import __version__
 from .layout import BIDSLayoutIndexer, BIDSLayout
 from .utils import validate_multiple as _validate_multiple
+from . import morphing_time as mt
 
 # alias -h to trigger help message
 CONTEXT_SETTINGS = {'help_option_names': ['-h', '--help']}
@@ -27,6 +28,37 @@ def cli():
     pass
 
 
+@click.command()
+@click.version_option(__version__, prog_name='morphing_time')
+@click.option(
+    "--events-tsv", required=True, help="Path to events TSV")
+@click.option(
+    "--transforms", required=True, help="Path to transforms or model JSON")
+@click.option(
+    "--nvol", required=True, type=int, help="Number of volumes in func time series")
+@click.option(
+    "--tr", required=True, type=float, help="TR for func time series")
+@click.option(
+    "--ta", required=False, type=float, help="TA for events")
+@click.option(
+    "--output-sampling-rate",
+    required=False,
+    type=float,
+    help="Output sampling rate in Hz when a full design matrix is desired.")
+@click.option(
+    "--output-dir",
+    required=False,
+    help="Path to directory to write processed event files.")
+def morphing_time(**kwargs):
+    mt.morphing_time(**kwargs)
+
+
 @cli.command(context_settings=CONTEXT_SETTINGS)
 @click.argument('root', type=click.Path(file_okay=False, exists=True))
 @click.argument('db-path', type=click.Path(file_okay=False, resolve_path=True, exists=True))
diff --git a/bids/modeling/transformations/base.py b/bids/modeling/transformations/base.py
index c7006dae6..436bfd279 100644
--- a/bids/modeling/transformations/base.py
+++ b/bids/modeling/transformations/base.py
@@ -11,10 +11,9 @@
 import pandas as pd
 
 from bids.utils import listify, convert_JSON
-from bids.variables import SparseRunVariable
+from bids.variables import SparseRunVariable, BIDSRunVariableCollection
 from bids.modeling import transformations as pbt
 
-
 class Transformation(metaclass=ABCMeta):
 
     ### Class-level settings ###
@@ -405,13 +404,13 @@ class TransformerManager(object):
         If None, the PyBIDS transformations module is used.
     """
 
-    def __init__(self, default=None):
+    def __init__(self, default=None, save_pre_dense=False):
         self.transformations = {}
         if default is None:
             # Default to PyBIDS transformations
             default = pbt
         self.default = default
-
+        self.save_pre_dense = save_pre_dense
 
     def _sanitize_name(self, name):
         """ Replace any invalid/reserved transformation names with acceptable
         equivalents.
@@ -448,6 +447,7 @@ def transform(self, collection, transformations):
         transformations : list
             List of transformations to apply.
         """
+        changed_vars = []
        for t in transformations:
             t = convert_JSON(t)  # make sure all keys are snake case
             kwargs = dict(t)
@@ -462,5 +462,36 @@
                     "explicitly register a handler, or pass a"
                     " default module that supports it." % name)
             func = getattr(self.default, name)
-            func(collection, cols, **kwargs)
+
+            # Before applying the transformation, save a copy of every input
+            # variable that is currently sparse. We want the last sparse
+            # version of each variable that ends up dense, plus everything
+            # that is still dense at the end of the transformations, so that
+            # downstream users can add convolutions etc. as they please.
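+            # For example (mirroring the call in bids/morphing_time.py):
+            #     manager = TransformerManager(save_pre_dense=True)
+            #     coll, pre_dense = manager.transform(collection, transformations)
+            # `pre_dense` then holds the last sparse version of every variable
+            # that the transformations densified (or None if none were).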
+            matching_sparse_cols = []
+            if self.save_pre_dense:
+                for variable in collection.match_variables(cols, return_type='variable'):
+                    if isinstance(variable, SparseRunVariable):
+                        matching_sparse_cols.append(variable.clone())
+
+            func(collection, cols, **kwargs)
+
+            # Check whether those variables are still sparse; if a variable
+            # was densified (or dropped), keep the sparse copy saved prior to
+            # the transformation.
+            for variable in matching_sparse_cols:
+                name = variable.name
+                matching_post_tfm = collection.match_variables(name, return_type='variable')
+                assert len(matching_post_tfm) < 2
+                if (len(matching_post_tfm) == 0) or not isinstance(matching_post_tfm[0], SparseRunVariable):
+                    changed_vars.append(variable)
+
+        if self.save_pre_dense:
+            if changed_vars:
+                changed_vars = BIDSRunVariableCollection(changed_vars)
+                assert all(isinstance(vv, SparseRunVariable)
+                           for vv in changed_vars.variables.values())
+                return collection, changed_vars
+            return collection, None
         return collection
diff --git a/bids/morphing_time.py b/bids/morphing_time.py
new file mode 100755
index 000000000..ecfdab133
--- /dev/null
+++ b/bids/morphing_time.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+from collections import namedtuple
+from pathlib import Path
+
+import pandas as pd
+
+from bids.modeling import transformations
+from bids.variables import BIDSRunVariableCollection, merge_collections
+from bids.layout.utils import parse_file_entities
+from bids.variables.io import get_events_collection, parse_transforms
+
+
+def morphing_time(
+    *,
+    events_tsv,
+    transforms,
+    nvol,
+    tr,
+    ta=None,
+    output_sampling_rate=None,
+    output_dir=None,
+):
+    output_dir = Path(output_dir or "design_synthesizer")
+    output_dir.mkdir(parents=True, exist_ok=True)
+    model_transforms = parse_transforms(transforms)
+    duration = nvol * tr
+    ta = ta or tr
+
+    # Get the relevant collection
+    coll_df = pd.read_csv(events_tsv, delimiter="\t")
+    RunInfo = namedtuple('RunInfo', ['entities', 'duration', 'tr', 'image', 'n_vols'])
+
+    # TODO: this will need to be implemented without RunNode to break cyclic
+    # dependencies if transformations is to be extracted
+    run_info = RunInfo(parse_file_entities(events_tsv), duration, tr, None, nvol)
+    coll = BIDSRunVariableCollection(get_events_collection(coll_df, run_info))
+
+    # Perform transformations, additionally saving variables that were
+    # changed. If a column is transformed but not densified it will not be in
+    # colls_pre_densification.
+    colls, colls_pre_densification = (
+        transformations.TransformerManager(save_pre_dense=True)
+        .transform(coll, model_transforms)
+    )
+
+    # Save sparse vars
+    if colls_pre_densification is not None:
+        final_sparse_colls = BIDSRunVariableCollection(colls.get_sparse_variables())
+        final_sparse_names = set(final_sparse_colls.variables)
+        pre_dense_names = set(colls_pre_densification.variables)
+        shared_names = final_sparse_names.intersection(pre_dense_names)
+
+        if len(shared_names) > 0:
+            raise ValueError(
+                f"""Somehow you've ended up with a copy of {shared_names} in both the final
+                transformed variables and in the pre-densification variables. Did you delete
+                a variable and recreate one with the same name?"""
+            )
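+        # merge_collections concatenates the variables of the two collections
+        # into one; the name-collision check above guarantees that nothing is
+        # silently overwritten in the merge.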
+        output = merge_collections(
+            [colls_pre_densification, final_sparse_colls]
+        )
+        assert output.all_sparse()
+
+        df_sparse = output.to_df()
+    else:
+        df_sparse = colls.to_df(include_dense=False)
+
+    df_sparse.to_csv(output_dir / "transformed_events.tsv", index=False, sep="\t", na_rep="n/a")
+
+    # Save dense vars
+    try:
+        df_dense = colls.to_df(include_sparse=False)
+        df_dense.to_csv(output_dir / "transformed_time_series.tsv", index=False, sep="\t", na_rep="n/a")
+    except ValueError:
+        # No dense variables were produced; skip the time-series output.
+        pass
+
+    # Save the full design matrix
+    if output_sampling_rate:
+        df_full = colls.to_df(sampling_rate=output_sampling_rate)
+        df_full.to_csv(output_dir / "aggregated_design.tsv", index=False, sep="\t", na_rep="n/a")
diff --git a/bids/tests/data/ds005/models/ds-005_type-convolution_model.json b/bids/tests/data/ds005/models/ds-005_type-convolution_model.json
new file mode 100644
index 000000000..404123259
--- /dev/null
+++ b/bids/tests/data/ds005/models/ds-005_type-convolution_model.json
@@ -0,0 +1,109 @@
+{
+  "Name": "test_model",
+  "Description": "simple test model",
+  "Nodes": [
+    {
+      "Name": "run",
+      "Level": "Run",
+      "GroupBy": [
+        "run",
+        "subject"
+      ],
+      "Model": {
+        "X": [
+          "RT",
+          "gain"
+        ],
+        "Formula": "1 + RT * gain"
+      },
+      "Transformations": [
+        {
+          "Name": "Factor",
+          "Input": "trial_type"
+        },
+        {
+          "Name": "Rename",
+          "Input": "trial_type.parametric gain",
+          "Output": "gain"
+        },
+        {
+          "Name": "Threshold",
+          "Input": "respcat",
+          "Output": "pos_respcat",
+          "Binarize": true
+        },
+        {
+          "Name": "Scale",
+          "Input": "RT"
+        },
+        {
+          "Name": "Convolve",
+          "Input": ["gain", "pos_respcat"],
+          "Model": "spm"
+        }
+      ],
+      "DummyContrasts": {
+        "Test": "t"
+      }
+    },
+    {
+      "Name": "participant",
+      "Level": "Subject",
+      "Model": {
+        "X": [
+          1
+        ]
+      },
+      "DummyContrasts": {
+        "Type": "FEMA"
+      }
+    },
+    {
+      "Name": "by-group",
+      "Level": "Dataset",
+      "Model": {
+        "X": [
+          "@intercept"
+        ]
+      },
+      "DummyContrasts": {
+        "Type": "t"
+      }
+    },
+    {
+      "Name": "group-diff",
+      "Level": "Dataset",
+      "Model": {
+        "X": [
+          "@intercept",
+          "sex"
+        ]
+      },
+      "DummyContrasts": {
+        "Type": "t"
+      }
+    }
+  ],
+  "Edges": [
+    {
+      "Source": "run",
+      "Destination": "participant",
+      "GroupBy": [
+        "subject",
+        "contrast"
+      ]
+    },
+    {
+      "Source": "participant",
+      "Destination": "by-group",
+      "GroupBy": [
+        "sex"
+      ]
+    },
+    {
+      "Source": "participant",
+      "Destination": "group-diff",
+      "GroupBy": []
+    }
+  ]
+}
diff --git a/bids/tests/test_morphing_time.py b/bids/tests/test_morphing_time.py
new file mode 100644
index 000000000..600e01ca5
--- /dev/null
+++ b/bids/tests/test_morphing_time.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+import subprocess as sp
+from pathlib import Path
+
+import pandas as pd
+import pytest
+
+from bids import morphing_time as synth_mod
+
+SYNTHESIZER = "morphing-time"
+DATA_DIR = (Path(__file__).parent / "data/ds005").absolute()
+
+# Example user arg combinations; output_dir is omitted here because it is
+# better defined within the scope of each test.
+EXAMPLE_USER_ARGS = {
+    "events_tsv": f"{DATA_DIR}/sub-01/func/sub-01_task-mixedgamblestask_run-01_events.tsv",
+    "transforms": f"{DATA_DIR}/models/ds-005_type-mfx_model.json",
+    "tr": 2,
+    "ta": 2,
+    "nvol": 160,
+}
+EXAMPLE_USER_ARGS_2 = {
+    "events_tsv": f"{DATA_DIR}/sub-01/func/sub-01_task-mixedgamblestask_run-01_events.tsv",
+    "transforms": f"{DATA_DIR}/models/ds-005_type-test_model.json",
+    "tr": 2,
+    "ta": 2,
+    "nvol": 160,
+    "output_sampling_rate": 10,
+}
+EXAMPLE_USER_ARGS_3 = EXAMPLE_USER_ARGS_2.copy()
+EXAMPLE_USER_ARGS_3["transforms"] = f"{DATA_DIR}/models/ds-005_type-convolution_model.json"
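+# EXAMPLE_USER_ARGS exercises the mfx model, EXAMPLE_USER_ARGS_2 the test
+# model (additionally requesting a dense design via output_sampling_rate),
+# and EXAMPLE_USER_ARGS_3 the convolution model, which produces both sparse
+# and dense outputs.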
+
+
+def test_cli_help():
+    sp.check_output([SYNTHESIZER, "--help"])
+    with pytest.raises(sp.CalledProcessError):
+        sp.check_output([SYNTHESIZER, "--non-existent"])
+
+
+@pytest.mark.parametrize(
+    "test_case,user_args",
+    [
+        ("Model type mfx", EXAMPLE_USER_ARGS),
+        ("Model type test", EXAMPLE_USER_ARGS_2),
+    ]
+)
+def test_design_aggregation_function(tmp_path, test_case, user_args):
+    user_args = dict(user_args, output_dir=str(tmp_path))
+    main_func = getattr(synth_mod, SYNTHESIZER.replace("-", "_"))
+    main_func(**user_args)
+
+
+def test_design_aggregation_function_with_convolution(tmp_path):
+    user_args = dict(EXAMPLE_USER_ARGS_3, output_dir=str(tmp_path))
+    synth_mod.morphing_time(**user_args)
+    sparse_output = pd.read_csv(tmp_path / "transformed_events.tsv", sep='\t')
+    assert 'pos_respcat' in sparse_output.columns
+    assert 'gain' in sparse_output.columns
+
+    dense_output = pd.read_csv(tmp_path / "transformed_time_series.tsv", sep='\t')
+    assert 'pos_respcat' in dense_output.columns
+    assert 'gain' in dense_output.columns
+
+
+@pytest.mark.parametrize(
+    "test_case,user_args",
+    [
+        ("Model type mfx", EXAMPLE_USER_ARGS),
+        ("Model type test", EXAMPLE_USER_ARGS_2),
+    ]
+)
+def test_minimal_cli_functionality(tmp_path, test_case, user_args):
+    """
+    We roughly want to implement the equivalent of the following:
+
+        from bids.analysis import Analysis
+        from bids.layout import BIDSLayout
+
+        layout = BIDSLayout("data/ds000003")
+        analysis = Analysis(model="data/ds000003/models/model-001_smdl.json", layout=layout)
+        analysis.setup()
+
+    More specifically, we want to reimplement this line:
+    https://github.com/bids-standard/pybids/blob/b6cd0f6787230ce976a374fbd5fce650865752a3/bids/analysis/analysis.py#L282
+    """
+    user_args = dict(user_args, output_dir=str(tmp_path))
+    arg_list = " ".join(f"""--{k.lower().replace("_", "-")}={v}""" for k, v in user_args.items())
+    cmd = f"{SYNTHESIZER} {arg_list}"
+    sp.check_output(cmd.split())
diff --git a/bids/variables/io.py b/bids/variables/io.py
index 8f239b30e..2692381db 100644
--- a/bids/variables/io.py
+++ b/bids/variables/io.py
@@ -1,16 +1,17 @@
 """ Tools for reading/writing BIDS data files. """
 
 from os.path import join
+from pathlib import Path
 import warnings
 import json
 
 import numpy as np
 import pandas as pd
 
-from bids.utils import listify
+from bids.utils import listify, convert_JSON
 from .entities import NodeIndex
 from .variables import SparseRunVariable, DenseRunVariable, SimpleVariable
 
 BASE_ENTITIES = ['subject', 'session', 'task', 'run']
 ALL_ENTITIES = BASE_ENTITIES + ['datatype', 'suffix', 'acquisition']
@@ -120,6 +121,121 @@
     return nvols
 
 
+def get_events_collection(_data, run_info, drop_na=True, columns=None, entities=None):
+    """Build SparseRunVariables from an events DataFrame.
+
+    This is a minimal reimplementation of the event parsing in:
+    https://github.com/bids-standard/pybids/blob/statsmodels/bids/variables/io.py
+    written so that it still works for BIDS I/O but can also be used without
+    a layout.
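+
+    Illustrative usage (the events path is hypothetical)::
+
+        df = pd.read_csv("sub-01_task-x_events.tsv", sep="\t")
+        variables = get_events_collection(df, run_info)
+
+    which returns a list of SparseRunVariable objects, one per column other
+    than onset/duration.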
+ """ + + run_info + if entities is None: + entities = run_info.entities + if 'amplitude' in _data.columns: + if (_data['amplitude'].astype(int) == 1).all() and \ + 'trial_type' in _data.columns: + msg = ("Column 'amplitude' with constant value 1 " + "is unnecessary in event files; ignoring it.") + _data = _data.drop('amplitude', axis=1) + else: + msg = ("Column name 'amplitude' is reserved; " + "renaming it to 'amplitude_'.") + _data = _data.rename( + columns={'amplitude': 'amplitude_'}) + warnings.warn(msg) + + _data = _data.replace('n/a', np.nan) # Replace BIDS' n/a + _data = _data.apply(pd.to_numeric, errors='ignore') + + _cols = columns or list(set(_data.columns.tolist()) - + {'onset', 'duration'}) + colls_output = [] + # Construct a DataFrame for each extra column + for col in _cols: + df = _data[['onset', 'duration']].copy() + df['amplitude'] = _data[col].values + + # Add in all of the run's entities as new columns for + # index + for entity, value in entities.items(): + if entity in ALL_ENTITIES: + df[entity] = value + + if drop_na: + df = df.dropna(subset=['amplitude']) + + if df.empty: + continue + + var = SparseRunVariable( + name=col, data=df, run_info=run_info, source='events') + colls_output.append(var) + return colls_output + + +def get_regressors_collection(_data, run_info, columns=None, entities=None): + + colls_output = [] + if entities is None: + entities = run_info.entities + + if columns is not None: + conf_cols = list(set(_data.columns) & set(columns)) + _data = _data.loc[:, conf_cols] + for col in _data.columns: + sr = 1. / run_info.tr + var = DenseRunVariable(name=col, values=_data[[col]], + run_info=run_info, source='regressors', + sampling_rate=sr) + colls_output.append(var) + return colls_output + + +def get_rec_collection(data,run_info,metadata,source,columns=None,entities=None): + + colls_output = [] + freq = metadata['SamplingFrequency'] + st = metadata['StartTime'] + rf_cols = metadata['Columns'] + data.columns = rf_cols + + # Filter columns if user passed names + if columns is not None: + rf_cols = list(set(rf_cols) & set(columns)) + data = data.loc[:, rf_cols] + + n_cols = len(rf_cols) + if not n_cols: + # nothing to do + return [] + + # Keep only in-scan samples + if st < 0: + start_ind = np.floor(-st * freq) + values = data.values[start_ind:, :] + else: + values = data.values + + if st > 0: + n_pad = int(freq * st) + pad = np.zeros((n_pad, n_cols)) + values = np.r_[pad, values] + + n_rows = int(run_info.duration * freq) + if len(values) > n_rows: + values = values[:n_rows, :] + elif len(values) < n_rows: + pad = np.zeros((n_rows - len(values), n_cols)) + values = np.r_[values, pad] + + df = pd.DataFrame(values, columns=rf_cols) + for col in df.columns: + var = DenseRunVariable(name=col, values=df[[col]], run_info=run_info, + source=source, sampling_rate=freq) + colls_output.append(var) + return colls_output + def _load_time_variables(layout, dataset=None, columns=None, scan_length=None, drop_na=True, events=True, physio=True, stim=True, @@ -254,50 +370,14 @@ def _load_time_variables(layout, dataset=None, columns=None, scan_length=None, # Process event files if events: - dfs = layout.get_nearest( + efiles = layout.get_nearest( img_f, extension='.tsv', suffix='events', all_=True, full_search=True, ignore_strict_entities=['suffix', 'extension']) - for _data in dfs: - _data = pd.read_csv(_data, sep='\t') - if 'amplitude' in _data.columns: - if (_data['amplitude'].astype(int) == 1).all() and \ - 'trial_type' in _data.columns: - msg = ("Column 'amplitude' with 
+
+
 def _load_time_variables(layout, dataset=None, columns=None, scan_length=None,
                          drop_na=True, events=True, physio=True, stim=True,
@@ -254,50 +370,14 @@
 
     # Process event files
     if events:
-        dfs = layout.get_nearest(
+        efiles = layout.get_nearest(
             img_f, extension='.tsv', suffix='events', all_=True,
             full_search=True, ignore_strict_entities=['suffix', 'extension'])
-        for _data in dfs:
-            _data = pd.read_csv(_data, sep='\t')
-            if 'amplitude' in _data.columns:
-                if (_data['amplitude'].astype(int) == 1).all() and \
-                        'trial_type' in _data.columns:
-                    msg = ("Column 'amplitude' with constant value 1 "
-                           "is unnecessary in event files; ignoring it.")
-                    _data = _data.drop('amplitude', axis=1)
-                else:
-                    msg = ("Column name 'amplitude' is reserved; "
-                           "renaming it to 'amplitude_'.")
-                    _data = _data.rename(
-                        columns={'amplitude': 'amplitude_'})
-                warnings.warn(msg)
-
-            _data = _data.replace('n/a', np.nan)  # Replace BIDS' n/a
-            _data = _data.apply(pd.to_numeric, errors='ignore')
-
-            _cols = columns or list(set(_data.columns.tolist()) -
-                                    {'onset', 'duration'})
-
-            # Construct a DataFrame for each extra column
-            for col in _cols:
-                df = _data[['onset', 'duration']].copy()
-                df['amplitude'] = _data[col].values
-
-                # Add in all of the run's entities as new columns for
-                # index
-                for entity, value in entities.items():
-                    if entity in ALL_ENTITIES:
-                        df[entity] = value
-
-                if drop_na:
-                    df = df.dropna(subset=['amplitude'])
-
-                if df.empty:
-                    continue
-
-                var = SparseRunVariable(
-                    name=col, data=df, run_info=run_info, source='events')
-                run.add_variable(var)
+        for ef in efiles:
+            _data = pd.read_csv(ef, sep='\t')
+            event_cols = get_events_collection(_data, run.get_info(), drop_na=drop_na, columns=columns)
+            for ec in event_cols:
+                run.add_variable(ec)
 
     # Process confound files
     if regressors:
@@ -307,15 +387,9 @@
                                             **sub_ents)
             for cf in confound_files:
                 _data = pd.read_csv(cf.path, sep='\t', na_values='n/a')
-                if columns is not None:
-                    conf_cols = list(set(_data.columns) & set(columns))
-                    _data = _data.loc[:, conf_cols]
-                for col in _data.columns:
-                    sr = 1. / run.repetition_time
-                    var = DenseRunVariable(name=col, values=_data[[col]],
-                                           run_info=run_info, source='regressors',
-                                           sampling_rate=sr)
-                    run.add_variable(var)
+                reg_colls = get_regressors_collection(_data, run.get_info(), columns=columns)
+                for rc in reg_colls:
+                    run.add_variable(rc)
 
     # Process recordinging files
     rec_types = []
@@ -332,46 +406,18 @@
             metadata = layout.get_metadata(rf)
             if not metadata:
                 raise ValueError("No .json sidecar found for '%s'." % rf)
-            data = pd.read_csv(rf, sep='\t')
-            freq = metadata['SamplingFrequency']
-            st = metadata['StartTime']
-            rf_cols = metadata['Columns']
-            data.columns = rf_cols
-
-            # Filter columns if user passed names
-            if columns is not None:
-                rf_cols = list(set(rf_cols) & set(columns))
-                data = data.loc[:, rf_cols]
-
-            n_cols = len(rf_cols)
-            if not n_cols:
-                continue
-
-            # Keep only in-scan samples
-            if st < 0:
-                start_ind = np.floor(-st * freq)
-                values = data.values[start_ind:, :]
-            else:
-                values = data.values
-
-            if st > 0:
-                n_pad = int(freq * st)
-                pad = np.zeros((n_pad, n_cols))
-                values = np.r_[pad, values]
-
-            n_rows = int(run.duration * freq)
-            if len(values) > n_rows:
-                values = values[:n_rows, :]
-            elif len(values) < n_rows:
-                pad = np.zeros((n_rows - len(values), n_cols))
-                values = np.r_[values, pad]
-
-            df = pd.DataFrame(values, columns=rf_cols)
+            # rec_file passed in for now because rec_type needs to be inferred
             source = 'physio' if '_physio.tsv' in rf else 'stim'
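+            # e.g. '..._recording-cardiac_physio.tsv.gz' -> source='physio',
+            #      '..._recording-flicker_stim.tsv.gz'   -> source='stim'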
-            for col in df.columns:
-                var = DenseRunVariable(name=col, values=df[[col]], run_info=run_info,
-                                       source=source, sampling_rate=freq)
-                run.add_variable(var)
+            data = pd.read_csv(rf, sep='\t')
+            rec_colls = get_rec_collection(
+                data,
+                run.get_info(),
+                metadata,
+                source,
+                columns=columns)
+            for rc in rec_colls:
+                run.add_variable(rc)
 
     return dataset
@@ -509,3 +555,51 @@
             node.add_variable(SimpleVariable(name=col_name, data=df, source=suffix))
 
     return dataset
+
+
+def parse_transforms(transforms_in, validate=True, level="run"):
+    """Adapted from bids.modeling.statsmodels.BIDSStatsModelsGraph.
+
+    Also handles files/jsons that only define the transformations section of
+    the model.json.
+    """
+    # Input is JSON as a string, a dict, or a path
+    if isinstance(transforms_in, str):
+        # read as a file if the path exists
+        if Path(transforms_in).exists():
+            transforms_in = Path(transforms_in).read_text()
+        # convert the JSON string to a dict
+        try:
+            transforms_raw = json.loads(transforms_in)
+        except json.JSONDecodeError as err:
+            raise ValueError(
+                f"The following input could not be parsed as valid JSON:\n"
+                f"{transforms_in}"
+            ) from err
+    else:
+        transforms_raw = transforms_in
+
+    # Convert JSON from CamelCase to snake_case keys
+    transforms_raw = convert_JSON(transforms_raw)
+
+    if validate:
+        # TODO
+        # validate_transforms(transforms_raw)
+        pass
+
+    # Process transformations
+    # TODO: some basic error checking to confirm the correct level of
+    # transformations has been obtained. This will most likely be the case
+    # since transformations at higher levels will no longer be required when
+    # the new "flow" approach is used.
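+    # After convert_JSON, all keys are snake_case; accepted shapes are
+    # (illustrative):
+    #   {"transformations": [...]}                    -> returned directly
+    #   {"nodes": [{"transformations": [...]}, ...]}  -> first node's list
+    #   {"steps": [{"transformations": [...]}, ...]}  -> same, older key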
+ if "transformations" in transforms_raw: + transforms = transforms_raw["transformations"] + elif any(k in transforms_raw for k in ["nodes","steps"]): + nodes_key = "nodes" if "nodes" in transforms_raw else "steps" + transforms = transforms_raw[nodes_key][0]["transformations"] + else: + raise ValueError("Cannot find a key for nodes in the json input representing the model") + return transforms + + diff --git a/bids/variables/tests/test_io.py b/bids/variables/tests/test_io.py index c4690ce84..ce2df52a1 100644 --- a/bids/variables/tests/test_io.py +++ b/bids/variables/tests/test_io.py @@ -2,12 +2,21 @@ from bids.variables import (SparseRunVariable, SimpleVariable, DenseRunVariable, load_variables) from bids.variables.entities import Node, RunNode, NodeIndex +from bids.variables.io import parse_transforms from unittest.mock import patch import pytest from os.path import join +from pathlib import Path +import tempfile +import json from bids.tests import get_test_data_path from bids.config import set_option, get_option +EXAMPLE_TRANSFORM = { + "Transformations":[{"Name":"example_trans","Inputs":["col_a","col_b"]}] +} +TRANSFORMS_JSON = join(tempfile.tempdir,"tranformations.json") +Path(TRANSFORMS_JSON).write_text(json.dumps(EXAMPLE_TRANSFORM)) @pytest.fixture def layout1(): @@ -103,3 +112,29 @@ def test_load_synthetic_dataset(synthetic): subs = index.get_nodes('subject') assert len(subs) == 5 assert set(subs[0].variables.keys()) == {'systolic_blood_pressure'} + +@pytest.mark.parametrize( + "test_case,transform_input,expected_names", + [ + ("raw transform json", + EXAMPLE_TRANSFORM, + ["example_trans"] + ), + ("transform json file", + TRANSFORMS_JSON, + ["example_trans"] + ), + ("raw model json", + {"Nodes": [EXAMPLE_TRANSFORM]}, + ["example_trans"] + ), + ("model json file", + str(Path(get_test_data_path()) / "ds005/models/ds-005_type-mfx_model.json"), + ["Scale"] + ), + ] +) +def test_parse_transforms(test_case,transform_input,expected_names): + result = parse_transforms(transform_input) + transformation_names = [x['name'] for x in result] + assert expected_names == transformation_names diff --git a/setup.cfg b/setup.cfg index d4e73e724..a707d4baa 100644 --- a/setup.cfg +++ b/setup.cfg @@ -64,6 +64,7 @@ dev = [options.entry_points] console_scripts = pybids=bids.cli:cli + morphing-time=bids.cli:morphing_time [versioneer] VCS = git