diff --git a/models/segmentation_template/configs/inference.yaml b/models/segmentation_template/configs/inference.yaml
index 67903c17..bbbb2c7f 100644
--- a/models/segmentation_template/configs/inference.yaml
+++ b/models/segmentation_template/configs/inference.yaml
@@ -47,11 +47,11 @@ transforms:
   keys: '@image'
 - _target_: ScaleIntensityd
   keys: '@image'
- 
-preprocessing: 
+
+preprocessing:
   _target_: Compose
   transforms: $@transforms
- 
+
 dataset:
   _target_: Dataset
   data: '@data_dicts'
@@ -62,10 +62,10 @@ dataloader:
   dataset: '@dataset'
   batch_size: '@batch_size'
   num_workers: '@num_workers'
- 
+
 # should be replaced with other inferer types if training process is different for your network
 inferer:
-  _target_: SimpleInferer 
+  _target_: SimpleInferer
 
 # transform to apply to data from network to be suitable for loss function and validation
 postprocessing:
@@ -86,8 +86,8 @@ postprocessing:
     output_dtype: $None
     output_postfix: ''
     resample: false
-    separate_folder: true 
- 
+    separate_folder: true
+
 # inference handlers to load checkpoint, gather statistics
 handlers:
 - _target_: CheckpointLoader
@@ -98,7 +98,7 @@ handlers:
 - _target_: StatsHandler
   name: null # use engine.logger as the Logger object to log to
   output_transform: '$lambda x: None'
- 
+
 # engine for running inference, ties together objects defined above and has metric definitions
 evaluator:
   _target_: SupervisedEvaluator
@@ -109,5 +109,5 @@ evaluator:
   postprocessing: '@postprocessing'
   val_handlers: '@handlers'
 
-run: 
+run:
 - $@evaluator.run()
diff --git a/models/segmentation_template/configs/metadata.json b/models/segmentation_template/configs/metadata.json
index 8ad534cc..446c56c5 100644
--- a/models/segmentation_template/configs/metadata.json
+++ b/models/segmentation_template/configs/metadata.json
@@ -56,4 +56,4 @@
             }
         }
     }
-}
\ No newline at end of file
+}
diff --git a/models/segmentation_template/configs/multi_gpu_train.yaml b/models/segmentation_template/configs/multi_gpu_train.yaml
index d7a0bb2b..26cf222b 100644
--- a/models/segmentation_template/configs/multi_gpu_train.yaml
+++ b/models/segmentation_template/configs/multi_gpu_train.yaml
@@ -25,10 +25,10 @@ val_sampler:
   dataset: '@val_dataset'
   even_divisible: false
   shuffle: false
- 
+
 val_dataloader#sampler: '@val_sampler'
 
-run: 
+run:
 - $import torch.distributed as dist
 - $dist.init_process_group(backend='nccl')
 - $torch.cuda.set_device(@device)
diff --git a/models/segmentation_template/configs/test.yaml b/models/segmentation_template/configs/test.yaml
index 4705d365..70f0991b 100644
--- a/models/segmentation_template/configs/test.yaml
+++ b/models/segmentation_template/configs/test.yaml
@@ -22,7 +22,7 @@ device: $torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
 # define various paths
 bundle_root: . # root directory of the bundle
 ckpt_path: $@bundle_root + '/models/model.pt' # checkpoint to load before starting
-dataset_dir: $@bundle_root + '/test_data' # where data is coming from 
+dataset_dir: $@bundle_root + '/test_data' # where data is coming from
 output_dir: './outputs' # directory to store images to if save_pred is true
 
 # network definition, this could be parameterised by pre-defined values or on the command line
@@ -53,11 +53,11 @@ transforms:
   keys: '@both_keys'
 - _target_: ScaleIntensityd
   keys: '@image'
- 
-preprocessing: 
+
+preprocessing:
   _target_: Compose
   transforms: $@transforms
- 
+
 dataset:
   _target_: Dataset
   data: '@data_dicts'
@@ -68,10 +68,10 @@ dataloader:
   dataset: '@dataset'
   batch_size: '@batch_size'
   num_workers: '@num_workers'
- 
+
 # should be replaced with other inferer types if training process is different for your network
 inferer:
-  _target_: SimpleInferer 
+  _target_: SimpleInferer
 
 # transform to apply to data from network to be suitable for loss function and validation
 postprocessing:
@@ -93,8 +93,8 @@ postprocessing:
     output_dtype: $None
     output_postfix: ''
     resample: false
-    separate_folder: true 
- 
+    separate_folder: true
+
 # inference handlers to load checkpoint, gather statistics
 handlers:
 - _target_: CheckpointLoader
@@ -105,7 +105,7 @@ handlers:
 - _target_: StatsHandler
   name: null # use engine.logger as the Logger object to log to
   output_transform: '$lambda x: None'
- 
+
 # engine for running inference, ties together objects defined above and has metric definitions
 evaluator:
   _target_: SupervisedEvaluator
@@ -119,7 +119,7 @@ evaluator:
       include_background: false
       output_transform: $monai.handlers.from_engine([@pred, @label])
   val_handlers: '@handlers'
- 
-run: 
+
+run:
 - $@evaluator.run()
 - '$print(''Per-image Dice:\n'',@evaluator.state.metric_details[''val_mean_dice''].cpu().numpy())'
diff --git a/models/segmentation_template/configs/train.yaml b/models/segmentation_template/configs/train.yaml
index 55a6a289..8f7a20b4 100644
--- a/models/segmentation_template/configs/train.yaml
+++ b/models/segmentation_template/configs/train.yaml
@@ -1,7 +1,7 @@
-# This config file implements the training workflow. It can be combined with multi_gpu_train.yaml to use DDP for 
+# This config file implements the training workflow. It can be combined with multi_gpu_train.yaml to use DDP for
 # multi-GPU runs. Many definitions in this file are duplicated across other files for compatibility with MONAI
 # Label, eg. network_def, but ideally these would be in a common.yaml file used in conjunction with this one
-# or the other config files for testing or inference. 
+# or the other config files for testing or inference.
 
 imports:
 - $import os
@@ -34,7 +34,7 @@ device: $torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
 # define various paths
 bundle_root: . # root directory of the bundle
 ckpt_path: $@bundle_root + '/models/model.pt' # checkpoint to load before starting
-dataset_dir: $@bundle_root + '/train_data' # where data is coming from 
+dataset_dir: $@bundle_root + '/train_data' # where data is coming from
 results_dir: $@bundle_root + '/results' # where results are being stored to
 # a new output directory is chosen using a timestamp for every invocation
 output_dir: '$datetime.datetime.now().strftime(@results_dir + ''/output_%y%m%d_%H%M%S'')'
@@ -65,7 +65,7 @@ base_transforms:
   image_only: true
 - _target_: EnsureChannelFirstd
   keys: '@both_keys'
- 
+
 # these are the random and regularising transforms used only for training
 train_transforms:
 - _target_: RandAxisFlipd
@@ -80,34 +80,34 @@ train_transforms:
   std: 0.05
 - _target_: ScaleIntensityd
   keys: '@image'
- 
+
 # these are used for validation data so no randomness
 val_transforms:
 - _target_: ScaleIntensityd
   keys: '@image'
- 
+
 # define the Compose objects for training and validation
-preprocessing: 
+preprocessing:
   _target_: Compose
   transforms: $@base_transforms + @train_transforms
- 
-val_preprocessing: 
+
+val_preprocessing:
   _target_: Compose
   transforms: $@base_transforms + @val_transforms
- 
+
 # define the datasets for training and validation
 train_dataset:
   _target_: Dataset
   data: '@train_sub'
   transform: '@preprocessing'
- 
+
 val_dataset:
   _target_: Dataset
   data: '@val_sub'
   transform: '@val_preprocessing'
- 
+
 # define the dataloaders for training and validation
 train_dataloader:
@@ -116,13 +116,13 @@ train_dataloader:
   batch_size: '@batch_size'
   repeats: '@num_substeps'
   num_workers: '@num_workers'
- 
+
 val_dataloader:
   _target_: DataLoader # faster transforms probably won't benefit from threading
   dataset: '@val_dataset'
   batch_size: '@batch_size'
   num_workers: '@num_workers'
- 
+
 # Simple Dice loss configured for multi-class segmentation, for binary segmentation
 # use include_background==True and sigmoid==True instead of these values
 lossfn:
@@ -130,16 +130,16 @@ lossfn:
   include_background: true # if your segmentations are relatively small it might help for this to be false
   to_onehot_y: true
   softmax: true
- 
+
 # hyperparameters could be added for other arguments of this class
 optimizer:
   _target_: torch.optim.Adam
   params: $@network.parameters()
   lr: '@learning_rate'
- 
+
 # should be replaced with other inferer types if training process is different for your network
 inferer:
-  _target_: SimpleInferer 
+  _target_: SimpleInferer
 
 # transform to apply to data from network to be suitable for loss function and validation
 postprocessing:
@@ -170,7 +170,7 @@ val_handlers:
   epoch_level: false
   save_key_metric: true
   key_metric_name: val_mean_dice # save the checkpoint when this value improves
- 
+
 # engine for running validation, ties together objects defined above and has metric definitions
 evaluator:
   _target_: SupervisedEvaluator
@@ -192,12 +192,12 @@ evaluator:
       _target_: MeanAbsoluteError
       output_transform: $monai.handlers.from_engine([@pred, @label])
   val_handlers: '@val_handlers'
- 
+
 # gathers the loss and validation values for each iteration, referred to by CheckpointSaver so defined separately
-metriclogger: 
+metriclogger:
   _target_: MetricLogger
-  evaluator: '@evaluator' 
- 
+  evaluator: '@evaluator'
+
 handlers:
 - '@metriclogger'
 - _target_: CheckpointLoader
@@ -224,7 +224,7 @@ handlers:
   output_transform: $monai.handlers.from_engine(['loss'], first=True) # log loss value
 - _target_: LogfileHandler # log outputs from the training engine
   output_dir: '@output_dir'
- 
+
 # engine for training, ties values defined above together into the main engine for the training process
 trainer:
   _target_: SupervisedTrainer
@@ -238,6 +238,6 @@ trainer:
   postprocessing: '@postprocessing'
   key_train_metric: null
   train_handlers: '@handlers'
- 
-run: 
+
+run:
 - $@trainer.run()
diff --git a/models/segmentation_template/docs/README.md b/models/segmentation_template/docs/README.md
index 907c2769..cc260c9e 100644
--- a/models/segmentation_template/docs/README.md
+++ b/models/segmentation_template/docs/README.md
@@ -1,14 +1,14 @@
 # Template Segmentation Bundle
 
-This bundle is meant to be an example of segmentation in 3D which you can copy and modify to create your own bundle. 
+This bundle is meant to be an example of segmentation in 3D which you can copy and modify to create your own bundle.
 It is only roughly trained for the synthetic data you can generate with [this notebook](./generate_data.ipynb)
 so doesn't do anything useful on its own. The purpose is to demonstrate the base line for segmentation network
-bundles compatible with MONAILabel amongst other things. 
+bundles compatible with MONAILabel amongst other things.
 
 To use this bundle, copy the contents of the whole directory and change the definitions for network, data, transforms,
 or whatever else you want for your own new segmentation bundle. Some of the names are critical for MONAILable but
-otherwise you're free to change just about whatever else is defined here to suit your network. 
+otherwise you're free to change just about whatever else is defined here to suit your network.
 
 This bundle should also demonstrate good practice and design, however there is one caveat about definitions being
 copied between config files. Ideally there should be a `common.yaml` file for all the definitions used by every other
@@ -41,8 +41,8 @@ See MONAI installation information about what environment to create for the feat
 
 The training config includes a number of hyperparameters like `learning_rate` and `num_workers`. These control aspects
 of how training operates in terms of how many processes to use, when to perform validation, when to save checkpoints,
-and other things. Other aspects of the script can be modified on the command line so these aren't exhaustive but are a 
-guide to the kind of parameterisation that make sense for a bundle. 
+and other things. Other aspects of the script can be modified on the command line so these aren't exhaustive but are a
+guide to the kind of parameterisation that make sense for a bundle.
 
 ## Testing and Inference
@@ -53,7 +53,7 @@ by setting `save_pred` to true but by default it will just run metrics and print
 
 The inference config is for generating new segmentations from images which don't have ground truths, so this is used for
 actually applying the network in practice. This will apply the network to every image in an input directory matching a
-pattern and save the predicted segmentations to an output directory. 
+pattern and save the predicted segmentations to an output directory.
 
 Using inference on the command line is demonstrated in [this notebook](./visualise_inference.ipynb) with visualisation.
 Some explanation of some command line choices are given in the notebook as well, similar command line invocations can
@@ -64,5 +64,5 @@ also be done with the included `inference.sh` script file.
 ## Other Considerations
 There is no `scripts` directory containing a valid Python module to be imported in your configs. This wasn't necessary
 for this bundle but if you want to include custom code in a bundle please follow the bundle tutorials on how to do this.
-The `multi_gpu_train.yaml` config is defined as a "mixin" to implement DDP based multi-gpu training. The script 
+The `multi_gpu_train.yaml` config is defined as a "mixin" to implement DDP based multi-gpu training. The script
 `train_multigpu.sh` illustrates an example of how to invoke these configs together with `torchrun`.
diff --git a/models/segmentation_template/docs/inference.sh b/models/segmentation_template/docs/inference.sh
index 7ce03cd5..091e11c6 100755
--- a/models/segmentation_template/docs/inference.sh
+++ b/models/segmentation_template/docs/inference.sh
@@ -16,4 +16,3 @@ python -m monai.bundle run \
     --config_file "$BUNDLE/configs/inference.yaml" \
     --bundle_root "$BUNDLE" \
     $@
- 
\ No newline at end of file
diff --git a/models/segmentation_template/docs/run_monailabel.sh b/models/segmentation_template/docs/run_monailabel.sh
index c724b681..955fd440 100755
--- a/models/segmentation_template/docs/run_monailabel.sh
+++ b/models/segmentation_template/docs/run_monailabel.sh
@@ -25,4 +25,4 @@ then
 fi
 
 cd "$LABELDIR"
-monailabel start_server --app monaibundle --studies datasets --conf models $BUNDLENAME $*
\ No newline at end of file
+monailabel start_server --app monaibundle --studies datasets --conf models $BUNDLENAME $*
diff --git a/models/segmentation_template/docs/test.sh b/models/segmentation_template/docs/test.sh
index 2c314607..b5b6603f 100755
--- a/models/segmentation_template/docs/test.sh
+++ b/models/segmentation_template/docs/test.sh
@@ -16,4 +16,3 @@ python -m monai.bundle run \
     --config_file "$BUNDLE/configs/test.yaml" \
     --bundle_root "$BUNDLE" \
     $@
- 
\ No newline at end of file
diff --git a/models/segmentation_template/docs/train.sh b/models/segmentation_template/docs/train.sh
index 57e0d84a..b56eef64 100755
--- a/models/segmentation_template/docs/train.sh
+++ b/models/segmentation_template/docs/train.sh
@@ -16,4 +16,3 @@ python -m monai.bundle run \
    --config_file "$BUNDLE/configs/train.yaml" \
    --bundle_root "$BUNDLE" \
    $@
- 
\ No newline at end of file
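A note for readers picking up the template from this patch: the README's point about hyperparameters being modifiable on the command line can be made concrete. The sketch below mirrors the fixed arguments used by `docs/train.sh` above and relies on the MONAI bundle CLI accepting `--<id> <value>` overrides for config ids; `learning_rate` and `num_workers` are ids defined in `configs/train.yaml`, and the override values here are purely illustrative.

```sh
# Hypothetical single-GPU training run with two hyperparameter overrides;
# any top-level id in train.yaml can be overridden the same way.
python -m monai.bundle run \
    --config_file ./configs/train.yaml \
    --bundle_root . \
    --learning_rate 0.0001 \
    --num_workers 8
```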
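Similarly, the `multi_gpu_train.yaml` "mixin" mentioned in the README works by being listed after the base config so that its definitions (the distributed sampler and the DDP `run` section) override the single-GPU ones. A minimal sketch assuming two GPUs on one node; the bundled `train_multigpu.sh` is the authoritative version of this invocation.

```sh
# Hypothetical two-GPU launch: later config files override ids from earlier ones,
# so multi_gpu_train.yaml replaces the run section of train.yaml with the DDP setup.
torchrun --standalone --nproc_per_node=2 -m monai.bundle run \
    --config_file "['configs/train.yaml','configs/multi_gpu_train.yaml']" \
    --bundle_root .
```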
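Finally, since every change in this patch is trailing whitespace or a missing end-of-file newline, git's built-in whitespace check can catch these before they land. This is an aside rather than part of the patch:

```sh
# Lists lines with whitespace errors (e.g. trailing spaces) and exits non-zero
# if any are found; --cached checks what is currently staged for commit.
git diff --check
git diff --check --cached
```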