From b5ceca6394e862cfca8bf151c56c12bea2ad82cd Mon Sep 17 00:00:00 2001 From: KumoLiu Date: Fri, 1 Sep 2023 16:44:04 +0800 Subject: [PATCH 01/13] add guidance for bundle in pythonic way Signed-off-by: KumoLiu --- .../pythonic_bundle_access.ipynb | 342 ++++++++++++++++++ 1 file changed, 342 insertions(+) create mode 100644 bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb diff --git a/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb b/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb new file mode 100644 index 0000000000..429fefa6fc --- /dev/null +++ b/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb @@ -0,0 +1,342 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Copyright (c) MONAI Consortium \n", + "Licensed under the Apache License, Version 2.0 (the \"License\"); \n", + "you may not use this file except in compliance with the License. \n", + "You may obtain a copy of the License at \n", + "    http://www.apache.org/licenses/LICENSE-2.0 \n", + "Unless required by applicable law or agreed to in writing, software \n", + "distributed under the License is distributed on an \"AS IS\" BASIS, \n", + "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. \n", + "See the License for the specific language governing permissions and \n", + "limitations under the License.\n", + "\n", + "# Accessing a Bundle Workflow in Python\n", + "\n", + "In this guide, we'll explore how to access a bundle in Python and use it in your own application. We'll cover the following topics:\n", + "\n", + "1. **Downloading the Bundle**: First, you'll need to download the bundle from its source. This can be done using the `download` API.\n", + "\n", + "2. **Creating a `BundleWorkflow`**: Once you have the bundle, you can create a `BundleWorkflow` object by passing the path to the bundle file as an argument to `create_worflow`.\n", + "\n", + "3. 
**Getting Properties from the Bundle**: You can then retrieve the properties of the bundle by directly accessing them. For example, to get the version of the bundle, you can use `workflow.version`.\n", + "\n", + "4. **Updating Properties**: If you need to update any of the properties, you can do so by directly overwriting them. For example, to update the max epochs of the bundle, you can use `workflow.max_epochs = 10`.\n", + "\n", + "5. **Using Components in Your Own Pipeline**: Finally, you can use the components from the bundle in your own pipeline by accessing them through the `BundleWorkflow` object.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup environment" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python -c \"import monai\" || pip install -q \"monai-weekly[gdown, nibabel, tqdm, ignite]\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup imports" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "MONAI version: 1.2.0+103.ga7a690ff\n", + "Numpy version: 1.22.2\n", + "Pytorch version: 1.13.1+cu117\n", + "MONAI flags: HAS_EXT = False, USE_COMPILED = False, USE_META_DICT = False\n", + "MONAI rev id: a7a690ffb781c806b73500e68ba05be6fcdfd11a\n", + "MONAI __file__: /workspace/Code/MONAI/monai/__init__.py\n", + "\n", + "Optional dependencies:\n", + "Pytorch Ignite version: 0.4.11\n", + "ITK version: 5.3.0\n", + "Nibabel version: 5.1.0\n", + "scikit-image version: 0.21.0\n", + "scipy version: 1.10.1\n", + "Pillow version: 9.2.0\n", + "Tensorboard version: 2.9.0\n", + "gdown version: 4.7.1\n", + "TorchVision version: 0.14.1+cu117\n", + "tqdm version: 4.65.0\n", + "lmdb version: 1.4.1\n", + "psutil version: 5.9.4\n", + "pandas version: 1.5.2\n", + "einops version: 0.6.1\n", + "transformers version: 4.21.3\n", + 
"mlflow version: 2.4.0\n", + "pynrrd version: 1.0.0\n", + "clearml version: 1.11.1rc1\n", + "\n", + "For details about installing the optional dependencies, please visit:\n", + " https://docs.monai.io/en/latest/installation.html#installing-the-recommended-dependencies\n", + "\n" + ] + } + ], + "source": [ + "import os\n", + "import tempfile\n", + "from pathlib import Path\n", + "from monai.config import print_config\n", + "from monai.bundle import download, create_workflow\n", + "\n", + "print_config()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup data directory\n", + "\n", + "You can specify a directory with the `MONAI_DATA_DIRECTORY` environment variable. \n", + "This allows you to save results and reuse downloads. \n", + "If not specified a temporary directory will be used." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/workspace/Data\n" + ] + } + ], + "source": [ + "directory = os.environ.get(\"MONAI_DATA_DIRECTORY\")\n", + "root_dir = tempfile.mkdtemp() if directory is None else directory\n", + "print(root_dir)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Downloading the Bundle" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "name spleen_ct_segmentation\n", + "version None\n", + "bundle_dir /workspace/Data\n", + "source github\n", + "repo None\n", + "url None\n", + "remove_prefix monai_\n", + "progress True\n", + "2023-09-01 08:36:39,219 - INFO - --- input summary of monai.bundle.scripts.download ---\n", + "2023-09-01 08:36:39,220 - INFO - > name: 'spleen_ct_segmentation'\n", + "2023-09-01 08:36:39,220 - INFO - > bundle_dir: '/workspace/Data'\n", + "2023-09-01 08:36:39,221 - INFO - > source: 'github'\n", + "2023-09-01 08:36:39,221 - INFO - > remove_prefix: 
'monai_'\n", + "2023-09-01 08:36:39,222 - INFO - > progress: True\n", + "2023-09-01 08:36:39,223 - INFO - ---\n", + "\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2023-09-01 08:36:40,143 - INFO - Expected md5 is None, skip md5 check for file /workspace/Data/spleen_ct_segmentation_v0.5.3.zip.\n", + "2023-09-01 08:36:40,143 - INFO - File exists: /workspace/Data/spleen_ct_segmentation_v0.5.3.zip, skipped downloading.\n", + "2023-09-01 08:36:40,144 - INFO - Writing into directory: /workspace/Data.\n" + ] + } + ], + "source": [ + "download(name=\"spleen_ct_segmentation\", bundle_dir=root_dir)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Creating a `BundleWorkflow`\n", + "Here we use `create_workflow` to create and initialize a `BundleWorkflow`. `create_workflow` can support both creating a config-based bundle by passing the config files and a python-based bundle by passing the bundle workflow name which should be subclass of `BundleWorkflow` and be available to import." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "workflow_name None\n", + "config_file /workspace/Data/spleen_ct_segmentation/configs/train.json\n", + "workflow_type train\n", + "2023-09-01 08:36:40,372 - INFO - --- input summary of monai.bundle.scripts.run ---\n", + "2023-09-01 08:36:40,373 - INFO - > config_file: PosixPath('/workspace/Data/spleen_ct_segmentation/configs/train.json')\n", + "2023-09-01 08:36:40,374 - INFO - > workflow_type: 'train'\n", + "2023-09-01 08:36:40,375 - INFO - ---\n", + "\n", + "\n", + "2023-09-01 08:36:40,376 - INFO - Setting logging properties based on config: /workspace/Data/spleen_ct_segmentation/configs/logging.conf.\n" + ] + } + ], + "source": [ + "config_file = Path(root_dir)/ \"spleen_ct_segmentation\" / \"configs\" / \"train.json\"\n", + "\n", + "train_workflow = create_workflow(config_file=config_file, workflow_type=\"train\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Getting Properties from the Bundle\n", + "You need to check all supported properties listed [here](https://docs.monai.io/en/latest/mb_properties.html).\n", + "\n", + "You can also use the `add_property` method of the `BundleWorkflow` object to add the property for the application requirements check and access." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "monai.transforms.io.dictionary LoadImaged.__init__:image_only: Current default value of argument `image_only=False` has been deprecated since version 1.1. 
It will be changed to `image_only=True` in version 1.3.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "source": [ + "# for existing properties\n", + "# Note that the properties got from `train_workflow` is already instantiated.\n", + "train_preprocessing = train_workflow.train_preprocessing\n", + "\n", + "# for meta information\n", + "version = train_workflow.version\n", + "\n", + "# add properties\n", + "train_workflow.add_property(name=\"lr_scheduler\", required=True, config_id=\"lr_scheduler\")\n", + "print(train_workflow.lr_scheduler)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Updating Properties\n", + "There are two ways to update the properties:\n", + "- You can override them when you create the workflow\n", + "- Or you can directly overwriting them after creating the workflow " + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "workflow_name None\n", + "config_file /workspace/Data/spleen_ct_segmentation/configs/train.json\n", + "workflow_type train\n", + "max_epochs 3\n", + "dataset_dir /workspace/Data/Task09_Spleen\n", + "2023-09-01 08:42:38,109 - INFO - --- input summary of monai.bundle.scripts.run ---\n", + "2023-09-01 08:42:38,110 - INFO - > config_file: PosixPath('/workspace/Data/spleen_ct_segmentation/configs/train.json')\n", + "2023-09-01 08:42:38,111 - INFO - > workflow_type: 'train'\n", + "2023-09-01 08:42:38,111 - INFO - > max_epochs: 3\n", + "2023-09-01 08:42:38,111 - INFO - > dataset_dir: PosixPath('/workspace/Data/Task09_Spleen')\n", + "2023-09-01 08:42:38,111 - INFO - ---\n", + "\n", + "\n", + "2023-09-01 08:42:38,112 - INFO - Setting logging properties based on config: /workspace/Data/spleen_ct_segmentation/configs/logging.conf.\n" + ] + } + ], + "source": [ + "#1 override them when you create the workflow\n", + "dataset_dir = Path(root_dir) / 
\"Task09_Spleen\"\n", + "override = {\n", + " \"max_epochs\": 3,\n", + " \"dataset_dir\": dataset_dir\n", + " }\n", + "train_workflow = create_workflow(config_file=config_file, workflow_type=\"train\", **override)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 7612c6d9cf7312e409612a4a56cab59f8362a9d4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 1 Sep 2023 08:46:51 +0000 Subject: [PATCH 02/13] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../pythonic_usage_guidance/pythonic_bundle_access.ipynb | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb b/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb index 429fefa6fc..6e55aa911e 100644 --- a/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb +++ b/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb @@ -215,7 +215,7 @@ } ], "source": [ - "config_file = Path(root_dir)/ \"spleen_ct_segmentation\" / \"configs\" / \"train.json\"\n", + "config_file = Path(root_dir) / \"spleen_ct_segmentation\" / \"configs\" / \"train.json\"\n", "\n", "train_workflow = create_workflow(config_file=config_file, workflow_type=\"train\")" ] @@ -300,12 +300,9 @@ } ], "source": [ - "#1 override them when you create the workflow\n", + "# 1 override them when you create the workflow\n", "dataset_dir = 
Path(root_dir) / \"Task09_Spleen\"\n", - "override = {\n", - " \"max_epochs\": 3,\n", - " \"dataset_dir\": dataset_dir\n", - " }\n", + "override = {\"max_epochs\": 3, \"dataset_dir\": dataset_dir}\n", "train_workflow = create_workflow(config_file=config_file, workflow_type=\"train\", **override)" ] }, From 42f0654ddfd113b77bd0027bddf4ab31f8d813d2 Mon Sep 17 00:00:00 2001 From: KumoLiu Date: Tue, 5 Sep 2023 18:48:38 +0800 Subject: [PATCH 03/13] add comparison of the use of `ConfigParser` and `BundleWorkflow` Signed-off-by: KumoLiu --- .../pythonic_bundle_access.ipynb | 193 +++++++++++++----- 1 file changed, 143 insertions(+), 50 deletions(-) diff --git a/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb b/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb index 6e55aa911e..590c7cb834 100644 --- a/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb +++ b/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb @@ -55,18 +55,26 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 1, "metadata": {}, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/usr/local/lib/python3.8/dist-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" + ] + }, { "name": "stdout", "output_type": "stream", "text": [ - "MONAI version: 1.2.0+103.ga7a690ff\n", + "MONAI version: 1.2.0+110.g982755e5\n", "Numpy version: 1.22.2\n", "Pytorch version: 1.13.1+cu117\n", "MONAI flags: HAS_EXT = False, USE_COMPILED = False, USE_META_DICT = False\n", - "MONAI rev id: a7a690ffb781c806b73500e68ba05be6fcdfd11a\n", + "MONAI rev id: 982755e5a567f1c8a3cd93ac44a9684af2d030ed\n", "MONAI __file__: /workspace/Code/MONAI/monai/__init__.py\n", "\n", "Optional dependencies:\n", @@ -99,8 +107,9 @@ "import os\n", "import tempfile\n", "from pathlib import Path\n", + "from monai.transforms import MeanEnsembled, Compose\n", "from monai.config import print_config\n", - "from monai.bundle import download, create_workflow\n", + "from monai.bundle import download, create_workflow, ConfigParser\n", "\n", "print_config()" ] @@ -159,24 +168,18 @@ "url None\n", "remove_prefix monai_\n", "progress True\n", - "2023-09-01 08:36:39,219 - INFO - --- input summary of monai.bundle.scripts.download ---\n", - "2023-09-01 08:36:39,220 - INFO - > name: 'spleen_ct_segmentation'\n", - "2023-09-01 08:36:39,220 - INFO - > bundle_dir: '/workspace/Data'\n", - "2023-09-01 08:36:39,221 - INFO - > source: 'github'\n", - "2023-09-01 08:36:39,221 - INFO - > remove_prefix: 'monai_'\n", - "2023-09-01 08:36:39,222 - INFO - > progress: True\n", - "2023-09-01 08:36:39,223 - INFO - ---\n", + "2023-09-05 10:30:38,443 - INFO - --- input summary of monai.bundle.scripts.download ---\n", + "2023-09-05 10:30:38,444 - INFO - > name: 'spleen_ct_segmentation'\n", + "2023-09-05 10:30:38,444 - INFO - > bundle_dir: '/workspace/Data'\n", + "2023-09-05 10:30:38,445 - INFO - > source: 'github'\n", + "2023-09-05 10:30:38,445 - INFO - > remove_prefix: 'monai_'\n", + "2023-09-05 10:30:38,445 - INFO - > progress: True\n", + "2023-09-05 10:30:38,445 - INFO - ---\n", "\n", - 
"\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "2023-09-01 08:36:40,143 - INFO - Expected md5 is None, skip md5 check for file /workspace/Data/spleen_ct_segmentation_v0.5.3.zip.\n", - "2023-09-01 08:36:40,143 - INFO - File exists: /workspace/Data/spleen_ct_segmentation_v0.5.3.zip, skipped downloading.\n", - "2023-09-01 08:36:40,144 - INFO - Writing into directory: /workspace/Data.\n" + "\n", + "2023-09-05 10:30:39,306 - INFO - Expected md5 is None, skip md5 check for file /workspace/Data/spleen_ct_segmentation_v0.5.3.zip.\n", + "2023-09-05 10:30:39,306 - INFO - File exists: /workspace/Data/spleen_ct_segmentation_v0.5.3.zip, skipped downloading.\n", + "2023-09-05 10:30:39,308 - INFO - Writing into directory: /workspace/Data.\n" ] } ], @@ -189,7 +192,7 @@ "metadata": {}, "source": [ "## Creating a `BundleWorkflow`\n", - "Here we use `create_workflow` to create and initialize a `BundleWorkflow`. `create_workflow` can support both creating a config-based bundle by passing the config files and a python-based bundle by passing the bundle workflow name which should be subclass of `BundleWorkflow` and be available to import." + "In this section, we demonstrate how to create and initialize a `BundleWorkflow` using the `create_workflow` function. This function supports the creation of both config-based bundles by providing the necessary config files and python-based bundles by specifying a bundle workflow name. 
The specified name should be a subclass of `BundleWorkflow` and be accessible for import.\n" ] }, { @@ -204,13 +207,13 @@ "workflow_name None\n", "config_file /workspace/Data/spleen_ct_segmentation/configs/train.json\n", "workflow_type train\n", - "2023-09-01 08:36:40,372 - INFO - --- input summary of monai.bundle.scripts.run ---\n", - "2023-09-01 08:36:40,373 - INFO - > config_file: PosixPath('/workspace/Data/spleen_ct_segmentation/configs/train.json')\n", - "2023-09-01 08:36:40,374 - INFO - > workflow_type: 'train'\n", - "2023-09-01 08:36:40,375 - INFO - ---\n", + "2023-09-05 10:30:40,907 - INFO - --- input summary of monai.bundle.scripts.run ---\n", + "2023-09-05 10:30:40,908 - INFO - > config_file: PosixPath('/workspace/Data/spleen_ct_segmentation/configs/train.json')\n", + "2023-09-05 10:30:40,908 - INFO - > workflow_type: 'train'\n", + "2023-09-05 10:30:40,908 - INFO - ---\n", "\n", "\n", - "2023-09-01 08:36:40,376 - INFO - Setting logging properties based on config: /workspace/Data/spleen_ct_segmentation/configs/logging.conf.\n" + "2023-09-05 10:30:40,909 - INFO - Setting logging properties based on config: /workspace/Data/spleen_ct_segmentation/configs/logging.conf.\n" ] } ], @@ -225,28 +228,28 @@ "metadata": {}, "source": [ "## Getting Properties from the Bundle\n", - "You need to check all supported properties listed [here](https://docs.monai.io/en/latest/mb_properties.html).\n", + "To access properties from the bundle, please refer to the list of supported properties available [here](https://docs.monai.io/en/latest/mb_properties.html).\n", "\n", - "You can also use the `add_property` method of the `BundleWorkflow` object to add the property for the application requirements check and access." 
+ "You can also utilize the `add_property` method of the `BundleWorkflow` object to add properties for application requirements checking and access.\n" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 14, "metadata": {}, "outputs": [ { - "name": "stderr", + "name": "stdout", "output_type": "stream", "text": [ - "monai.transforms.io.dictionary LoadImaged.__init__:image_only: Current default value of argument `image_only=False` has been deprecated since version 1.1. It will be changed to `image_only=True` in version 1.3.\n" + "\n" ] }, { - "name": "stdout", + "name": "stderr", "output_type": "stream", "text": [ - "\n" + "monai.transforms.io.dictionary LoadImaged.__init__:image_only: Current default value of argument `image_only=False` has been deprecated since version 1.1. It will be changed to `image_only=True` in version 1.3.\n" ] } ], @@ -268,14 +271,15 @@ "metadata": {}, "source": [ "## Updating Properties\n", - "There are two ways to update the properties:\n", - "- You can override them when you create the workflow\n", - "- Or you can directly overwriting them after creating the workflow " + "There are two primary methods for updating properties:\n", + "\n", + "1. You can override them during the workflow creation process.\n", + "2. Alternatively, you can directly overwrite them after the workflow has been created." 
] }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 6, "metadata": {}, "outputs": [ { @@ -285,25 +289,91 @@ "workflow_name None\n", "config_file /workspace/Data/spleen_ct_segmentation/configs/train.json\n", "workflow_type train\n", - "max_epochs 3\n", + "epochs 1\n", "dataset_dir /workspace/Data/Task09_Spleen\n", - "2023-09-01 08:42:38,109 - INFO - --- input summary of monai.bundle.scripts.run ---\n", - "2023-09-01 08:42:38,110 - INFO - > config_file: PosixPath('/workspace/Data/spleen_ct_segmentation/configs/train.json')\n", - "2023-09-01 08:42:38,111 - INFO - > workflow_type: 'train'\n", - "2023-09-01 08:42:38,111 - INFO - > max_epochs: 3\n", - "2023-09-01 08:42:38,111 - INFO - > dataset_dir: PosixPath('/workspace/Data/Task09_Spleen')\n", - "2023-09-01 08:42:38,111 - INFO - ---\n", + "bundle_root /workspace/Data\n", + "2023-09-05 10:32:00,679 - INFO - --- input summary of monai.bundle.scripts.run ---\n", + "2023-09-05 10:32:00,682 - INFO - > config_file: PosixPath('/workspace/Data/spleen_ct_segmentation/configs/train.json')\n", + "2023-09-05 10:32:00,682 - INFO - > workflow_type: 'train'\n", + "2023-09-05 10:32:00,684 - INFO - > epochs: 1\n", + "2023-09-05 10:32:00,684 - INFO - > dataset_dir: PosixPath('/workspace/Data/Task09_Spleen')\n", + "2023-09-05 10:32:00,685 - INFO - > bundle_root: '/workspace/Data'\n", + "2023-09-05 10:32:00,685 - INFO - ---\n", "\n", "\n", - "2023-09-01 08:42:38,112 - INFO - Setting logging properties based on config: /workspace/Data/spleen_ct_segmentation/configs/logging.conf.\n" + "2023-09-05 10:32:00,686 - INFO - Setting logging properties based on config: /workspace/Data/spleen_ct_segmentation/configs/logging.conf.\n", + "max epochs: 1\n" ] } ], "source": [ - "# 1 override them when you create the workflow\n", + "#1 override them when you create the workflow\n", "dataset_dir = Path(root_dir) / \"Task09_Spleen\"\n", - "override = {\"max_epochs\": 3, \"dataset_dir\": dataset_dir}\n", - "train_workflow = 
create_workflow(config_file=config_file, workflow_type=\"train\", **override)" + "bundle_root = root_dir\n", + "override = {\"epochs\": 1, \"dataset_dir\": dataset_dir, \"bundle_root\": bundle_root}\n", + "train_workflow = create_workflow(config_file=config_file, workflow_type=\"train\", **override)\n", + "print('max epochs:', train_workflow.max_epochs)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "max epochs: 3\n", + "bundle root: /workspace/Data\n" + ] + } + ], + "source": [ + "#2 directly overwriting them after creating the workflow\n", + "train_workflow.max_epochs = 3\n", + "train_workflow.bundle_root = bundle_root\n", + "\n", + "# Note that must initialize again after changing the content\n", + "train_workflow.initialize()\n", + "print('max epochs:', train_workflow.max_epochs)\n", + "print('bundle root:', train_workflow.bundle_root)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using Components in Your Own Pipeline\n", + "If you wish to incorporate additional processing into the bundle's existing post-processing and use it within your custom pipeline, you can follow these steps. A comprehensive example can be found [here](https://github.com/Project-MONAI/tutorials/tree/main/model_zoo/app_integrate_bundle)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(, )\n" + ] + } + ], + "source": [ + "n_splits = 5\n", + "ensemble_transform = MeanEnsembled(keys=[\"pred\"] * n_splits, output_key=\"pred\")\n", + "update_postprocessing = Compose((ensemble_transform, train_workflow.val_postprocessing))\n", + "\n", + "print(update_postprocessing.transforms)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## A simple comparison of the use of `ConfigParser` and `BundleWorkflow`" ] }, { @@ -311,7 +381,30 @@ "execution_count": null, "metadata": {}, "outputs": [], - "source": [] + "source": [ + "# ConfigParser\n", + "\n", + "## load config\n", + "bundle_config = ConfigParser()\n", + "bundle_config.read_config(config_file)\n", + "## update config\n", + "bundle_config.config.update({\"bundle_root\": bundle_root})\n", + "## get config\n", + "bundle_config.get(\"bundle_root\")\n", + "bundle_config.get_parsed_content(\"train#handlers\", instantiate=True)\n", + "\n", + "\n", + "# BundleWorkflow\n", + "\n", + "## load config\n", + "workflow = create_workflow(config_file=config_file, workflow_type=\"train\")\n", + "## update config\n", + "workflow.bundle_root = bundle_root\n", + "workflow.initialize()\n", + "## get config\n", + "workflow.bundle_root\n", + "workflow.train_handlers" + ] } ], "metadata": { From 1ea54777244108d5af8feab71187067afa3c4ba8 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 5 Sep 2023 10:49:48 +0000 Subject: [PATCH 04/13] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../pythonic_bundle_access.ipynb | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb b/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb index 
590c7cb834..4acf653e1b 100644 --- a/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb +++ b/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb @@ -307,12 +307,12 @@ } ], "source": [ - "#1 override them when you create the workflow\n", + "# 1 override them when you create the workflow\n", "dataset_dir = Path(root_dir) / \"Task09_Spleen\"\n", "bundle_root = root_dir\n", "override = {\"epochs\": 1, \"dataset_dir\": dataset_dir, \"bundle_root\": bundle_root}\n", "train_workflow = create_workflow(config_file=config_file, workflow_type=\"train\", **override)\n", - "print('max epochs:', train_workflow.max_epochs)" + "print(\"max epochs:\", train_workflow.max_epochs)" ] }, { @@ -330,14 +330,14 @@ } ], "source": [ - "#2 directly overwriting them after creating the workflow\n", + "# 2 directly overwriting them after creating the workflow\n", "train_workflow.max_epochs = 3\n", "train_workflow.bundle_root = bundle_root\n", "\n", "# Note that must initialize again after changing the content\n", "train_workflow.initialize()\n", - "print('max epochs:', train_workflow.max_epochs)\n", - "print('bundle root:', train_workflow.bundle_root)\n" + "print(\"max epochs:\", train_workflow.max_epochs)\n", + "print(\"bundle root:\", train_workflow.bundle_root)" ] }, { From ddcedf2c63c4793493dc3caefb9a61b90c04fc23 Mon Sep 17 00:00:00 2001 From: KumoLiu Date: Wed, 6 Sep 2023 12:28:39 +0800 Subject: [PATCH 05/13] add usage of `load` API Signed-off-by: KumoLiu --- .../pythonic_bundle_access.ipynb | 96 ++++++++++++------- 1 file changed, 64 insertions(+), 32 deletions(-) diff --git a/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb b/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb index 4acf653e1b..db0dd7a382 100644 --- a/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb +++ b/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb @@ -25,9 +25,11 @@ "\n", "3. 
**Getting Properties from the Bundle**: You can then retrieve the properties of the bundle by directly accessing them. For example, to get the version of the bundle, you can use `workflow.version`.\n", "\n", - "4. **Updating Properties**: If you need to update any of the properties, you can do so by directly overwriting them. For example, to update the max epochs of the bundle, you can use `workflow.max_epochs = 10`.\n", + "4. **Using Pretrained Weights from the Bundle**: You can conveniently employ pretrained weights from the bundle and customize them using the `load` API.\n", "\n", - "5. **Using Components in Your Own Pipeline**: Finally, you can use the components from the bundle in your own pipeline by accessing them through the `BundleWorkflow` object.\n" + "5. **Updating Properties**: If you need to update any of the properties, you can do so by directly overwriting them. For example, to update the max epochs of the bundle, you can use `workflow.max_epochs = 10`.\n", + "\n", + "6. **Using Components in Your Own Pipeline**: Finally, you can use the components from the bundle in your own pipeline by accessing them through the `BundleWorkflow` object.\n" ] }, { @@ -55,26 +57,18 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 8, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/usr/local/lib/python3.8/dist-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", - " from .autonotebook import tqdm as notebook_tqdm\n" - ] - }, { "name": "stdout", "output_type": "stream", "text": [ - "MONAI version: 1.2.0+110.g982755e5\n", + "MONAI version: 1.2.0+107.g1ed4f94b\n", "Numpy version: 1.22.2\n", "Pytorch version: 1.13.1+cu117\n", "MONAI flags: HAS_EXT = False, USE_COMPILED = False, USE_META_DICT = False\n", - "MONAI rev id: 982755e5a567f1c8a3cd93ac44a9684af2d030ed\n", + "MONAI rev id: 1ed4f94ba3ab84cd5b7c14c840f1cf8b9269e266\n", "MONAI __file__: /workspace/Code/MONAI/monai/__init__.py\n", "\n", "Optional dependencies:\n", @@ -107,9 +101,10 @@ "import os\n", "import tempfile\n", "from pathlib import Path\n", + "from monai.networks.nets import SegResNet\n", "from monai.transforms import MeanEnsembled, Compose\n", "from monai.config import print_config\n", - "from monai.bundle import download, create_workflow, ConfigParser\n", + "from monai.bundle import download, create_workflow, ConfigParser, load\n", "\n", "print_config()" ] @@ -153,7 +148,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "metadata": {}, "outputs": [ { @@ -163,23 +158,23 @@ "name spleen_ct_segmentation\n", "version None\n", "bundle_dir /workspace/Data\n", - "source github\n", + "source monaihosting\n", "repo None\n", "url None\n", "remove_prefix monai_\n", "progress True\n", - "2023-09-05 10:30:38,443 - INFO - --- input summary of monai.bundle.scripts.download ---\n", - "2023-09-05 10:30:38,444 - INFO - > name: 'spleen_ct_segmentation'\n", - "2023-09-05 10:30:38,444 - INFO - > bundle_dir: '/workspace/Data'\n", - "2023-09-05 10:30:38,445 - INFO - > source: 'github'\n", - "2023-09-05 10:30:38,445 - INFO - > remove_prefix: 'monai_'\n", - "2023-09-05 10:30:38,445 - INFO - > progress: True\n", - "2023-09-05 10:30:38,445 - INFO - ---\n", + "2023-09-06 03:32:53,482 - INFO - --- input summary of monai.bundle.scripts.download ---\n", + "2023-09-06 03:32:53,483 - INFO - > 
name: 'spleen_ct_segmentation'\n", + "2023-09-06 03:32:53,483 - INFO - > bundle_dir: '/workspace/Data'\n", + "2023-09-06 03:32:53,483 - INFO - > source: 'monaihosting'\n", + "2023-09-06 03:32:53,484 - INFO - > remove_prefix: 'monai_'\n", + "2023-09-06 03:32:53,484 - INFO - > progress: True\n", + "2023-09-06 03:32:53,484 - INFO - ---\n", "\n", "\n", - "2023-09-05 10:30:39,306 - INFO - Expected md5 is None, skip md5 check for file /workspace/Data/spleen_ct_segmentation_v0.5.3.zip.\n", - "2023-09-05 10:30:39,306 - INFO - File exists: /workspace/Data/spleen_ct_segmentation_v0.5.3.zip, skipped downloading.\n", - "2023-09-05 10:30:39,308 - INFO - Writing into directory: /workspace/Data.\n" + "2023-09-06 03:32:54,228 - INFO - Expected md5 is None, skip md5 check for file /workspace/Data/spleen_ct_segmentation_v0.5.3.zip.\n", + "2023-09-06 03:32:54,228 - INFO - File exists: /workspace/Data/spleen_ct_segmentation_v0.5.3.zip, skipped downloading.\n", + "2023-09-06 03:32:54,230 - INFO - Writing into directory: /workspace/Data.\n" ] } ], @@ -197,7 +192,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 5, "metadata": {}, "outputs": [ { @@ -207,13 +202,13 @@ "workflow_name None\n", "config_file /workspace/Data/spleen_ct_segmentation/configs/train.json\n", "workflow_type train\n", - "2023-09-05 10:30:40,907 - INFO - --- input summary of monai.bundle.scripts.run ---\n", - "2023-09-05 10:30:40,908 - INFO - > config_file: PosixPath('/workspace/Data/spleen_ct_segmentation/configs/train.json')\n", - "2023-09-05 10:30:40,908 - INFO - > workflow_type: 'train'\n", - "2023-09-05 10:30:40,908 - INFO - ---\n", + "2023-09-06 03:33:02,472 - INFO - --- input summary of monai.bundle.scripts.run ---\n", + "2023-09-06 03:33:02,473 - INFO - > config_file: PosixPath('/workspace/Data/spleen_ct_segmentation/configs/train.json')\n", + "2023-09-06 03:33:02,473 - INFO - > workflow_type: 'train'\n", + "2023-09-06 03:33:02,474 - INFO - ---\n", "\n", "\n", - "2023-09-05 
10:30:40,909 - INFO - Setting logging properties based on config: /workspace/Data/spleen_ct_segmentation/configs/logging.conf.\n" + "2023-09-06 03:33:02,474 - INFO - Setting logging properties based on config: /workspace/Data/spleen_ct_segmentation/configs/logging.conf.\n" ] } ], @@ -266,6 +261,43 @@ "print(train_workflow.lr_scheduler)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Utilizing Pretrained Weights from the Bundle\n", + "\n", + "This function primarily serves to provide an instantiated network by loading pretrained weights from the bundle. You have the flexibility to directly update the parameters or filter the weights. Additionally, it's possible to use your own model instead of the one included in the bundle.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2023-09-06 04:06:53,867 - INFO - 'dst' model updated: 82 of 83 variables.\n" + ] + } + ], + "source": [ + "# directly get an instantiated network that loaded the weights.\n", + "model = load(name=\"brats_mri_segmentation\", bundle_dir=root_dir, source=\"monaihosting\")\n", + "\n", + "# directly update the parameters for the model from the bundle.\n", + "model = load(name=\"brats_mri_segmentation\", bundle_dir=root_dir, source=\"monaihosting\", in_channels=3, out_channels=1)\n", + "\n", + "# using `exclude_vars` to filter loading weights.\n", + "model = load(name=\"brats_mri_segmentation\", bundle_dir=root_dir, source=\"monaihosting\", copy_model_args={\"exclude_vars\": \"convInit|conv_final\"})\n", + "\n", + "# pass model and return an instantiated network that loaded the weights.\n", + "my_model = SegResNet(blocks_down=[1, 2, 2, 4], blocks_up=[1, 1, 1], init_filters=16, in_channels=1, out_channels=3)\n", + "model = load(name=\"brats_mri_segmentation\", bundle_dir=root_dir, source=\"monaihosting\", model=my_model)" + ] + }, { "cell_type": "markdown", 
"metadata": {}, From afbfeaed30f3677b5d3e5ffe20fdbfdf17c2d47a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 6 Sep 2023 04:29:53 +0000 Subject: [PATCH 06/13] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../pythonic_usage_guidance/pythonic_bundle_access.ipynb | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb b/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb index db0dd7a382..1bfcf4f7f9 100644 --- a/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb +++ b/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb @@ -291,7 +291,12 @@ "model = load(name=\"brats_mri_segmentation\", bundle_dir=root_dir, source=\"monaihosting\", in_channels=3, out_channels=1)\n", "\n", "# using `exclude_vars` to filter loading weights.\n", - "model = load(name=\"brats_mri_segmentation\", bundle_dir=root_dir, source=\"monaihosting\", copy_model_args={\"exclude_vars\": \"convInit|conv_final\"})\n", + "model = load(\n", + " name=\"brats_mri_segmentation\",\n", + " bundle_dir=root_dir,\n", + " source=\"monaihosting\",\n", + " copy_model_args={\"exclude_vars\": \"convInit|conv_final\"},\n", + ")\n", "\n", "# pass model and return an instantiated network that loaded the weights.\n", "my_model = SegResNet(blocks_down=[1, 2, 2, 4], blocks_up=[1, 1, 1], init_filters=16, in_channels=1, out_channels=3)\n", From 287512d2386baae2103ec74c2fec10d0763b8bf9 Mon Sep 17 00:00:00 2001 From: KumoLiu Date: Wed, 6 Sep 2023 13:51:40 +0800 Subject: [PATCH 07/13] fix flake8 Signed-off-by: KumoLiu --- .../pythonic_bundle_access.ipynb | 38 ++++++++++++++----- 1 file changed, 29 insertions(+), 9 deletions(-) diff --git a/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb b/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb index db0dd7a382..a4011f2184 100644 --- 
a/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb +++ b/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb @@ -99,6 +99,7 @@ ], "source": [ "import os\n", + "import shutil\n", "import tempfile\n", "from pathlib import Path\n", "from monai.networks.nets import SegResNet\n", @@ -291,7 +292,8 @@ "model = load(name=\"brats_mri_segmentation\", bundle_dir=root_dir, source=\"monaihosting\", in_channels=3, out_channels=1)\n", "\n", "# using `exclude_vars` to filter loading weights.\n", - "model = load(name=\"brats_mri_segmentation\", bundle_dir=root_dir, source=\"monaihosting\", copy_model_args={\"exclude_vars\": \"convInit|conv_final\"})\n", + "model = load(name=\"brats_mri_segmentation\", bundle_dir=root_dir, source=\"monaihosting\",\n", + " copy_model_args={\"exclude_vars\": \"convInit|conv_final\"})\n", "\n", "# pass model and return an instantiated network that loaded the weights.\n", "my_model = SegResNet(blocks_down=[1, 2, 2, 4], blocks_up=[1, 1, 1], init_filters=16, in_channels=1, out_channels=3)\n", @@ -416,27 +418,46 @@ "source": [ "# ConfigParser\n", "\n", - "## load config\n", + "# load config\n", "bundle_config = ConfigParser()\n", "bundle_config.read_config(config_file)\n", - "## update config\n", + "# update config\n", "bundle_config.config.update({\"bundle_root\": bundle_root})\n", - "## get config\n", + "# get config\n", "bundle_config.get(\"bundle_root\")\n", "bundle_config.get_parsed_content(\"train#handlers\", instantiate=True)\n", "\n", "\n", "# BundleWorkflow\n", "\n", - "## load config\n", + "# load config\n", "workflow = create_workflow(config_file=config_file, workflow_type=\"train\")\n", - "## update config\n", + "# update config\n", "workflow.bundle_root = bundle_root\n", "workflow.initialize()\n", - "## get config\n", + "# get config\n", "workflow.bundle_root\n", "workflow.train_handlers" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Cleanup data directory\n", + "\n", + "Remove directory if a 
temporary was used." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if directory is None:\n", + " shutil.rmtree(root_dir)" + ] } ], "metadata": { @@ -456,8 +477,7 @@ "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.10" - }, - "orig_nbformat": 4 + } }, "nbformat": 4, "nbformat_minor": 2 From 423e8a82bf977637762eed4635fe9e0af44573bf Mon Sep 17 00:00:00 2001 From: KumoLiu Date: Wed, 6 Sep 2023 14:13:37 +0800 Subject: [PATCH 08/13] rm cell out Signed-off-by: KumoLiu --- .../pythonic_bundle_access.ipynb | 41 +------------------ 1 file changed, 2 insertions(+), 39 deletions(-) diff --git a/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb b/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb index 8e32540313..3695af980b 100644 --- a/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb +++ b/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb @@ -57,46 +57,9 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "MONAI version: 1.2.0+107.g1ed4f94b\n", - "Numpy version: 1.22.2\n", - "Pytorch version: 1.13.1+cu117\n", - "MONAI flags: HAS_EXT = False, USE_COMPILED = False, USE_META_DICT = False\n", - "MONAI rev id: 1ed4f94ba3ab84cd5b7c14c840f1cf8b9269e266\n", - "MONAI __file__: /workspace/Code/MONAI/monai/__init__.py\n", - "\n", - "Optional dependencies:\n", - "Pytorch Ignite version: 0.4.11\n", - "ITK version: 5.3.0\n", - "Nibabel version: 5.1.0\n", - "scikit-image version: 0.21.0\n", - "scipy version: 1.10.1\n", - "Pillow version: 9.2.0\n", - "Tensorboard version: 2.9.0\n", - "gdown version: 4.7.1\n", - "TorchVision version: 0.14.1+cu117\n", - "tqdm version: 4.65.0\n", - "lmdb version: 1.4.1\n", - "psutil version: 5.9.4\n", - "pandas version: 1.5.2\n", - "einops version: 0.6.1\n", - "transformers version: 4.21.3\n", - "mlflow 
version: 2.4.0\n", - "pynrrd version: 1.0.0\n", - "clearml version: 1.11.1rc1\n", - "\n", - "For details about installing the optional dependencies, please visit:\n", - " https://docs.monai.io/en/latest/installation.html#installing-the-recommended-dependencies\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "import os\n", "import shutil\n", From 095597bb15b4503705b99f2d6a8869ca158457f9 Mon Sep 17 00:00:00 2001 From: KumoLiu Date: Wed, 6 Sep 2023 14:57:28 +0800 Subject: [PATCH 09/13] add usage in your own pipeline Signed-off-by: KumoLiu --- .../pythonic_bundle_access.ipynb | 192 ++++++++++-------- 1 file changed, 104 insertions(+), 88 deletions(-) diff --git a/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb b/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb index 3695af980b..dbc6243808 100644 --- a/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb +++ b/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb @@ -25,11 +25,13 @@ "\n", "3. **Getting Properties from the Bundle**: You can then retrieve the properties of the bundle by directly accessing them. For example, to get the version of the bundle, you can use `workflow.version`.\n", "\n", - "4. **Using Pretrained Weights from the Bundle**: You can conveniently employ pretrained weights from the bundle and customize them using the `load` API.\n", + "4. **Updating Properties**: If you need to update any of the properties, you can do so by directly overwriting them. For example, to update the max epochs of the bundle, you can use `workflow.max_epochs = 10`.\n", "\n", - "5. **Updating Properties**: If you need to update any of the properties, you can do so by directly overwriting them. For example, to update the max epochs of the bundle, you can use `workflow.max_epochs = 10`.\n", + "5. **Using Components in Your Own Pipeline**: Finally, you can use the components from the bundle in your own pipeline by accessing them through the `BundleWorkflow` object.\n", "\n", - "6. 
**Using Components in Your Own Pipeline**: Finally, you can use the components from the bundle in your own pipeline by accessing them through the `BundleWorkflow` object.\n" + "6. **Utilizing Pretrained Weights from the Bundle**: You can conveniently employ pretrained weights from the bundle and customize them using the `load` API.\n", + "\n", + "7. **A Simple Comparison of the Usage between `ConfigParser` and `BundleWorkflow`**" ] }, { @@ -65,6 +67,7 @@ "import shutil\n", "import tempfile\n", "from pathlib import Path\n", + "from monai.engines import EnsembleEvaluator\n", "from monai.networks.nets import SegResNet\n", "from monai.transforms import MeanEnsembled, Compose\n", "from monai.config import print_config\n", @@ -112,7 +115,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 11, "metadata": {}, "outputs": [ { @@ -127,18 +130,18 @@ "url None\n", "remove_prefix monai_\n", "progress True\n", - "2023-09-06 03:32:53,482 - INFO - --- input summary of monai.bundle.scripts.download ---\n", - "2023-09-06 03:32:53,483 - INFO - > name: 'spleen_ct_segmentation'\n", - "2023-09-06 03:32:53,483 - INFO - > bundle_dir: '/workspace/Data'\n", - "2023-09-06 03:32:53,483 - INFO - > source: 'monaihosting'\n", - "2023-09-06 03:32:53,484 - INFO - > remove_prefix: 'monai_'\n", - "2023-09-06 03:32:53,484 - INFO - > progress: True\n", - "2023-09-06 03:32:53,484 - INFO - ---\n", + "2023-09-06 06:40:06,678 - INFO - --- input summary of monai.bundle.scripts.download ---\n", + "2023-09-06 06:40:06,679 - INFO - > name: 'spleen_ct_segmentation'\n", + "2023-09-06 06:40:06,679 - INFO - > bundle_dir: '/workspace/Data'\n", + "2023-09-06 06:40:06,679 - INFO - > source: 'monaihosting'\n", + "2023-09-06 06:40:06,679 - INFO - > remove_prefix: 'monai_'\n", + "2023-09-06 06:40:06,679 - INFO - > progress: True\n", + "2023-09-06 06:40:06,680 - INFO - ---\n", "\n", "\n", - "2023-09-06 03:32:54,228 - INFO - Expected md5 is None, skip md5 check for file 
/workspace/Data/spleen_ct_segmentation_v0.5.3.zip.\n", - "2023-09-06 03:32:54,228 - INFO - File exists: /workspace/Data/spleen_ct_segmentation_v0.5.3.zip, skipped downloading.\n", - "2023-09-06 03:32:54,230 - INFO - Writing into directory: /workspace/Data.\n" + "2023-09-06 06:40:07,316 - INFO - Expected md5 is None, skip md5 check for file /workspace/Data/spleen_ct_segmentation_v0.5.3.zip.\n", + "2023-09-06 06:40:07,316 - INFO - File exists: /workspace/Data/spleen_ct_segmentation_v0.5.3.zip, skipped downloading.\n", + "2023-09-06 06:40:07,317 - INFO - Writing into directory: /workspace/Data.\n" ] } ], @@ -156,7 +159,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 3, "metadata": {}, "outputs": [ { @@ -166,20 +169,20 @@ "workflow_name None\n", "config_file /workspace/Data/spleen_ct_segmentation/configs/train.json\n", "workflow_type train\n", - "2023-09-06 03:33:02,472 - INFO - --- input summary of monai.bundle.scripts.run ---\n", - "2023-09-06 03:33:02,473 - INFO - > config_file: PosixPath('/workspace/Data/spleen_ct_segmentation/configs/train.json')\n", - "2023-09-06 03:33:02,473 - INFO - > workflow_type: 'train'\n", - "2023-09-06 03:33:02,474 - INFO - ---\n", + "2023-09-06 06:47:55,435 - INFO - --- input summary of monai.bundle.scripts.run ---\n", + "2023-09-06 06:47:55,437 - INFO - > config_file: '/workspace/Data/spleen_ct_segmentation/configs/train.json'\n", + "2023-09-06 06:47:55,439 - INFO - > workflow_type: 'train'\n", + "2023-09-06 06:47:55,440 - INFO - ---\n", "\n", "\n", - "2023-09-06 03:33:02,474 - INFO - Setting logging properties based on config: /workspace/Data/spleen_ct_segmentation/configs/logging.conf.\n" + "2023-09-06 06:47:55,441 - INFO - Setting logging properties based on config: /workspace/Data/spleen_ct_segmentation/configs/logging.conf.\n" ] } ], "source": [ "config_file = Path(root_dir) / \"spleen_ct_segmentation\" / \"configs\" / \"train.json\"\n", "\n", - "train_workflow = create_workflow(config_file=config_file, 
workflow_type=\"train\")" + "train_workflow = create_workflow(config_file=str(config_file), workflow_type=\"train\")" ] }, { @@ -194,21 +197,14 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 18, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "monai.transforms.io.dictionary LoadImaged.__init__:image_only: Current default value of argument `image_only=False` has been deprecated since version 1.1. It will be changed to `image_only=True` in version 1.3.\n" + "\n" ] } ], @@ -225,48 +221,6 @@ "print(train_workflow.lr_scheduler)" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Utilizing Pretrained Weights from the Bundle\n", - "\n", - "This function primarily serves to provide an instantiated network by loading pretrained weights from the bundle. You have the flexibility to directly update the parameters or filter the weights. 
Additionally, it's possible to use your own model instead of the one included in the bundle.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "2023-09-06 04:06:53,867 - INFO - 'dst' model updated: 82 of 83 variables.\n" - ] - } - ], - "source": [ - "# directly get an instantiated network that loaded the weights.\n", - "model = load(name=\"brats_mri_segmentation\", bundle_dir=root_dir, source=\"monaihosting\")\n", - "\n", - "# directly update the parameters for the model from the bundle.\n", - "model = load(name=\"brats_mri_segmentation\", bundle_dir=root_dir, source=\"monaihosting\", in_channels=3, out_channels=1)\n", - "\n", - "# using `exclude_vars` to filter loading weights.\n", - "model = load(\n", - " name=\"brats_mri_segmentation\",\n", - " bundle_dir=root_dir,\n", - " source=\"monaihosting\",\n", - " copy_model_args={\"exclude_vars\": \"convInit|conv_final\"},\n", - ")\n", - "\n", - "# pass model and return an instantiated network that loaded the weights.\n", - "my_model = SegResNet(blocks_down=[1, 2, 2, 4], blocks_up=[1, 1, 1], init_filters=16, in_channels=1, out_channels=3)\n", - "model = load(name=\"brats_mri_segmentation\", bundle_dir=root_dir, source=\"monaihosting\", model=my_model)" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -280,7 +234,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 16, "metadata": {}, "outputs": [ { @@ -293,16 +247,16 @@ "epochs 1\n", "dataset_dir /workspace/Data/Task09_Spleen\n", "bundle_root /workspace/Data\n", - "2023-09-05 10:32:00,679 - INFO - --- input summary of monai.bundle.scripts.run ---\n", - "2023-09-05 10:32:00,682 - INFO - > config_file: PosixPath('/workspace/Data/spleen_ct_segmentation/configs/train.json')\n", - "2023-09-05 10:32:00,682 - INFO - > workflow_type: 'train'\n", - "2023-09-05 10:32:00,684 - INFO - > epochs: 1\n", - "2023-09-05 10:32:00,684 - INFO - > 
dataset_dir: PosixPath('/workspace/Data/Task09_Spleen')\n", - "2023-09-05 10:32:00,685 - INFO - > bundle_root: '/workspace/Data'\n", - "2023-09-05 10:32:00,685 - INFO - ---\n", + "2023-09-06 06:51:08,123 - INFO - --- input summary of monai.bundle.scripts.run ---\n", + "2023-09-06 06:51:08,124 - INFO - > config_file: PosixPath('/workspace/Data/spleen_ct_segmentation/configs/train.json')\n", + "2023-09-06 06:51:08,125 - INFO - > workflow_type: 'train'\n", + "2023-09-06 06:51:08,126 - INFO - > epochs: 1\n", + "2023-09-06 06:51:08,126 - INFO - > dataset_dir: '/workspace/Data/Task09_Spleen'\n", + "2023-09-06 06:51:08,126 - INFO - > bundle_root: '/workspace/Data'\n", + "2023-09-06 06:51:08,127 - INFO - ---\n", "\n", "\n", - "2023-09-05 10:32:00,686 - INFO - Setting logging properties based on config: /workspace/Data/spleen_ct_segmentation/configs/logging.conf.\n", + "2023-09-06 06:51:08,127 - INFO - Setting logging properties based on config: /workspace/Data/spleen_ct_segmentation/configs/logging.conf.\n", "max epochs: 1\n" ] } @@ -311,14 +265,14 @@ "# 1 override them when you create the workflow\n", "dataset_dir = Path(root_dir) / \"Task09_Spleen\"\n", "bundle_root = root_dir\n", - "override = {\"epochs\": 1, \"dataset_dir\": dataset_dir, \"bundle_root\": bundle_root}\n", + "override = {\"epochs\": 1, \"dataset_dir\": str(dataset_dir), \"bundle_root\": bundle_root}\n", "train_workflow = create_workflow(config_file=config_file, workflow_type=\"train\", **override)\n", "print(\"max epochs:\", train_workflow.max_epochs)" ] }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 17, "metadata": {}, "outputs": [ { @@ -351,30 +305,92 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 20, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "(, )\n" + "(, )\n", + "2023-09-06 06:53:28,284 - ignite.engine.engine.EnsembleEvaluator - INFO - Engine run resuming from iteration 0, epoch 0 until 1 epochs\n" + ] + 
}, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "property 'dataloader' already exists in the properties list, overriding it.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2023-09-06 06:53:31,896 - ignite.engine.engine.EnsembleEvaluator - INFO - Epoch[1] Complete. Time taken: 00:00:03.305\n", + "2023-09-06 06:53:31,897 - ignite.engine.engine.EnsembleEvaluator - INFO - Engine run complete. Time taken: 00:00:03.612\n" ] } ], "source": [ - "n_splits = 5\n", + "n_splits = 3\n", "ensemble_transform = MeanEnsembled(keys=[\"pred\"] * n_splits, output_key=\"pred\")\n", "update_postprocessing = Compose((ensemble_transform, train_workflow.val_postprocessing))\n", "\n", - "print(update_postprocessing.transforms)" + "print(update_postprocessing.transforms)\n", + "\n", + "train_workflow.add_property(name=\"dataloader\", required=True, config_id=\"train#dataloader\")\n", + "evaluator = EnsembleEvaluator(\n", + " device=train_workflow.device,\n", + " val_data_loader=train_workflow.dataloader,\n", + " pred_keys=[\"pred\"] * n_splits,\n", + " networks=[train_workflow.network_def] * n_splits,\n", + " inferer=train_workflow.train_inferer,\n", + " postprocessing=update_postprocessing,\n", + ")\n", + "\n", + "evaluator.run()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Utilizing Pretrained Weights from the Bundle\n", + "\n", + "This function primarily serves to provide an instantiated network by loading pretrained weights from the bundle. You have the flexibility to directly update the parameters or filter the weights. 
Additionally, it's possible to use your own model instead of the one included in the bundle.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# directly get an instantiated network that loaded the weights.\n", + "model = load(name=\"brats_mri_segmentation\", bundle_dir=root_dir, source=\"monaihosting\")\n", + "\n", + "# directly update the parameters for the model from the bundle.\n", + "model = load(name=\"brats_mri_segmentation\", bundle_dir=root_dir, source=\"monaihosting\", in_channels=3, out_channels=1)\n", + "\n", + "# using `exclude_vars` to filter loading weights.\n", + "model = load(\n", + " name=\"brats_mri_segmentation\",\n", + " bundle_dir=root_dir,\n", + " source=\"monaihosting\",\n", + " copy_model_args={\"exclude_vars\": \"convInit|conv_final\"},\n", + ")\n", + "\n", + "# pass model and return an instantiated network that loaded the weights.\n", + "my_model = SegResNet(blocks_down=[1, 2, 2, 4], blocks_up=[1, 1, 1], init_filters=16, in_channels=1, out_channels=3)\n", + "model = load(name=\"brats_mri_segmentation\", bundle_dir=root_dir, source=\"monaihosting\", model=my_model)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## A simple comparison of the use of `ConfigParser` and `BundleWorkflow`" + "## A Simple Comparison of the Usage between `ConfigParser` and `BundleWorkflow`" ] }, { From 0bdb79b15ab067914441f936773d4367f679c45d Mon Sep 17 00:00:00 2001 From: KumoLiu Date: Wed, 6 Sep 2023 15:09:25 +0800 Subject: [PATCH 10/13] add readme Signed-off-by: KumoLiu --- bundle/pythonic_usage_guidance/README.md | 39 +++++++++++++++++++ .../pythonic_bundle_access.ipynb | 6 +-- 2 files changed, 42 insertions(+), 3 deletions(-) create mode 100644 bundle/pythonic_usage_guidance/README.md diff --git a/bundle/pythonic_usage_guidance/README.md b/bundle/pythonic_usage_guidance/README.md new file mode 100644 index 0000000000..6fc779635b --- /dev/null +++ 
b/bundle/pythonic_usage_guidance/README.md @@ -0,0 +1,39 @@ +# Pythonic Bundle Access Tutorial + +A MONAI bundle contains the stored weights of a model, training, inference, post-processing transform sequences and other information. This tutorial aims to explore how to access a bundle in Python and use it in your own application. We'll cover the following topics: +1. Downloading the Bundle. +2. Creating a `BundleWorkflow`. +3. Getting Properties from the Bundle. +4. Updating Properties. +5. Using Components in Your Own Pipeline. +6. Utilizing Pretrained Weights from the Bundle. +7. A Simple Comparison of the Usage between `ConfigParser` and `BundleWorkflow`. + +The example training dataset is Task09_Spleen.tar from http://medicaldecathlon.com/. + +## Requirements + +The script is tested with: + +- `Ubuntu 20.04` | `Python 3.8.10` | `CUDA 12.2` | `Pytorch 1.13.1` + +- it is tested on a 24GB single-GPU machine + +## Dependencies and installation + +### MONAI + +You can use conda environments to install the dependencies. + +Or you can just use the MONAI Docker image. +```bash +docker pull projectmonai/monai:latest +``` + +For more information please check out [the installation guide](https://docs.monai.io/en/latest/installation.html). + +## Questions and bugs + +- For questions relating to the use of MONAI, please use our [Discussions tab](https://github.com/Project-MONAI/MONAI/discussions) on the main repository of MONAI. +- For bugs relating to MONAI functionality, please create an issue on the [main repository](https://github.com/Project-MONAI/MONAI/issues). +- For bugs relating to the running of a tutorial, please create an issue in [this repository](https://github.com/Project-MONAI/Tutorials/issues). 
diff --git a/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb b/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb index dbc6243808..a9f3d9c5f4 100644 --- a/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb +++ b/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb @@ -336,15 +336,15 @@ "n_splits = 3\n", "ensemble_transform = MeanEnsembled(keys=[\"pred\"] * n_splits, output_key=\"pred\")\n", "update_postprocessing = Compose((ensemble_transform, train_workflow.val_postprocessing))\n", - "\n", "print(update_postprocessing.transforms)\n", "\n", + "device = train_workflow.device\n", "train_workflow.add_property(name=\"dataloader\", required=True, config_id=\"train#dataloader\")\n", "evaluator = EnsembleEvaluator(\n", - " device=train_workflow.device,\n", + " device=device,\n", " val_data_loader=train_workflow.dataloader,\n", " pred_keys=[\"pred\"] * n_splits,\n", - " networks=[train_workflow.network_def] * n_splits,\n", + " networks=[train_workflow.network_def.to(train_workflow.device)] * n_splits,\n", " inferer=train_workflow.train_inferer,\n", " postprocessing=update_postprocessing,\n", ")\n", From 48e159e0e40f8b9e18beed7c8efd2456e003653e Mon Sep 17 00:00:00 2001 From: KumoLiu Date: Wed, 6 Sep 2023 15:28:08 +0800 Subject: [PATCH 11/13] add skip run Signed-off-by: KumoLiu --- runner.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/runner.sh b/runner.sh index f1c827d8a0..41f2df772e 100755 --- a/runner.sh +++ b/runner.sh @@ -110,6 +110,7 @@ skip_run_papermill=("${skip_run_papermill[@]}" .*TensorRT_inference_acceleration skip_run_papermill=("${skip_run_papermill[@]}" .*mednist_classifier_ray*) # https://github.com/Project-MONAI/tutorials/issues/1307 skip_run_papermill=("${skip_run_papermill[@]}" .*TorchIO_MONAI_PyTorch_Lightning*) # https://github.com/Project-MONAI/tutorials/issues/1324 skip_run_papermill=("${skip_run_papermill[@]}" .*GDS_dataset*) # https://github.com/Project-MONAI/tutorials/issues/1324 
+skip_run_papermill=("${skip_run_papermill[@]}" .*pythonic_bundle_access*) # output formatting separator="" From bd7b1fe234cd1ca498c7bfe09d2a3b1583f5174e Mon Sep 17 00:00:00 2001 From: KumoLiu Date: Wed, 6 Sep 2023 17:31:28 +0800 Subject: [PATCH 12/13] address comments Signed-off-by: KumoLiu --- .../pythonic_bundle_access.ipynb | 195 ++++++++++++++---- runner.sh | 1 - 2 files changed, 153 insertions(+), 43 deletions(-) diff --git a/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb b/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb index a9f3d9c5f4..640f36d55a 100644 --- a/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb +++ b/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb @@ -31,7 +31,9 @@ "\n", "6. **Utilizing Pretrained Weights from the Bundle**: You can conveniently employ pretrained weights from the bundle and customize them using the `load` API.\n", "\n", - "7. **A Simple Comparison of the Usage between `ConfigParser` and `BundleWorkflow`**" + "7. **A Simple Comparison of the Usage between `ConfigParser` and `BundleWorkflow`**\n", + "\n", + "The bundle documentation and specification can be found here: https://docs.monai.io/en/stable/bundle_intro.html" ] }, { @@ -67,10 +69,12 @@ "import shutil\n", "import tempfile\n", "from pathlib import Path\n", + "import monai\n", "from monai.engines import EnsembleEvaluator\n", "from monai.networks.nets import SegResNet\n", "from monai.transforms import MeanEnsembled, Compose\n", "from monai.config import print_config\n", + "from monai.apps import download_and_extract\n", "from monai.bundle import download, create_workflow, ConfigParser, load\n", "\n", "print_config()" @@ -106,6 +110,31 @@ "print(root_dir)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Download dataset\n", + "\n", + "Downloads and extracts the dataset. \n", + "The dataset comes from http://medicaldecathlon.com/." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "resource = \"https://msd-for-monai.s3-us-west-2.amazonaws.com/Task09_Spleen.tar\"\n", + "md5 = \"410d4a301da4e5b2f6f86ec3ddba524e\"\n", + "\n", + "compressed_file = os.path.join(root_dir, \"Task09_Spleen.tar\")\n", + "data_dir = os.path.join(root_dir, \"Task09_Spleen\")\n", + "if not os.path.exists(data_dir):\n", + " download_and_extract(resource, compressed_file, root_dir, md5)" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -115,7 +144,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 7, "metadata": {}, "outputs": [ { @@ -125,23 +154,23 @@ "name spleen_ct_segmentation\n", "version None\n", "bundle_dir /workspace/Data\n", - "source monaihosting\n", + "source github\n", "repo None\n", "url None\n", "remove_prefix monai_\n", "progress True\n", - "2023-09-06 06:40:06,678 - INFO - --- input summary of monai.bundle.scripts.download ---\n", - "2023-09-06 06:40:06,679 - INFO - > name: 'spleen_ct_segmentation'\n", - "2023-09-06 06:40:06,679 - INFO - > bundle_dir: '/workspace/Data'\n", - "2023-09-06 06:40:06,679 - INFO - > source: 'monaihosting'\n", - "2023-09-06 06:40:06,679 - INFO - > remove_prefix: 'monai_'\n", - "2023-09-06 06:40:06,679 - INFO - > progress: True\n", - "2023-09-06 06:40:06,680 - INFO - ---\n", + "2023-09-06 08:44:11,165 - INFO - --- input summary of monai.bundle.scripts.download ---\n", + "2023-09-06 08:44:11,167 - INFO - > name: 'spleen_ct_segmentation'\n", + "2023-09-06 08:44:11,167 - INFO - > bundle_dir: '/workspace/Data'\n", + "2023-09-06 08:44:11,167 - INFO - > source: 'github'\n", + "2023-09-06 08:44:11,167 - INFO - > remove_prefix: 'monai_'\n", + "2023-09-06 08:44:11,168 - INFO - > progress: True\n", + "2023-09-06 08:44:11,168 - INFO - ---\n", "\n", "\n", - "2023-09-06 06:40:07,316 - INFO - Expected md5 is None, skip md5 check for file /workspace/Data/spleen_ct_segmentation_v0.5.3.zip.\n", - 
"2023-09-06 06:40:07,316 - INFO - File exists: /workspace/Data/spleen_ct_segmentation_v0.5.3.zip, skipped downloading.\n", - "2023-09-06 06:40:07,317 - INFO - Writing into directory: /workspace/Data.\n" + "2023-09-06 08:44:12,165 - INFO - Expected md5 is None, skip md5 check for file /workspace/Data/spleen_ct_segmentation_v0.5.3.zip.\n", + "2023-09-06 08:44:12,165 - INFO - File exists: /workspace/Data/spleen_ct_segmentation_v0.5.3.zip, skipped downloading.\n", + "2023-09-06 08:44:12,166 - INFO - Writing into directory: /workspace/Data.\n" ] } ], @@ -159,7 +188,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 5, "metadata": {}, "outputs": [ { @@ -169,13 +198,13 @@ "workflow_name None\n", "config_file /workspace/Data/spleen_ct_segmentation/configs/train.json\n", "workflow_type train\n", - "2023-09-06 06:47:55,435 - INFO - --- input summary of monai.bundle.scripts.run ---\n", - "2023-09-06 06:47:55,437 - INFO - > config_file: '/workspace/Data/spleen_ct_segmentation/configs/train.json'\n", - "2023-09-06 06:47:55,439 - INFO - > workflow_type: 'train'\n", - "2023-09-06 06:47:55,440 - INFO - ---\n", + "2023-09-06 09:18:47,393 - INFO - --- input summary of monai.bundle.scripts.run ---\n", + "2023-09-06 09:18:47,395 - INFO - > config_file: '/workspace/Data/spleen_ct_segmentation/configs/train.json'\n", + "2023-09-06 09:18:47,396 - INFO - > workflow_type: 'train'\n", + "2023-09-06 09:18:47,397 - INFO - ---\n", "\n", "\n", - "2023-09-06 06:47:55,441 - INFO - Setting logging properties based on config: /workspace/Data/spleen_ct_segmentation/configs/logging.conf.\n" + "2023-09-06 09:18:47,397 - INFO - Setting logging properties based on config: /workspace/Data/spleen_ct_segmentation/configs/logging.conf.\n" ] } ], @@ -263,9 +292,8 @@ ], "source": [ "# 1 override them when you create the workflow\n", - "dataset_dir = Path(root_dir) / \"Task09_Spleen\"\n", "bundle_root = root_dir\n", - "override = {\"epochs\": 1, \"dataset_dir\": str(dataset_dir), 
\"bundle_root\": bundle_root}\n", + "override = {\"epochs\": 1, \"dataset_dir\": str(data_dir), \"bundle_root\": bundle_root}\n", "train_workflow = create_workflow(config_file=config_file, workflow_type=\"train\", **override)\n", "print(\"max epochs:\", train_workflow.max_epochs)" ] @@ -393,34 +421,117 @@ "## A Simple Comparison of the Usage between `ConfigParser` and `BundleWorkflow`" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Loading Configuration\n", + "\n", + "In the past, we needed to instantiate a `ConfigParser` and then read the configuration file and meta information using `read_config` and `read_meta` functions. However, now you can skip using `ConfigParser` and directly run `create_workflow` to create a `BundleWorkflow`. This new approach supports both configuration-based and Python-based bundles.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "workflow_name None\n", + "config_file /workspace/Data/spleen_ct_segmentation/configs/train.json\n", + "workflow_type train\n", + "2023-09-06 09:19:00,935 - INFO - --- input summary of monai.bundle.scripts.run ---\n", + "2023-09-06 09:19:00,936 - INFO - > config_file: '/workspace/Data/spleen_ct_segmentation/configs/train.json'\n", + "2023-09-06 09:19:00,938 - INFO - > workflow_type: 'train'\n", + "2023-09-06 09:19:00,939 - INFO - ---\n", + "\n", + "\n", + "2023-09-06 09:19:00,940 - INFO - Setting logging properties based on config: /workspace/Data/spleen_ct_segmentation/configs/logging.conf.\n" + ] + } + ], + "source": [ + "# Using ConfigParser\n", + "meta_file = Path(root_dir) / \"spleen_ct_segmentation\" / \"configs\" / \"metadata.json\"\n", + "bundle_config = ConfigParser()\n", + "bundle_config.read_config(config_file)\n", + "bundle_config.read_meta(meta_file)\n", + "\n", + "# Using BundleWorkflow\n", + "# config-based\n", + "workflow = 
create_workflow(config_file=str(config_file), workflow_type=\"train\")\n", + "# python-based\n", + "# more details refer to https://github.com/Project-MONAI/tutorials/tree/main/bundle/python_bundle_workflow\n", + "# workflow = create_workflow(workflow_name=scripts.train.TrainWorkflow)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Getting and Updating Configuration\n", + "\n", + "Previously, we utilized the `update` method to override configuration content. Now, with `BundleWorkflow`, you can override contents during workflow creation. To obtain an instantiated component, we used to use `get_parsed_content` before. However, now you can access it directly. Additionally, it's worth noting that you can also override the instantiated component, but be sure to initialize it again as needed.\n" + ] + }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "# ConfigParser\n", + "overrides = {\n", + " \"network_def#in_channels\": 1,\n", + " \"lr_scheduler#step_size\": 4000,\n", + " \"dataset_dir\": str(data_dir),\n", + " \"epochs\": 1\n", + "}\n", "\n", - "# load config\n", - "bundle_config = ConfigParser()\n", - "bundle_config.read_config(config_file)\n", - "# update config\n", - "bundle_config.config.update({\"bundle_root\": bundle_root})\n", - "# get config\n", - "bundle_config.get(\"bundle_root\")\n", - "bundle_config.get_parsed_content(\"train#handlers\", instantiate=True)\n", - "\n", - "\n", - "# BundleWorkflow\n", - "\n", - "# load config\n", - "workflow = create_workflow(config_file=config_file, workflow_type=\"train\")\n", - "# update config\n", - "workflow.bundle_root = bundle_root\n", - "workflow.initialize()\n", - "# get config\n", - "workflow.bundle_root\n", - "workflow.train_handlers" + "# Using ConfigParser\n", + "# override configuration content\n", + "bundle_config.config.update(overrides)\n", + "# get instantiate the network component\n", + "net = 
bundle_config.get_parsed_content(\"network_def\", instantiate=True)\n", + "\n", + "# Using BundleWorkflow\n", + "workflow = create_workflow(config_file=str(config_file), workflow_type=\"train\", **overrides)\n", + "# get instantiate the network component\n", + "net = workflow.network_def\n", + "workflow.network_def = SegResNet(\n", + " blocks_down=[1, 2, 2, 4],\n", + " blocks_up=[1, 1, 1],\n", + " init_filters=16,\n", + " in_channels=1,\n", + " out_channels=2\n", + ")\n", + "workflow.initialize() # re-initialize the workflow after changing the content\n", + "print(workflow.network_def)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Running the Updated Bundle\n", + "\n", + "In the past, running an updated configuration required using `export_config_file` to export the new configuration and using the `run` command. But now, you can streamline the process by directly using the `run` command to execute the new workflow." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Using ConfigParser\n", + "new_config_path = Path(root_dir) / \"spleen_ct_segmentation\" / \"configs\" / \"new_train_config.json\"\n", + "ConfigParser.export_config_file(bundle_config.config, str(new_config_path), indent=2)\n", + "monai.bundle.run(run_id=\"run\", init_id=None, final_id=None, meta_file=str(meta_file), config_file=str(new_config_path))\n", + "\n", + "# Using BundleWorkflow\n", + "workflow.run()" ] }, { diff --git a/runner.sh b/runner.sh index 41f2df772e..f1c827d8a0 100755 --- a/runner.sh +++ b/runner.sh @@ -110,7 +110,6 @@ skip_run_papermill=("${skip_run_papermill[@]}" .*TensorRT_inference_acceleration skip_run_papermill=("${skip_run_papermill[@]}" .*mednist_classifier_ray*) # https://github.com/Project-MONAI/tutorials/issues/1307 skip_run_papermill=("${skip_run_papermill[@]}" .*TorchIO_MONAI_PyTorch_Lightning*) # https://github.com/Project-MONAI/tutorials/issues/1324 
skip_run_papermill=("${skip_run_papermill[@]}" .*GDS_dataset*) # https://github.com/Project-MONAI/tutorials/issues/1324 -skip_run_papermill=("${skip_run_papermill[@]}" .*pythonic_bundle_access*) # output formatting separator="" From 09e10b097f94a631c37dd67424b7648b1b735a53 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 6 Sep 2023 09:32:48 +0000 Subject: [PATCH 13/13] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../pythonic_bundle_access.ipynb | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb b/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb index 640f36d55a..c0910e2e9c 100644 --- a/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb +++ b/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb @@ -482,12 +482,7 @@ "metadata": {}, "outputs": [], "source": [ - "overrides = {\n", - " \"network_def#in_channels\": 1,\n", - " \"lr_scheduler#step_size\": 4000,\n", - " \"dataset_dir\": str(data_dir),\n", - " \"epochs\": 1\n", - "}\n", + "overrides = {\"network_def#in_channels\": 1, \"lr_scheduler#step_size\": 4000, \"dataset_dir\": str(data_dir), \"epochs\": 1}\n", "\n", "# Using ConfigParser\n", "# override configuration content\n", @@ -500,11 +495,7 @@ "# get instantiate the network component\n", "net = workflow.network_def\n", "workflow.network_def = SegResNet(\n", - " blocks_down=[1, 2, 2, 4],\n", - " blocks_up=[1, 1, 1],\n", - " init_filters=16,\n", - " in_channels=1,\n", - " out_channels=2\n", + " blocks_down=[1, 2, 2, 4], blocks_up=[1, 1, 1], init_filters=16, in_channels=1, out_channels=2\n", ")\n", "workflow.initialize() # re-initialize the workflow after changing the content\n", "print(workflow.network_def)"