diff --git a/modules/omniverse/Spleen.png b/modules/omniverse/Spleen.png new file mode 100644 index 000000000..b3b60adfa Binary files /dev/null and b/modules/omniverse/Spleen.png differ diff --git a/modules/omniverse/omniverse.png b/modules/omniverse/omniverse.png new file mode 100644 index 000000000..858cb137a Binary files /dev/null and b/modules/omniverse/omniverse.png differ diff --git a/modules/omniverse/omniverse_integration.ipynb b/modules/omniverse/omniverse_integration.ipynb new file mode 100644 index 000000000..8ba6082f5 --- /dev/null +++ b/modules/omniverse/omniverse_integration.ipynb @@ -0,0 +1,605 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Copyright (c) MONAI Consortium \n", + "Licensed under the Apache License, Version 2.0 (the \"License\"); \n", + "you may not use this file except in compliance with the License. \n", + "You may obtain a copy of the License at \n", + "    http://www.apache.org/licenses/LICENSE-2.0 \n", + "Unless required by applicable law or agreed to in writing, software \n", + "distributed under the License is distributed on an \"AS IS\" BASIS, \n", + "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. \n", + "See the License for the specific language governing permissions and \n", + "limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# From 3D Segmentation to USD: A Complete Workflow for Hierarchical Mesh Conversion, USD Export, and NVIDIA Omniverse Integration\n", + "In this tutorial, we’ll cover:\n", + "\n", + "- **Download the MONAI Bundle**: First, we download a pre-trained model bundle from the MONAI Model Zoo. This bundle contains the models and configurations needed for medical image analysis, which can accelerate our development process.\n", + "\n", + "- **Run the Inference Workflow of the Bundle**: Using the downloaded model bundle, we run its built-in inference workflow to automatically segment or analyze the input medical imaging data and obtain the desired results.\n", + "\n", + "- **Convert NIfTI/DICOM to Mesh**: The inference results are usually in NIfTI or DICOM format. We convert this volumetric data into a 3D mesh model for visualization and further processing.\n", + "\n", + "- **Save the Mesh as OBJ and GLTF Formats**:\n", + " - Save Single Mesh as OBJ Format: For a single organ or structure mesh, we save it in OBJ format, which can be opened and viewed in a wide range of 3D applications.\n", + " - Save Combined Mesh as GLTF Format: When we have meshes of multiple organs, we save them in GLTF format. This format preserves the hierarchical structure of the organs, making it easier to convey the relationships between different organs during visualization.\n", + "\n", + "- **Visualization in Omniverse**: Finally, we import the GLTF mesh into NVIDIA Omniverse.
In Omniverse, we can utilize its powerful rendering and interactive capabilities to perform high-quality 3D visualization of medical imaging data, exploring the structures and spatial relationships of organs.\n", + "\n", + "This end-to-end process enables efficient, high-quality visualization in NVIDIA Omniverse, starting from raw segmentation data.\n", + "\n", + "\n", + "References:\n", + "\n", + "[1] https://developer.nvidia.com/blog/advancing-surgical-robotics-with-ai-driven-simulation-and-digital-twin-technology/" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup environment" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!python -c \"import monai\" || pip install -q \"monai-weekly[nibabel]\"\n", + "!python -c \"import vtk\" || pip install -q vtk\n", + "!python -c \"import pxr\" || pip install -q usd-core\n", + "!python -c \"import trimesh\" || pip install -q trimesh\n", + "!python -c \"import ipyvtklink\" || pip install -q ipyvtklink\n", + "!apt update\n", + "!apt install -y libgl1-mesa-glx\n", + "!apt install -y libxrender1" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup imports" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "MONAI version: 1.4.1rc1\n", + "Numpy version: 1.24.4\n", + "Pytorch version: 2.5.0a0+872d972e41.nv24.08\n", + "MONAI flags: HAS_EXT = False, USE_COMPILED = False, USE_META_DICT = False\n", + "MONAI rev id: e604d1841fe60c0ffb6978ae4116535ca8d8f34f\n", + "MONAI __file__: /workspace/Code/MONAI/monai/__init__.py\n", + "\n", + "Optional dependencies:\n", + "Pytorch Ignite version: 0.4.11\n", + "ITK version: 5.4.0\n", + "Nibabel version: 5.3.2\n", + "scikit-image version: 0.24.0\n", + "scipy version: 1.14.0\n", + "Pillow version: 10.4.0\n", + "Tensorboard version: 2.16.2\n", + "gdown version: 5.2.0\n", + "TorchVision version: 0.20.0a0\n", + "tqdm version: 4.66.5\n", + "lmdb version: 1.5.1\n", + "psutil version: 6.0.0\n", + "pandas version: 2.2.2\n", + "einops version: 0.8.0\n", + "transformers version: 4.40.2\n", + "mlflow version: 2.17.2\n", + "pynrrd version: 1.1.1\n", + "clearml version: 1.16.5\n", + "\n", + "For details about installing the optional dependencies, please visit:\n", + " https://docs.monai.io/en/latest/installation.html#installing-the-recommended-dependencies\n", + "\n" + ] + } + ], + "source": [ + "import os\n", + "import tempfile\n", + "import numpy as np\n", + "\n", + "import vtk\n", + "import vtkmodules\n", + "\n", + "# from ipyvtklink.viewer import ViewInteractiveWidget\n", + "\n", + "from utility import convert_to_mesh, convert_mesh_to_usd\n", + "\n", + "from monai.config import print_config\n", + "from monai.bundle.scripts import create_workflow, download\n", + "from monai.transforms import LoadImaged, SaveImage, Compose, BorderPadd, SqueezeDimd\n", + "\n", + "print_config()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup data directory\n", + "\n", + "You can specify a directory with the `MONAI_DATA_DIRECTORY` environment variable. \n", + "This allows you to save results and reuse downloads. \n", + "If not specified, a temporary directory will be used."
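+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For example, to reuse downloads across sessions, you could point the variable at a persistent location before running the next cell. A minimal sketch (the path `/workspace/Data` is only an assumed example):\n", + "\n", + "```python\n", + "import os\n", + "\n", + "os.environ[\"MONAI_DATA_DIRECTORY\"] = \"/workspace/Data\"  # assumed path; adjust to your setup\n", + "```"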
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/workspace/Data\n" + ] + } + ], + "source": [ + "directory = os.environ.get(\"MONAI_DATA_DIRECTORY\")\n", + "if directory is not None:\n", + " os.makedirs(directory, exist_ok=True)\n", + "root_dir = tempfile.mkdtemp() if directory is None else directory\n", + "print(root_dir)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Generate segmentation from MAISI\n", + "\n", + "### Download the MONAI Bundle\n", + "In this section, we download the MAISI bundle from the MONAI Model Zoo." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2024-12-11 11:31:31,904 - INFO - --- input summary of monai.bundle.scripts.download ---\n", + "2024-12-11 11:31:31,905 - INFO - > name: 'maisi_ct_generative'\n", + "2024-12-11 11:31:31,905 - INFO - > bundle_dir: '/workspace/Data'\n", + "2024-12-11 11:31:31,905 - INFO - > source: 'monaihosting'\n", + "2024-12-11 11:31:31,906 - INFO - > remove_prefix: 'monai_'\n", + "2024-12-11 11:31:31,906 - INFO - > progress: True\n", + "2024-12-11 11:31:31,906 - INFO - ---\n", + "\n", + "\n", + "2024-12-11 11:31:32,611 - INFO - Expected md5 is None, skip md5 check for file /workspace/Data/maisi_ct_generative_v0.4.5.zip.\n", + "2024-12-11 11:31:32,612 - INFO - File exists: /workspace/Data/maisi_ct_generative_v0.4.5.zip, skipped downloading.\n", + "2024-12-11 11:31:32,612 - INFO - Writing into directory: /workspace/Data.\n" + ] + } + ], + "source": [ + "download(name=\"maisi_ct_generative\", bundle_dir=root_dir)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Run the Inference Workflow of the Bundle\n", + "We use the `create_workflow` API from MONAI to streamline the inference process directly from the bundle.\n", + "\n", + "Key input details for inference, such as the body region and target anatomy, are specified in `configs/inference.json`. For a comprehensive explanation of the parameters, refer to `docs/README.md` in the bundle directory. Additionally, we adjust the `output_size`, `spacing`, and `num_splits` parameters to prevent out-of-memory issues during inference."
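+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note that the `#` character in override keys such as `autoencoder_def#num_splits` is the MONAI bundle convention for addressing an entry nested inside another config item. A rough, illustrative sketch of the same mechanism using `monai.bundle.ConfigParser` directly (the config path is an assumed example):\n", + "\n", + "```python\n", + "from monai.bundle import ConfigParser\n", + "\n", + "parser = ConfigParser()\n", + "parser.read_config(\"configs/inference.json\")  # assumed path inside a bundle\n", + "parser[\"autoencoder_def#num_splits\"] = 16  # '#' walks into the nested structure\n", + "```"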
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2024-12-11 11:32:14,010 - INFO - Setting logging properties based on config: /workspace/Data/maisi_ct_generative/configs/logging.conf.\n", + "2024-12-11 11:32:14,011 - py.warnings - WARNING - Detected deprecated name 'optional_packages_version' in configuration file, replacing with 'required_packages_version'.\n", + "\n", + "2024-12-11 11:32:14,021 - INFO - --- input summary of monai.bundle.scripts.run ---\n", + "2024-12-11 11:32:14,022 - INFO - > workflow_type: 'inference'\n", + "2024-12-11 11:32:14,022 - INFO - > bundle_root: '/workspace/Data/maisi_ct_generative'\n", + "2024-12-11 11:32:14,022 - INFO - > output_size_xy: 256\n", + "2024-12-11 11:32:14,022 - INFO - > output_size_z: 256\n", + "2024-12-11 11:32:14,022 - INFO - > spacing_xy: 1.5\n", + "2024-12-11 11:32:14,023 - INFO - > spacing_z: 1.5\n", + "2024-12-11 11:32:14,023 - INFO - > autoencoder_def#num_splits: 16\n", + "2024-12-11 11:32:14,023 - INFO - > mask_generation_autoencoder_def#num_splits: 16\n", + "2024-12-11 11:32:14,023 - INFO - ---\n", + "\n", + "\n" + ] + } + ], + "source": [ + "bundle_root = os.path.join(root_dir, \"maisi_ct_generative\")\n", + "override = {\n", + " \"output_size_xy\": 256,\n", + " \"output_size_z\": 256,\n", + " \"spacing_xy\": 1.5,\n", + " \"spacing_z\": 1.5,\n", + " \"autoencoder_def#num_splits\": 16,\n", + " \"mask_generation_autoencoder_def#num_splits\": 16,\n", + "}\n", + "workflow = create_workflow(\n", + " config_file=os.path.join(bundle_root, \"configs/inference.json\"),\n", + " workflow_type=\"inference\",\n", + " bundle_root=bundle_root,\n", + " **override,\n", + ")\n", + "\n", + "# Uncomment the following line to run the inference workflow;\n", + "# you will then get generated CT images and paired masks that can be used in the following steps.\n", + "# In this tutorial, we just use the test data (IntegrationTest-AbdomenCT.nii.gz) shipped with the bundle for demonstration.\n", + "# workflow.run()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Convert NIfTI/DICOM to Mesh and Save as OBJ/GLTF\n", + "\n", + "In this section, we convert the generated NIfTI segmentation files into meshes and save them as OBJ files. This process involves mapping labels to organs, exporting each organ as an individual mesh file, and generating a combined mesh file for all organs.\n", + "\n", + "We define a function `nii_to_mesh` to handle the conversion of NIfTI files to OBJ files. The workflow is as follows:\n", + "\n", + "- **Preprocessing**:\n", + "The function uses a series of transformations (`LoadImaged`, `BorderPadd`, and `SqueezeDimd`) to load and preprocess the input NIfTI file, ensuring it is ready for conversion.\n", + "\n", + "- **Organ Label Mapping**:\n", + "It iterates over a dictionary mapping organ names to their respective label values. For each organ, a binary mask (`single_organ`) is created to isolate the organ by assigning its corresponding label value, and the segmented organ is saved as a NIfTI file.\n", + "\n", + "- **Mesh Conversion**:\n", + "Each segmented NIfTI file is converted into an OBJ file using the `convert_to_mesh` function.\n", + "\n", + "- **Combined Mesh**:\n", + "A combined segmentation file is created by merging all organ segmentations into a single NIfTI file.
This file is then converted into a GLTF file, preserving the hierarchical structure of the organs.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# 17 groupings that cover 101 segments/regions out of 140\n", + "labels = {\n", + " \"Liver\": 1,\n", + " \"Spleen\": 3,\n", + " \"Pancreas\": 4,\n", + " \"Heart\": 115,\n", + " \"Body\": 200,\n", + " \"Gallbladder\": 10,\n", + " \"Stomach\": 12,\n", + " \"Small_bowel\": 19,\n", + " \"Colon\": 62,\n", + " \"Kidney\": {\"right_kidney\": 5, \"left_kidney\": 14},\n", + " \"Veins\": {\n", + " \"aorta\": 6,\n", + " \"inferior_vena_cava\": 7,\n", + " \"portal_vein_and_splenic_vein\": 17,\n", + " \"left_iliac_artery\": 58,\n", + " \"right_iliac_artery\": 59,\n", + " \"left_iliac_vena\": 60,\n", + " \"right_iliac_vena\": 61,\n", + " \"pulmonary_vein\": 119,\n", + " \"left_subclavian_artery\": 123,\n", + " \"right_subclavian_artery\": 124,\n", + " \"superior_vena_cava\": 125,\n", + " \"brachiocephalic_trunk\": 109,\n", + " \"left_brachiocephalic_vein\": 110,\n", + " \"right_brachiocephalic_vein\": 111,\n", + " \"left_common_carotid_artery\": 112,\n", + " \"right_common_carotid_artery\": 113,\n", + " },\n", + " \"Lungs\": {\n", + " \"left_lung_upper_lobe\": 28,\n", + " \"left_lung_lower_lobe\": 29,\n", + " \"right_lung_upper_lobe\": 30,\n", + " \"right_lung_middle_lobe\": 31,\n", + " \"right_lung_lower_lobe\": 32,\n", + " },\n", + " \"Spine\": {\n", + " \"vertebrae_L6\": 131,\n", + " \"vertebrae_L5\": 33,\n", + " \"vertebrae_L4\": 34,\n", + " \"vertebrae_L3\": 35,\n", + " \"vertebrae_L2\": 36,\n", + " \"vertebrae_L1\": 37,\n", + " \"vertebrae_T12\": 38,\n", + " \"vertebrae_T11\": 39,\n", + " \"vertebrae_T10\": 40,\n", + " \"vertebrae_T9\": 41,\n", + " \"vertebrae_T8\": 42,\n", + " \"vertebrae_T7\": 43,\n", + " \"vertebrae_T6\": 44,\n", + " \"vertebrae_T5\": 45,\n", + " \"vertebrae_T4\": 46,\n", + " \"vertebrae_T3\": 47,\n", + " \"vertebrae_T2\": 48,\n", + " \"vertebrae_T1\": 49,\n", + " \"vertebrae_C7\": 50,\n", + " \"vertebrae_C6\": 51,\n", + " \"vertebrae_C5\": 52,\n", + " \"vertebrae_C4\": 53,\n", + " \"vertebrae_C3\": 54,\n", + " \"vertebrae_C2\": 55,\n", + " \"vertebrae_C1\": 56,\n", + " \"sacrum\": 97,\n", + " \"vertebrae_S1\": 127,\n", + " },\n", + " \"Ribs\": {\n", + " \"left_rib_1\": 63,\n", + " \"left_rib_2\": 64,\n", + " \"left_rib_3\": 65,\n", + " \"left_rib_4\": 66,\n", + " \"left_rib_5\": 67,\n", + " \"left_rib_6\": 68,\n", + " \"left_rib_7\": 69,\n", + " \"left_rib_8\": 70,\n", + " \"left_rib_9\": 71,\n", + " \"left_rib_10\": 72,\n", + " \"left_rib_11\": 73,\n", + " \"left_rib_12\": 74,\n", + " \"right_rib_1\": 75,\n", + " \"right_rib_2\": 76,\n", + " \"right_rib_3\": 77,\n", + " \"right_rib_4\": 78,\n", + " \"right_rib_5\": 79,\n", + " \"right_rib_6\": 80,\n", + " \"right_rib_7\": 81,\n", + " \"right_rib_8\": 82,\n", + " \"right_rib_9\": 83,\n", + " \"right_rib_10\": 84,\n", + " \"right_rib_11\": 85,\n", + " \"right_rib_12\": 86,\n", + " \"costal_cartilages\": 114,\n", + " \"sternum\": 122,\n", + " },\n", + " \"Shoulders\": {\"left_scapula\": 89, \"right_scapula\": 90, \"left_clavicula\": 91, \"right_clavicula\": 92},\n", + " \"Hips\": {\"left_hip\": 95, \"right_hip\": 96},\n", + " \"Back_muscles\": {\n", + " \"left_gluteus_maximus\": 98,\n", + " \"right_gluteus_maximus\": 99,\n", + " \"left_gluteus_medius\": 100,\n", + " \"right_gluteus_medius\": 101,\n", + " \"left_gluteus_minimus\": 102,\n", + " \"right_gluteus_minimus\": 103,\n", + " \"left_autochthon\": 104,\n", + " 
\"right_autochthon\": 105,\n", + " \"left_iliopsoas\": 106,\n", + " \"right_iliopsoas\": 107,\n", + " },\n", + "}\n", + "\n", + "\n", + "def nii_to_mesh(input_nii_path, output_nii_path, output_obj_path):\n", + " \"\"\"\n", + " This function converts each organ into a separate OBJ file and generates a GLTF file\n", + " containing all organs with hierarchical structure.\n", + " It processes the input NIfTI file and groups 140 labels into 17 categories.\n", + "\n", + " Args:\n", + " input_nii_path: path to the nii file\n", + " output_nii_path: path to save the obj files\n", + " output_obj_path: path to save the gltf file\n", + " \"\"\"\n", + " if not os.path.exists(output_nii_path):\n", + " os.makedirs(output_nii_path)\n", + " pre_trans = Compose(\n", + " [\n", + " LoadImaged(keys=\"label\", ensure_channel_first=True),\n", + " BorderPadd(keys=\"label\", spatial_border=2),\n", + " SqueezeDimd(keys=\"label\", dim=0),\n", + " ]\n", + " )\n", + " orig_seg = pre_trans({\"label\": input_nii_path})[\"label\"]\n", + " all_organ = np.zeros_like(orig_seg, dtype=np.uint8)\n", + " all_label_values = {}\n", + "\n", + " save_trans = SaveImage(output_ext=\"nii.gz\", output_dtype=np.uint8)\n", + " for j, (organ_name, label_val) in enumerate(labels.items(), start=1):\n", + " single_organ = np.zeros_like(orig_seg, dtype=np.uint8)\n", + " print(f\"Assigning index {j} to label {organ_name}\")\n", + " if isinstance(label_val, dict):\n", + " for _, i in label_val.items():\n", + " all_organ[orig_seg == i] = j\n", + " single_organ[orig_seg == i] = j\n", + " else:\n", + " all_organ[orig_seg == label_val] = j\n", + " single_organ[orig_seg == label_val] = j\n", + " organ_filename = os.path.join(output_nii_path, organ_name)\n", + " save_trans(single_organ[None], meta_data=orig_seg.meta, filename=organ_filename)\n", + " convert_to_mesh(\n", + " f\"{organ_filename}.nii.gz\",\n", + " output_obj_path,\n", + " f\"{organ_name}.obj\",\n", + " label_value=j,\n", + " smoothing_factor=0.5,\n", + " reduction_ratio=0.0,\n", + " )\n", + " all_label_values[j] = organ_name\n", + "\n", + " all_organ_filename = os.path.join(output_nii_path, \"all_organs\")\n", + " save_trans(all_organ[None], meta_data=orig_seg.meta, filename=all_organ_filename)\n", + " convert_to_mesh(\n", + " f\"{all_organ_filename}.nii.gz\",\n", + " output_obj_path,\n", + " \"all_organs.gltf\",\n", + " label_value=all_label_values,\n", + " smoothing_factor=0.6,\n", + " reduction_ratio=0.0,\n", + " )\n", + " print(f\"Saved whole segmentation {all_organ_filename}\")\n", + "\n", + "\n", + "input_nii_path = f\"{bundle_root}/datasets/IntegrationTest-AbdomenCT.nii.gz\"\n", + "output_nii_path = f\"{bundle_root}/datasets/monai/nii\"\n", + "output_obj_path = f\"{bundle_root}/datasets/monai/obj\"\n", + "out = nii_to_mesh(input_nii_path, output_nii_path, output_obj_path)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Convert 3D model contain all organs to USD format\n", + "\n", + "[Universal Scene Description (OpenUSD)](https://openusd.org/release/index.html) is an extensible ecosystem of file formats, compositors, renderers, and other plugins for comprehensive 3D scene description." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "USD file successfully exported to /workspace/Data/maisi_ct_generative/datasets/monai/obj/all_organs.usd\n" + ] + } + ], + "source": [ + "obj_filename = f\"{bundle_root}/datasets/monai/obj/all_organs.gltf\"\n", + "usd_filename = f\"{bundle_root}/datasets/monai/obj/all_organs.usd\"\n", + "\n", + "convert_mesh_to_usd(obj_filename, usd_filename)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Visualize a single organ mesh\n", + "\n", + "Here we select one organ (the spleen) and visualize its mesh using `ViewInteractiveWidget`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Step 1: Read the mesh\n", + "reader = vtk.vtkOBJReader()\n", + "reader.SetFileName(f\"{bundle_root}/datasets/monai/obj/Spleen.obj\")\n", + "reader.Update()\n", + "\n", + "# Step 2: Create a mapper\n", + "mapper = vtkmodules.vtkRenderingCore.vtkPolyDataMapper()\n", + "mapper.SetInputData(reader.GetOutput())\n", + "print(f\"Number of Points: {mapper.GetInput().GetNumberOfPoints()}\")\n", + "print(f\"Number of Cells: {mapper.GetInput().GetNumberOfCells()}\")\n", + "\n", + "# Step 3: Create an actor\n", + "actor = vtkmodules.vtkRenderingCore.vtkActor()\n", + "actor.SetMapper(mapper)\n", + "\n", + "# Step 4: Create a renderer\n", + "renderer = vtk.vtkRenderer()\n", + "renderer.AddActor(actor)\n", + "render_window = vtk.vtkRenderWindow()\n", + "render_window.AddRenderer(renderer)\n", + "render_window.SetSize(800, 600)\n", + "render_window.SetOffScreenRendering(1)\n", + "\n", + "# Step 5: Create a render window interactor\n", + "render_window_interactor = vtk.vtkRenderWindowInteractor()\n", + "render_window_interactor.SetRenderWindow(render_window)\n", + "interactor_style = vtk.vtkInteractorStyleTrackballCamera()\n", + "render_window_interactor.SetInteractorStyle(interactor_style)\n", + "\n", + "# Uncomment the following lines to display the interactive widget\n", + "# render_window.Render()\n", + "# interactive_widget = ViewInteractiveWidget(render_window)\n", + "# interactive_widget" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "![Spleen](Spleen.png)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Visualization in Omniverse\n", + "\n", + "Download the [NVIDIA Omniverse](https://www.nvidia.com/en-us/omniverse/) launcher to explore applications such as USD Composer for viewing and manipulating the OpenUSD output file.\n", + "\n", + "![omniverse](./omniverse.png)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Visualization of All Organs\n", + "\n", + "You can view the 3D models using an online viewer such as https://3dviewer.net/#\n", + "\n", + "![all organs](result.png)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/modules/omniverse/result.png b/modules/omniverse/result.png new file mode 100644 index 000000000..14925c25e Binary files /dev/null and b/modules/omniverse/result.png differ diff --git
a/modules/omniverse/utility.py b/modules/omniverse/utility.py new file mode 100644 index 000000000..96e18d95f --- /dev/null +++ b/modules/omniverse/utility.py @@ -0,0 +1,228 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import os +import vtk +import json +from pxr import Usd, UsdGeom, Gf +import trimesh +import numpy as np +import matplotlib.pyplot as plt + + +def convert_to_mesh( + segmentation_path, output_folder, filename, label_value=1, smoothing_factor=0.5, reduction_ratio=0.0 +): + """ + Function to perform segmentation-to-mesh conversion and smoothing + """ + if not os.path.exists(output_folder): + os.makedirs(output_folder) + + # Generate 16 distinct colors using a colormap + colormap = plt.get_cmap("tab20") # Using tab20 for distinct colors + colors = [colormap(i) for i in np.linspace(0, 1, 16)] + + # Step 1: Load segmentation (binary labelmap, e.g., a NIfTI file) + reader = vtk.vtkNIFTIImageReader() + reader.SetFileName(segmentation_path) + reader.Update() + + label_values = {label_value: None} if isinstance(label_value, int) else label_value + if len(label_values.keys()) > 1: + renderer = vtk.vtkRenderer() + render_window = vtk.vtkRenderWindow() + render_window.AddRenderer(renderer) + actor_metadata = {} + for i, name in label_values.items(): + # Step 2: Create Closed Surface Representation using vtkDiscreteFlyingEdges3D + flying_edges = vtk.vtkDiscreteFlyingEdges3D() + flying_edges.SetInputConnection(reader.GetOutputPort()) + flying_edges.ComputeGradientsOff() + flying_edges.ComputeNormalsOff() + flying_edges.SetValue(0, i) + flying_edges.Update() + + if flying_edges.GetOutput().GetNumberOfPoints() == 0: + print(f"No points found for label {i}.
Skipping...") + continue + + # Step 3: Decimate the mesh + if reduction_ratio > 0.0: + decimation_filter = vtk.vtkDecimatePro() + decimation_filter.SetInputConnection(flying_edges.GetOutputPort()) + decimation_filter.SetFeatureAngle(60) + decimation_filter.SplittingOff() + decimation_filter.PreserveTopologyOn() + decimation_filter.SetMaximumError(1) + decimation_filter.SetTargetReduction(reduction_ratio) + decimation_filter.Update() + + # Step 4: Smooth the resulting mesh + smoothing_filter = vtk.vtkWindowedSincPolyDataFilter() + numberOfIterations = int(20 + smoothing_factor * 40) + passBand = pow(10.0, -4.0 * smoothing_factor) + if reduction_ratio > 0.0: + smoothing_filter.SetInputConnection(decimation_filter.GetOutputPort()) + else: + smoothing_filter.SetInputConnection(flying_edges.GetOutputPort()) + smoothing_filter.SetNumberOfIterations(numberOfIterations) + smoothing_filter.SetPassBand(passBand) + smoothing_filter.BoundarySmoothingOff() + smoothing_filter.FeatureEdgeSmoothingOff() + smoothing_filter.NonManifoldSmoothingOn() + smoothing_filter.NormalizeCoordinatesOn() + smoothing_filter.Update() + + # Step 5: Decimate the mesh further + decimation = vtk.vtkQuadricDecimation() + decimation.SetInputConnection(smoothing_filter.GetOutputPort()) + decimation.SetTargetReduction(0.9) # 90% reduction, the same as slicer + decimation.VolumePreservationOn() + decimation.Update() + + # Step 6: Generate normals for better shading + decimatedNormals = vtk.vtkPolyDataNormals() + decimatedNormals.SetInputConnection(decimation.GetOutputPort()) + decimatedNormals.SplittingOff() + decimatedNormals.ConsistencyOn() + decimatedNormals.Update() + + # Step 7: convert to LPS + ras2lps = vtk.vtkMatrix4x4() + ras2lps.SetElement(0, 0, -1) + ras2lps.SetElement(1, 1, -1) + ras2lpsTransform = vtk.vtkTransform() + ras2lpsTransform.SetMatrix(ras2lps) + transformer = vtk.vtkTransformPolyDataFilter() + transformer.SetTransform(ras2lpsTransform) + transformer.SetInputConnection(decimatedNormals.GetOutputPort()) + transformer.Update() + + if len(label_values.keys()) > 1: + mapper = vtk.vtkPolyDataMapper() + mapper.SetInputData(transformer.GetOutput()) + actor = vtk.vtkActor() + actor.SetMapper(mapper) + colorRGB = colors[i - 1][:3] if i < 15 else colors[i - 2][:3] + colorHSV = [0, 0, 0] + vtk.vtkMath.RGBToHSV(colorRGB, colorHSV) + colorHSV[1] = min(colorHSV[1] * 1.5, 1.0) # increase saturation + colorHSV[2] = min(colorHSV[2] * 1.0, 1.0) # increase brightness + colorRGB = [0, 0, 0] + vtk.vtkMath.HSVToRGB(colorHSV, colorRGB) + actor.GetProperty().SetColor(colorRGB[0], colorRGB[1], colorRGB[2]) + actor.GetProperty().SetInterpolationToGouraud() + actor_metadata[actor] = name + renderer.AddActor(actor) + + output_filename = os.path.join(output_folder, filename) + if len(label_values.keys()) > 1: + exporter = vtk.vtkGLTFExporter() + exporter.SetFileName(output_filename) + exporter.SetRenderWindow(render_window) + exporter.SetInlineData(True) + exporter.Write() + else: + if flying_edges.GetOutput().GetNumberOfPoints() > 0: + polydata = transformer.GetOutput() + writer = vtk.vtkOBJWriter() + writer.SetFileName(output_filename) + writer.SetInputData(polydata) + writer.Write() + + print(f"Mesh successfully exported to {output_filename}") + + if len(label_values.keys()) > 1: + # Modify GLTF to include actor names + with open(output_filename, "r") as f: + gltf_data = json.load(f) + + # Iterate over actors and add names to GLTF nodes + actors = renderer.GetActors() + actors.InitTraversal() + + for i, node in 
enumerate(gltf_data.get("nodes", [])): + actor = actors.GetNextActor() + if actor in actor_metadata: + node["name"] = actor_metadata[actor] + + # Save the modified GLTF file + modified_output_filename = output_filename.replace(".gltf", "_modified.gltf") + with open(modified_output_filename, "w") as f: + json.dump(gltf_data, f, indent=2) + print(f"Modified GLTF successfully exported to {modified_output_filename}") + + +def convert_mesh_to_usd(input_file, output_file): + """ + convert a mesh file to USD format + """ + # Load the mesh + mesh = trimesh.load(input_file) + + # Create a new USD stage + stage = Usd.Stage.CreateNew(output_file) + + # If the mesh is a Scene, process each geometry + if isinstance(mesh, trimesh.Scene): + for name, geometry in mesh.geometry.items(): + # Create a unique path for each mesh + mesh_path = f"/{name}" + usd_mesh = UsdGeom.Mesh.Define(stage, mesh_path) + + # Set vertex positions + usd_mesh.GetPointsAttr().Set([Gf.Vec3f(*vertex) for vertex in geometry.vertices]) + + # Set face indices and counts + face_vertex_indices = geometry.faces.flatten().tolist() + face_vertex_counts = [len(face) for face in geometry.faces] + + usd_mesh.GetFaceVertexIndicesAttr().Set(face_vertex_indices) + usd_mesh.GetFaceVertexCountsAttr().Set(face_vertex_counts) + + # Optionally, set normals + if geometry.vertex_normals is not None: + usd_mesh.GetNormalsAttr().Set([Gf.Vec3f(*normal) for normal in geometry.vertex_normals]) + usd_mesh.SetNormalsInterpolation("vertex") + + # Handle materials and other attributes if needed + else: + # It's a single mesh, proceed as before + usd_mesh = UsdGeom.Mesh.Define(stage, "/Mesh") + + # Set vertex positions + usd_mesh.GetPointsAttr().Set([Gf.Vec3f(*vertex) for vertex in mesh.vertices]) + + # Set face indices and counts + face_vertex_indices = mesh.faces.flatten().tolist() + face_vertex_counts = [len(face) for face in mesh.faces] + + usd_mesh.GetFaceVertexIndicesAttr().Set(face_vertex_indices) + usd_mesh.GetFaceVertexCountsAttr().Set(face_vertex_counts) + + # Optionally, set normals + if mesh.vertex_normals is not None: + usd_mesh.GetNormalsAttr().Set([Gf.Vec3f(*normal) for normal in mesh.vertex_normals]) + usd_mesh.SetNormalsInterpolation("vertex") + + # Save the USD file + stage.GetRootLayer().Save() + print(f"USD file successfully exported to {output_file}") + + +if __name__ == "__main__": + input_file = "/workspace/Data/maisi_ct_generative/datasets/output_scene.gltf" # or "input.obj" + output_file = "/workspace/Code/tutorials/modules/omniverse/output_scene.usd" + + convert_mesh_to_usd(input_file, output_file) diff --git a/runner.sh b/runner.sh index e547e859d..07c9c07d7 100755 --- a/runner.sh +++ b/runner.sh @@ -81,6 +81,7 @@ doesnt_contain_max_epochs=("${doesnt_contain_max_epochs[@]}" bending_energy_diff doesnt_contain_max_epochs=("${doesnt_contain_max_epochs[@]}" mask_augmentation_example.ipynb) doesnt_contain_max_epochs=("${doesnt_contain_max_epochs[@]}" maisi_inference_tutorial.ipynb) doesnt_contain_max_epochs=("${doesnt_contain_max_epochs[@]}" realism_diversity_metrics.ipynb) +doesnt_contain_max_epochs=("${doesnt_contain_max_epochs[@]}" omniverse_integration.ipynb) # Execution of the notebook in these folders / with the filename cannot be automated skip_run_papermill=() @@ -127,6 +128,7 @@ skip_run_papermill=("${skip_run_papermill[@]}" .*nuclei_classification_infer.ipy skip_run_papermill=("${skip_run_papermill[@]}" .*nuclick_infer.ipynb*) # https://github.com/Project-MONAI/tutorials/issues/1542 skip_run_papermill=("${skip_run_papermill[@]}" 
.*unet_segmentation_3d_ignite_clearml.ipynb*) # https://github.com/Project-MONAI/tutorials/issues/1555 skip_run_papermill=("${skip_run_papermill[@]}" .*vista_2d_tutorial_monai.ipynb*) +skip_run_papermill=("${skip_run_papermill[@]}" .*learn2reg_oasis_unpaired_brain_mr.ipynb*) # output formatting separator=""