feat: change nnUNetPredictor
jaehwan committed May 28, 2024
1 parent 78340d2 commit b16a1a7
Showing 8 changed files with 255 additions and 97 deletions.
80 changes: 53 additions & 27 deletions Dockerfile
@@ -1,42 +1,68 @@
FROM pytorch/pytorch
FROM ubuntu:20.04

RUN groupadd -r user && useradd -m --no-log-init -r -g user user
# Set up environment variables
ENV DEBIAN_FRONTEND=noninteractive
ENV PATH="/home/user/.local/bin:${PATH}"
ENV nnUNet_results="/opt/algorithm/checkpoint/"
ENV nnUNet_raw="/opt/algorithm/nnUNet_raw_data_base"
ENV nnUNet_preprocessed="/opt/algorithm/preproc"
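# Force the Intel MKL threading layer and cap BLAS/OpenMP thread pools at one thread
# (avoids CPU oversubscription when inference runs its own worker processes)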
ENV MKL_SERVICE_FORCE_INTEL=1
ENV OMP_NUM_THREADS=1
ENV OPENBLAS_NUM_THREADS=1
ENV MKL_NUM_THREADS=1
ENV NUMEXPR_NUM_THREADS=1

RUN mkdir -p /opt/app /input /output \
    && chown user:user /opt/app /input /output
# Install system dependencies
RUN apt-get update && apt-get install -y \
    python3.9 \
    python3.9-venv \
    python3.9-dev \
    python3-pip \
    zip \
    unzip \
    gdb \
    && rm -rf /var/lib/apt/lists/*

# Set python3.9 as the default python3
RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1

# Add user
ARG UID=1000
ARG GID=1000
RUN groupadd -g ${GID} user && useradd -u ${UID} -m --no-log-init -r -g user user

# Create necessary directories and set permissions
RUN mkdir -p /opt/app /input /output /opt/algorithm/checkpoint/nnUNet \
    && chown -R user:user /opt/app /input /output /opt/algorithm/checkpoint/nnUNet

# Switch to user
USER user
WORKDIR /opt/app

ENV PATH="/home/user/.local/bin:${PATH}"
# Install Python packages
RUN python3 -m pip install --user -U pip
RUN python3 -m pip install --user pip-tools
RUN python3 -m pip install --upgrade pip

RUN python -m pip install --user -U pip && python -m pip install --user pip-tools && python -m pip install --upgrade pip
COPY --chown=user:user nnUNet/ /opt/app/nnUNet/
RUN python -m pip install -e nnUNet
#RUN python -m pip uninstall -y scipy
#RUN python -m pip install --user --upgrade scipy
# Install PyTorch and related packages
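# (the cu118 wheels bundle the CUDA 11.8 runtime; the host only needs an NVIDIA driver recent enough to support it)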
RUN python3 -m pip install --user torch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 --index-url https://download.pytorch.org/whl/cu118

COPY --chown=user:user requirements.txt /opt/app/
RUN python -m pip install --user -r requirements.txt
# Copy nnUNet and install
COPY --chown=user:user nnUNet/ /opt/app/nnUNet/
RUN python3 -m pip install --user -e nnUNet

# Copy requirements and install
COPY --chown=user:user requirements.txt /opt/app/
RUN python3 -m pip install --user -r requirements.txt

# This is the checkpoint file, uncomment the line below and modify /local/path/to/the/checkpoint to your needs
# Copy checkpoint and extract
COPY --chown=user:user nnUNetTrainer__nnUNetPlans__3d_fullres.zip /opt/algorithm/checkpoint/nnUNet/
RUN python -c "import zipfile; import os; zipfile.ZipFile('/opt/algorithm/checkpoint/nnUNet/nnUNetTrainer__nnUNetPlans__3d_fullres.zip').extractall('/opt/algorithm/checkpoint/nnUNet/')"
RUN python3 -c "import zipfile; import os; zipfile.ZipFile('/opt/algorithm/checkpoint/nnUNet/nnUNetTrainer__nnUNetPlans__3d_fullres.zip').extractall('/opt/algorithm/checkpoint/nnUNet/')"

# Copy custom scripts
COPY --chown=user:user custom_algorithm.py /opt/app/
COPY --chown=user:user process.py /opt/app/
COPY --chown=user:user calc_dice.py /opt/app/

# COPY --chown=user:user weights /opt/algorithm/checkpoint
ENV nnUNet_results="/opt/algorithm/checkpoint/"
ENV nnUNet_raw="/opt/algorithm/nnUNet_raw_data_base"
ENV nnUNet_preprocessed="/opt/algorithm/preproc"
# ENV ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=64(nope!)
# ENV nnUNet_def_n_proc=1

#ENTRYPOINT [ "python3", "-m", "process" ]

ENV MKL_SERVICE_FORCE_INTEL=1

# Launches the script
ENTRYPOINT python -m process $0 $@
# Launch the script
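# (exec form: python3 runs without a shell wrapper, so container stop signals reach it directly)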
ENTRYPOINT ["python3", "-m", "process"]
40 changes: 33 additions & 7 deletions README.md
@@ -1,13 +1,39 @@
# DMX Solution to HaNSeg Challenge
# DMX Solution to HaNSeg

The Head and Neck organ-at-risk CT & MR segmentation challenge; a contribution to the Grand Challenge (MICCAI 2023).
## Overview

Challenge URL: **[HaN-Seg 2023 challenge](https://han-seg2023.grand-challenge.org/)**
This repository contains scripts and tools for building a Docker algorithm, performing prediction on a test dataset, and calculating DSC (Dice Similarity Coefficient). Below are the steps to execute each of these tasks.

This solution is based on:
## Prerequisites

- [ANTsPY](https://antspy.readthedocs.io/en/latest/)
- [nnUNetv2](https://github.com/MIC-DKFZ/nnUNet/)
- [Zhack47](https://github.com/Zhack47/HaNSeg-QuantIF)
Make sure you have the following installed:
- Docker
- Python 3.9
- The required Python packages (install them with `pip install -r requirements.txt`)

## Steps

### 1. Build the Docker Algorithm

To build the Docker algorithm, run the following command in your terminal:

```sh
sh test.sh
```
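
Alternatively, `sh build.sh` builds the image directly and tags it as `hanseg2023algorithm_dmx:jhhan` (see `build.sh` below).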

### 2. Prediction on Test Dataset

To perform predictions on the test dataset, execute the following command:
```sh
python3 process.py
```

### 3. DSC Calculation

To calculate the Dice Similarity Coefficient (DSC), use the following command:
```sh
python3 calc_dice.py
```
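
For reference, the script computes the standard per-label Dice coefficient, `DSC = 2|P ∩ G| / (|P| + |G|)`, between each predicted mask `P` and its ground-truth mask `G`, then averages the labels present in each case into a `total` column (see `calc_dice.py` below).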

## Contact
Email: [email protected]
2 changes: 1 addition & 1 deletion build.sh
@@ -1,4 +1,4 @@
#!/usr/bin/env bash
SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
# docker build --no-cache -t hanseg2023algorithm "$SCRIPTPATH"
docker build -t hanseg2023algorithm_dmx "$SCRIPTPATH"
docker build -t hanseg2023algorithm_dmx:jhhan "$SCRIPTPATH"
107 changes: 107 additions & 0 deletions calc_dice.py
@@ -0,0 +1,107 @@
import os, sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__))))

import numpy as np
import nrrd
import pandas as pd

LABEL_dict = {
    "background": 0,
    "A_Carotid_L": 1,
    "A_Carotid_R": 2,
    "Arytenoid": 3,
    "Bone_Mandible": 4,
    "Brainstem": 5,
    "BuccalMucosa": 6,
    "Cavity_Oral": 7,
    "Cochlea_L": 8,
    "Cochlea_R": 9,
    "Cricopharyngeus": 10,
    "Esophagus_S": 11,
    "Eye_AL": 12,
    "Eye_AR": 13,
    "Eye_PL": 14,
    "Eye_PR": 15,
    "Glnd_Lacrimal_L": 16,
    "Glnd_Lacrimal_R": 17,
    "Glnd_Submand_L": 18,
    "Glnd_Submand_R": 19,
    "Glnd_Thyroid": 20,
    "Glottis": 21,
    "Larynx_SG": 22,
    "Lips": 23,
    "OpticChiasm": 24,
    "OpticNrv_L": 25,
    "OpticNrv_R": 26,
    "Parotid_L": 27,
    "Parotid_R": 28,
    "Pituitary": 29,
    "SpinalCord": 30,
}

def load_nrrd(file_path):
    data, _ = nrrd.read(file_path)
    return data

def dice_score(y_true, y_pred):
    eps = 1e-6
    y_true = y_true.flatten()
    y_pred = y_pred.flatten()
    intersection = np.sum(y_true * y_pred)
    # eps belongs in the denominator: it guards against division by zero when both masks are empty
    return (2. * intersection) / (np.sum(y_true) + np.sum(y_pred) + eps)
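
# Quick sanity check (hypothetical inputs): identical non-empty masks score ≈ 1.0,
# e.g. dice_score(np.array([0, 1, 1]), np.array([0, 1, 1])) returns 4 / (4 + 1e-6)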


def calculate_dice_scores(result_folder, gt_folder):
    data = []
    result_files = [f for f in os.listdir(result_folder) if f.endswith('.nrrd')]

    for result_file in result_files:
        case_id = result_file.split('_IMG')[0]
        gt_file = f"{case_id}_all_rois.seg.nrrd"

        result_path = os.path.join(result_folder, result_file)
        gt_path = os.path.join(gt_folder, gt_file)

        if not os.path.exists(gt_path):
            print(f"Ground truth file not found for {result_file}")
            continue

        result_data = load_nrrd(result_path)
        gt_data = load_nrrd(gt_path)

        case_dice_scores = {"file_name": result_file}

        for label, label_index in LABEL_dict.items():
            result_label = (result_data == label_index).astype(np.uint8)
            gt_label = (gt_data == label_index).astype(np.uint8)

            if np.sum(gt_label) == 0:
                case_dice_scores[label] = None
            else:
                score = dice_score(gt_label, result_label)
                case_dice_scores[label] = score

        # Calculate total mean Dice score for this case
        valid_scores = [score for score in case_dice_scores.values() if isinstance(score, (float, int))]
        total_dice_score = np.mean(valid_scores) if valid_scores else 0.0
        case_dice_scores["total"] = total_dice_score

        data.append(case_dice_scores)
        print(f"Processed {result_file}")

    return data


if __name__ == '__main__':
    result_folder = '/output/images/head_neck_oar'
    gt_folder = '/input/gt'

    data = calculate_dice_scores(result_folder, gt_folder)

    # Create a DataFrame and save to CSV
    df = pd.DataFrame(data)
    df = df[["file_name"] + list(LABEL_dict.keys()) + ["total"]]  # Ensure columns are in the correct order
    csv_path = "/output/dice_scores.csv"
    df.to_csv(csv_path, index=False)

    print(f"CSV file saved to {csv_path}")
5 changes: 0 additions & 5 deletions export.sh

This file was deleted.

70 changes: 37 additions & 33 deletions process.py
@@ -1,6 +1,8 @@
import time
import SimpleITK as sitk
import numpy as np
import os
os.environ['OPENBLAS_NUM_THREADS'] = '1'
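# Monkey-patch: module globals shadow builtins, so a bare `int` inside numpy's
# index_tricks now resolves to np.uint16 (presumably to shrink large index grids)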
np.lib.index_tricks.int = np.uint16
import ants
from os.path import join
@@ -104,39 +106,41 @@ def predict(self, *, image_ct: ants.ANTsImage, image_mrt1: ants.ANTsImage) -> sitk.Image:
        del ct_image
        # Shamelessly copied from nnUNet/nnunetv2/preprocessing/resampling/default_resampling.py
        new_shape = np.array([int(round(i / j * k)) for i, j, k in zip(fin_spacing, spacing[::-1], fin_size)])
        if new_shape.prod() < 1e8:
            print(f"Image is not too large ({new_shape.prod()}), using the folds (0,1,2,3,4) with mirror")
            predictor = nnUNetPredictor(tile_step_size=0.4, use_mirroring=True, perform_everything_on_gpu=True,
                                        verbose=True, verbose_preprocessing=True,
                                        allow_tqdm=True)
            predictor.initialize_from_trained_model_folder(trained_model_path, use_folds=(0,1,2,3),
                                                           checkpoint_name="checkpoint_best.pth")
            # predictor.allowed_mirroring_axes = (0, 2)
        elif new_shape.prod() < 1.3e8:
            print(f"Image is not too large ({new_shape.prod()}), using the folds (0,1,2,3,4)")

            predictor = nnUNetPredictor(tile_step_size=0.6, use_mirroring=True, perform_everything_on_gpu=False,
                                        verbose=True, verbose_preprocessing=True,
                                        allow_tqdm=True)
            predictor.initialize_from_trained_model_folder(trained_model_path, use_folds=(0,1,2,3),  # (0,1,2,3,4)
                                                           checkpoint_name="checkpoint_best.pth")
        elif new_shape.prod() < 1.7e8:
            print(f"Image is not too large ({new_shape.prod()}), using the 'all' fold with mirror")

            predictor = nnUNetPredictor(tile_step_size=0.4, use_mirroring=True, perform_everything_on_gpu=False,
                                        verbose=True, verbose_preprocessing=True,
                                        allow_tqdm=True)
            predictor.initialize_from_trained_model_folder(trained_model_path, use_folds="0",
                                                           checkpoint_name="checkpoint_best.pth")
            # predictor.allowed_mirroring_axes = (0, 2)

        else:
            predictor = nnUNetPredictor(tile_step_size=0.6, use_mirroring=True, perform_everything_on_gpu=False,
                                        verbose=True, verbose_preprocessing=True,
                                        allow_tqdm=True)
            print(f"Image is too large ({new_shape.prod()}), using the 'all' fold")
            predictor.initialize_from_trained_model_folder(trained_model_path, use_folds="0",
                                                           checkpoint_name="checkpoint_best.pth")
        # if new_shape.prod() < 1e8:
        #     print(f"Image is not too large ({new_shape.prod()}), using the folds (0,1,2,3,4) with mirror")
        #     predictor = nnUNetPredictor(tile_step_size=0.4, use_mirroring=True, perform_everything_on_gpu=True,
        #                                 verbose=True, verbose_preprocessing=True,
        #                                 allow_tqdm=True)
        #     predictor.initialize_from_trained_model_folder(trained_model_path, use_folds=(0,1,2,3),
        #                                                    checkpoint_name="checkpoint_best.pth")
        #     # predictor.allowed_mirroring_axes = (0, 2)
        # elif new_shape.prod() < 1.3e8:
        #     print(f"Image is not too large ({new_shape.prod()}), using the folds (0,1,2,3,4)")

        #     predictor = nnUNetPredictor(tile_step_size=0.6, use_mirroring=True, perform_everything_on_gpu=False,
        #                                 verbose=True, verbose_preprocessing=True,
        #                                 allow_tqdm=True)
        #     predictor.initialize_from_trained_model_folder(trained_model_path, use_folds=(0,1,2,3),  # (0,1,2,3,4)
        #                                                    checkpoint_name="checkpoint_best.pth")
        # elif new_shape.prod() < 1.7e8:
        #     print(f"Image is not too large ({new_shape.prod()}), using the 'all' fold with mirror")

        #     predictor = nnUNetPredictor(tile_step_size=0.4, use_mirroring=True, perform_everything_on_gpu=False,
        #                                 verbose=True, verbose_preprocessing=True,
        #                                 allow_tqdm=True)
        #     predictor.initialize_from_trained_model_folder(trained_model_path, use_folds="0",
        #                                                    checkpoint_name="checkpoint_best.pth")
        #     # predictor.allowed_mirroring_axes = (0, 2)

        # else:
        predictor = nnUNetPredictor(tile_step_size=0.5, use_mirroring=False, perform_everything_on_gpu=True,
                                    verbose=True, verbose_preprocessing=True,
                                    allow_tqdm=True)
        print(f"Image is too large ({new_shape.prod()}), using the 'all' fold")
        predictor.initialize_from_trained_model_folder(trained_model_path, use_folds="0",
                                                       checkpoint_name="checkpoint_best.pth")
        # predictor.initialize_from_trained_model_folder(trained_model_path, use_folds=(0,1,2,3),  # (0,1,2,3,4)
        #                                                checkpoint_name="checkpoint_best.pth")

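        # Per the nnUNetv2 API, the trailing (None, None, False) arguments are:
        # no previous-stage segmentation, no output file (return the array instead),
        # and do not return probability maps.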
        img_temp = predictor.predict_single_npy_array(images, properties, None, None, False).astype(np.uint8)
        del images
2 changes: 1 addition & 1 deletion requirements.txt
@@ -98,7 +98,7 @@ urllib3==1.26.15
# via requests
wheel==0.40.0
# via pip-tools

pynrrd
# The following packages are considered to be unsafe in a requirements file:
# pip
# setuptools
