Commit 7324320: Updated for new API
SiddhantSadangi committed May 16, 2023
1 parent c68cf57 commit 7324320
Showing 11 changed files with 65 additions and 87 deletions.
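Most of the changes below are the neptune-client 1.0 migration: the `neptune.new` namespace became the top-level `neptune` package, `init_project()` takes `project=` instead of `name=`, the explicit `api_token` argument is dropped in favor of the `NEPTUNE_API_TOKEN` environment variable, and series logging moves from `.log()` to `.append()`. A minimal before/after sketch (project name taken from the diff; the run field is illustrative):

```python
# Legacy API (neptune-client < 1.0):
#   import neptune.new as neptune
#   project = neptune.init_project(name="common/project-images-segmentation")
#   run["metrics/loss"].log(0.5)

# 1.x API, as applied in this commit:
import neptune

# The client reads NEPTUNE_API_TOKEN from the environment.
project = neptune.init_project(project="common/project-images-segmentation")

run = neptune.init_run(project="common/project-images-segmentation")
run["metrics/loss"].append(0.5)  # .append() replaces the deprecated .log()
run.stop()
```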
7 changes: 2 additions & 5 deletions ci_evaluate.py
@@ -1,13 +1,11 @@
-import os
-
-import neptune.new as neptune
+import neptune
 import torch
 
 from dataset import BrainSegmentationDataset
 from model_utils import DiceLoss, UNet
 
 # (neptune) fetch project
-project = neptune.init_project(name="common/project-images-segmentation")
+project = neptune.init_project(project="common/project-images-segmentation")
 
 # (neptune) find best run
 best_run_df = project.fetch_runs_table(tag="best").to_pandas()
@@ -16,7 +14,6 @@
 # (neptune) re-init the chosen run
 base_namespace = "evaluate"
 ref_run = neptune.init_run(
-    api_token=os.getenv("NEPTUNE_API_TOKEN"),
     project="common/project-images-segmentation",
     tags=["evaluation"],
     source_files=None,
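Note the dropped `api_token` argument: under 1.x the credential is resolved from the environment, so CI only needs `NEPTUNE_API_TOKEN` exported. A sketch of the resulting re-init call (mirroring the hunk above; `source_files=None` disables source snapshotting):

```python
import neptune

# Assumes NEPTUNE_API_TOKEN is set in the environment.
ref_run = neptune.init_run(
    project="common/project-images-segmentation",
    tags=["evaluation"],
    source_files=None,
)
```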
9 changes: 3 additions & 6 deletions ci_monitoring.py
@@ -1,12 +1,10 @@
-import os
 import random
 
-import neptune.new as neptune
+import neptune
 
 # Fetch project
 project = neptune.init_project(
-    api_token=os.getenv("NEPTUNE_API_TOKEN"),
-    name="common/project-images-segmentation",
+    project="common/project-images-segmentation",
 )
 
 # Find run with "in-prod" tag
@@ -15,11 +13,10 @@

 # Resume run
 run = neptune.init_run(
-    api_token=os.getenv("NEPTUNE_API_TOKEN"),
     project="common/project-images-segmentation",
     with_id=run_id,
 )
 
 # Run monitoring logic
 # ... and log metadata to the run
-run["production/monitoring/loss"].log(random.random() * 100)
+run["production/monitoring/loss"].append(random.random() * 100)
2 changes: 1 addition & 1 deletion ci_monitoring_requirements.txt
@@ -1 +1 @@
-neptune-client
+neptune
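(The 1.0 release also renamed the PyPI package from `neptune-client` to `neptune`, hence the new pin.)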
23 changes: 10 additions & 13 deletions dataset.py
@@ -1,5 +1,4 @@
 import os
-import random
 
 import numpy as np
 import torch
@@ -23,8 +22,6 @@ def __init__(
         image_size=256,
         subset="train",
         random_sampling=True,
-        validation_cases=10,
-        seed=42,
     ):
         assert subset in ["all", "train", "validation"]
 
@@ -34,7 +31,7 @@ def __init__(
         self.volume_fnames = {}
         img_cnt = 0
         print(f"reading {subset} images...")
-        for (dirpath, dirnames, filenames) in os.walk(images_dir):
+        for dirpath, dirnames, filenames in os.walk(images_dir):
             image_slices = []
             mask_slices = []
             image_names = []
@@ -49,7 +46,7 @@ def __init__(
                     image_slices.append(imread(filepath))
                     image_names.append(filename)
                     img_cnt += 1
-            if len(image_slices) > 0:
+            if image_slices:
                 patient_id = dirpath.split("/")[-1]
                 volumes[patient_id] = np.array(image_slices[1:-1])
                 masks[patient_id] = np.array(mask_slices[1:-1])
@@ -58,23 +55,23 @@ def __init__(
         self.patients = sorted(volumes)
         self.volume_fnames
 
-        print("preprocessing {} volumes...".format(subset))
+        print(f"preprocessing {subset} volumes...")
         # create list of tuples (volume, mask)
         self.volumes = [(volumes[k], masks[k]) for k in self.patients]
 
-        print("cropping {} volumes...".format(subset))
+        print(f"cropping {subset} volumes...")
         # crop to smallest enclosing volume
         self.volumes = [crop_sample(v) for v in self.volumes]
 
-        print("padding {} volumes...".format(subset))
+        print(f"padding {subset} volumes...")
         # pad to square
         self.volumes = [pad_sample(v) for v in self.volumes]
 
-        print("resizing {} volumes...".format(subset))
+        print(f"resizing {subset} volumes...")
         # resize
         self.volumes = [resize_sample(v, size=image_size) for v in tqdm.tqdm(self.volumes)]
 
-        print("normalizing {} volumes...".format(subset))
+        print(f"normalizing {subset} volumes...")
         # normalize channel-wise
         self.volumes = [(normalize_volume(v), m) for v, m in self.volumes]
 
@@ -87,14 +84,14 @@ def __init__(
         # add channel dimension to masks
         self.volumes = [(v, m[..., np.newaxis]) for (v, m) in self.volumes]
 
-        print("done creating {} dataset".format(subset))
+        print(f"done creating {subset} dataset")
 
         # create global index for patient and slice (idx -> (p_idx, s_idx))
         num_slices = [v.shape[0] for v, m in self.volumes]
         self.patient_slice_index = list(
             zip(
-                sum([[i] * num_slices[i] for i in range(len(num_slices))], []),
-                sum([list(range(x)) for x in num_slices], []),
+                sum(([i] * num_slices[i] for i in range(len(num_slices))), []),
+                sum((list(range(x)) for x in num_slices), []),
             )
         )
 
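The last hunk only swaps list comprehensions for generator expressions inside `sum(..., [])`; the flattened (patient, slice) index is unchanged. A self-contained sketch with toy slice counts (not from the dataset):

```python
# Two patients with 3 and 2 slices -> global index of (patient_idx, slice_idx).
num_slices = [3, 2]
patient_slice_index = list(
    zip(
        sum(([i] * num_slices[i] for i in range(len(num_slices))), []),
        sum((list(range(x)) for x in num_slices), []),
    )
)
print(patient_slice_index)  # [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1)]
```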
19 changes: 9 additions & 10 deletions finetune.py
@@ -2,17 +2,16 @@
 # Date accessed: 23rd June, 2022
 
 import argparse
-import json
 import math
 import os
 
-import neptune.new as neptune
+import neptune
 import numpy as np
 import torch
 import torch.optim as optim
-from neptune.new.types import File
+from neptune.types import File
 from torch.utils.data import DataLoader
-from tqdm import tqdm
+from tqdm.auto import tqdm
 
 from dataset import BrainSegmentationDataset
 from model_utils import DiceLoss, UNet
@@ -69,7 +68,7 @@ def main(args):
     ##########################################
 
     # (neptune) fetch project
-    project = neptune.init_project(name="common/project-images-segmentation")
+    project = neptune.init_project(project="common/project-images-segmentation")
 
     # (neptune) find best run
     best_run_df = project.fetch_runs_table(tag="best").to_pandas()
@@ -108,7 +107,7 @@ def main(args):
         if outline_image.max() > 1:
             outline_image = outline_image.astype(np.float32) / 255
         # (neptune) Log sample images with mask overlay
-        ref_run["finetune/data/samples/images"].log(File.as_image(outline_image), name=fname)
+        ref_run["finetune/data/samples/images"].append(File.as_image(outline_image), name=fname)
 
     # (neptune) Log Preprocessing Params
     ref_run["finetune/data/preprocessing_params"] = {
@@ -180,7 +179,7 @@ def main(args):
             optimizer.step()
 
             # (neptune) Log train loss to finetune namespace
-            ref_run["finetuning/metrics/train_dice_loss"].log(loss.item())
+            ref_run["finetuning/metrics/train_dice_loss"].append(loss.item())
 
         ####################
         # Validation Phase #
@@ -205,7 +204,7 @@ def main(args):
                 loss = dsc_loss(y_pred, y_true)
 
                 # (neptune) Log validation loss to finetune namespace
-                ref_run["finetuning/metrics/validation_dice_loss"].log(loss.item())
+                ref_run["finetuning/metrics/validation_dice_loss"].append(loss.item())
 
                 y_pred_np = y_pred.detach().cpu().numpy()
                 validation_pred.extend([y_pred_np[s] for s in range(y_pred_np.shape[0])])
@@ -240,7 +239,7 @@ def main(args):
                     # (neptune) Log prediction and ground-truth on original image
                     ref_run[
                         f"finetuning/validation_prediction_progression/{fname}"
-                    ].log(
+                    ].append(
                         File.as_image(img),
                         name=f"Dice: {dice_coeff}",
                         description=desc,
@@ -261,7 +260,7 @@ def main(args):
             mean_dsc = 0.0
             print(e)
 
-        ref_run["finetuning/metrics/validation_dice_coefficient"].log(mean_dsc)
+        ref_run["finetuning/metrics/validation_dice_coefficient"].append(mean_dsc)
 
         if best_validation_dsc is None or mean_dsc > best_validation_dsc:
             best_validation_dsc = mean_dsc
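Every `.log()` in this file becomes `.append()`; scalar and image series share the method in 1.x, and `File` now imports from `neptune.types`. A hedged sketch of the series calls (field paths are illustrative; assumes `NEPTUNE_API_TOKEN` is set):

```python
import neptune
from neptune.types import File  # was: from neptune.new.types import File

run = neptune.init_run(project="common/project-images-segmentation")

for loss in (0.9, 0.5, 0.3):
    run["finetuning/metrics/train_dice_loss"].append(loss)  # scalar series

# Image series take File values plus optional name/description, as in the diff:
# run["finetune/data/samples/images"].append(File.as_image(img), name="sample")

run.stop()
```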
36 changes: 12 additions & 24 deletions model_utils.py
@@ -20,26 +20,16 @@ def __init__(self, in_channels=3, out_channels=1, init_features=32):

         self.bottleneck = UNet._block(features * 8, features * 16, name="bottleneck")
 
-        self.upconv4 = nn.ConvTranspose2d(
-            features * 16, features * 8, kernel_size=2, stride=2
-        )
+        self.upconv4 = nn.ConvTranspose2d(features * 16, features * 8, kernel_size=2, stride=2)
         self.decoder4 = UNet._block((features * 8) * 2, features * 8, name="dec4")
-        self.upconv3 = nn.ConvTranspose2d(
-            features * 8, features * 4, kernel_size=2, stride=2
-        )
+        self.upconv3 = nn.ConvTranspose2d(features * 8, features * 4, kernel_size=2, stride=2)
         self.decoder3 = UNet._block((features * 4) * 2, features * 4, name="dec3")
-        self.upconv2 = nn.ConvTranspose2d(
-            features * 4, features * 2, kernel_size=2, stride=2
-        )
+        self.upconv2 = nn.ConvTranspose2d(features * 4, features * 2, kernel_size=2, stride=2)
         self.decoder2 = UNet._block((features * 2) * 2, features * 2, name="dec2")
-        self.upconv1 = nn.ConvTranspose2d(
-            features * 2, features, kernel_size=2, stride=2
-        )
+        self.upconv1 = nn.ConvTranspose2d(features * 2, features, kernel_size=2, stride=2)
         self.decoder1 = UNet._block(features * 2, features, name="dec1")
 
-        self.conv = nn.Conv2d(
-            in_channels=features, out_channels=out_channels, kernel_size=1
-        )
+        self.conv = nn.Conv2d(in_channels=features, out_channels=out_channels, kernel_size=1)
 
     def forward(self, x):
         enc1 = self.encoder1(x)
@@ -69,7 +59,7 @@ def _block(in_channels, features, name):
             OrderedDict(
                 [
                     (
-                        name + "conv1",
+                        f"{name}conv1",
                         nn.Conv2d(
                             in_channels=in_channels,
                             out_channels=features,
@@ -78,10 +68,10 @@ def _block(in_channels, features, name):
                             bias=False,
                         ),
                     ),
-                    (name + "norm1", nn.BatchNorm2d(num_features=features)),
-                    (name + "relu1", nn.ReLU(inplace=True)),
+                    (f"{name}norm1", nn.BatchNorm2d(num_features=features)),
+                    (f"{name}relu1", nn.ReLU(inplace=True)),
                     (
-                        name + "conv2",
+                        f"{name}conv2",
                         nn.Conv2d(
                             in_channels=features,
                             out_channels=features,
@@ -90,8 +80,8 @@ def _block(in_channels, features, name):
                             bias=False,
                         ),
                     ),
-                    (name + "norm2", nn.BatchNorm2d(num_features=features)),
-                    (name + "relu2", nn.ReLU(inplace=True)),
+                    (f"{name}norm2", nn.BatchNorm2d(num_features=features)),
+                    (f"{name}relu2", nn.ReLU(inplace=True)),
                 ]
             )
         )
@@ -107,7 +97,5 @@ def forward(self, y_pred, y_true):
         y_pred = y_pred[:, 0].contiguous().view(-1)
         y_true = y_true[:, 0].contiguous().view(-1)
         intersection = (y_pred * y_true).sum()
-        dsc = (2.0 * intersection + self.smooth) / (
-            y_pred.sum() + y_true.sum() + self.smooth
-        )
+        dsc = (2.0 * intersection + self.smooth) / (y_pred.sum() + y_true.sum() + self.smooth)
         return 1.0 - dsc
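The `DiceLoss` hunks are formatting-only; the loss is still `1 - (2 * intersection + smooth) / (sum(y_pred) + sum(y_true) + smooth)`. A quick sanity check (assuming the conventional default `smooth=1.0` constructor):

```python
import torch

from model_utils import DiceLoss

# Identical prediction and target on a 4x4 mask:
# dsc = (2 * 16 + 1) / (16 + 16 + 1) = 1.0, so the loss is exactly 0.0.
y = torch.ones(1, 1, 4, 4)
print(DiceLoss()(y, y).item())  # 0.0
```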
10 changes: 7 additions & 3 deletions notebooks/FetchRunMetadata.ipynb
@@ -10,11 +10,12 @@
"import os\n",
"\n",
"import matplotlib.pyplot as plt\n",
"import neptune.new as neptune\n",
"import neptune\n",
"from skimage.io import imread"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "69f606be-21ab-4aa2-bb33-a88f3aece7ba",
"metadata": {},
@@ -38,7 +39,7 @@
    }
   ],
   "source": [
-    "project = neptune.init_project(name=\"common/project-images-segmentation\")\n",
+    "project = neptune.init_project(project=\"common/project-images-segmentation\")\n",
     "\n",
     "# (neptune) find best run for given data version\n",
     "best_run_df = project.fetch_runs_table(tag=\"best\").to_pandas()\n",
@@ -79,6 +80,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "e38dd3f5-449b-4f71-bac1-4d807b7b8119",
    "metadata": {},
@@ -110,6 +112,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "3c26ae75-23bc-411e-ae21-9839407e6279",
    "metadata": {},
@@ -137,6 +140,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "9d73d521-e5a9-4faa-9bb5-825d7396e7af",
    "metadata": {
@@ -193,7 +197,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.8"
"version": "3.8.15"
},
"neptune": {
"notebookId": "96aa97ac-0997-44a2-b8a2-a3452feaa830",
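The notebook's fetch pattern, condensed to the 1.x API (a sketch: `mode="read-only"` and the `sys/id` column are assumptions, not part of the diff):

```python
import neptune

project = neptune.init_project(
    project="common/project-images-segmentation", mode="read-only"
)

# Find the best run and reopen it without writing anything back.
best_run_id = project.fetch_runs_table(tag="best").to_pandas()["sys/id"].values[0]
run = neptune.init_run(
    project="common/project-images-segmentation",
    with_id=best_run_id,
    mode="read-only",
)
```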
7 changes: 3 additions & 4 deletions notebooks/ProjectMetaData.ipynb
@@ -6,10 +6,11 @@
"metadata": {},
"outputs": [],
"source": [
"import neptune.new as neptune"
"import neptune"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -61,8 +62,6 @@
"metadata": {},
"outputs": [],
"source": [
"import neptune.new as neptune\n",
"\n",
"model = neptune.init_model(name=\"Prediction model\", key=\"MOD\", project=\"common/project-images-segmentation\")"
]
},
@@ -92,7 +91,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.12"
"version": "3.10.11"
},
"vscode": {
"interpreter": {
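The `init_model` cell keeps its arguments; only the import changes. A sketch of the registration call (re-running it raises an error once a model with key "MOD" exists, so treat this as one-time setup):

```python
import neptune

model = neptune.init_model(
    name="Prediction model", key="MOD", project="common/project-images-segmentation"
)
model.stop()
```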
(3 more changed files not shown.)
