Merge pull request #15 from angelolab/fix_issues
Fix issues
JLrumberger authored Jun 25, 2024
2 parents 9aa5375 + 041c29a commit e097da0
Showing 9 changed files with 104 additions and 63 deletions.
38 changes: 19 additions & 19 deletions .github/workflows/ci.yaml
@@ -33,22 +33,22 @@ jobs:
    secrets: inherit
    uses: ./.github/workflows/build.yaml

-  upload_coverage:
-    needs: [test]
-    name: Upload Coverage
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout ${{github.repository }}
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-
-      - name: Download Coverage Artifact
-        uses: actions/download-artifact@v4
-        # if `name` is not specified, all artifacts are downloaded.
-
-      - name: Upload Coverage to Coveralls
-        uses: coverallsapp/github-action@v2
-        with:
-          github-token: ${{ secrets.GITHUB_TOKEN }}
-          format: lcov
+  # upload_coverage:
+  #   needs: [test]
+  #   name: Upload Coverage
+  #   runs-on: ubuntu-latest
+  #   steps:
+  #     - name: Checkout ${{github.repository }}
+  #       uses: actions/checkout@v4
+  #       with:
+  #         fetch-depth: 0
+
+  #     - name: Download Coverage Artifact
+  #       uses: actions/download-artifact@v4
+  #       # if `name` is not specified, all artifacts are downloaded.
+
+  #     - name: Upload Coverage to Coveralls
+  #       uses: coverallsapp/github-action@v2
+  #       with:
+  #         github-token: ${{ secrets.GITHUB_TOKEN }}
+  #         format: lcov
2 changes: 1 addition & 1 deletion .github/workflows/test.yaml
@@ -14,7 +14,7 @@ jobs:
      fail-fast: false
      matrix:
        python-version: ["3.9", "3.10", "3.11"]
-        os: [ubuntu-latest, macos-latest, windows-latest]
+        os: [ubuntu-latest, macos-13, windows-latest]

    steps:
      - name: Checkout ${{ github.repository }}
6 changes: 4 additions & 2 deletions README.md
@@ -1,4 +1,6 @@
# Nimbus-Inference
+<p align="center">
+  <img src="https://github.com/angelolab/Nimbus-Inference/blob/fix_issues/assets/nimbus_logo.png">
+</p>

[![Tests][badge-tests]][link-tests]
[![Documentation][badge-docs]][link-docs]
@@ -24,7 +26,7 @@ Make a conda environment for Nimbus and activate it

Install CUDA libraries if you have a NVIDIA GPU available

-`conda install -c conda-forge cudatoolkit=11.2 cudnn=8.1.0`
+`conda install -c conda-forge cudatoolkit=11.8 cudnn=8.2.0`

Install the package and all depedencies in the conda environment

Binary file added assets/nimbus_logo.png
4 changes: 4 additions & 0 deletions pyproject.toml
@@ -41,6 +41,10 @@ dependencies = [
"zarr",
]

+[[project.source]]
+name = "pytorch"
+url = "https://download.pytorch.org/whl/cu118"
+priority = "supplemental"

[project.optional-dependencies]
dev = ["pre-commit", "twine>=4.0.2"]
2 changes: 1 addition & 1 deletion src/nimbus_inference/nimbus.py
Expand Up @@ -215,7 +215,7 @@ def predict_segmentation(self, input_data, preprocess_kwargs):
            input_data = torch.tensor(input_data).float()
            input_data = input_data.to(self.device)
            prediction = self.model(input_data)
-            prediction = prediction.cpu().squeeze(0).numpy()
+            prediction = prediction.cpu()
        else:
            if not hasattr(self, "model") or self.model.padding != "valid":
                self.initialize_model(padding="valid")
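With this change the model output stays a torch tensor; the conversion to NumPy is deferred to the caller (see the matching `isinstance` guard added in `utils.py` below). A minimal sketch of the pattern, with a random tensor standing in for real model output:

```python
import numpy as np
import torch

# stand-in for what predict_segmentation now returns (assumed shape b, c, h, w)
prediction = torch.rand(1, 1, 256, 256).cpu()

# downstream code converts only when a NumPy array is actually needed
if not isinstance(prediction, np.ndarray):
    prediction = prediction.numpy()
prediction = np.squeeze(prediction)  # -> (256, 256)
```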
51 changes: 29 additions & 22 deletions src/nimbus_inference/utils.py
Expand Up @@ -126,6 +126,8 @@ def __init__(
        self.fov_paths = fov_paths
        self.segmentation_naming_convention = segmentation_naming_convention
        self.suffix = suffix
+        if self.suffix[0] != ".":
+            self.suffix = "." + self.suffix
        self.silent = silent
        self.include_channels = include_channels
        self.multi_channel = self.is_multi_channel_tiff(fov_paths[0])
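A quick sketch of what this new guard does with user-supplied suffixes (example values invented):

```python
for suffix in ["tiff", ".tiff"]:
    if suffix[0] != ".":
        suffix = "." + suffix
    print(suffix)  # ".tiff" both times: the leading dot is added only when missing
```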
@@ -276,7 +278,7 @@ def segment_mean(instance_mask, prediction):
"""
props_df = regionprops_table(
label_image=instance_mask, intensity_image=prediction,
properties=['label' ,'intensity_mean']
properties=['label' , 'centroid', 'intensity_mean']
)
return props_df

@@ -313,25 +315,22 @@ def test_time_aug(
        lambda x: torch.flip(x, [2]),
        lambda x: torch.flip(x, [3])
    ]
-    input_batch = []
-    for forw_aug in forward_augmentations:
-        input_data_tmp = forw_aug(input_data).numpy() # bhwc
-        input_batch.append(np.concatenate(input_data_tmp))
-    input_batch = np.stack(input_batch, 0)
-    seg_map = app.predict_segmentation(
-        input_batch,
-        preprocess_kwargs={
-            "normalize": True,
-            "marker": channel,
-            "normalization_dict": normalization_dict},
-    )
-    seg_map = torch.from_numpy(seg_map)
-    tmp = []
-    for backw_aug, seg_map_tmp in zip(backward_augmentations, seg_map):
-        seg_map_tmp = backw_aug(seg_map_tmp[np.newaxis,...])
-        seg_map_tmp = np.squeeze(seg_map_tmp)
-        tmp.append(seg_map_tmp)
-    seg_map = np.stack(tmp, 0)
+    output = []
+    for forw_aug, backw_aug in zip(forward_augmentations, backward_augmentations):
+        input_data_aug = forw_aug(input_data).numpy() # bhwc
+        seg_map = app.predict_segmentation(
+            input_data_aug,
+            preprocess_kwargs={
+                "normalize": True,
+                "marker": channel,
+                "normalization_dict": normalization_dict},
+        )
+        if not isinstance(seg_map, torch.Tensor):
+            seg_map = torch.from_numpy(seg_map)
+        seg_map = backw_aug(seg_map)
+        seg_map = np.squeeze(seg_map)
+        output.append(seg_map)
+    seg_map = np.stack(output, 0)
    seg_map = np.mean(seg_map, axis = 0)
    return seg_map
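The rewrite pairs each forward augmentation with its inverse and runs them through the model one at a time instead of batching all augmented copies into a single predict call; each prediction is mapped back before averaging. A self-contained sketch of the same invert-then-average idea, with a trivial stand-in for the model:

```python
import numpy as np
import torch

identity = lambda x: x
# each forward transform is paired with the transform that undoes it
forward = [identity, lambda x: torch.rot90(x, 1, [2, 3]), lambda x: torch.flip(x, [2])]
backward = [identity, lambda x: torch.rot90(x, -1, [2, 3]), lambda x: torch.flip(x, [2])]

model = lambda x: x * 0.5            # stand-in for app.predict_segmentation
input_data = torch.rand(1, 1, 8, 8)  # b, c, h, w

output = []
for forw_aug, backw_aug in zip(forward, backward):
    seg_map = model(forw_aug(input_data))  # predict on the augmented view
    seg_map = backw_aug(seg_map)           # undo the augmentation on the prediction
    output.append(np.squeeze(seg_map.numpy()))
tta_prediction = np.mean(np.stack(output, 0), axis=0)
```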

@@ -387,9 +386,11 @@ def predict_fovs(
"normalization_dict": normalization_dict
},
)
if not isinstance(prediction, np.ndarray):
prediction = prediction.cpu().numpy()
prediction = np.squeeze(prediction)
if half_resolution:
prediction = cv2.resize(prediction, (w, h))
prediction = cv2.resize(prediction, (w, h), interpolation=cv2.INTER_NEAREST)
df = pd.DataFrame(segment_mean(instance_mask, prediction))
if df_fov.empty:
df_fov["label"] = df["label"]
@@ -502,7 +503,13 @@ def prepare_normalization_dict(
    if n_jobs > 1:
        get_reusable_executor().shutdown(wait=True)
    for channel in normalization_dict.keys():
-        normalization_dict[channel] = np.mean(normalization_dict[channel])
+        # exclude None and NaN values before averaging
+        norm_values = np.array(normalization_dict[channel])
+        norm_values = norm_values[~np.isnan(norm_values)]
+        norm_values = np.mean(norm_values)
+        if np.isnan(norm_values):
+            norm_values = 1e-8
+        normalization_dict[channel] = norm_values
    # save normalization dict
    with open(os.path.join(output_dir, output_name), 'w') as f:
        json.dump(normalization_dict, f)
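Per-channel values can come back as NaN, and `np.mean` over an array containing NaN is itself NaN, so the values are filtered first and floored at 1e-8 when nothing survives. A compact sketch of the same guard as a helper (name and values invented):

```python
import numpy as np

def safe_channel_mean(values, floor=1e-8):
    """Average normalization values, ignoring NaNs; fall back to floor if all are NaN."""
    arr = np.asarray(values, dtype=float)  # None entries become NaN on float conversion
    arr = arr[~np.isnan(arr)]
    return float(np.mean(arr)) if arr.size else floor

print(safe_channel_mean([0.8, np.nan, 1.2]))  # 1.0
print(safe_channel_mean([np.nan, np.nan]))    # 1e-08
```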
3 changes: 3 additions & 0 deletions templates/1_Nimbus_Predict.ipynb
@@ -132,6 +132,9 @@
"# ... or optionally, select a specific set of fovs manually\n",
"# fovs = [\"fov0\", \"fov1\"]\n",
"\n",
"# make sure to filter paths out that don't lead to FoVs, e.g. .DS_Store files.\n",
"fov_names = [fov_name for fov_name in fov_names if not fov_name.startswith(\".\")] \n",
"\n",
"# construct paths for fovs\n",
"fov_paths = [os.path.join(tiff_dir, fov_name) for fov_name in fov_names]"
]
61 changes: 43 additions & 18 deletions tests/test_utils.py
Expand Up @@ -22,7 +22,9 @@ def forward(self, x):
        return self.fn(x)


-def prepare_tif_data(num_samples, temp_dir, selected_markers, random=False, std=1):
+def prepare_tif_data(
+    num_samples, temp_dir, selected_markers, random=False, std=1, shape=(256, 256),
+):
    np.random.seed(42)
    fov_paths = []
    inst_paths = []
@@ -35,9 +37,9 @@ def prepare_tif_data(num_samples, temp_dir, selected_markers, random=False, std=
        os.makedirs(folder, exist_ok=True)
        for marker, scale in zip(selected_markers, std):
            if random:
-                img = np.random.rand(256, 256) * scale
+                img = np.random.rand(*shape) * scale
            else:
-                img = np.ones([256, 256])
+                img = np.ones(shape)
            io.imsave(
                os.path.join(folder, marker + ".tiff"),
                img,
@@ -46,19 +48,30 @@ def prepare_tif_data(num_samples, temp_dir, selected_markers, random=False, std=
        io.imsave(
            inst_path, np.array(
                [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]
-            ).repeat(64, axis=1).repeat(64, axis=0)
+            ).repeat(shape[1]//4, axis=1).repeat(shape[0]//4, axis=0)
        )
        if folder not in fov_paths:
            fov_paths.append(folder)
            inst_paths.append(inst_path)
+    # add ds_store file to test if it gets ignored
+    ds_store_paths = [
+        os.path.join(temp_dir, ".DS_Store"),
+        os.path.join(temp_dir, "fov_0", ".DS_Store"),
+        os.path.join(temp_dir, "deepcell_output", ".DS_Store"),
+    ]
+    for ds_store in ds_store_paths:
+        with open(ds_store, "w") as f:
+            f.write("test")
    return fov_paths, inst_paths


-def prepare_ome_tif_data(num_samples, temp_dir, selected_markers, random=False, std=1):
+def prepare_ome_tif_data(
+    num_samples, temp_dir, selected_markers, random=False, std=1, shape=(256, 256),
+):
    np.random.seed(42)
    metadata_dict = {
-        "SizeX" : 256,
-        "SizeY" : 256,
+        "SizeX" : shape[0],
+        "SizeY" : shape[1],
        "SizeC" : len(selected_markers) + 3,
        "PhysicalSizeX" : 0.5,
        "PhysicalSizeXUnit" : "µm",
@@ -74,9 +87,9 @@ def prepare_ome_tif_data(num_samples, temp_dir, selected_markers, random=False,
        channels = []
        for j, (marker, s) in enumerate(zip(selected_markers, std)):
            if random:
-                img = np.random.rand(256, 256) * s
+                img = np.random.rand(*shape) * s
            else:
-                img = np.ones([256, 256])
+                img = np.ones(shape)
            channels.append(img)
            metadata_dict["Channels"][marker] = {
                "Name" : marker,
@@ -99,10 +112,18 @@ def prepare_ome_tif_data(num_samples, temp_dir, selected_markers, random=False,
        io.imsave(
            inst_path, np.array(
                [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]
-            ).repeat(64, axis=1).repeat(64, axis=0)
+            ).repeat(shape[1]//4, axis=1).repeat(shape[0]//4, axis=0)
        )
        fov_paths.append(sample_name)
        inst_paths.append(inst_path)
+    # add ds_store file to test if it gets ignored
+    ds_store_paths = [
+        os.path.join(temp_dir, ".DS_Store"),
+        os.path.join(temp_dir, "deepcell_output", ".DS_Store"),
+    ]
+    for ds_store in ds_store_paths:
+        with open(ds_store, "w") as f:
+            f.write("test")
    return fov_paths, inst_paths


@@ -189,7 +210,7 @@ def segmentation_naming_convention(fov_path):
        return os.path.join(temp_dir_, "deepcell_output", fov_ + "_whole_cell.tiff")
    channel = "CD4"
    fov_paths, inst_paths = prepare_tif_data(
-        num_samples=1, temp_dir=temp_dir, selected_markers=[channel]
+        num_samples=1, temp_dir=temp_dir, selected_markers=[channel], shape=(512, 256)
    )
    output_dir = os.path.join(temp_dir, "nimbus_output")
    dataset = MultiplexDataset(fov_paths, segmentation_naming_convention, suffix=".tiff")
@@ -204,7 +225,7 @@ def segmentation_naming_convention(fov_path):
        batch_size=32
    )
    # check if we get the correct shape
-    assert pred_map.shape == (2, 256, 256)
+    assert pred_map.shape == (2, 512, 256)

    pred_map_2 = tt_aug(
        input_data, channel, nimbus, nimbus.normalization_dict, rotate=False, flip=True,
@@ -234,7 +255,7 @@ def segmentation_naming_convention(fov_path):
        return os.path.join(temp_dir_, "deepcell_output", fov_ + "_whole_cell.tiff")

    fov_paths, _ = prepare_tif_data(
-        num_samples=1, temp_dir=temp_dir, selected_markers=["CD4", "CD56"]
+        num_samples=1, temp_dir=temp_dir, selected_markers=["CD4", "CD56"], shape=(512, 256)
    )
    dataset = MultiplexDataset(fov_paths, segmentation_naming_convention, suffix=".tiff")
    output_dir = os.path.join(temp_dir, "nimbus_output")
@@ -244,7 +265,7 @@ def segmentation_naming_convention(fov_path):
    cell_table = predict_fovs(
        nimbus=nimbus, dataset=dataset, output_dir=output_dir,
        normalization_dict=nimbus.normalization_dict, suffix=".tiff",
-        save_predictions=False, half_resolution=True,
+        save_predictions=False, half_resolution=True, test_time_augmentation=False
    )
    # check if we get the correct number of cells
    assert len(cell_table) == 15
@@ -260,7 +281,7 @@ def segmentation_naming_convention(fov_path):
    cell_table = predict_fovs(
        nimbus=nimbus, dataset=dataset, output_dir=output_dir,
        normalization_dict=nimbus.normalization_dict, suffix=".tiff",
-        save_predictions=True, half_resolution=True,
+        save_predictions=True, half_resolution=True, test_time_augmentation=False
    )
    assert os.path.exists(os.path.join(output_dir, "fov_0", "CD4.tiff"))
    assert os.path.exists(os.path.join(output_dir, "fov_0", "CD56.tiff"))
@@ -288,7 +309,8 @@ def segmentation_naming_convention(fov_path):
        return os.path.join(temp_dir_, "deepcell_output", fov_ + "_whole_cell.tiff")

    fov_paths, _ = prepare_ome_tif_data(
-        num_samples=1, temp_dir=temp_dir, selected_markers=["CD4", "CD56"]
+        num_samples=1, temp_dir=temp_dir, selected_markers=["CD4", "CD56"],
+        shape=(512, 256)
    )
    # check if check inputs raises error when inputs are incorrect
    with pytest.raises(FileNotFoundError):
@@ -306,10 +328,13 @@ def segmentation_naming_convention(fov_path):
    fov_0_seg_ = dataset.get_segmentation(fov="fov_0")
    assert np.alltrue(fov_0_seg == fov_0_seg_)

-    # test everything again with single channel data
+    # test everything again with single channel
    fov_paths, _ = prepare_tif_data(
-        num_samples=1, temp_dir=temp_dir, selected_markers=["CD4", "CD56"]
+        num_samples=1, temp_dir=temp_dir, selected_markers=["CD4", "CD56"],
+        shape=(512, 256)
    )
+    cd4_channel = io.imread(os.path.join(fov_paths[0], "CD4.tiff"))
+    fov_0_seg = io.imread(segmentation_naming_convention(fov_paths[0]))
    dataset = MultiplexDataset(fov_paths, segmentation_naming_convention, suffix=".tiff")
    assert len(dataset) == 1
    assert set(dataset.channels) == set(["CD4", "CD56"])
