Skip to content

Commit

Permalink
Add support for saving one or many files in slicing.
Browse files Browse the repository at this point in the history
  • Loading branch information
We-Gold committed Aug 5, 2024
1 parent 3b6f80a commit da3b97d
Show file tree
Hide file tree
Showing 5 changed files with 112 additions and 47 deletions.
41 changes: 32 additions & 9 deletions python/ouroboros/common/file_system.py
Original file line number Diff line number Diff line change
Expand Up @@ -134,13 +134,17 @@ def save_output_for_backproject_docker(

# Copy the output files to the host
files = [
{"sourcePath": host_output_file, "targetPath": target_path},
{
"sourcePath": (
host_output_slices
if host_output_slices is not None
else host_output_file
),
"targetPath": target_path,
},
{"sourcePath": host_output_config_file, "targetPath": target_path},
]

if host_output_slices is not None:
files.append({"sourcePath": host_output_slices, "targetPath": target_path})

success, error = copy_to_host(files)

if not success:
Expand Down Expand Up @@ -172,7 +176,7 @@ def load_options_for_slice(options_path: str) -> SliceOptions | str:

def load_options_for_slice_docker(
options_path: str, target_path: str = "./"
) -> tuple[SliceOptions, str, str] | str:
) -> tuple[SliceOptions, str, str, str | None] | str:
"""
Loads the options for slicing a volume and copies the necessary files to the docker volume.
Expand All @@ -185,7 +189,7 @@ def load_options_for_slice_docker(
Returns
-------
tuple[SliceOptions, str, str] | str
tuple[SliceOptions, str, str, str | None] | str
        The options for slicing the volume, the host path to the output file, the host path to the config file, and the host path to the slices folder (None when a single output file is produced).
"""

Expand All @@ -208,6 +212,13 @@ def load_options_for_slice_docker(
host_output_file = combine_unknown_folder(
host_output_folder, slice_options.output_file_name + ".tif"
)
host_output_slices = (
combine_unknown_folder(
host_output_folder, slice_options.output_file_name + "-slices"
)
if slice_options.make_single_file is False
else None
)
host_output_config_file = combine_unknown_folder(
host_output_folder, slice_options.output_file_name + "-configuration.json"
)
Expand All @@ -233,11 +244,14 @@ def load_options_for_slice_docker(
slice_options.neuroglancer_json
)

return slice_options, host_output_file, host_output_config_file
return slice_options, host_output_file, host_output_config_file, host_output_slices


def save_output_for_slice_docker(
host_output_file: str, host_output_config_file: str, target_path: str = "./"
host_output_file: str,
host_output_config_file: str,
host_output_slices=None,
target_path: str = "./",
) -> None | str:
"""
Saves the output files for slicing a volume to the host.
Expand All @@ -248,6 +262,8 @@ def save_output_for_slice_docker(
The path to the output file on the host.
host_output_config_file : str
The path to the config file on the host.
host_output_slices : str, optional
The path to the slices folder on the host, by default None
target_path : str, optional
The path to the target folder in the docker volume, by default "./"
Expand All @@ -259,7 +275,14 @@ def save_output_for_slice_docker(

# Copy the output files to the host
files = [
{"sourcePath": host_output_file, "targetPath": target_path},
{
"sourcePath": (
host_output_slices
if host_output_slices is not None
else host_output_file
),
"targetPath": target_path,
},
{"sourcePath": host_output_config_file, "targetPath": target_path},
]
success, error = copy_to_host(files)
Expand Down
6 changes: 4 additions & 2 deletions python/ouroboros/common/server_handlers.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,9 @@ def handle_slice_docker(task: SliceTask):
task.status = "error"
return

slice_options, host_output_file, host_output_config_file = load_result
slice_options, host_output_file, host_output_config_file, host_output_slices = (
load_result
)

slice_result = handle_slice_core(task, slice_options)

Expand All @@ -56,7 +58,7 @@ def handle_slice_docker(task: SliceTask):
return

save_result = save_output_for_slice_docker(
host_output_file, host_output_config_file
host_output_file, host_output_config_file, host_output_slices=host_output_slices
)

if save_result:
Expand Down
106 changes: 74 additions & 32 deletions python/ouroboros/pipeline/slice_parallel_pipeline.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,12 @@
slice_volume_from_grids,
)
from ouroboros.helpers.volume_cache import VolumeCache
from ouroboros.helpers.files import join_path, load_and_save_tiff_from_slices
from ouroboros.helpers.files import join_path
from .pipeline import PipelineStep
from ouroboros.helpers.options import SliceOptions
import numpy as np
import concurrent.futures
from tifffile import imwrite
from tifffile import imwrite, memmap
import os
import multiprocessing
import time
Expand Down Expand Up @@ -59,6 +59,45 @@ def _process(self, input_data: tuple[any]) -> None | str:
config.output_file_folder, config.output_file_name + ".tif"
)

# Create an empty tiff to store the slices
if config.make_single_file:
# Make sure slice rects is not empty
if len(slice_rects) == 0:
return "No slice rects were provided."

try:
resolution = volume_cache.get_resolution_um()[:2]
resolutionunit = "MICROMETER"

# Determine the dimensions of the image
has_color_channels = volume_cache.has_color_channels()
num_color_channels = (
volume_cache.get_num_channels() if has_color_channels else None
)

# Create a single tif file with the same dimensions as the slices
temp_shape = (
slice_rects.shape[0],
config.slice_width,
config.slice_height,
) + ((num_color_channels,) if has_color_channels else ())
temp_data = np.zeros(temp_shape, dtype=volume_cache.get_volume_dtype())

imwrite(
output_file_path,
temp_data,
software="ouroboros",
resolution=resolution,
resolutionunit=resolutionunit,
photometric=(
"rgb"
if has_color_channels and num_color_channels > 1
else "minisblack"
),
)
except BaseException as e:
return f"Error creating single tif file: {e}"

# Calculate the number of digits needed to store the number of slices
num_digits = len(str(len(slice_rects) - 1))

Expand Down Expand Up @@ -115,6 +154,11 @@ def downloads_done():
slice_rects,
self.num_threads,
num_digits,
single_output_path=(
output_file_path
if config.make_single_file
else None
),
)
)

Expand Down Expand Up @@ -154,21 +198,6 @@ def downloads_done():
except BaseException as e:
return f"Error downloading data: {e}"

if config.make_single_file:
try:
resolution = volume_cache.get_resolution_um()[:2]
resolutionunit = "MICROMETER"

load_and_save_tiff_from_slices(
folder_name,
output_file_path,
delete_intermediate=self.delete_intermediate,
resolution=resolution,
resolutionunit=resolutionunit,
)
except BaseException as e:
return f"Error creating single tif file: {e}"

# Update the pipeline input with the output file path
pipeline_input.output_file_path = output_file_path

Expand All @@ -189,7 +218,13 @@ def thread_worker_iterative(


def process_worker_save_parallel(
config, folder_name, processing_data, slice_rects, num_threads, num_digits
config,
folder_name,
processing_data,
slice_rects,
num_threads,
num_digits,
single_output_path=None,
):
volume, bounding_box, slice_indices, volume_index = processing_data

Expand Down Expand Up @@ -221,20 +256,27 @@ def process_worker_save_parallel(
)
durations["slice_volume"].append(time.perf_counter() - start)

# Using a ThreadPoolExecutor within the process for saving slices
with concurrent.futures.ThreadPoolExecutor(
max_workers=num_threads
) as thread_executor:
futures = []

for i, slice_i in zip(slice_indices, slices):
start = time.perf_counter()
filename = join_path(folder_name, f"{str(i).zfill(num_digits)}.tif")
futures.append(thread_executor.submit(save_thread, filename, slice_i))
durations["save"].append(time.perf_counter() - start)

for future in concurrent.futures.as_completed(futures):
future.result()
if single_output_path is None:
# Using a ThreadPoolExecutor within the process for saving slices
with concurrent.futures.ThreadPoolExecutor(
max_workers=num_threads
) as thread_executor:
futures = []

for i, slice_i in zip(slice_indices, slices):
start = time.perf_counter()
filename = join_path(folder_name, f"{str(i).zfill(num_digits)}.tif")
futures.append(thread_executor.submit(save_thread, filename, slice_i))
durations["save"].append(time.perf_counter() - start)

for future in concurrent.futures.as_completed(futures):
future.result()
else:
# Save the slices to a previously created tiff file
mmap = memmap(single_output_path)
mmap[slice_indices] = slices
mmap.flush()
del mmap

durations["total_process"].append(time.perf_counter() - start_total)

Expand Down
4 changes: 1 addition & 3 deletions resources/processes/volume-server-script.mjs
Original file line number Diff line number Diff line change
Expand Up @@ -129,7 +129,7 @@ function copyFileToHostCommand(sourceFolder, fileName, volumeName, destFolder) {
// Construct the Docker command
const command = `
docker run --rm -v "${sourceFolder}":/host -v ${volumeName}:/volume -w /host alpine
cp -r "${destFile}" "${innerFilePath}"
sh -c "rm -rf '${innerFilePath}' && cp -r '${destFile}' '${innerFilePath}'"
`
.replace(/\s+/g, ' ')
.trim()
Expand All @@ -154,7 +154,5 @@ function deleteFilesFromVolumeFolder(volumeName, targetFolder) {
.replace(/\s+/g, ' ')
.trim()

console.log(command)

return command
}
2 changes: 1 addition & 1 deletion src/renderer/src/interfaces/options.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -173,7 +173,7 @@ export class SliceOptionsFile extends CompoundEntry {
new Entry('output_file_folder', 'Output File Folder', './', 'filePath'),
new Entry('output_file_name', 'Output File Name', 'sample', 'string'),
new Entry('dist_between_slices', 'Distance Between Slices', 1, 'number'),
new Entry('make_single_file', 'Output Single File', true, 'boolean').withHidden(),
new Entry('make_single_file', 'Output Single File', true, 'boolean'),
new Entry('connect_start_and_end', 'Connect Endpoints', false, 'boolean'),
new Entry('flush_cache', 'Flush CloudVolume Cache', false, 'boolean').withHidden(),
new CompoundEntry('bounding_box_params', 'Bounding Box Parameters', [
Expand Down

0 comments on commit da3b97d

Please sign in to comment.