From 08a833aa3f3458c64ef114e98d63f16c07b719e5 Mon Sep 17 00:00:00 2001
From: David Stansby
Date: Tue, 2 Apr 2024 08:11:41 +0100
Subject: [PATCH] Stash benchmark work

---
 .gitignore              |  3 +++
 benchmarks/benchmark.py | 46 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 49 insertions(+)
 create mode 100644 benchmarks/benchmark.py

diff --git a/.gitignore b/.gitignore
index 46f6657..1c069a9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -170,3 +170,6 @@ src/stack_to_chunk/_version.py
 
 docs/auto_examples
 docs/sg_execution_times.rst
+
+memray*.bin
+benchmarks/data
diff --git a/benchmarks/benchmark.py b/benchmarks/benchmark.py
new file mode 100644
index 0000000..5f4f21c
--- /dev/null
+++ b/benchmarks/benchmark.py
@@ -0,0 +1,46 @@
+"""
+Benchmark ``stack_to_chunk`` conversion speed.
+
+Writes a synthetic stack of TIFF planes to ``benchmarks/data`` and times
+``MultiScaleGroup.add_full_res_data`` for 1-4 worker processes.
+"""
+
+import pathlib
+import shutil
+import time
+
+import dask_image.imread
+import numpy as np
+import tifffile
+
+import stack_to_chunk
+
+if __name__ == "__main__":
+    # A single synthetic 16-bit plane, re-used for every slice of the stack.
+    plane = np.random.randint(low=0, high=2**16, size=(2000, 2000), dtype=np.uint16)
+    image_dir = pathlib.Path(__file__).parent / "data"
+    if not image_dir.exists():
+        image_dir.mkdir()
+    # Zero-padded file names keep the slices ordered when globbed below.
+    for i in range(64):
+        tifffile.imwrite(image_dir / f"{str(i).zfill(3)}.tif", plane)
+
+    images = dask_image.imread.imread(str(image_dir / "*.tif")).T
+    print(f"Volume size: {images.nbytes / 1e6} MB")
+
+    for n_processes in [1, 2, 3, 4]:
+        # Remove output from any previous run; ignore_errors=True so the
+        # first iteration does not crash when the zarr does not exist yet.
+        shutil.rmtree(image_dir / "chunked.zarr", ignore_errors=True)
+        group = stack_to_chunk.MultiScaleGroup(
+            image_dir / "chunked.zarr",
+            name="my_zarr_group",
+            spatial_unit="centimeter",
+            voxel_size=(3, 4, 5),
+        )
+        t_start = time.time()
+        group.add_full_res_data(
+            images, chunk_size=32, compressor="default", n_processes=n_processes
+        )
+        t_end = time.time()
+        print(f"{n_processes=}, t={t_end - t_start} seconds")