From 398c64fccc97bdba5bc00b1ecc22e558ce241326 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Fri, 31 May 2024 15:26:47 -0400 Subject: [PATCH 01/28] VERSION to 1.11.2 after release --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 720c7384c..ca7176690 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.11.1 +1.11.2 From e401f75c48ae6c5774cec230c84841d10ee8050e Mon Sep 17 00:00:00 2001 From: Ethan Blackwood Date: Tue, 11 Jun 2024 16:34:31 -0400 Subject: [PATCH 02/28] Add is3D setting to summary_images functions --- caiman/summary_images.py | 38 +++++++++++++++++++++++++------------- 1 file changed, 25 insertions(+), 13 deletions(-) diff --git a/caiman/summary_images.py b/caiman/summary_images.py index f9a841d35..7bb50a108 100644 --- a/caiman/summary_images.py +++ b/caiman/summary_images.py @@ -591,7 +591,8 @@ def local_correlations_movie(file_name, stride: int = 1, swap_dim: bool = False, eight_neighbours: bool = True, - mode: str = 'simple'): + mode: str = 'simple', + is3D: bool = False): """ Compute an online correlation image as moving average @@ -614,13 +615,15 @@ def local_correlations_movie(file_name, Use 18 neighbors if true, and 6 if false for 4D data mode: 'simple', 'exponential', or 'cumulative' Mode of moving average + is3D: Boolean + Whether the movie has 3 spatial dimensions Returns: corr_movie: caiman.movie (3D or 4D). local correlation movie """ - Y = caiman.load(file_name) if isinstance(file_name, str) else file_name + Y = caiman.load(file_name, is3D=is3D) if isinstance(file_name, str) else file_name Y = Y[..., :tot_frames] if swap_dim else Y[:tot_frames] first_moment, second_moment, crosscorr, col_ind, row_ind, num_neigbors, M, cn = \ prepare_local_correlations(Y[..., :window] if swap_dim else Y[:window], @@ -662,7 +665,8 @@ def local_correlations_movie_offline(file_name, remove_baseline: bool = False, winSize_baseline: int = 50, quantil_min_baseline: float = 8, - gaussian_blur: bool=False): + gaussian_blur: bool=False, + is3D: bool = False): """ Efficient (parallel) computation of correlation image in shifting windows with option for prior baseline removal @@ -705,6 +709,9 @@ def local_correlations_movie_offline(file_name, gaussian_blur: bool (False) Gaussian smooth the signal + + is3D: bool (False) + Whether movie has 3 spatial dimensions Returns: mm: caiman.movie (3D or 4D). 
@@ -716,12 +723,12 @@ def local_correlations_movie_offline(file_name, params:list = [[file_name, range(j, j + window), eight_neighbours, swap_dim, order_mean, ismulticolor, remove_baseline, winSize_baseline, - quantil_min_baseline, gaussian_blur] + quantil_min_baseline, gaussian_blur, is3D] for j in range(0, Tot_frames - window, stride)] params.append([file_name, range(Tot_frames - window, Tot_frames), eight_neighbours, swap_dim, order_mean, ismulticolor, remove_baseline, winSize_baseline, - quantil_min_baseline, gaussian_blur]) + quantil_min_baseline, gaussian_blur, is3D]) if dview is None: parallel_result = list(map(local_correlations_movie_parallel, params)) @@ -738,8 +745,9 @@ def local_correlations_movie_offline(file_name, def local_correlations_movie_parallel(params:tuple) -> np.ndarray: - mv_name, idx, eight_neighbours, swap_dim, order_mean, ismulticolor, remove_baseline, winSize_baseline, quantil_min_baseline, gaussian_blur = params - mv = caiman.load(mv_name, subindices=idx, in_memory=True) + (mv_name, idx, eight_neighbours, swap_dim, order_mean, ismulticolor, + remove_baseline, winSize_baseline, quantil_min_baseline, gaussian_blur, is3D) = params + mv = caiman.load(mv_name, subindices=idx, in_memory=True, is3D=is3D) if gaussian_blur: mv = mv.gaussian_blur_2D() @@ -756,7 +764,8 @@ def mean_image(file_name, Tot_frames=None, fr: float = 10., window: int = 100, - dview=None): + dview=None, + is3D: bool = False): """ Efficient (parallel) computation of mean image in chunks @@ -775,21 +784,24 @@ def mean_image(file_name, dview: map object Use it for parallel computation + + is3D: bool (False) + Whether movie has 3 spatial dimensions Returns: - mm: caiman.movie (2D). + mm: caiman.movie (2D or 3D). mean image """ if Tot_frames is None: _, Tot_frames = caiman.base.movies.get_file_size(file_name) - params:list = [[file_name, range(j * window, (j + 1) * window)] + params:list = [[file_name, range(j * window, (j + 1) * window), is3D] for j in range(int(Tot_frames / window))] remain_frames = Tot_frames - int(Tot_frames / window) * window if remain_frames > 0: - params.append([file_name, range(int(Tot_frames / window) * window, Tot_frames)]) + params.append([file_name, range(int(Tot_frames / window) * window, Tot_frames), is3D]) if dview is None: parallel_result = list(map(mean_image_parallel, params)) @@ -808,6 +820,6 @@ def mean_image(file_name, return mean_image def mean_image_parallel(params:tuple) -> np.ndarray: - mv_name, idx = params - mv = caiman.load(mv_name, subindices=idx, in_memory=True) + mv_name, idx, is3D = params + mv = caiman.load(mv_name, subindices=idx, in_memory=True, is3D=is3D) return mv.mean(axis=0)[np.newaxis,:,:] From af098769baccda2801f1bdc51fe200b080865fb7 Mon Sep 17 00:00:00 2001 From: Ethan Blackwood Date: Wed, 12 Jun 2024 21:54:25 -0400 Subject: [PATCH 03/28] Make movie.removeBL work with 3D movies --- caiman/base/movies.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/caiman/base/movies.py b/caiman/base/movies.py index 9f44dac43..1dd85551a 100644 --- a/caiman/base/movies.py +++ b/caiman/base/movies.py @@ -552,15 +552,15 @@ def removeBL(self, windowSize:int=100, quantilMin:int=8, in_place:bool=False, re def to2DPixelxTime(self, order='F'): """ - Transform 3D movie into 2D + Transform 3D or 4D movie into 2D """ - return self.transpose([2,1,0]).reshape((-1,self.shape[0]),order=order) + return self.transpose().reshape((-1,self.shape[0]),order=order) def to3DFromPixelxTime(self, shape, order='F'): """ - Transform 2D movie into 3D + Transform 
2D movie into 3D or 4D """ - return to_3D(self,shape[::-1],order=order).transpose([2,1,0]) + return to_3D(self,shape[::-1],order=order).transpose() def computeDFF(self, secsWindow: int = 5, quantilMin: int = 8, method: str = 'only_baseline', in_place: bool = False, order: str = 'F') -> tuple[Any, Any]: From d13958884dacfa5768d89094f57624610ea413b6 Mon Sep 17 00:00:00 2001 From: Ethan Blackwood Date: Fri, 14 Jun 2024 10:42:46 -0400 Subject: [PATCH 04/28] Revert "Add is3D setting to summary_images functions" This reverts commit e401f75c48ae6c5774cec230c84841d10ee8050e. --- caiman/summary_images.py | 38 +++++++++++++------------------------- 1 file changed, 13 insertions(+), 25 deletions(-) diff --git a/caiman/summary_images.py b/caiman/summary_images.py index 7bb50a108..f9a841d35 100644 --- a/caiman/summary_images.py +++ b/caiman/summary_images.py @@ -591,8 +591,7 @@ def local_correlations_movie(file_name, stride: int = 1, swap_dim: bool = False, eight_neighbours: bool = True, - mode: str = 'simple', - is3D: bool = False): + mode: str = 'simple'): """ Compute an online correlation image as moving average @@ -615,15 +614,13 @@ def local_correlations_movie(file_name, Use 18 neighbors if true, and 6 if false for 4D data mode: 'simple', 'exponential', or 'cumulative' Mode of moving average - is3D: Boolean - Whether the movie has 3 spatial dimensions Returns: corr_movie: caiman.movie (3D or 4D). local correlation movie """ - Y = caiman.load(file_name, is3D=is3D) if isinstance(file_name, str) else file_name + Y = caiman.load(file_name) if isinstance(file_name, str) else file_name Y = Y[..., :tot_frames] if swap_dim else Y[:tot_frames] first_moment, second_moment, crosscorr, col_ind, row_ind, num_neigbors, M, cn = \ prepare_local_correlations(Y[..., :window] if swap_dim else Y[:window], @@ -665,8 +662,7 @@ def local_correlations_movie_offline(file_name, remove_baseline: bool = False, winSize_baseline: int = 50, quantil_min_baseline: float = 8, - gaussian_blur: bool=False, - is3D: bool = False): + gaussian_blur: bool=False): """ Efficient (parallel) computation of correlation image in shifting windows with option for prior baseline removal @@ -709,9 +705,6 @@ def local_correlations_movie_offline(file_name, gaussian_blur: bool (False) Gaussian smooth the signal - - is3D: bool (False) - Whether movie has 3 spatial dimensions Returns: mm: caiman.movie (3D or 4D). 
@@ -723,12 +716,12 @@ def local_correlations_movie_offline(file_name, params:list = [[file_name, range(j, j + window), eight_neighbours, swap_dim, order_mean, ismulticolor, remove_baseline, winSize_baseline, - quantil_min_baseline, gaussian_blur, is3D] + quantil_min_baseline, gaussian_blur] for j in range(0, Tot_frames - window, stride)] params.append([file_name, range(Tot_frames - window, Tot_frames), eight_neighbours, swap_dim, order_mean, ismulticolor, remove_baseline, winSize_baseline, - quantil_min_baseline, gaussian_blur, is3D]) + quantil_min_baseline, gaussian_blur]) if dview is None: parallel_result = list(map(local_correlations_movie_parallel, params)) @@ -745,9 +738,8 @@ def local_correlations_movie_offline(file_name, def local_correlations_movie_parallel(params:tuple) -> np.ndarray: - (mv_name, idx, eight_neighbours, swap_dim, order_mean, ismulticolor, - remove_baseline, winSize_baseline, quantil_min_baseline, gaussian_blur, is3D) = params - mv = caiman.load(mv_name, subindices=idx, in_memory=True, is3D=is3D) + mv_name, idx, eight_neighbours, swap_dim, order_mean, ismulticolor, remove_baseline, winSize_baseline, quantil_min_baseline, gaussian_blur = params + mv = caiman.load(mv_name, subindices=idx, in_memory=True) if gaussian_blur: mv = mv.gaussian_blur_2D() @@ -764,8 +756,7 @@ def mean_image(file_name, Tot_frames=None, fr: float = 10., window: int = 100, - dview=None, - is3D: bool = False): + dview=None): """ Efficient (parallel) computation of mean image in chunks @@ -784,24 +775,21 @@ def mean_image(file_name, dview: map object Use it for parallel computation - - is3D: bool (False) - Whether movie has 3 spatial dimensions Returns: - mm: caiman.movie (2D or 3D). + mm: caiman.movie (2D). mean image """ if Tot_frames is None: _, Tot_frames = caiman.base.movies.get_file_size(file_name) - params:list = [[file_name, range(j * window, (j + 1) * window), is3D] + params:list = [[file_name, range(j * window, (j + 1) * window)] for j in range(int(Tot_frames / window))] remain_frames = Tot_frames - int(Tot_frames / window) * window if remain_frames > 0: - params.append([file_name, range(int(Tot_frames / window) * window, Tot_frames), is3D]) + params.append([file_name, range(int(Tot_frames / window) * window, Tot_frames)]) if dview is None: parallel_result = list(map(mean_image_parallel, params)) @@ -820,6 +808,6 @@ def mean_image(file_name, return mean_image def mean_image_parallel(params:tuple) -> np.ndarray: - mv_name, idx, is3D = params - mv = caiman.load(mv_name, subindices=idx, in_memory=True, is3D=is3D) + mv_name, idx = params + mv = caiman.load(mv_name, subindices=idx, in_memory=True) return mv.mean(axis=0)[np.newaxis,:,:] From 0857d7aae5f285e5da35ee81da50bc4abf25e0d6 Mon Sep 17 00:00:00 2001 From: Ethan Blackwood Date: Fri, 14 Jun 2024 11:24:21 -0400 Subject: [PATCH 05/28] Add tests for to2D and to3D --- .gitignore | 1 + caiman/base/movies.py | 12 ++++++------ caiman/tests/test_movies.py | 25 ++++++++++++++++++++++--- 3 files changed, 29 insertions(+), 9 deletions(-) diff --git a/.gitignore b/.gitignore index 147a0db9c..03bbb9705 100644 --- a/.gitignore +++ b/.gitignore @@ -53,6 +53,7 @@ __pycache__/ # C extensions *.so *.c +*.cpp #movie tif *.tif diff --git a/caiman/base/movies.py b/caiman/base/movies.py index 1dd85551a..2cfb3d245 100644 --- a/caiman/base/movies.py +++ b/caiman/base/movies.py @@ -550,17 +550,17 @@ def removeBL(self, windowSize:int=100, quantilMin:int=8, in_place:bool=False, re self -= 
caiman.movie(cv2.resize(res,pixs.shape[::-1]),fr=self.fr).to3DFromPixelxTime(self.shape) return self - def to2DPixelxTime(self, order='F'): + def to2DPixelxTime(self, order='F') -> 'movie': """ - Transform 3D or 4D movie into 2D + Transform 3D or 4D movie into 2D (pixels x time) """ - return self.transpose().reshape((-1,self.shape[0]),order=order) + return self.T.reshape((-1,self.shape[0]),order=order) - def to3DFromPixelxTime(self, shape, order='F'): + def to3DFromPixelxTime(self, shape, order='F') -> 'movie': """ - Transform 2D movie into 3D or 4D + Transform 2D movie (pixels x time) into 3D or 4D """ - return to_3D(self,shape[::-1],order=order).transpose() + return to_3D(self,shape[::-1],order=order).T def computeDFF(self, secsWindow: int = 5, quantilMin: int = 8, method: str = 'only_baseline', in_place: bool = False, order: str = 'F') -> tuple[Any, Any]: diff --git a/caiman/tests/test_movies.py b/caiman/tests/test_movies.py index 2287307f9..35e8c6f9f 100644 --- a/caiman/tests/test_movies.py +++ b/caiman/tests/test_movies.py @@ -2,18 +2,37 @@ import numpy as np import numpy.testing as npt import os -from caiman.base.movies import load, load_iter +from caiman.base import movies from caiman.paths import caiman_datadir def test_load_iter(): fname = os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif') for subindices in (None, slice(100, None, 2)): - m = load_iter(fname, subindices=subindices) + m = movies.load_iter(fname, subindices=subindices) S = 0 while True: try: S += np.sum(next(m)) except StopIteration: break - npt.assert_allclose(S, load(fname, subindices=subindices).sum(), rtol=1e-6) + npt.assert_allclose(S, movies.load(fname, subindices=subindices).sum(), rtol=1e-6) + + +def test_to2D_to3D(): + def reference_to2D(mov, order: str): + return mov.T.reshape((-1, mov.shape[0]), order=order) + + # set up small test movies + rng = np.random.default_rng() + T, y, x, z = 10, 6, 4, 2 + movie_3D = movies.movie(rng.random((T, y, x))) + movie_4D = movies.movie(rng.random((T, y, x, z))) + + for movie in (movie_3D, movie_4D): + for order in ('F', 'C'): + shape = movie.shape + movie_flat = movie.to2DPixelxTime(order=order) + npt.assert_array_equal(movie_flat, reference_to2D(movie, order)) + movie_from_flat = movie_flat.to3DFromPixelxTime(shape, order) + npt.assert_array_equal(movie_from_flat, movie) From 7e372b397268a04fc788a7e91dcfd5d0ae267227 Mon Sep 17 00:00:00 2001 From: Ethan Blackwood Date: Sun, 16 Jun 2024 14:04:54 -0400 Subject: [PATCH 06/28] Make cpp exclusion more specific --- .gitignore | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index 03bbb9705..b6f9b597e 100644 --- a/.gitignore +++ b/.gitignore @@ -52,8 +52,7 @@ __pycache__/ # C extensions *.so -*.c -*.cpp +caiman/source_extraction/cnmf/oasis.cpp #movie tif *.tif From 860e8d8d535a20b3d3af85b9a1cc1071f8b0bfa5 Mon Sep 17 00:00:00 2001 From: salehtahini Date: Tue, 18 Jun 2024 01:32:04 +0200 Subject: [PATCH 07/28] added bokeh implementation of view_quilt --- caiman/utils/visualization.py | 116 ++++++++++++++++++++++++++++ demos/notebooks/demo_pipeline.ipynb | 14 ++-- 2 files changed, 121 insertions(+), 9 deletions(-) diff --git a/caiman/utils/visualization.py b/caiman/utils/visualization.py index 39e0db78f..5206ca203 100644 --- a/caiman/utils/visualization.py +++ b/caiman/utils/visualization.py @@ -1362,3 +1362,119 @@ def view_quilt(template_image: np.ndarray, ax=ax) return ax + + +def create_quilt_patches(patch_rows, patch_cols): + """ + Helper function for nb_view_quilt. 
+ Create patches given the row and column coordinates. + + Args: + patch_rows (ndarray): Array of row start and end positions for each patch. + patch_cols (ndarray): Array of column start and end positions for each patch. + + Returns: + list: A list of dictionaries, each containing the center coordinates, width, + and height of a patch. + """ + patches = [] + for row in patch_rows: + for col in patch_cols: + center_x = (col[0] + col[1]) / 2 + center_y = (row[0] + row[1]) / 2 + width = col[1] - col[0] + height = row[1] - row[0] + patches.append({'center_x': center_x, 'center_y': center_y, 'width': width, 'height': height}) + return patches + + +def nb_view_quilt(template_image: np.ndarray, + rf: int, + stride_input: int, + color: Optional[Any]='white', + alpha: Optional[float]=0.2): + """ + Bokeh implementation of view_quilt. + Plot patches on the template image given stride and overlap parameters. + + Args: + template_image (ndarray): Row x column summary image upon which to draw patches (e.g., correlation image). + rf (int): Half-size of the patches in pixels (patch width is rf*2 + 1). + stride_input (int): Amount of overlap between the patches in pixels (overlap is stride_input + 1). + color (Optional[Any]): Color of the patches, default 'white'. + alpha (Optional[float]): Patch transparency, default 0.2. + """ + + width = (rf*2)+1 + overlap = stride_input+1 + stride = width-overlap + + im_dims = template_image.shape + patch_rows, patch_cols = get_rectangle_coords(im_dims, stride, overlap) + patches = create_quilt_patches(patch_rows, patch_cols) + + plot = bpl.figure(x_range=(0, im_dims[1]), y_range=(im_dims[0], 0), width=600, height=600) + #plot.y_range.flipped = True + plot.image(image=[template_image], x=0, y=0, dw=im_dims[1], dh=im_dims[0], palette="Greys256") + source = ColumnDataSource(data=dict( + center_x=[patch['center_x'] for patch in patches], + center_y=[patch['center_y'] for patch in patches], + width=[patch['width'] for patch in patches], + height=[patch['height'] for patch in patches] + )) + plot.rect(x='center_x', y='center_y', width='width', height='height', source=source, color=color, alpha=alpha) + + # Create sliders + stride_slider = Slider(start=1, end=100, value=rf, step=1, title="Patch half-size (rf)") + overlap_slider = Slider(start=0, end=100, value=stride_input, step=1, title="Overlap (stride)") + + callback = CustomJS(args=dict(source=source, im_dims=im_dims, stride_slider=stride_slider, overlap_slider=overlap_slider), code=""" + function get_rectangle_coords(im_dims, stride, overlap) { + let patch_width = overlap + stride; + + let patch_onset_rows = Array.from({length: Math.ceil((im_dims[0] - patch_width) / stride)}, (_, i) => i * stride).concat([im_dims[0] - patch_width]); + let patch_offset_rows = patch_onset_rows.map(x => Math.min(x + patch_width, im_dims[0] - 1)); + let patch_rows = patch_onset_rows.map((x, i) => [x, patch_offset_rows[i]]); + + let patch_onset_cols = Array.from({length: Math.ceil((im_dims[1] - patch_width) / stride)}, (_, i) => i * stride).concat([im_dims[1] - patch_width]); + let patch_offset_cols = patch_onset_cols.map(x => Math.min(x + patch_width, im_dims[1] - 1)); + let patch_cols = patch_onset_cols.map((x, i) => [x, patch_offset_cols[i]]); + + return [patch_rows, patch_cols]; + } + + function create_quilt_patches(patch_rows, patch_cols) { + let patches = []; + for (let row of patch_rows) { + for (let col of patch_cols) { + let center_x = (col[0] + col[1]) / 2; + let center_y = (row[0] + row[1]) / 2; + let width = col[1] - col[0]; + 
let height = row[1] - row[0]; + patches.push({'center_x': center_x, 'center_y': center_y, 'width': width, 'height': height}); + } + } + return patches; + } + + let width = (stride_slider.value * 2) + 1; + let overlap = overlap_slider.value + 1; + let stride = width - overlap + + let [patch_rows, patch_cols] = get_rectangle_coords(im_dims, stride, overlap); + let patches = create_quilt_patches(patch_rows, patch_cols); + + source.data = { + center_x: patches.map(patch => patch.center_x), + center_y: patches.map(patch => patch.center_y), + width: patches.map(patch => patch.width), + height: patches.map(patch => patch.height) + }; + source.change.emit(); + """) + + stride_slider.js_on_change('value', callback) + overlap_slider.js_on_change('value', callback) + + + bpl.show(bokeh.layouts.row(plot, bokeh.layouts.column(stride_slider, overlap_slider))) \ No newline at end of file diff --git a/demos/notebooks/demo_pipeline.ipynb b/demos/notebooks/demo_pipeline.ipynb index 0923ddd82..7fb668cc3 100644 --- a/demos/notebooks/demo_pipeline.ipynb +++ b/demos/notebooks/demo_pipeline.ipynb @@ -73,7 +73,7 @@ "from caiman.source_extraction.cnmf import cnmf, params\n", "from caiman.utils.utils import download_demo\n", "from caiman.utils.visualization import plot_contours, nb_view_patches, nb_plot_contour\n", - "from caiman.utils.visualization import view_quilt\n", + "from caiman.utils.visualization import nb_view_quilt\n", "\n", "bpl.output_notebook()\n", "hv.notebook_extension('bokeh')" @@ -741,7 +741,7 @@ "metadata": {}, "source": [ "### Selecting spatial parameters\n", - "To select the spatial parameters (`gSig`, `rf`, `stride`, `K`), you need to look at your movie, or a summary image for your movie, and pick values close to those suggested by the guidelines above. It is helpful to use `view_quilt()` function to see if our key spatial parameters are in the right ballpark (note we recommend running this viewer in interactive qt mode so you can interact with it and get a better feel for the parameters):" + "To select the spatial parameters (`gSig`, `rf`, `stride`, `K`), you need to look at your movie, or a summary image for your movie, and pick values close to those suggested by the guidelines above. 
It is helpful to use the interactive `nb_view_quilt()` function or `view_quilt()` function to see if our key spatial parameters are in the right ballpark (you can use the sliders in the interactive version to change the `rf` and `stride` parameters and get a better feel for them):" ] }, { @@ -757,13 +757,9 @@ "print(f'Patch width: {cnmf_patch_width} , Stride: {cnmf_patch_stride}, Overlap: {cnmf_patch_overlap}');\n", "\n", "# plot the patches\n", - "patch_ax = view_quilt(correlation_image, \n", - " cnmf_patch_stride, \n", - " cnmf_patch_overlap, \n", - " vmin=np.percentile(np.ravel(correlation_image),50), \n", - " vmax=np.percentile(np.ravel(correlation_image),99.5),\n", - " figsize=(4,4));\n", - "patch_ax.set_title(f'CNMF Patches Width {cnmf_patch_width}, Overlap {cnmf_patch_overlap}');" + "patch_ax = nb_view_quilt(correlation_image, \n", + " cnmf_model.params.patch['rf'], \n", + " cnmf_model.params.patch['stride']);" ] }, { From 827c54117b3bc11e4fb94fd1d03db678785e72cf Mon Sep 17 00:00:00 2001 From: salehtahini Date: Tue, 18 Jun 2024 08:32:51 +0200 Subject: [PATCH 08/28] updated documentation --- caiman/utils/visualization.py | 1 - 1 file changed, 1 deletion(-) diff --git a/caiman/utils/visualization.py b/caiman/utils/visualization.py index 5206ca203..f0e3928e4 100644 --- a/caiman/utils/visualization.py +++ b/caiman/utils/visualization.py @@ -1344,7 +1344,6 @@ def view_quilt(template_image: np.ndarray, Note: Currently assumes square patches so takes in a single number for stride/overlap. - TODO: implement bokeh version of this function """ im_dims = template_image.shape patch_rows, patch_cols = get_rectangle_coords(im_dims, stride, overlap) From 54b2fb780dd5bafe4565c70036db3b557a17d43d Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Mon, 1 Jul 2024 10:45:43 -0400 Subject: [PATCH 09/28] Fix naming of Slider() --- caiman/utils/visualization.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/caiman/utils/visualization.py b/caiman/utils/visualization.py index f0e3928e4..a82833507 100644 --- a/caiman/utils/visualization.py +++ b/caiman/utils/visualization.py @@ -1424,8 +1424,8 @@ def nb_view_quilt(template_image: np.ndarray, plot.rect(x='center_x', y='center_y', width='width', height='height', source=source, color=color, alpha=alpha) # Create sliders - stride_slider = Slider(start=1, end=100, value=rf, step=1, title="Patch half-size (rf)") - overlap_slider = Slider(start=0, end=100, value=stride_input, step=1, title="Overlap (stride)") + stride_slider = bokeh.models.Slider(start=1, end=100, value=rf, step=1, title="Patch half-size (rf)") + overlap_slider = bokeh.models.Slider(start=0, end=100, value=stride_input, step=1, title="Overlap (stride)") callback = CustomJS(args=dict(source=source, im_dims=im_dims, stride_slider=stride_slider, overlap_slider=overlap_slider), code=""" function get_rectangle_coords(im_dims, stride, overlap) { From 9c4646e69277e99f56a44f1c27020e739cbf8cc0 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Mon, 1 Jul 2024 13:37:55 -0400 Subject: [PATCH 10/28] pin peakutils >=1.3.5 (see #1369) --- environment-minimal.yml | 2 +- environment.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment-minimal.yml b/environment-minimal.yml index a7d097ed0..e2f6f7f81 100644 --- a/environment-minimal.yml +++ b/environment-minimal.yml @@ -16,7 +16,7 @@ dependencies: - numpy <2.0.0,>=1.26 - numpydoc - opencv -- peakutils +- peakutils >=1.3.5 - pims - psutil - pynwb diff --git a/environment.yml b/environment.yml index 6534f39a5..6394dbbb1 
100644 --- a/environment.yml +++ b/environment.yml @@ -22,7 +22,7 @@ dependencies: - numpydoc - opencv - panel >=1.0.2 -- peakutils +- peakutils >=1.3.5 - pims - psutil - pynwb From 522d456be8eeca6ac12f73b8de872521a92695d1 Mon Sep 17 00:00:00 2001 From: Ethan Blackwood Date: Fri, 28 Jun 2024 11:50:53 -0400 Subject: [PATCH 11/28] Fix CoM calculation when swap_dim is true --- caiman/utils/visualization.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/caiman/utils/visualization.py b/caiman/utils/visualization.py index a82833507..825757a1b 100644 --- a/caiman/utils/visualization.py +++ b/caiman/utils/visualization.py @@ -392,18 +392,16 @@ def get_contours(A, dims, thr=0.9, thr_method='nrg', swap_dim=False): if 'csc_matrix' not in str(type(A)): A = csc_matrix(A) d, nr = np.shape(A) - # if we are on a 3D video - if len(dims) == 3: - d1, d2, d3 = dims - x, y = np.mgrid[0:d2:1, 0:d3:1] - else: - d1, d2 = dims - x, y = np.mgrid[0:d1:1, 0:d2:1] + d1, d2 = dims[:2] coordinates = [] # get the center of mass of neurons( patches ) - cm = caiman.base.rois.com(A, *dims) + # com assumes F-order + if swap_dim: + cm = caiman.base.rois.com(A, *dims[::-1])[:, ::-1] + else: + cm = caiman.base.rois.com(A, *dims) # for each patches for i in range(nr): From 1f88e493a6756fd0bb6dca580c930c32110a192d Mon Sep 17 00:00:00 2001 From: Ethan Blackwood Date: Tue, 2 Jul 2024 12:13:26 -0400 Subject: [PATCH 12/28] Fix plot_contours to use CoMs from updated get_contours --- caiman/utils/visualization.py | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/caiman/utils/visualization.py b/caiman/utils/visualization.py index 825757a1b..7f03e4652 100644 --- a/caiman/utils/visualization.py +++ b/caiman/utils/visualization.py @@ -1096,16 +1096,11 @@ def plot_contours(A, Cn, thr=None, thr_method='max', maxthr=0.2, nrgthr=0.9, dis plt.plot(*v.T, c=colors, **contour_args) if display_numbers: - d1, d2 = np.shape(Cn) - d, nr = np.shape(A) - cm = caiman.base.rois.com(A, d1, d2) + nr = A.shape[1] if max_number is None: - max_number = A.shape[1] - for i in range(np.minimum(nr, max_number)): - if swap_dim: - ax.text(cm[i, 0], cm[i, 1], str(i + 1), color=colors, **number_args) - else: - ax.text(cm[i, 1], cm[i, 0], str(i + 1), color=colors, **number_args) + max_number = nr + for i, c in zip(range(np.minimum(nr, max_number)), coordinates): + ax.text(c['CoM'][1], c['CoM'][0], str(i + 1), color=colors, **number_args) return coordinates def plot_shapes(Ab, dims, num_comps=15, size=(15, 15), comps_per_row=None, From 8f359bbfc9303a1d8affd47c715ea1ca8b9174d7 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Tue, 2 Jul 2024 12:54:15 -0400 Subject: [PATCH 13/28] de-emoji the readme --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 36d57acc6..72b8cf101 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ Caiman Central -------------- - [Caiman Central](https://github.com/flatironinstitute/caiman_central) is the hub for sharing information about CaImAn. Information on quarterly community meetings, workshops, other events, and any other communications between the developers and the user community can be found there. -# Quick start :rocket: +# Quick start Follow these three steps to get started quickly, from installation to working through a demo notebook. If you do not already have conda installed, [you can find it here](https://docs.conda.io/en/latest/miniconda.html). 
There is a video walkthrough of the following steps [here](https://youtu.be/b63zAmKihIY?si=m7WleTwdU0rJup_2). ### Step 1: Install caiman @@ -41,7 +41,7 @@ Jupyter will open. Navigate to demos/notebooks/ and click on `demo_pipeline.ipyn ## For installation help Caiman should install easily on Linux, Mac, and Windows. If you run into problems, we have a dedicated [installation page](./docs/source/Installation.rst): the details there should help you troubleshoot. If you don't find what you need there, *please* [create an issue](https://github.com/flatironinstitute/CaImAn/issues) at GitHub, and we will help you get it sorted out. -# Demo notebooks :page_with_curl: +# Demo notebooks Caiman provides demo notebooks to showcase each of our main features, from motion correction to online CNMF. We recommend starting with the CNMF notebook (`demo_pipeline.ipynb`), which contains more explanation and details than the other notebooks: it covers many concepts that will be used without explanation in the other notebooks. The CNMFE notebook (`demo_pipeline_cnmfE.ipynb`), is also more detailed. Once you've gotten things set up and worked through those "anchor" notebooks, the best way to get started is to work through the demo notebook that most closely matches your use case; you should be able to adapt it for your particular needs. The main use cases and notebooks are listed in the following table: @@ -72,7 +72,7 @@ Caiman also provides commandline demos, similar to the notebooks, demonstrating - If you have found a bug, we recommend searching the [issues at github](https://github.com/flatironinstitute/CaImAn/issues) and opening a new issue if you can't find the solution there. - If there is a feature you would like to see implemented, feel free to come chat at the above forums or open an issue at Github. -# How to contribute :hammer: +# How to contribute Caiman is an open-source project and improves because of contributions from users all over the world. If there is something about Caiman that you would like to work on, then please reach out. We are always looking for more contributors, so please come read the [contributors page](./CONTRIBUTING.md) for more details about how. # Videos @@ -89,7 +89,7 @@ The following talks are more in depth: * https://www.youtube.com/watch?v=z6TlH28MLRo -# Related repositories :pushpin: +# Related repositories There are many repositories that use Caiman, or help make using Caiman easier. * [use\_cases repo](https://github.com/flatironinstitute/caiman_use_cases): additional code (unmaintained) demonstrating how to reproduce results in some Caiman-related papers, and how to use/extend Caiman. 
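[Note on the two CoM fixes above and the com() rewrite in the patch that follows: all of them turn on the same detail — a column of A only unravels into the correct spatial mask if it is reshaped with the same memory order that was used to flatten it, so a center of mass computed under a mismatched order lands on the wrong pixel. The sketch below is illustrative only and not part of the patch series; the toy mask and the simplified com() mirror the construction used in the rewritten caiman.base.rois.com.

    import numpy as np
    import scipy.sparse

    dims = (4, 3)                          # toy field of view: 4 rows x 3 columns
    mask = np.zeros(dims, dtype=np.float32)
    mask[1, 2] = 1.0                       # one component, all mass at (row=1, col=2)

    # Columns of A hold flattened masks; the flattening order must be remembered.
    A_f = scipy.sparse.csc_matrix(mask.ravel(order='F')[:, None])
    A_c = scipy.sparse.csc_matrix(mask.ravel(order='C')[:, None])

    def com(A, dims, order='F'):
        # Weighted average of pixel coordinates per component (one column of A),
        # unraveling each column with the given memory order.
        coords = np.meshgrid(*[np.arange(d) for d in dims], indexing='ij')
        coor = np.stack([c.ravel(order=order) for c in coords])
        return np.asarray((coor @ A / A.sum(axis=0)).T)

    print(com(A_f, dims, order='F'))       # [[1. 2.]] -- orders match, correct
    print(com(A_c, dims, order='C'))       # [[1. 2.]] -- orders match, correct
    print(com(A_c, dims, order='F'))       # [[1. 1.]] -- mismatched order, wrong pixel

For a (4, 3) array, flat index 5 unravels to (1, 2) in C-order but to (1, 1) in F-order, which is exactly the discrepancy the swap_dim handling above works around.]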
From b38f0faa75859536e7c889436a86c7437d3577b1 Mon Sep 17 00:00:00 2001 From: Ethan Blackwood Date: Tue, 2 Jul 2024 16:22:12 -0400 Subject: [PATCH 14/28] Add rewritten com function with C/F order and add test --- caiman/base/rois.py | 44 ++++++++++++----------------------- caiman/tests/test_toydata.py | 17 ++++++++++++++ caiman/utils/visualization.py | 6 +---- 3 files changed, 33 insertions(+), 34 deletions(-) diff --git a/caiman/base/rois.py b/caiman/base/rois.py index b5fff51e1..86e0fd6b0 100644 --- a/caiman/base/rois.py +++ b/caiman/base/rois.py @@ -28,46 +28,32 @@ pass -def com(A: np.ndarray, d1: int, d2: int, d3: Optional[int] = None) -> np.array: +def com(A, *dims: int, order='F') -> np.ndarray: """Calculation of the center of mass for spatial components Args: - A: np.ndarray - matrix of spatial components (d x K) + A: np.ndarray or scipy.sparse array or matrix + matrix of spatial components (d x K); column indices map to dimensions according to F-order. - d1: int - number of pixels in x-direction - - d2: int - number of pixels in y-direction - - d3: int - number of pixels in z-direction + *dims: D ints + each positional argument after A defines the extent of one of the dimensions of the data. + + order: 'C' or 'F' + how each column of A should be reshaped to match dims. Returns: cm: np.ndarray - center of mass for spatial components (K x 2 or 3) + center of mass for spatial components (K x D) """ - if 'csc_matrix' not in str(type(A)): A = scipy.sparse.csc_matrix(A) - if d3 is None: - Coor = np.matrix([np.outer(np.ones(d2), np.arange(d1)).ravel(), - np.outer(np.arange(d2), np.ones(d1)).ravel()], - dtype=A.dtype) - else: - Coor = np.matrix([ - np.outer(np.ones(d3), - np.outer(np.ones(d2), np.arange(d1)).ravel()).ravel(), - np.outer(np.ones(d3), - np.outer(np.arange(d2), np.ones(d1)).ravel()).ravel(), - np.outer(np.arange(d3), - np.outer(np.ones(d2), np.ones(d1)).ravel()).ravel() - ], - dtype=A.dtype) - - cm = (Coor * A / A.sum(axis=0)).T + # make coordinate arrays where coor[d] increases from 0 to npixels[d]-1 along the dth axis + coors = np.meshgrid(*[range(d) for d in dims], indexing='ij') + coor = np.stack([c.ravel(order=order) for c in coors]) + + # take weighted sum of pixel positions along each coordinate + cm = (coor @ A / A.sum(axis=0)).T return np.array(cm) diff --git a/caiman/tests/test_toydata.py b/caiman/tests/test_toydata.py index 2a0f2a8e0..bf61fd7d3 100644 --- a/caiman/tests/test_toydata.py +++ b/caiman/tests/test_toydata.py @@ -6,6 +6,7 @@ import caiman.source_extraction.cnmf.params from caiman.source_extraction import cnmf as cnmf +from caiman.utils.visualization import get_contours #%% @@ -64,6 +65,22 @@ def pipeline(D): ] npt.assert_allclose(corr, 1, .05) + # Check that get_contours works regardless of swap_dim + if D == 2: # Can't expect swap_dim to work for 3D data in the same way + coor_normal = get_contours(cnm.estimates.A, dims, swap_dim=False) + coor_swapped = get_contours(cnm.estimates.A, dims[::-1], swap_dim=True) + for c_normal, c_swapped in zip(coor_normal, coor_swapped): + # have to sort coordinates b/c starting point is unimportant & depend on orientation + # also, the first point is repeated and this may be a different point depending on orientation. 
+ # to get around this, compare differences instead (have to take absolute value b/c direction may be opposite) + def normalize_coords(coords): + abs_diffs = np.abs(np.diff(coords, axis=0)) + sort_order = np.lexsort(abs_diffs.T) + return abs_diffs[sort_order, :] + + npt.assert_allclose(normalize_coords(c_normal['coordinates']), normalize_coords(c_swapped['coordinates'][:, ::-1])) + npt.assert_allclose(c_normal['CoM'], c_swapped['CoM'][::-1]) + def test_2D(): pipeline(2) diff --git a/caiman/utils/visualization.py b/caiman/utils/visualization.py index 7f03e4652..0b6bbb652 100644 --- a/caiman/utils/visualization.py +++ b/caiman/utils/visualization.py @@ -397,11 +397,7 @@ def get_contours(A, dims, thr=0.9, thr_method='nrg', swap_dim=False): coordinates = [] # get the center of mass of neurons( patches ) - # com assumes F-order - if swap_dim: - cm = caiman.base.rois.com(A, *dims[::-1])[:, ::-1] - else: - cm = caiman.base.rois.com(A, *dims) + cm = caiman.base.rois.com(A, *dims, order='C' if swap_dim else 'F') # for each patches for i in range(nr): From f6d30aa21a27dfa5b410a3f7222d8ca984d9c714 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Tue, 2 Jul 2024 16:44:29 -0400 Subject: [PATCH 15/28] caiman_gui: if the user doesn't provide a filename, bail out with a good message rather than crashing --- bin/caiman_gui.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/bin/caiman_gui.py b/bin/caiman_gui.py index 9ffbe221f..0a02eb459 100755 --- a/bin/caiman_gui.py +++ b/bin/caiman_gui.py @@ -53,6 +53,8 @@ def make_color_img(img, gain=255, min_max=None, out_type=np.uint8): # load object saved by CNMF fpath = F.getOpenFileName(caption='Load CNMF Object', filter='HDF5 (*.h5 *.hdf5);;NWB (*.nwb)')[0] +if fpath == '': + raise Exception("You must provide a filename") cnm_obj = load_CNMF(fpath) # movie From 467c591af6ff749318afbef19b7163704848eaf4 Mon Sep 17 00:00:00 2001 From: Ethan Blackwood Date: Fri, 5 Jul 2024 10:10:43 -0400 Subject: [PATCH 16/28] Restore individual dimension arguments for com --- caiman/base/rois.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/caiman/base/rois.py b/caiman/base/rois.py index 86e0fd6b0..417405828 100644 --- a/caiman/base/rois.py +++ b/caiman/base/rois.py @@ -28,18 +28,18 @@ pass -def com(A, *dims: int, order='F') -> np.ndarray: +def com(A, d1: int, d2: int, d3: Optional[int] = None, order: str = 'F') -> np.ndarray: """Calculation of the center of mass for spatial components Args: A: np.ndarray or scipy.sparse array or matrix - matrix of spatial components (d x K); column indices map to dimensions according to F-order. + matrix of spatial components (d x K). - *dims: D ints - each positional argument after A defines the extent of one of the dimensions of the data. + d1, d2, d3: ints + d1, d2, and (optionally) d3 are the original dimensions of the data. order: 'C' or 'F' - how each column of A should be reshaped to match dims. + how each column of A should be reshaped to match the given dimensions. 
Returns: cm: np.ndarray @@ -48,6 +48,10 @@ def com(A, *dims: int, order='F') -> np.ndarray: if 'csc_matrix' not in str(type(A)): A = scipy.sparse.csc_matrix(A) + dims = [d1, d2] + if d3 is not None: + dims.append(d3) + # make coordinate arrays where coor[d] increases from 0 to npixels[d]-1 along the dth axis coors = np.meshgrid(*[range(d) for d in dims], indexing='ij') coor = np.stack([c.ravel(order=order) for c in coors]) From d8835987801667615cdb9ef31782032c6263a7e8 Mon Sep 17 00:00:00 2001 From: Ethan Blackwood Date: Fri, 5 Jul 2024 18:58:45 -0400 Subject: [PATCH 17/28] Add slice_dim option for get_contours and 3D test for swap_dim --- caiman/tests/test_toydata.py | 38 +++++++++++++++++++++++------------ caiman/utils/visualization.py | 37 +++++++++++++++++++++++++--------- 2 files changed, 53 insertions(+), 22 deletions(-) diff --git a/caiman/tests/test_toydata.py b/caiman/tests/test_toydata.py index bf61fd7d3..beb51a786 100644 --- a/caiman/tests/test_toydata.py +++ b/caiman/tests/test_toydata.py @@ -66,20 +66,32 @@ def pipeline(D): npt.assert_allclose(corr, 1, .05) # Check that get_contours works regardless of swap_dim - if D == 2: # Can't expect swap_dim to work for 3D data in the same way - coor_normal = get_contours(cnm.estimates.A, dims, swap_dim=False) - coor_swapped = get_contours(cnm.estimates.A, dims[::-1], swap_dim=True) - for c_normal, c_swapped in zip(coor_normal, coor_swapped): - # have to sort coordinates b/c starting point is unimportant & depend on orientation - # also, the first point is repeated and this may be a different point depending on orientation. - # to get around this, compare differences instead (have to take absolute value b/c direction may be opposite) - def normalize_coords(coords): - abs_diffs = np.abs(np.diff(coords, axis=0)) - sort_order = np.lexsort(abs_diffs.T) - return abs_diffs[sort_order, :] + coor_normal = get_contours(cnm.estimates.A, dims, swap_dim=False) + coor_swapped = get_contours(cnm.estimates.A, dims[::-1], swap_dim=True) + for c_normal, c_swapped in zip(coor_normal, coor_swapped): + if D == 3: + for plane_coor_normal, plane_coor_swapped in zip(c_normal['coordinates'], c_swapped['coordinates']): + compare_contour_coords(plane_coor_normal, plane_coor_swapped[:, ::-1]) + else: + compare_contour_coords(c_normal['coordinates'], c_swapped['coordinates'][:, ::-1]) - npt.assert_allclose(normalize_coords(c_normal['coordinates']), normalize_coords(c_swapped['coordinates'][:, ::-1])) - npt.assert_allclose(c_normal['CoM'], c_swapped['CoM'][::-1]) + npt.assert_allclose(c_normal['CoM'], c_swapped['CoM'][::-1]) + +def compare_contour_coords(coords1: np.ndarray, coords2: np.ndarray): + """ + Compare 2 matrices of contour coordinates that should be the same, but may be calculated in a different order/ + from different starting points. + + The first point of each contour component is repeated, and this may be a different point depending on orientation. + To get around this, compare differences instead (have to take absolute value b/c direction may be opposite). 
+ Also sort coordinates b/c starting point is unimportant & depends on orientation + """ + diffs_sorted = [] + for coords in [coords1, coords2]: + abs_diffs = np.abs(np.diff(coords, axis=0)) + sort_order = np.lexsort(abs_diffs.T) + diffs_sorted.append(abs_diffs[sort_order, :]) + npt.assert_allclose(diffs_sorted[0], diffs_sorted[1]) def test_2D(): diff --git a/caiman/utils/visualization.py b/caiman/utils/visualization.py index 0b6bbb652..cceb58f59 100644 --- a/caiman/utils/visualization.py +++ b/caiman/utils/visualization.py @@ -20,7 +20,7 @@ from skimage.measure import find_contours import sys from tempfile import NamedTemporaryFile -from typing import Any, Optional +from typing import Any, Optional, Literal, Union from warnings import warn import caiman.base.rois @@ -366,7 +366,7 @@ def plot_unit(uid, scl): .redim.range(unit_id=(0, nr-1), scale=(0.0, 1.0))) -def get_contours(A, dims, thr=0.9, thr_method='nrg', swap_dim=False): +def get_contours(A, dims, thr=0.9, thr_method='nrg', swap_dim=False, slice_dim: Union[int, Literal['auto']] = 'auto'): """Gets contour of spatial components and returns their coordinates Args: @@ -374,15 +374,24 @@ def get_contours(A, dims, thr=0.9, thr_method='nrg', swap_dim=False): Matrix of Spatial components (d x K) dims: tuple of ints - Spatial dimensions of movie (x, y[, z]) + Spatial dimensions of movie thr: scalar between 0 and 1 Energy threshold for computing contours (default 0.9) - thr_method: [optional] string + thr_method: string Method of thresholding: 'max' sets to zero pixels that have value less than a fraction of the max value 'nrg' keeps the pixels that contribute up to a specified fraction of the energy + + swap_dim: bool + If False (default), each column of A should be reshaped in F-order to recover the mask; + this is correct if the dimensions have not been reordered from (y, x[, z]). + If True, each column should be reshaped in C-order; this is correct for dims = ([z, ]x, y). + + slice_dim: int or 'auto' + Which dimension to slice along if we have 3D data. (i.e., get contours on each plane along this axis). + The default ('auto') is 0 if swap_dim is True, else -1. 
Returns: Coor: list of coordinates with center of mass and @@ -431,9 +440,9 @@ def get_contours(A, dims, thr=0.9, thr_method='nrg', swap_dim=False): Bmat = np.reshape(Bvec, dims, order='C') else: Bmat = np.reshape(Bvec, dims, order='F') - pars['coordinates'] = [] - # for each dimensions we draw the contour - for B in (Bmat if len(dims) == 3 else [Bmat]): + + def get_slice_coords(B: np.ndarray) -> np.ndarray: + """Get contour coordinates for a 2D slice""" vertices = find_contours(B.T, thr) # this fix is necessary for having disjoint figures and borders plotted correctly v = np.atleast_2d([np.nan, np.nan]) @@ -449,9 +458,19 @@ def get_contours(A, dims, thr=0.9, thr_method='nrg', swap_dim=False): vtx = np.concatenate((vtx, vtx[0, np.newaxis]), axis=0) v = np.concatenate( (v, vtx, np.atleast_2d([np.nan, np.nan])), axis=0) + return v + + if len(dims) == 2: + pars['coordinates'] = get_slice_coords(Bmat) + else: + # make a list of the contour coordinates for each 2D slice + pars['coordinates'] = [] + if slice_dim == 'auto': + slice_dim = 0 if swap_dim else -1 + for s in range(dims[slice_dim]): + B = Bmat.take(s, axis=slice_dim) + pars['coordinates'].append(get_slice_coords(B)) - pars['coordinates'] = v if len( - dims) == 2 else (pars['coordinates'] + [v]) pars['CoM'] = np.squeeze(cm[i, :]) pars['neuron_id'] = i + 1 coordinates.append(pars) From 830967c747334fc3bd2c620d96a3714590415ac0 Mon Sep 17 00:00:00 2001 From: Ethan Blackwood Date: Fri, 5 Jul 2024 19:48:02 -0400 Subject: [PATCH 18/28] Minor changes to make contours more consistent/predictable --- caiman/utils/visualization.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/caiman/utils/visualization.py b/caiman/utils/visualization.py index cceb58f59..707781d0e 100644 --- a/caiman/utils/visualization.py +++ b/caiman/utils/visualization.py @@ -401,7 +401,6 @@ def get_contours(A, dims, thr=0.9, thr_method='nrg', swap_dim=False, slice_dim: if 'csc_matrix' not in str(type(A)): A = csc_matrix(A) d, nr = np.shape(A) - d1, d2 = dims[:2] coordinates = [] @@ -443,6 +442,7 @@ def get_contours(A, dims, thr=0.9, thr_method='nrg', swap_dim=False, slice_dim: def get_slice_coords(B: np.ndarray) -> np.ndarray: """Get contour coordinates for a 2D slice""" + d1, d2 = B.shape vertices = find_contours(B.T, thr) # this fix is necessary for having disjoint figures and borders plotted correctly v = np.atleast_2d([np.nan, np.nan]) @@ -451,8 +451,8 @@ def get_slice_coords(B: np.ndarray) -> np.ndarray: if num_close_coords < 2: if num_close_coords == 0: # case angle - newpt = np.round(vtx[-1, :] / [d2, d1]) * [d2, d1] - vtx = np.concatenate((vtx, newpt[np.newaxis, :]), axis=0) + newpt = np.round(np.mean(vtx[[0, -1], :], axis=0) / [d2, d1]) * [d2, d1] + vtx = np.concatenate((newpt[np.newaxis, :], vtx, newpt[np.newaxis, :]), axis=0) else: # case one is border vtx = np.concatenate((vtx, vtx[0, np.newaxis]), axis=0) From 04d1ca20da4a41322ac4d4530654cff28b733632 Mon Sep 17 00:00:00 2001 From: Ethan Blackwood Date: Mon, 8 Jul 2024 10:20:24 -0400 Subject: [PATCH 19/28] Use None instead of 'auto' --- caiman/utils/visualization.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/caiman/utils/visualization.py b/caiman/utils/visualization.py index 707781d0e..9f9a6f2e1 100644 --- a/caiman/utils/visualization.py +++ b/caiman/utils/visualization.py @@ -20,7 +20,7 @@ from skimage.measure import find_contours import sys from tempfile import NamedTemporaryFile -from typing import Any, Optional, Literal, Union +from typing import Any, 
Optional from warnings import warn import caiman.base.rois @@ -366,7 +366,7 @@ def plot_unit(uid, scl): .redim.range(unit_id=(0, nr-1), scale=(0.0, 1.0))) -def get_contours(A, dims, thr=0.9, thr_method='nrg', swap_dim=False, slice_dim: Union[int, Literal['auto']] = 'auto'): +def get_contours(A, dims, thr=0.9, thr_method='nrg', swap_dim=False, slice_dim: Optional[int] = None): """Gets contour of spatial components and returns their coordinates Args: @@ -389,9 +389,9 @@ def get_contours(A, dims, thr=0.9, thr_method='nrg', swap_dim=False, slice_dim: this is correct if the dimensions have not been reordered from (y, x[, z]). If True, each column should be reshaped in C-order; this is correct for dims = ([z, ]x, y). - slice_dim: int or 'auto' + slice_dim: int or None Which dimension to slice along if we have 3D data. (i.e., get contours on each plane along this axis). - The default ('auto') is 0 if swap_dim is True, else -1. + The default (None) is 0 if swap_dim is True, else -1. Returns: Coor: list of coordinates with center of mass and @@ -465,7 +465,7 @@ def get_slice_coords(B: np.ndarray) -> np.ndarray: else: # make a list of the contour coordinates for each 2D slice pars['coordinates'] = [] - if slice_dim == 'auto': + if slice_dim is None: slice_dim = 0 if swap_dim else -1 for s in range(dims[slice_dim]): B = Bmat.take(s, axis=slice_dim) From d0732cbf27b20115d5f9326f0d81f548e2d95c66 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Thu, 11 Jul 2024 11:58:07 -0400 Subject: [PATCH 20/28] demo_online_cnmfE: Fix a text cell that was mistakenly marked as code --- demos/notebooks/demo_online_cnmfE.ipynb | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/demos/notebooks/demo_online_cnmfE.ipynb b/demos/notebooks/demo_online_cnmfE.ipynb index 6ca093cf9..1783024b0 100644 --- a/demos/notebooks/demo_online_cnmfE.ipynb +++ b/demos/notebooks/demo_online_cnmfE.ipynb @@ -181,10 +181,8 @@ ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ "Plot shifts" ] From 2a9d2c85f9887e7823e83ec15c795ec88b249929 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Fri, 12 Jul 2024 14:22:23 -0400 Subject: [PATCH 21/28] Finish the fix started in #1326 (thanks to @EmeEmu and @marberi for bringing it to my attention) --- caiman/base/movies.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/caiman/base/movies.py b/caiman/base/movies.py index 9f44dac43..1dc61924d 100644 --- a/caiman/base/movies.py +++ b/caiman/base/movies.py @@ -1445,8 +1445,8 @@ def load(file_name: Union[str, list[str]], with np.load(file_name) as f: return movie(**f).astype(outtype) - elif extension in ('.hdf5', '.h5', '.mat', '.nwb', 'n5', 'zarr'): - if extension in ('n5', 'zarr'): # Thankfully, the zarr library lines up closely with h5py past the initial open + elif extension in ('.hdf5', '.h5', '.mat', '.nwb', '.n5', '.zarr'): + if extension in ('.n5', '.zarr'): # Thankfully, the zarr library lines up closely with h5py past the initial open f = zarr.open(file_name, "r") else: try: @@ -1971,9 +1971,9 @@ def get_file_size(file_name, var_name_hdf5:str='mov') -> tuple[tuple, Union[int, filename = os.path.split(file_name)[-1] Yr, dims, T = caiman.mmapping.load_memmap(os.path.join( os.path.split(file_name)[0], filename)) - elif extension in ('.h5', '.hdf5', '.mat', '.nwb', 'n5', 'zarr'): + elif extension in ('.h5', '.hdf5', '.mat', '.nwb', '.n5', '.zarr'): # FIXME this doesn't match the logic in load() - if extension in ('n5', 'zarr'): # Thankfully, 
the zarr library lines up closely with h5py past the initial open
             f = zarr.open(file_name, "r")
         else:
             try:

From 2d0108e54915370cb796a84bcfe44c6415b0ddb3 Mon Sep 17 00:00:00 2001
From: Pat Gunn
Date: Thu, 18 Jul 2024 11:16:36 -0400
Subject: [PATCH 22/28] mmapping: fix a path goof where a double-correction
 caused a problem, thanks to @nick-youngblut #1373

---
 caiman/mmapping.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/caiman/mmapping.py b/caiman/mmapping.py
index ce4b040e8..dc0f670a6 100644
--- a/caiman/mmapping.py
+++ b/caiman/mmapping.py
@@ -527,7 +527,7 @@ def save_memmap(filenames:list[str],
                 sys.stdout.flush()
                 Ttot = Ttot + T
 
-        fname_new = os.path.join(caiman.paths.get_tempdir(), caiman.paths.fn_relocated(f'{fname_tot}_frames_{Ttot}.mmap'))
+        fname_new = caiman.paths.fn_relocated(f'{fname_tot}_frames_{Ttot}.mmap')
         try:                                                 # need to explicitly remove destination on windows
             os.unlink(fname_new)

From c82e768445ec3065122ff2717571be7ae620823f Mon Sep 17 00:00:00 2001
From: Pat Gunn
Date: Wed, 24 Jul 2024 15:53:12 -0400
Subject: [PATCH 23/28] Move (almost) all caiman logging under a named logger
 called "caiman"

---
 caiman/base/movies.py                         | 149 +++++++++++-------
 caiman/base/rois.py                           |  63 ++++----
 caiman/base/timeseries.py                     |  17 +-
 caiman/base/traces.py                         |   6 +-
 caiman/cluster.py                             |   6 +-
 caiman/components_evaluation.py               |  40 +++--
 caiman/mmapping.py                            |  67 ++++----
 caiman/motion_correction.py                   | 134 ++++++++--------
 caiman/paths.py                               |   6 +-
 caiman/source_extraction/cnmf/cnmf.py         |  50 +++---
 caiman/source_extraction/cnmf/estimates.py    |  30 ++--
 .../source_extraction/cnmf/initialization.py  |  83 +++++-----
 caiman/source_extraction/cnmf/map_reduce.py   |  26 +--
 caiman/source_extraction/cnmf/merging.py      |   8 +-
 caiman/source_extraction/cnmf/online_cnmf.py  |  37 ++---
 caiman/source_extraction/cnmf/params.py       |  32 ++--
 .../source_extraction/cnmf/pre_processing.py  |   5 +-
 caiman/source_extraction/cnmf/spatial.py      |  34 ++--
 caiman/source_extraction/cnmf/temporal.py     |  14 +-
 caiman/source_extraction/cnmf/utilities.py    |   5 +-
 caiman/summary_images.py                      |   3 +-
 caiman/tests/comparison/comparison.py         |   5 +-
 caiman/tests/comparison_general.py            |   5 +-
 caiman/tests/comparison_humans.py             |   5 +-
 caiman/tests/comparison_humans_online.py      |   5 +-
 caiman/tests/test_deconvolution.py            |   6 +-
 caiman/utils/labelling.py                     |   2 +-
 caiman/utils/sbx_utils.py                     |   4 +-
 caiman/utils/stats.py                         |  14 +-
 caiman/utils/utils.py                         |  40 ++---
 demos/general/demo_OnACID.py                  |  17 +-
 demos/general/demo_OnACID_mesoscope.py        |  17 +-
 demos/general/demo_behavior.py                |  17 +-
 demos/general/demo_caiman_basic.py            |  17 +-
 demos/general/demo_pipeline.py                |  17 +-
 demos/general/demo_pipeline_NWB.py            |  17 +-
 demos/general/demo_pipeline_cnmfE.py          |  17 +-
 .../general/demo_pipeline_voltage_imaging.py  |  19 ++-
 docs/source/Getting_Started.rst               |  41 +++--
 39 files changed, 589 insertions(+), 491 deletions(-)

diff --git a/caiman/base/movies.py b/caiman/base/movies.py
index 1dc61924d..1322d22a9 100644
--- a/caiman/base/movies.py
+++ b/caiman/base/movies.py
@@ -318,10 +318,12 @@ def extract_shifts(self, max_shift_w: int = 5, max_shift_h: int = 5, template=No
             Exception 'Unknown motion correction method!'
 
         """
+        logger = logging.getLogger("caiman")
+
         min_val = np.percentile(self, 1)
         if min_val < -0.1:
-            logging.debug("min_val in extract_shifts: " + str(min_val))
-            logging.warning('Movie average is negative. Removing 1st percentile.')
+            logger.debug(f"min_val in extract_shifts: {min_val}")
+            logger.warning('Movie average is negative. 
Removing 1st percentile.') + logger.debug(f"min_val in extract_shifts: {min_val}") + logger.warning('Movie average is negative. Removing 1st percentile.') self = self - min_val else: min_val = 0 @@ -339,7 +341,7 @@ def extract_shifts(self, max_shift_w: int = 5, max_shift_h: int = 5, template=No template = np.median(self, axis=0) else: if np.percentile(template, 8) < -0.1: - logging.warning('Movie average is negative. Removing 1st percentile.') + logger.warning('Movie average is negative. Removing 1st percentile.') template = template - np.percentile(template, 1) template = template[ms_h:h_i - ms_h, ms_w:w_i - ms_w].astype(np.float32) @@ -350,7 +352,7 @@ def extract_shifts(self, max_shift_w: int = 5, max_shift_h: int = 5, template=No for i, frame in enumerate(self): if i % 100 == 99: - logging.debug(f"Frame {i + 1}") + logger.debug(f"Frame {i + 1}") if method == 'opencv': res = cv2.matchTemplate(frame, template, cv2.TM_CCORR_NORMED) top_left = cv2.minMaxLoc(res)[3] @@ -408,6 +410,8 @@ def apply_shifts(self, shifts, interpolation: str = 'linear', method: str = 'ope Exception 'Method not defined' """ + logger = logging.getLogger("caiman") + if not isinstance(self[0, 0, 0], np.float32): warnings.warn('Casting the array to float32') self = np.asanyarray(self, dtype=np.float32) @@ -417,40 +421,40 @@ def apply_shifts(self, shifts, interpolation: str = 'linear', method: str = 'ope interpolation = cv2.INTER_CUBIC else: interpolation = 3 - logging.debug('cubic interpolation') + logger.debug('cubic interpolation') elif interpolation == 'nearest': if method == 'opencv': interpolation = cv2.INTER_NEAREST else: interpolation = 0 - logging.debug('nearest interpolation') + logger.debug('nearest interpolation') elif interpolation == 'linear': if method == 'opencv': interpolation = cv2.INTER_LINEAR else: interpolation = 1 - logging.debug('linear interpolation') + logger.debug('linear interpolation') elif interpolation == 'area': if method == 'opencv': interpolation = cv2.INTER_AREA else: raise Exception('Method not defined') - logging.debug('area interpolation') + logger.debug('area interpolation') elif interpolation == 'lanczos4': if method == 'opencv': interpolation = cv2.INTER_LANCZOS4 else: interpolation = 4 - logging.debug('lanczos/biquartic interpolation') + logger.debug('lanczos/biquartic interpolation') else: raise Exception('Interpolation method not available') _, h, w = self.shape for i, frame in enumerate(self): if i % 100 == 99: - logging.debug(f"Frame {i + 1}") + logger.debug(f"Frame {i + 1}") sh_x_n, sh_y_n = shifts[i] @@ -587,26 +591,28 @@ def computeDFF(self, secsWindow: int = 5, quantilMin: int = 8, method: str = 'on Raises: Exception 'Unknown method' """ - logging.debug("computing minimum ...") + logger = logging.getLogger("caiman") + + logger.debug("computing minimum ...") sys.stdout.flush() if np.min(self) <= 0 and method != 'only_baseline': raise ValueError("All pixels must be positive") numFrames, linePerFrame, pixPerLine = np.shape(self) downsampfact = int(secsWindow * self.fr) - logging.debug(f"Downsample factor: {downsampfact}") + logger.debug(f"Downsample factor: {downsampfact}") elm_missing = int(np.ceil(numFrames * 1.0 / downsampfact) * downsampfact - numFrames) padbefore = int(np.floor(elm_missing / 2.)) padafter = int(np.ceil(elm_missing / 2.)) - logging.debug('Initial Size Image:' + str(np.shape(self))) + logger.debug(f'Initial Size Image: {np.shape(self)}') sys.stdout.flush() mov_out = movie(np.pad(self.astype(np.float32), ((padbefore, padafter), (0, 0), (0, 0)), 
mode='reflect'), **self.__dict__)
         numFramesNew, linePerFrame, pixPerLine = np.shape(mov_out)
 
         # compute baseline quickly
-        logging.debug("binning data ...")
+        logger.debug("binning data ...")
         sys.stdout.flush()
 
         if not in_place:
@@ -619,9 +625,9 @@ def computeDFF(self, secsWindow: int = 5, quantilMin: int = 8, method: str = 'on
                             order=order)
         movBL = np.percentile(movBL, quantilMin, axis=0)
 
-        logging.debug("interpolating data ...")
+        logger.debug("interpolating data ...")
         sys.stdout.flush()
-        logging.debug(f"movBL shape is {movBL.shape}")
+        logger.debug(f"movBL shape is {movBL.shape}")
         movBL = scipy.ndimage.zoom(np.array(movBL, dtype=np.float32), [downsampfact, 1, 1],
                                    order=1,
                                    mode='constant',
@@ -642,7 +648,7 @@ def computeDFF(self, secsWindow: int = 5, quantilMin: int = 8, method: str = 'on
             mov_out = movBL
             mov_out = mov_out[padbefore:len(movBL) - padafter, :, :]
 
-        logging.debug('Final Size Movie:' + str(self.shape))
+        logger.debug(f'Final Size Movie: {self.shape}')
         return mov_out, movie(movBL,
                               fr=self.fr,
                               start_time=self.start_time,
@@ -812,6 +818,8 @@ def local_correlations(self,
             rho: d1 x d2 [x d3] matrix, cross-correlation with adjacent pixels
 
         """
+        logger = logging.getLogger("caiman")
+
         T = self.shape[0]
         Cn = np.zeros(self.shape[1:])
         if T <= 3000:
@@ -823,7 +831,7 @@ def local_correlations(self,
             n_chunks = T // frames_per_chunk
 
             for jj, mv in enumerate(range(n_chunks - 1)):
-                logging.debug('number of chunks:' + str(jj) + ' frames: ' +
+                logger.debug(f'number of chunks: {jj} frames: ' +
                               str([mv * frames_per_chunk, (mv + 1) * frames_per_chunk]))
                 rho = caiman.summary_images.local_correlations(np.array(self[mv * frames_per_chunk:(mv + 1) * frames_per_chunk]),
                                                                eight_neighbours=eight_neighbours,
@@ -834,7 +842,7 @@ def local_correlations(self,
                     plt.imshow(Cn, cmap='gray')
                     plt.pause(.1)
 
-            logging.debug('number of chunks:' + str(n_chunks - 1) + ' frames: ' +
+            logger.debug(f'number of chunks: {n_chunks - 1} frames: ' +
                           str([(n_chunks - 1) * frames_per_chunk, T]))
             rho = caiman.summary_images.local_correlations(np.array(self[(n_chunks - 1) * frames_per_chunk:]),
                                                            eight_neighbours=eight_neighbours,
@@ -929,16 +937,18 @@ def resize(self, fx=1, fy=1, fz=1, interpolation=cv2.INTER_AREA):
         Returns:
             self (caiman movie)
         """
+        logger = logging.getLogger("caiman")
+
         T, d1, d2 = self.shape
         d = d1 * d2
         elm = d * T
         max_els = 2**61 - 1                                  # the bug for sizes >= 2**31 appears to be fixed now
-        if elm > max_els:
+        if elm > max_els: # XXX do we still need this fallback then? 
chunk_size = max_els // d new_m:list = [] - logging.debug('Resizing in chunks because of opencv bug') + logger.debug('Resizing in chunks because of opencv bug') for chunk in range(0, T, chunk_size): - logging.debug([chunk, np.minimum(chunk + chunk_size, T)]) + logger.debug([chunk, np.minimum(chunk + chunk_size, T)]) m_tmp = self[chunk:np.minimum(chunk + chunk_size, T)].copy() m_tmp = m_tmp.resize(fx=fx, fy=fy, fz=fz, interpolation=interpolation) if len(new_m) == 0: @@ -949,16 +959,16 @@ def resize(self, fx=1, fy=1, fz=1, interpolation=cv2.INTER_AREA): return new_m else: if fx != 1 or fy != 1: - logging.debug("reshaping along x and y") + logger.debug("reshaping along x and y") t, h, w = self.shape newshape = (int(w * fy), int(h * fx)) mov = [] - logging.debug(f"New shape is {newshape}") + logger.debug(f"New shape is {newshape}") for frame in self: mov.append(cv2.resize(frame, newshape, fx=fx, fy=fy, interpolation=interpolation)) self = movie(np.asarray(mov), **self.__dict__) if fz != 1: - logging.debug("reshaping along z") + logger.debug("reshaping along z") t, h, w = self.shape self = np.reshape(self, (t, h * w)) mov = cv2.resize(self, (h * w, int(fz * t)), fx=1, fy=fz, interpolation=interpolation) @@ -972,9 +982,11 @@ def guided_filter_blur_2D(self, guide_filter, radius: int = 5, eps=0): """ performs guided filtering on each frame. See opencv documentation of cv2.ximgproc.guidedFilter """ + logger = logging.getLogger("caiman") + for idx, fr in enumerate(self): if idx % 1000 == 0: - logging.debug("At index: " + str(idx)) + logger.debug(f"At index: {idx}") self[idx] = cv2.ximgproc.guidedFilter(guide_filter, fr, radius=radius, eps=eps) return self @@ -983,13 +995,15 @@ def bilateral_blur_2D(self, diameter: int = 5, sigmaColor: int = 10000, sigmaSpa """ performs bilateral filtering on each frame. See opencv documentation of cv2.bilateralFilter """ + logger = logging.getLogger("caiman") + if not isinstance(self[0, 0, 0], np.float32): - warnings.warn('Casting the array to float32') + warnings.warn('Casting the array to float32') # XXX consider just using logger self = np.asanyarray(self, dtype=np.float32) for idx, fr in enumerate(self): if idx % 1000 == 0: - logging.debug("At index: " + str(idx)) + logger.debug("At index: " + str(idx)) self[idx] = cv2.bilateralFilter(fr, diameter, sigmaColor, sigmaSpace) return self @@ -1015,9 +1029,10 @@ def gaussian_blur_2D(self, self: ndarray blurred movie """ + logger = logging.getLogger("caiman") for idx, fr in enumerate(self): - logging.debug(idx) + logger.debug(idx) self[idx] = cv2.GaussianBlur(fr, ksize=(kernel_size_x, kernel_size_y), sigmaX=kernel_std_x, @@ -1044,9 +1059,10 @@ def median_blur_2D(self, kernel_size: float = 3.0): self: ndarray blurred movie """ + logger = logging.getLogger("caiman") for idx, fr in enumerate(self): - logging.debug(idx) + logger.debug(idx) self[idx] = cv2.medianBlur(fr, ksize=kernel_size) return self @@ -1207,13 +1223,15 @@ def load(file_name: Union[str, list[str]], Exception 'File not found!' 
""" + logger = logging.getLogger("caiman") + # case we load movie from file if max(top, bottom, left, right) > 0 and isinstance(file_name, str): file_name = [file_name] # type: ignore # mypy doesn't like that this changes type if isinstance(file_name, list): if shape is not None: - logging.error('shape parameter not supported for multiple movie input') + logger.error('shape parameter not supported for multiple movie input') return load_movie_chain(file_name, fr=fr, @@ -1230,10 +1248,10 @@ def load(file_name: Union[str, list[str]], is3D=is3D) elif isinstance(file_name, tuple): - logging.debug(f'**** Processing input file {file_name} as individual frames *****') + logger.debug(f'**** Processing input file {file_name} as individual frames *****') if shape is not None: # XXX Should this be an Exception? - logging.error('movies.py:load(): A shape parameter is not supported for multiple movie input') + logger.error('movies.py:load(): A shape parameter is not supported for multiple movie input') else: return load_movie_chain(tuple([iidd for iidd in np.array(file_name)[subindices]]), fr=fr, start_time=start_time, @@ -1243,10 +1261,10 @@ def load(file_name: Union[str, list[str]], # If we got here we're parsing a single movie file if max(top, bottom, left, right) > 0: - logging.error('movies.py:load(): Parameters top,bottom,left,right are not supported for single movie input') + logger.error('movies.py:load(): Parameters top,bottom,left,right are not supported for single movie input') if channel is not None: - logging.error('movies.py:load(): channel parameter is not supported for single movie input') + logger.error('movies.py:load(): channel parameter is not supported for single movie input') if os.path.exists(file_name): basename, extension = os.path.splitext(file_name) @@ -1257,8 +1275,8 @@ def load(file_name: Union[str, list[str]], with tifffile.TiffFile(file_name) as tffl: multi_page = True if tffl.series[0].shape[0] > 1 else False if len(tffl.pages) == 1: - logging.warning('Your tif file is saved a single page' + - 'file. Performance will be affected') + logger.warning('Your tif file is saved a single page' + + 'file. 
Performance will be affected') multi_page = False if subindices is not None: # if isinstance(subindices, (list, tuple)): # is list or tuple: @@ -1308,7 +1326,7 @@ def load(file_name: Union[str, list[str]], # # We have a fallback of trying to use the pims package if OpenCV fails if 'CAIMAN_LOAD_AVI_FORCE_FALLBACK' in os.environ: # User requested we don't even try opencv - logging.debug("Loading AVI/MKV file: PIMS codepath requested") + logger.debug("Loading AVI/MKV file: PIMS codepath requested") do_opencv = False else: cap = cv2.VideoCapture(file_name) @@ -1322,7 +1340,7 @@ def load(file_name: Union[str, list[str]], do_opencv = False cap.release() cv2.destroyAllWindows() - logging.warning(f"OpenCV failed to parse {file_name}, falling back to pims") + logger.warning(f"OpenCV failed to parse {file_name}, falling back to pims") else: do_opencv = True @@ -1404,7 +1422,7 @@ def load(file_name: Union[str, list[str]], elif isinstance(sb, np.ndarray): dims[ind] = sb.shape[0] start_frame = subindices[0][0] - logging.debug(f"Subinds not none: start frame: {start_frame} and subinds: {subindices}") + logger.debug(f"Subinds not none: start frame: {start_frame} and subinds: {subindices}") else: subindices = [np.r_[range(dims[0])]] start_frame = 0 @@ -1477,12 +1495,12 @@ def load(file_name: Union[str, list[str]], if type(subindices).__module__ == 'numpy': subindices = subindices.tolist() if len(fgroup.shape) > 3: - logging.warning(f'fgroup.shape has dimensionality greater than 3 {fgroup.shape} in load') + logger.warning(f'fgroup.shape has dimensionality greater than 3 {fgroup.shape} in load') images = np.array(fgroup[subindices]).squeeze() return movie(images.astype(outtype)) else: - logging.debug('KEYS:' + str(f.keys())) + logger.debug('KEYS:' + str(f.keys())) raise Exception('Key not found in hdf5 file') elif extension == '.mmap': @@ -1495,14 +1513,14 @@ def load(file_name: Union[str, list[str]], images = images[subindices] if in_memory: - logging.debug('loading mmap file in memory') + logger.debug('loading mmap file in memory') images = np.array(images).astype(outtype) - logging.debug('mmap') + logger.debug('mmap') return movie(images, fr=fr) elif extension == '.sbx': - logging.debug('sbx') + logger.debug('sbx') meta_data = caiman.utils.sbx_utils.sbx_meta_data(basename) input_arr = caiman.utils.sbx_utils.sbxread(basename, subindices) return movie(input_arr, fr=fr, @@ -1515,7 +1533,7 @@ def load(file_name: Union[str, list[str]], else: raise Exception('Unknown file type') else: - logging.error(f"File request:[{file_name}] not found!") + logger.error(f"File request:[{file_name}] not found!") raise Exception(f'File {file_name} not found!') return movie(input_arr.astype(outtype), @@ -1559,6 +1577,8 @@ def load_movie_chain(file_list: list[str], movie corresponding to the concatenation og the input files """ + logger = logging.getLogger("caiman") + mov = [] for f in tqdm(file_list): m = load(f, @@ -1570,9 +1590,9 @@ def load_movie_chain(file_list: list[str], outtype=outtype, var_name_hdf5=var_name_hdf5) if channel is not None: - logging.debug(m.shape) + logger.debug(m.shape) m = m[channel].squeeze() - logging.debug(f"Movie shape: {m.shape}") + logger.debug(f"Movie shape: {m.shape}") if not is3D: if m.ndim == 2: @@ -1600,6 +1620,7 @@ def _load_behavior(file_name:str) -> Any: # with a particular inner structure. The keys in that file are things like trial_1, trial_12, .. 
# I refactored this out of load() to improve clarity of that function and to make it more clear # that user code should not do things that way. + logger = logging.getLogger("caiman") if not isinstance(file_name, str): raise Exception(f'Invalid type in _load_behavior(); please do not use this function outside of demo_behavior.py') @@ -1612,7 +1633,7 @@ def _load_behavior(file_name:str) -> Any: kk.sort(key=lambda x: int(x.split('_')[-1])) input_arr = [] for trial in kk: - logging.info('Loading ' + trial) + logger.info(f'Loading {trial}') input_arr.append(np.array(f[trial]['mov'])) input_arr = np.vstack(input_arr) @@ -1620,7 +1641,7 @@ def _load_behavior(file_name:str) -> Any: raise Exception(f'_load_behavior() only accepts hdf5 files formatted a certain way. Please do not use this function.') else: - logging.error(f"File request:[{file_name}] not found!") + logger.error(f"File request:[{file_name}] not found!") raise Exception(f'File {file_name} not found!') # Defaults from movies.load() at time this was refactored out @@ -1653,8 +1674,10 @@ def from_zip_file_to_movie(zipfile_name: str, start_end:Optional[tuple] = None) Returns: movie ''' + logger = logging.getLogger("caiman") + mov:list = [] - logging.info('unzipping file into movie object') + logger.info('unzipping file into movie object') if start_end is not None: num_frames = start_end[1] - start_end[0] @@ -1671,7 +1694,7 @@ def from_zip_file_to_movie(zipfile_name: str, start_end:Optional[tuple] = None) mov[counter] = np.array(Image.open(file)) if counter % 100 == 0: - logging.debug(f"counter/idx: {counter} {idx}") + logger.debug(f"counter/idx: {counter} {idx}") counter += 1 @@ -1763,6 +1786,8 @@ def load_iter(file_name: Union[str, list[str]], subindices=None, var_name_hdf5: Exception 'File not found!' """ + logger = logging.getLogger("caiman") + if isinstance(file_name, list) or isinstance(file_name, tuple): for fname in file_name: iterator = load_iter(fname, subindices, var_name_hdf5, outtype) @@ -1814,7 +1839,7 @@ def load_iter(file_name: Union[str, list[str]], subindices=None, var_name_hdf5: elif extension in ('.avi', '.mkv'): # First, let's see if OpenCV can handle this AVI file if 'CAIMAN_LOAD_AVI_FORCE_FALLBACK' in os.environ: # User requested we don't even try opencv - logging.debug("Loading AVI/MKV file: PIMS codepath requested") + logger.debug("Loading AVI/MKV file: PIMS codepath requested") do_opencv = False else: cap = cv2.VideoCapture(file_name) @@ -1822,7 +1847,7 @@ def load_iter(file_name: Union[str, list[str]], subindices=None, var_name_hdf5: width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) if length <= 0 or width <= 0 or height <= 0: # Not a perfect way to do this, but it's a start. Could also do a try/except block? 
- logging.warning(f"OpenCV failed to parse {file_name}, falling back to pims") + logger.warning(f"OpenCV failed to parse {file_name}, falling back to pims") do_opencv = False # Close up shop, and get ready for the alternative cap.release() @@ -1912,7 +1937,7 @@ def load_iter(file_name: Union[str, list[str]], subindices=None, var_name_hdf5: subindices=subindices, outtype=outtype, is3D=is3D): yield y else: - logging.error(f"File request:[{file_name}] not found!") + logger.error(f"File request:[{file_name}] not found!") raise Exception('File not found!') def get_file_size(file_name, var_name_hdf5:str='mov') -> tuple[tuple, Union[int, tuple]]: @@ -1934,6 +1959,8 @@ def get_file_size(file_name, var_name_hdf5:str='mov') -> tuple[tuple, Union[int, number of timesteps in each file """ + logger = logging.getLogger("caiman") + # TODO There is a lot of redundant code between this, load(), and load_iter() that should be unified somehow if isinstance(file_name, pathlib.Path): # We want to support these as input, but str has a broader set of operations that we'd like to use, so let's just convert. @@ -1995,7 +2022,7 @@ def get_file_size(file_name, var_name_hdf5:str='mov') -> tuple[tuple, Union[int, elif var_name_hdf5 in f['acquisition']: siz = f['acquisition'][var_name_hdf5]['data'].shape else: - logging.error(f'The file does not contain a variable named {var_name_hdf5}') + logger.error(f'The file does not contain a variable named {var_name_hdf5}') raise Exception('Variable not found. Use one of the above') T, dims = siz[0], siz[1:] elif extension in ('.sbx'): @@ -2097,10 +2124,16 @@ def play_movie(movie, Raises: Exception 'Unknown backend!' """ + logger = logging.getLogger("caiman") + # todo: todocument - it = True if (isinstance(movie, list) or isinstance(movie, tuple) or isinstance(movie, str)) else False + if isinstance(movie, list) or isinstance(movie, tuple) or isinstance(movie, str): + it = True + else: + it = False + if backend == 'pyplot': - logging.warning('Using pyplot back end: not recommended. Using opencv will yield faster, higher-quality results.') + logger.warning('Using pyplot back end: not recommended. 
Using opencv will yield faster, higher-quality results.') gain = float(gain) # convert to float in case we were passed an int if q_max < 100: diff --git a/caiman/base/rois.py b/caiman/base/rois.py index 417405828..38b158404 100644 --- a/caiman/base/rois.py +++ b/caiman/base/rois.py @@ -212,6 +212,7 @@ def nf_match_neurons_in_binary_masks(masks_gt, indices false pos """ + logger = logging.getLogger("caiman") _, d1, d2 = np.shape(masks_gt) dims = d1, d2 @@ -252,7 +253,7 @@ def nf_match_neurons_in_binary_masks(masks_gt, performance['precision'] = TP / (TP + FP) performance['accuracy'] = (TP + TN) / (TP + FP + FN + TN) performance['f1_score'] = 2 * TP / (2 * TP + FP + FN) - logging.debug(performance) + logger.debug(performance) idx_tp = np.where(np.array(costs) < thresh_cost)[0] idx_tp_ben = matches[0][idx_tp] # ground truth @@ -297,8 +298,8 @@ def nf_match_neurons_in_binary_masks(masks_gt, pl.show() pl.axis('off') except Exception as e: - logging.warning("not able to plot precision recall: graphics failure") - logging.warning(e) + logger.warning("not able to plot precision recall: graphics failure") + logger.warning(e) return idx_tp_gt, idx_tp_comp, idx_fn_gt, idx_fp_comp, performance @@ -392,11 +393,7 @@ def register_ROIs(A1, ROIs from session 2 aligned to session 1 """ - - # if 'csc_matrix' not in str(type(A1)): - # A1 = scipy.sparse.csc_matrix(A1) - # if 'csc_matrix' not in str(type(A2)): - # A2 = scipy.sparse.csc_matrix(A2) + logger = logging.getLogger("caiman") if 'ndarray' not in str(type(A1)): A1 = A1.toarray() @@ -488,7 +485,7 @@ def register_ROIs(A1, performance['precision'] = TP / (TP + FP) performance['accuracy'] = (TP + TN) / (TP + FP + FN + TN) performance['f1_score'] = 2 * TP / (2 * TP + FP + FN) - logging.info(performance) + logger.info(performance) if plot_results: if Cn is None: @@ -521,11 +518,6 @@ def register_ROIs(A1, pl.title('Mismatches') pl.axis('off') - -# except Exception as e: -# logging.warning("not able to plot precision recall usually because we are on travis") -# logging.warning(e) - return matched_ROIs1, matched_ROIs2, non_matched1, non_matched2, performance, A2 @@ -586,6 +578,7 @@ def register_multisession(A, by component k in A_union """ + logger = logging.getLogger("caiman") n_sessions = len(A) templates = list(templates) @@ -615,7 +608,7 @@ def register_multisession(A, enclosed_thr=enclosed_thr) mat_sess, mat_un, nm_sess, nm_un, _, A2 = reg_results - logging.info(len(mat_sess)) + logger.info(len(mat_sess)) A_union = A2.copy() A_union[:, mat_un] = A[sess][:, mat_sess] A_union = np.concatenate((A_union.toarray(), A[sess][:, nm_sess]), axis=1) @@ -763,6 +756,7 @@ def distance_masks(M_s:list, cm_s: list[list], max_dist: float, enclosed_thr: Op def find_matches(D_s, print_assignment: bool = False) -> tuple[list, list]: # todo todocument + logger = logging.getLogger("caiman") matches = [] costs = [] @@ -771,7 +765,7 @@ def find_matches(D_s, print_assignment: bool = False) -> tuple[list, list]: # we make a copy not to set changes in the original DD = D.copy() if np.sum(np.where(np.isnan(DD))) > 0: - logging.error('Exception: Distance Matrix contains invalid value NaN') + logger.error('Exception: Distance Matrix contains invalid value NaN') raise Exception('Distance Matrix contains invalid value NaN') # we do the hungarian @@ -784,10 +778,10 @@ def find_matches(D_s, print_assignment: bool = False) -> tuple[list, list]: for row, column in indexes2: value = DD[row, column] if print_assignment: - logging.debug(('(%d, %d) -> %f' % (row, column, value))) + 
logger.debug(f'({row}, {column}) -> {value}')
                 total.append(value)
-            logging.debug(('FOV: %d, shape: %d,%d total cost: %f' % (ii, DD.shape[0], DD.shape[1], np.sum(total))))
-            logging.debug((time.time() - t_start))
+            logger.debug(f'FOV: {ii}, shape: {DD.shape[0]},{DD.shape[1]} total cost: {np.sum(total)}')
+            logger.debug(time.time() - t_start)
         costs.append(total)
         # send back the results in the format we want
     return matches, costs
@@ -818,6 +812,8 @@ def link_neurons(matches: list[list[tuple]],
         neurons: list of arrays representing the indices of neurons in each FOV
     """
+    logger = logging.getLogger("caiman")
+
     if min_FOV_present is None:
         min_FOV_present = len(matches)
 
@@ -842,7 +838,7 @@ def link_neurons(matches: list[list[tuple]],
             neurons.append(neuron)
 
     neurons = np.array(neurons).T
-    logging.info(f'num_neurons: {num_neurons}')
+    logger.info(f'num_neurons: {num_neurons}')
     return neurons
 
@@ -917,6 +913,8 @@ def nf_read_roi(fileobj) -> np.ndarray:
     Adapted from https://gist.github.com/luispedro/3437255
     '''
+    logger = logging.getLogger("caiman")
+
     # This is based on:
     # http://rsbweb.nih.gov/ij/developer/source/ij/io/RoiDecoder.java.html
     # http://rsbweb.nih.gov/ij/developer/source/ij/io/RoiEncoder.java.html
@@ -957,8 +955,7 @@ def getfloat():
 
     magic = fileobj.read(4)
     if magic != 'Iout':
-        # raise IOError('Magic number not found')
-        logging.warning('Magic number not found')
+        logger.warning('Magic number not found')
     version = get16()
 
     # It seems that the roi type field occupies 2 Bytes, but only one is used
@@ -967,13 +964,6 @@ def getfloat():
     # Discard second Byte:
     get8()
 
-    # if not (0 <= roi_type < 11):
-    #     logging.error(('roireader: ROI type %s not supported' % roi_type))
-    #
-    # if roi_type != 7:
-    #
-    #     logging.error(('roireader: ROI type %s not supported (!= 7)' % roi_type))
-
     top = get16()
     left = get16()
    bottom = get16()
@@ -1050,6 +1040,8 @@ def nf_merge_roi_zip(fnames: list[str], idx_to_keep: list[list], new_fold: str):
             name of the output zip file (without .zip extension)
     """
+    logger = logging.getLogger("caiman")
+
     folders_rois = []
     files_to_keep = []
     # unzip the files and keep only the ones that are requested
@@ -1058,7 +1050,7 @@ def nf_merge_roi_zip(fnames: list[str], idx_to_keep: list[list], new_fold: str):
         folders_rois.append(dirpath)
         with zipfile.ZipFile(fn) as zf:
             name_rois = zf.namelist()
-        logging.debug(len(name_rois))
+        logger.debug(len(name_rois))
         zip_ref = zipfile.ZipFile(fn, 'r')
         zip_ref.extractall(dirpath)
         files_to_keep.append([os.path.join(dirpath, ff) for ff in np.array(name_rois)[idx]])
@@ -1109,6 +1101,8 @@ def extract_binary_masks_blob(A,
         neg_examples:
 
     """
+    logger = logging.getLogger("caiman")
+
     params = cv2.SimpleBlobDetector_Params()
     params.minCircularity = minCircularity
     params.minInertiaRatio = minInertiaRatio
@@ -1136,7 +1130,7 @@ def extract_binary_masks_blob(A,
     neg_examples = []
 
     for count, comp in enumerate(A.tocsc()[:].T):
-        logging.debug(count)
+        logger.debug(count)
         comp_d = np.array(comp.todense())
         gray_image = np.reshape(comp_d, dims, order='F')
         gray_image = (gray_image - np.min(gray_image)) / \
@@ -1161,7 +1155,7 @@ def extract_binary_masks_blob(A,
             edges = (label_objects == (1 + idx_largest))
             edges = scipy.ndimage.binary_fill_holes(edges)
         else:
-            logging.warning('empty component')
+            logger.warning('empty component')
             edges = np.zeros_like(edges)
 
         masks_ws.append(edges)
@@ -1265,6 +1259,7 @@ def detect_duplicates_and_subsets(binary_masks,
                                   dist_thr: float = 0.1,
                                   min_dist=10,
                                   thresh_subset: float = 0.8):
+    logger = logging.getLogger("caiman")
    cm =
[scipy.ndimage.center_of_mass(mm) for mm in binary_masks] sp_rois = scipy.sparse.csc_matrix(np.reshape(binary_masks, (binary_masks.shape[0], -1)).T) @@ -1272,7 +1267,7 @@ def detect_duplicates_and_subsets(binary_masks, np.fill_diagonal(D, 1) overlap = sp_rois.T.dot(sp_rois).toarray() sz = np.array(sp_rois.sum(0)) - logging.info(sz.shape) + logger.info(sz.shape) overlap = overlap / sz.T np.fill_diagonal(overlap, 0) # pairs of duplicate indices @@ -1287,7 +1282,7 @@ def detect_duplicates_and_subsets(binary_masks, metric = r_values.squeeze() else: metric = sz.squeeze() - logging.debug('***** USING MAX AREA BY DEFAULT') + logger.debug('***** USING MAX AREA BY DEFAULT') overlap_tmp = overlap.copy() >= thresh_subset overlap_tmp = overlap_tmp * metric[:, None] diff --git a/caiman/base/timeseries.py b/caiman/base/timeseries.py index 55aa127d5..334b2d786 100644 --- a/caiman/base/timeseries.py +++ b/caiman/base/timeseries.py @@ -173,17 +173,19 @@ def save(self, Exception 'Extension Unknown' """ + logger = logging.getLogger("caiman") + file_name = caiman.paths.fn_relocated(file_name) name, extension = os.path.splitext(file_name)[:2] # name is only used by the memmap saver extension = extension.lower() - logging.debug("Parsing extension " + str(extension)) + logger.debug(f"Parsing extension {extension}") if extension in ['.tif', '.tiff', '.btf']: with tifffile.TiffWriter(file_name, bigtiff=bigtiff, imagej=imagej) as tif: if "%4d%02d%02d" % tuple(map(int, tifffile.__version__.split('.'))) >= '20200813': def foo(i): if i % 200 == 0: - logging.debug(str(i) + ' frames saved') + logger.debug(f'{i} frames saved') curfr = self[i].copy() if to32 and not ('float32' in str(self.dtype)): curfr = curfr.astype(np.float32) @@ -196,7 +198,7 @@ def foo(i): else: for i in range(self.shape[0]): if i % 200 == 0: - logging.debug(str(i) + ' frames saved') + logger.debug(f'{i} frames saved') curfr = self[i].copy() if to32 and not ('float32' in str(self.dtype)): curfr = curfr.astype(np.float32) @@ -295,9 +297,9 @@ def foo(i): try: dset.attrs["file_name"] = [a.encode('utf8') for a in self.file_name] except: - logging.warning('No file saved') + logger.warning('No file saved') if self.meta_data[0] is not None: - logging.debug("Metadata for saved file: " + str(self.meta_data)) + logger.debug("Metadata for saved file: " + str(self.meta_data)) dset.attrs["meta_data"] = cpk.dumps(self.meta_data) return file_name elif extension == '.mmap': @@ -370,7 +372,7 @@ def foo(i): return file_name else: - logging.error("Extension " + str(extension) + " unknown") + logger.error(f"Extension {extension} unknown") raise Exception('Extension Unknown') @@ -382,6 +384,7 @@ def concatenate(*args, **kwargs): mov: XMovie object """ # todo: todocument return + logger = logging.getLogger("caiman") frRef = None for arg in args: @@ -400,6 +403,6 @@ def concatenate(*args, **kwargs): try: return obj.__class__(np.concatenate(*args, **kwargs), **obj.__dict__) except: - logging.debug('no meta information passed') + logger.debug('no meta information passed') return obj.__class__(np.concatenate(*args, **kwargs)) diff --git a/caiman/base/traces.py b/caiman/base/traces.py index 5e9962b79..973f1fca1 100644 --- a/caiman/base/traces.py +++ b/caiman/base/traces.py @@ -58,18 +58,20 @@ def computeDFF(self, window_sec=5, minQuantile=20): ValueError "All traces must be positive" ValueError "The window must be shorter than the total length" """ + logger = logging.getLogger("caiman") + if np.min(self) <= 0: raise ValueError("All traces must be positive") T, _ = self.shape 
window = int(window_sec * self.fr) - logging.debug(window) + logger.debug(window) if window >= T: raise ValueError("The window must be shorter than the total length") tracesDFF = [] for tr in self.T: - logging.debug(f"TR Shape is {tr.shape}") + logger.debug(f"TR Shape is {tr.shape}") traceBL = [np.percentile(tr[i:i + window], minQuantile) for i in range(1, len(tr) - window)] missing = np.percentile(tr[-window:], minQuantile) missing = np.repeat(missing, window + 1) diff --git a/caiman/cluster.py b/caiman/cluster.py index 323674b27..dde1b336f 100644 --- a/caiman/cluster.py +++ b/caiman/cluster.py @@ -21,9 +21,6 @@ import time from typing import Any, Optional, Union -logger = logging.getLogger(__name__) - - def extract_patch_coordinates(dims: tuple, rf: Union[list, tuple], stride: Union[list[int], tuple], @@ -107,6 +104,7 @@ def start_server(ipcluster: str = "ipcluster", ncpus: int = None) -> None: ipcluster binary file name; requires 4 path separators on Windows. ipcluster="C:\\\\Anaconda3\\\\Scripts\\\\ipcluster.exe" Default: "ipcluster" """ + logger = logging.getLogger("caiman") logger.info("Starting cluster...") if ncpus is None: ncpus = psutil.cpu_count() @@ -142,6 +140,7 @@ def stop_server(ipcluster: str = 'ipcluster', pdir: str = None, profile: str = N dview: Undocumented """ + logger = logging.getLogger("caiman") if 'multiprocessing' in str(type(dview)): dview.terminate() else: @@ -216,6 +215,7 @@ def setup_cluster(backend:str = 'multiprocessing', number of workers in dview. None means single core mode in use. """ + logger = logging.getLogger("caiman") sys.stdout.flush() # XXX Unsure why we do this if n_processes is None: n_processes = np.maximum(int(psutil.cpu_count() - 1), 1) diff --git a/caiman/components_evaluation.py b/caiman/components_evaluation.py index 6b6e353b7..0f0ed295e 100644 --- a/caiman/components_evaluation.py +++ b/caiman/components_evaluation.py @@ -136,12 +136,14 @@ def compute_eccentricity(A, dims, order='F'): def find_activity_intervals(C, Npeaks: int = 5, tB=-3, tA=10, thres: float = 0.3) -> list: # todo todocument + logger = logging.getLogger("caiman") + K, T = np.shape(C) L:list = [] for i in range(K): if np.sum(np.abs(np.diff(C[i, :]))) == 0: L.append([]) - logging.debug('empty component at:' + str(i)) + logger.debug('empty component at:' + str(i)) continue indexes = peakutils.indexes(C[i, :], thres=thres) srt_ind = indexes[np.argsort(C[i, indexes])][::-1] @@ -204,6 +206,7 @@ def classify_components_ep(Y, A, C, b, f, Athresh=0.1, Npeaks=5, tB=-3, tA=10, t significant_samples: list Frames that were used for computing correlation values """ + logger = logging.getLogger("caiman") K, _ = np.shape(C) A = csc_matrix(A) @@ -218,7 +221,7 @@ def classify_components_ep(Y, A, C, b, f, Athresh=0.1, Npeaks=5, tB=-3, tA=10, t significant_samples:list[Any] = [] for i in range(K): if (i + 1) % 200 == 0: # Show status periodically - logging.info('Components evaluated:' + str(i)) + logger.info(f'Components evaluated: {i}') if LOC[i] is not None: atemp = A[:, i].toarray().flatten() atemp[np.isnan(atemp)] = np.nanmean(atemp) @@ -230,13 +233,13 @@ def classify_components_ep(Y, A, C, b, f, Athresh=0.1, Npeaks=5, tB=-3, tA=10, t if len(indexes) == 0: indexes = set(LOC[i]) - logging.warning(f'Component {i} is only active jointly with neighboring components. Space ' + + logger.warning(f'Component {i} is only active jointly with neighboring components. 
Space ' + 'correlation calculation might be unreliable.') indexes = np.array(list(indexes)).astype(int) px = np.where(atemp > 0)[0] if px.size < 3: - logging.warning(f'Component {i} is almost empty. Space correlation is set to 0.') + logger.warning(f'Component {i} is almost empty. Space correlation is set to 0.') rval[i] = 0 significant_samples.append({0}) else: @@ -264,6 +267,7 @@ def evaluate_components_CNN(A, if isGPU is false, and the environment variable 'CAIMAN_ALLOW_GPU' is not set, then this code will try not to use a GPU. Otherwise it will use one if it finds it. """ + logger = logging.getLogger("caiman") # TODO: Find a less ugly way to do this if not isGPU and 'CAIMAN_ALLOW_GPU' not in os.environ: @@ -273,10 +277,10 @@ def evaluate_components_CNN(A, os.environ["KERAS_BACKEND"] = "tensorflow" from tensorflow.keras.models import model_from_json use_keras = True - logging.info('Using Keras') + logger.info('Using Keras') except (ModuleNotFoundError): use_keras = False - logging.info('Using Tensorflow') + logger.info('Using Tensorflow') if loaded_model is None: if use_keras: @@ -304,7 +308,7 @@ def evaluate_components_CNN(A, print(f"USING MODEL (tensorflow API): {model_file}") loaded_model = caiman.utils.utils.load_graph(model_file) - logging.debug("Loaded model from disk") + logger.debug("Loaded model from disk") half_crop = np.minimum(gSig[0] * 4 + 1, patch_size), np.minimum(gSig[1] * 4 + 1, patch_size) dims = np.array(dims) @@ -406,21 +410,22 @@ def evaluate_components(Y: np.ndarray, significant_samples: ndarray indexes of samples used to obtain the spatial mask by average """ + logger = logging.getLogger("caiman") tB = np.minimum(-2, np.floor(-5. / 30 * final_frate)) tA = np.maximum(5, np.ceil(25. / 30 * final_frate)) - logging.info('tB:' + str(tB) + ',tA:' + str(tA)) + logger.info(f'{tB=},{tA=}') dims, T = np.shape(Y)[:-1], np.shape(Y)[-1] Yr = np.reshape(Y, (np.prod(dims), T), order='F') - logging.debug('Computing event exceptionality delta') + logger.debug('Computing event exceptionality delta') fitness_delta, erfc_delta, _, _ = compute_event_exceptionality(np.diff(traces, axis=1), robust_std=robust_std, N=N, sigma_factor=sigma_factor) - logging.debug('Removing Baseline') + logger.debug('Removing Baseline') if remove_baseline: num_samps_bl = np.minimum(np.shape(traces)[-1]// 5, 800) slow_baseline = False @@ -438,11 +443,11 @@ def evaluate_components(Y: np.ndarray, tr_tmp = np.pad(traces.T, ((padbefore, padafter), (0, 0)), mode='reflect') numFramesNew, num_traces = np.shape(tr_tmp) # compute baseline quickly - logging.debug("binning data ...") + logger.debug("binning data ...") tr_BL = np.reshape(tr_tmp, (downsampfact, numFramesNew // downsampfact, num_traces), order='F') tr_BL = np.percentile(tr_BL, 8, axis=0) - logging.debug("interpolating data ...") - logging.debug(tr_BL.shape) + logger.debug("interpolating data ...") + logger.debug(tr_BL.shape) tr_BL = scipy.ndimage.zoom(np.array(tr_BL, dtype=np.float32), [downsampfact, 1], order=3, mode='constant', @@ -453,13 +458,13 @@ def evaluate_components(Y: np.ndarray, else: traces -= tr_BL[padbefore:-padafter].T - logging.debug('Computing event exceptionality') + logger.debug('Computing event exceptionality') fitness_raw, erfc_raw, _, _ = compute_event_exceptionality(traces, robust_std=robust_std, N=N, sigma_factor=sigma_factor) - logging.debug('Evaluating spatial footprint') + logger.debug('Evaluating spatial footprint') # compute the overlap between spatial and movie average across samples with significant events r_values, 
significant_samples = classify_components_ep(Yr, A, @@ -750,9 +755,10 @@ def estimate_components_quality(traces, """ # TODO: Consider always returning it all and let the caller ignore what it does not want + logger = logging.getLogger("caiman") if 'memmap' not in str(type(Y)): - logging.warning('NOT MEMORY MAPPED. FALLING BACK ON SINGLE CORE IMPLEMENTATION') + logger.warning('NOT MEMORY MAPPED. FALLING BACK ON SINGLE CORE IMPLEMENTATION') fitness_raw, fitness_delta, erfc_raw, erfc_delta, r_values, _ = \ evaluate_components(Y, traces, A, C, b, f, final_frate, remove_baseline=remove_baseline, N=N, robust_std=robust_std, Athresh=Athresh, @@ -782,7 +788,7 @@ def estimate_components_quality(traces, if dview is None: res = map(evaluate_components_placeholder, params) else: - logging.info('Component evaluation in parallel') + logger.info('Component evaluation in parallel') if 'multiprocessing' in str(type(dview)): res = dview.map_async(evaluate_components_placeholder, params).get(4294967) else: diff --git a/caiman/mmapping.py b/caiman/mmapping.py index dc0f670a6..1f15f7988 100644 --- a/caiman/mmapping.py +++ b/caiman/mmapping.py @@ -45,8 +45,9 @@ def load_memmap(filename: str, mode: str = 'r') -> tuple[Any, tuple, int]: ValueError "Unknown file extension" """ + logger = logging.getLogger("caiman") if pathlib.Path(filename).suffix != '.mmap': - logging.error(f"Unknown extension for file {filename}") + logger.error(f"Unknown extension for file {filename}") raise ValueError(f'Unknown file extension for file {filename} (should be .mmap)') # Strip path components and use CAIMAN_DATA/example_movies # TODO: Eventually get the code to save these in a different dir @@ -174,13 +175,14 @@ def save_memmap_join(mmap_fnames:list[str], base_name: str = None, n_chunks: int add_to_mov: (undocumented) """ + logger = logging.getLogger("caiman") tot_frames = 0 order = 'C' for f in mmap_fnames: cleaner_f = caiman.paths.fn_relocated(f) Yr, dims, T = load_memmap(cleaner_f) - logging.debug(f"save_memmap_join (loading data): {cleaner_f} {T}") + logger.debug(f"save_memmap_join (loading data): {cleaner_f} {T}") tot_frames += T del Yr @@ -193,7 +195,7 @@ def save_memmap_join(mmap_fnames:list[str], base_name: str = None, n_chunks: int fname_tot = caiman.paths.memmap_frames_filename(base_name, dims, tot_frames, order) fname_tot = os.path.join(os.path.split(mmap_fnames[0])[0], fname_tot) fname_tot = caiman.paths.fn_relocated(fname_tot) - logging.info(f"Memmap file for fname_tot: {fname_tot}") + logger.info(f"Memmap file for fname_tot: {fname_tot}") big_mov = np.memmap(fname_tot, mode='w+', dtype=np.float32, shape=prepare_shape((d, tot_frames)), order='C') @@ -220,18 +222,20 @@ def save_memmap_join(mmap_fnames:list[str], base_name: str = None, n_chunks: int np.savez(caiman.paths.fn_relocated(base_name + '.npz'), mmap_fnames=mmap_fnames, fname_tot=fname_tot) - logging.info('Deleting big mov') + logger.info('Deleting big mov') del big_mov sys.stdout.flush() return fname_tot def my_map(dv, func, args) -> list: + logger = logging.getLogger("caiman") + v = dv rc = v.client # scatter 'id', so id=0,1,2 on engines 0,1,2 dv.scatter('id', rc.ids, flatten=True) - logging.debug(dv['id']) + logger.debug(dv['id']) amr = v.map(func, args) pending = set(amr.msg_ids) @@ -248,15 +252,15 @@ def my_map(dv, func, args) -> list: # update pending to exclude those that just finished pending = pending.difference(finished) if counter % 10 == 0: - logging.debug(amr.progress) + logger.debug(amr.progress) for msg_id in finished: # we know these are done, 
so don't worry about blocking ar = rc.get_result(msg_id) - logging.debug(f"job id {msg_id} finished on engine {ar.engine_id}") # TODO: Abstract out the ugly bits - logging.debug("with stdout:") - logging.debug(' ' + ar.stdout.replace('\n', '\n ').rstrip()) - logging.debug("and errors:") - logging.debug(' ' + ar.stderr.replace('\n', '\n ').rstrip()) + logger.debug(f"job id {msg_id} finished on engine {ar.engine_id}") # TODO: Abstract out the ugly bits + logger.debug("with stdout:") + logger.debug(' ' + ar.stdout.replace('\n', '\n ').rstrip()) + logger.debug("and errors:") + logger.debug(' ' + ar.stderr.replace('\n', '\n ').rstrip()) # note that each job in a map always returns a list of length chunksize # even if chunksize == 1 results_all.update(ar.get_dict()) @@ -269,23 +273,25 @@ def my_map(dv, func, args) -> list: def save_portion(pars) -> int: # todo: todocument + logger = logging.getLogger("caiman") + use_mmap_save = False big_mov_fn, d, tot_frames, fnames, idx_start, idx_end, add_to_mov = pars big_mov_fn = caiman.paths.fn_relocated(big_mov_fn) Ttot = 0 Yr_tot = np.zeros((idx_end - idx_start, tot_frames), dtype=np.float32) - logging.debug(f"Shape of Yr_tot is {Yr_tot.shape}") + logger.debug(f"Shape of Yr_tot is {Yr_tot.shape}") for f in fnames: full_f = caiman.paths.fn_relocated(f) - logging.debug(f"Saving portion to {full_f}") + logger.debug(f"Saving portion to {full_f}") Yr, _, T = load_memmap(full_f) Yr_tot[:, Ttot:Ttot + T] = np.ascontiguousarray(Yr[idx_start:idx_end], dtype=np.float32) + np.float32(add_to_mov) Ttot = Ttot + T del Yr - logging.debug(f"Index start and end are {idx_start} and {idx_end}") + logger.debug(f"Index start and end are {idx_start} and {idx_end}") if use_mmap_save: big_mov = np.memmap(big_mov_fn, mode='r+', dtype=np.float32, shape=prepare_shape((d, tot_frames)), order='C') @@ -299,15 +305,15 @@ def save_portion(pars) -> int: f.write(Yr_tot) computed_position = np.uint64(idx_end * np.uint64(Yr_tot.dtype.itemsize) * tot_frames) if f.tell() != computed_position: - logging.critical(f"Error in mmap portion write: at position {f.tell()}") - logging.critical( + logger.critical(f"Error in mmap portion write: at position {f.tell()}") + logger.critical( f"But should be at position {idx_end} * {Yr_tot.dtype.itemsize} * {tot_frames} = {computed_position}" ) f.close() raise Exception('Internal error in mmapping: Actual position does not match computed position') del Yr_tot - logging.debug('done') + logger.debug('done') return Ttot def save_place_holder(pars:list) -> str: @@ -392,6 +398,8 @@ def save_memmap(filenames:list[str], the name will contain the frame dimensions and the number of frames """ + logger = logging.getLogger("caiman") + if not isinstance(filenames, list): raise Exception('save_memmap: input should be a list of filenames') @@ -409,7 +417,7 @@ def save_memmap(filenames:list[str], or (xy_shifts is not None) or (add_to_movie != 0) or (border_to_0>0)\ or slices is not None: - logging.debug('Distributing memory map over many files') + logger.debug('Distributing memory map over many files') # Here we make a bunch of memmap files in the right order. Same parameters fname_parts = caiman.save_memmap_each(filenames, base_name=base_name, @@ -439,7 +447,7 @@ def save_memmap(filenames:list[str], Ttot = 0 for idx, f in enumerate(filenames): if isinstance(f, str): # Might not always be filenames. 
- logging.debug(f) + logger.debug(f) if is_3D: Yr = f if not (isinstance(f, str)) else tifffile.imread(f) @@ -512,7 +520,7 @@ def save_memmap(filenames:list[str], big_mov[:, Ttot:Ttot + T] = Yr del big_mov else: - logging.debug('SAVING WITH numpy.tofile()') + logger.debug('SAVING WITH numpy.tofile()') Yr.tofile(fname_tot) else: big_mov = np.memmap(fname_tot, @@ -548,11 +556,12 @@ def parallel_dot_product(A: np.ndarray, b, block_size: int = 5000, dview=None, t b: time x comps """ + logger = logging.getLogger("caiman") pars = [] d1, d2 = np.shape(A) b = pickle.dumps(b) - logging.debug(f'parallel dot product block size: {block_size}') + logger.debug(f'parallel dot product block size: {block_size}') if block_size < d1: for idx in range(0, d1 - block_size, block_size): @@ -567,7 +576,7 @@ def parallel_dot_product(A: np.ndarray, b, block_size: int = 5000, dview=None, t idx_to_pass = list(range(d1)) pars.append([A.filename, idx_to_pass, b, transpose]) - logging.debug('Start product') + logger.debug('Start product') b = pickle.loads(b) if transpose: @@ -578,7 +587,7 @@ def parallel_dot_product(A: np.ndarray, b, block_size: int = 5000, dview=None, t if dview is None: if transpose: # b = pickle.loads(b) - logging.debug('Transposing') + logger.debug('Transposing') for _, pr in enumerate(pars): iddx, rs = dot_place_holder(pr) output = output + rs @@ -595,16 +604,16 @@ def parallel_dot_product(A: np.ndarray, b, block_size: int = 5000, dview=None, t else: results = dview.map_sync(dot_place_holder, pars[itera:itera + num_blocks_per_run]) - logging.debug('Processed:' + str([itera, itera + len(results)])) + logger.debug('Processed:' + str([itera, itera + len(results)])) if transpose: - logging.debug('Transposing') + logger.debug('Transposing') for _, res in enumerate(results): output += res[1] else: - logging.debug('Filling') + logger.debug('Filling') for res in results: output[res[0]] = res[1] @@ -615,12 +624,13 @@ def parallel_dot_product(A: np.ndarray, b, block_size: int = 5000, dview=None, t def dot_place_holder(par:list) -> tuple: # todo: todocument + logger = logging.getLogger("caiman") A_name, idx_to_pass, b_, transpose = par A_, _, _ = load_memmap(A_name) b_ = pickle.loads(b_).astype(np.float32) - logging.debug((idx_to_pass[-1])) + logger.debug((idx_to_pass[-1])) if 'sparse' in str(type(b_)): if transpose: # outp = (b_.tocsr()[idx_to_pass].T.dot( @@ -639,6 +649,7 @@ def dot_place_holder(par:list) -> tuple: def save_tif_to_mmap_online(movie_iterable, save_base_name='YrOL_', order='C', add_to_movie=0, border_to_0=0) -> str: # todo: todocument + logger = logging.getLogger("caiman") if isinstance(movie_iterable, str): # Allow specifying a filename rather than its data rep with tifffile.TiffFile(movie_iterable) as tf: # And load it if that happens @@ -659,7 +670,7 @@ def save_tif_to_mmap_online(movie_iterable, save_base_name='YrOL_', order='C', a for page in movie_iterable: if count % 100 == 0: - logging.debug(count) + logger.debug(count) if 'tifffile' in str(type(movie_iterable[0])): page = page.asarray() diff --git a/caiman/motion_correction.py b/caiman/motion_correction.py index 6ccbe570e..912cd0bea 100644 --- a/caiman/motion_correction.py +++ b/caiman/motion_correction.py @@ -45,11 +45,13 @@ import numpy as np from numpy.fft import ifftshift import os +import scipy +from skimage.transform import resize as resize_sk +from skimage.transform import warp as warp_sk import sys import tifffile +from tqdm import tqdm from typing import Optional -from skimage.transform import resize as resize_sk -from 
skimage.transform import warp as warp_sk import caiman import caiman.base.movies @@ -166,11 +168,12 @@ def __init__(self, fname, min_mov=None, dview=None, max_shifts=(6, 6), niter_rig self """ + logger = logging.getLogger("caiman") if 'ndarray' in str(type(fname)) or isinstance(fname, caiman.base.movies.movie): mc_tempfile = caiman.paths.fn_relocated('tmp_mov_mot_corr.hdf5') if os.path.isfile(mc_tempfile): os.remove(mc_tempfile) - logging.info(f"Creating file for motion correction: {mc_tempfile}") + logger.info(f"Creating file for motion correction: {mc_tempfile}") caiman.movie(fname).save(mc_tempfile) fname = [mc_tempfile] @@ -202,7 +205,7 @@ def __init__(self, fname, min_mov=None, dview=None, max_shifts=(6, 6), niter_rig self.is3D = bool(is3D) self.indices = indices if self.use_cuda and not HAS_CUDA: - logging.debug("pycuda is unavailable. Falling back to default FFT.") + logger.debug("pycuda is unavailable. Falling back to default FFT.") def motion_correct(self, template=None, save_movie=False): """general function for performing all types of motion correction. The @@ -277,8 +280,10 @@ def motion_correct_rigid(self, template=None, save_movie=False) -> None: self.shifts_rig: shifts in x and y (and z if 3D) per frame """ - logging.debug('Entering Rigid Motion Correction') - logging.debug(-self.min_mov) # XXX why the minus? + logger = logging.getLogger("caiman") + + logger.debug('Entering Rigid Motion Correction') + logger.debug(f"{self.min_mov=}") self.total_template_rig = template self.templates_rig:list = [] self.fname_tot_rig:list = [] @@ -336,10 +341,11 @@ def motion_correct_pwrigid(self, save_movie:bool=True, template:np.ndarray=None, Raises: Exception: 'Error: Template contains NaNs, Please review the parameters' """ + logger = logging.getLogger("caiman") num_iter = 1 if template is None: - logging.info('Generating template by rigid motion correction') + logger.info('Generating template by rigid motion correction') self.motion_correct_rigid() self.total_template_els = self.total_template_rig.copy() else: @@ -415,6 +421,7 @@ def apply_shifts_movie(self, fname, rigid_shifts:bool=None, save_memmap:bool=Fal m_reg: caiman movie object caiman movie object with applied shifts (not memory mapped) """ + logger = logging.getLogger("caiman") Y = caiman.load(fname).astype(np.float32) if remove_min: @@ -423,7 +430,7 @@ def apply_shifts_movie(self, fname, rigid_shifts:bool=None, save_memmap:bool=Fal Y -= Y.min() if rigid_shifts is not None: - logging.warning('The rigid_shifts flag is deprecated and it is ' + + logger.warning('The rigid_shifts flag is deprecated and it is ' + 'being ignored. 
The value is read directly from' +
                           ' mc.pw_rigid and is currently set to the opposite' +
                           f' of {self.pw_rigid}')
@@ -750,6 +757,7 @@ def motion_correct_oneP_nonrigid(
     return mc
 
 def motion_correct_online_multifile(list_files, add_to_movie, order='C', **kwargs):
+    logger = logging.getLogger("caiman")
     # todo todocument
 
     kwargs['order'] = order
@@ -762,7 +770,7 @@ def motion_correct_online_multifile(list_files, add_to_movie, order='C', **kwarg
         kwargs_['order'] = order
         total_frames = 0
         for file_ in list_files:
-            logging.info(('Processing:' + file_))
+            logger.info(f'Processing: {file_}')
             kwargs_['template'] = template
             kwargs_['save_base_name'] = os.path.splitext(file_)[0]
             tffl = tifffile.TiffFile(file_)
@@ -781,6 +789,7 @@ def motion_correct_online(movie_iterable, add_to_movie, max_shift_w=25, max_shif
                           border_to_0=0, n_iter=1, remove_blanks=False, show_template=False,
                           return_mov=False, use_median_as_template=False):
     # todo todocument
+    logger = logging.getLogger("caiman")
 
     shifts = []    # store the amount of shift in each frame
     xcorrs = []
@@ -790,7 +799,7 @@ def motion_correct_online(movie_iterable, add_to_movie, max_shift_w=25, max_shif
 
     if 'tifffile' in str(type(movie_iterable[0])):
         if len(movie_iterable) == 1:
-            logging.warning(
+            logger.warning(
                 '******** WARNING ****** NEED TO LOAD IN MEMORY SINCE SHAPE OF PAGE IS THE FULL MOVIE')
             movie_iterable = movie_iterable.asarray()
             init_mov = movie_iterable[:init_frames_template]
@@ -801,7 +810,7 @@ def motion_correct_online(movie_iterable, add_to_movie, max_shift_w=25, max_shif
         init_mov = movie_iterable[slice(0, init_frames_template, 1)]
 
     dims = (len(movie_iterable),) + movie_iterable[0].shape # TODO: Refactor so length is either tracked separately or is last part of tuple
-    logging.debug("dimensions:" + str(dims))
+    logger.debug("dimensions:" + str(dims))
 
     if use_median_as_template:
         template = bin_median(movie_iterable)
@@ -885,9 +894,9 @@ def motion_correct_online(movie_iterable, add_to_movie, max_shift_w=25, max_shif
                                vmax=350, interpolation='none')
                     plt.pause(.001)
 
-                logging.debug('Relative change in template:' + str(
+                logger.debug('Relative change in template:' + str(
                     np.sum(np.abs(template - template_old)) / np.sum(np.abs(template))))
-                logging.debug('Iteration:' + str(count))
+                logger.debug('Iteration:' + str(count))
 
             if border_to_0 > 0:
                 new_img[:border_to_0, :] = min_mov
@@ -916,7 +925,7 @@ def motion_correct_online(movie_iterable, add_to_movie, max_shift_w=25, max_shif
 
             if show_movie:
                 cv2.imshow('frame', new_img / 500)
-                logging.info(shift)
+                logger.info(shift)
                 if not np.any(np.remainder(shift, 1) == (0, 0)):
                     cv2.waitKey(int(1.
/ 500 * 1000)) @@ -925,7 +934,7 @@ def motion_correct_online(movie_iterable, add_to_movie, max_shift_w=25, max_shif xcorrs.append(xcorr_tmp) if save_base_name is not None: - logging.debug('Flushing memory') + logger.debug('Flushing memory') big_mov.flush() # type: ignore # mypy cannot prove big_mov is not still None del big_mov gc.collect() @@ -1087,6 +1096,7 @@ def bin_median_3d(mat, window=10, exclude_nans=True): def process_movie_parallel(arg_in): #todo: todocument + logger = logging.getLogger("caiman") fname, fr, margins_out, template, max_shift_w, max_shift_h, remove_blanks, apply_smooth, save_hdf5 = arg_in if template is not None: @@ -1098,7 +1108,6 @@ def process_movie_parallel(arg_in): type_input = str(type(fname)) if 'movie' in type_input: - # logging.info((type(fname))) Yr = fname elif 'ndarray' in type_input: @@ -1109,40 +1118,30 @@ def process_movie_parallel(arg_in): raise Exception('Unknown input type:' + type_input) if Yr.ndim > 1: - # logging.info('loaded') if apply_smooth: - # logging.info('applying smoothing') Yr = Yr.bilateral_blur_2D( diameter=10, sigmaColor=10000, sigmaSpace=0) -# print('Remove BL') if margins_out != 0: Yr = Yr[:, margins_out:-margins_out, margins_out:- margins_out] # borders create troubles -# logging.info('motion correcting') - Yr, shifts, xcorrs, template = Yr.motion_correct(max_shift_w=max_shift_w, max_shift_h=max_shift_h, method='opencv', template=template, remove_blanks=remove_blanks) if ('movie' in type_input) or ('ndarray' in type_input): - # logging.debug('Returning Values') return Yr, shifts, xcorrs, template else: - # logging.debug('median computing') + # logger.debug('median computing') template = Yr.bin_median() -# logging.debug('saving') idx_dot = len(fname.split('.')[-1]) if save_hdf5: Yr.save(fname[:-idx_dot] + 'hdf5') -# logging.debug('saving 2') np.savez(fname[:-idx_dot] + 'npz', shifts=shifts, xcorrs=xcorrs, template=template) -# logging.debug('deleting') del Yr -# logging.debug('done!') return fname[:-idx_dot] else: return None @@ -1167,6 +1166,7 @@ def motion_correct_parallel(file_names, fr=10, template=None, margins_out=0, Raises: Exception """ + logger = logging.getLogger("caiman") args_in = [] for file_idx, f in enumerate(file_names): if isinstance(template, list): @@ -1193,7 +1193,7 @@ def motion_correct_parallel(file_names, fr=10, template=None, margins_out=0, dview.results.clear() except UnboundLocalError: - logging.error('could not close client') + logger.error('could not close client') raise @@ -2123,7 +2123,8 @@ def tile_and_correct(img, template, strides, overlaps, max_shifts, newoverlaps=N """ - logging.debug("Starting tile and correct") + logger = logging.getLogger("caiman") + logger.debug("Starting tile and correct") img = img.astype(np.float64).copy() template = template.astype(np.float64).copy() @@ -2160,7 +2161,7 @@ def tile_and_correct(img, template, strides, overlaps, max_shifts, newoverlaps=N return new_img - add_to_movie, (-rigid_shts[0], -rigid_shts[1]), None, None else: # extract patches - logging.info("Extracting patches") + logger.info("Extracting patches") templates = [ it[-1] for it in sliding_window(template, overlaps=overlaps, strides=strides)] xy_grid = [(it[0], it[1]) for it in sliding_window( @@ -2183,7 +2184,7 @@ def tile_and_correct(img, template, strides, overlaps, max_shifts, newoverlaps=N ub_shifts = None # extract shifts for each patch - logging.info("extracting shifts for each patch") + logger.info("extracting shifts for each patch") shfts_et_all = [register_translation( a, b, c, 
shifts_lb=lb_shifts, shifts_ub=ub_shifts, max_shifts=max_shifts, use_cuda=use_cuda) for a, b, c in zip(
         imgs, templates, [upsample_factor_fft] * num_tiles)]
@@ -2594,9 +2595,7 @@ def compute_metrics_motion_correction(fname, final_size_x, final_size_y, swap_di
                                       opencv=True, resize_fact_play=3, fr_play=30, max_flow=1,
                                       gSig_filt=None):
     #todo: todocument
-    # cv2.OPTFLOW_FARNEBACK_GAUSSIAN
-    import scipy
-    from tqdm import tqdm
+    logger = logging.getLogger("caiman")
     if os.environ.get('ENABLE_TQDM') == 'True':
         disable_tqdm = False
     else:
@@ -2619,40 +2618,40 @@ def compute_metrics_motion_correction(fname, final_size_x, final_size_y, swap_di
     if max_shft_y_1 == 0:
         max_shft_y_1 = None
-    logging.info([max_shft_x, max_shft_x_1, max_shft_y, max_shft_y_1])
+    logger.info([max_shft_x, max_shft_x_1, max_shft_y, max_shft_y_1])
     m = m[:, max_shft_x:max_shft_x_1, max_shft_y:max_shft_y_1]
     if np.sum(np.isnan(m)) > 0:
-        logging.info(m.shape)
-        logging.warning('Movie contains NaN')
+        logger.info(m.shape)
+        logger.warning('Movie contains NaN')
         raise Exception('Movie contains NaN')
 
-    logging.debug('Local correlations..')
+    logger.debug('Local correlations..')
     img_corr = m.local_correlations(eight_neighbours=True, swap_dim=swap_dim)
-    logging.debug(m.shape)
+    logger.debug(m.shape)
     if template is None:
         tmpl = caiman.motion_correction.bin_median(m)
     else:
         tmpl = template
 
-    logging.debug('Compute Smoothness.. ')
+    logger.debug('Compute Smoothness.. ')
     smoothness = np.sqrt(
         np.sum(np.sum(np.array(np.gradient(np.mean(m, 0)))**2, 0)))
     smoothness_corr = np.sqrt(
         np.sum(np.sum(np.array(np.gradient(img_corr))**2, 0)))
 
-    logging.debug('Compute correlations.. ')
+    logger.debug('Computing correlations.. ')
     correlations = []
     count = 0
     sys.stdout.flush()
     for fr in tqdm(m, desc="Correlations", disable=disable_tqdm):
         if disable_tqdm:
             if count % 100 == 0:
-                logging.debug(count)
+                logger.debug(count)
         count += 1
         correlations.append(scipy.stats.pearsonr(
             fr.flatten(), tmpl.flatten())[0])
 
-    logging.info('Compute optical flow .. ')
+    logger.info('Computing optical flow.. ')
 
     m = m.resize(1, 1, resize_fact_flow)
     norms = []
@@ -2662,7 +2661,7 @@ def compute_metrics_motion_correction(fname, final_size_x, final_size_y, swap_di
     for fr in tqdm(m, desc="Optical flow", disable=disable_tqdm):
         if disable_tqdm:
             if count % 100 == 0:
-                logging.debug(count)
+                logger.debug(count)
 
         count += 1
 
@@ -2781,6 +2780,7 @@ def motion_correct_batch_rigid(fname, max_shifts, dview=None, splits=56, num_spl
         Exception 'The movie contains nans. Nans are not allowed!'
     """
+    logger = logging.getLogger("caiman")
 
     dims, T = caiman.base.movies.get_file_size(fname, var_name_hdf5=var_name_hdf5)
     Ts = np.arange(T)[subidx].shape[0]
@@ -2791,9 +2791,9 @@ def motion_correct_batch_rigid(fname, max_shifts, dview=None, splits=56, num_spl
         if len(m.shape) < 3:
             m = caiman.load(fname, var_name_hdf5=var_name_hdf5)
             m = m[corrected_slicer]
-            logging.warning("Your original file was saved as a single page " +
-                            "file. Consider saving it in multiple smaller files" +
-                            "with size smaller than 4GB (if it is a .tif file)")
+            logger.warning("Your original file was saved as a single page " +
+                           "file. Consider saving it in multiple smaller files " +
+                           "with size smaller than 4GB (if it is a .tif file)")
     if is3D:
         m = m[:, indices[0], indices[1], indices[2]]
 
@@ -2820,20 +2820,20 @@ def motion_correct_batch_rigid(fname, max_shifts, dview=None, splits=56, num_spl
     add_to_movie = -np.min(template)
 
     if np.isnan(add_to_movie):
-        logging.error('The movie contains NaNs.
NaNs are not allowed!')
+        logger.error('The movie contains NaNs. NaNs are not allowed!')
         raise Exception('The movie contains NaNs. NaNs are not allowed!')
     else:
-        logging.debug('Adding to movie ' + str(add_to_movie))
+        logger.debug('Adding to movie ' + str(add_to_movie))
 
     save_movie = False
     fname_tot_rig = None
     res_rig:list = []
     for iter_ in range(num_iter):
-        logging.debug(iter_)
+        logger.debug(iter_)
         old_templ = new_templ.copy()
         if iter_ == num_iter - 1:
             save_movie = save_movie_rigid
-            logging.debug('saving!')
+            logger.debug('saving!')
 
             if isinstance(fname, tuple):
 
@@ -2854,8 +2854,6 @@ def motion_correct_batch_rigid(fname, max_shifts, dview=None, splits=56, num_spl
     if gSig_filt is not None:
         new_templ = high_pass_filter_space(new_templ, gSig_filt)
 
-#    logging.debug((np.linalg.norm(new_templ - old_templ) / np.linalg.norm(old_templ)))
-
     total_template = new_templ
     templates = []
     shifts:list = []
@@ -2936,6 +2934,8 @@ def motion_correct_batch_pwrigid(fname, max_shifts, strides, overlaps, add_to_mo
         Exception 'You need to initialize the template with a good estimate. See the motion'
                 '_correct_batch_rigid function'
     """
+    logger = logging.getLogger("caiman")
+
     if template is None:
         raise Exception('You need to initialize the template with a good estimate. See the motion'
                         '_correct_batch_rigid function')
@@ -2943,13 +2943,13 @@ def motion_correct_batch_pwrigid(fname, max_shifts, strides, overlaps, add_to_mo
         new_templ = template
 
     if np.isnan(add_to_movie):
-        logging.error('The template contains NaNs. NaNs are not allowed!')
+        logger.error('The template contains NaNs. NaNs are not allowed!')
         raise Exception('The template contains NaNs. NaNs are not allowed!')
     else:
-        logging.debug('Adding to movie ' + str(add_to_movie))
+        logger.debug(f'Adding to movie {add_to_movie}')
 
     for iter_ in range(num_iter):
-        logging.debug(iter_)
+        logger.debug(iter_)
         old_templ = new_templ.copy()
 
         if iter_ == num_iter - 1:
@@ -2957,9 +2957,9 @@ def motion_correct_batch_pwrigid(fname, max_shifts, strides, overlaps, add_to_mo
 
             if save_movie:
                 if isinstance(fname, tuple):
-                    logging.debug(f'saving mmap of {fname[0]} to {fname[-1]}')
+                    logger.debug(f'saving mmap of {fname[0]} to {fname[-1]}')
                 else:
-                    logging.debug(f'saving mmap of {fname}')
+                    logger.debug(f'saving mmap of {fname}')
 
         if isinstance(fname, tuple):
             base_name=os.path.splitext(os.path.split(fname[0])[-1])[0] + '_els_'
@@ -3019,6 +3019,7 @@ def tile_and_correct_wrapper(params):
     """
     # todo todocument
 
+    logger = logging.getLogger("caiman")
     try:
 
@@ -3046,7 +3047,7 @@ def tile_and_correct_wrapper(params):
         template = template[indices]
     for count, img in enumerate(imgs):
         if count % 10 == 0:
-            logging.debug(count)
+            logger.debug(count)
         if is3D:
             mc[count], total_shift, start_step, xyz_grid = tile_and_correct_3d(img, template, strides, overlaps, max_shifts,
                                                                                add_to_movie=add_to_movie, newoverlaps=newoverlaps,
@@ -3092,6 +3093,7 @@ def motion_correction_piecewise(fname, splits, strides, overlaps, add_to_movie=0
 
     """
     # todo todocument
+    logger = logging.getLogger("caiman")
    if isinstance(fname, tuple):
         name, extension = os.path.splitext(fname[0])[:2]
     else:
@@ -3102,7 +3104,7 @@ def motion_correction_piecewise(fname,
splits, strides, overlaps, add_to_movie=0 if num_splits is not None: idxs = np.array(idxs)[np.random.randint(0, len(idxs), num_splits)] save_movie = False - #logging.warning('**** MOVIE NOT SAVED BECAUSE num_splits is not None ****') + logger.warning('**** MOVIE NOT SAVED BECAUSE num_splits is not None ****') if save_movie: if base_name is None: @@ -3131,29 +3133,29 @@ def motion_correction_piecewise(fname, splits, strides, overlaps, add_to_movie=0 np.memmap(fname_tot, mode='w+', dtype=np.float32, shape=caiman.mmapping.prepare_shape(shape_mov), order=order) - logging.info(f'Saving file as {fname_tot}') + logger.info(f'Saving file as {fname_tot}') else: fname_tot = None pars = [] for idx in idxs: - logging.debug(f'Extracting parameters for frames: {idx}') + logger.debug(f'Extracting parameters for frames: {idx}') pars.append([fname, fname_tot, idx, shape_mov, template, strides, overlaps, max_shifts, np.array( add_to_movie, dtype=np.float32), max_deviation_rigid, upsample_factor_grid, newoverlaps, newstrides, shifts_opencv, nonneg_movie, gSig_filt, is_fiji, use_cuda, border_nan, var_name_hdf5, is3D, indices]) if dview is not None: - logging.info('** Starting parallel motion correction **') + logger.info('** Starting parallel motion correction **') if HAS_CUDA and use_cuda: res = dview.map(tile_and_correct_wrapper,pars) dview.map(close_cuda_process, range(len(pars))) elif 'multiprocessing' in str(type(dview)): - logging.debug("entering multiprocessing tile_and_correct_wrapper") + logger.debug("entering multiprocessing tile_and_correct_wrapper") res = dview.map_async(tile_and_correct_wrapper, pars).get(4294967) else: res = dview.map_sync(tile_and_correct_wrapper, pars) - logging.info('** Finished parallel motion correction **') + logger.info('** Finished parallel motion correction **') else: res = list(map(tile_and_correct_wrapper, pars)) diff --git a/caiman/paths.py b/caiman/paths.py index 91dc1d0bf..95150394d 100644 --- a/caiman/paths.py +++ b/caiman/paths.py @@ -34,14 +34,16 @@ def get_tempdir() -> str: # If unset, uses default of a temp folder under caiman_datadir() # To get the old "store it where I am" behaviour, set CAIMAN_TEMP to a single dot. # If you prefer to store it somewhere different, provide a full path to that location. + logger = logging.getLogger("caiman") + if 'CAIMAN_TEMP' in os.environ: if os.path.isdir(os.environ['CAIMAN_TEMP']): return os.environ['CAIMAN_TEMP'] else: - logging.warning(f"CAIMAN_TEMP is set to nonexistent directory {os.environ['CAIMAN_TEMP']}. Ignoring") + logger.warning(f"CAIMAN_TEMP is set to nonexistent directory {os.environ['CAIMAN_TEMP']}. 
Ignoring") temp_under_data = os.path.join(caiman_datadir(), "temp") if not os.path.isdir(temp_under_data): - logging.warning(f"Default temporary dir {temp_under_data} does not exist, creating") + logger.warning(f"Default temporary dir {temp_under_data} does not exist, creating") os.makedirs(temp_under_data) return temp_under_data diff --git a/caiman/source_extraction/cnmf/cnmf.py b/caiman/source_extraction/cnmf/cnmf.py index b58e2e773..4249e8f3d 100644 --- a/caiman/source_extraction/cnmf/cnmf.py +++ b/caiman/source_extraction/cnmf/cnmf.py @@ -310,6 +310,7 @@ def fit_file(self, motion_correct=False, indices=None, include_eval=False): Returns: cnmf object with the current estimates """ + logger = logging.getLogger("caiman") if indices is None: indices = (slice(None), slice(None)) fnames = self.params.get('data', 'fnames') @@ -317,7 +318,7 @@ def fit_file(self, motion_correct=False, indices=None, include_eval=False): _, extension = os.path.splitext(fnames[0])[:2] extension = extension.lower() else: - logging.warning("Error: File not found, with file list:\n" + fnames[0]) + logger.error(f"Error: File not found, with file list:\n{fnames[0]}") raise Exception('File not found!') base_name = pathlib.Path(fnames[0]).stem + "_memmap_" @@ -419,6 +420,7 @@ def fit(self, images, indices=(slice(None), slice(None))): http://www.cell.com/neuron/fulltext/S0896-6273(15)01084-3 """ + logger = logging.getLogger("caiman") # Todo : to compartment if isinstance(indices, slice): indices = [indices] @@ -433,7 +435,7 @@ def fit(self, images, indices=(slice(None), slice(None))): if self.params.get('patch', 'rf') is None and (is_sliced or 'ndarray' in str(type(images))): images = images[tuple(indices)] self.dview = None - logging.info("Parallel processing in a single patch " + logger.info("Parallel processing in a single patch " "is not available for loaded in memory or sliced" + " data.") @@ -446,7 +448,7 @@ def fit(self, images, indices=(slice(None), slice(None))): if np.isfortran(Yr): raise Exception('The file is in F order, it should be in C order (see save_memmap function)') - logging.info((T,) + self.dims) + logger.info((T,) + self.dims) # Make sure filename is pointed correctly (numpy sets it to None sometimes) try: @@ -463,7 +465,7 @@ def fit(self, images, indices=(slice(None), slice(None))): # 'ss': np.ones((3,) * len(self.dims), dtype=np.uint8) # }) - logging.info(('Using ' + str(self.params.get('patch', 'n_processes')) + ' processes')) + logger.info(('Using ' + str(self.params.get('patch', 'n_processes')) + ' processes')) if self.params.get('preprocess', 'n_pixels_per_process') is None: avail_memory_per_process = psutil.virtual_memory()[ 1] / 2.**30 / self.params.get('patch', 'n_processes') @@ -474,14 +476,14 @@ def fit(self, images, indices=(slice(None), slice(None))): self.params.set('spatial', {'n_pixels_per_process': self.params.get('preprocess', 'n_pixels_per_process')}) - logging.info('using ' + str(self.params.get('preprocess', 'n_pixels_per_process')) + ' pixels per process') - logging.info('using ' + str(self.params.get('temporal', 'block_size_temp')) + ' block_size_temp') + logger.info('using ' + str(self.params.get('preprocess', 'n_pixels_per_process')) + ' pixels per process') + logger.info('using ' + str(self.params.get('temporal', 'block_size_temp')) + ' block_size_temp') if self.params.get('patch', 'rf') is None: # no patches - logging.info('preprocessing ...') + logger.info('preprocessing ...') Yr = self.preprocess(Yr) if self.estimates.A is None: - logging.info('initializing ...') + 
logger.info('initializing ...') self.initialize(Y) if self.params.get('patch', 'only_init'): # only return values after initialization @@ -494,21 +496,21 @@ def fit(self, images, indices=(slice(None), slice(None))): if self.remove_very_bad_comps: - logging.info('removing bad components : ') + logger.info('removing bad components : ') final_frate = 10 r_values_min = 0.5 # threshold on space consistency fitness_min = -15 # threshold on time variability fitness_delta_min = -15 Npeaks = 10 traces = np.array(self.C) - logging.info('estimating the quality...') + logger.info('estimating the quality...') idx_components, idx_components_bad, fitness_raw,\ fitness_delta, r_values = estimate_components_quality( traces, Y, self.estimates.A, self.estimates.C, self.estimates.b, self.estimates.f, final_frate=final_frate, Npeaks=Npeaks, r_values_min=r_values_min, fitness_min=fitness_min, fitness_delta_min=fitness_delta_min, return_all=True, N=5) - logging.info(('Keeping ' + str(len(idx_components)) + + logger.info(('Keeping ' + str(len(idx_components)) + ' and discarding ' + str(len(idx_components_bad)))) self.estimates.C = self.estimates.C[idx_components] self.estimates.A = self.estimates.A[:, idx_components] # type: ignore # not provable that self.initialise provides a value @@ -518,31 +520,31 @@ def fit(self, images, indices=(slice(None), slice(None))): return self - logging.info('update spatial ...') + logger.info('update spatial ...') self.update_spatial(Yr, use_init=True) - logging.info('update temporal ...') + logger.info('update temporal ...') if not self.skip_refinement: # set this to zero for fast updating without deconvolution self.params.set('temporal', {'p': 0}) else: self.params.set('temporal', {'p': self.params.get('preprocess', 'p')}) - logging.info('deconvolution ...') + logger.info('deconvolution ...') self.update_temporal(Yr) if not self.skip_refinement: - logging.info('refinement...') + logger.info('refinement...') if self.params.get('merging', 'do_merge'): - logging.info('merging components ...') + logger.info('merging components ...') self.merge_comps(Yr, mx=50, fast_merge=True) - logging.info('Updating spatial ...') + logger.info('Updating spatial ...') self.update_spatial(Yr, use_init=False) # set it back to original value to perform full deconvolution self.params.set('temporal', {'p': self.params.get('preprocess', 'p')}) - logging.info('update temporal ...') + logger.info('update temporal ...') self.update_temporal(Yr, use_init=False) # else: # todo : ask for those.. 
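The recurring change throughout this patch: each function fetches the shared "caiman" logger once at the top and routes every message through it, rather than calling the root logging module. A minimal sketch of the pattern and of the application-side control it enables (the handler and format below are illustrative choices, not part of the patch itself):

    import logging

    logger = logging.getLogger("caiman")   # one named logger shared across the library

    def some_caiman_function():
        logger.info("emitted via the 'caiman' logger, not the root logger")

    # applications (and the updated test scripts below) tune verbosity in one place,
    # optionally attaching their own handler:
    caiman_logger = logging.getLogger("caiman")
    caiman_logger.setLevel(logging.DEBUG)
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
    caiman_logger.addHandler(handler)
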
@@ -568,7 +570,7 @@ def fit(self, images, indices=(slice(None), slice(None))): else: # use patches if self.params.get('patch', 'stride') is None: self.params.set('patch', {'stride': int(self.params.get('patch', 'rf') * 2 * .1)}) - logging.info( + logger.info( ('Setting the stride to 10% of 2*rf automatically:' + str(self.params.get('patch', 'stride')))) if not isinstance(images, np.memmap): @@ -585,7 +587,7 @@ def fit(self, images, indices=(slice(None), slice(None))): indices=indices) self.estimates.bl, self.estimates.c1, self.estimates.g, self.estimates.neurons_sn = None, None, None, None - logging.info("merging") + logger.info("merging") self.estimates.merged_ROIs = [0] @@ -595,14 +597,14 @@ def fit(self, images, indices=(slice(None), slice(None))): while len(self.estimates.merged_ROIs) > 0: self.merge_comps(Yr, mx=np.Inf, fast_merge=True) - logging.info("update temporal") + logger.info("update temporal") self.update_temporal(Yr, use_init=False) self.params.set('spatial', {'se': np.ones((1,) * len(self.dims), dtype=np.uint8)}) - logging.info('update spatial ...') + logger.info('update spatial ...') self.update_spatial(Yr, use_init=False) - logging.info("update temporal") + logger.info("update temporal") self.update_temporal(Yr, use_init=False) else: while len(self.estimates.merged_ROIs) > 0: @@ -627,7 +629,7 @@ def fit(self, images, indices=(slice(None), slice(None))): while len(self.estimates.merged_ROIs) > 0: self.merge_comps(Yr, mx=np.Inf) - logging.info("update temporal") + logger.info("update temporal") self.update_temporal(Yr, use_init=False) self.estimates.normalize_components() diff --git a/caiman/source_extraction/cnmf/estimates.py b/caiman/source_extraction/cnmf/estimates.py index 1959a2e05..d32a1ce23 100644 --- a/caiman/source_extraction/cnmf/estimates.py +++ b/caiman/source_extraction/cnmf/estimates.py @@ -105,8 +105,9 @@ def __init__(self, A=None, b=None, C=None, f=None, R=None, dims=None): eccentricity values """ # Sanity checks (right now these just warn, but eventually we would like to fail) + logger = logging.getLogger("caiman") if R is not None and not isinstance(R, np.ndarray): - logging.warning(f"Estimates.R should be an np.ndarray but was assigned a {type(R)}") + logger.warning(f"Estimates.R should be an np.ndarray but was assigned a {type(R)}") # variables related to the estimates of traces, footprints, deconvolution and background self.A = A @@ -719,7 +720,8 @@ def compute_background(self, Yr): Yr : np.ndarray movie in format pixels (d) x frames (T) """ - logging.warning("Computing the full background has big memory requirements!") + logger = logging.getLogger("caiman") + logger.warning("Computing the full background has big memory requirements!") if self.f is not None: # low rank background return self.b.dot(self.f) else: # ring model background @@ -802,10 +804,11 @@ def detrend_df_f(self, quantileMin=8, frames_window=500, self: CNMF object self.F_dff contains the DF/F normalized traces """ + logger = logging.getLogger("caiman") # FIXME This method shares its name with a function elsewhere in the codebase (which it wraps) if self.C is None or self.C.shape[0] == 0: - logging.warning("There are no components for DF/F extraction!") + logger.warning("There are no components for DF/F extraction!") return self if use_residuals: @@ -947,6 +950,7 @@ def select_components(self, idx_components=None, use_object=False, save_discarde def restore_discarded_components(self): ''' Recover components that are filtered out with the select_components method ''' + logger = 
logging.getLogger("caiman") if self.discarded_components is not None: for field in ['C', 'S', 'YrA', 'R', 'F_dff', 'g', 'bl', 'c1', 'neurons_sn', 'lam', 'cnn_preds','SNR_comp','r_values','coordinates']: if getattr(self, field) is not None: @@ -956,7 +960,7 @@ def restore_discarded_components(self): setattr(self, field, np.concatenate([getattr(self, field), getattr(self.discarded_components, field)], axis=0)) setattr(self.discarded_components, field, None) else: - logging.warning('Variable ' + field + ' could not be \ + logger.warning('Variable ' + field + ' could not be \ restored as it does not have the same \ number of components as A') @@ -1034,6 +1038,7 @@ def evaluate_components(self, imgs, params, dview=None): self.cnn_preds: np.array CNN classifier values for each component """ + logger = logging.getLogger("caiman") dims = imgs.shape[1:] opts = params.get_group('quality') idx_components, idx_components_bad, SNR_comp, r_values, cnn_preds = \ @@ -1052,11 +1057,11 @@ def evaluate_components(self, imgs, params, dview=None): self.idx_components = idx_components.astype(int) self.idx_components_bad = idx_components_bad.astype(int) if np.any(np.isnan(r_values)): - logging.warning('NaN values detected for space correlation in {}'.format(np.where(np.isnan(r_values))[0]) + + logger.warning('NaN values detected for space correlation in {}'.format(np.where(np.isnan(r_values))[0]) + '. Changing their value to -1.') r_values = np.where(np.isnan(r_values), -1, r_values) if np.any(np.isnan(SNR_comp)): - logging.warning('NaN values detected for trace SNR in {}'.format(np.where(np.isnan(SNR_comp))[0]) + + logger.warning('NaN values detected for trace SNR in {}'.format(np.where(np.isnan(SNR_comp))[0]) + '. Changing their value to 0.') SNR_comp = np.where(np.isnan(SNR_comp), 0, SNR_comp) self.SNR_comp = SNR_comp @@ -1176,7 +1181,7 @@ def deconvolve(self, params, dview=None, dff_flag=False): Returns: self: estimates object ''' - + logger = logging.getLogger("caiman") F = self.C + self.YrA args = dict() args['p'] = params.get('preprocess', 'p') @@ -1213,7 +1218,7 @@ def deconvolve(self, params, dview=None, dff_flag=False): if dff_flag: if self.F_dff is None: - logging.warning('The F_dff field is empty. Run the method' + + logger.warning('The F_dff field is empty. 
Run the method' + ' estimates.detrend_df_f before attempting' + ' to deconvolve.') else: @@ -1272,6 +1277,7 @@ def manual_merge(self, components, params): Returns: self: estimates object ''' + logger = logging.getLogger("caiman") ln = np.sum(np.array([len(comp) for comp in components])) ids = set.union(*[set(comp) for comp in components]) @@ -1295,7 +1301,7 @@ def manual_merge(self, components, params): for i in range(nbmrg): merged_ROI = list(set(components[i])) - logging.info(f'Merging components {merged_ROI}') + logger.info(f'Merging components {merged_ROI}') merged_ROIs.append(merged_ROI) Acsc = self.A.tocsc()[:, merged_ROI] @@ -1446,6 +1452,7 @@ def remove_duplicates(self, predictions=None, r_values=None, dist_thr=0.1, thresh_subset plot_duplicates ''' + logger = logging.getLogger("caiman") if self.A_thr is None: raise Exception('You need to compute thresholded components before calling remove_duplicates: use the threshold_components method') @@ -1454,7 +1461,7 @@ def remove_duplicates(self, predictions=None, r_values=None, dist_thr=0.1, duplicates_gt, indices_keep_gt, indices_remove_gt, D_gt, overlap_gt = detect_duplicates_and_subsets( A_gt_thr_bin,predictions=predictions, r_values=r_values, dist_thr=dist_thr, min_dist=min_dist, thresh_subset=thresh_subset) - logging.info(f'Number of duplicates: {len(duplicates_gt)}') + logger.info(f'Number of duplicates: {len(duplicates_gt)}') if len(duplicates_gt) > 0: if plot_duplicates: plt.figure() @@ -1544,6 +1551,7 @@ def save_NWB(self, location: str """ + logger = logging.getLogger("caiman") if identifier is None: identifier = uuid.uuid1().hex @@ -1578,7 +1586,7 @@ def save_NWB(self, io.write(nwbfile) time.sleep(4) # ensure the file is fully closed before opening again in append mode - logging.info('Saving the results in the NWB file...') + logger.info('Saving the results in the NWB file...') with NWBHDF5IO(filename, 'r+') as io: nwbfile = io.read() diff --git a/caiman/source_extraction/cnmf/initialization.py b/caiman/source_extraction/cnmf/initialization.py index 172f4e57b..31ac48e0a 100644 --- a/caiman/source_extraction/cnmf/initialization.py +++ b/caiman/source_extraction/cnmf/initialization.py @@ -75,17 +75,17 @@ def decimate_last_axis(y, sub): def downscale(Y, ds, opencv=False): """downscaling without zero padding faster version of skimage.transform._warps.block_reduce(Y, ds, np.nanmean, np.nan)""" + logger = logging.getLogger("caiman") - from caiman.base.movies import movie d = Y.ndim if opencv and (d in [2, 3]): if d == 2: Y = Y[..., None] ds = tuple(ds) + (1,) else: - Y_ds = movie(Y.transpose(2, 0, 1)).resize(fx=1. / ds[0], fy=1. / ds[1], fz=1. / ds[2], + Y_ds = caiman.base.movies.movie(Y.transpose(2, 0, 1)).resize(fx=1. / ds[0], fy=1. / ds[1], fz=1. 
/ ds[2], interpolation=cv2.INTER_AREA).transpose(1, 2, 0) - logging.info('Downscaling using OpenCV') + logger.info('Downscaling using OpenCV') else: if d > 3: # raise NotImplementedError @@ -278,6 +278,7 @@ def initialize_components(Y, K=30, gSig=[5, 5], gSiz=None, ssub=1, tsub=1, nIter Exception 'You need to define arguments for local NMF' """ + logger = logging.getLogger("caiman") method = method_init if gSiz is None: @@ -289,7 +290,7 @@ def initialize_components(Y, K=30, gSig=[5, 5], gSiz=None, ssub=1, tsub=1, nIter gSiz = np.round(np.asarray(gSiz) / ssub).astype(int) if normalize_init: - logging.info('Variance Normalization') + logger.info('Variance Normalization') if img is None: img = np.mean(Y, axis=-1) img += np.median(img) @@ -305,11 +306,11 @@ def initialize_components(Y, K=30, gSig=[5, 5], gSiz=None, ssub=1, tsub=1, nIter if ssub != 1 or tsub != 1: if method == 'corr_pnr': - logging.info("Spatial/Temporal downsampling 1-photon") + logger.info("Spatial/Temporal downsampling 1-photon") # this increments the performance against ground truth and solves border problems Y_ds = downscale(Y, tuple([ssub] * len(d) + [tsub]), opencv=False) else: - logging.info("Spatial/Temporal downsampling 2-photon") + logger.info("Spatial/Temporal downsampling 2-photon") # this increments the performance against ground truth and solves border problems Y_ds = downscale(Y, tuple([ssub] * len(d) + [tsub]), opencv=True) else: @@ -319,14 +320,14 @@ def initialize_components(Y, K=30, gSig=[5, 5], gSiz=None, ssub=1, tsub=1, nIter if nb > min(np.prod(ds), Y_ds.shape[-1]): nb = -1 - logging.info('Roi Initialization...') + logger.info('Roi Initialization...') if method == 'greedy_roi': Ain, Cin, _, b_in, f_in = greedyROI( Y_ds, nr=K, gSig=gSig, gSiz=gSiz, nIter=nIter, kernel=kernel, nb=nb, rolling_sum=rolling_sum, rolling_length=rolling_length, seed_method=seed_method) if use_hals: - logging.info('Refining Components using HALS NMF iterations') + logger.info('Refining Components using HALS NMF iterations') Ain, Cin, b_in, f_in = hals( Y_ds, Ain, Cin, b_in, f_in, maxIter=maxIter) elif method == 'corr_pnr': @@ -521,15 +522,16 @@ def sparseNMF(Y_ds, nr, max_iter_snmf=200, alpha=0.5, sigma_smooth=(.5, .5, .5), center: np.array 2d array of size nr x 2 [ or 3] with the components centroids """ + logger = logging.getLogger("caiman") m = scipy.ndimage.gaussian_filter(np.transpose( Y_ds, np.roll(np.arange(Y_ds.ndim), 1)), sigma=sigma_smooth, mode='nearest', truncate=truncate) if remove_baseline: - logging.info('Removing baseline') + logger.info('Removing baseline') bl = np.percentile(m, perc_baseline, axis=0) m1 = np.maximum(0, m - bl) else: - logging.info('Not removing baseline') + logger.info('Not removing baseline') bl = np.zeros(m.shape[1:]) m1 = m @@ -537,7 +539,7 @@ def sparseNMF(Y_ds, nr, max_iter_snmf=200, alpha=0.5, sigma_smooth=(.5, .5, .5), d = np.prod(dims) yr = np.reshape(m1, [T, d], order='F') - logging.debug(f"Running SparseNMF with alpha_W={alpha}") + logger.debug(f"Running SparseNMF with alpha_W={alpha}") mdl = NMF(n_components=nr, verbose=False, init='nndsvd', @@ -564,15 +566,16 @@ def sparseNMF(Y_ds, nr, max_iter_snmf=200, alpha=0.5, sigma_smooth=(.5, .5, .5), def compressedNMF(Y_ds, nr, r_ov=10, max_iter_snmf=500, sigma_smooth=(.5, .5, .5), remove_baseline=False, perc_baseline=20, nb=1, truncate=2, tol=1e-3): + logger = logging.getLogger("caiman") m = scipy.ndimage.gaussian_filter(np.transpose( Y_ds, np.roll(np.arange(Y_ds.ndim), 1)), sigma=sigma_smooth, mode='nearest', truncate=truncate) if 
remove_baseline:
-        logging.info('REMOVING BASELINE')
+        logger.info('REMOVING BASELINE')
         bl = np.percentile(m, perc_baseline, axis=0)
         m = np.maximum(0, m - bl)
     else:
-        logging.info('NOT REMOVING BASELINE')
+        logger.info('NOT REMOVING BASELINE')
         bl = np.zeros(m.shape[1:])
 
     T, dims = m.shape[0], m.shape[1:]
@@ -618,7 +621,7 @@ def compressedNMF(Y_ds, nr, r_ov=10, max_iter_snmf=500,
         A /= nA
         C *= nA[:, np.newaxis]
         if (np.linalg.norm(C - C__)/np.linalg.norm(C__) < tol) & (np.linalg.norm(A - A__)/np.linalg.norm(A__) < tol):
-            logging.info(f'Graph NMF converged after {it + 1} iterations')
+            logger.info(f'Compressed NMF converged after {it + 1} iterations')
             break
     A_in = A
     C_in = C
@@ -638,15 +641,16 @@ def graphNMF(Y_ds, nr, max_iter_snmf=500, lambda_gnmf=1,
                perc_baseline=20, nb=1, truncate=2, tol=1e-3,
                SC_kernel='heat', SC_normalize=True, SC_thr=0,
                SC_sigma=1, SC_use_NN=False, SC_nnn=20):
+    logger = logging.getLogger("caiman")
     m = scipy.ndimage.gaussian_filter(np.transpose(
         Y_ds, np.roll(np.arange(Y_ds.ndim), 1)), sigma=sigma_smooth,
         mode='nearest', truncate=truncate)
     if remove_baseline:
-        logging.info('REMOVING BASELINE')
+        logger.info('REMOVING BASELINE')
         bl = np.percentile(m, perc_baseline, axis=0)
         m1 = np.maximum(0, m - bl)
     else:
-        logging.info('NOT REMOVING BASELINE')
+        logger.info('NOT REMOVING BASELINE')
         bl = np.zeros(m.shape[1:])
         m1 = m
     T, dims = m1.shape[0], m1.shape[1:]
@@ -669,7 +673,7 @@ def graphNMF(Y_ds, nr, max_iter_snmf=500, lambda_gnmf=1,
         A /= nA
         C *= nA[:, np.newaxis]
         if (np.linalg.norm(C - C_)/np.linalg.norm(C_) < tol) & (np.linalg.norm(A - A_)/np.linalg.norm(A_) < tol):
-            logging.info(f'Graph NMF converged after {it + 1} iterations')
+            logger.info(f'Graph NMF converged after {it + 1} iterations')
             break
     A_in = A
     C_in = C
@@ -742,7 +746,8 @@ def greedyROI(Y, nr=30, gSig=[5, 5], gSiz=[11, 11], nIter=5, kernel=None, nb=1,
 
     """
-    logging.info("Greedy initialization of spatial and temporal components using spatial Gaussian filtering")
+    logger = logging.getLogger("caiman")
+    logger.info("Greedy initialization of spatial and temporal components using spatial Gaussian filtering")
     d = np.shape(Y)
     Y[np.isnan(Y)] = 0
     med = np.median(Y, axis=-1)
@@ -758,13 +763,13 @@ def greedyROI(Y, nr=30, gSig=[5, 5], gSiz=[11, 11], nIter=5, kernel=None, nb=1,
     rho = imblur(Y, sig=gSig, siz=gSiz, nDimBlur=Y.ndim - 1, kernel=kernel)
     if rolling_sum:
-        logging.info('Using rolling sum for initialization (RollingGreedyROI)')
+        logger.info('Using rolling sum for initialization (RollingGreedyROI)')
         rolling_filter = np.ones(
             (rolling_length), dtype=np.float32) / rolling_length
         rho_s = scipy.signal.lfilter(rolling_filter, 1., rho**2)
         v = np.amax(rho_s, axis=-1)
     else:
-        logging.info('Using total sum for initialization (GreedyROI)')
+        logger.info('Using total sum for initialization (GreedyROI)')
         v = np.sum(rho**2, axis=-1)
 
     if seed_method.lower() != 'manual':
@@ -1163,11 +1168,12 @@ def greedyROI_corr(Y, Y_ds, max_number=None, gSiz=None, gSig=None, center_psf=Tr
         init_iter: int, optional
             number of iterations for 1-photon imaging initialization
     """
+    logger = logging.getLogger("caiman")
     if min_corr is None or min_pnr is None:
         raise Exception(
             'Either min_corr or min_pnr are None. 
Both of them must be real numbers.') - logging.info('One photon initialization (GreedyCorr)') + logger.info('One photon initialization (GreedyCorr)') o = options['temporal'].copy() o['s_min'] = None if o['p'] > 1: @@ -1188,7 +1194,7 @@ def greedyROI_corr(Y, Y_ds, max_number=None, gSiz=None, gSig=None, center_psf=Tr if ring_size_factor is not None: # background according to ringmodel - logging.info('Computing ring model background') + logger.info('Computing ring model background') W, b0 = compute_W(Y_ds.reshape((-1, total_frames), order='F'), A, C, (d1, d2), ring_size_factor * gSiz, ssub=ssub_B) @@ -1208,14 +1214,14 @@ def compute_B(b0, W, B): # actually computes -B to efficiently compute Y-B in p B = compute_B(b0, W, B) # "-B" B += Y_ds.reshape((-1, total_frames), order='F') # "Y-B" - logging.info('Updating spatial components') + logger.info('Updating spatial components') A, _, C, _ = caiman.source_extraction.cnmf.spatial.update_spatial_components( B, C=C, f=np.zeros((0, total_frames), np.float32), A_in=A, sn=np.sqrt(downscale((sn**2).reshape(dims, order='F'), tuple([ssub] * len(dims))).ravel() / tsub) / ssub, b_in=np.zeros((d1 * d2, 0), np.float32), dview=None, dims=(d1, d2), **options['spatial']) - logging.info('Updating temporal components') + logger.info('Updating temporal components') C, A = caiman.source_extraction.cnmf.temporal.update_temporal_components( B, spr.csc_matrix(A, dtype=np.float32), np.zeros((d1 * d2, 0), np.float32), @@ -1230,7 +1236,7 @@ def compute_B(b0, W, B): # actually computes -B to efficiently compute Y-B in p if max_number != 0: if i == init_iter-2 and seed_method.lower()[:4] == 'semi': seed_method, min_corr, min_pnr = 'manual', 0, 0 - logging.info('Searching for more neurons in the residual') + logger.info('Searching for more neurons in the residual') A_R, C_R, _, _, center_R = init_neurons_corr_pnr( (B - A.dot(C)).reshape(Y_ds.shape, order='F'), max_number=max_number, gSiz=gSiz, gSig=gSig, @@ -1242,13 +1248,13 @@ def compute_B(b0, W, B): # actually computes -B to efficiently compute Y-B in p C = np.concatenate((C, C_R), 0) # 1st iteration on decimated data - logging.info('Merging components') + logger.info('Merging components') A, C = caiman.source_extraction.cnmf.merging.merge_components( B, A, [], C, None, [], C, [], o, options['spatial'], dview=None, thr=options['merging']['merge_thr'], mx=np.Inf, fast_merge=True)[:2] A = A.astype(np.float32) C = C.astype(np.float32) - logging.info('Updating spatial components') + logger.info('Updating spatial components') A, _, C, _ = caiman.source_extraction.cnmf.spatial.update_spatial_components( B, C=C, f=np.zeros((0, total_frames), np.float32), A_in=A, sn=np.sqrt(downscale((sn**2).reshape(dims, order='F'), @@ -1256,14 +1262,14 @@ def compute_B(b0, W, B): # actually computes -B to efficiently compute Y-B in p b_in=np.zeros((d1 * d2, 0), np.float32), dview=None, dims=(d1, d2), **options['spatial']) A = A.astype(np.float32) - logging.info('Updating temporal components') + logger.info('Updating temporal components') C, A = caiman.source_extraction.cnmf.temporal.update_temporal_components( B, spr.csc_matrix(A), np.zeros((d1 * d2, 0), np.float32), C, np.zeros((0, total_frames), np.float32), dview=None, bl=None, c1=None, sn=None, g=None, **o)[:2] - logging.info('Recomputing background') + logger.info('Recomputing background') # background according to ringmodel W, b0 = compute_W(Y_ds.reshape((-1, total_frames), order='F'), A, C, (d1, d2), ring_size_factor * gSiz, ssub=ssub_B) @@ -1291,19 +1297,19 @@ def compute_B(b0, W, B): # 
actually computes -B to efficiently compute Y-B in p .reshape((np.prod(dims), K), order='F')) B += Y.reshape((-1, T), order='F') # "Y-B" - logging.info('Merging components') + logger.info('Merging components') A, C = caiman.source_extraction.cnmf.merging.merge_components( B, A, [], C, None, [], C, [], o, options['spatial'], dview=None, thr=options['merging']['merge_thr'], mx=np.Inf, fast_merge=True)[:2] A = A.astype(np.float32) C = C.astype(np.float32) - logging.info('Updating spatial components') + logger.info('Updating spatial components') options['spatial']['se'] = np.ones((1,) * len((d1, d2)), dtype=np.uint8) A, _, C, _ = caiman.source_extraction.cnmf.spatial.update_spatial_components( B, C=C, f=np.zeros((0, T), np.float32), A_in=A, sn=sn, b_in=np.zeros((np.prod(dims), 0), np.float32), dview=None, dims=dims, **options['spatial']) - logging.info('Updating temporal components') + logger.info('Updating temporal components') C, A, b__, f__, S, bl, c1, neurons_sn, g1, YrA, lam__ = \ caiman.source_extraction.cnmf.temporal.update_temporal_components( B, spr.csc_matrix(A, dtype=np.float32), @@ -1316,11 +1322,11 @@ def compute_B(b0, W, B): # actually computes -B to efficiently compute Y-B in p use_NMF = True if nb == -1: - logging.info('Returning full background') + logger.info('Returning full background') b_in = spr.eye(len(B), dtype='float32') f_in = B elif nb > 0: - logging.info(f'Estimate low rank background (rank = {nb})') + logger.info(f'Estimate low rank background (rank = {nb})') print(nb) if use_NMF: model = NMF(n_components=nb, init='nndsvdar') @@ -1334,12 +1340,12 @@ def compute_B(b0, W, B): # actually computes -B to efficiently compute Y-B in p b_in = np.empty((A.shape[0], 0)) f_in = np.empty((0, T)) if nb == 0: - logging.info('Returning background as b0 and W') + logger.info('Returning background as b0 and W') return (A, C, center.T, b_in.astype(np.float32), f_in.astype(np.float32), (S.astype(np.float32), bl, c1, neurons_sn, g1, YrA, lam__, W, b0)) else: - logging.info("Not returning background") + logger.info("Not returning background") return (A, C, center.T, b_in.astype(np.float32), f_in.astype(np.float32), None if ring_size_factor is None else (S.astype(np.float32), bl, c1, neurons_sn, g1, YrA, lam__)) @@ -1408,6 +1414,7 @@ def init_neurons_corr_pnr(data, max_number=None, gSiz=15, gSig=None, center: np.ndarray center locations of all neurons """ + logger = logging.getLogger("caiman") if swap_dim: d1, d2, total_frames = data.shape @@ -1787,9 +1794,9 @@ def onclick(event): break else: if num_neurons % 100 == 1: - logging.info(f'{num_neurons - 1} neurons have been initialized') + logger.info(f'{num_neurons - 1} neurons have been initialized') - logging.info(f'In total, {num_neurons} neurons were initialized.') + logger.info(f'In total, {num_neurons} neurons were initialized.') # A = np.reshape(Ain[:num_neurons], (-1, d1 * d2)).transpose() A = np.reshape(Ain[:num_neurons], (-1, d1 * d2), order='F').transpose() C = Cin[:num_neurons] diff --git a/caiman/source_extraction/cnmf/map_reduce.py b/caiman/source_extraction/cnmf/map_reduce.py index 8a4d64ab7..ba6cd0e4f 100644 --- a/caiman/source_extraction/cnmf/map_reduce.py +++ b/caiman/source_extraction/cnmf/map_reduce.py @@ -67,11 +67,10 @@ def cnmf_patches(args_in): """ #FIXME Fix in-function imports - import logging from . 
import cnmf + logger = logging.getLogger("caiman") file_name, idx_, shapes, params = args_in - logger = logging.getLogger(__name__) name_log = os.path.basename( file_name[:-5]) + '_LOG_ ' + str(idx_[0]) + '_' + str(idx_[-1]) @@ -185,6 +184,7 @@ def run_CNMF_patches(file_name, shape, params, gnb=1, dview=None, Raises: Empty Exception """ + logger = logging.getLogger("caiman") dims = shape[:-1] d = np.prod(dims) @@ -224,7 +224,7 @@ def run_CNMF_patches(file_name, shape, params, gnb=1, dview=None, foo[id_f] = 1 patch_centers.append(scipy.ndimage.center_of_mass( foo.reshape(dims, order='F'))) - logging.info(f'Patch size: {id_2d}') + logger.info(f'Patch size: {id_2d}') st = time.time() if dview is not None: if 'multiprocessing' in str(type(dview)): @@ -237,12 +237,12 @@ def run_CNMF_patches(file_name, shape, params, gnb=1, dview=None, print('Something went wrong') raise finally: - logging.info('Patch processing complete') + logger.info('Patch processing complete') else: file_res = list(map(cnmf_patches, args_in)) - logging.info('Elapsed time for processing patches: \ + logger.info('Elapsed time for processing patches: \ {0}s'.format(str(time.time() - st).split('.')[0])) # count components count = 0 @@ -304,7 +304,7 @@ def run_CNMF_patches(file_name, shape, params, gnb=1, dview=None, # instead of filling in the matrices, construct lists with their non-zero # entries and coordinates - logging.info('Embedding patches results into whole FOV') + logger.info('Embedding patches results into whole FOV') for fff in file_res: if fff is not None: @@ -360,7 +360,7 @@ def run_CNMF_patches(file_name, shape, params, gnb=1, dview=None, else: empty += 1 - logging.debug('Skipped %d empty patches', empty) + logger.debug(f'Skipped {empty} empty patches') if count_bgr > 0: idx_tot_B = np.concatenate(idx_tot_B) b_tot = np.concatenate(b_tot) @@ -396,7 +396,7 @@ def run_CNMF_patches(file_name, shape, params, gnb=1, dview=None, optional_outputs['F'] = F_tot optional_outputs['mask'] = mask - logging.info("Constructing background") + logger.info("Constructing background") Im = scipy.sparse.csr_matrix( (1. 
/ (mask + np.finfo(np.float32).eps), (np.arange(d), np.arange(d))), dtype=np.float32) @@ -410,9 +410,9 @@ def run_CNMF_patches(file_name, shape, params, gnb=1, dview=None, elif low_rank_background is None: b = Im.dot(B_tot) f = F_tot - logging.info("Leaving background components intact") + logger.info("Leaving background components intact") elif low_rank_background: - logging.info("Compressing background components with a low rank NMF") + logger.info("Compressing background components with a low rank NMF") B_tot = Im.dot(B_tot) Bm = (B_tot) #f = np.r_[np.atleast_2d(np.mean(F_tot, axis=0)), @@ -445,7 +445,7 @@ def run_CNMF_patches(file_name, shape, params, gnb=1, dview=None, # B_tot = scipy.sparse.coo_matrix(B_tot) f *= nB[:, None] else: - logging.info('Removing overlapping background components \ + logger.info('Removing overlapping background components \ from different patches') nA = np.ravel(np.sqrt(A_tot.power(2).sum(0))) A_tot /= nA @@ -475,8 +475,8 @@ def run_CNMF_patches(file_name, shape, params, gnb=1, dview=None, b = B_tot f = F_tot - logging.info('using one background component per patch') + logger.info('using one background component per patch') - logging.info("Constructing background DONE") + logger.info("Constructing background DONE") return A_tot, C_tot, YrA_tot, b, f, sn_tot, optional_outputs diff --git a/caiman/source_extraction/cnmf/merging.py b/caiman/source_extraction/cnmf/merging.py index 5e6452232..a01244472 100644 --- a/caiman/source_extraction/cnmf/merging.py +++ b/caiman/source_extraction/cnmf/merging.py @@ -120,6 +120,7 @@ def merge_components(Y, A, b, C, R, f, S, sn_pix, temporal_params, Exception "The number of elements of bl, c1, g, sn must match the number of components" """ + logger = logging.getLogger("caiman") # tests and initialization nr = A.shape[1] A = csc_matrix(A) @@ -222,7 +223,7 @@ def merge_components(Y, A, b, C, R, f, S, sn_pix, temporal_params, merged_ROIs = [] for i in range(nbmrg): merged_ROI = np.where(list_conxcomp[:, ind[i]])[0] - logging.info(f'Merging components {merged_ROI}') + logger.info(f'Merging components {merged_ROI}') merged_ROIs.append(merged_ROI) Acsc = A.tocsc()[:, merged_ROI] Ctmp = np.array(C)[merged_ROI, :] + np.array(R)[merged_ROI, :] @@ -279,7 +280,7 @@ def merge_components(Y, A, b, C, R, f, S, sn_pix, temporal_params, nr = nr - len(neur_id) + len(C_merged) else: - logging.info('No more components merged!') + logger.info('No more components merged!') merged_ROIs = [] empty = [] @@ -292,6 +293,7 @@ def merge_iter(a): return res def merge_iteration(Acsc, C_to_norm, Ctmp, fast_merge, g, g_idx, indx, temporal_params): + logger = logging.getLogger("caiman") if fast_merge: # we normalize the values of different A's to be able to compare them efficiently. 
we then sum them
@@ -304,7 +306,7 @@ def merge_iteration(Acsc, C_to_norm, Ctmp, fast_merge, g, g_idx, indx, temporal_
                 break
         computedA = np.maximum(Acsc.dot(Ctmp.dot(computedC.T)) / nc, 0)
     else:
-        logging.info('Simple merging ny taking best neuron')
+        logger.info('Simple merging by taking best neuron')
         computedC = Ctmp[indx]
         computedA = Acsc[:, indx]
     # then we de-normalize them using A_to_norm
diff --git a/caiman/source_extraction/cnmf/online_cnmf.py b/caiman/source_extraction/cnmf/online_cnmf.py
index 2a1aede6c..c84ec3f5a 100644
--- a/caiman/source_extraction/cnmf/online_cnmf.py
+++ b/caiman/source_extraction/cnmf/online_cnmf.py
@@ -119,6 +119,7 @@ def __init__(self, params=None, estimates=None, path=None, dview=None, Ain=None)
 
     @profile
     def _prepare_object(self, Yr, T, new_dims=None, idx_components=None):
+        logger = logging.getLogger("caiman")
         init_batch = self.params.get('online', 'init_batch')
         old_dims = self.params.get('data', 'dims')
         self.is1p = (self.params.get('init', 'method_init') == 'corr_pnr' and
@@ -248,7 +249,7 @@ def _prepare_object(self, Yr, T, new_dims=None, idx_components=None):
             self.estimates.CY = self.estimates.CY * 1. / self.params.get('online', 'init_batch')
             self.estimates.CC = 1 * self.estimates.CC / self.params.get('online', 'init_batch')
 
-        logging.info(f'Expecting {expected_comps} components')
+        logger.info(f'Expecting {expected_comps} components')
         self.estimates.CY.resize([expected_comps + self.params.get('init', 'nb'), self.estimates.CY.shape[-1]], refcheck=False)
         if self.params.get('online', 'use_dense'):
             self.estimates.Ab_dense = np.zeros((self.estimates.CY.shape[-1], expected_comps + self.params.get('init', 'nb')),
@@ -325,11 +326,11 @@ def _prepare_object(self, Yr, T, new_dims=None, idx_components=None):
         else:
             try:
                 from tensorflow.keras.models import model_from_json
-                logging.info('Using Keras')
+                logger.info('Using Keras')
                 use_keras = True
             except(ModuleNotFoundError):
                 use_keras = False
-                logging.info('Using Tensorflow')
+                logger.info('Using Tensorflow')
             if use_keras:
                 path = self.params.get('online', 'path_to_model').split(".")[:-1]
                 json_path = ".".join(path + ["json"])
@@ -420,7 +421,7 @@ def fit_next(self, t, frame_in, num_iters_hals=3):
             num_iters_hals: int, optional
                 maximal number of iterations for HALS (NNLS via blockCD)
         """
-
+        logger = logging.getLogger("caiman")
         t_start = time()
 
         # locally scoped variables for brevity of code and faster look up
@@ -581,8 +582,7 @@ def fit_next(self, t, frame_in, num_iters_hals=3):
                         self.estimates.Ab_dense = np.zeros((self.estimates.CY.shape[-1], expected_comps + nb_),
                                                  dtype=np.float32)
                         self.estimates.Ab_dense[:, :Ab_.shape[1]] = Ab_.toarray()
-                    logging.info('Increasing number of expected components to:' +
-                                 str(expected_comps))
+                    logger.info(f'Increasing number of expected components to: {expected_comps}')
                     self.update_counter.resize(self.N, refcheck=False)
 
                 self.estimates.noisyC[self.M - num_added:self.M, t - mbs +
@@ -739,7 +739,7 @@ def fit_next(self, t, frame_in, num_iters_hals=3):
         if not self.params.get('online', 'dist_shape_update'):    # bulk shape update
             if ((t + 1 - self.params.get('online', 'init_batch')) %
                     self.params.get('online', 'update_freq') == 0):
-                logging.info('Updating Shapes')
+                logger.info('Updating Shapes')
 
                 if self.N > self.params.get('online', 'max_comp_update_shape'):
                     indicator_components = np.where(self.update_counter <=
@@ -875,6 +875,7 @@ def process_pixel(p):
         return self
 
     def initialize_online(self, model_LN=None, T=None):
+        logger = logging.getLogger("caiman")
         fls = self.params.get('data', 'fnames')
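         # fls holds the list of input movie files from the 'data' group; the
         # 'online' group fetched next supplies init_batch, the number of frames
         # loaded below to seed the model before streaming begins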
opts = self.params.get_group('online')
         Y = caiman.load(fls[0], subindices=slice(0, opts['init_batch'],
@@ -909,7 +910,7 @@ def initialize_online(self, model_LN=None, T=None):
             Y = Y - img_min
             img_norm = np.std(Y, axis=0)
             img_norm += np.median(img_norm)  # normalize data to equalize the FOV
-            logging.info('Frame size:' + str(img_norm.shape))
+            logger.info(f'Frame size: {img_norm.shape}')
         if self.params.get('online', 'normalize'):
             Y = Y/img_norm[None, :, :]
         if opts['show_movie']:
@@ -1108,11 +1109,12 @@ def fit_online(self, **kwargs):
         Returns:
             self (results of caiman online)
         """
+        logger = logging.getLogger("caiman")
         self.t_init = -time()
         fls = self.params.get('data', 'fnames')
         init_batch = self.params.get('online', 'init_batch')
         if self.params.get('online', 'ring_CNN'):
-            logging.info('Using Ring CNN model')
+            logger.info('Using Ring CNN model')
             gSig = self.params.get('init', 'gSig')[0]
             width = self.params.get('ring_CNN', 'width')
             nch = self.params.get('ring_CNN', 'n_channels')
@@ -1127,22 +1129,22 @@ def fit_online(self, **kwargs):
             Y = caiman.base.movies.load(fls[0], subindices=slice(init_batch),
                                    var_name_hdf5=self.params.get('data', 'var_name_hdf5'))
             shape = Y.shape[1:] + (1,)
-            logging.info('Starting background model training.')
+            logger.info('Starting background model training.')
             model_LN = create_LN_model(Y, shape=shape, n_channels=nch,
                                        lr=self.params.get('ring_CNN', 'lr'), gSig=gSig,
                                        loss=loss_fn, width=width,
                                        use_add=self.params.get('ring_CNN', 'use_add'),
                                        use_bias=self.params.get('ring_CNN', 'use_bias'))
             if self.params.get('ring_CNN', 'reuse_model'):
-                logging.info('Using existing model from {self.params.get("ring_CNN", "path_to_model")}')
+                logger.info(f'Using existing model from {self.params.get("ring_CNN", "path_to_model")}')
                 model_LN.load_weights(self.params.get('ring_CNN', 'path_to_model'))
             else:
-                logging.info('Estimating model from scratch, starting training.')
+                logger.info('Estimating model from scratch, starting training.')
                 model_LN, history, path_to_model = fit_NL_model(model_LN, Y,
                                                                 epochs=self.params.get('ring_CNN', 'max_epochs'),
                                                                 patience=self.params.get('ring_CNN', 'patience'),
                                                                 schedule=sch)
-                logging.info(f'Training complete. Model saved in {path_to_model}.')
+                logger.info(f'Training complete. Model saved in {path_to_model}.')
                 self.params.set('ring_CNN', {'path_to_model': path_to_model})
         else:
             model_LN = None
@@ -1171,7 +1173,7 @@ def fit_online(self, **kwargs):
         for iter in range(epochs):
             if iter == epochs - 1 and self.params.get('online', 'stop_detection'):
                 self.params.set('online', {'update_num_comps': False})
-                logging.info(f"Searching for new components set to: {self.params.get('online', 'update_num_comps')}")
+                logger.info(f"Searching for new components set to: {self.params.get('online', 'update_num_comps')}")
             if iter > 0:
                 # if not on first epoch process all files from scratch
                 process_files = fls[:init_files + extra_files]
@@ -1180,7 +1182,7 @@ def fit_online(self, **kwargs):
             # Go through all files
             # TODO Use better variable names
             for file_count, ffll in enumerate(process_files):
-                logging.warning(f'Now processing file {ffll}')
+                logger.warning(f'Now processing file {ffll}')
                 Y_ = caiman.base.movies.load_iter(
                     ffll, var_name_hdf5=self.params.get('data', 'var_name_hdf5'),
                     subindices=slice(init_batc_iter[file_count], None, None))
@@ -1206,10 +1208,9 @@ def fit_online(self, **kwargs):
                             raise Exception('Frame ' + str(frame_count) +
                                             ' contains NaN')
                         if t % 500 == 0:
-                            logging.info('Epoch: ' + str(iter + 1) + '. ' + str(t) +
+                            logger.info(f'Epoch: {iter + 1}. 
{t}' + ' frames have been processed in total. ' + - str(self.N - old_comps) + - ' new components were added. Total # of components is ' + f'{self.N - old_comps} new components were added. Total # of components is ' + str(self.estimates.Ab.shape[-1] - self.params.get('init', 'nb'))) old_comps = self.N diff --git a/caiman/source_extraction/cnmf/params.py b/caiman/source_extraction/cnmf/params.py index 7ca69fa04..2014860df 100644 --- a/caiman/source_extraction/cnmf/params.py +++ b/caiman/source_extraction/cnmf/params.py @@ -10,8 +10,8 @@ from scipy.ndimage import generate_binary_structure, iterate_structure from typing import Optional -import caiman.utils.utils import caiman.base.movies +import caiman.utils.utils from caiman.paths import caiman_datadir from caiman.source_extraction.cnmf.utilities import dict_compare @@ -903,6 +903,7 @@ def check_consistency(self): """ Populates the params object with some dataset dependent values and ensures that certain constraints are satisfied. """ + logger = logging.getLogger("caiman") self.data['last_commit'] = '-'.join(caiman.utils.utils.get_caiman_version()) if self.data['dims'] is None and self.data['fnames'] is not None: self.data['dims'] = caiman.base.movies.get_file_size(self.data['fnames'], var_name_hdf5=self.data['var_name_hdf5'])[0] @@ -936,18 +937,18 @@ def check_consistency(self): self.init['gSiz'] = tuple([gs + 1 if gs % 2 == 0 else gs for gs in self.init['gSiz']]) if self.patch['rf'] is not None: if np.any(np.array(self.patch['rf']) <= self.init['gSiz'][0]): - logging.warning(f"Changing rf from {self.patch['rf']} to {2 * self.init['gSiz'][0]} because the constraint rf > gSiz was not satisfied.") + logger.warning(f"Changing rf from {self.patch['rf']} to {2 * self.init['gSiz'][0]} because the constraint rf > gSiz was not satisfied.") if self.init['nb'] <= 0 and (self.patch['nb_patch'] != self.init['nb'] or self.patch['low_rank_background'] is not None): - logging.warning(f"gnb={self.init['nb']}, hence setting keys nb_patch and low_rank_background in group patch automatically.") + logger.warning(f"gnb={self.init['nb']}, hence setting keys nb_patch and low_rank_background in group patch automatically.") self.set('patch', {'nb_patch': self.init['nb'], 'low_rank_background': None}) if self.init['nb'] == -1 and self.spatial['update_background_components']: - logging.warning("gnb=-1, hence setting key update_background_components " + + logger.warning("gnb=-1, hence setting key update_background_components " + "in group spatial automatically to False.") self.set('spatial', {'update_background_components': False}) if self.init['method_init'] == 'corr_pnr' and self.init['ring_size_factor'] is not None \ and self.init['normalize_init']: - logging.warning("using CNMF-E's ringmodel for background hence setting key " + + logger.warning("using CNMF-E's ringmodel for background hence setting key " + "normalize_init in group init automatically to False.") self.set('init', {'normalize_init': False}) if self.motion['is3D']: @@ -955,14 +956,14 @@ def check_consistency(self): if len(self.motion[a]) != 3: if self.motion[a][0] == self.motion[a][1]: self.motion[a] = (self.motion[a][0],) * 3 - logging.warning("is3D=True, hence setting key " + a + + logger.warning("is3D=True, hence setting key " + a + " automatically to " + str(self.motion[a])) else: raise ValueError(a + ' has to be a tuple of length 3 for volumetric 3D data') for key in ('max_num_added', 'min_num_trial'): if (self.online[key] == 0 and self.online['update_num_comps']): self.set('online', 
{'update_num_comps': False}) - logging.warning(key + "=0, hence setting key update_num_comps " + + logger.warning(key + "=0, hence setting key update_num_comps " + "in group online automatically to False.") # FIXME The authoritative value is stored in the init field. This should later be refactored out # into a general section, once we're passing around the CNMFParams object rather than splatting it out @@ -985,8 +986,9 @@ def set(self, group:str, val_dict:dict, set_if_not_exists:bool=False, verbose=Fa A future version of caiman may make this method private. """ + logger = logging.getLogger("caiman") if set_if_not_exists: - logging.warning("The set_if_not_exists flag for CNMFParams.set() is deprecated and will be removed in a future version of Caiman") + logger.warning("The set_if_not_exists flag for CNMFParams.set() is deprecated and will be removed in a future version of Caiman") # can't easily catch if it's passed but set to False, but that wouldn't do anything because of the default, # and if they get that error it's at least really easy to fix - just remove the flag # we don't want to support this because it makes the structure of the object unpredictable except at runtime @@ -998,14 +1000,14 @@ def set(self, group:str, val_dict:dict, set_if_not_exists:bool=False, verbose=Fa for k, v in val_dict.items(): if k not in d and not set_if_not_exists: if verbose: - logging.warning( + logger.warning( f"{group}/{k} not set: invalid target in CNMFParams object") else: try: if np.any(d[k] != v): - logging.info(f"Changing key {k} in group {group} from {d[k]} to {v}") + logger.info(f"Changing key {k} in group {group} from {d[k]} to {v}") except ValueError: # d[k] and v also differ if above comparison fails, e.g. lists of different length - logging.info(f"Changing key {k} in group {group} from {d[k]} to {v}") + logger.info(f"Changing key {k} in group {group} from {d[k]} to {v}") d[k] = v def get(self, group, key): @@ -1129,11 +1131,11 @@ def change_params(self, params_dict, allow_legacy:bool=True, warn_unused:bool=Tr if k == 'nb' and paramkey != 'init': # Special casing to handle a misdesign in CNMFParams where some keys must have the same value in different # sections. - logging.warning("The 'nb' parameter can only be set in the init part of CNMFParams. Attempts to set it elsewhere are ignored") + logger.warning("The 'nb' parameter can only be set in the init part of CNMFParams. Attempts to set it elsewhere are ignored") continue if k not in cat_handle and warn_unused: # For regular/pathed API, we can notice right away if the user gave us something that won't update the object - logging.warning(f"In setting CNMFParams, provided key {paramkey}/{k} was not consumed. This is a bug!") + logger.warning(f"In setting CNMFParams, provided key {paramkey}/{k} was not consumed. This is a bug!") else: cat_handle[k] = v # BEGIN code that we will remove in some future version of caiman @@ -1147,13 +1149,13 @@ def change_params(self, params_dict, allow_legacy:bool=True, warn_unused:bool=Tr cat_handle[paramkey] = params_dict[paramkey] # Do the update if legacy_used: if not nagged_once: - logging.warning(f"In setting CNMFParams, non-pathed parameters were used; this is deprecated. In some future version of Caiman, allow_legacy will default to False (and eventually will be removed)") + logger.warning(f"In setting CNMFParams, non-pathed parameters were used; this is deprecated. 
In some future version of Caiman, allow_legacy will default to False (and eventually will be removed)") nagged_once = True # END if warn_unused: for toplevel_k in params_dict: if toplevel_k not in consumed and toplevel_k not in list(self.__dict__.keys()): # When we remove legacy behaviour, this logic will simplify and fold into above - logging.warning(f"In setting CNMFParams, provided toplevel key {toplevel_k} was unused. This is a bug!") + logger.warning(f"In setting CNMFParams, provided toplevel key {toplevel_k} was unused. This is a bug!") self.check_consistency() def change_params_from_json(self, jsonstring:str, verbose:bool=False) -> None: diff --git a/caiman/source_extraction/cnmf/pre_processing.py b/caiman/source_extraction/cnmf/pre_processing.py index 7a880225b..d7eef9609 100644 --- a/caiman/source_extraction/cnmf/pre_processing.py +++ b/caiman/source_extraction/cnmf/pre_processing.py @@ -37,10 +37,11 @@ def interpolate_missing_data(Y): Raises: Exception 'The algorithm has not been tested with missing values (NaNs). Remove NaNs and rerun the algorithm.' """ + logger = logging.getLogger("caiman") coor = [] - logging.info('Checking for missing data entries (NaN)') + logger.info('Checking for missing data entries (NaN)') if np.any(np.isnan(Y)): - logging.info('Interpolating missing data') + logger.info('Interpolating missing data') for idx, row in enumerate(Y): nans = np.where(np.isnan(row))[0] n_nans = np.where(~np.isnan(row))[0] diff --git a/caiman/source_extraction/cnmf/spatial.py b/caiman/source_extraction/cnmf/spatial.py index 045027c8a..0ba5f2b4e 100644 --- a/caiman/source_extraction/cnmf/spatial.py +++ b/caiman/source_extraction/cnmf/spatial.py @@ -8,17 +8,19 @@ import logging import numpy as np import os +import psutil import scipy from scipy.sparse import coo_matrix, csc_matrix, csr_matrix from scipy.sparse import spdiags from scipy.linalg import eig from scipy.ndimage import ( - label, binary_dilation, binary_closing, - median_filter, generate_binary_structure, - iterate_structure + grey_dilation, + iterate_structure, + label, + median_filter ) import shutil from sklearn.decomposition import NMF @@ -26,7 +28,6 @@ from sklearn.pipeline import make_pipeline import tempfile import time -import psutil import caiman.mmapping import caiman.utils.stats @@ -151,8 +152,9 @@ def update_spatial_components(Y, C=None, f=None, A_in=None, sn=None, dims=None, Exception "Failed to delete: " + folder """ + logger = logging.getLogger("caiman") # TODO fix documentation on backend - #logging.info('Initializing update of Spatial Components') + logger.info('Initializing update of Spatial Components') if expandCore is None: expandCore = iterate_structure( @@ -166,7 +168,7 @@ def update_spatial_components(Y, C=None, f=None, A_in=None, sn=None, dims=None, Y, A_in, C, f, n_pixels_per_process, nb) start_time = time.time() - logging.info('Computing support of spatial components') + logger.info('Computing support of spatial components') # we compute the indicator from distance indicator ind2_, nr, C, f, b_, A_in = computing_indicator( Y, A_in, b_in, C, f, nb, method_exp, dims, min_size, max_size, dist, expandCore, dview) @@ -174,7 +176,7 @@ def update_spatial_components(Y, C=None, f=None, A_in=None, sn=None, dims=None, # remove components that are empty or have a nan ff = np.where((np.sum(C, axis=1)==0) + np.isnan(np.sum(C, axis=1)))[0] if np.size(ff) > 0: - logging.info(f"Eliminating empty and nan components: {ff}") + logger.info(f"Eliminating empty and nan components: {ff}") A_in = 
caiman.utils.stats.csc_column_remove(A_in, list(ff)) C = np.delete(C, list(ff), 0) # update indices @@ -195,13 +197,13 @@ def update_spatial_components(Y, C=None, f=None, A_in=None, sn=None, dims=None, if b_in is None: b_in = b_ - logging.info('Memory mapping') + logger.info('Memory mapping') # we create a memory map file if not already the case, we send Cf, a # matrix that include background components C_name, Y_name, folder = creatememmap(Y, np.vstack((C, f)), dview) # we create a pixel group array (chunks for the cnmf)for the parallelization of the process - logging.info('Updating Spatial Components using lasso lars') + logger.info('Updating Spatial Components using lasso lars') cct = np.diag(C.dot(C.T)) pixel_groups = [] for i in range(0, np.prod(dims) - n_pixels_per_process + 1, n_pixels_per_process): @@ -234,13 +236,13 @@ def update_spatial_components(Y, C=None, f=None, A_in=None, sn=None, dims=None, cols.extend(idxs_[nz]) A_ = scipy.sparse.coo_matrix((data, (rows, cols)), shape=(d, nr + np.size(f, 0))) - logging.info("thresholding components") + logger.info("thresholding components") A_ = threshold_components(A_, dims, dview=dview, medw=medw, thr_method=thr_method, maxthr=maxthr, nrgthr=nrgthr, extract_cc=extract_cc, se=se, ss=ss) #ff = np.where(np.sum(A_, axis=0) == 0) # remove empty components ff = np.asarray(A_.sum(0) == 0).nonzero()[1] if np.size(ff) > 0: - logging.info(f'removing {ff.shape[0]} empty spatial component(s)') + logger.info(f'removing {ff.shape[0]} empty spatial component(s)') if any(ff < nr): A_ = caiman.utils.stats.csc_column_remove(A_, list(ff[ff < nr])) C = np.delete(C, list(ff[ff < nr]), 0) @@ -257,7 +259,7 @@ def update_spatial_components(Y, C=None, f=None, A_in=None, sn=None, dims=None, A_ = A_[:, :nr] if update_background_components: A_ = csr_matrix(A_) - logging.info("Computing residuals") + logger.info("Computing residuals") if 'memmap' in str(type(Y)): bl_siz1 = Y.shape[0] // (num_blocks_per_run_spat - 1) bl_siz2 = psutil.virtual_memory().available // (4*Y.shape[-1]*(num_blocks_per_run_spat + 1)) @@ -284,11 +286,11 @@ def update_spatial_components(Y, C=None, f=None, A_in=None, sn=None, dims=None, # except NameError: b = b_in # print(("--- %s seconds ---" % (time.time() - start_time))) - logging.info('Updating done in ' + + logger.info('Updating done in ' + '{0}s'.format(str(time.time() - start_time).split(".")[0])) try: # clean up # remove temporary file created - logging.info("Removing created tempfiles") + logger.info("Removing created tempfiles") shutil.rmtree(folder) except: raise Exception("Failed to delete: " + folder) @@ -841,7 +843,7 @@ def determine_search_location(A, dims, method='ellipse', min_size=3, max_size=8, Exception 'You cannot pass empty (all zeros) components!' 
""" - from scipy.ndimage import grey_dilation + logger = logging.getLogger("caiman") # we initialize the values if len(dims) == 2: @@ -918,7 +920,7 @@ def determine_search_location(A, dims, method='ellipse', min_size=3, max_size=8, dist_indicator = csc_matrix((data, indices, indptr), shape=(d, nr)) else: - logging.info('dilate in parallel...') + logger.info('dilate in parallel...') pars = [] for i in range(nr): pars.append([A[:, i], dims, expandCore, d]) diff --git a/caiman/source_extraction/cnmf/temporal.py b/caiman/source_extraction/cnmf/temporal.py index b5bdde791..1ee1ee65a 100644 --- a/caiman/source_extraction/cnmf/temporal.py +++ b/caiman/source_extraction/cnmf/temporal.py @@ -169,6 +169,7 @@ def update_temporal_components(Y, A, b, Cin, fin, bl=None, c1=None, g=None, sn=N Automatically tuned sparsity parameter """ + logger = logging.getLogger("caiman") if 'p' not in kwargs or kwargs['p'] is None: raise Exception("You have to provide a value for p") @@ -197,7 +198,7 @@ def update_temporal_components(Y, A, b, Cin, fin, bl=None, c1=None, g=None, sn=N C = Cin.copy() nA = np.ravel(A.power(2).sum(axis=0)) + np.finfo(np.float32).eps - logging.info('Generating residuals') + logger.info('Generating residuals') # dview_res = None if block_size >= 500 else dview if 'memmap' in str(type(Y)): bl_siz1 = d // (np.maximum(num_blocks_per_run_temp - 1, 1)) @@ -211,13 +212,13 @@ def update_temporal_components(Y, A, b, Cin, fin, bl=None, c1=None, g=None, sn=N YrA = YA - AA.T.dot(Cin).T # creating the patch of components to be computed in parallel parrllcomp, len_parrllcomp = caiman.source_extraction.cnmf.utilities.update_order_greedy(AA[:nr, :][:, :nr]) - logging.info("entering the deconvolution ") + logger.info("entering the deconvolution ") C, S, bl, YrA, c1, sn, g, lam = update_iteration(parrllcomp, len_parrllcomp, nb, C, S, bl, nr, ITER, YrA, c1, sn, g, Cin, T, nA, dview, debug, AA, kwargs) ff = np.where(np.sum(C, axis=1) == 0) # remove empty components if np.size(ff) > 0: # Eliminating empty temporal components ff = ff[0] - logging.info(f'removing {len(ff)} empty spatial component(s)') + logger.info(f'removing {len(ff)} empty spatial component(s)') keep = list(range(A.shape[1])) for i in ff: keep.remove(i) @@ -327,6 +328,7 @@ def update_iteration(parrllcomp, len_parrllcomp, nb, C, S, bl, nr, YrA corresponds to the residual trace for each component and is used for faster plotting (K x T) """ + logger = logging.getLogger("caiman") lam = np.repeat(None, nr) for _ in range(ITER): @@ -375,7 +377,7 @@ def update_iteration(parrllcomp, len_parrllcomp, nb, C, S, bl, nr, YrA -= AA[jo, :].T.dot(Ctemp - C[jo, :]).T C[jo, :] = Ctemp.copy() S[jo, :] = Stemp - logging.info(str(np.sum(len_parrllcomp[:count + 1])) + + logger.info(str(np.sum(len_parrllcomp[:count + 1])) + f" out of total {nr} temporal components updated") for ii in np.arange(nr, nr + nb): @@ -388,13 +390,13 @@ def update_iteration(parrllcomp, len_parrllcomp, nb, C, S, bl, nr, try: if scipy.linalg.norm(Cin - C, 'fro') <= 1e-3*scipy.linalg.norm(C, 'fro'): - logging.info("stopping: overall temporal component not changing" + + logger.info("stopping: overall temporal component not changing" + " significantly") break else: # we keep Cin and do the iteration once more Cin = C.copy() except ValueError: - logging.warning("Aborting updating of temporal components due" + + logger.warning("Aborting updating of temporal components due" + " to possible numerical issues.") C = Cin.copy() break diff --git a/caiman/source_extraction/cnmf/utilities.py 
b/caiman/source_extraction/cnmf/utilities.py index c73678adb..1a935ab67 100644 --- a/caiman/source_extraction/cnmf/utilities.py +++ b/caiman/source_extraction/cnmf/utilities.py @@ -482,15 +482,16 @@ def detrend_df_f(A, b, C, f, YrA=None, quantileMin=8, frames_window=500, F_df: the computed Calcium activity to the derivative of f """ + logger = logging.getLogger("caiman") if C is None: - logging.warning("There are no components for DF/F extraction!") + logger.warning("There are no components for DF/F extraction!") return None if b is None or f is None: b = np.zeros((A.shape[0], 1)) f = np.zeros((1, C.shape[1])) - logging.warning("Background components not present. Results should" + + logger.warning("Background components not present. Results should" + " not be interpreted as DF/F normalized but only" + " as detrended.") detrend_only = True diff --git a/caiman/summary_images.py b/caiman/summary_images.py index f9a841d35..de4e40792 100644 --- a/caiman/summary_images.py +++ b/caiman/summary_images.py @@ -39,6 +39,7 @@ def max_correlation_image(Y, bin_size: int = 1000, eight_neighbours: bool = True Cn: d1 x d2 [x d3] matrix, max correlation image """ + logger = logging.getLogger("caiman") if swap_dim: Y = np.transpose(Y, tuple(np.hstack((Y.ndim - 1, list(range(Y.ndim))[:-1])))) @@ -57,7 +58,7 @@ def max_correlation_image(Y, bin_size: int = 1000, eight_neighbours: bool = True Cn_bins[i] = local_correlations_fft(Y[i * bin_size:(i + 1) * bin_size], eight_neighbours=eight_neighbours, swap_dim=False) - logging.debug(i * bin_size) + logger.debug(i * bin_size) Cn = np.max(Cn_bins, axis=0) return Cn diff --git a/caiman/tests/comparison/comparison.py b/caiman/tests/comparison/comparison.py index 034141ac3..d866bd3aa 100644 --- a/caiman/tests/comparison/comparison.py +++ b/caiman/tests/comparison/comparison.py @@ -23,10 +23,7 @@ # You can log to a file using the filename parameter, or make the output more or less # verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR -logging.basicConfig( - format="%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s", - # filename="/tmp/caiman.log", - level=logging.DEBUG) +logging.getLogger("caiman").setLevel(logging.DEBUG) import caiman as cm from caiman.paths import caiman_datadir diff --git a/caiman/tests/comparison_general.py b/caiman/tests/comparison_general.py index b014284c4..da0538de8 100644 --- a/caiman/tests/comparison_general.py +++ b/caiman/tests/comparison_general.py @@ -49,10 +49,7 @@ # You can log to a file using the filename parameter, or make the output more or less # verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR -logging.basicConfig( - format="%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s", - # filename="/tmp/caiman.log", - level=logging.DEBUG) +logging.getLogger("caiman").setLevel(logging.DEBUG) # GLOBAL VAR params_movie = { diff --git a/caiman/tests/comparison_humans.py b/caiman/tests/comparison_humans.py index 5b955e26b..38fcca106 100644 --- a/caiman/tests/comparison_humans.py +++ b/caiman/tests/comparison_humans.py @@ -38,10 +38,7 @@ from caiman.source_extraction.cnmf import params as params from caiman.source_extraction.cnmf.cnmf import load_CNMF -logging.basicConfig(format= - "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s]"\ - "[%(process)d] %(message)s", - level=logging.ERROR) +logging.getLogger("caiman").setLevel(logging.ERROR) warnings.filterwarnings("ignore", 
category=FutureWarning) # %% ANALYSIS MODE AND PARAMETERS diff --git a/caiman/tests/comparison_humans_online.py b/caiman/tests/comparison_humans_online.py index e01fbb33a..ba0e61434 100644 --- a/caiman/tests/comparison_humans_online.py +++ b/caiman/tests/comparison_humans_online.py @@ -32,10 +32,7 @@ except: print('OpenCV is naturally single threaded') -logging.basicConfig(format= - "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s]"\ - "[%(process)d] %(message)s", - level=logging.ERROR) +logging.getLogger("caiman").setLevel(logging.ERROR) # %% Select a dataset (specify the ID variable as a list of datasets to be processed) # 0: neuforinder.03.00.test (N.03.00.t) # 1: neurofinder.04.00.test (N.04.00.t) diff --git a/caiman/tests/test_deconvolution.py b/caiman/tests/test_deconvolution.py index b4d53d585..46694a0fc 100644 --- a/caiman/tests/test_deconvolution.py +++ b/caiman/tests/test_deconvolution.py @@ -11,11 +11,7 @@ # You can log to a file using the filename parameter, or make the output more or less # verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR -logging.basicConfig( - format="%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s", - # filename="/tmp/caiman.log", - level=logging.DEBUG) - +logging.getLogger("caiman").setLevel(logging.DEBUG) def gen_data(g=[.95], sn=.2, T=1000, framerate=30, firerate=.5, b=10, N=1, seed=0): """ diff --git a/caiman/utils/labelling.py b/caiman/utils/labelling.py index 82124b4af..95dbc0264 100644 --- a/caiman/utils/labelling.py +++ b/caiman/utils/labelling.py @@ -16,7 +16,7 @@ def pre_process_handle(args): name_log = fil[:-4] + '_LOG' logger = logging.getLogger(name_log) - hdlr = logging.FileHandler(name_log) + hdlr = logging.FileHandler(name_log) # We don't use the caiman logger here b/c we're saving to files formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') hdlr.setFormatter(formatter) diff --git a/caiman/utils/sbx_utils.py b/caiman/utils/sbx_utils.py index f349b9d0b..48bbba484 100644 --- a/caiman/utils/sbx_utils.py +++ b/caiman/utils/sbx_utils.py @@ -487,12 +487,14 @@ def _interpret_subindices(subindices: DimSubindices, dim_extent: int) -> tuple[I Given the extent of a dimension in the corresponding recording, obtain an iterable over subindices and the step size (or 0 if the step size is not uniform). """ + logger = logging.getLogger("caiman") + if isinstance(subindices, slice): iterable_elements = range(dim_extent)[subindices] skip = iterable_elements.step if subindices.stop is not None and np.isfinite(subindices.stop) and subindices.stop > dim_extent: - logging.warning(f'Only {dim_extent} frames or pixels available to load ' + + logger.warning(f'Only {dim_extent} frames or pixels available to load ' + f'(requested up to {subindices.stop})') else: iterable_elements = subindices diff --git a/caiman/utils/stats.py b/caiman/utils/stats.py index a5688fe36..cf1e3d596 100644 --- a/caiman/utils/stats.py +++ b/caiman/utils/stats.py @@ -172,6 +172,8 @@ def df_percentile(inputData, axis=None): If errors occur, return fallback values """ + logger = logging.getLogger("caiman") + if axis is not None: def fnc(x): @@ -191,7 +193,7 @@ def fnc(x): err = False except ValueError: err_count += 1 - logging.warning(f"kde() failure {err_count}. Concatenating traces and recomputing.") + logger.warning(f"kde() failure {err_count}. 
Concatenating traces and recomputing.") if not isinstance(inputData, list): inputData = inputData.tolist() inputData += inputData @@ -199,10 +201,11 @@ def fnc(x): # There are three ways the above calculation can go wrong. # For each case: just use median. # if cdf was never defined, it means kde never ran without error + # TODO: Review if we can just look at err instead of this weird try try: cdf except NameError: - logging.warning('Max err count reached. Reverting to median.') + logger.warning('Max err count reached. Reverting to median.') data_prct = 50 val = np.median(np.array(inputData)) else: @@ -210,12 +213,12 @@ def fnc(x): val = mesh[np.argmax(density)] if data_prct >= 100 or data_prct < 0: - logging.warning('Invalid percentile (<0 or >100) computed. Reverting to median.') + logger.warning('Invalid percentile (<0 or >100) computed. Reverting to median.') data_prct = 50 val = np.median(np.array(inputData)) if np.isnan(data_prct): - logging.warning('NaN percentile computed. Reverting to median.') + logger.warning('NaN percentile computed. Reverting to median.') data_prct = 50 val = np.median(np.array(inputData)) @@ -299,9 +302,10 @@ def csc_column_remove(A, ind): ind: iterable[int] list or np.array with columns to be removed """ + logger = logging.getLogger("caiman") d1, d2 = A.shape if 'csc_matrix' not in str(type(A)): # FIXME - logging.warning("Original matrix not in csc_format. Converting it" + " anyway.") + logger.warning("Original matrix not in csc_format. Converting it anyway.") A = scipy.sparse.csc_matrix(A) indptr = A.indptr ind_diff = np.diff(A.indptr).tolist() diff --git a/caiman/utils/utils.py b/caiman/utils/utils.py index e0dd46f15..599d9b60e 100644 --- a/caiman/utils/utils.py +++ b/caiman/utils/utils.py @@ -61,6 +61,7 @@ def download_demo(name:str='Sue_2x_3000_40_-46.tif', save_folder:str='') -> str: Raise: WrongFolder Exception """ + logger = logging.getLogger("caiman") file_dict = { 'Sue_2x_3000_40_-46.tif': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/Sue_2x_3000_40_-46.tif', @@ -93,8 +94,8 @@ def download_demo(name:str='Sue_2x_3000_40_-46.tif', save_folder:str='') -> str: path_movie = os.path.join(base_folder, save_folder, name) if not os.path.exists(path_movie): url = file_dict[name] - logging.info(f"downloading {name} with urllib") - logging.info(f"GET {url} HTTP/1.1") + logger.info(f"downloading {name} with urllib") + logger.info(f"GET {url} HTTP/1.1") if os.name == 'nt': urllib_context = ssl.create_default_context(cafile = certifi.where() ) # On windows we need to avoid the limited default cert store else: @@ -102,7 +103,7 @@ def download_demo(name:str='Sue_2x_3000_40_-46.tif', save_folder:str='') -> str: try: f = urlopen(url, context=urllib_context) except: - logging.info(f"Trying to set user agent to download demo") + logger.info(f"Trying to set user agent to download demo") from urllib.request import Request req = Request(url, headers={'User-Agent': 'Mozilla/5.0'}) f = urlopen(req, context=urllib_context) @@ -111,7 +112,7 @@ def download_demo(name:str='Sue_2x_3000_40_-46.tif', save_folder:str='') -> str: with open(path_movie, "wb") as code: code.write(data) else: - logging.info(f"File {name} already downloaded") + logger.info(f"File {name} already downloaded") else: raise Exception('Cannot find the example_movies folder in your caiman_datadir - did you make one with caimanmanager?') return path_movie @@ -134,9 +135,7 @@ def download_model(name:str='mask_rcnn', save_folder:str='') -> str: Raise: WrongFolder Exception """ - - #\bug - 
#\warning + logger = logging.getLogger("caiman") file_dict = {'mask_rcnn': 'https://caiman.flatironinstitute.org/~neuro/caiman_downloadables/model/mask_rcnn_neurons_0040.h5'} base_folder = os.path.join(caiman_datadir(), 'model') @@ -146,8 +145,8 @@ def download_model(name:str='mask_rcnn', save_folder:str='') -> str: path_movie = os.path.join(base_folder, save_folder, name) if not os.path.exists(path_movie): url = file_dict[name] - logging.info(f"downloading {name} with urllib") - logging.info(f"GET {url} HTTP/1.1") + logger.info(f"downloading {name} with urllib") + logger.info(f"GET {url} HTTP/1.1") if os.name == 'nt': urllib_context = ssl.create_default_context(cafile = certifi.where() ) # On windows we need to avoid the limited default cert store else: @@ -155,7 +154,7 @@ def download_model(name:str='mask_rcnn', save_folder:str='') -> str: try: f = urlopen(url, context=urllib_context) except: - logging.info(f"Trying to set user agent to download demo") + logger.info(f"Trying to set user agent to download demo") from urllib.request import Request req = Request(url, headers={'User-Agent': 'Mozilla/5.0'}) f = urlopen(req, context=urllib_context) @@ -164,7 +163,7 @@ def download_model(name:str='mask_rcnn', save_folder:str='') -> str: with open(path_movie, "wb") as code: code.write(data) else: - logging.info("File " + str(name) + " already downloaded") + logger.info("File " + str(name) + " already downloaded") else: raise Exception('Cannot find the model folder in your caiman_datadir - did you make one with caimanmanager?') return path_movie @@ -223,6 +222,7 @@ def get_image_description_SI(fname:str) -> list: Returns: image_description: information of the image """ + logger = logging.getLogger("caiman") image_descriptions = [] @@ -230,7 +230,7 @@ def get_image_description_SI(fname:str) -> list: for idx, pag in enumerate(tf.pages): if idx % 1000 == 0: - logging.debug(idx) # progress report to the user + logger.debug(idx) # progress report to the user field = pag.tags['image_description'].value image_descriptions.append(si_parse(field)) @@ -374,6 +374,7 @@ def apply_magic_wand(A, gSig, dims, A_thr=None, coms=None, dview=None, masks: ndarray binary masks """ + logger = logging.getLogger("caiman") if (A_thr is None) and (coms is None): import pdb @@ -399,7 +400,7 @@ def apply_magic_wand(A, gSig, dims, A_thr=None, coms=None, dview=None, params.append([A.tocsc()[:,idx].toarray().reshape(dims, order='F'), coms[idx], min_radius, max_radius, roughness, zoom_factor, center_range]) - logging.debug(len(params)) + logger.debug(len(params)) if dview is not None: masks = np.array(list(dview.map(cell_magic_wand_wrapper, params))) @@ -459,6 +460,7 @@ def recursively_save_dict_contents_to_group(h5file:h5py.File, path:str, dic:dict dic: dictionary dictionary to save ''' + logger = logging.getLogger("caiman") # argument type checking if not isinstance(dic, dict): raise ValueError("must provide a dictionary") @@ -475,7 +477,7 @@ def recursively_save_dict_contents_to_group(h5file:h5py.File, path:str, dic:dict if key == 'g': if item is None: item = 0 - logging.info(key + ' is an object type') + logger.info(key + ' is an object type') try: item = np.array(list(item)) except: @@ -485,7 +487,7 @@ def recursively_save_dict_contents_to_group(h5file:h5py.File, path:str, dic:dict if key in ['groups', 'idx_tot', 'ind_A', 'Ab_epoch', 'coordinates', 'loaded_model', 'optional_outputs', 'merged_ROIs', 'tf_in', 'tf_out', 'empty_merged']: - logging.info(f'Key {key} is not saved') + logger.info(f'Key {key} is not saved') continue 
if isinstance(item, (list, tuple)): @@ -503,12 +505,12 @@ def recursively_save_dict_contents_to_group(h5file:h5py.File, path:str, dic:dict elif isinstance(item, (np.int64, np.int32, np.float64, float, np.float32, int)): # TODO In the future we may store all scalars, including these, as attributes too, although strings suffer the most from being stored as datasets h5file[path + key] = item - logging.debug(f'Saving numeric {path + key}') + logger.debug(f'Saving numeric {path + key}') if not h5file[path + key][()] == item: raise ValueError(f'Error (v {h5py.__version__}) while saving numeric {path + key}: assigned value {h5file[path + key][()]} does not match intended value {item}') # save numpy arrays elif isinstance(item, np.ndarray): - logging.debug(f'Saving {key}') + logger.debug(f'Saving {key}') try: h5file[path + key] = item except: @@ -520,7 +522,7 @@ def recursively_save_dict_contents_to_group(h5file:h5py.File, path:str, dic:dict elif isinstance(item, dict): recursively_save_dict_contents_to_group(h5file, path + key + '/', item) elif 'sparse' in str(type(item)): - logging.info(key + ' is sparse ****') + logger.info(key + ' is sparse ****') h5file[path + key + '/data'] = item.tocsc().data h5file[path + key + '/indptr'] = item.tocsc().indptr h5file[path + key + '/indices'] = item.tocsc().indices @@ -530,7 +532,7 @@ def recursively_save_dict_contents_to_group(h5file:h5py.File, path:str, dic:dict h5file[path + key] = 'NoneType' elif key in ['dims', 'medw', 'sigma_smooth_snmf', 'dxy', 'max_shifts', 'strides', 'overlaps', 'gSig']: - logging.info(key + ' is a tuple ****') + logger.info(f'{key} is a tuple ****') h5file[path + key] = np.array(item) elif type(item).__name__ in ['CNMFParams', 'Estimates']: # parameter object recursively_save_dict_contents_to_group(h5file, path + key + '/', item.__dict__) diff --git a/demos/general/demo_OnACID.py b/demos/general/demo_OnACID.py index 1951e6375..ca56b41d1 100755 --- a/demos/general/demo_OnACID.py +++ b/demos/general/demo_OnACID.py @@ -26,17 +26,18 @@ def main(): cfg = handle_args() + # Set up logging + logger = logging.getLogger("caiman") + logger.setLevel(logging.INFO) # Or another loglevel + logfmt = logging.Formatter("[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s") if cfg.logfile: - logging.basicConfig(format= - "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", - level=logging.INFO, - filename=cfg.logfile) - # You can make the output more or less verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR + handler = logging.FileHandler(cfg.logfile) else: - logging.basicConfig(format= - "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", - level=logging.INFO) + handler = logging.StreamHandler() + + handler.setFormatter(logfmt) + logger.addHandler(handler) opts = cnmf.params.CNMFParams(params_from_file=cfg.configfile) diff --git a/demos/general/demo_OnACID_mesoscope.py b/demos/general/demo_OnACID_mesoscope.py index 620a8b1b9..a3afe5d57 100755 --- a/demos/general/demo_OnACID_mesoscope.py +++ b/demos/general/demo_OnACID_mesoscope.py @@ -35,17 +35,18 @@ def main(): cfg = handle_args() + # Set up logging + logger = logging.getLogger("caiman") + logger.setLevel(logging.INFO) # Or another loglevel + logfmt = logging.Formatter("[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s") if cfg.logfile: - logging.basicConfig(format= - "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", - level=logging.INFO, - filename=cfg.logfile) - # You can make the output more or less verbose by setting level to 
logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR + handler = logging.FileHandler(cfg.logfile) else: - logging.basicConfig(format= - "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", - level=logging.INFO) + handler = logging.StreamHandler() + + handler.setFormatter(logfmt) + logger.addHandler(handler) opts = cnmf.params.CNMFParams(params_from_file=cfg.configfile) diff --git a/demos/general/demo_behavior.py b/demos/general/demo_behavior.py index 19c46224e..6d441bb06 100755 --- a/demos/general/demo_behavior.py +++ b/demos/general/demo_behavior.py @@ -28,17 +28,18 @@ def main(): cfg = handle_args() + # Set up logging + logger = logging.getLogger("caiman") + logger.setLevel(logging.INFO) # Or another loglevel + logfmt = logging.Formatter("[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s") if cfg.logfile: - logging.basicConfig(format= - "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", - level=logging.INFO, - filename=cfg.logfile) - # You can make the output more or less verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR + handler = logging.FileHandler(cfg.logfile) else: - logging.basicConfig(format= - "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", - level=logging.INFO) + handler = logging.StreamHandler() + + handler.setFormatter(logfmt) + logger.addHandler(handler) if cfg.input is None: # If no input is specified, use sample data, downloading if necessary diff --git a/demos/general/demo_caiman_basic.py b/demos/general/demo_caiman_basic.py index 255e5fb00..4731a7a18 100755 --- a/demos/general/demo_caiman_basic.py +++ b/demos/general/demo_caiman_basic.py @@ -35,17 +35,18 @@ def main(): cfg = handle_args() + # Set up logging + logger = logging.getLogger("caiman") + logger.setLevel(logging.INFO) # Or another loglevel + logfmt = logging.Formatter("[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s") if cfg.logfile: - logging.basicConfig(format= - "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", - level=logging.INFO, - filename=cfg.logfile) - # You can make the output more or less verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR + handler = logging.FileHandler(cfg.logfile) else: - logging.basicConfig(format= - "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", - level=logging.INFO) + handler = logging.StreamHandler() + + handler.setFormatter(logfmt) + logger.addHandler(handler) opts = params.CNMFParams(params_from_file=cfg.configfile) diff --git a/demos/general/demo_pipeline.py b/demos/general/demo_pipeline.py index 028b9c55a..57ef191c4 100755 --- a/demos/general/demo_pipeline.py +++ b/demos/general/demo_pipeline.py @@ -38,17 +38,18 @@ def main(): cfg = handle_args() + # Set up logging + logger = logging.getLogger("caiman") + logger.setLevel(logging.INFO) # Or another loglevel + logfmt = logging.Formatter("[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s") if cfg.logfile: - logging.basicConfig(format= - "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", - level=logging.INFO, - filename=cfg.logfile) - # You can make the output more or less verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR + handler = logging.FileHandler(cfg.logfile) else: - logging.basicConfig(format= - "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", - level=logging.INFO) + handler = logging.StreamHandler() + + handler.setFormatter(logfmt) + logger.addHandler(handler) # First set up some parameters for data and motion 
correction opts = params.CNMFParams(params_from_file=cfg.configfile) diff --git a/demos/general/demo_pipeline_NWB.py b/demos/general/demo_pipeline_NWB.py index 956f632b1..f2983d2fb 100755 --- a/demos/general/demo_pipeline_NWB.py +++ b/demos/general/demo_pipeline_NWB.py @@ -36,17 +36,18 @@ def main(): cfg = handle_args() + # Set up logging + logger = logging.getLogger("caiman") + logger.setLevel(logging.INFO) # Or another loglevel + logfmt = logging.Formatter("[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s") if cfg.logfile: - logging.basicConfig(format= - "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", - level=logging.INFO, - filename=cfg.logfile) - # You can make the output more or less verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR + handler = logging.FileHandler(cfg.logfile) else: - logging.basicConfig(format= - "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", - level=logging.INFO) + handler = logging.StreamHandler() + + handler.setFormatter(logfmt) + logger.addHandler(handler) opts = params.CNMFParams(params_from_file=cfg.configfile) if cfg.input is not None: diff --git a/demos/general/demo_pipeline_cnmfE.py b/demos/general/demo_pipeline_cnmfE.py index 3b3d102bf..49def303d 100755 --- a/demos/general/demo_pipeline_cnmfE.py +++ b/demos/general/demo_pipeline_cnmfE.py @@ -30,17 +30,18 @@ def main(): cfg = handle_args() + # Set up logging + logger = logging.getLogger("caiman") + logger.setLevel(logging.INFO) # Or another loglevel + logfmt = logging.Formatter("[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s") if cfg.logfile: - logging.basicConfig(format= - "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", - level=logging.INFO, - filename=cfg.logfile) - # You can make the output more or less verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR + handler = logging.FileHandler(cfg.logfile) else: - logging.basicConfig(format= - "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", - level=logging.INFO) + handler = logging.StreamHandler() + + handler.setFormatter(logfmt) + logger.addHandler(handler) opts = params.CNMFParams(params_from_file=cfg.configfile) diff --git a/demos/general/demo_pipeline_voltage_imaging.py b/demos/general/demo_pipeline_voltage_imaging.py index 20ad228e1..6ac39faed 100755 --- a/demos/general/demo_pipeline_voltage_imaging.py +++ b/demos/general/demo_pipeline_voltage_imaging.py @@ -36,17 +36,20 @@ def main(): cfg = handle_args() + # Set up logging + logger = logging.getLogger("caiman") + logger.setLevel(logging.INFO) # Or another loglevel + logfmt = logging.Formatter("[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s") + if cfg.logfile: - logging.basicConfig(format= - "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", - level=logging.INFO, - filename=cfg.logfile) - # You can make the output more or less verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR + handler = logging.FileHandler(cfg.logfile) else: - logging.basicConfig(format= - "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", - level=logging.INFO) + handler = logging.StreamHandler() + + handler.setFormatter(logfmt) + logger.addHandler(handler) + # Figure out what data we're working on if cfg.input is None: # If no input is specified, use sample data, downloading if necessary fnames = [download_demo('demo_voltage_imaging.hdf5', 'volpy')] # XXX do we need to use a separate directory? 
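Taken together, the demo scripts in this patch all converge on one handler-based setup: fetch the named "caiman" logger, pick a level, and attach either a FileHandler or a StreamHandler with a shared format. As a consolidated sketch of that pattern (the logfile value and INFO level below are illustrative placeholders, not taken from any one demo):

    import logging

    logfile = None  # illustrative: set to a path to send the log to a file instead of the console
    logger = logging.getLogger("caiman")
    logger.setLevel(logging.INFO)
    logfmt = logging.Formatter("[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s")
    handler = logging.FileHandler(logfile) if logfile else logging.StreamHandler()
    handler.setFormatter(logfmt)
    logger.addHandler(handler)

Because the handler hangs off the named "caiman" logger rather than the root logger, every module in the package that calls logging.getLogger("caiman") inherits this configuration, while log output from unrelated libraries is left untouched.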
diff --git a/docs/source/Getting_Started.rst b/docs/source/Getting_Started.rst
index 1294ecd7c..c9e92033d 100644
--- a/docs/source/Getting_Started.rst
+++ b/docs/source/Getting_Started.rst
@@ -228,25 +228,38 @@ of the file creating the log, the process ID, and the actual log message. While
 logging is especially helpful when running code on a server, it can also be helpful
 to get feedback locally, either to audit progress or diagnose problems when debugging. If you set
-this feature up by running the following cell, the logs will by default go to console. If you want to direct
-your log to file (which you can indicate with ``use_logfile = True``), then it will automatically be
-directed to your ``caiman_data/temp`` directory as defined in the ``caiman.paths`` module. You can set
-another path manually by changing the argument to the ``filename`` parameter in ``basicConfig()``.
+this feature up by running the following code, the logs will by default go to console. If you want to direct
+your log to a file, attach a ``logging.FileHandler`` as shown below; a natural destination is your
+``caiman_data/temp`` directory as defined in the ``caiman.paths`` module, and you can set another
+path by changing the argument to ``FileHandler()``.
 
+If you want to log to normal outputs (cells in Jupyter, STDOUT in scripts), you can set that up by running this:
+
+::
+
+    logger = logging.getLogger("caiman")
+    logger.setLevel(logging.WARNING)
+    handler = logging.StreamHandler()
+    log_format = logging.Formatter("%(relativeCreated)12d [%(filename)s:%(funcName)10s():%(lineno)s] [%(process)d] %(message)s")
+    handler.setFormatter(log_format)
+    logger.addHandler(handler)
+
+If you prefer to log to a file, you can set that up by running this:
+
 ::
 
-    use_logfile = True # set to True to log to file
-    if use_logfile:
-        current_datetime = datetime.datetime.now().strftime("_%Y%m%d_%H%M%S")
-        log_filename = 'demo_pipeline' + current_datetime + '.log'
-        log_path = Path(cm.paths.get_tempdir()) / log_filename
-        print(f"Will save logging data to {log_path}")
-    else:
-        log_path = None
-    log_format = "{asctime} - {levelname} - [{filename} {funcName}() {lineno}] - pid {process} - {message}"
-    logging.basicConfig(format=log_format,
-                        filename=log_path,
-                        level=logging.WARNING, style="{") #DEBUG, INFO, WARNING, ERROR, CRITICAL
+    logger = logging.getLogger("caiman")
+    logger.setLevel(logging.WARNING)
+    # Set path to logfile
+    current_datetime = datetime.datetime.now().strftime("_%Y%m%d_%H%M%S")
+    log_filename = 'demo_pipeline' + current_datetime + '.log'
+    log_path = Path(cm.paths.get_tempdir()) / log_filename
+    # Done with path stuff
+    handler = logging.FileHandler(log_path)
+    log_format = logging.Formatter("%(relativeCreated)12d [%(filename)s:%(funcName)10s():%(lineno)s] [%(process)d] %(message)s")
+    handler.setFormatter(log_format)
+    logger.addHandler(handler)
+
-Caiman makes extensive use of the log system, and we have place many loggers interleaved throughough the code to aid in debugging.
+Caiman makes extensive use of the log system, and we have placed many loggers throughout the code to aid in debugging.
If you hit a bug, it is often helpful to set your debugging level to ``DEBUG`` so you can see what @@ -256,7 +269,7 @@ Once you have configured your logger, you can change the level (say, from ``WARN :: - logging.getLogger().setLevel(logging.DEBUG) + logging.getLogger("caiman").setLevel(logging.DEBUG) From 9bdbacbe3319a2c9b9ab7c20d9f3afaf8d03481f Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Wed, 24 Jul 2024 16:38:13 -0400 Subject: [PATCH 24/28] caiman-logger: Further cleanups, fix problems CI revealed --- caiman/base/rois.py | 2 +- caiman/components_evaluation.py | 2 +- caiman/motion_correction.py | 2 +- caiman/source_extraction/cnmf/cnmf.py | 7 +++---- caiman/source_extraction/cnmf/map_reduce.py | 9 +++++---- caiman/source_extraction/cnmf/params.py | 8 +++----- caiman/utils/utils.py | 10 +++++----- 7 files changed, 19 insertions(+), 21 deletions(-) diff --git a/caiman/base/rois.py b/caiman/base/rois.py index 38b158404..fddd1471f 100644 --- a/caiman/base/rois.py +++ b/caiman/base/rois.py @@ -780,7 +780,7 @@ def find_matches(D_s, print_assignment: bool = False) -> tuple[list, list]: if print_assignment: logger.debug(f'({row}, {column}) -> {value}') total.append(value) - logger.debug((f'FOV: {ii}, shape: {DD.shape[0]},{DD.shape[1]} total cost: {np.sum(total)}') + logger.debug(f'FOV: {ii}, shape: {DD.shape[0]},{DD.shape[1]} total cost: {np.sum(total)}') logger.debug(time.time() - t_start) costs.append(total) # send back the results in the format we want diff --git a/caiman/components_evaluation.py b/caiman/components_evaluation.py index 0f0ed295e..5bf9a47c0 100644 --- a/caiman/components_evaluation.py +++ b/caiman/components_evaluation.py @@ -143,7 +143,7 @@ def find_activity_intervals(C, Npeaks: int = 5, tB=-3, tA=10, thres: float = 0.3 for i in range(K): if np.sum(np.abs(np.diff(C[i, :]))) == 0: L.append([]) - logger.debug('empty component at:' + str(i)) + logger.debug(f'empty component at: {i}') continue indexes = peakutils.indexes(C[i, :], thres=thres) srt_ind = indexes[np.argsort(C[i, indexes])][::-1] diff --git a/caiman/motion_correction.py b/caiman/motion_correction.py index 912cd0bea..fb186d1c4 100644 --- a/caiman/motion_correction.py +++ b/caiman/motion_correction.py @@ -2823,7 +2823,7 @@ def motion_correct_batch_rigid(fname, max_shifts, dview=None, splits=56, num_spl logger.error('The movie contains NaNs. NaNs are not allowed!') raise Exception('The movie contains NaNs. 
NaNs are not allowed!') else: - logger.debug('Adding to movie ' + str(add_to_movie)) + logger.debug(f'Adding to movie {add_to_movie}') save_movie = False fname_tot_rig = None diff --git a/caiman/source_extraction/cnmf/cnmf.py b/caiman/source_extraction/cnmf/cnmf.py index 4249e8f3d..05ce34a6d 100644 --- a/caiman/source_extraction/cnmf/cnmf.py +++ b/caiman/source_extraction/cnmf/cnmf.py @@ -435,9 +435,7 @@ def fit(self, images, indices=(slice(None), slice(None))): if self.params.get('patch', 'rf') is None and (is_sliced or 'ndarray' in str(type(images))): images = images[tuple(indices)] self.dview = None - logger.info("Parallel processing in a single patch " - "is not available for loaded in memory or sliced" + - " data.") + logger.info("Parallel processing in a single patch is not available for loaded in memory or sliced data.") T = images.shape[0] self.params.set('online', {'init_batch': T}) @@ -465,7 +463,8 @@ def fit(self, images, indices=(slice(None), slice(None))): # 'ss': np.ones((3,) * len(self.dims), dtype=np.uint8) # }) - logger.info(('Using ' + str(self.params.get('patch', 'n_processes')) + ' processes')) + logger.info(f"Using {self.params.get('patch', 'n_processes')} processes") + # FIXME The code below is really ugly and it's hard to tell if it's doing the right thing if self.params.get('preprocess', 'n_pixels_per_process') is None: avail_memory_per_process = psutil.virtual_memory()[ 1] / 2.**30 / self.params.get('patch', 'n_processes') diff --git a/caiman/source_extraction/cnmf/map_reduce.py b/caiman/source_extraction/cnmf/map_reduce.py index ba6cd0e4f..6a7d17e3d 100644 --- a/caiman/source_extraction/cnmf/map_reduce.py +++ b/caiman/source_extraction/cnmf/map_reduce.py @@ -14,6 +14,7 @@ from caiman.cluster import extract_patch_coordinates from caiman.mmapping import load_memmap +from caiman.source_extraction.cnmf import CNMF def cnmf_patches(args_in): """Function that is run for each patches @@ -67,16 +68,16 @@ def cnmf_patches(args_in): """ #FIXME Fix in-function imports - from . 
import cnmf + from caiman.source_extraction.cnmf import CNMF logger = logging.getLogger("caiman") file_name, idx_, shapes, params = args_in name_log = os.path.basename( file_name[:-5]) + '_LOG_ ' + str(idx_[0]) + '_' + str(idx_[-1]) - logger.debug(name_log + 'START') + logger.debug(name_log + ' START') - logger.debug(name_log + 'Read file') + logger.debug(name_log + ' Read file') Yr, dims, timesteps = load_memmap(file_name) # slicing array (takes the min and max index in n-dimensional space and @@ -107,7 +108,7 @@ def cnmf_patches(args_in): for group in ('preprocess', 'temporal'): opts.set(group, {'p': params.get('patch', 'p_patch')}) - cnm = cnmf.CNMF(n_processes=1, params=opts) + cnm = CNMF(n_processes=1, params=opts) cnm = cnm.fit(images) return [idx_, shapes, scipy.sparse.coo_matrix(cnm.estimates.A), diff --git a/caiman/source_extraction/cnmf/params.py b/caiman/source_extraction/cnmf/params.py index 2014860df..4d5d74108 100644 --- a/caiman/source_extraction/cnmf/params.py +++ b/caiman/source_extraction/cnmf/params.py @@ -956,15 +956,13 @@ def check_consistency(self): if len(self.motion[a]) != 3: if self.motion[a][0] == self.motion[a][1]: self.motion[a] = (self.motion[a][0],) * 3 - logger.warning("is3D=True, hence setting key " + a + - " automatically to " + str(self.motion[a])) + logger.warning(f"is3D=True, hence setting key {a} to {self.motion[a]}") else: - raise ValueError(a + ' has to be a tuple of length 3 for volumetric 3D data') + raise ValueError(f'{a} must be a tuple of length 3 for volumetric 3D data') for key in ('max_num_added', 'min_num_trial'): if (self.online[key] == 0 and self.online['update_num_comps']): self.set('online', {'update_num_comps': False}) - logger.warning(key + "=0, hence setting key update_num_comps " + - "in group online automatically to False.") + logger.warning(f"{key}=0, hence setting key online.update_num_comps to False.") # FIXME The authoritative value is stored in the init field. 
This should later be refactored out # into a general section, once we're passing around the CNMFParams object rather than splatting it out # from **get_group diff --git a/caiman/utils/utils.py b/caiman/utils/utils.py index 599d9b60e..54f028e3d 100644 --- a/caiman/utils/utils.py +++ b/caiman/utils/utils.py @@ -477,7 +477,7 @@ def recursively_save_dict_contents_to_group(h5file:h5py.File, path:str, dic:dict if key == 'g': if item is None: item = 0 - logger.info(key + ' is an object type') + logger.info(f'{key} is an object type') try: item = np.array(list(item)) except: @@ -522,7 +522,7 @@ def recursively_save_dict_contents_to_group(h5file:h5py.File, path:str, dic:dict elif isinstance(item, dict): recursively_save_dict_contents_to_group(h5file, path + key + '/', item) elif 'sparse' in str(type(item)): - logger.info(key + ' is sparse ****') + logger.info(f'{key} is sparse ****') h5file[path + key + '/data'] = item.tocsc().data h5file[path + key + '/indptr'] = item.tocsc().indptr h5file[path + key + '/indices'] = item.tocsc().indices @@ -576,10 +576,10 @@ def recursively_load_dict_contents_from_group(h5file:h5py.File, path:str) -> dic elif isinstance(item, h5py._hl.group.Group): if key in ('A', 'W', 'Ab', 'downscale_matrix', 'upscale_matrix'): - data = item[path + key + '/data'] + data = item[path + key + '/data'] indices = item[path + key + '/indices'] - indptr = item[path + key + '/indptr'] - shape = item[path + key + '/shape'] + indptr = item[path + key + '/indptr'] + shape = item[path + key + '/shape'] ans[key] = scipy.sparse.csc_matrix((data[:], indices[:], indptr[:]), shape[:]) if key in ('W', 'upscale_matrix'): From b3fafb56a03ca432a669a6c3cc52750577e1b4b5 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Wed, 24 Jul 2024 16:57:30 -0400 Subject: [PATCH 25/28] caiman logging change: had to partly back off on import cleanup, fix some goofs --- caiman/motion_correction.py | 2 +- caiman/source_extraction/cnmf/map_reduce.py | 1 - caiman/source_extraction/cnmf/params.py | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) diff --git a/caiman/motion_correction.py b/caiman/motion_correction.py index fb186d1c4..cae11df49 100644 --- a/caiman/motion_correction.py +++ b/caiman/motion_correction.py @@ -2946,7 +2946,7 @@ def motion_correct_batch_pwrigid(fname, max_shifts, strides, overlaps, add_to_mo logger.error('The template contains NaNs. NaNs are not allowed!') raise Exception('The template contains NaNs. NaNs are not allowed!') else: - logger.debug(f'Adding to movie {add_to_movie}')) + logger.debug(f'Adding to movie {add_to_movie}') for iter_ in range(num_iter): logger.debug(iter_) diff --git a/caiman/source_extraction/cnmf/map_reduce.py b/caiman/source_extraction/cnmf/map_reduce.py index 6a7d17e3d..efe789e4f 100644 --- a/caiman/source_extraction/cnmf/map_reduce.py +++ b/caiman/source_extraction/cnmf/map_reduce.py @@ -14,7 +14,6 @@ from caiman.cluster import extract_patch_coordinates from caiman.mmapping import load_memmap -from caiman.source_extraction.cnmf import CNMF def cnmf_patches(args_in): """Function that is run for each patches diff --git a/caiman/source_extraction/cnmf/params.py b/caiman/source_extraction/cnmf/params.py index 4d5d74108..4ad83eaac 100644 --- a/caiman/source_extraction/cnmf/params.py +++ b/caiman/source_extraction/cnmf/params.py @@ -1118,6 +1118,7 @@ def change_params(self, params_dict, allow_legacy:bool=True, warn_unused:bool=Tr were never used in populating the Params object. You really should not set this to False. Fix your code. 
""" + logger = logging.getLogger("caiman") # When we're ready to remove allow_legacy, this code will get a lot simpler consumed = {} # Keep track of what parameters in params_dict were used to set something in params (just for legacy API) From d3a89fc4779bdb340183119e660442ee9084b87c Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Wed, 24 Jul 2024 18:02:19 -0400 Subject: [PATCH 26/28] Initial go at updating notebooks to use the logger we're calling caiman --- demos/notebooks/demo_OnACID_mesoscope.ipynb | 14 ++++++++++---- demos/notebooks/demo_Ring_CNN.ipynb | 14 ++++++++++---- demos/notebooks/demo_VST.ipynb | 14 ++++++++++---- demos/notebooks/demo_caiman_cnmf_3D.ipynb | 13 +++++++++---- demos/notebooks/demo_dendritic.ipynb | 12 ++++++++---- demos/notebooks/demo_motion_correction.ipynb | 13 +++++++++---- demos/notebooks/demo_online_3D.ipynb | 13 +++++++++---- demos/notebooks/demo_online_cnmfE.ipynb | 13 +++++++++---- demos/notebooks/demo_pipeline.ipynb | 12 +++++++++--- demos/notebooks/demo_pipeline_cnmfE.ipynb | 12 +++++++++--- .../notebooks/demo_pipeline_voltage_imaging.ipynb | 13 +++++++++---- demos/notebooks/demo_realtime_cnmfE.ipynb | 13 +++++++++---- demos/notebooks/demo_seeded_CNMF.ipynb | 13 +++++++++---- 13 files changed, 119 insertions(+), 50 deletions(-) diff --git a/demos/notebooks/demo_OnACID_mesoscope.ipynb b/demos/notebooks/demo_OnACID_mesoscope.ipynb index 14a8020b4..b6d2407ab 100644 --- a/demos/notebooks/demo_OnACID_mesoscope.ipynb +++ b/demos/notebooks/demo_OnACID_mesoscope.ipynb @@ -34,10 +34,16 @@ "from caiman.source_extraction import cnmf\n", "from caiman.utils.utils import download_demo\n", "\n", - "logging.basicConfig(format=\n", - " \"%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s\",\n", - " # filename=\"/tmp/caiman.log\",\n", - " level=logging.INFO)\n", + "logfile = None # Replace with a path if you want to log to a file\n", + "logger = logging.getLogger('caiman')\n", + "logger.setLevel(logging.INFO)\n", + "logfmt = '%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s'\n", + "if logfile is not None:\n", + " handler = logging.FileHandler(logfile)\n", + "else:\n", + " handler = logging.StreamHandler()\n", + "handler.setFormatter(logfmt)\n", + "logger.addHandler(handler)\n", "\n", "try:\n", " if __IPYTHON__:\n", diff --git a/demos/notebooks/demo_Ring_CNN.ipynb b/demos/notebooks/demo_Ring_CNN.ipynb index 06149b57a..277d94fb0 100644 --- a/demos/notebooks/demo_Ring_CNN.ipynb +++ b/demos/notebooks/demo_Ring_CNN.ipynb @@ -23,10 +23,16 @@ "import numpy as np\n", "import os\n", "\n", - "logging.basicConfig(format=\n", - " \"%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s\",\n", - " # filename=\"/tmp/caiman.log\",\n", - " level=logging.WARNING)\n", + "logfile = None # Replace with a path if you want to log to a file\n", + "logger = logging.getLogger('caiman')\n", + "logger.setLevel(logging.INFO)\n", + "logfmt = '%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s'\n", + "if logfile is not None:\n", + " handler = logging.FileHandler(logfile)\n", + "else:\n", + " handler = logging.StreamHandler()\n", + "handler.setFormatter(logfmt)\n", + "logger.addHandler(handler)\n", "\n", "import caiman as cm\n", "import caiman.base.movies\n", diff --git a/demos/notebooks/demo_VST.ipynb b/demos/notebooks/demo_VST.ipynb index cc7c86e37..295789785 100644 --- a/demos/notebooks/demo_VST.ipynb +++ b/demos/notebooks/demo_VST.ipynb @@ 
-24,10 +24,16 @@ "import os\n", "import timeit\n", "\n", - "logging.basicConfig(format=\n", - " \"%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s\",\n", - " # filename=\"/tmp/caiman.log\",\n", - " level=logging.DEBUG)\n", + "logfile = None # Replace with a path if you want to log to a file\n", + "logger = logging.getLogger('caiman')\n", + "logger.setLevel(logging.INFO)\n", + "logfmt = '%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s'\n", + "if logfile is not None:\n", + " handler = logging.FileHandler(logfile)\n", + "else:\n", + " handler = logging.StreamHandler()\n", + "handler.setFormatter(logfmt)\n", + "logger.addHandler(handler)\n", "\n", "import caiman.external.houghvst.estimation as est\n", "from caiman.external.houghvst.gat import compute_gat, compute_inverse_gat\n", diff --git a/demos/notebooks/demo_caiman_cnmf_3D.ipynb b/demos/notebooks/demo_caiman_cnmf_3D.ipynb index 2a0c3be84..7529a4dc3 100644 --- a/demos/notebooks/demo_caiman_cnmf_3D.ipynb +++ b/demos/notebooks/demo_caiman_cnmf_3D.ipynb @@ -39,10 +39,15 @@ "import bokeh.plotting as bpl\n", "bpl.output_notebook()\n", "\n", - "logging.basicConfig(format=\n", - " \"%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s\",\n", - " # filename=\"/tmp/caiman.log\",\n", - " level=logging.WARNING)" + "logfile = None # Replace with a path if you want to log to a file\n", + "logger = logging.getLogger('caiman')\n", + "logger.setLevel(logging.INFO)\n", + "logfmt = '%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s'\n", + "if logfile is not None:\n", + " handler = logging.FileHandler(logfile)\n", + "else:\n", + " handler = logging.StreamHandler()\n", + "handler.setFormatter(logfmt)\n" ] }, { diff --git a/demos/notebooks/demo_dendritic.ipynb b/demos/notebooks/demo_dendritic.ipynb index 3c69dfdb6..58b329bf8 100644 --- a/demos/notebooks/demo_dendritic.ipynb +++ b/demos/notebooks/demo_dendritic.ipynb @@ -69,10 +69,14 @@ " print(f\"Will save logging data to {log_file}\")\n", "else:\n", " log_file = None\n", - "log_format = \"{asctime} - {levelname} - [{filename} {funcName}() {lineno}] - pid {process} - {message}\"\n", - "logging.basicConfig(format=log_format,\n", - " filename=log_file, \n", - " level=logging.WARNING, style=\"{\") #DEBUG, INFO, WARNING, ERROR, CRITICAL" + "logger = logging.getLogger('caiman')\n", + "logger.setLevel(logging.INFO)\n", + "logfmt = '%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s'\n", + "if log_file is not None:\n", + " handler = logging.FileHandler(log_file)\n", + "else:\n", + " handler = logging.StreamHandler()\n", + "handler.setFormatter(logfmt)\n" ] }, { diff --git a/demos/notebooks/demo_motion_correction.ipynb b/demos/notebooks/demo_motion_correction.ipynb index d3c6fcc81..e62abbe0d 100644 --- a/demos/notebooks/demo_motion_correction.ipynb +++ b/demos/notebooks/demo_motion_correction.ipynb @@ -40,10 +40,15 @@ "except NameError:\n", " pass\n", "\n", - "logging.basicConfig(format=\n", - " \"%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s\",\n", - " # filename=\"/tmp/caiman.log\",\n", - " level=logging.DEBUG)\n", + "logfile = None # Replace with a path if you want to log to a file\n", + "logger = logging.getLogger('caiman')\n", + "logger.setLevel(logging.INFO)\n", + "logfmt = '%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s'\n", 
+ "if logfile is not None:\n", + " handler = logging.FileHandler(logfile)\n", + "else:\n", + " handler = logging.StreamHandler()\n", + "handler.setFormatter(logfmt)\n", "\n", "import caiman as cm\n", "from caiman.motion_correction import MotionCorrect, tile_and_correct, motion_correction_piecewise\n", diff --git a/demos/notebooks/demo_online_3D.ipynb b/demos/notebooks/demo_online_3D.ipynb index 674ae1504..333bcc3b2 100644 --- a/demos/notebooks/demo_online_3D.ipynb +++ b/demos/notebooks/demo_online_3D.ipynb @@ -36,10 +36,15 @@ "import bokeh.plotting as bpl\n", "bpl.output_notebook()\n", "\n", - "logging.basicConfig(format=\n", - " \"%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s\",\n", - " # filename=\"/tmp/caiman.log\",\n", - " level=logging.WARNING)" + "logfile = None # Replace with a path if you want to log to a file\n", + "logger = logging.getLogger('caiman')\n", + "logger.setLevel(logging.WARNING)\n", + "logfmt = '%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s'\n", + "if logfile is not None:\n", + " handler = logging.FileHandler(logfile)\n", + "else:\n", + " handler = logging.StreamHandler()\n", + "handler.setFormatter(logfmt)\n" ] }, { diff --git a/demos/notebooks/demo_online_cnmfE.ipynb b/demos/notebooks/demo_online_cnmfE.ipynb index 1783024b0..f788df595 100644 --- a/demos/notebooks/demo_online_cnmfE.ipynb +++ b/demos/notebooks/demo_online_cnmfE.ipynb @@ -36,10 +36,15 @@ "except NameError:\n", " pass\n", "\n", - "logging.basicConfig(format=\n", - " \"%(relativeCreated)12d [%(filename)s:%(funcName)10s():%(lineno)s] [%(process)d] %(message)s\",\n", - " # filename=\"/tmp/caiman.log\",\n", - " level=logging.WARNING)\n", + "logfile = None # Replace with a path if you want to log to a file\n", + "logger = logging.getLogger('caiman')\n", + "logger.setLevel(logging.WARNING)\n", + "logfmt = '%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s'\n", + "if logfile is not None:\n", + " handler = logging.FileHandler(logfile)\n", + "else:\n", + " handler = logging.StreamHandler()\n", + "handler.setFormatter(logfmt)\n", "\n", "bpl.output_notebook()\n", "hv.notebook_extension('bokeh')" diff --git a/demos/notebooks/demo_pipeline.ipynb b/demos/notebooks/demo_pipeline.ipynb index 7fb668cc3..5458ef3cf 100644 --- a/demos/notebooks/demo_pipeline.ipynb +++ b/demos/notebooks/demo_pipeline.ipynb @@ -93,9 +93,15 @@ "outputs": [], "source": [ "# set up logging\n", - "logging.basicConfig(format=\"{asctime} - {levelname} - [{filename} {funcName}() {lineno}] - pid {process} - {message}\",\n", - " filename=None, \n", - " level=logging.WARNING, style=\"{\") #logging level can be DEBUG, INFO, WARNING, ERROR, CRITICAL\n", + "logfile = None # Replace with a path if you want to log to a file\n", + "logger = logging.getLogger('caiman')\n", + "logger.setLevel(logging.WARNING)\n", + "logfmt = '%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s'\n", + "if logfile is not None:\n", + " handler = logging.FileHandler(logfile)\n", + "else:\n", + " handler = logging.StreamHandler()\n", + "handler.setFormatter(logfmt)\n", "\n", "# set env variables \n", "os.environ[\"MKL_NUM_THREADS\"] = \"1\"\n", diff --git a/demos/notebooks/demo_pipeline_cnmfE.ipynb b/demos/notebooks/demo_pipeline_cnmfE.ipynb index 839b44075..32599fe9e 100644 --- a/demos/notebooks/demo_pipeline_cnmfE.ipynb +++ b/demos/notebooks/demo_pipeline_cnmfE.ipynb @@ -83,9 +83,15 @@ "outputs": [], "source": [ 
"# set up logging\n", - "logging.basicConfig(format=\"{asctime} - {levelname} - [{filename} {funcName}() {lineno}] - pid {process} - {message}\",\n", - " filename=None, \n", - " level=logging.WARNING, style=\"{\") #logging level can be DEBUG, INFO, WARNING, ERROR, CRITICAL\n", + "logfile = None # Replace with a path if you want to log to a file\n", + "logger = logging.getLogger('caiman')\n", + "logger.setLevel(logging.WARNING)\n", + "logfmt = '%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s'\n", + "if logfile is not None:\n", + " handler = logging.FileHandler(logfile)\n", + "else:\n", + " handler = logging.StreamHandler()\n", + "handler.setFormatter(logfmt)\n", "\n", "# set env variables in case they weren't already set\n", "os.environ[\"MKL_NUM_THREADS\"] = \"1\"\n", diff --git a/demos/notebooks/demo_pipeline_voltage_imaging.ipynb b/demos/notebooks/demo_pipeline_voltage_imaging.ipynb index 3db090b78..524368f5a 100644 --- a/demos/notebooks/demo_pipeline_voltage_imaging.ipynb +++ b/demos/notebooks/demo_pipeline_voltage_imaging.ipynb @@ -60,10 +60,15 @@ "from caiman.summary_images import mean_image\n", "from caiman.paths import caiman_datadir\n", "\n", - "logging.basicConfig(format=\n", - " \"%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s]\" \\\n", - " \"[%(process)d] %(message)s\",\n", - " level=logging.ERROR)" + "logfile = None # Replace with a path if you want to log to a file\n", + "logger = logging.getLogger('caiman')\n", + "logger.setLevel(logging.ERROR)\n", + "logfmt = '%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s'\n", + "if logfile is not None:\n", + " handler = logging.FileHandler(logfile)\n", + "else:\n", + " handler = logging.StreamHandler()\n", + "handler.setFormatter(logfmt)\n" ] }, { diff --git a/demos/notebooks/demo_realtime_cnmfE.ipynb b/demos/notebooks/demo_realtime_cnmfE.ipynb index 0122b9e0b..f4c4f478b 100644 --- a/demos/notebooks/demo_realtime_cnmfE.ipynb +++ b/demos/notebooks/demo_realtime_cnmfE.ipynb @@ -51,10 +51,15 @@ "except NameError:\n", " pass\n", "\n", - "logging.basicConfig(format=\n", - " \"%(relativeCreated)12d [%(filename)s:%(funcName)10s():%(lineno)s] [%(process)d] %(message)s\",\n", - " # filename=\"/tmp/caiman.log\",\n", - " level=logging.WARNING)\n", + "logfile = None # Replace with a path if you want to log to a file\n", + "logger = logging.getLogger('caiman')\n", + "logger.setLevel(logging.WARNING)\n", + "logfmt = '%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s'\n", + "if logfile is not None:\n", + " handler = logging.FileHandler(logfile)\n", + "else:\n", + " handler = logging.StreamHandler()\n", + "handler.setFormatter(logfmt)\n", "\n", "import caiman as cm\n", "from caiman.source_extraction import cnmf\n", diff --git a/demos/notebooks/demo_seeded_CNMF.ipynb b/demos/notebooks/demo_seeded_CNMF.ipynb index 4f1ad1e60..2c1c3bfed 100644 --- a/demos/notebooks/demo_seeded_CNMF.ipynb +++ b/demos/notebooks/demo_seeded_CNMF.ipynb @@ -45,10 +45,15 @@ "except NameError:\n", " pass\n", "\n", - "logging.basicConfig(format=\n", - " \"%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s\",\n", - " # filename=\"/tmp/caiman.log\",\n", - " level=logging.WARNING)\n", + "logfile = None # Replace with a path if you want to log to a file\n", + "logger = logging.getLogger('caiman')\n", + "logger.setLevel(logging.WARNING)\n", + "logfmt = '%(relativeCreated)12d 
[%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s'\n", + "if logfile is not None:\n", + " handler = logging.FileHandler(logfile)\n", + "else:\n", + " handler = logging.StreamHandler()\n", + "handler.setFormatter(logfmt)\n", "\n", "import caiman as cm\n", "from caiman.motion_correction import MotionCorrect\n", From 268e807c3614017104b1efcf074f37fd5b6174ca Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Thu, 25 Jul 2024 11:29:00 -0400 Subject: [PATCH 27/28] logging change diffset: Fixing a missed line in the mass cut'n'paste I did to notebooks --- demos/notebooks/demo_caiman_cnmf_3D.ipynb | 3 ++- demos/notebooks/demo_dendritic.ipynb | 3 ++- demos/notebooks/demo_motion_correction.ipynb | 1 + demos/notebooks/demo_online_3D.ipynb | 3 ++- demos/notebooks/demo_online_cnmfE.ipynb | 1 + demos/notebooks/demo_pipeline.ipynb | 1 + demos/notebooks/demo_pipeline_cnmfE.ipynb | 1 + demos/notebooks/demo_pipeline_voltage_imaging.ipynb | 3 ++- demos/notebooks/demo_realtime_cnmfE.ipynb | 1 + demos/notebooks/demo_seeded_CNMF.ipynb | 1 + 10 files changed, 14 insertions(+), 4 deletions(-) diff --git a/demos/notebooks/demo_caiman_cnmf_3D.ipynb b/demos/notebooks/demo_caiman_cnmf_3D.ipynb index 7529a4dc3..0dfea2d00 100644 --- a/demos/notebooks/demo_caiman_cnmf_3D.ipynb +++ b/demos/notebooks/demo_caiman_cnmf_3D.ipynb @@ -47,7 +47,8 @@ " handler = logging.FileHandler(logfile)\n", "else:\n", " handler = logging.StreamHandler()\n", - "handler.setFormatter(logfmt)\n" + "handler.setFormatter(logfmt)\n", + "logger.addHandler(handler)\n" ] }, { diff --git a/demos/notebooks/demo_dendritic.ipynb b/demos/notebooks/demo_dendritic.ipynb index 58b329bf8..49109bb72 100644 --- a/demos/notebooks/demo_dendritic.ipynb +++ b/demos/notebooks/demo_dendritic.ipynb @@ -76,7 +76,8 @@ " handler = logging.FileHandler(log_file)\n", "else:\n", " handler = logging.StreamHandler()\n", - "handler.setFormatter(logfmt)\n" + "handler.setFormatter(logfmt)\n", + "logger.addHandler(handler)\n" ] }, { diff --git a/demos/notebooks/demo_motion_correction.ipynb b/demos/notebooks/demo_motion_correction.ipynb index e62abbe0d..55c98dd2d 100644 --- a/demos/notebooks/demo_motion_correction.ipynb +++ b/demos/notebooks/demo_motion_correction.ipynb @@ -49,6 +49,7 @@ "else:\n", " handler = logging.StreamHandler()\n", "handler.setFormatter(logfmt)\n", + "logger.addHandler(handler)\n", "\n", "import caiman as cm\n", "from caiman.motion_correction import MotionCorrect, tile_and_correct, motion_correction_piecewise\n", diff --git a/demos/notebooks/demo_online_3D.ipynb b/demos/notebooks/demo_online_3D.ipynb index 333bcc3b2..14906d32a 100644 --- a/demos/notebooks/demo_online_3D.ipynb +++ b/demos/notebooks/demo_online_3D.ipynb @@ -44,7 +44,8 @@ " handler = logging.FileHandler(logfile)\n", "else:\n", " handler = logging.StreamHandler()\n", - "handler.setFormatter(logfmt)\n" + "handler.setFormatter(logfmt)\n", + "logger.addHandler(handler)\n" ] }, { diff --git a/demos/notebooks/demo_online_cnmfE.ipynb b/demos/notebooks/demo_online_cnmfE.ipynb index f788df595..e94a5e389 100644 --- a/demos/notebooks/demo_online_cnmfE.ipynb +++ b/demos/notebooks/demo_online_cnmfE.ipynb @@ -45,6 +45,7 @@ "else:\n", " handler = logging.StreamHandler()\n", "handler.setFormatter(logfmt)\n", + "logger.addHandler(handler)\n", "\n", "bpl.output_notebook()\n", "hv.notebook_extension('bokeh')" diff --git a/demos/notebooks/demo_pipeline.ipynb b/demos/notebooks/demo_pipeline.ipynb index 5458ef3cf..a5a64ab32 100644 --- a/demos/notebooks/demo_pipeline.ipynb +++ 
b/demos/notebooks/demo_pipeline.ipynb @@ -102,6 +102,7 @@ "else:\n", " handler = logging.StreamHandler()\n", "handler.setFormatter(logfmt)\n", + "logger.addHandler(handler)\n", "\n", "# set env variables \n", "os.environ[\"MKL_NUM_THREADS\"] = \"1\"\n", diff --git a/demos/notebooks/demo_pipeline_cnmfE.ipynb b/demos/notebooks/demo_pipeline_cnmfE.ipynb index 32599fe9e..62325fb07 100644 --- a/demos/notebooks/demo_pipeline_cnmfE.ipynb +++ b/demos/notebooks/demo_pipeline_cnmfE.ipynb @@ -92,6 +92,7 @@ "else:\n", " handler = logging.StreamHandler()\n", "handler.setFormatter(logfmt)\n", + "logger.addHandler(handler)\n", "\n", "# set env variables in case they weren't already set\n", "os.environ[\"MKL_NUM_THREADS\"] = \"1\"\n", diff --git a/demos/notebooks/demo_pipeline_voltage_imaging.ipynb b/demos/notebooks/demo_pipeline_voltage_imaging.ipynb index 524368f5a..b0a21ea4d 100644 --- a/demos/notebooks/demo_pipeline_voltage_imaging.ipynb +++ b/demos/notebooks/demo_pipeline_voltage_imaging.ipynb @@ -68,7 +68,8 @@ " handler = logging.FileHandler(logfile)\n", "else:\n", " handler = logging.StreamHandler()\n", - "handler.setFormatter(logfmt)\n" + "handler.setFormatter(logfmt)\n", + "logger.addHandler(handler)\n" ] }, { diff --git a/demos/notebooks/demo_realtime_cnmfE.ipynb b/demos/notebooks/demo_realtime_cnmfE.ipynb index f4c4f478b..378f82510 100644 --- a/demos/notebooks/demo_realtime_cnmfE.ipynb +++ b/demos/notebooks/demo_realtime_cnmfE.ipynb @@ -60,6 +60,7 @@ "else:\n", " handler = logging.StreamHandler()\n", "handler.setFormatter(logfmt)\n", + "logger.addHandler(handler)\n", "\n", "import caiman as cm\n", "from caiman.source_extraction import cnmf\n", diff --git a/demos/notebooks/demo_seeded_CNMF.ipynb b/demos/notebooks/demo_seeded_CNMF.ipynb index 2c1c3bfed..9c5baa96a 100644 --- a/demos/notebooks/demo_seeded_CNMF.ipynb +++ b/demos/notebooks/demo_seeded_CNMF.ipynb @@ -54,6 +54,7 @@ "else:\n", " handler = logging.StreamHandler()\n", "handler.setFormatter(logfmt)\n", + "logger.addHandler(handler)\n", "\n", "import caiman as cm\n", "from caiman.motion_correction import MotionCorrect\n", From 04a04aea970ff56ee71c5cdc88091296d81e59fe Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Thu, 25 Jul 2024 11:44:29 -0400 Subject: [PATCH 28/28] Logging cleanup diffset: Get rid of almost all calls to global logger --- caiman/tests/comparison/comparison.py | 42 +++++++-------------- caiman/tests/comparison_general.py | 31 ++++++--------- caiman/tests/test_deconvolution.py | 5 ++- demos/general/demo_OnACID.py | 2 +- demos/general/demo_OnACID_mesoscope.py | 2 +- demos/notebooks/demo_OnACID_mesoscope.ipynb | 2 +- 6 files changed, 32 insertions(+), 52 deletions(-) diff --git a/caiman/tests/comparison/comparison.py b/caiman/tests/comparison/comparison.py index d866bd3aa..18d824589 100644 --- a/caiman/tests/comparison/comparison.py +++ b/caiman/tests/comparison/comparison.py @@ -23,7 +23,8 @@ # You can log to a file using the filename parameter, or make the output more or less # verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR -logging.getLogger("caiman").setLevel(logging.DEBUG) +logger = logging.getLogger("caiman") +logger.setLevel(logging.DEBUG) import caiman as cm from caiman.paths import caiman_datadir @@ -190,7 +191,7 @@ def save_with_compare(self, istruth=False, params=None, dview=None, Cn=None): """ # getting the DATA FOR COMPARISONS assert (params != None and self.cnmpatch != None) - logging.info('we need the parameters in order to save anything\n') + logger.info('Parameters 
must be set in order to save anything\n') # actions on the sparse matrix cnm = self.cnmpatch.__dict__ cnmpatch = deletesparse(cnm) @@ -227,7 +228,7 @@ def save_with_compare(self, istruth=False, params=None, dview=None, Cn=None): if os.path.exists(file_path): os.remove(file_path) else: - logging.debug("nothing to remove\n") + logger.debug("nothing to remove\n") np.savez_compressed(file_path, information=information, A_full=self.comparison['cnmf_full_frame']['ourdata'][0], @@ -235,7 +236,7 @@ def save_with_compare(self, istruth=False, params=None, dview=None, Cn=None): A_patch=self.comparison['cnmf_on_patch']['ourdata'][0], C_patch=self.comparison['cnmf_on_patch']['ourdata'][1], rig_shifts=self.comparison['rig_shifts']['ourdata']) - logging.info('we now have ground truth\n') + logger.info('we now have ground truth\n') return else: # if not we create a comparison first @@ -250,7 +251,7 @@ def save_with_compare(self, istruth=False, params=None, dview=None, Cn=None): # if we cannot manage to open it or it doesn't exist: except (IOError, OSError): # we save but we explain why there were a problem - logging.warning('we were not able to read the file ' + str(file_path) + ' to compare it\n') + logger.warning(f'We were not able to read the file {file_path} to compare it\n') file_path = os.path.join(caiman_datadir(), "testdata", "NC" + dt + ".npz") np.savez_compressed(file_path, information=information, @@ -273,26 +274,26 @@ def save_with_compare(self, istruth=False, params=None, dview=None, Cn=None): information.update({'differences': {'proc': False, 'params_movie': False, 'params_cnm': False}}) if data['processor'] != information['processor']: - logging.info("You don't have the same processor as was used to generate the ground truth. The processing time can vary.\n" + + logger.info("You don't have the same processor as was used to generate the ground truth. The processing time can vary.\n" + "For time comparison, Create your own groundtruth standard for future testing.\n" + f"Compare: {data['processor']} to {information['processor']}\n") information['differences']['proc'] = True if data['params'] != information['params']: - logging.warning("You are not using the same movie parameters. Results will not be comparable.") - logging.warning('You must use the same parameters as the groundtruth.\n' + + logger.warning("You are not using the same movie parameters. Results will not be comparable.") + logger.warning('You must use the same parameters as the groundtruth.\n' + 'examine the groundtruth parameters with the see() method\n') information['differences']['params_movie'] = True # We must cleanup some fields to permit an accurate comparison if not normalised_compare_cnmpatches(data['cnmpatch'], cnmpatch): if data['cnmpatch'].keys() != cnmpatch.keys(): - logging.error( + logger.error( 'DIFFERENCES IN THE FIELDS OF CNMF' ) # TODO: Now that we have deeply nested data structures, find a module that gives you tight differences. 
diffkeys = [k for k in data['cnmpatch'] if data['cnmpatch'][k] != cnmpatch[k]] for k in diffkeys: - logging.info(f"{k}:{data['cnmpatch'][k]}->{cnmpatch[k]}") + logger.info(f"{k}:{data['cnmpatch'][k]}->{cnmpatch[k]}") - logging.warning('You are not using the same parameters in your cnmf on patches initialization\n') + logger.warning('You are not using the same parameters in your cnmf on patches initialization\n') information['differences']['params_cnm'] = True # for rigid @@ -373,7 +374,7 @@ def see(filename=None): dr = os.path.join(caiman_datadir(), "testdata", "groundtruth.npz") else: dr = os.path.join(caiman_datadir(), "testdata", filename, filename + ".npz") - logging.debug("Loading GT file " + str(dr)) + logger.debug("Loading GT file " + str(dr)) with np.load(dr) as dt: print('Info :\n') see_it(dt) @@ -403,7 +404,7 @@ def deletesparse(cnm): val = deletesparse(val) if not isinstance(val, scipy.sparse.coo.coo_matrix) and not isinstance(val, np.ndarray) \ and not isinstance(val, scipy.sparse.csc.csc_matrix) and not keys == 'dview': - logging.debug(f"type of val is {type(val)}") + logger.debug(f"type of val is {type(val)}") cnm[keys] = val else: @@ -483,7 +484,6 @@ def cnmf(Cn, A_gt, A_test, C_gt, C_test, dims_gt, dims_test, dview=None, sensiti def plotrig(init, curr, timer, sensitivity): - diff = np.linalg.norm(np.asarray(init) - np.asarray(curr)) / np.linalg.norm(init) isdiff = diff > sensitivity info = {'isdifferent': int(isdiff), 'diff_data': diff, 'diff_timing': timer} @@ -491,20 +491,6 @@ def plotrig(init, curr, timer, sensitivity): init = init.transpose([1, 0]) xc = np.arange(curr.shape[1]) xi = np.arange(init.shape[1]) - #try: - # pl.figure() - # pl.subplot(1, 2, 1) - # pl.plot(xc, curr[0], 'r', xi, init[0], 'b') - # pl.legend(['x shifts curr', 'x shifts init']) - # pl.xlabel('frames') - # pl.ylabel('pixels') - # pl.subplot(1, 2, 2) - # pl.plot(xc, curr[1], 'r', xi, init[1], 'b') - # pl.legend(['yshifts curr', 'y shifts init']) - # pl.xlabel('frames') - # pl.ylabel('pixels') - #except: - # logging.warning("not able to plot") return info diff --git a/caiman/tests/comparison_general.py b/caiman/tests/comparison_general.py index da0538de8..5361dad28 100644 --- a/caiman/tests/comparison_general.py +++ b/caiman/tests/comparison_general.py @@ -6,15 +6,7 @@ See Also ------------ caiman/tests/comparison/comparison.py - - """ -#%% -#\package None -#\version 1.0 -#\copyright GNU General Public License v2.0 -#\date Created on june 2017 -#\author: Jremie KALFON import copy import cv2 @@ -49,7 +41,8 @@ # You can log to a file using the filename parameter, or make the output more or less # verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR -logging.getLogger("caiman").setLevel(logging.DEBUG) +logger = logging.getLogger("caiman") +logger.setLevel(logging.DEBUG) # GLOBAL VAR params_movie = { @@ -201,7 +194,7 @@ def test_general(): if len(name_new) > 1: fname_new = cm.save_memmap_join(name_new, base_name='Yr', n_chunks=params_movie['n_chunks'], dview=None) else: - logging.warning('One file only, not saving!') + logger.warning('One file only, not saving!') fname_new = name_new[0] Yr, dims, T = cm.load_memmap(fname_new) @@ -256,7 +249,7 @@ def test_general(): b_tot = cnm.estimates.b f_tot = cnm.estimates.f # DISCARDING - logging.info(('Number of components:' + str(A_tot.shape[-1]))) + logger.info(('Number of components:' + str(A_tot.shape[-1]))) final_frate = params_movie['final_frate'] # threshold on space consistency r_values_min = 
params_movie['r_values_min_patch'] @@ -332,29 +325,29 @@ def test_general(): for log_file in log_files: os.remove(log_file) except: - logging.warning('Cannot remove log files') + logger.warning('Cannot remove log files') ############ assertions ################## pb = False if (comp.information['differences']['params_movie']): - logging.error( - "you need to set the same movie parameters than the ground truth to have a real comparison (use the comp.see() function to explore it)" + logger.error( + "You must set the same movie parameters as the ground truth to have a real comparison (use the comp.see() function to explore it)" ) pb = True if (comp.information['differences']['params_cnm']): - logging.warning( - "you need to set the same cnmf parameters than the ground truth to have a real comparison (use the comp.see() function to explore it)" + logger.warning( + "You must set the same cnmf parameters as the ground truth to have a real comparison (use the comp.see() function to explore it)" ) # pb = True if (comp.information['diff']['rig']['isdifferent']): - logging.error("the rigid shifts are different from the groundtruth ") + logger.error("the rigid shifts are different from the groundtruth ") pb = True if (comp.information['diff']['cnmpatch']['isdifferent']): - logging.error("the cnmf on patch produces different results than the groundtruth ") + logger.error("the cnmf on patch produces different results than the groundtruth ") pb = True if (comp.information['diff']['cnmfull']['isdifferent']): - logging.error("the cnmf full frame produces different results than the groundtruth ") + logger.error("the cnmf full frame produces different results than the groundtruth ") pb = True assert (not pb) diff --git a/caiman/tests/test_deconvolution.py b/caiman/tests/test_deconvolution.py index 46694a0fc..a34eaa086 100644 --- a/caiman/tests/test_deconvolution.py +++ b/caiman/tests/test_deconvolution.py @@ -11,7 +11,8 @@ # You can log to a file using the filename parameter, or make the output more or less # verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR -logging.getLogger("caiman").setLevel(logging.DEBUG) +logger = logging.getLogger("caiman") +logger.setLevel(logging.DEBUG) def gen_data(g=[.95], sn=.2, T=1000, framerate=30, firerate=.5, b=10, N=1, seed=0): """ @@ -67,7 +68,7 @@ def foo(method, p): res = constrained_foopsi(y, g=g, sn=sn, p=p, method=method) npt.assert_allclose(np.corrcoef(res[0], c)[0, 1], 1, [.01, .1][i]) npt.assert_allclose(np.corrcoef(res[-2], s)[0, 1], 1, [.03, .3][i]) - logging.debug(['\n', ''][p - 1] + ' %5s AR%d %.4fs' % (method, p, time() - t)) + logger.debug(['\n', ''][p - 1] + ' %5s AR%d %.4fs' % (method, p, time() - t)) def test_oasis(): diff --git a/demos/general/demo_OnACID.py b/demos/general/demo_OnACID.py index ca56b41d1..39418ef7d 100755 --- a/demos/general/demo_OnACID.py +++ b/demos/general/demo_OnACID.py @@ -57,7 +57,7 @@ def main(): cnm.fit_online() # plot contours - logging.info(f"Number of components: {cnm.estimates.A.shape[-1]}") + logger.info(f"Number of components: {cnm.estimates.A.shape[-1]}") Cn = cm.load(fnames[0], subindices=slice(0,500)).local_correlations(swap_dim=False) cnm.estimates.plot_contours(img=Cn) diff --git a/demos/general/demo_OnACID_mesoscope.py b/demos/general/demo_OnACID_mesoscope.py index a3afe5d57..4e1fe86f6 100755 --- a/demos/general/demo_OnACID_mesoscope.py +++ b/demos/general/demo_OnACID_mesoscope.py @@ -69,7 +69,7 @@ def main(): cnm.fit_online() # plot contours (this may take time) - 
logging.info(f"Number of components: {cnm.estimates.A.shape[-1]}") + logger.info(f"Number of components: {cnm.estimates.A.shape[-1]}") images = cm.load(fnames) Cn = images.local_correlations(swap_dim=False, frames_per_chunk=500) cnm.estimates.plot_contours(img=Cn, display_numbers=False) diff --git a/demos/notebooks/demo_OnACID_mesoscope.ipynb b/demos/notebooks/demo_OnACID_mesoscope.ipynb index b6d2407ab..cca9bb663 100644 --- a/demos/notebooks/demo_OnACID_mesoscope.ipynb +++ b/demos/notebooks/demo_OnACID_mesoscope.ipynb @@ -175,7 +175,7 @@ "metadata": {}, "outputs": [], "source": [ - "logging.info('Number of components: ' + str(cnm.estimates.A.shape[-1]))\n", + "logger.info('Number of components: ' + str(cnm.estimates.A.shape[-1]))\n", "Cn = cm.load(fnames[0], subindices=slice(0,500)).local_correlations(swap_dim=False)\n", "cnm.estimates.plot_contours(img=Cn)" ]