Commit: demo_pipeline_NWB conversion work
pgunn committed Mar 12, 2024
1 parent 1ea0409 commit 8aa12fe
Showing 4 changed files with 78 additions and 125 deletions.
2 changes: 1 addition & 1 deletion demos/general/demo_pipeline.py
@@ -50,7 +50,7 @@ def main():
level=logging.WARNING)

# First set up some parameters for data and motion correction
- opts = params.CNMFParams(params_from_file=cfg.input)
+ opts = params.CNMFParams(params_from_file=cfg.configfile)

if cfg.input is not None:
opts.change_params({"data": {"fnames": cfg.input}})
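For reference, a minimal sketch of the config-then-override pattern this change establishes: CNMFParams is built from the --configfile JSON, and a CLI input, when present, overrides only the filenames in the 'data' group. The paths and filenames below are hypothetical stand-ins, not part of the commit.

from caiman.source_extraction.cnmf import params

# Load all parameters from the JSON config file (hypothetical path)
opts = params.CNMFParams(params_from_file='demos/general/params_demo_pipeline.json')

# A CLI-supplied input (stand-in for cfg.input) overrides only data/fnames
input_files = ['/path/to/my_movie.tif']
if input_files is not None:
    opts.change_params({"data": {"fnames": input_files}})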
151 changes: 29 additions & 122 deletions demos/general/demo_pipeline_NWB.py
@@ -4,7 +4,8 @@
This script follows closely the demo_pipeline.py script but uses the
Neurodata Without Borders (NWB) file format for loading the input and saving
the output. It is meant as an example on how to use NWB files with CaImAn.
- authors: @agiovann and @epnev
+ If you provide a filename in the config json, it must be an nwb file.
"""

import argparse
@@ -22,7 +23,7 @@
except:
pass

- import caiman as cm
+ import caiman
from caiman.motion_correction import MotionCorrect
from caiman.paths import caiman_datadir
from caiman.source_extraction.cnmf import cnmf as cnmf
@@ -47,15 +48,19 @@ def main():
"%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s",
level=logging.WARNING)

- # This demo will either accept a nwb file or it'll convert an existing movie into one
- if cfg.input is None:
+ opts = params.CNMFParams(params_from_file=cfg.configfile)
+ if cfg.input is not None:
+ opts.change_params({"data": {"fnames": cfg.input}})
+ # TODO throw error if it is not an nwb file
+ if not opts.data['fnames']: # Set neither by CLI arg nor through JSON, so use default data
+ # This demo will either accept a nwb file or it'll convert an existing movie into one
+ # default path: no input was provided, so we'll take the "Sue" sample data, convert it to a nwb, and then ingest it.
+ # The convert target is the original fn, with the extension swapped to nwb.
fr = float(15) # imaging rate in frames per second
pre_convert = download_demo('Sue_2x_3000_40_-46.tif')
convert_target = os.path.join(caiman_datadir(), 'Sue_2x_3000_40_-46.nwb')

- orig_movie = cm.load(pre_convert, fr=fr)
+ orig_movie = caiman.load(pre_convert, fr=fr)
# save file in NWB format with various additional info
orig_movie.save(convert_target,
sess_desc="test",
@@ -69,6 +74,7 @@
session_id='Session 1',
var_name_hdf5='TwoPhotonSeries')
fnames = [convert_target]
+ opts.change_params({'data': {'fnames': fnames, 'var_name_hdf5': 'TwoPhotonSeries'}})
elif cfg.input[0].endswith('.nwb'):
if os.path.isfile(cfg.input[0]):
# We were handed at least one nwb file and it exists either right here or as an absolute path
@@ -91,65 +97,14 @@
else: # Someone mentioned an nwb file but we can't find it!
raise Exception(f"Could not find referenced input file {cfg.input[0]}")
else:
- # We were handed a file in some other format, so let's make an nwb file out of it and
- # then ingest that nwb file.
- if len(cfg.input) != 1:
- raise Exception("We're only willing to convert one file to nwb")
- pre_convert = cfg.input[0]
- post_convert = os.path.splitext(pre_convert)[0] + ".nwb"
- fr = float(15) # TODO: Make this a cli parameter
- orig_movie = cm.load(pre_convert, fr=fr)
- # save file in NWB format with various additional info
- orig_movie.save(convert_target,
- sess_desc="test",
- identifier="demo 1",
- imaging_plane_description='single plane',
- emission_lambda=520.0, indicator='GCAMP6f', # Entirely synthetic, but if we don't know, what can we do?
- location='unknown',
- experimenter='Unknown Name', lab_name='Unknown Lab'
- institution='Unknown University',
- experiment_description='Experiment Description',
- session_id='Session 1') # XXX do we need to provide var_name_hdf5?
- fnames = [post_convert]
+ raise Exception("If you're providing your own data to this demo, you must provide it in nwb format or pre-convert it yourself")

# estimates save path can be same or different from raw data path
save_path = os.path.splitext(fnames[0])[0] + '_CNMF_estimates.nwb' # TODO: Make this a parameter?

+ # We're done with all the file input parts

- # dataset dependent parameters
- decay_time = 0.4 # length of a typical transient in seconds
- dxy = (2., 2.) # spatial resolution in x and y in (um per pixel)
- # note the lower than usual spatial resolution here
- max_shift_um = (12., 12.) # maximum shift in um
- patch_motion_um = (100., 100.) # patch size for non-rigid correction in um

- # First setup some parameters for data and motion correction
- # motion correction parameters
- pw_rigid = True # flag to select rigid vs pw_rigid motion correction
- # maximum allowed rigid shift in pixels
- max_shifts = [int(a/b) for a, b in zip(max_shift_um, dxy)]
- # start a new patch for pw-rigid motion correction every x pixels
- strides = tuple([int(a/b) for a, b in zip(patch_motion_um, dxy)])
- # overlap between patches (size of patch in pixels: strides+overlaps)
- overlaps = (24, 24)
- # maximum deviation allowed for patch with respect to rigid shifts
- max_deviation_rigid = 3

- params_dict = {'fnames': fnames,
- 'fr': fr,
- 'decay_time': decay_time,
- 'dxy': dxy,
- 'pw_rigid': pw_rigid,
- 'max_shifts': max_shifts,
- 'strides': strides,
- 'overlaps': overlaps,
- 'max_deviation_rigid': max_deviation_rigid,
- 'border_nan': 'copy',
- 'var_name_hdf5': 'TwoPhotonSeries'}
- opts = params.CNMFParams(params_dict=params_dict)

- m_orig = cm.load_movie_chain(fnames, var_name_hdf5=opts.data['var_name_hdf5'])
+ m_orig = caiman.load_movie_chain(fnames, var_name_hdf5=opts.data['var_name_hdf5'])

# play the movie (optional)
# playing the movie using opencv. It requires loading the movie in memory.
@@ -161,7 +116,7 @@
moviehandle.play(q_max=99.5, fr=60, magnification=2)

# start a cluster for parallel processing
- c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc)
+ c, dview, n_processes = caiman.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc)

# Motion Correction
# first we create a motion correction object with the specified parameters
@@ -173,10 +128,10 @@

# compare with original movie
if not cfg.no_play:
- m_orig = cm.load_movie_chain(fnames, var_name_hdf5=opts.data['var_name_hdf5'])
- m_els = cm.load(mc.mmap_file)
+ m_orig = caiman.load_movie_chain(fnames, var_name_hdf5=opts.data['var_name_hdf5'])
+ m_els = caiman.load(mc.mmap_file)
ds_ratio = 0.2
- moviehandle = cm.concatenate([m_orig.resize(1, 1, ds_ratio) - mc.min_mov*mc.nonneg_movie,
+ moviehandle = caiman.concatenate([m_orig.resize(1, 1, ds_ratio) - mc.min_mov*mc.nonneg_movie,
m_els.resize(1, 1, ds_ratio)], axis=2)
moviehandle.play(fr=60, q_max=99.5, magnification=2) # press q to exit

@@ -187,55 +142,24 @@
# the boundaries

# memory map the file in order 'C'
- fname_new = cm.save_memmap(mc.mmap_file, base_name='memmap_', order='C',
+ fname_new = caiman.save_memmap(mc.mmap_file, base_name='memmap_', order='C',
border_to_0=border_to_0) # exclude borders

# now load the file
- Yr, dims, T = cm.load_memmap(fname_new)
+ Yr, dims, T = caiman.load_memmap(fname_new)
images = np.reshape(Yr.T, [T] + list(dims), order='F')
# load frames in python format (T x X x Y)

# restart cluster to clean up memory
- cm.stop_server(dview=dview)
- c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc)

- # parameters for source extraction and deconvolution
- p = 1 # order of the autoregressive system
- gnb = 2 # number of global background components
- merge_thr = 0.85 # merging threshold, max correlation allowed
- rf = 15
- # half-size of the patches in pixels. e.g., if rf=25, patches are 50x50
- stride_cnmf = 6 # amount of overlap between the patches in pixels
- K = 4 # number of components per patch
- gSig = [4, 4] # expected half size of neurons in pixels
- # initialization method (if analyzing dendritic data using 'sparse_nmf')
- method_init = 'greedy_roi'
- ssub = 2 # spatial subsampling during initialization
- tsub = 2 # temporal subsampling during initialization

- # parameters for component evaluation
- opts_dict = {'fnames': fnames,
- 'fr': fr,
- 'nb': gnb,
- 'rf': rf,
- 'K': K,
- 'gSig': gSig,
- 'stride': stride_cnmf,
- 'method_init': method_init,
- 'rolling_sum': True,
- 'merge_thr': merge_thr,
- 'n_processes': n_processes,
- 'only_init': True,
- 'ssub': ssub,
- 'tsub': tsub}

- opts.change_params(params_dict=opts_dict);
+ caiman.stop_server(dview=dview)
+ c, dview, n_processes = caiman.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc)

# RUN CNMF ON PATCHES
# First extract spatial and temporal components on patches and combine them
# for this step deconvolution is turned off (p=0)

- opts.change_params({'p': 0})
+ saved_p = opts.preprocess['p'] # Save the deconvolution parameter for later restoration
+ opts.change_params({'preprocess': {'p': 0}, 'temporal': {'p': 0}})
cnm = cnmf.CNMF(n_processes, params=opts, dview=dview)
cnm = cnm.fit(images)

@@ -247,39 +171,20 @@
# cnm1.fit_file(motion_correct=True)

# plot contours of found components
- Cn = cm.local_correlations(images, swap_dim=False)
+ Cn = caiman.summary_images.local_correlations(images, swap_dim=False)
Cn[np.isnan(Cn)] = 0
if not cfg.no_play:
cnm.estimates.plot_contours(img=Cn)
plt.title('Contour plots of found components')

# save results in a separate file (just for demonstration purposes)
cnm.estimates.Cn = Cn
cnm.save(fname_new[:-4]+'hdf5')
#cm.movie(Cn).save(fname_new[:-5]+'_Cn.tif')
cnm.save(fname_new[:-4] + 'hdf5') # FIXME

# RE-RUN seeded CNMF on accepted patches to refine and perform deconvolution
- cnm.params.change_params({'p': p})
+ cnm.params.change_params({'preprocess': {'p': saved_p}, 'temporal': {'p': saved_p}}) # Restore deconvolution
cnm2 = cnm.refit(images, dview=dview)

- # Component Evaluation
- # Components are evaluated in three ways:
- # a) the shape of each component must be correlated with the data
- # b) a minimum peak SNR is required over the length of a transient
- # c) each shape passes a CNN based classifier

- min_SNR = 2 # signal to noise ratio for accepting a component
- rval_thr = 0.85 # space correlation threshold for accepting a component
- use_cnn = True
- cnn_thr = 0.99 # threshold for CNN based classifier
- cnn_lowest = 0.1 # neurons with cnn probability lower than this value are rejected

- cnm2.params.set('quality', {'decay_time': decay_time,
- 'min_SNR': min_SNR,
- 'rval_thr': rval_thr,
- 'use_cnn': use_cnn,
- 'min_cnn_thr': cnn_thr,
- 'cnn_lowest': cnn_lowest})
cnm2.estimates.evaluate_components(images, cnm2.params, dview=dview)

# Save new estimates object (after adding correlation image)
@@ -314,7 +219,7 @@ def main():
include_bck=False) # background not shown

# Stop the cluster and clean up log files
- cm.stop_server(dview=dview)
+ caiman.stop_server(dview=dview)

if not cfg.keep_logs:
log_files = glob.glob('*_LOG_*')
@@ -327,6 +232,7 @@

def handle_args():
parser = argparse.ArgumentParser(description="Demonstrate 2P Pipeline using batch algorithm, with nwb files")
parser.add_argument("--configfile", default=os.path.join(caiman.paths.caiman_datadir(), 'demos', 'general', 'params_demo_pipeline_NWB.json'), help="JSON Configfile for Caiman parameters")
parser.add_argument("--keep_logs", action="store_true", help="Keep temporary logfiles")
parser.add_argument("--no_play", action="store_true", help="Do not display results")
parser.add_argument("--cluster_backend", default="multiprocessing", help="Specify multiprocessing, ipyparallel, or single to pick an engine")
@@ -337,3 +243,4 @@ def handle_args():

########
main()
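For reference, the default branch of the revised demo in one self-contained sketch, assembled from this diff rather than copied from the repo (the download_demo import path is an assumption; the session metadata is the same synthetic placeholder data the demo itself uses):

import os
import caiman
from caiman.paths import caiman_datadir
from caiman.utils.utils import download_demo  # assumed import path

fr = 15.0  # imaging rate in frames per second
pre_convert = download_demo('Sue_2x_3000_40_-46.tif')
convert_target = os.path.join(caiman_datadir(), 'Sue_2x_3000_40_-46.nwb')

# Load the sample movie and save it in NWB format with placeholder session metadata
orig_movie = caiman.load(pre_convert, fr=fr)
orig_movie.save(convert_target,
                sess_desc="test",
                identifier="demo 1",
                imaging_plane_description='single plane',
                emission_lambda=520.0, indicator='GCAMP6f',
                location='unknown',
                experimenter='Unknown Name', lab_name='Unknown Lab',
                institution='Unknown University',
                experiment_description='Experiment Description',
                session_id='Session 1',
                var_name_hdf5='TwoPhotonSeries')

# The demo then points the data group at the converted file:
# opts.change_params({'data': {'fnames': [convert_target], 'var_name_hdf5': 'TwoPhotonSeries'}})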

4 changes: 2 additions & 2 deletions demos/general/params_demo_pipeline.json
@@ -2,10 +2,10 @@
"data": {
"fr": 30,
"decay_time": 0.4,
"dxy": [2.0, 2.0],
"nb": 2
"dxy": [2.0, 2.0]
},
"init": {
"nb": 2,
"K": 4,
"gSig": [4, 4],
"method_init": "greedy_roi",
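The change above moves "nb" from the "data" group to the "init" group. A minimal sketch of the equivalent programmatic override under the new layout (the config path is hypothetical):

from caiman.source_extraction.cnmf import params

opts = params.CNMFParams(params_from_file='demos/general/params_demo_pipeline.json')
opts.change_params({'init': {'nb': 2}})  # 'nb' now lives under 'init', not 'data'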
46 changes: 46 additions & 0 deletions demos/general/params_demo_pipeline_NWB.json
@@ -0,0 +1,46 @@
{
"data": {
"fr": 15.0,
"decay_time": 0.4,
"dxy": [2.0, 2.0]
},
"init": {
"nb": 2,
"K": 4,
"gSig": [4, 4],
"method_init": "greedy_roi",
"rolling_sum": true,
"ssub": 2,
"tsub": 2
},
"motion": {
"pw_rigid": true,
"max_shifts": [6, 6],
"strides": [50, 50],
"overlaps": [24, 24],
"max_deviation_rigid": 3,
"border_nan": "copy"
},
"preprocess": {
"p": 1
},
"temporal": {
"p": 1
},
"patch": {
"rf": 15,
"stride": 6,
"only_init": true
},
"merging": {
"merge_thr": 0.85
},
"quality": {
"decay_time": 0.4,
"min_SNR": 2,
"rval_thr": 0.85,
"use_cnn": true,
"min_cnn_thr": 0.99,
"cnn_lowest": 0.1
}
}
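A quick sketch of how this new config maps onto CNMFParams groups, assuming the default config location that handle_args above points at:

import os
from caiman.paths import caiman_datadir
from caiman.source_extraction.cnmf import params

cfgfile = os.path.join(caiman_datadir(), 'demos', 'general', 'params_demo_pipeline_NWB.json')
opts = params.CNMFParams(params_from_file=cfgfile)
print(opts.data['fr'])          # 15.0, from the "data" group
print(opts.motion['pw_rigid'])  # True, from the "motion" group
print(opts.preprocess['p'])     # 1, AR order used for deconvolution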
