From e86f4a77dcff4501baff6a1b86a03026e8b7e8a8 Mon Sep 17 00:00:00 2001 From: cnstll Date: Wed, 3 Aug 2022 17:54:43 +0200 Subject: [PATCH 01/25] =?UTF-8?q?Calculation=20of=20Activity=20Index=20|?= =?UTF-8?q?=C2=A0Printing=20of=20global=20statistics?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- WF_NTP/WF_NTP/WF_NTP_script.py | 846 ++++++++++++++++++--------------- 1 file changed, 451 insertions(+), 395 deletions(-) diff --git a/WF_NTP/WF_NTP/WF_NTP_script.py b/WF_NTP/WF_NTP/WF_NTP_script.py index 9ee07c8..106f596 100755 --- a/WF_NTP/WF_NTP/WF_NTP_script.py +++ b/WF_NTP/WF_NTP/WF_NTP_script.py @@ -1,5 +1,4 @@ -""" -Copyright (C) 2019 Quentin Peter +"""Copyright (C) 2019 Quentin Peter. This file is part of WF_NTP. @@ -7,28 +6,29 @@ recieved a copy of the licence along with WF_NTP. If not, see https://creativecommons.org/licenses/by-nc-sa/4.0/. """ -import numpy as np -import matplotlib.pyplot as plt -import matplotlib.cm as cm -from scipy import interpolate, ndimage -import cv2 +import functools +import json import os +import pickle +import sys import time +import traceback +import warnings +from collections import Counter, defaultdict + +import cv2 import mahotas as mh +import matplotlib.cm as cm +import matplotlib.path as mplPath +import matplotlib.pyplot as plt +import numpy as np import pandas as pd -import trackpy as tp -from skimage import measure, morphology, io import skimage.draw -import pickle -import warnings -import matplotlib.path as mplPath -from collections import defaultdict, Counter -from skimage.transform import resize -import traceback +import trackpy as tp +from scipy import interpolate, ndimage from scipy.signal import savgol_filter -import functools -import sys -import json +from skimage import io, measure, morphology +from skimage.transform import resize class StdoutRedirector(object): @@ -37,7 +37,7 @@ class StdoutRedirector(object): def __init__(self, queue, prefix=None): self.queue = queue if not prefix: - prefix = '' + prefix = "" self.prefix = prefix def write(self, string): @@ -52,20 +52,17 @@ def flush(self): def save_settings(settings): # Make output directory try: - os.mkdir(settings['save_as']) + os.mkdir(settings["save_as"]) except OSError: - print( - 'Warning: job folder "%s" already created, overwriting.' % - settings['save_as']) + print('Warning: job folder "%s" already created, overwriting.' % settings["save_as"]) - settingsfilename = os.path.join(settings['save_as'], 'settings.json') - with open(settingsfilename, 'w') as f: + settingsfilename = os.path.join(settings["save_as"], "settings.json") + with open(settingsfilename, "w") as f: json.dump(settings, f, indent=4) def run_tracker(settings, stdout_queue=None): - """ - Run the tracker with the given settings. + """Run the tracker with the given settings. stdout_queue can be used to redirect stdout. """ @@ -77,14 +74,14 @@ def run_tracker(settings, stdout_queue=None): # Do some adjustments settings = settings.copy() - settings["frames_to_estimate_velocity"] = min([ - settings["frames_to_estimate_velocity"], - settings["min_track_length"]]) - settings["bend_threshold"] /= 100. 
+ settings["frames_to_estimate_velocity"] = min( + [settings["frames_to_estimate_velocity"], settings["min_track_length"]] + ) + settings["bend_threshold"] /= 100.0 video = Video(settings, grey=True) - print('Video shape:', video[0].shape) + print("Video shape:", video[0].shape) regions = settings["regions"] try: @@ -98,13 +95,12 @@ def run_tracker(settings, stdout_queue=None): all_regions = np.zeros_like(video[0]) for key, d in list(regions.items()): im = np.zeros_like(video[0]) - rr, cc = skimage.draw.polygon(np.array(d['y']), np.array(d['x'])) + rr, cc = skimage.draw.polygon(np.array(d["y"]), np.array(d["x"])) try: im[rr, cc] = 1 except IndexError: - print('Region "', key, '" cannot be applied to video', - settings["video_filename"]) - print('Input image sizes do not match.') + print('Region "', key, '" cannot be applied to video', settings["video_filename"]) + print("Input image sizes do not match.") return None, None all_regions += im all_regions = all_regions > 0.1 @@ -113,7 +109,7 @@ def run_tracker(settings, stdout_queue=None): t0 = time.time() save_folder = settings["save_as"] - ims_folder = os.path.join(save_folder, 'imgs') + ims_folder = os.path.join(save_folder, "imgs") if not os.path.exists(ims_folder): os.mkdir(ims_folder) @@ -125,16 +121,15 @@ def run_tracker(settings, stdout_queue=None): track = form_trajectories(locations, settings) results = extract_data(track, settings) - if not check_for_worms(results["particle_dataframe"].index, - settings): - print('No worms detected. Stopping!') + if not check_for_worms(results["particle_dataframe"].index, settings): + print("No worms detected. Stopping!") return print_data, None # Output write_results_file(results, settings) - print('Done (in %.1f minutes).' % ((time.time() - t0) / 60.)) + print("Done (in %.1f minutes)." 
% ((time.time() - t0) / 60.0)) video.release() - return print_data, results['particle_dataframe'].loc[:, "bends"] + return print_data, results["particle_dataframe"].loc[:, "bends"] class Video: @@ -148,12 +143,10 @@ def __init__(self, settings, grey=False): raise RuntimeError(f"{video_filename} does not exist.") self.cap = cv2.VideoCapture(video_filename) - self.len = (self.cap.get(cv2.CAP_PROP_FRAME_COUNT) - - settings["start_frame"]) + self.len = self.cap.get(cv2.CAP_PROP_FRAME_COUNT) - settings["start_frame"] self.start_frame = settings["start_frame"] limit_images_to = settings["limit_images_to"] - if (limit_images_to and limit_images_to < ( - self.len - self.start_frame)): + if limit_images_to and limit_images_to < (self.len - self.start_frame): self.len = limit_images_to self.grey = grey if grey: @@ -202,6 +195,7 @@ def release(self): def track_all_locations(video, settings, stdout_queue): """Track and get all locations.""" + def get_Z_brightness(zi): if settings["keep_paralyzed_method"]: return find_Z_with_paralyzed(video, settings, *zi) @@ -209,19 +203,19 @@ def get_Z_brightness(zi): return find_Z(video, settings, *zi) apply_indeces = list( - map(int, list(np.linspace(0, len(video), - len(video) // settings["use_images"] + 2)))) + map(int, list(np.linspace(0, len(video), len(video) // settings["use_images"] + 2))) + ) apply_indeces = list(zip(apply_indeces[:-1], apply_indeces[1:])) - Z_indeces = [(max([0, i - settings["use_around"]]), - min(j + settings["use_around"], len(video))) - for i, j in apply_indeces] + Z_indeces = [ + (max([0, i - settings["use_around"]]), min(j + settings["use_around"], len(video))) + for i, j in apply_indeces + ] # Get frames0 print material Z, mean_brightness = get_Z_brightness(Z_indeces[0]) - print_data = process_frame(settings, Z, mean_brightness, - len(video), - args=(0, video[0]), - return_plot=True) + print_data = process_frame( + settings, Z, mean_brightness, len(video), args=(0, video[0]), return_plot=True + ) if settings["stop_after_example_output"]: return print_data, None @@ -232,8 +226,7 @@ def get_Z_brightness(zi): def locate(args): i, zi = args Z, mean_brightness = get_Z_brightness(zi) - return process_frames(video, settings, *i, Z=Z, - mean_brightness=mean_brightness) + return process_frames(video, settings, *i, Z=Z, mean_brightness=mean_brightness) split_results = list(map(locate, args)) locations = [] @@ -242,12 +235,13 @@ def locate(args): return print_data, locations -def process_frame(settings, Z, mean_brightness, nframes, - args=None, return_plot=False): +def process_frame(settings, Z, mean_brightness, nframes, args=None, return_plot=False): """Locate worms in a given frame.""" i, frameorig = args - print(' : Locating in frame %i/%i' % (i + 1 + settings["start_frame"], - nframes + settings["start_frame"])) + print( + " : Locating in frame %i/%i" + % (i + 1 + settings["start_frame"], nframes + settings["start_frame"]) + ) if mean_brightness: frame = frameorig * mean_brightness / np.mean(frameorig) @@ -255,77 +249,89 @@ def process_frame(settings, Z, mean_brightness, nframes, frame = np.array(frameorig, dtype=np.float64) frame = np.abs(frame - Z) * settings["all_regions"] if (frame > 1.1).any(): - frame /= 255. + frame /= 255.0 - thresholded = frame > (settings["threshold"] / 255.) 
+ thresholded = frame > (settings["threshold"] / 255.0) opening = settings["opening"] closing = settings["closing"] save_folder = settings["save_as"] if opening > 0: frame_after_open = ndimage.binary_opening( - thresholded, - structure=np.ones((opening, opening))).astype(np.int) + thresholded, structure=np.ones((opening, opening)) + ).astype(np.int) else: frame_after_open = thresholded if closing > 0: frame_after_close = ndimage.binary_closing( - frame_after_open, - structure=np.ones((closing, closing))).astype(np.int) + frame_after_open, structure=np.ones((closing, closing)) + ).astype(np.int) else: frame_after_close = frame_after_open - labeled, _ = mh.label(frame_after_close, np.ones( - (3, 3), bool)) + labeled, _ = mh.label(frame_after_close, np.ones((3, 3), bool)) sizes = mh.labeled.labeled_size(labeled) - remove = np.where(np.logical_or(sizes < settings["min_size"], - sizes > settings["max_size"])) + remove = np.where(np.logical_or(sizes < settings["min_size"], sizes > settings["max_size"])) labeled_removed = mh.labeled.remove_regions(labeled, remove) labeled_removed, n_left = mh.labeled.relabel(labeled_removed) props = measure.regionprops(labeled_removed) - prop_list = [{"area": props[j].area, "centroid":props[j].centroid, - "eccentricity":props[j].eccentricity, - "area_eccentricity":props[j].eccentricity, - "minor_axis_length":props[j].minor_axis_length / - (props[j].major_axis_length + 0.001)} - for j in range(len(props))] + prop_list = [ + { + "area": props[j].area, + "centroid": props[j].centroid, + "eccentricity": props[j].eccentricity, + "area_eccentricity": props[j].eccentricity, + "minor_axis_length": props[j].minor_axis_length / (props[j].major_axis_length + 0.001), + } + for j in range(len(props)) + ] if settings["skeletonize"]: skeletonized_frame = morphology.skeletonize(frame_after_close) - skeletonized_frame = prune(skeletonized_frame, - settings["prune_size"]) + skeletonized_frame = prune(skeletonized_frame, settings["prune_size"]) skel_labeled = labeled_removed * skeletonized_frame if settings["do_full_prune"]: skel_labeled = prune_fully(skel_labeled) - skel_props = measure.regionprops(skel_labeled, coordinates='xy') + skel_props = measure.regionprops(skel_labeled, coordinates="xy") for j in range(len(skel_props)): prop_list[j]["length"] = skel_props[j].area prop_list[j]["eccentricity"] = skel_props[j].eccentricity - prop_list[j]["minor_axis_length"] = \ - skel_props[j].minor_axis_length\ - / (skel_props[j].major_axis_length + 0.001) + prop_list[j]["minor_axis_length"] = skel_props[j].minor_axis_length / ( + skel_props[j].major_axis_length + 0.001 + ) if return_plot: - return (sizes, save_folder, frameorig, Z, frame, thresholded, - frame_after_open, frame_after_close, labeled, labeled_removed, - (skel_labeled if settings["skeletonize"] else None)) + return ( + sizes, + save_folder, + frameorig, + Z, + frame, + thresholded, + frame_after_open, + frame_after_close, + labeled, + labeled_removed, + (skel_labeled if settings["skeletonize"] else None), + ) output_overlayed_images = settings["output_overlayed_images"] if i < output_overlayed_images or output_overlayed_images is None: - io.imsave(os.path.join(save_folder, "imgs", '%05d.jpg' % (i)), - np.array(255 * (labeled_removed == 0), dtype=np.uint8), - check_contrast=False) + io.imsave( + os.path.join(save_folder, "imgs", "%05d.jpg" % (i)), + np.array(255 * (labeled_removed == 0), dtype=np.uint8), + check_contrast=False, + ) return prop_list def process_frames(video, settings, i0, i1, Z, mean_brightness): """Frocess 
frames from i0 to i1.""" - func = functools.partial( - process_frame, settings, Z, mean_brightness, len(video)) + func = functools.partial(process_frame, settings, Z, mean_brightness, len(video)) def args(): for i in range(i0, i1): @@ -336,56 +342,55 @@ def args(): def form_trajectories(loc, settings): """Form worm trajectories.""" - print('Forming worm trajectories...', end=' ') - data = {'x': [], 'y': [], 'frame': [], - 'eccentricity': [], 'area': [], - 'minor_axis_length': [], - 'area_eccentricity': []} + print("Forming worm trajectories...", end=" ") + data = { + "x": [], + "y": [], + "frame": [], + "eccentricity": [], + "area": [], + "minor_axis_length": [], + "area_eccentricity": [], + } for t, l in enumerate(loc): - data['x'] += [d['centroid'][0] for d in l] - data['y'] += [d['centroid'][1] for d in l] - data['eccentricity'] += [d['eccentricity'] for d in l] - data['area_eccentricity'] += [d['area_eccentricity'] for d in l] - data['minor_axis_length'] += [d['minor_axis_length'] for d in l] - data['area'] += [d['area'] for d in l] - data['frame'] += [t] * len(l) + data["x"] += [d["centroid"][0] for d in l] + data["y"] += [d["centroid"][1] for d in l] + data["eccentricity"] += [d["eccentricity"] for d in l] + data["area_eccentricity"] += [d["area_eccentricity"] for d in l] + data["minor_axis_length"] += [d["minor_axis_length"] for d in l] + data["area"] += [d["area"] for d in l] + data["frame"] += [t] * len(l) data = pd.DataFrame(data) try: - track = tp.link_df(data, search_range=settings["max_dist_move"], - memory=settings["memory"]) + track = tp.link_df(data, search_range=settings["max_dist_move"], memory=settings["memory"]) except tp.linking.SubnetOversizeException: raise RuntimeError( - 'Linking problem too complex.' - ' Reduce maximum move distance or memory.') - track = tp.filter_stubs(track, min([settings["min_track_length"], - len(loc)])) + "Linking problem too complex." " Reduce maximum move distance or memory." + ) + track = tp.filter_stubs(track, min([settings["min_track_length"], len(loc)])) try: - with open(os.path.join(settings["save_as"], 'track.p'), - 'bw') as trackfile: + with open(os.path.join(settings["save_as"], "track.p"), "bw") as trackfile: pickle.dump(track, trackfile) except Exception: traceback.print_exc() - print('Warning: no track file saved. Track too long.') - print(' plot_path.py will not work on this file.') + print("Warning: no track file saved. 
Track too long.") + print(" plot_path.py will not work on this file.") return track def extract_data(track, settings): """Extract data from track and return a pandas DataFrame.""" - P = track['particle'] - columns_dtype = { - "bends": object - } + P = track["particle"] + columns_dtype = {"bends": object} # Use particle as index - particle_dataframe = pd.DataFrame(index=P.unique(), - columns=columns_dtype.keys()) + particle_dataframe = pd.DataFrame(index=P.unique(), columns=columns_dtype.keys()) # Set non float dtype correctly particle_dataframe = particle_dataframe.astype(columns_dtype) - T = track['frame'] - X = track['x'] - Y = track['y'] + T = track["frame"] + X = track["x"] + Y = track["y"] regions = settings["regions"] if len(regions) > 1: @@ -395,10 +400,10 @@ def extract_data(track, settings): for p in particle_dataframe.index: # Define signals t = T[P == p] - ecc = track['eccentricity'][P == p] - area_ecc = track['area_eccentricity'][P == p] + ecc = track["eccentricity"][P == p] + area_ecc = track["area_eccentricity"][P == p] # mal = track['minor_axis_length'][P == p] - area = track['area'][P == p] + area = track["area"][P == p] window_size = 7 @@ -420,16 +425,14 @@ def extract_data(track, settings): idx = area_ecc > settings["minimum_ecc"] if sum(idx) > 0: smooth_y = np.interp(x, x[idx], smooth_y[idx]) - particle_dataframe.at[p, "Round ratio"] = ( - 1.0 - float(sum(idx)) / float(len(idx))) + particle_dataframe.at[p, "Round ratio"] = 1.0 - float(sum(idx)) / float(len(idx)) else: # 0.001,0.991,0.992 are dummy variables specifically picked # to deal with coilers, see protocol. lengthX = 0.001 / len(idx) smooth_y = np.arange(0.991, 0.992, lengthX) np.random.shuffle(smooth_y) - particle_dataframe.at[p, "Round ratio"] = ( - 1.0 - float(sum(idx)) / float(len(idx))) + particle_dataframe.at[p, "Round ratio"] = 1.0 - float(sum(idx)) / float(len(idx)) # Bends bend_times = extract_bends(x, smooth_y, settings) @@ -438,9 +441,9 @@ def extract_data(track, settings): continue bl = form_bend_array(bend_times, T[P == p]) if len(bl) > 0: - bl = (np.asarray(bl, float)) + bl = np.asarray(bl, float) else: - bl = (np.array([0.0] * len(T[P == p]))) + bl = np.array([0.0] * len(T[P == p])) px_to_mm = settings["px_to_mm"] # Area @@ -454,16 +457,19 @@ def extract_data(track, settings): # Velocity particle_dataframe.at[p, "Speed"] = extract_velocity( - T[P == p], X[P == p], Y[P == p], settings) + T[P == p], X[P == p], Y[P == p], settings + ) # Max velocity: 90th percentile to avoid skewed results due to tracking # inefficiency particle_dataframe.at[p, "Max speed"] = extract_max_speed( - T[P == p], X[P == p], Y[P == p], settings) + T[P == p], X[P == p], Y[P == p], settings + ) # Move per bend particle_dataframe.at[p, "Dist per bend"] = extract_move_per_bend( - bl, T[P == p], X[P == p], Y[P == p], px_to_mm) + bl, T[P == p], X[P == p], Y[P == p], px_to_mm + ) particle_dataframe.at[p, "bends"] = bl @@ -480,13 +486,17 @@ def extract_data(track, settings): with warnings.catch_warnings(): # Ignore ptp warnings as this is a numpy bug warnings.simplefilter("ignore") - particle_dataframe.at[index, "BPM"] = ( - last_bend / np.ptp(T[P == index]) * 60 * fps) - x = (settings["limit_images_to"] / fps) + particle_dataframe.at[index, "BPM"] = last_bend / np.ptp(T[P == index]) * 60 * fps + x = settings["limit_images_to"] / fps particle_dataframe.at[index, "bends_in_movie"] = ( - last_bend / np.ptp(T[P == index]) * x * fps) + last_bend / np.ptp(T[P == index]) * x * fps + ) + particle_dataframe.at[index, "activity_index"] = ( + 
particle_dataframe.at[index, "Area"] * particle_dataframe.at[index, "BPM"] / 120 + ) particle_dataframe.at[index, "Appears in frames"] = len( - particle_dataframe.at[index, "bends"]) + particle_dataframe.at[index, "bends"] + ) # Cut off-tool for skewed statistics if settings["cutoff_filter"]: @@ -502,8 +512,9 @@ def extract_data(track, settings): frames = np.array(frames) if settings["use_average"]: - cut_off = int(np.sum(list_number) / len(list_number)) + \ - (np.sum(list_number) % len(list_number) > 0) + cut_off = int(np.sum(list_number) / len(list_number)) + ( + np.sum(list_number) % len(list_number) > 0 + ) else: cut_off = max(list_number) @@ -518,16 +529,16 @@ def extract_data(track, settings): frames=frames, original_particles=original_particles, removed_particles_cutoff=removed_particles_cutoff, - ) + ) else: cutoff_filter_data = None # Cut off-tool for boundaries (spurious worms) if settings["extra_filter"]: - mask = ( - (particle_dataframe.loc[:, "BPM"] > settings["Bends_max"]) & - (particle_dataframe.loc[:, "Speed"] < settings["Speed_max"])) + mask = (particle_dataframe.loc[:, "BPM"] > settings["Bends_max"]) & ( + particle_dataframe.loc[:, "Speed"] < settings["Speed_max"] + ) extra_filter_spurious_worms = mask.sum() particle_dataframe = particle_dataframe.loc[~mask] else: @@ -541,14 +552,15 @@ def extract_data(track, settings): if not this_reg: continue else: - this_reg = ['all'] + this_reg = ["all"] particle_dataframe.at[index, "Region"] = str(this_reg) for reg in this_reg: region_particles[reg].append(index) particle_dataframe.loc[:, "Moving"] = np.logical_or( particle_dataframe.loc[:, "BPM"] > settings["maximum_bpm"], - particle_dataframe.loc[:, "Speed"] > settings["maximum_velocity"]) + particle_dataframe.loc[:, "Speed"] > settings["maximum_velocity"], + ) return dict( cutoff_filter_data=cutoff_filter_data, @@ -563,13 +575,14 @@ def extract_data(track, settings): # --- Utilities Functions --- # ============================================================================= + def find_Z(video, settings, i0, i1): """Get thresholded image.""" # Adjust brightness: frame = video[(i0 + i1) // 2] mean_brightness = np.mean(frame) if mean_brightness > 1: - mean_brightness /= 255. 
+ mean_brightness /= 255.0 Z = np.zeros_like(frame, dtype=np.float64) if settings["darkfield"]: minv = np.zeros_like(frame, dtype=np.float64) + 256 @@ -591,16 +604,20 @@ def find_Z(video, settings, i0, i1): def find_Z_with_paralyzed(video, settings, i0, i1): """Get thresholded image with paralyzed worms.""" frame = video[(i0 + i1) // 2] - Y, X = np.meshgrid(np.arange(frame.shape[1]), - np.arange(frame.shape[0])) + Y, X = np.meshgrid(np.arange(frame.shape[1]), np.arange(frame.shape[0])) thres = cv2.adaptiveThreshold( - frame, 1, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, - cv2.THRESH_BINARY, 2 * (settings["std_px"] // 2) + 1, 0) + frame, + 1, + cv2.ADAPTIVE_THRESH_GAUSSIAN_C, + cv2.THRESH_BINARY, + 2 * (settings["std_px"] // 2) + 1, + 0, + ) mask = thres > 0.5 vals = frame[mask] x = X[mask] y = Y[mask] - Z = interpolate.griddata((x, y), vals, (X, Y), method='nearest') + Z = interpolate.griddata((x, y), vals, (X, Y), method="nearest") return Z, False @@ -614,7 +631,8 @@ def find_skel_endpoints(skel): np.array([[2, 1, 2], [0, 1, 0], [0, 0, 0]]), np.array([[1, 2, 0], [2, 1, 0], [0, 0, 0]]), np.array([[2, 0, 0], [1, 1, 0], [2, 0, 0]]), - np.array([[0, 0, 0], [2, 1, 0], [1, 2, 0]])] + np.array([[0, 0, 0], [2, 1, 0], [1, 2, 0]]), + ] ep = 0 for skel_endpoint in skel_endpoints: @@ -638,8 +656,7 @@ def prune_fully(skel_labeled): idx = np.argwhere(endpoints) reg = skel_labeled[idx[:, 0], idx[:, 1]] count = Counter(reg) - idx = np.array([idx[i, :] for i in range(len(reg)) - if count[reg[i]] > 2]) + idx = np.array([idx[i, :] for i in range(len(reg)) if count[reg[i]] > 2]) if len(idx) == 0: break endpoints[:] = 1 @@ -651,11 +668,11 @@ def prune_fully(skel_labeled): def check_for_worms(particles, settings): """Check if any worms have been detected.""" if len(particles) == 0: - with open(os.path.join(settings["save_as"], 'results.txt'), 'w') as f: - f.write('---------------------------------\n') - f.write(' Results for %s \n' % settings["video_filename"]) - f.write('---------------------------------\n\n') - f.write('No worms detected. Check your settings.\n\n') + with open(os.path.join(settings["save_as"], "results.txt"), "w") as f: + f.write("---------------------------------\n") + f.write(" Results for %s \n" % settings["video_filename"]) + f.write("---------------------------------\n\n") + f.write("No worms detected. 
Check your settings.\n\n") return False return True @@ -664,7 +681,8 @@ def make_region_paths(regions): reg_paths = {} for key, d in list(regions.items()): reg_paths[key] = mplPath.Path( - np.array(list(zip(d['x'] + [d['x'][0]], d['y'] + [d['y'][0]])))) + np.array(list(zip(d["x"] + [d["x"][0]], d["y"] + [d["y"][0]]))) + ) return reg_paths @@ -679,7 +697,7 @@ def identify_region(xs, ys, reg_paths): def extract_bends(x, smooth_y, settings): # Find extrema - ex = (np.diff(np.sign(np.diff(smooth_y))).nonzero()[0] + 1) + ex = np.diff(np.sign(np.diff(smooth_y))).nonzero()[0] + 1 if len(ex) >= 2 and ex[0] == 0: ex = ex[1:] bend_times = x[ex] @@ -722,8 +740,9 @@ def extract_velocity(tt, xx, yy, settings): dtt = -(np.roll(tt, ftev) - tt)[ftev:] dxx = (np.roll(xx, ftev) - xx)[ftev:] dyy = (np.roll(yy, ftev) - yy)[ftev:] - velocity = (settings["px_to_mm"] * settings["fps"] - * np.median(np.sqrt(dxx**2 + dyy**2) / dtt)) + velocity = ( + settings["px_to_mm"] * settings["fps"] * np.median(np.sqrt(dxx**2 + dyy**2) / dtt) + ) return velocity @@ -735,8 +754,10 @@ def extract_max_speed(tt, xx, yy, settings): dxx = (np.roll(xx, ftev) - xx)[ftev:] dyy = (np.roll(yy, ftev) - yy)[ftev:] percentile = ( - settings["px_to_mm"] * settings["fps"] * - np.percentile((np.sqrt(dxx**2 + dyy**2) / dtt), 90)) + settings["px_to_mm"] + * settings["fps"] + * np.percentile((np.sqrt(dxx**2 + dyy**2) / dtt), 90) + ) return percentile @@ -751,7 +772,7 @@ def extract_move_per_bend(bl, tt, xx, yy, px_to_mm): yi = np.interp(i, tt, yy) yj = np.interp(j, tt, yy) - dist = px_to_mm * np.sqrt((xj - xi)**2 + (yj - yi)**2) + dist = px_to_mm * np.sqrt((xj - xi) ** 2 + (yj - yi) ** 2) dists.append(dist) bend_i += 1 j = i @@ -762,140 +783,127 @@ def extract_move_per_bend(bl, tt, xx, yy, px_to_mm): return np.nan -def write_stats(settings, results, f, paralyzed_stats=True, prepend='', - mask=None): +def write_stats(settings, results, f, paralyzed_stats=True, prepend="", mask=None): stats = statistics(results, settings, mask) - f.write(f'\n-------------------------------\n{prepend}\n') + f.write(f"\n-------------------------------\n{prepend}\n") if settings["cutoff_filter"]: if mask is None: # Meaningless if mask != None - f.write('Total particles: %i\n' % - results['cutoff_filter_data']['original_particles']) + f.write("Total particles: %i\n" % results["cutoff_filter_data"]["original_particles"]) else: - f.write('Total particles: Not saved for regions\n') + f.write("Total particles: Not saved for regions\n") else: - f.write('Total particles: %i\n' % - stats['count']) + f.write("Total particles: %i\n" % stats["count"]) if paralyzed_stats and mask is None: # filters stats are only meaningful if mask == None - f.write('\nCUT-OFF tool/filters\n') + f.write("\nCUT-OFF tool/filters\n") # Not saved for cutoff_filter - f.write('Max particles present at same time: %i\n' - % stats['max_number_worms_present']) - f.write('\n') + f.write("Max particles present at same time: %i\n" % stats["max_number_worms_present"]) + f.write("\n") if settings["cutoff_filter"]: # Meaningless if mask != None - f.write('Frame number: ') - for item in results['cutoff_filter_data']["frames"]: - f.write('%i, ' % item) + f.write("Frame number: ") + for item in results["cutoff_filter_data"]["frames"]: + f.write("%i, " % item) - f.write('\n# of particles: ') - for item in results['cutoff_filter_data']["list_number"]: - f.write('%i, ' % item) + f.write("\n# of particles: ") + for item in results["cutoff_filter_data"]["list_number"]: + f.write("%i, " % item) - f.write('\nCut-off 
tool: Yes\n') + f.write("\nCut-off tool: Yes\n") if settings["use_average"]: - f.write('Method: averaging\n') + f.write("Method: averaging\n") else: - f.write('Method: maximum\n') + f.write("Method: maximum\n") f.write( - 'Removed particles: %i\n' % - results['cutoff_filter_data']['removed_particles_cutoff']) + "Removed particles: %i\n" + % results["cutoff_filter_data"]["removed_particles_cutoff"] + ) else: - f.write('Cut-off tool: No\n') + f.write("Cut-off tool: No\n") if settings["extra_filter"]: - f.write('Extra filter: Yes\n') + f.write("Extra filter: Yes\n") f.write( - 'Settings: remove when bpm > %.5f and velocity < %.5f\n' % - (settings["Bends_max"], settings["Speed_max"])) - f.write('Removed particles: %i' % - results['extra_filter_spurious_worms']) + "Settings: remove when bpm > %.5f and velocity < %.5f\n" + % (settings["Bends_max"], settings["Speed_max"]) + ) + f.write("Removed particles: %i" % results["extra_filter_spurious_worms"]) else: - f.write('Extra filter: No\n') - - f.write('\n-------------------------------\n\n') - - f.write(prepend + 'BPM Mean: %.5f\n' % stats['bpm_mean']) - f.write(prepend + 'BPM Standard deviation: %.5f\n' % stats['bpm_std']) - f.write(prepend + 'BPM Error on Mean: %.5f\n' % stats['bpm_mean_std']) - f.write(prepend + 'BPM Median: %.5f\n' % stats['bpm_median']) - - f.write(prepend + 'Bends in movie Mean: %.5f\n' % - stats['bends_in_movie_mean']) - f.write(prepend + 'Bends in movie Standard deviation: %.5f\n' % - stats['bends_in_movie_std']) - f.write(prepend + 'Bends in movie Error on Mean: %.5f\n' % - stats['bends_in_movie_mean_std']) - f.write( - prepend + - 'Bends in movie Median: %.5f\n' % - stats['bends_in_movie_median']) - - f.write(prepend + 'Speed Mean: %.6f\n' % stats['vel_mean']) - f.write(prepend + 'Speed Standard deviation: %.6f\n' % stats['vel_std']) - f.write(prepend + 'Speed Error on Mean: %.6f\n' % stats['vel_mean_std']) - f.write(prepend + 'Speed Median: %.6f\n' % stats['vel_median']) - - f.write( - prepend + - '90th Percentile speed Mean: %.6f\n' % - stats['max_speed_mean']) - f.write(prepend + '90th Percentile speed Standard deviation: %.6f\n' % - stats['max_speed_std']) - f.write(prepend + '90th Percentile speed Error on mean: %.6f\n' % - stats['max_speed_mean_std']) - if np.isnan(stats['move_per_bend_mean']): - f.write(prepend + 'Dist per bend Mean: nan\n') - f.write(prepend + 'Dist per bend Standard deviation: nan\n') - f.write(prepend + 'Dist per bend Error on Mean: nan\n') + f.write("Extra filter: No\n") + + f.write("\n-------------------------------\n\n") + + f.write(prepend + "BPM Mean: %.5f\n" % stats["bpm_mean"]) + f.write(prepend + "BPM Standard deviation: %.5f\n" % stats["bpm_std"]) + f.write(prepend + "BPM Error on Mean: %.5f\n" % stats["bpm_mean_std"]) + f.write(prepend + "BPM Median: %.5f\n" % stats["bpm_median"]) + + f.write(prepend + "Activity index Mean: %.5f\n" % stats["activity_index_mean"]) + f.write(prepend + "Activity index Standard deviation: %.5f\n" % stats["activity_index_std"]) + f.write(prepend + "Activity index Error on Mean: %.5f\n" % stats["activity_index_mean_std"]) + f.write(prepend + "Activity index Median: %.5f\n" % stats["activity_index_median"]) + + f.write(prepend + "Bends in movie Mean: %.5f\n" % stats["bends_in_movie_mean"]) + f.write(prepend + "Bends in movie Standard deviation: %.5f\n" % stats["bends_in_movie_std"]) + f.write(prepend + "Bends in movie Error on Mean: %.5f\n" % stats["bends_in_movie_mean_std"]) + f.write(prepend + "Bends in movie Median: %.5f\n" % 
stats["bends_in_movie_median"]) + + f.write(prepend + "Speed Mean: %.6f\n" % stats["vel_mean"]) + f.write(prepend + "Speed Standard deviation: %.6f\n" % stats["vel_std"]) + f.write(prepend + "Speed Error on Mean: %.6f\n" % stats["vel_mean_std"]) + f.write(prepend + "Speed Median: %.6f\n" % stats["vel_median"]) + + f.write(prepend + "90th Percentile speed Mean: %.6f\n" % stats["max_speed_mean"]) + f.write(prepend + "90th Percentile speed Standard deviation: %.6f\n" % stats["max_speed_std"]) + f.write(prepend + "90th Percentile speed Error on mean: %.6f\n" % stats["max_speed_mean_std"]) + if np.isnan(stats["move_per_bend_mean"]): + f.write(prepend + "Dist per bend Mean: nan\n") + f.write(prepend + "Dist per bend Standard deviation: nan\n") + f.write(prepend + "Dist per bend Error on Mean: nan\n") else: - f.write( - prepend + - 'Dist per bend Mean: %.6f\n' % - stats['move_per_bend_mean']) - f.write(prepend + 'Dist per bend Standard deviation: %.6f\n' % - stats['move_per_bend_std']) - f.write(prepend + 'Dist per bend Error on Mean: %.6f\n' % - stats['move_per_bend_mean_std']) + f.write(prepend + "Dist per bend Mean: %.6f\n" % stats["move_per_bend_mean"]) + f.write(prepend + "Dist per bend Standard deviation: %.6f\n" % stats["move_per_bend_std"]) + f.write(prepend + "Dist per bend Error on Mean: %.6f\n" % stats["move_per_bend_mean_std"]) if paralyzed_stats: - f.write(prepend + 'Moving worms: %i\n' % stats['n_moving']) - f.write(prepend + 'Paralyzed worms: %i\n' % stats['n_paralyzed']) - f.write(prepend + 'Total worms: %i\n' % - stats['max_number_worms_present']) - f.write(prepend + 'Moving ratio: %.6f\n' % - (float(stats['n_moving']) / stats['count'])) - f.write(prepend + 'Paralyzed ratio: %.6f\n' % - (float(stats['n_paralyzed']) / stats['count'])) - if stats['n_paralyzed'] > 0: - f.write(prepend + 'Moving-to-paralyzed ratio: %.6f\n' % (float( - stats['n_moving']) / stats['n_paralyzed'])) + f.write(prepend + "Moving worms: %i\n" % stats["n_moving"]) + f.write(prepend + "Paralyzed worms: %i\n" % stats["n_paralyzed"]) + f.write(prepend + "Total worms: %i\n" % stats["max_number_worms_present"]) + f.write(prepend + "Moving ratio: %.6f\n" % (float(stats["n_moving"]) / stats["count"])) + f.write( + prepend + "Paralyzed ratio: %.6f\n" % (float(stats["n_paralyzed"]) / stats["count"]) + ) + if stats["n_paralyzed"] > 0: + f.write( + prepend + + "Moving-to-paralyzed ratio: %.6f\n" + % (float(stats["n_moving"]) / stats["n_paralyzed"]) + ) else: - f.write(prepend + 'Moving-to-paralyzed ratio: inf\n') - if stats['n_moving'] > 0: - f.write(prepend + 'Paralyzed-to-moving ratio: %.6f\n' % (float( - stats['n_paralyzed']) / stats['n_moving'])) + f.write(prepend + "Moving-to-paralyzed ratio: inf\n") + if stats["n_moving"] > 0: + f.write( + prepend + + "Paralyzed-to-moving ratio: %.6f\n" + % (float(stats["n_paralyzed"]) / stats["n_moving"]) + ) else: - f.write(prepend + 'Paralyzed-to-moving ratio: inf\n') - f.write(prepend + 'Area Mean: %.6f\n' % stats['area_mean']) - f.write(prepend + 'Area Standard Deviation: %.6f\n' % stats['area_std']) - f.write(prepend + 'Area Error on Mean: %.6f\n' % stats['area_mean_std']) + f.write(prepend + "Paralyzed-to-moving ratio: inf\n") + f.write(prepend + "Area Mean: %.6f\n" % stats["area_mean"]) + f.write(prepend + "Area Standard Deviation: %.6f\n" % stats["area_std"]) + f.write(prepend + "Area Error on Mean: %.6f\n" % stats["area_mean_std"]) - f.write(prepend + 'Round ratio Mean: %.6f\n' % stats['round_ratio_mean']) - f.write(prepend + 'Round ratio Standard deviation: %.6f\n' % - 
stats['round_ratio_std']) - f.write(prepend + 'Round ratio Error on mean: %.6f\n' % - stats['round_ratio_mean_std']) + f.write(prepend + "Round ratio Mean: %.6f\n" % stats["round_ratio_mean"]) + f.write(prepend + "Round ratio Standard deviation: %.6f\n" % stats["round_ratio_std"]) + f.write(prepend + "Round ratio Error on mean: %.6f\n" % stats["round_ratio_mean_std"]) - f.write(prepend + 'Eccentricity Mean: %.6f\n' % stats['eccentricity_mean']) - f.write(prepend + 'Eccentricity Standard deviation: %.6f\n' % - stats['eccentricity_std']) - f.write(prepend + 'Eccentricity Error on mean: %.6f\n' % - stats['eccentricity_mean_std']) + f.write(prepend + "Eccentricity Mean: %.6f\n" % stats["eccentricity_mean"]) + f.write(prepend + "Eccentricity Standard deviation: %.6f\n" % stats["eccentricity_std"]) + f.write(prepend + "Eccentricity Error on mean: %.6f\n" % stats["eccentricity_mean_std"]) def mean_std(x, appears_in): @@ -912,15 +920,15 @@ def statistics(results, settings, mask=None): df = df.loc[mask, :] - P = results["track"]['particle'] - T = results["track"]['frame'] + P = results["track"]["particle"] + T = results["track"]["frame"] if settings["cutoff_filter"]: max_number_worms_present = len(df) else: max_number_worms_present = max( - [len([1 for p in set(P[T == t]) if p in df.index]) - for t in set(T)]) + [len([1 for p in set(P[T == t]) if p in df.index]) for t in set(T)] + ) count = len(df) n_moving = np.sum(df.loc[:, "Moving"]) n_paralyzed = len(df) - n_moving @@ -930,11 +938,13 @@ def statistics(results, settings, mask=None): bpm_median = np.median(df.loc[:, "BPM"]) bpm_mean_std = bpm_std / np.sqrt(max_number_worms_present) - bends_in_movie_mean, bends_in_movie_std = mean_std( - df.loc[:, "bends_in_movie"], appears_in) + activity_index_mean, activity_index_std = mean_std(df.loc[:, "activity_index"], appears_in) + activity_index_median = np.median(df.loc[:, "activity_index"]) + activity_index_mean_std = activity_index_std / np.sqrt(max_number_worms_present) + + bends_in_movie_mean, bends_in_movie_std = mean_std(df.loc[:, "bends_in_movie"], appears_in) bends_in_movie_median = np.median(df.loc[:, "bends_in_movie"]) - bends_in_movie_mean_std = bends_in_movie_std / \ - np.sqrt(max_number_worms_present) + bends_in_movie_mean_std = bends_in_movie_std / np.sqrt(max_number_worms_present) vel_mean, vel_std = mean_std(df.loc[:, "Speed"], appears_in) vel_mean_std = vel_std / np.sqrt(max_number_worms_present) @@ -943,94 +953,115 @@ def statistics(results, settings, mask=None): area_mean, area_std = mean_std(df.loc[:, "Area"], appears_in) area_mean_std = area_std / np.sqrt(max_number_worms_present) - max_speed_mean, max_speed_std = mean_std( - df.loc[:, "Max speed"], appears_in) + max_speed_mean, max_speed_std = mean_std(df.loc[:, "Max speed"], appears_in) max_speed_mean_std = max_speed_std / np.sqrt(max_number_worms_present) - round_ratio_mean, round_ratio_std = mean_std( - df.loc[:, "Round ratio"], appears_in) + round_ratio_mean, round_ratio_std = mean_std(df.loc[:, "Round ratio"], appears_in) round_ratio_mean_std = round_ratio_std / np.sqrt(max_number_worms_present) - eccentricity_mean, eccentricity_std = mean_std( - df.loc[:, "eccentricity"], appears_in) - eccentricity_mean_std = eccentricity_std / \ - np.sqrt(max_number_worms_present) + eccentricity_mean, eccentricity_std = mean_std(df.loc[:, "eccentricity"], appears_in) + eccentricity_mean_std = eccentricity_std / np.sqrt(max_number_worms_present) # Ignore nan particles for move_per_bend mask_appear = np.logical_not(np.isnan(df.loc[:, "Dist per 
bend"])) if np.any(mask_appear): move_per_bend_mean, move_per_bend_std = mean_std( - df.loc[mask_appear, "Dist per bend"], - df.loc[mask_appear, "Appears in frames"]) - move_per_bend_mean_std = move_per_bend_std / \ - np.sqrt(max([np.sum(mask_appear), max_number_worms_present])) + df.loc[mask_appear, "Dist per bend"], df.loc[mask_appear, "Appears in frames"] + ) + move_per_bend_mean_std = move_per_bend_std / np.sqrt( + max([np.sum(mask_appear), max_number_worms_present]) + ) else: move_per_bend_mean = np.nan move_per_bend_std = np.nan move_per_bend_mean_std = np.nan stats = { - 'max_number_worms_present': max_number_worms_present, - 'n_paralyzed': n_paralyzed, - 'n_moving': n_moving, - 'bpm_mean': bpm_mean, - 'bpm_std': bpm_std, - 'bpm_median': bpm_median, - 'bpm_mean_std': bpm_mean_std, - 'bends_in_movie_mean': bends_in_movie_mean, - 'bends_in_movie_std': bends_in_movie_std, - 'bends_in_movie_mean_std': bends_in_movie_mean_std, - 'bends_in_movie_median': bends_in_movie_median, - 'vel_mean': vel_mean, - 'vel_std': vel_std, - 'vel_mean_std': vel_mean_std, - 'vel_median': vel_median, - 'area_mean': area_mean, - 'area_std': area_std, - 'area_mean_std': area_mean_std, - 'max_speed_mean': max_speed_mean, - 'max_speed_std': max_speed_std, - 'max_speed_mean_std': max_speed_mean_std, - 'move_per_bend_mean': move_per_bend_mean, - 'move_per_bend_std': move_per_bend_std, - 'move_per_bend_mean_std': move_per_bend_mean_std, - 'count': count, - 'round_ratio_mean': round_ratio_mean, - 'round_ratio_std': round_ratio_std, - 'round_ratio_mean_std': round_ratio_mean_std, - 'eccentricity_mean': eccentricity_mean, - 'eccentricity_std': eccentricity_std, - 'eccentricity_mean_std': eccentricity_mean_std} + "max_number_worms_present": max_number_worms_present, + "n_paralyzed": n_paralyzed, + "n_moving": n_moving, + "bpm_mean": bpm_mean, + "bpm_std": bpm_std, + "bpm_median": bpm_median, + "bpm_mean_std": bpm_mean_std, + "activity_index_mean": activity_index_mean, + "activity_index_std": activity_index_std, + "activity_index_median": activity_index_median, + "activity_index_mean_std": activity_index_mean_std, + "bends_in_movie_mean": bends_in_movie_mean, + "bends_in_movie_std": bends_in_movie_std, + "bends_in_movie_mean_std": bends_in_movie_mean_std, + "bends_in_movie_median": bends_in_movie_median, + "vel_mean": vel_mean, + "vel_std": vel_std, + "vel_mean_std": vel_mean_std, + "vel_median": vel_median, + "area_mean": area_mean, + "area_std": area_std, + "area_mean_std": area_mean_std, + "max_speed_mean": max_speed_mean, + "max_speed_std": max_speed_std, + "max_speed_mean_std": max_speed_mean_std, + "move_per_bend_mean": move_per_bend_mean, + "move_per_bend_std": move_per_bend_std, + "move_per_bend_mean_std": move_per_bend_mean_std, + "count": count, + "round_ratio_mean": round_ratio_mean, + "round_ratio_std": round_ratio_std, + "round_ratio_mean_std": round_ratio_mean_std, + "eccentricity_mean": eccentricity_mean, + "eccentricity_std": eccentricity_std, + "eccentricity_mean_std": eccentricity_mean_std, + } return stats def write_particles(settings, particles_dataframe, filename): - """Write particles dataframe to csv""" - df = particles_dataframe.loc[:, [ - "BPM", "bends_in_movie", "Speed", "Max speed", "Dist per bend", - "Area", "Appears in frames", "Moving", "Region", "Round ratio", - "eccentricity"]] - - x = (settings["limit_images_to"] / settings["fps"]) + """Write particles dataframe to csv.""" + df = particles_dataframe.loc[ + :, + [ + "BPM", + "bends_in_movie", + "Speed", + "Max speed", + "Dist per bend", + 
"Area", + "Appears in frames", + "Moving", + "Region", + "Round ratio", + "eccentricity", + ], + ] + + x = settings["limit_images_to"] / settings["fps"] df.columns = [ - 'BPM', f'Bends per {x:.2f} s', 'Speed', 'Max speed', 'Dist per bend', - 'Area', 'Appears in frames', 'Moving (non-paralyzed)', 'Region', - 'Round ratio', 'Eccentricity'] + "BPM", + f"Bends per {x:.2f} s", + "Speed", + "Max speed", + "Dist per bend", + "Area", + "Appears in frames", + "Moving (non-paralyzed)", + "Region", + "Round ratio", + "Eccentricity", + ] df.to_csv(filename) def write_results_file(results, settings): df = results["particle_dataframe"] - write_particles(settings, - df, - os.path.join(settings["save_as"], 'particles.csv')) + write_particles(settings, df, os.path.join(settings["save_as"], "particles.csv")) - with open(os.path.join(settings["save_as"], 'results.txt'), 'w') as f: - f.write('---------------------------------\n') - f.write(' Results for %s \n' % settings["video_filename"]) - f.write('---------------------------------\n\n') + with open(os.path.join(settings["save_as"], "results.txt"), "w") as f: + f.write("---------------------------------\n") + f.write(" Results for %s \n" % settings["video_filename"]) + f.write("---------------------------------\n\n") # Stats for all worms write_stats(settings, results, f, paralyzed_stats=True) @@ -1038,19 +1069,20 @@ def write_results_file(results, settings): # Stats for moving worms moving_mask = df.loc[:, "Moving"] - write_stats(settings, results, f, paralyzed_stats=False, - prepend='Moving ', mask=moving_mask) + write_stats( + settings, results, f, paralyzed_stats=False, prepend="Moving ", mask=moving_mask + ) # Raw stats - f.write('---------------------------------\n\n') + f.write("---------------------------------\n\n") regions = settings["regions"] # Per region stats if len(regions) > 1: for reg in regions: - f.write('---------------------------------\n') - f.write('Stats for region: %s\n' % reg) - f.write('---------------------------------\n\n') + f.write("---------------------------------\n") + f.write("Stats for region: %s\n" % reg) + f.write("---------------------------------\n\n") # Worms of this region try: @@ -1058,64 +1090,60 @@ def write_results_file(results, settings): except TypeError: pars = [int(results["region_particles"][reg])] if len(pars) == 0: - f.write('Nothing found in region.\n\n') + f.write("Nothing found in region.\n\n") continue indices = [idx for idx in pars if idx in df.index] # All worms - write_stats(settings, results, f, paralyzed_stats=True, - mask=indices) + write_stats(settings, results, f, paralyzed_stats=True, mask=indices) - f.write('\n\n') - f.write('\n') + f.write("\n\n") + f.write("\n") - print('results.txt file produced.') + print("results.txt file produced.") # ============================================================================= # --- Matplotlib code--- # ============================================================================= def print_frame(settings, t, P, T, bends, track): - font = {'size': settings["font_size"]} - print('Printing frame', t + 1) - image_filename = os.path.join( - settings["save_as"], 'imgs', '%05d.jpg' % (int(t))) - frame = (255 - io.imread(image_filename)) + font = {"size": settings["font_size"]} + print("Printing frame", t + 1) + image_filename = os.path.join(settings["save_as"], "imgs", "%05d.jpg" % (int(t))) + frame = 255 - io.imread(image_filename) os.remove(image_filename) small_imshow(settings, frame, cmap=cm.binary, vmax=300) for p in bends.index: pp = P == p l = 
np.logical_and(pp, T == t) if np.sum(l) > 0: - x = track['x'][l].iloc[0] - y = track['y'][l].iloc[0] + x = track["x"][l].iloc[0] + y = track["y"][l].iloc[0] b = bends[p][np.sum(T[pp] < t)] - plt.text(y + 3, x + 3, 'p=%i\n%.1f' % - (p, b), font, color=[1, 0.3, 0.2]) + plt.text(y + 3, x + 3, "p=%i\n%.1f" % (p, b), font, color=[1, 0.3, 0.2]) m, n = frame.shape plt.plot( - [n - (5 + settings["scale_bar_size"] / float(settings["px_to_mm"])), - n - 5], + [n - (5 + settings["scale_bar_size"] / float(settings["px_to_mm"])), n - 5], [m - 5, m - 5], - linewidth=settings["scale_bar_thickness"], c=[0.5, 0.5, 0.5]) - plt.axis('off') - plt.axis('tight') - plt.savefig(os.path.join(settings["save_as"], 'imgs', '%05d.jpg' % (t))) + linewidth=settings["scale_bar_thickness"], + c=[0.5, 0.5, 0.5], + ) + plt.axis("off") + plt.axis("tight") + plt.savefig(os.path.join(settings["save_as"], "imgs", "%05d.jpg" % (t))) def print_images(settings, bends): plt.gcf().set_size_inches(20, 20) plt.clf() - with open(os.path.join(settings["save_as"], 'track.p'), - 'br') as trackfile: + with open(os.path.join(settings["save_as"], "track.p"), "br") as trackfile: track = pickle.load(trackfile) - P = track['particle'] - T = track['frame'] + P = track["particle"] + T = track["frame"] output_overlayed_images = settings["output_overlayed_images"] if output_overlayed_images != 0: - up_to = (len(set(T)) if output_overlayed_images is None - else output_overlayed_images) + up_to = len(set(T)) if output_overlayed_images is None else output_overlayed_images for t in range(up_to): print_frame(settings, t, P, T, bends, track) plt.clf() @@ -1131,58 +1159,86 @@ def small_imshow(settings, img, *args, **kwargs): img = resize( np.asarray(img, float), (int(img.shape[0] * factor), int(img.shape[1] * factor)), - preserve_range=True) + preserve_range=True, + ) plt.clf() - plt.imshow(img, *args, extent=[0, original_shape[1], - original_shape[0], 0], **kwargs) + plt.imshow(img, *args, extent=[0, original_shape[1], original_shape[0], 0], **kwargs) def output_processing_frames( - settings, save_folder, frameorig, Z, frame, thresholded, - frame_after_open, frame_after_close, labeled, - labeled_removed, skel_labeled=None): + settings, + save_folder, + frameorig, + Z, + frame, + thresholded, + frame_after_open, + frame_after_close, + labeled, + labeled_removed, + skel_labeled=None, +): plt.gcf().set_size_inches(20, 20) plt.clf() small_imshow(settings, frameorig, cmap=cm.gray) - plt.savefig(os.path.join(save_folder, '0frameorig.jpg')) + plt.savefig(os.path.join(save_folder, "0frameorig.jpg")) small_imshow(settings, Z, cmap=cm.gray) - plt.savefig(os.path.join(save_folder, '0z.jpg')) + plt.savefig(os.path.join(save_folder, "0z.jpg")) small_imshow(settings, frame, cmap=cm.gray) - plt.savefig(os.path.join(save_folder, '1framesubtract.jpg')) + plt.savefig(os.path.join(save_folder, "1framesubtract.jpg")) small_imshow(settings, thresholded, cmap=cm.binary) - plt.savefig(os.path.join(save_folder, '2thresholded.jpg')) + plt.savefig(os.path.join(save_folder, "2thresholded.jpg")) small_imshow(settings, frame_after_open, cmap=cm.binary) - plt.savefig(os.path.join(save_folder, '3opened.jpg')) + plt.savefig(os.path.join(save_folder, "3opened.jpg")) small_imshow(settings, frame_after_close, cmap=cm.binary) - plt.savefig(os.path.join(save_folder, '4closed.jpg')) + plt.savefig(os.path.join(save_folder, "4closed.jpg")) small_imshow(settings, labeled, cmap=cm.binary) - plt.savefig(os.path.join(save_folder, '5labelled.jpg')) + plt.savefig(os.path.join(save_folder, 
"5labelled.jpg")) small_imshow(settings, labeled_removed, cmap=cm.binary) - plt.savefig(os.path.join(save_folder, '6removed.jpg')) + plt.savefig(os.path.join(save_folder, "6removed.jpg")) if skel_labeled is not None: small_imshow(settings, skel_labeled, cmap=cm.binary) - plt.savefig(os.path.join(save_folder, '7skeletonized.jpg')) + plt.savefig(os.path.join(save_folder, "7skeletonized.jpg")) plt.clf() def print_example_frame( - settings, sizes, save_folder, frameorig, Z, frame, thresholded, - frame_after_open, frame_after_close, labeled, labeled_removed, - skel_labeled): - print('Sizes:') + settings, + sizes, + save_folder, + frameorig, + Z, + frame, + thresholded, + frame_after_open, + frame_after_close, + labeled, + labeled_removed, + skel_labeled, +): + print("Sizes:") print(sizes) output_processing_frames( - settings, save_folder, frameorig, Z, frame, thresholded, - frame_after_open, frame_after_close, labeled, labeled_removed, - (skel_labeled if settings["skeletonize"] else None)) - print('Example frame outputted!') + settings, + save_folder, + frameorig, + Z, + frame, + thresholded, + frame_after_open, + frame_after_close, + labeled, + labeled_removed, + (skel_labeled if settings["skeletonize"] else None), + ) + print("Example frame outputted!") From a02f676c77dcb1810150d371aae4fcc446e1f45d Mon Sep 17 00:00:00 2001 From: cnstll Date: Wed, 3 Aug 2022 18:02:02 +0200 Subject: [PATCH 02/25] Integration of Activity index into particles.csv file --- WF_NTP/WF_NTP/WF_NTP_script.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/WF_NTP/WF_NTP/WF_NTP_script.py b/WF_NTP/WF_NTP/WF_NTP_script.py index 106f596..dc6a71b 100755 --- a/WF_NTP/WF_NTP/WF_NTP_script.py +++ b/WF_NTP/WF_NTP/WF_NTP_script.py @@ -1023,6 +1023,7 @@ def write_particles(settings, particles_dataframe, filename): :, [ "BPM", + "activity_index", "bends_in_movie", "Speed", "Max speed", @@ -1039,6 +1040,7 @@ def write_particles(settings, particles_dataframe, filename): x = settings["limit_images_to"] / settings["fps"] df.columns = [ "BPM", + "Activity Index", f"Bends per {x:.2f} s", "Speed", "Max speed", From ce41cf2237263ed48980971da147e903594bb6bd Mon Sep 17 00:00:00 2001 From: cnstll Date: Wed, 3 Aug 2022 17:54:43 +0200 Subject: [PATCH 03/25] =?UTF-8?q?Calculation=20of=20Activity=20Index=20|?= =?UTF-8?q?=C2=A0Printing=20of=20global=20statistics?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- WF_NTP/WF_NTP/WF_NTP_script.py | 844 ++++++++++++++++++--------------- 1 file changed, 450 insertions(+), 394 deletions(-) diff --git a/WF_NTP/WF_NTP/WF_NTP_script.py b/WF_NTP/WF_NTP/WF_NTP_script.py index 1792ab0..bbef080 100755 --- a/WF_NTP/WF_NTP/WF_NTP_script.py +++ b/WF_NTP/WF_NTP/WF_NTP_script.py @@ -1,5 +1,4 @@ -""" -Copyright (C) 2019 Quentin Peter +"""Copyright (C) 2019 Quentin Peter. This file is part of WF_NTP. @@ -7,28 +6,29 @@ recieved a copy of the licence along with WF_NTP. If not, see https://creativecommons.org/licenses/by-nc-sa/4.0/. 
""" -import numpy as np -import matplotlib.pyplot as plt -import matplotlib.cm as cm -from scipy import interpolate, ndimage -import cv2 +import functools +import json import os +import pickle +import sys import time +import traceback +import warnings +from collections import Counter, defaultdict + +import cv2 import mahotas as mh +import matplotlib.cm as cm +import matplotlib.path as mplPath +import matplotlib.pyplot as plt +import numpy as np import pandas as pd -import trackpy as tp -from skimage import measure, morphology, io import skimage.draw -import pickle -import warnings -import matplotlib.path as mplPath -from collections import defaultdict, Counter -from skimage.transform import resize -import traceback +import trackpy as tp +from scipy import interpolate, ndimage from scipy.signal import savgol_filter -import functools -import sys -import json +from skimage import io, measure, morphology +from skimage.transform import resize class StdoutRedirector(object): @@ -37,7 +37,7 @@ class StdoutRedirector(object): def __init__(self, queue, prefix=None): self.queue = queue if not prefix: - prefix = '' + prefix = "" self.prefix = prefix def write(self, string): @@ -52,20 +52,17 @@ def flush(self): def save_settings(settings): # Make output directory try: - os.mkdir(settings['save_as']) + os.mkdir(settings["save_as"]) except OSError: - print( - 'Warning: job folder "%s" already created, overwriting.' % - settings['save_as']) + print('Warning: job folder "%s" already created, overwriting.' % settings["save_as"]) - settingsfilename = os.path.join(settings['save_as'], 'settings.json') - with open(settingsfilename, 'w') as f: + settingsfilename = os.path.join(settings["save_as"], "settings.json") + with open(settingsfilename, "w") as f: json.dump(settings, f, indent=4) def run_tracker(settings, stdout_queue=None): - """ - Run the tracker with the given settings. + """Run the tracker with the given settings. stdout_queue can be used to redirect stdout. """ @@ -77,14 +74,14 @@ def run_tracker(settings, stdout_queue=None): # Do some adjustments settings = settings.copy() - settings["frames_to_estimate_velocity"] = min([ - settings["frames_to_estimate_velocity"], - settings["min_track_length"]]) - settings["bend_threshold"] /= 100. 
+ settings["frames_to_estimate_velocity"] = min( + [settings["frames_to_estimate_velocity"], settings["min_track_length"]] + ) + settings["bend_threshold"] /= 100.0 video = Video(settings, grey=True) - print('Video shape:', video[0].shape) + print("Video shape:", video[0].shape) regions = settings["regions"] try: @@ -98,13 +95,12 @@ def run_tracker(settings, stdout_queue=None): all_regions = np.zeros_like(video[0]) for key, d in list(regions.items()): im = np.zeros_like(video[0]) - rr, cc = skimage.draw.polygon(np.array(d['y']), np.array(d['x'])) + rr, cc = skimage.draw.polygon(np.array(d["y"]), np.array(d["x"])) try: im[rr, cc] = 1 except IndexError: - print('Region "', key, '" cannot be applied to video', - settings["video_filename"]) - print('Input image sizes do not match.') + print('Region "', key, '" cannot be applied to video', settings["video_filename"]) + print("Input image sizes do not match.") return None, None all_regions += im all_regions = all_regions > 0.1 @@ -113,7 +109,7 @@ def run_tracker(settings, stdout_queue=None): t0 = time.time() save_folder = settings["save_as"] - ims_folder = os.path.join(save_folder, 'imgs') + ims_folder = os.path.join(save_folder, "imgs") if not os.path.exists(ims_folder): os.mkdir(ims_folder) @@ -125,16 +121,15 @@ def run_tracker(settings, stdout_queue=None): track = form_trajectories(locations, settings) results = extract_data(track, settings) - if not check_for_worms(results["particle_dataframe"].index, - settings): - print('No worms detected. Stopping!') + if not check_for_worms(results["particle_dataframe"].index, settings): + print("No worms detected. Stopping!") return print_data, None # Output write_results_file(results, settings) - print('Done (in %.1f minutes).' % ((time.time() - t0) / 60.)) + print("Done (in %.1f minutes)." 
% ((time.time() - t0) / 60.0)) video.release() - return print_data, results['particle_dataframe'].loc[:, "bends"] + return print_data, results["particle_dataframe"].loc[:, "bends"] class Video: @@ -148,12 +143,10 @@ def __init__(self, settings, grey=False): raise RuntimeError(f"{video_filename} does not exist.") self.cap = cv2.VideoCapture(video_filename) - self.len = (self.cap.get(cv2.CAP_PROP_FRAME_COUNT) - - settings["start_frame"]) + self.len = self.cap.get(cv2.CAP_PROP_FRAME_COUNT) - settings["start_frame"] self.start_frame = settings["start_frame"] limit_images_to = settings["limit_images_to"] - if (limit_images_to and limit_images_to < ( - self.len - self.start_frame)): + if limit_images_to and limit_images_to < (self.len - self.start_frame): self.len = limit_images_to self.grey = grey if grey: @@ -202,6 +195,7 @@ def release(self): def track_all_locations(video, settings, stdout_queue): """Track and get all locations.""" + def get_Z_brightness(zi): if settings["keep_paralyzed_method"]: return find_Z_with_paralyzed(video, settings, *zi) @@ -209,19 +203,19 @@ def get_Z_brightness(zi): return find_Z(video, settings, *zi) apply_indeces = list( - map(int, list(np.linspace(0, len(video), - len(video) // settings["use_images"] + 2)))) + map(int, list(np.linspace(0, len(video), len(video) // settings["use_images"] + 2))) + ) apply_indeces = list(zip(apply_indeces[:-1], apply_indeces[1:])) - Z_indeces = [(max([0, i - settings["use_around"]]), - min(j + settings["use_around"], len(video))) - for i, j in apply_indeces] + Z_indeces = [ + (max([0, i - settings["use_around"]]), min(j + settings["use_around"], len(video))) + for i, j in apply_indeces + ] # Get frames0 print material Z, mean_brightness = get_Z_brightness(Z_indeces[0]) - print_data = process_frame(settings, Z, mean_brightness, - len(video), - args=(0, video[0]), - return_plot=True) + print_data = process_frame( + settings, Z, mean_brightness, len(video), args=(0, video[0]), return_plot=True + ) if settings["stop_after_example_output"]: return print_data, None @@ -232,8 +226,7 @@ def get_Z_brightness(zi): def locate(args): i, zi = args Z, mean_brightness = get_Z_brightness(zi) - return process_frames(video, settings, *i, Z=Z, - mean_brightness=mean_brightness) + return process_frames(video, settings, *i, Z=Z, mean_brightness=mean_brightness) split_results = list(map(locate, args)) locations = [] @@ -242,12 +235,13 @@ def locate(args): return print_data, locations -def process_frame(settings, Z, mean_brightness, nframes, - args=None, return_plot=False): +def process_frame(settings, Z, mean_brightness, nframes, args=None, return_plot=False): """Locate worms in a given frame.""" i, frameorig = args - print(' : Locating in frame %i/%i' % (i + 1 + settings["start_frame"], - nframes + settings["start_frame"])) + print( + " : Locating in frame %i/%i" + % (i + 1 + settings["start_frame"], nframes + settings["start_frame"]) + ) if mean_brightness: frame = frameorig * mean_brightness / np.mean(frameorig) @@ -255,46 +249,47 @@ def process_frame(settings, Z, mean_brightness, nframes, frame = np.array(frameorig, dtype=np.float64) frame = np.abs(frame - Z) * settings["all_regions"] if (frame > 1.1).any(): - frame /= 255. + frame /= 255.0 - thresholded = frame > (settings["threshold"] / 255.) 
+    thresholded = frame > (settings["threshold"] / 255.0)
     opening = settings["opening"]
     closing = settings["closing"]
     save_folder = settings["save_as"]
     if opening > 0:
         frame_after_open = ndimage.binary_opening(
-            thresholded,
-            structure=np.ones((opening, opening))).astype(np.int)
+            thresholded, structure=np.ones((opening, opening))
+        ).astype(int)
     else:
         frame_after_open = thresholded

     if closing > 0:
         frame_after_close = ndimage.binary_closing(
-            frame_after_open,
-            structure=np.ones((closing, closing))).astype(np.int)
+            frame_after_open, structure=np.ones((closing, closing))
+        ).astype(int)
     else:
         frame_after_close = frame_after_open

-    labeled, _ = mh.label(frame_after_close, np.ones(
-        (3, 3), bool))
+    labeled, _ = mh.label(frame_after_close, np.ones((3, 3), bool))
     sizes = mh.labeled.labeled_size(labeled)

-    remove = np.where(np.logical_or(sizes < settings["min_size"],
-                                    sizes > settings["max_size"]))
+    remove = np.where(np.logical_or(sizes < settings["min_size"], sizes > settings["max_size"]))
     labeled_removed = mh.labeled.remove_regions(labeled, remove)
     labeled_removed, n_left = mh.labeled.relabel(labeled_removed)

     props = measure.regionprops(labeled_removed)
-    prop_list = [{"area": props[j].area, "centroid":props[j].centroid,
-                  "eccentricity":props[j].eccentricity,
-                  "area_eccentricity":props[j].eccentricity,
-                  "minor_axis_length":props[j].minor_axis_length /
-                  (props[j].major_axis_length + 0.001)}
-                 for j in range(len(props))]
+    prop_list = [
+        {
+            "area": props[j].area,
+            "centroid": props[j].centroid,
+            "eccentricity": props[j].eccentricity,
+            "area_eccentricity": props[j].eccentricity,
+            "minor_axis_length": props[j].minor_axis_length / (props[j].major_axis_length + 0.001),
+        }
+        for j in range(len(props))
+    ]
     if settings["skeletonize"]:
         skeletonized_frame = morphology.skeletonize(frame_after_close)
-        skeletonized_frame = prune(skeletonized_frame,
-                                   settings["prune_size"])
+        skeletonized_frame = prune(skeletonized_frame, settings["prune_size"])

         skel_labeled = labeled_removed * skeletonized_frame
         if settings["do_full_prune"]:
@@ -304,28 +299,39 @@ def process_frame(settings, Z, mean_brightness, nframes,
         for j in range(len(skel_props)):
             prop_list[j]["length"] = skel_props[j].area
             prop_list[j]["eccentricity"] = skel_props[j].eccentricity
-            prop_list[j]["minor_axis_length"] = \
-                skel_props[j].minor_axis_length\
-                / (skel_props[j].major_axis_length + 0.001)
+            prop_list[j]["minor_axis_length"] = skel_props[j].minor_axis_length / (
+                skel_props[j].major_axis_length + 0.001
+            )

     if return_plot:
-        return (sizes, save_folder, frameorig, Z, frame, thresholded,
-                frame_after_open, frame_after_close, labeled, labeled_removed,
-                (skel_labeled if settings["skeletonize"] else None))
+        return (
+            sizes,
+            save_folder,
+            frameorig,
+            Z,
+            frame,
+            thresholded,
+            frame_after_open,
+            frame_after_close,
+            labeled,
+            labeled_removed,
+            (skel_labeled if settings["skeletonize"] else None),
+        )

     output_overlayed_images = settings["output_overlayed_images"]
     if i < output_overlayed_images or output_overlayed_images is None:
-        io.imsave(os.path.join(save_folder, "imgs", '%05d.jpg' % (i)),
-                  np.array(255 * (labeled_removed == 0), dtype=np.uint8),
-                  check_contrast=False)
+        io.imsave(
+            os.path.join(save_folder, "imgs", "%05d.jpg" % (i)),
+            np.array(255 * (labeled_removed == 0), dtype=np.uint8),
+            check_contrast=False,
+        )

     return prop_list


 def process_frames(video, settings, i0, i1, Z, mean_brightness):
     """Process frames from i0 to i1."""
-    func = functools.partial(
-        process_frame, settings, Z, mean_brightness,
len(video)) + func = functools.partial(process_frame, settings, Z, mean_brightness, len(video)) def args(): for i in range(i0, i1): @@ -336,56 +342,55 @@ def args(): def form_trajectories(loc, settings): """Form worm trajectories.""" - print('Forming worm trajectories...', end=' ') - data = {'x': [], 'y': [], 'frame': [], - 'eccentricity': [], 'area': [], - 'minor_axis_length': [], - 'area_eccentricity': []} + print("Forming worm trajectories...", end=" ") + data = { + "x": [], + "y": [], + "frame": [], + "eccentricity": [], + "area": [], + "minor_axis_length": [], + "area_eccentricity": [], + } for t, l in enumerate(loc): - data['x'] += [d['centroid'][0] for d in l] - data['y'] += [d['centroid'][1] for d in l] - data['eccentricity'] += [d['eccentricity'] for d in l] - data['area_eccentricity'] += [d['area_eccentricity'] for d in l] - data['minor_axis_length'] += [d['minor_axis_length'] for d in l] - data['area'] += [d['area'] for d in l] - data['frame'] += [t] * len(l) + data["x"] += [d["centroid"][0] for d in l] + data["y"] += [d["centroid"][1] for d in l] + data["eccentricity"] += [d["eccentricity"] for d in l] + data["area_eccentricity"] += [d["area_eccentricity"] for d in l] + data["minor_axis_length"] += [d["minor_axis_length"] for d in l] + data["area"] += [d["area"] for d in l] + data["frame"] += [t] * len(l) data = pd.DataFrame(data) try: - track = tp.link_df(data, search_range=settings["max_dist_move"], - memory=settings["memory"]) + track = tp.link_df(data, search_range=settings["max_dist_move"], memory=settings["memory"]) except tp.linking.SubnetOversizeException: raise RuntimeError( - 'Linking problem too complex.' - ' Reduce maximum move distance or memory.') - track = tp.filter_stubs(track, min([settings["min_track_length"], - len(loc)])) + "Linking problem too complex." " Reduce maximum move distance or memory." + ) + track = tp.filter_stubs(track, min([settings["min_track_length"], len(loc)])) try: - with open(os.path.join(settings["save_as"], 'track.p'), - 'bw') as trackfile: + with open(os.path.join(settings["save_as"], "track.p"), "bw") as trackfile: pickle.dump(track, trackfile) except Exception: traceback.print_exc() - print('Warning: no track file saved. Track too long.') - print(' plot_path.py will not work on this file.') + print("Warning: no track file saved. 
Track too long.") + print(" plot_path.py will not work on this file.") return track def extract_data(track, settings): """Extract data from track and return a pandas DataFrame.""" - P = track['particle'] - columns_dtype = { - "bends": object - } + P = track["particle"] + columns_dtype = {"bends": object} # Use particle as index - particle_dataframe = pd.DataFrame(index=P.unique(), - columns=columns_dtype.keys()) + particle_dataframe = pd.DataFrame(index=P.unique(), columns=columns_dtype.keys()) # Set non float dtype correctly particle_dataframe = particle_dataframe.astype(columns_dtype) - T = track['frame'] - X = track['x'] - Y = track['y'] + T = track["frame"] + X = track["x"] + Y = track["y"] regions = settings["regions"] if len(regions) > 1: @@ -395,10 +400,10 @@ def extract_data(track, settings): for p in particle_dataframe.index: # Define signals t = T[P == p] - ecc = track['eccentricity'][P == p] - area_ecc = track['area_eccentricity'][P == p] + ecc = track["eccentricity"][P == p] + area_ecc = track["area_eccentricity"][P == p] # mal = track['minor_axis_length'][P == p] - area = track['area'][P == p] + area = track["area"][P == p] window_size = 7 @@ -420,16 +425,14 @@ def extract_data(track, settings): idx = area_ecc > settings["minimum_ecc"] if sum(idx) > 0: smooth_y = np.interp(x, x[idx], smooth_y[idx]) - particle_dataframe.at[p, "Round ratio"] = ( - 1.0 - float(sum(idx)) / float(len(idx))) + particle_dataframe.at[p, "Round ratio"] = 1.0 - float(sum(idx)) / float(len(idx)) else: # 0.001,0.991,0.992 are dummy variables specifically picked # to deal with coilers, see protocol. lengthX = 0.001 / len(idx) smooth_y = np.arange(0.991, 0.992, lengthX) np.random.shuffle(smooth_y) - particle_dataframe.at[p, "Round ratio"] = ( - 1.0 - float(sum(idx)) / float(len(idx))) + particle_dataframe.at[p, "Round ratio"] = 1.0 - float(sum(idx)) / float(len(idx)) # Bends bend_times = extract_bends(x, smooth_y, settings) @@ -438,9 +441,9 @@ def extract_data(track, settings): continue bl = form_bend_array(bend_times, T[P == p]) if len(bl) > 0: - bl = (np.asarray(bl, float)) + bl = np.asarray(bl, float) else: - bl = (np.array([0.0] * len(T[P == p]))) + bl = np.array([0.0] * len(T[P == p])) px_to_mm = settings["px_to_mm"] # Area @@ -454,16 +457,19 @@ def extract_data(track, settings): # Velocity particle_dataframe.at[p, "Speed"] = extract_velocity( - T[P == p], X[P == p], Y[P == p], settings) + T[P == p], X[P == p], Y[P == p], settings + ) # Max velocity: 90th percentile to avoid skewed results due to tracking # inefficiency particle_dataframe.at[p, "Max speed"] = extract_max_speed( - T[P == p], X[P == p], Y[P == p], settings) + T[P == p], X[P == p], Y[P == p], settings + ) # Move per bend particle_dataframe.at[p, "Dist per bend"] = extract_move_per_bend( - bl, T[P == p], X[P == p], Y[P == p], px_to_mm) + bl, T[P == p], X[P == p], Y[P == p], px_to_mm + ) particle_dataframe.at[p, "bends"] = bl @@ -480,13 +486,17 @@ def extract_data(track, settings): with warnings.catch_warnings(): # Ignore ptp warnings as this is a numpy bug warnings.simplefilter("ignore") - particle_dataframe.at[index, "BPM"] = ( - last_bend / np.ptp(T[P == index]) * 60 * fps) - x = (settings["limit_images_to"] / fps) + particle_dataframe.at[index, "BPM"] = last_bend / np.ptp(T[P == index]) * 60 * fps + x = settings["limit_images_to"] / fps particle_dataframe.at[index, "bends_in_movie"] = ( - last_bend / np.ptp(T[P == index]) * x * fps) + last_bend / np.ptp(T[P == index]) * x * fps + ) + particle_dataframe.at[index, "activity_index"] = ( + 
particle_dataframe.at[index, "Area"] * particle_dataframe.at[index, "BPM"] / 120 + ) particle_dataframe.at[index, "Appears in frames"] = len( - particle_dataframe.at[index, "bends"]) + particle_dataframe.at[index, "bends"] + ) # Cut off-tool for skewed statistics if settings["cutoff_filter"]: @@ -502,8 +512,9 @@ def extract_data(track, settings): frames = np.array(frames) if settings["use_average"]: - cut_off = int(np.sum(list_number) / len(list_number)) + \ - (np.sum(list_number) % len(list_number) > 0) + cut_off = int(np.sum(list_number) / len(list_number)) + ( + np.sum(list_number) % len(list_number) > 0 + ) else: cut_off = max(list_number) @@ -518,16 +529,16 @@ def extract_data(track, settings): frames=frames, original_particles=original_particles, removed_particles_cutoff=removed_particles_cutoff, - ) + ) else: cutoff_filter_data = None # Cut off-tool for boundaries (spurious worms) if settings["extra_filter"]: - mask = ( - (particle_dataframe.loc[:, "BPM"] > settings["Bends_max"]) & - (particle_dataframe.loc[:, "Speed"] < settings["Speed_max"])) + mask = (particle_dataframe.loc[:, "BPM"] > settings["Bends_max"]) & ( + particle_dataframe.loc[:, "Speed"] < settings["Speed_max"] + ) extra_filter_spurious_worms = mask.sum() particle_dataframe = particle_dataframe.loc[~mask] else: @@ -541,14 +552,15 @@ def extract_data(track, settings): if not this_reg: continue else: - this_reg = ['all'] + this_reg = ["all"] particle_dataframe.at[index, "Region"] = str(this_reg) for reg in this_reg: region_particles[reg].append(index) particle_dataframe.loc[:, "Moving"] = np.logical_or( particle_dataframe.loc[:, "BPM"] > settings["maximum_bpm"], - particle_dataframe.loc[:, "Speed"] > settings["maximum_velocity"]) + particle_dataframe.loc[:, "Speed"] > settings["maximum_velocity"], + ) return dict( cutoff_filter_data=cutoff_filter_data, @@ -563,13 +575,14 @@ def extract_data(track, settings): # --- Utilities Functions --- # ============================================================================= + def find_Z(video, settings, i0, i1): """Get thresholded image.""" # Adjust brightness: frame = video[(i0 + i1) // 2] mean_brightness = np.mean(frame) if mean_brightness > 1: - mean_brightness /= 255. 
+ mean_brightness /= 255.0 Z = np.zeros_like(frame, dtype=np.float64) if settings["darkfield"]: minv = np.zeros_like(frame, dtype=np.float64) + 256 @@ -591,16 +604,20 @@ def find_Z(video, settings, i0, i1): def find_Z_with_paralyzed(video, settings, i0, i1): """Get thresholded image with paralyzed worms.""" frame = video[(i0 + i1) // 2] - Y, X = np.meshgrid(np.arange(frame.shape[1]), - np.arange(frame.shape[0])) + Y, X = np.meshgrid(np.arange(frame.shape[1]), np.arange(frame.shape[0])) thres = cv2.adaptiveThreshold( - frame, 1, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, - cv2.THRESH_BINARY, 2 * (settings["std_px"] // 2) + 1, 0) + frame, + 1, + cv2.ADAPTIVE_THRESH_GAUSSIAN_C, + cv2.THRESH_BINARY, + 2 * (settings["std_px"] // 2) + 1, + 0, + ) mask = thres > 0.5 vals = frame[mask] x = X[mask] y = Y[mask] - Z = interpolate.griddata((x, y), vals, (X, Y), method='nearest') + Z = interpolate.griddata((x, y), vals, (X, Y), method="nearest") return Z, False @@ -614,7 +631,8 @@ def find_skel_endpoints(skel): np.array([[2, 1, 2], [0, 1, 0], [0, 0, 0]]), np.array([[1, 2, 0], [2, 1, 0], [0, 0, 0]]), np.array([[2, 0, 0], [1, 1, 0], [2, 0, 0]]), - np.array([[0, 0, 0], [2, 1, 0], [1, 2, 0]])] + np.array([[0, 0, 0], [2, 1, 0], [1, 2, 0]]), + ] ep = 0 for skel_endpoint in skel_endpoints: @@ -638,8 +656,7 @@ def prune_fully(skel_labeled): idx = np.argwhere(endpoints) reg = skel_labeled[idx[:, 0], idx[:, 1]] count = Counter(reg) - idx = np.array([idx[i, :] for i in range(len(reg)) - if count[reg[i]] > 2]) + idx = np.array([idx[i, :] for i in range(len(reg)) if count[reg[i]] > 2]) if len(idx) == 0: break endpoints[:] = 1 @@ -651,11 +668,11 @@ def prune_fully(skel_labeled): def check_for_worms(particles, settings): """Check if any worms have been detected.""" if len(particles) == 0: - with open(os.path.join(settings["save_as"], 'results.txt'), 'w') as f: - f.write('---------------------------------\n') - f.write(' Results for %s \n' % settings["video_filename"]) - f.write('---------------------------------\n\n') - f.write('No worms detected. Check your settings.\n\n') + with open(os.path.join(settings["save_as"], "results.txt"), "w") as f: + f.write("---------------------------------\n") + f.write(" Results for %s \n" % settings["video_filename"]) + f.write("---------------------------------\n\n") + f.write("No worms detected. 
Check your settings.\n\n") return False return True @@ -664,7 +681,8 @@ def make_region_paths(regions): reg_paths = {} for key, d in list(regions.items()): reg_paths[key] = mplPath.Path( - np.array(list(zip(d['x'] + [d['x'][0]], d['y'] + [d['y'][0]])))) + np.array(list(zip(d["x"] + [d["x"][0]], d["y"] + [d["y"][0]]))) + ) return reg_paths @@ -679,7 +697,7 @@ def identify_region(xs, ys, reg_paths): def extract_bends(x, smooth_y, settings): # Find extrema - ex = (np.diff(np.sign(np.diff(smooth_y))).nonzero()[0] + 1) + ex = np.diff(np.sign(np.diff(smooth_y))).nonzero()[0] + 1 if len(ex) >= 2 and ex[0] == 0: ex = ex[1:] bend_times = x[ex] @@ -722,8 +740,9 @@ def extract_velocity(tt, xx, yy, settings): dtt = -(np.roll(tt, ftev) - tt)[ftev:] dxx = (np.roll(xx, ftev) - xx)[ftev:] dyy = (np.roll(yy, ftev) - yy)[ftev:] - velocity = (settings["px_to_mm"] * settings["fps"] - * np.median(np.sqrt(dxx**2 + dyy**2) / dtt)) + velocity = ( + settings["px_to_mm"] * settings["fps"] * np.median(np.sqrt(dxx**2 + dyy**2) / dtt) + ) return velocity @@ -735,8 +754,10 @@ def extract_max_speed(tt, xx, yy, settings): dxx = (np.roll(xx, ftev) - xx)[ftev:] dyy = (np.roll(yy, ftev) - yy)[ftev:] percentile = ( - settings["px_to_mm"] * settings["fps"] * - np.percentile((np.sqrt(dxx**2 + dyy**2) / dtt), 90)) + settings["px_to_mm"] + * settings["fps"] + * np.percentile((np.sqrt(dxx**2 + dyy**2) / dtt), 90) + ) return percentile @@ -751,7 +772,7 @@ def extract_move_per_bend(bl, tt, xx, yy, px_to_mm): yi = np.interp(i, tt, yy) yj = np.interp(j, tt, yy) - dist = px_to_mm * np.sqrt((xj - xi)**2 + (yj - yi)**2) + dist = px_to_mm * np.sqrt((xj - xi) ** 2 + (yj - yi) ** 2) dists.append(dist) bend_i += 1 j = i @@ -762,140 +783,127 @@ def extract_move_per_bend(bl, tt, xx, yy, px_to_mm): return np.nan -def write_stats(settings, results, f, paralyzed_stats=True, prepend='', - mask=None): +def write_stats(settings, results, f, paralyzed_stats=True, prepend="", mask=None): stats = statistics(results, settings, mask) - f.write(f'\n-------------------------------\n{prepend}\n') + f.write(f"\n-------------------------------\n{prepend}\n") if settings["cutoff_filter"]: if mask is None: # Meaningless if mask != None - f.write('Total particles: %i\n' % - results['cutoff_filter_data']['original_particles']) + f.write("Total particles: %i\n" % results["cutoff_filter_data"]["original_particles"]) else: - f.write('Total particles: Not saved for regions\n') + f.write("Total particles: Not saved for regions\n") else: - f.write('Total particles: %i\n' % - stats['count']) + f.write("Total particles: %i\n" % stats["count"]) if paralyzed_stats and mask is None: # filters stats are only meaningful if mask == None - f.write('\nCUT-OFF tool/filters\n') + f.write("\nCUT-OFF tool/filters\n") # Not saved for cutoff_filter - f.write('Max particles present at same time: %i\n' - % stats['max_number_worms_present']) - f.write('\n') + f.write("Max particles present at same time: %i\n" % stats["max_number_worms_present"]) + f.write("\n") if settings["cutoff_filter"]: # Meaningless if mask != None - f.write('Frame number: ') - for item in results['cutoff_filter_data']["frames"]: - f.write('%i, ' % item) + f.write("Frame number: ") + for item in results["cutoff_filter_data"]["frames"]: + f.write("%i, " % item) - f.write('\n# of particles: ') - for item in results['cutoff_filter_data']["list_number"]: - f.write('%i, ' % item) + f.write("\n# of particles: ") + for item in results["cutoff_filter_data"]["list_number"]: + f.write("%i, " % item) - f.write('\nCut-off 
tool: Yes\n') + f.write("\nCut-off tool: Yes\n") if settings["use_average"]: - f.write('Method: averaging\n') + f.write("Method: averaging\n") else: - f.write('Method: maximum\n') + f.write("Method: maximum\n") f.write( - 'Removed particles: %i\n' % - results['cutoff_filter_data']['removed_particles_cutoff']) + "Removed particles: %i\n" + % results["cutoff_filter_data"]["removed_particles_cutoff"] + ) else: - f.write('Cut-off tool: No\n') + f.write("Cut-off tool: No\n") if settings["extra_filter"]: - f.write('Extra filter: Yes\n') + f.write("Extra filter: Yes\n") f.write( - 'Settings: remove when bpm > %.5f and velocity < %.5f\n' % - (settings["Bends_max"], settings["Speed_max"])) - f.write('Removed particles: %i' % - results['extra_filter_spurious_worms']) + "Settings: remove when bpm > %.5f and velocity < %.5f\n" + % (settings["Bends_max"], settings["Speed_max"]) + ) + f.write("Removed particles: %i" % results["extra_filter_spurious_worms"]) else: - f.write('Extra filter: No\n') - - f.write('\n-------------------------------\n\n') - - f.write(prepend + 'BPM Mean: %.5f\n' % stats['bpm_mean']) - f.write(prepend + 'BPM Standard deviation: %.5f\n' % stats['bpm_std']) - f.write(prepend + 'BPM Error on Mean: %.5f\n' % stats['bpm_mean_std']) - f.write(prepend + 'BPM Median: %.5f\n' % stats['bpm_median']) - - f.write(prepend + 'Bends in movie Mean: %.5f\n' % - stats['bends_in_movie_mean']) - f.write(prepend + 'Bends in movie Standard deviation: %.5f\n' % - stats['bends_in_movie_std']) - f.write(prepend + 'Bends in movie Error on Mean: %.5f\n' % - stats['bends_in_movie_mean_std']) - f.write( - prepend + - 'Bends in movie Median: %.5f\n' % - stats['bends_in_movie_median']) - - f.write(prepend + 'Speed Mean: %.6f\n' % stats['vel_mean']) - f.write(prepend + 'Speed Standard deviation: %.6f\n' % stats['vel_std']) - f.write(prepend + 'Speed Error on Mean: %.6f\n' % stats['vel_mean_std']) - f.write(prepend + 'Speed Median: %.6f\n' % stats['vel_median']) - - f.write( - prepend + - '90th Percentile speed Mean: %.6f\n' % - stats['max_speed_mean']) - f.write(prepend + '90th Percentile speed Standard deviation: %.6f\n' % - stats['max_speed_std']) - f.write(prepend + '90th Percentile speed Error on mean: %.6f\n' % - stats['max_speed_mean_std']) - if np.isnan(stats['move_per_bend_mean']): - f.write(prepend + 'Dist per bend Mean: nan\n') - f.write(prepend + 'Dist per bend Standard deviation: nan\n') - f.write(prepend + 'Dist per bend Error on Mean: nan\n') + f.write("Extra filter: No\n") + + f.write("\n-------------------------------\n\n") + + f.write(prepend + "BPM Mean: %.5f\n" % stats["bpm_mean"]) + f.write(prepend + "BPM Standard deviation: %.5f\n" % stats["bpm_std"]) + f.write(prepend + "BPM Error on Mean: %.5f\n" % stats["bpm_mean_std"]) + f.write(prepend + "BPM Median: %.5f\n" % stats["bpm_median"]) + + f.write(prepend + "Activity index Mean: %.5f\n" % stats["activity_index_mean"]) + f.write(prepend + "Activity index Standard deviation: %.5f\n" % stats["activity_index_std"]) + f.write(prepend + "Activity index Error on Mean: %.5f\n" % stats["activity_index_mean_std"]) + f.write(prepend + "Activity index Median: %.5f\n" % stats["activity_index_median"]) + + f.write(prepend + "Bends in movie Mean: %.5f\n" % stats["bends_in_movie_mean"]) + f.write(prepend + "Bends in movie Standard deviation: %.5f\n" % stats["bends_in_movie_std"]) + f.write(prepend + "Bends in movie Error on Mean: %.5f\n" % stats["bends_in_movie_mean_std"]) + f.write(prepend + "Bends in movie Median: %.5f\n" % 
stats["bends_in_movie_median"]) + + f.write(prepend + "Speed Mean: %.6f\n" % stats["vel_mean"]) + f.write(prepend + "Speed Standard deviation: %.6f\n" % stats["vel_std"]) + f.write(prepend + "Speed Error on Mean: %.6f\n" % stats["vel_mean_std"]) + f.write(prepend + "Speed Median: %.6f\n" % stats["vel_median"]) + + f.write(prepend + "90th Percentile speed Mean: %.6f\n" % stats["max_speed_mean"]) + f.write(prepend + "90th Percentile speed Standard deviation: %.6f\n" % stats["max_speed_std"]) + f.write(prepend + "90th Percentile speed Error on mean: %.6f\n" % stats["max_speed_mean_std"]) + if np.isnan(stats["move_per_bend_mean"]): + f.write(prepend + "Dist per bend Mean: nan\n") + f.write(prepend + "Dist per bend Standard deviation: nan\n") + f.write(prepend + "Dist per bend Error on Mean: nan\n") else: - f.write( - prepend + - 'Dist per bend Mean: %.6f\n' % - stats['move_per_bend_mean']) - f.write(prepend + 'Dist per bend Standard deviation: %.6f\n' % - stats['move_per_bend_std']) - f.write(prepend + 'Dist per bend Error on Mean: %.6f\n' % - stats['move_per_bend_mean_std']) + f.write(prepend + "Dist per bend Mean: %.6f\n" % stats["move_per_bend_mean"]) + f.write(prepend + "Dist per bend Standard deviation: %.6f\n" % stats["move_per_bend_std"]) + f.write(prepend + "Dist per bend Error on Mean: %.6f\n" % stats["move_per_bend_mean_std"]) if paralyzed_stats: - f.write(prepend + 'Moving worms: %i\n' % stats['n_moving']) - f.write(prepend + 'Paralyzed worms: %i\n' % stats['n_paralyzed']) - f.write(prepend + 'Total worms: %i\n' % - stats['max_number_worms_present']) - f.write(prepend + 'Moving ratio: %.6f\n' % - (float(stats['n_moving']) / stats['count'])) - f.write(prepend + 'Paralyzed ratio: %.6f\n' % - (float(stats['n_paralyzed']) / stats['count'])) - if stats['n_paralyzed'] > 0: - f.write(prepend + 'Moving-to-paralyzed ratio: %.6f\n' % (float( - stats['n_moving']) / stats['n_paralyzed'])) + f.write(prepend + "Moving worms: %i\n" % stats["n_moving"]) + f.write(prepend + "Paralyzed worms: %i\n" % stats["n_paralyzed"]) + f.write(prepend + "Total worms: %i\n" % stats["max_number_worms_present"]) + f.write(prepend + "Moving ratio: %.6f\n" % (float(stats["n_moving"]) / stats["count"])) + f.write( + prepend + "Paralyzed ratio: %.6f\n" % (float(stats["n_paralyzed"]) / stats["count"]) + ) + if stats["n_paralyzed"] > 0: + f.write( + prepend + + "Moving-to-paralyzed ratio: %.6f\n" + % (float(stats["n_moving"]) / stats["n_paralyzed"]) + ) else: - f.write(prepend + 'Moving-to-paralyzed ratio: inf\n') - if stats['n_moving'] > 0: - f.write(prepend + 'Paralyzed-to-moving ratio: %.6f\n' % (float( - stats['n_paralyzed']) / stats['n_moving'])) + f.write(prepend + "Moving-to-paralyzed ratio: inf\n") + if stats["n_moving"] > 0: + f.write( + prepend + + "Paralyzed-to-moving ratio: %.6f\n" + % (float(stats["n_paralyzed"]) / stats["n_moving"]) + ) else: - f.write(prepend + 'Paralyzed-to-moving ratio: inf\n') - f.write(prepend + 'Area Mean: %.6f\n' % stats['area_mean']) - f.write(prepend + 'Area Standard Deviation: %.6f\n' % stats['area_std']) - f.write(prepend + 'Area Error on Mean: %.6f\n' % stats['area_mean_std']) + f.write(prepend + "Paralyzed-to-moving ratio: inf\n") + f.write(prepend + "Area Mean: %.6f\n" % stats["area_mean"]) + f.write(prepend + "Area Standard Deviation: %.6f\n" % stats["area_std"]) + f.write(prepend + "Area Error on Mean: %.6f\n" % stats["area_mean_std"]) - f.write(prepend + 'Round ratio Mean: %.6f\n' % stats['round_ratio_mean']) - f.write(prepend + 'Round ratio Standard deviation: %.6f\n' % - 
stats['round_ratio_std']) - f.write(prepend + 'Round ratio Error on mean: %.6f\n' % - stats['round_ratio_mean_std']) + f.write(prepend + "Round ratio Mean: %.6f\n" % stats["round_ratio_mean"]) + f.write(prepend + "Round ratio Standard deviation: %.6f\n" % stats["round_ratio_std"]) + f.write(prepend + "Round ratio Error on mean: %.6f\n" % stats["round_ratio_mean_std"]) - f.write(prepend + 'Eccentricity Mean: %.6f\n' % stats['eccentricity_mean']) - f.write(prepend + 'Eccentricity Standard deviation: %.6f\n' % - stats['eccentricity_std']) - f.write(prepend + 'Eccentricity Error on mean: %.6f\n' % - stats['eccentricity_mean_std']) + f.write(prepend + "Eccentricity Mean: %.6f\n" % stats["eccentricity_mean"]) + f.write(prepend + "Eccentricity Standard deviation: %.6f\n" % stats["eccentricity_std"]) + f.write(prepend + "Eccentricity Error on mean: %.6f\n" % stats["eccentricity_mean_std"]) def mean_std(x, appears_in): @@ -912,15 +920,15 @@ def statistics(results, settings, mask=None): df = df.loc[mask, :] - P = results["track"]['particle'] - T = results["track"]['frame'] + P = results["track"]["particle"] + T = results["track"]["frame"] if settings["cutoff_filter"]: max_number_worms_present = len(df) else: max_number_worms_present = max( - [len([1 for p in set(P[T == t]) if p in df.index]) - for t in set(T)]) + [len([1 for p in set(P[T == t]) if p in df.index]) for t in set(T)] + ) count = len(df) n_moving = np.sum(df.loc[:, "Moving"]) n_paralyzed = len(df) - n_moving @@ -930,11 +938,13 @@ def statistics(results, settings, mask=None): bpm_median = np.median(df.loc[:, "BPM"]) bpm_mean_std = bpm_std / np.sqrt(max_number_worms_present) - bends_in_movie_mean, bends_in_movie_std = mean_std( - df.loc[:, "bends_in_movie"], appears_in) + activity_index_mean, activity_index_std = mean_std(df.loc[:, "activity_index"], appears_in) + activity_index_median = np.median(df.loc[:, "activity_index"]) + activity_index_mean_std = activity_index_std / np.sqrt(max_number_worms_present) + + bends_in_movie_mean, bends_in_movie_std = mean_std(df.loc[:, "bends_in_movie"], appears_in) bends_in_movie_median = np.median(df.loc[:, "bends_in_movie"]) - bends_in_movie_mean_std = bends_in_movie_std / \ - np.sqrt(max_number_worms_present) + bends_in_movie_mean_std = bends_in_movie_std / np.sqrt(max_number_worms_present) vel_mean, vel_std = mean_std(df.loc[:, "Speed"], appears_in) vel_mean_std = vel_std / np.sqrt(max_number_worms_present) @@ -943,94 +953,115 @@ def statistics(results, settings, mask=None): area_mean, area_std = mean_std(df.loc[:, "Area"], appears_in) area_mean_std = area_std / np.sqrt(max_number_worms_present) - max_speed_mean, max_speed_std = mean_std( - df.loc[:, "Max speed"], appears_in) + max_speed_mean, max_speed_std = mean_std(df.loc[:, "Max speed"], appears_in) max_speed_mean_std = max_speed_std / np.sqrt(max_number_worms_present) - round_ratio_mean, round_ratio_std = mean_std( - df.loc[:, "Round ratio"], appears_in) + round_ratio_mean, round_ratio_std = mean_std(df.loc[:, "Round ratio"], appears_in) round_ratio_mean_std = round_ratio_std / np.sqrt(max_number_worms_present) - eccentricity_mean, eccentricity_std = mean_std( - df.loc[:, "eccentricity"], appears_in) - eccentricity_mean_std = eccentricity_std / \ - np.sqrt(max_number_worms_present) + eccentricity_mean, eccentricity_std = mean_std(df.loc[:, "eccentricity"], appears_in) + eccentricity_mean_std = eccentricity_std / np.sqrt(max_number_worms_present) # Ignore nan particles for move_per_bend mask_appear = np.logical_not(np.isnan(df.loc[:, "Dist per 
bend"])) if np.any(mask_appear): move_per_bend_mean, move_per_bend_std = mean_std( - df.loc[mask_appear, "Dist per bend"], - df.loc[mask_appear, "Appears in frames"]) - move_per_bend_mean_std = move_per_bend_std / \ - np.sqrt(max([np.sum(mask_appear), max_number_worms_present])) + df.loc[mask_appear, "Dist per bend"], df.loc[mask_appear, "Appears in frames"] + ) + move_per_bend_mean_std = move_per_bend_std / np.sqrt( + max([np.sum(mask_appear), max_number_worms_present]) + ) else: move_per_bend_mean = np.nan move_per_bend_std = np.nan move_per_bend_mean_std = np.nan stats = { - 'max_number_worms_present': max_number_worms_present, - 'n_paralyzed': n_paralyzed, - 'n_moving': n_moving, - 'bpm_mean': bpm_mean, - 'bpm_std': bpm_std, - 'bpm_median': bpm_median, - 'bpm_mean_std': bpm_mean_std, - 'bends_in_movie_mean': bends_in_movie_mean, - 'bends_in_movie_std': bends_in_movie_std, - 'bends_in_movie_mean_std': bends_in_movie_mean_std, - 'bends_in_movie_median': bends_in_movie_median, - 'vel_mean': vel_mean, - 'vel_std': vel_std, - 'vel_mean_std': vel_mean_std, - 'vel_median': vel_median, - 'area_mean': area_mean, - 'area_std': area_std, - 'area_mean_std': area_mean_std, - 'max_speed_mean': max_speed_mean, - 'max_speed_std': max_speed_std, - 'max_speed_mean_std': max_speed_mean_std, - 'move_per_bend_mean': move_per_bend_mean, - 'move_per_bend_std': move_per_bend_std, - 'move_per_bend_mean_std': move_per_bend_mean_std, - 'count': count, - 'round_ratio_mean': round_ratio_mean, - 'round_ratio_std': round_ratio_std, - 'round_ratio_mean_std': round_ratio_mean_std, - 'eccentricity_mean': eccentricity_mean, - 'eccentricity_std': eccentricity_std, - 'eccentricity_mean_std': eccentricity_mean_std} + "max_number_worms_present": max_number_worms_present, + "n_paralyzed": n_paralyzed, + "n_moving": n_moving, + "bpm_mean": bpm_mean, + "bpm_std": bpm_std, + "bpm_median": bpm_median, + "bpm_mean_std": bpm_mean_std, + "activity_index_mean": activity_index_mean, + "activity_index_std": activity_index_std, + "activity_index_median": activity_index_median, + "activity_index_mean_std": activity_index_mean_std, + "bends_in_movie_mean": bends_in_movie_mean, + "bends_in_movie_std": bends_in_movie_std, + "bends_in_movie_mean_std": bends_in_movie_mean_std, + "bends_in_movie_median": bends_in_movie_median, + "vel_mean": vel_mean, + "vel_std": vel_std, + "vel_mean_std": vel_mean_std, + "vel_median": vel_median, + "area_mean": area_mean, + "area_std": area_std, + "area_mean_std": area_mean_std, + "max_speed_mean": max_speed_mean, + "max_speed_std": max_speed_std, + "max_speed_mean_std": max_speed_mean_std, + "move_per_bend_mean": move_per_bend_mean, + "move_per_bend_std": move_per_bend_std, + "move_per_bend_mean_std": move_per_bend_mean_std, + "count": count, + "round_ratio_mean": round_ratio_mean, + "round_ratio_std": round_ratio_std, + "round_ratio_mean_std": round_ratio_mean_std, + "eccentricity_mean": eccentricity_mean, + "eccentricity_std": eccentricity_std, + "eccentricity_mean_std": eccentricity_mean_std, + } return stats def write_particles(settings, particles_dataframe, filename): - """Write particles dataframe to csv""" - df = particles_dataframe.loc[:, [ - "BPM", "bends_in_movie", "Speed", "Max speed", "Dist per bend", - "Area", "Appears in frames", "Moving", "Region", "Round ratio", - "eccentricity"]] - - x = (settings["limit_images_to"] / settings["fps"]) + """Write particles dataframe to csv.""" + df = particles_dataframe.loc[ + :, + [ + "BPM", + "bends_in_movie", + "Speed", + "Max speed", + "Dist per bend", + 
"Area", + "Appears in frames", + "Moving", + "Region", + "Round ratio", + "eccentricity", + ], + ] + + x = settings["limit_images_to"] / settings["fps"] df.columns = [ - 'BPM', f'Bends per {x:.2f} s', 'Speed', 'Max speed', 'Dist per bend', - 'Area', 'Appears in frames', 'Moving (non-paralyzed)', 'Region', - 'Round ratio', 'Eccentricity'] + "BPM", + f"Bends per {x:.2f} s", + "Speed", + "Max speed", + "Dist per bend", + "Area", + "Appears in frames", + "Moving (non-paralyzed)", + "Region", + "Round ratio", + "Eccentricity", + ] df.to_csv(filename) def write_results_file(results, settings): df = results["particle_dataframe"] - write_particles(settings, - df, - os.path.join(settings["save_as"], 'particles.csv')) + write_particles(settings, df, os.path.join(settings["save_as"], "particles.csv")) - with open(os.path.join(settings["save_as"], 'results.txt'), 'w') as f: - f.write('---------------------------------\n') - f.write(' Results for %s \n' % settings["video_filename"]) - f.write('---------------------------------\n\n') + with open(os.path.join(settings["save_as"], "results.txt"), "w") as f: + f.write("---------------------------------\n") + f.write(" Results for %s \n" % settings["video_filename"]) + f.write("---------------------------------\n\n") # Stats for all worms write_stats(settings, results, f, paralyzed_stats=True) @@ -1038,19 +1069,20 @@ def write_results_file(results, settings): # Stats for moving worms moving_mask = df.loc[:, "Moving"] - write_stats(settings, results, f, paralyzed_stats=False, - prepend='Moving ', mask=moving_mask) + write_stats( + settings, results, f, paralyzed_stats=False, prepend="Moving ", mask=moving_mask + ) # Raw stats - f.write('---------------------------------\n\n') + f.write("---------------------------------\n\n") regions = settings["regions"] # Per region stats if len(regions) > 1: for reg in regions: - f.write('---------------------------------\n') - f.write('Stats for region: %s\n' % reg) - f.write('---------------------------------\n\n') + f.write("---------------------------------\n") + f.write("Stats for region: %s\n" % reg) + f.write("---------------------------------\n\n") # Worms of this region try: @@ -1058,64 +1090,60 @@ def write_results_file(results, settings): except TypeError: pars = [int(results["region_particles"][reg])] if len(pars) == 0: - f.write('Nothing found in region.\n\n') + f.write("Nothing found in region.\n\n") continue indices = [idx for idx in pars if idx in df.index] # All worms - write_stats(settings, results, f, paralyzed_stats=True, - mask=indices) + write_stats(settings, results, f, paralyzed_stats=True, mask=indices) - f.write('\n\n') - f.write('\n') + f.write("\n\n") + f.write("\n") - print('results.txt file produced.') + print("results.txt file produced.") # ============================================================================= # --- Matplotlib code--- # ============================================================================= def print_frame(settings, t, P, T, bends, track): - font = {'size': settings["font_size"]} - print('Printing frame', t + 1) - image_filename = os.path.join( - settings["save_as"], 'imgs', '%05d.jpg' % (int(t))) - frame = (255 - io.imread(image_filename)) + font = {"size": settings["font_size"]} + print("Printing frame", t + 1) + image_filename = os.path.join(settings["save_as"], "imgs", "%05d.jpg" % (int(t))) + frame = 255 - io.imread(image_filename) os.remove(image_filename) small_imshow(settings, frame, cmap=cm.binary, vmax=300) for p in bends.index: pp = P == p l = 
np.logical_and(pp, T == t) if np.sum(l) > 0: - x = track['x'][l].iloc[0] - y = track['y'][l].iloc[0] + x = track["x"][l].iloc[0] + y = track["y"][l].iloc[0] b = bends[p][np.sum(T[pp] < t)] - plt.text(y + 3, x + 3, 'p=%i\n%.1f' % - (p, b), font, color=[1, 0.3, 0.2]) + plt.text(y + 3, x + 3, "p=%i\n%.1f" % (p, b), font, color=[1, 0.3, 0.2]) m, n = frame.shape plt.plot( - [n - (5 + settings["scale_bar_size"] / float(settings["px_to_mm"])), - n - 5], + [n - (5 + settings["scale_bar_size"] / float(settings["px_to_mm"])), n - 5], [m - 5, m - 5], - linewidth=settings["scale_bar_thickness"], c=[0.5, 0.5, 0.5]) - plt.axis('off') - plt.axis('tight') - plt.savefig(os.path.join(settings["save_as"], 'imgs', '%05d.jpg' % (t))) + linewidth=settings["scale_bar_thickness"], + c=[0.5, 0.5, 0.5], + ) + plt.axis("off") + plt.axis("tight") + plt.savefig(os.path.join(settings["save_as"], "imgs", "%05d.jpg" % (t))) def print_images(settings, bends): plt.gcf().set_size_inches(20, 20) plt.clf() - with open(os.path.join(settings["save_as"], 'track.p'), - 'br') as trackfile: + with open(os.path.join(settings["save_as"], "track.p"), "br") as trackfile: track = pickle.load(trackfile) - P = track['particle'] - T = track['frame'] + P = track["particle"] + T = track["frame"] output_overlayed_images = settings["output_overlayed_images"] if output_overlayed_images != 0: - up_to = (len(set(T)) if output_overlayed_images is None - else output_overlayed_images) + up_to = len(set(T)) if output_overlayed_images is None else output_overlayed_images for t in range(up_to): print_frame(settings, t, P, T, bends, track) plt.clf() @@ -1131,58 +1159,86 @@ def small_imshow(settings, img, *args, **kwargs): img = resize( np.asarray(img, float), (int(img.shape[0] * factor), int(img.shape[1] * factor)), - preserve_range=True) + preserve_range=True, + ) plt.clf() - plt.imshow(img, *args, extent=[0, original_shape[1], - original_shape[0], 0], **kwargs) + plt.imshow(img, *args, extent=[0, original_shape[1], original_shape[0], 0], **kwargs) def output_processing_frames( - settings, save_folder, frameorig, Z, frame, thresholded, - frame_after_open, frame_after_close, labeled, - labeled_removed, skel_labeled=None): + settings, + save_folder, + frameorig, + Z, + frame, + thresholded, + frame_after_open, + frame_after_close, + labeled, + labeled_removed, + skel_labeled=None, +): plt.gcf().set_size_inches(20, 20) plt.clf() small_imshow(settings, frameorig, cmap=cm.gray) - plt.savefig(os.path.join(save_folder, '0frameorig.jpg')) + plt.savefig(os.path.join(save_folder, "0frameorig.jpg")) small_imshow(settings, Z, cmap=cm.gray) - plt.savefig(os.path.join(save_folder, '0z.jpg')) + plt.savefig(os.path.join(save_folder, "0z.jpg")) small_imshow(settings, frame, cmap=cm.gray) - plt.savefig(os.path.join(save_folder, '1framesubtract.jpg')) + plt.savefig(os.path.join(save_folder, "1framesubtract.jpg")) small_imshow(settings, thresholded, cmap=cm.binary) - plt.savefig(os.path.join(save_folder, '2thresholded.jpg')) + plt.savefig(os.path.join(save_folder, "2thresholded.jpg")) small_imshow(settings, frame_after_open, cmap=cm.binary) - plt.savefig(os.path.join(save_folder, '3opened.jpg')) + plt.savefig(os.path.join(save_folder, "3opened.jpg")) small_imshow(settings, frame_after_close, cmap=cm.binary) - plt.savefig(os.path.join(save_folder, '4closed.jpg')) + plt.savefig(os.path.join(save_folder, "4closed.jpg")) small_imshow(settings, labeled, cmap=cm.binary) - plt.savefig(os.path.join(save_folder, '5labelled.jpg')) + plt.savefig(os.path.join(save_folder, 
"5labelled.jpg")) small_imshow(settings, labeled_removed, cmap=cm.binary) - plt.savefig(os.path.join(save_folder, '6removed.jpg')) + plt.savefig(os.path.join(save_folder, "6removed.jpg")) if skel_labeled is not None: small_imshow(settings, skel_labeled, cmap=cm.binary) - plt.savefig(os.path.join(save_folder, '7skeletonized.jpg')) + plt.savefig(os.path.join(save_folder, "7skeletonized.jpg")) plt.clf() def print_example_frame( - settings, sizes, save_folder, frameorig, Z, frame, thresholded, - frame_after_open, frame_after_close, labeled, labeled_removed, - skel_labeled): - print('Sizes:') + settings, + sizes, + save_folder, + frameorig, + Z, + frame, + thresholded, + frame_after_open, + frame_after_close, + labeled, + labeled_removed, + skel_labeled, +): + print("Sizes:") print(sizes) output_processing_frames( - settings, save_folder, frameorig, Z, frame, thresholded, - frame_after_open, frame_after_close, labeled, labeled_removed, - (skel_labeled if settings["skeletonize"] else None)) - print('Example frame outputted!') + settings, + save_folder, + frameorig, + Z, + frame, + thresholded, + frame_after_open, + frame_after_close, + labeled, + labeled_removed, + (skel_labeled if settings["skeletonize"] else None), + ) + print("Example frame outputted!") From aa68e5dbceaf5037f7fbedfb42d26a62ab6b2e83 Mon Sep 17 00:00:00 2001 From: cnstll Date: Wed, 3 Aug 2022 18:02:02 +0200 Subject: [PATCH 04/25] Integration of Activity index into particles.csv file --- WF_NTP/WF_NTP/WF_NTP_script.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/WF_NTP/WF_NTP/WF_NTP_script.py b/WF_NTP/WF_NTP/WF_NTP_script.py index bbef080..b344cde 100755 --- a/WF_NTP/WF_NTP/WF_NTP_script.py +++ b/WF_NTP/WF_NTP/WF_NTP_script.py @@ -1023,6 +1023,7 @@ def write_particles(settings, particles_dataframe, filename): :, [ "BPM", + "activity_index", "bends_in_movie", "Speed", "Max speed", @@ -1039,6 +1040,7 @@ def write_particles(settings, particles_dataframe, filename): x = settings["limit_images_to"] / settings["fps"] df.columns = [ "BPM", + "Activity Index", f"Bends per {x:.2f} s", "Speed", "Max speed", From 4030f4f95e126aa8d6345cbd10fe6541760d993f Mon Sep 17 00:00:00 2001 From: cnstll Date: Thu, 4 Aug 2022 11:13:36 +0200 Subject: [PATCH 05/25] Creating report_overview file --- .../activity_index/report_overview/report_overview.md | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 experiments/activity_index/report_overview/report_overview.md diff --git a/experiments/activity_index/report_overview/report_overview.md b/experiments/activity_index/report_overview/report_overview.md new file mode 100644 index 0000000..bf54863 --- /dev/null +++ b/experiments/activity_index/report_overview/report_overview.md @@ -0,0 +1,7 @@ +# Description + + +# Methodology + + +# Results From d0821d1242e0b51e7e1c32e2c63248f2949472fe Mon Sep 17 00:00:00 2001 From: rcatini <100608277+rcatini@users.noreply.github.com> Date: Thu, 1 Sep 2022 18:04:54 +0200 Subject: [PATCH 06/25] WF_NTP activity index: documentation of the current implementation --- docs/WF_NTP-activity_index.md | 40 +++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 docs/WF_NTP-activity_index.md diff --git a/docs/WF_NTP-activity_index.md b/docs/WF_NTP-activity_index.md new file mode 100644 index 0000000..1b687b3 --- /dev/null +++ b/docs/WF_NTP-activity_index.md @@ -0,0 +1,40 @@ +WF-NTP Activity index implementation +==================================== + +The activity index is calculated similarly to [CeleST 
(2014)](https://doi.org/10.1371/journal.pcbi.1003702):
+
+First, the brush stroke is computed for each bend $b$ as the ratio between the residual area painted by the worm and the average area occupied by its body.
+
+Assuming that over one bend $b$ the worm paints a total of $\mathrm{TotalArea}_b$ pixels over all the frames, and the average area occupied by its body is $\mathrm{AverageBodyArea}_b$, the brush stroke is:
+
+$$
+\mathrm{BrushStroke}_b = \frac{\mathrm{TotalArea}_b - \mathrm{AverageBodyArea}_b}{\mathrm{AverageBodyArea}_b}
+$$
+
+The activity index corresponds to the value of the brush stroke, normalized by the time that the worm has taken to complete one bend:
+
+$$
+\mathrm{ActivityIndex}_b = \frac{\mathrm{BrushStroke}_b}{\Delta t_b}
+$$
+
+Note: In the current version, the activity index is still not normalized and is therefore equal to the brush stroke.
+
+Properties
+----------
+The brush stroke is therefore a non-negative number:
+- the value $0$ (zero) corresponds to a worm that doesn't move at all during the bend: $\mathrm{TotalArea}_b = \mathrm{AverageBodyArea}_b$;
+- the value $n$ corresponds to a worm that occupied in total an amount of $(n+1) \mathrm{AverageBodyArea}_b$ pixels during the bend.
+
+Differences with CeleST
+-----------------------
+The formulas for the brush stroke and activity index are slightly different from the formula on page 6 of the aforementioned paper.
+The only significant difference is that CeleST's value is always negative and WF_NTP's always positive, the two values being exact opposites:
+- BrushStroke(CeleST) = -BrushStroke(WF-NTP)
+- ActivityIndex(CeleST) = -ActivityIndex(WF-NTP)
+
+Output format
+-------------
+
+The activity index is computed during a normal execution of WF_NTP, as one of the several features supported by the program.
+
+Statistical properties of the activity index are presented in the results file (mean, standard deviation, etc.).
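+
+Illustrative example
+--------------------
+
+The following is a minimal sketch of the two formulas above. It is an illustration of the definitions only, not the WF-NTP implementation; the function name and the mask/duration inputs are hypothetical:
+
+```python
+import numpy as np
+
+
+def activity_index_sketch(masks, dt_bend_s):
+    """Illustrative only.
+
+    masks: (frames, height, width) boolean stack of one worm over one bend.
+    dt_bend_s: duration of the bend in seconds.
+    """
+    # TotalArea_b: pixels painted at least once over the bend
+    total_area = np.count_nonzero(masks.any(axis=0))
+    # AverageBodyArea_b: mean per-frame body area
+    average_body_area = masks.sum(axis=(1, 2)).mean()
+    brush_stroke = (total_area - average_body_area) / average_body_area
+    return brush_stroke / dt_bend_s  # ActivityIndex_b
+```
+
+For a perfectly still worm, total_area equals average_body_area, so the brush stroke is 0, consistent with the Properties section above.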

From 3742219864461e81501e2d5fee779a8ed43cca8f Mon Sep 17 00:00:00 2001
From: COTHSC
Date: Mon, 22 Aug 2022 19:42:55 +0200
Subject: [PATCH 07/25] added auditor script to help check quality of provided
 images

---
 auditor/__main__.py |  34 +++++++++
 auditor/auditor.py  |  46 +++++++++++
 auditor/parser.py   | 181 ++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 261 insertions(+)
 create mode 100644 auditor/__main__.py
 create mode 100644 auditor/auditor.py
 create mode 100644 auditor/parser.py

diff --git a/auditor/__main__.py b/auditor/__main__.py
new file mode 100644
index 0000000..e0b3077
--- /dev/null
+++ b/auditor/__main__.py
@@ -0,0 +1,34 @@
+from auditor.auditor import path_checker
+from auditor.parser import audit_images, load_metadata, parser

+# ########################################################################## #
+#                                 FUNCTIONS                                  #
+# ########################################################################## #
+
+
+def main():
+    # parse the argument(s)
+    args = parser()
+    dir_path = args.path
+
+    # check the path
+    path_checker(dir_path)
+
+    # load the json metadata file
+    metadata = load_metadata(dir_path)
+
+    # audit the tiff images based on the metadata,
+    # retrieving some info about the frames
+    stat_frames = audit_images(metadata, dir_path)
+
+
+# ########################################################################## #
+#                                    MAIN                                    #
+# ########################################################################## #
+
+if __name__ == "__main__":
+    main()
diff --git a/auditor/auditor.py b/auditor/auditor.py
new file mode 100644
index 0000000..0daf67f
--- /dev/null
+++ b/auditor/auditor.py
@@ -0,0 +1,46 @@
+import os
+
+NB_LIMIT = 10
+
+
+# Checker related to the parsed argument.
+def path_checker(path: str):
+    """Check the existence and access of a directory.
+
+    Arguments:
+        path (str): path to the input directory (containing the images).
+
+    Raises:
+        NotADirectoryError: directory doesn't exist or is not a directory.
+        PermissionError: user doesn't have access to the directory.
+    """
+    if not os.path.isdir(path):
+        raise NotADirectoryError(path + " is not a directory.")
+    if not os.access(path, os.R_OK | os.W_OK):
+        raise PermissionError("Permission denied to " + path)
+
+
+def path_inside_checker(dir_path: str):
+    """Check that the directory contains only .tif or .json files and a metadata.txt file.
+
+    Arguments:
+        dir_path: path to the directory.
+
+    Raises:
+        FileNotFoundError: no metadata.txt file was found.
+        Exception: a file other than .tif, .json or metadata.txt was found.
+    """
+    number_tif_files = 0
+    metadata_file = False
+    for file in os.listdir(dir_path):
+        if file[-4:] == ".tif":
+            number_tif_files += 1
+        elif file == "metadata.txt":
+            metadata_file = True
+        elif file[-5:] == ".json":
+            continue
+        else:
+            raise Exception("File other than .tif or .json or metadata.txt found")
+    if metadata_file is False:
+        raise FileNotFoundError("No metadata file found")
diff --git a/auditor/parser.py b/auditor/parser.py
new file mode 100644
index 0000000..d3233b3
--- /dev/null
+++ b/auditor/parser.py
@@ -0,0 +1,181 @@
+import argparse
+import json
+from json import JSONDecodeError
+from os import F_OK, R_OK, access
+from os.path import exists as file_exists
+
+import cv2 as cv
+import pandas as pd
+
+
+# ########################################################################### #
+#                     Parsing of the inputs of the auditor                    #
+# ########################################################################### #
+# Parser related to the arguments of the auditor program
+def parser() -> argparse.Namespace:
+    """Parse the command line arguments to get the path of the input directory.
+
+    Return:
+        A namespace containing the path to the input directory:
+        Namespace(path='')
+    """
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--path", type=str, required=True, help="path where the source images will be looked for."
+    )
+    return parser.parse_args()
+
+
+# ########################################################################### #
+#                           Loading of the metadata                           #
+# ########################################################################### #
+
+
+def load_metadata(directoryPath: str) -> dict:
+    """Load the metadata file into a Python dictionary.
+
+    Args:
+        directoryPath (str): directory path to the json file to load.
+
+    Raises:
+        FileNotFoundError: file [directoryPath]/metadata.txt does not exist.
+        Exception: [directoryPath]/metadata.txt is not readable by the user.
+        JSONDecodeError: issue when loading the metadata from the file.
+    Returns:
+        dict: the loaded metadata.
+ """ + metadata_file = directoryPath + "/metadata.txt" + if not access(metadata_file, F_OK): + raise FileNotFoundError(f"File {metadata_file} does not exists.") + if not access(metadata_file, R_OK): + raise Exception(f"File {metadata_file} is not readable for the user.") + with open(file=metadata_file, mode="r") as file: + metadata = json.load(file) + return metadata + + +def load_dataframe(file_path): + """loads a dataframe from a .csv file containing the audit data, creates one if none exist. + + args: + directorypath (str): directory path to json file to load. + + raises: + filenotfounderror: file [directorypath]/metadata.txt does not exist. + exception: [directorypath]/metadata.txt is not readable by the user. + jsondecodeerror: issue when loading the metadata from file. + returns: + dict: the loaded metadata. + """ + if file_exists(file_path): + df = pd.read_csv(file_path, index_col=False) + else: + df = pd.DataFrame( + columns=[ + "URL", + "expected_frames", + "number_of_actual_frames", + "expected_interval", + "average_interval", + "stdev_interval", + "actual_length_seconds", + "avg_fps", + ] + ) + return df + + +def audit_images(metadata: dict, directoryPath: str) -> dict: + """Audits the video frames and saves the results to .csv file. + + Args: + metadata: the metadata resulting from loadMetadata + directoryPath: the directory where the images are located + + Returns: + dict: a dictionary with the audited metadata + """ + video_name = directoryPath.rsplit("/", 1)[-1] + total_time_ms = 0 + expect_frame_no = 0 + expected_frames = metadata["Summary"]["Frames"] + theoretical_interval = metadata["Summary"]["Interval_ms"] + filenames_list = [] + intervals_list = [] + missing_frames = 0 + tmp_total_time_ms = 0 + + for obj in metadata: + if obj.startswith("Metadata-Default"): + filename = obj.rsplit("/", 1)[-1] + if filename != "Summary": + cv_img = cv.imread(directoryPath + "/" + filename) + if cv_img is None: + missing_frames += 1 + expect_frame_no = expect_frame_no + 1 + continue + filenames_list.append(filename) + + # Checking the shape of the image and the expected shape + actual_height, actual_width = cv_img.shape[0], cv_img.shape[1] + expected_width = metadata[obj]["Width"] + expected_height = metadata[obj]["Height"] + if (actual_height != expected_height) or (actual_width != expected_width): + raise Exception(f"Mismatched image size: frame: {directoryPath}/{filename}") + + currentFrame = metadata[obj]["Frame"] + if currentFrame == 0: + time_to_first_image = metadata[obj]["ElapsedTime-ms"] + + # checks for missing frames + if currentFrame != expect_frame_no: + missing_frames = missing_frames + 1 + + # create a list of the intervals between two frames + total_time_ms = metadata[obj]["ElapsedTime-ms"] - time_to_first_image + intervals_list.append(total_time_ms - tmp_total_time_ms) + tmp_total_time_ms = total_time_ms + expect_frame_no = expect_frame_no + 1 + + if expect_frame_no != expected_frames: + missing_frames = expect_frame_no - expected_frames + df = pd.DataFrame(intervals_list, columns=["intervals"]) + df2 = load_dataframe("/audit.csv") + if video_name not in df2.values: + df2.loc[len(df2.index)] = [ + video_name, + expected_frames, + expected_frames - missing_frames, + theoretical_interval, + df["intervals"].mean(), + df["intervals"].std(), + total_time_ms / 1000, + expect_frame_no / (total_time_ms / 1000), + ] + else: + df2.loc[df2["URL"] == video_name] = [ + video_name, + expected_frames, + expected_frames - missing_frames, + theoretical_interval, + 
df["intervals"].mean(), + df["intervals"].std(), + total_time_ms / 1000, + expect_frame_no / (total_time_ms / 1000), + ] + df2.reset_index(drop=True, inplace=True) + + print(df2) + df2.to_csv("./audit.csv", index=False) + return { + "number_of_expected_frames": expected_frames, + "number_of_actual_frames": expected_frames - missing_frames, + "expected_interval": theoretical_interval, + "average_interval": df["intervals"].mean(), + "stdev_interval": df["intervals"].std(), + "actual_length_seconds": total_time_ms / 1000, + "avg_fps": expect_frame_no / (total_time_ms / 1000), + } From 2e25d73a9f1e18d561a73c2667b3b76f57dd9843 Mon Sep 17 00:00:00 2001 From: p-lg-ux <61841681+p-lg-ux@users.noreply.github.com> Date: Thu, 1 Sep 2022 19:40:14 +0200 Subject: [PATCH 08/25] 57 docssetup setup wf ntp under windows (#58) added setup instructions for anaconda and wf_ntp under windows --- Setup.md | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/Setup.md b/Setup.md index 89ec236..f5eedf4 100644 --- a/Setup.md +++ b/Setup.md @@ -22,6 +22,8 @@ Dans les sections qui vont suivre sont détaillés les étapes d'installation po ## Installation d'Anaconda pour Python3.8 + +### Linux 1. télécharger le script d'installation d'anaconda (`wget -P /tmp https://repo.anaconda.com/archive/Anaconda3-2020.02-Linux-x86_64.sh`) > À noter que l'exécutable télécharger par la commande wget est pour un système dont l'architecture est x86 (pour vérifier tapper `arch` ou `uname -m` dans votre terminal) 2. ajouter les droits d'exécution à l'utilisateur (`sudo chmod u+x Anaconda3-2020.02-Linux-x86_64.sh`) et exécuter le script d'installation @@ -31,9 +33,13 @@ Dans les sections qui vont suivre sont détaillés les étapes d'installation po L'installation est complète. +### Windows +1. Télécharger l'exécutable d'installation [Anaconda](https://www.anaconda.com/products/distribution) ou [Miniconda](https://docs.conda.io/en/latest/miniconda.html) pour Python 3.8. +2. Lancer l'exécutable et suivre les instructions jusqu'à ce que l'installation soit complète. ## Installation de WF_NTP +### Linux 0. Penser a telecharger les submodules du repository avec `git submodule update --init --recursive` 1. Créer un environement conda virtuel avec la commande `conda create -n v_wf_ntp python=3.8` et activer le (`conda activate v_wf_ntp`) 1.1 Si vous utilisez miniconda a la place d'anaconda pensez a ajouter le channel conda-forge `conda config --append channels conda-forge` @@ -43,6 +49,29 @@ L'installation est complète. L'installation de `WF_NTP` est complète, vous pouvez lancer le programme avec la commande `./multiwormtracker_app`. +### Windows + +#### Création de l'environnement conda +1. Ouvrir l'Anaconda Prompt et se placer dans le répertoire `Elegant-Elegans` +``` +cd path\to\Elegant-Elegans +``` +2. Créer un environnement conda virtuel `v_wf_ntp`, puis l'activer : +``` +conda env create -f env_wf_ntp.yml +conda activate v_wf_ntp +``` + > L'environnement est bien activé si `v_wf_ntp` apparaît entre parenthèses au début de la ligne de commande. + +#### Exécution +1. Dans l'Anaconda Prompt, se placer dans le répertoire `WF_NTP` +``` +cd path\to\Elegant-Elegans\WF_NTP +``` +2. 
+2. Check that the `v_wf_ntp` environment is activated and launch the `WF_NTP` program with the command:
+```
+python multiwormtracker_app
+```
 
 ## Running CeleST
 
 ### Preliminary steps

From 4c9d91efb7fff8b0b3e0d92f425aabd8cfcd78e0 Mon Sep 17 00:00:00 2001
From: rcatini <100608277+rcatini@users.noreply.github.com>
Date: Thu, 4 Aug 2022 17:31:22 +0200
Subject: [PATCH 09/25] High level description of Celest features

---
 docs/features/celest_features_review.md | 91 +++++++++++++++++++++
 1 file changed, 91 insertions(+)
 create mode 100644 docs/features/celest_features_review.md

diff --git a/docs/features/celest_features_review.md b/docs/features/celest_features_review.md
new file mode 100644
index 0000000..00ccf63
--- /dev/null
+++ b/docs/features/celest_features_review.md
@@ -0,0 +1,91 @@
+# References
+- CeleST: Computer Vision Software for Quantitative Analysis of C. elegans Swim Behavior Reveals Novel Features of Locomotion doi:10.1371/journal.pcbi.1003702
+- (WF-NTP) Assessing motor-related phenotypes of Caenorhabditis elegans with the wide field-of-view nematode tracking platform doi:10.1038/s41596-020-0321-9
+
+# Data pipelines
+## CeleST pipeline
+1. Detect the outlines of the animals (maximisation of gradient flow)
+2. Compute the curvilinear center line of the body, and the half-width of the body along the length
+3. Track the animals in successive frames
+4. Reject the overlaps between animals
+5. Compute 13 body points from head to tail along the center line, and the curvature of the body at each point
+6. Select a short time interval and compute the two-dimensional Fourier transform of the curvature (on curvature and time)
+
+## WF-NTP pipeline
+1. Detect the regions occupied by the animals at each frame
+2. Compute the centroid of the animal
+3. At each frame and for each animal, fit with an ellipse
+4. Compute the frequency of worm bendings using the changes in eccentricity of the ellipse (see the sketch below)
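+
+A rough sketch of step 4 in Python (an illustration only, not the WF_NTP implementation; the smoothing parameters are assumptions, and the extrema counting mirrors the approach in WF_NTP_script.py):
+
+```python
+import numpy as np
+from scipy.signal import savgol_filter
+
+
+def count_bends_from_eccentricity(ecc, fps):
+    """Count body bends as extrema of the smoothed eccentricity signal."""
+    smooth = savgol_filter(ecc, window_length=7, polyorder=3)  # assumed parameters
+    # An extremum shows up as a sign change of the first difference.
+    extrema = np.nonzero(np.diff(np.sign(np.diff(smooth))))[0] + 1
+    bends = len(extrema)
+    return bends, bends / (len(ecc) / fps) * 60.0  # total bends and bends per minute
+```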
+
+# List of CeleST features
+
+## Wave initiation rate
+The frequency of body waves initiated from the head or the tail (# of bends/minute)
+
+Prerequisite: CeleST pipeline #6 (Fourier transform)
+
+Possible alternative in WF-NTP: not straightforward
+
+## Body wave number
+The number of waves traveling through the body (# of bends)
+
+Prerequisite: CeleST pipeline #6 (Fourier transform)
+
+Possible alternative in WF-NTP: not straightforward without a Fourier transform
+
+## Asymmetry
+The global curvature of the body (positive value: clockwise curvature, negative value: counterclockwise)
+
+Prerequisite: CeleST pipeline #6 (Fourier transform)
+
+Possible alternative in WF-NTP: ratio of the surface covered by the body on the left side of the ellipse, compared to the right side
+
+## Stretch
+Range between the points of maximum and minimum curvature
+
+Prerequisite: CeleST pipeline #6 (Fourier transform)
+
+Possible alternative in WF-NTP: not straightforward (computation of curvature along the body is required)
+
+## Attenuation
+Ratio between the amplitudes of the waves on the head and the tail
+
+Prerequisite: CeleST pipeline #6 (Fourier transform)
+
+Possible alternative in WF-NTP: not straightforward without the computation of the Fourier transform
+
+## Reverse swimming
+Fraction of time of reverse swimming (global measure)
+
+Prerequisite: CeleST pipeline #6 (Fourier transform)
+
+Possible alternative in WF-NTP: fraction of the time of negative speed
+
+## Curling
+Fraction of time of maximal bending, defined as a self-overlap (global measure)
+
+Prerequisite: CeleST pipeline #3 (identification of head and tail, and tracking of the animal across time)
+
+Possible alternative in WF-NTP: threshold on the eccentricity of the fitted ellipse
+
+
+## Travel speed
+Longitudinal distance travelled by the body center over a two-stroke interval
+
+Prerequisite: CeleST pipeline #3 (identification of body center, and tracking of the animal across time)
+
+Possible alternative in WF-NTP: similar implementation, tracking the center of the ellipse
+
+## Brush stroke
+Area "painted" by the animal over a two-stroke interval, normalized by body size
+
+Prerequisite: CeleST pipeline #1
+
+Possible alternative in WF-NTP: similar implementation (can also be normalised on the surface of the ellipse)
+
+## Activity index
+Brush stroke, normalized by the two-stroke interval
+
+Prerequisite: CeleST pipeline #6
+
+Possible alternative in WF-NTP: similar implementation (can also be normalised on the surface of the ellipse)
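
The "similar implementation" suggested for Brush stroke can be made concrete from WF-NTP's per-frame region pixels: flatten each frame's worm coordinates, take the union over a two-stroke (bend) interval, and compare the painted area with the mean instantaneous body area. A later patch in this series implements a variant of this inside `extract_data`; the sketch below is only an illustration (the helper name `brush_stroke` and the row-major flattening are assumptions):

```python
import numpy as np


def brush_stroke(frames_coords, frame_width):
    """frames_coords: one (N, 2) array of (row, col) worm pixels per frame
    of a two-stroke interval; returns the normalised painted area."""
    # Encode each pixel as a single integer so the union is cheap to compute
    flat = [coords[:, 0] * frame_width + coords[:, 1] for coords in frames_coords]
    painted = len(np.unique(np.concatenate(flat)))  # area of the union of pixels
    mean_area = np.mean([len(f) for f in flat])     # average instantaneous area
    return (painted - mean_area) / mean_area        # 0 when the worm never moves
```

Normalising this area ratio by the interval duration, as CeleST does for its Activity index, turns it into a rate.
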
From d72f109a3e93ab32601c3c79aa9ab2c7871cd5c7 Mon Sep 17 00:00:00 2001
From: Perrine
Date: Tue, 30 Aug 2022 11:35:16 +0200
Subject: [PATCH 10/25] add conda environment requirements in yml file for
 windows setup

---
 env_wf_ntp.yml | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)
 create mode 100644 env_wf_ntp.yml

diff --git a/env_wf_ntp.yml b/env_wf_ntp.yml
new file mode 100644
index 0000000..13d17b2
--- /dev/null
+++ b/env_wf_ntp.yml
@@ -0,0 +1,17 @@
+name: v_wf_ntp
+channels:
+  - defaults
+  - conda-forge
+dependencies:
+  - mahotas
+  - matplotlib
+  - numpy=1.21
+  - opencv
+  - pandas
+  - python=3.8
+  - scikit-image
+  - scipy
+  - sqlite
+  - tornado
+  - trackpy
+  - py-opencv
\ No newline at end of file

From 6e965d017ffe7ba4c042be2fc7b600f4abd18f09 Mon Sep 17 00:00:00 2001
From: madvid
Date: Thu, 1 Sep 2022 20:05:21 +0200
Subject: [PATCH 11/25] update (doc setup): update of the step of setup for
 linux

---
 Setup.md | 51 ++++++++++++++++++++++++++++++++-------------------
 1 file changed, 32 insertions(+), 19 deletions(-)

diff --git a/Setup.md b/Setup.md
index f5eedf4..944ebe0 100644
--- a/Setup.md
+++ b/Setup.md
@@ -40,12 +40,25 @@ L'installation est complète.
 ## Installation de WF_NTP
 
 ### Linux
-0. Penser a telecharger les submodules du repository avec `git submodule update --init --recursive`
-1. Créer un environement conda virtuel avec la commande `conda create -n v_wf_ntp python=3.8` et activer le (`conda activate v_wf_ntp`)
-1.1 Si vous utilisez miniconda a la place d'anaconda pensez a ajouter le channel conda-forge `conda config --append channels conda-forge`
-2. Installer les différentes librairies rassemblées dans le fichier `conda_wf_ntp_requirements.txt` via la commande `conda install -f conda_wf_ntp_requirements.txt`
-3. Copier le fichier `run_script/multiwormtracker_app` à la racine du répertoire `WF_NTP`
-4. Ouvrir le fichier `WF_NTP/WF_NTP/WF_NTP_script.py` et remplacer `coordinates='xy'` par `coordinates='rc'` à la ligne 287.
+1. Créer un environement conda virtuel avec la commande:
+   ```bash
+   conda create -n v_wf_ntp python=3.8
+   ```
+2. Activer l'environnement:
+   ```bash
+   conda activate v_wf_ntp
+   ```
+> ![INFO]
+> Si vous utilisez miniconda a la place d'anaconda pensez a ajouter le channel conda-forge grâce à la commande:
+> ```bash
+> conda config --append channels conda-forge
+> ```
+
+3. Installer les différentes librairies rassemblées dans le fichier `conda_wf_ntp_requirements.txt` via la commande:
+   ```bash
+   conda install -f conda_wf_ntp_requirements.txt
+   ```
+3. Copier le fichier `run_script/multiwormtracker_app` à la racine du répertoire `WF_NTP/`
 
 L'installation de `WF_NTP` est complète, vous pouvez lancer le programme avec la commande `./multiwormtracker_app`.
 
@@ -53,25 +66,25 @@ L'installation de `WF_NTP` est complète, vous pouvez lancer le programme avec l
 
 #### Création de l'environnement conda
 1. Ouvrir l'Anaconda Prompt et se placer dans le répertoire `Elegant-Elegans`
-```
-cd path\to\Elegant-Elegans
-```
+   ```bash
+   cd path\to\Elegant-Elegans
+   ```
 2. Créer un environnement conda virtuel `v_wf_ntp`, puis l'activer :
-```
-conda env create -f env_wf_ntp.yml
-conda activate v_wf_ntp
-```
+   ```bash
+   conda env create -f env_wf_ntp.yml
+   conda activate v_wf_ntp
+   ```
 > L'environnement est bien activé si `v_wf_ntp` apparaît entre parenthèses au début de la ligne de commande.
 
 #### Exécution
 1. Dans l'Anaconda Prompt, se placer dans le répertoire `WF_NTP`
-```
-cd path\to\Elegant-Elegans\WF_NTP
-```
+   ```bash
+   cd path\to\Elegant-Elegans\WF_NTP
+   ```
 2. Vérifier que l'environnement `v_wf_ntp` est bien activé et lancer le programme de `WF_NTP` avec la commande :
-```
-python multiwormtracker_app
-```
+   ```bash
+   python multiwormtracker_app
+   ```
 
 ## Exécution de CeleST
 ### Étapes préliminaires

From aef21bd688a5a2be246e881654b3b7fcba13ed6a Mon Sep 17 00:00:00 2001
From: madvid
Date: Sun, 4 Sep 2022 16:54:13 +0200
Subject: [PATCH 12/25] update (set up): admonition style of quote (test if it
 is working)

---
 Setup.md | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/Setup.md b/Setup.md
index 944ebe0..3e5ad32 100644
--- a/Setup.md
+++ b/Setup.md
@@ -48,11 +48,11 @@ L'installation est complète.
    ```bash
    conda activate v_wf_ntp
    ```
-> ![INFO]
-> Si vous utilisez miniconda a la place d'anaconda pensez a ajouter le channel conda-forge grâce à la commande:
-> ```bash
-> conda config --append channels conda-forge
-> ```
+!!! 
info + Si vous utilisez miniconda a la place d'anaconda pensez a ajouter le channel conda-forge grâce à la commande: + ```bash + conda config --append channels conda-forge + ``` 3. Installer les différentes librairies rassemblées dans le fichier `conda_wf_ntp_requirements.txt` via la commande: ```bash From 9c341746da8efe5df6441f67ab5a586d65a0e530 Mon Sep 17 00:00:00 2001 From: madvid Date: Sun, 4 Sep 2022 16:56:25 +0200 Subject: [PATCH 13/25] update (set up): github beta style of quote (test if it is working) --- Setup.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Setup.md b/Setup.md index 3e5ad32..02630f4 100644 --- a/Setup.md +++ b/Setup.md @@ -48,11 +48,11 @@ L'installation est complète. ```bash conda activate v_wf_ntp ``` -!!! info - Si vous utilisez miniconda a la place d'anaconda pensez a ajouter le channel conda-forge grâce à la commande: - ```bash - conda config --append channels conda-forge - ``` +> **Note** +> Si vous utilisez miniconda a la place d'anaconda pensez a ajouter le channel conda-forge grâce à la commande: +> ```bash +> conda config --append channels conda-forge + ``` 3. Installer les différentes librairies rassemblées dans le fichier `conda_wf_ntp_requirements.txt` via la commande: ```bash From 7a01588de3dcfc22c7aef8e7d599d899a01e1431 Mon Sep 17 00:00:00 2001 From: madvid Date: Sun, 4 Sep 2022 16:57:26 +0200 Subject: [PATCH 14/25] update (set up): new line in Note quote --- Setup.md | 1 + 1 file changed, 1 insertion(+) diff --git a/Setup.md b/Setup.md index 02630f4..a5b26d3 100644 --- a/Setup.md +++ b/Setup.md @@ -49,6 +49,7 @@ L'installation est complète. conda activate v_wf_ntp ``` > **Note** +> > Si vous utilisez miniconda a la place d'anaconda pensez a ajouter le channel conda-forge grâce à la commande: > ```bash > conda config --append channels conda-forge From 43e00a7a03ab0d489248539969c2ef0f92c82377 Mon Sep 17 00:00:00 2001 From: madvid Date: Sun, 4 Sep 2022 18:56:57 +0200 Subject: [PATCH 15/25] doc + update (Readme + Setup): traduction de setup.md --- README.md | 4 +- Setup.md | 243 ++++++++++++++++++++++++++++++++++++++++++++++++------ 2 files changed, 218 insertions(+), 29 deletions(-) diff --git a/README.md b/README.md index 0b87eeb..a70c327 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ ## Cloner le répertoire et sous modules Ce répertoire contient un sous-module `WF_NTP`. Afin de cloner ce répertoire et les sous-modules, taper la commande: ```bash -git clone --recurse-submodules git@github.com:42-AI/Elegant-Elegans.git +git clone git@github.com:42-AI/Elegant-Elegans.git ``` --- @@ -13,7 +13,7 @@ git clone --recurse-submodules git@github.com:42-AI/Elegant-Elegans.git ## Cloning the repository and submodules The present repository include a submodule named `WF_NTP`. 
To clone this repository and the submodules, one should enter: ```bash -git clone --recurse-submodules git@github.com:42-AI/Elegant-Elegans.git +git clone git@github.com:42-AI/Elegant-Elegans.git ``` ## Contributors diff --git a/Setup.md b/Setup.md index a5b26d3..bdd14e0 100644 --- a/Setup.md +++ b/Setup.md @@ -1,28 +1,24 @@ -# Installation de l'environnement - -- [Installation de l'environnement](#installation-de-lenvironnement) - - [Introduction](#introduction) - - [Installation d'Anaconda pour Python3.8](#installation-danaconda-pour-python38) - - [Installation de WF_NTP](#installation-de-wf_ntp) - - [Exécution de CeleST](#exécution-de-celest) - - [Étapes préliminaires](#étapes-préliminaires) - - [Octave](#octave) - - [CeleST](#celest) - - [Exécution](#exécution) - - [Interface graphique](#interface-graphique) - - [interface ligne de commande](#interface-ligne-de-commande) +*(english below ([here](#introduction-english-version)))* +# Installation de l'environnement / overall Setup +--- + +- [Introduction](#introduction) +- [Installation d'Anaconda pour Python3.8](#installation-danaconda-pour-python38) +- [Installation de WF_NTP](#installation-de-wf_ntp) +- [Installation de CeleST](#installation-de-celest) + +--- ## Introduction -Comme mentionné dans le Readme à la racine du répertoire, ce projet s'appuie sur 2 outils utilisés dans le monde de la recherche en biologie: -* [CeleST](https://dcs-lcsr.github.io/CeleST/) (version MacOS) qui est un programme développé en Matlab -* [WF\NTP](https://github.com/impact27/WF_NTP) qui est un programme développé en Python -À ce jour, ces 2 outils ne semblent plus être actif en terme de développement et maintenance mais restent tout de même utilisé par la communauté scientifique. +Le projet Elegant-Elegans s'appuie à ce jour sur 2 outils utilisés dans le monde de la recherche en biologie: +* [CeleST](https://dcs-lcsr.github.io/CeleST/) (version MacOS) qui est un programme développé en Matlab. +* [WF\_NTP](https://github.com/impact27/WF_NTP) qui est un programme développé en Python. +À ce jour, ces 2 outils ne semblent plus être actif en terme de développement et maintenance mais semblent encore utilisés par la communauté scientifique. Dans les sections qui vont suivre sont détaillés les étapes d'installation pour **CeleST** et **WF\_NTP**. - ## Installation d'Anaconda pour Python3.8 - +La documentation détaillée d'anaconda se trouve [ici](https://docs.anaconda.com/anaconda/install/). ### Linux 1. télécharger le script d'installation d'anaconda (`wget -P /tmp https://repo.anaconda.com/archive/Anaconda3-2020.02-Linux-x86_64.sh`) > À noter que l'exécutable télécharger par la commande wget est pour un système dont l'architecture est x86 (pour vérifier tapper `arch` ou `uname -m` dans votre terminal) @@ -61,10 +57,9 @@ L'installation est complète. ``` 3. Copier le fichier `run_script/multiwormtracker_app` à la racine du répertoire `WF_NTP/` -L'installation de `WF_NTP` est complète, vous pouvez lancer le programme avec la commande `./multiwormtracker_app`. +L'installation de `WF_NTP` est complète, vous pouvez lancer le programme avec la commande `./multiwormtracker_app` ou bien double cliquer dessus. ### Windows - #### Création de l'environnement conda 1. 
Ouvrir l'Anaconda Prompt et se placer dans le répertoire `Elegant-Elegans` ```bash @@ -87,7 +82,7 @@ L'installation de `WF_NTP` est complète, vous pouvez lancer le programme avec l python multiwormtracker_app ``` -## Exécution de CeleST +## Installation de CeleST ### Étapes préliminaires Afin de pouvoir lancer CeleST, plusieurs étapes préliminaires sont nécessaires: 1. télécharger le code source à partir de http://celest.mbb.rutgers.edu/ @@ -115,7 +110,12 @@ flatpak install flathub org.octave.Octave L'installation d'Octave est complète. Vous pouvez le lancer via votre centre d'applications. -### CeleST +## Installation et exécution de CeleST + +Le code source de CeleST est présent au sein du répertoire pour des raisons de comodités car il a été nécesaire de faire quelques modifications pour le faire fonctionner sur une version récente d'Octave ou Matlab. La version fournie est prête à être lancée avec Octave directement. + + +Si vous désirez exécuter CeleST à partir du code source original, voici les étapes à suivre: 1. décompresser l'archive `'source code.zip'`: ```bash unzip 'source code.zip' -d /path/to/the/desired/directory/celest @@ -130,13 +130,13 @@ L'installation d'Octave est complète. Vous pouvez le lancer via votre centre d' # remplacer les crochets par: tableVideos = uitable(...,'ColumnEditable',false,...); ``` - 2. **Installation MacOS**: #TODO + 2. **Installation MacOS**: #TODO: cela devrait être sensiblement les mêmes étapes que sous Linux. `CeleST` est prêt à être lancer avec `Octave`. -#### Exécution +### Exécution -##### Interface graphique +#### Interface graphique 1. Lancer `Octave` 2. Modifier le répertoire courant afin que celui-ci soit le répertoire contenant le code source (voir image). @@ -146,9 +146,198 @@ L'installation d'Octave est complète. Vous pouvez le lancer via votre centre d' L'interface de `CeleST` s'ouvre, vous pouvez alors utiliser le programme. -##### interface ligne de commande +#### interface ligne de commande 1. `cd source_code` 2. `octave --eval "run(CeleST.m)"` *Note: Les différentes étapes ont été réalisées sur un système Ubuntu22.04, a priori cela devrait être identique sur Ubuntu21.04* + +--- +--- + +- [Introduction](#introduction-english-version) +- [Anaconda for Python3.8](#anaconda-for-python38) +- [WF_NTP setup](#wf_ntp-setup) +- [CeleST setup](#celest-setup) + +# Setup +## Introduction (english version) + +Elegant-Elegans is based on 2 tools used by the academic biological community: +* [CeleST](https://dcs-lcsr.github.io/CeleST/) (MacOS version) written in Matlab. +* [WF\_NTP](https://github.com/impact27/WF_NTP) written in Python. + +There are no recent activities on the respective github repository, but it seems that these tools are still used by some scientist. + +In the following section, we described the different steps to setup a working environment for **CeleST** and **WF\_NTP**. + +### Anaconda for Python3.8 +Anaconda documentation can be found [here](https://docs.anaconda.com/anaconda/install/) (*installation is more detailled there*) +#### Linux +1. Download installation script of anaconda (`wget -P /tmp https://repo.anaconda.com/archive/Anaconda3-2020.02-Linux-x86_64.sh`). + > **info** + > + > Note that the downloaded executable with the command above is for a x86 architecture system. + > To check your architecture, you can look at the output of: + > ```bash + > arch + > # or + > uname -m + >``` +2. 
Add the executable permission (`sudo chmod u+x Anaconda3-2020.02-Linux-x86_64.sh`) and execute the script (```./Anaconda3-2020.02-Linux-x86_64.sh```) +3. Add the repository `anaconda3/bin` at your `PATH` variable environment by writing the export in your `~/.bashrc` ou `~/.zshrc` (`export PATH="$HOME/anaconda3/bin:$HOME/.local/bin:$PATH"` at the end of `~/.bashrc` ou `~/.zshrc`). +4. Now you can close and reopen your terminal and execute the command `conda init bash` (ou `conda init zsh` depending on the shell you are using). +5. Close and reopen your terminal one more time. + +The installation should be complete. + +#### Windows +1. Download the installation executable [Anaconda](https://www.anaconda.com/products/distribution) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html) for Python 3.8. +2. Execute the script and follow the instructions until the installation is completed. + +### WF_NTP setup + +#### Linux +1. Créer un environement conda virtuel avec la commande: + ```bash + conda create -n v_wf_ntp python=3.8 + ``` +2. Activer l'environnement: + ```bash + conda activate v_wf_ntp + ``` +> **Note** +> +> Si vous utilisez miniconda a la place d'anaconda pensez a ajouter le channel conda-forge grâce à la commande: +> ```bash +> conda config --append channels conda-forge + ``` + +3. Installer les différentes librairies rassemblées dans le fichier `conda_wf_ntp_requirements.txt` via la commande: + ```bash + conda install -f conda_wf_ntp_requirements.txt + ``` +3. Copier le fichier `run_script/multiwormtracker_app` à la racine du répertoire `WF_NTP/` + +L'installation de `WF_NTP` est complète, vous pouvez lancer le programme avec la commande `./multiwormtracker_app` ou bien double cliquer dessus. + +#### Windows +##### Création de l'environnement conda +1. Ouvrir l'Anaconda Prompt et se placer dans le répertoire `Elegant-Elegans` + ```bash + cd path\to\Elegant-Elegans + ``` +2. Créer un environnement conda virtuel `v_wf_ntp`, puis l'activer : + ```bash + conda env create -f env_wf_ntp.yml + conda activate v_wf_ntp + ``` + > L'environnement est bien activé si `v_wf_ntp` apparaît entre parenthèses au début de la ligne de commande. + +##### Exécution +1. Dans l'Anaconda Prompt, se placer dans le répertoire `WF_NTP` + ```bash + cd path\to\Elegant-Elegans\WF_NTP + ``` +2. Vérifier que l'environnement `v_wf_ntp` est bien activé et lancer le programme de `WF_NTP` avec la commande : + ```bash + python multiwormtracker_app + ``` + +### CeleST setup +### Étapes préliminaires +Afin de pouvoir lancer CeleST, plusieurs étapes préliminaires sont nécessaires: +1. télécharger le code source à partir de http://celest.mbb.rutgers.edu/ +2. installer le logiciel **Octave** ([ici](https://wiki.octave.org/Octave_for_GNU/Linux) pour les systèmes Linux ou encore [là](https://wiki.octave.org/Octave_for_macOS) pour MacOS) + +### Octave +L'installation du logiciel Octave est simple. +Si vous êtes *root* et sous un système *Ubuntu*, vous avez simplement à effectuer les commandes: +```bash +apt install octave +apt install liboctave-dev # development files +``` +Pour un système Linux différent, vous trouverez la démarche sur la page [wiki d'Octave](https://wiki.octave.org/Octave_for_GNU/Linux) + +Dans le cas où vous n'êtes pas *root*, il est possible d'installer Octave en tant que distribution indépendante au sein d'Anaconda. +Dans ce cas vous pouvez l'ajouter à l'environnement conda `v_wf_ntp` (de sorte à avoir un environnement unique) où bien dans un second environnement conda. 
+Une fois l'environnement activé, effectué la commande: +```bash +conda install -c conda-forge octave +``` +Il est également possible d'installer Octave avec `flatpak`: +```bash +flatpak install flathub org.octave.Octave +``` + +L'installation d'Octave est complète. Vous pouvez le lancer via votre centre d'applications. + +### Installation et exécution de CeleST + +CeleST source code can be found in the repository Elegant-Elegans for simplicity. It has been slighly modified to work with recent version of Octave or Matlab. Thus it should work directly. + +!!! warning + `CeleST` code is in Matlab and has been written few years ago. So the code is obsolete. + +If one want tot execute CeleST from the orignal source code, here are the steps you have to follow to make it work: +1. unzip the file `'source code.zip'`: + ```bash + unzip 'source code.zip' -d /path/to/the/desired/directory/celest + ``` +2. You will find 2 folders within `CeleST`: + * `__MACOSX` (code source for MacOS) + * `source code` (code source for Linux) + 1. **Linux Installation**: In the folder (`source code`) one have to make the following modification:: + ```bash + # CeleST.m file - line 174 + tableVideos = uitable(...,'ColumnEditable',[],...); + # replacing the square bracket by: + tableVideos = uitable(...,'ColumnEditable',false,...); + ``` + 2. **MacOS Installation**: #TODO: should be quite similar to Linux. + +`CeleST` should be ready to be execute with `Octave`. + +#### Execution +##### Graphical interface + +1. Launch `Octave` +2. Modify the `current folder` to be the folder containing the source code (see picture). +![octave change directory](.assets/octave_change_directory.png) +3. right click on the file `CeleST.m` and select `Run`: +![CeleST run selection](.assets/CeleST_m_run_selection.png) + +`CeleST` interface should be opening, you can use `CelesT` with Octave. + +##### interface ligne de commande + +1. `cd source_code` +2. `octave --eval "run(CeleST.m)"` + +*Note: The different steps has been realised with Ubuntu22.04, a priori this should be identical on Ubuntu21.04* + + +--- +--- + +## Contribution + +Contributors must used black formatter, used isort and flake8 to check their code. +Thus before doing a pull request, you have to setup your local repository to install the pre-commit hooks. + + +Here are the steps to setup the pre-commit hooks: + +* Install pre-commit + ```sh + pip install pre-commit + ``` +* Install the hooks with: + ```sh + pre-commit install + ``` +* run the hooks with + ```sh + pre-commit run -a + ``` From 7dc0bdfd84c4fb2d8013c03f52f006a3b4cc6bd4 Mon Sep 17 00:00:00 2001 From: Matthieu DAVID Date: Sun, 4 Sep 2022 23:22:48 +0200 Subject: [PATCH 16/25] 61 update documentation minor fix on readme (#63) * doc + update (Readme + Setup): traduction de setup.md --- Setup.md | 65 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 33 insertions(+), 32 deletions(-) diff --git a/Setup.md b/Setup.md index bdd14e0..bae2183 100644 --- a/Setup.md +++ b/Setup.md @@ -53,7 +53,7 @@ L'installation est complète. 3. Installer les différentes librairies rassemblées dans le fichier `conda_wf_ntp_requirements.txt` via la commande: ```bash - conda install -f conda_wf_ntp_requirements.txt + conda install --file conda_wf_ntp_requirements.txt ``` 3. Copier le fichier `run_script/multiwormtracker_app` à la racine du répertoire `WF_NTP/` @@ -161,7 +161,6 @@ L'interface de `CeleST` s'ouvre, vous pouvez alors utiliser le programme. 
- [WF_NTP setup](#wf_ntp-setup)
 - [CeleST setup](#celest-setup)
 
-# Setup
 ## Introduction (english version)
 
 Elegant-Elegans is based on 2 tools used by the academic biological community:
@@ -199,88 +198,90 @@ The installation should be complete.
 ### WF_NTP setup
 
 #### Linux
-1. Créer un environement conda virtuel avec la commande:
+1. Create a virtual conda environment with the command:
    ```bash
    conda create -n v_wf_ntp python=3.8
    ```
-2. Activer l'environnement:
+2. Activate the environment:
    ```bash
    conda activate v_wf_ntp
    ```
 > **Note**
 >
-> Si vous utilisez miniconda a la place d'anaconda pensez a ajouter le channel conda-forge grâce à la commande:
+> If you are using Miniconda instead of Anaconda, you need to add the `conda-forge` channel:
 > ```bash
 > conda config --append channels conda-forge
 ```
-3. Installer les différentes librairies rassemblées dans le fichier `conda_wf_ntp_requirements.txt` via la commande:
+3. Install the libraries gathered in `conda_wf_ntp_requirements.txt` with the command:
    ```bash
-   conda install -f conda_wf_ntp_requirements.txt
+   conda install --file conda_wf_ntp_requirements.txt
    ```
-3. Copier le fichier `run_script/multiwormtracker_app` à la racine du répertoire `WF_NTP/`
+4. Copy the file `run_script/multiwormtracker_app` to the root of the `WF_NTP/` repository
 
-L'installation de `WF_NTP` est complète, vous pouvez lancer le programme avec la commande `./multiwormtracker_app` ou bien double cliquer dessus.
+Installation of `WF_NTP` should be complete; you can run the program with the command `./multiwormtracker_app` or double click on it.
 
 #### Windows
-##### Création de l'environnement conda
-1. Ouvrir l'Anaconda Prompt et se placer dans le répertoire `Elegant-Elegans`
+##### Conda environment
+1. Open the Anaconda Prompt and go to the `Elegant-Elegans` folder.
    ```bash
    cd path\to\Elegant-Elegans
    ```
-2. Créer un environnement conda virtuel `v_wf_ntp`, puis l'activer :
+2. Create a conda virtual environment named `v_wf_ntp`, and activate it:
    ```bash
    conda env create -f env_wf_ntp.yml
    conda activate v_wf_ntp
    ```
-   > L'environnement est bien activé si `v_wf_ntp` apparaît entre parenthèses au début de la ligne de commande.
+   > The virtual environment is correctly activated if `v_wf_ntp` appears between parentheses at the beginning of the line in the prompt.
 
-##### Exécution
-1. Dans l'Anaconda Prompt, se placer dans le répertoire `WF_NTP`
+##### Execution
+1. In the Anaconda Prompt, go to the folder `WF_NTP`
    ```bash
    cd path\to\Elegant-Elegans\WF_NTP
    ```
-2. Vérifier que l'environnement `v_wf_ntp` est bien activé et lancer le programme de `WF_NTP` avec la commande :
+2. Verify `v_wf_ntp` is activated and run the program `WF_NTP` with the command:
    ```bash
    python multiwormtracker_app
    ```
 
 ### CeleST setup
-### Étapes préliminaires
-Afin de pouvoir lancer CeleST, plusieurs étapes préliminaires sont nécessaires:
-1. télécharger le code source à partir de http://celest.mbb.rutgers.edu/
-2. installer le logiciel **Octave** ([ici](https://wiki.octave.org/Octave_for_GNU/Linux) pour les systèmes Linux ou encore [là](https://wiki.octave.org/Octave_for_macOS) pour MacOS)
+### Preliminary steps (if not using the provided CeleST code)
+To run `CeleST`, several steps are needed:
+1. Download the source code from http://celest.mbb.rutgers.edu/
+2. Install **Octave** ([here](https://wiki.octave.org/Octave_for_GNU/Linux) for Linux systems or [there](https://wiki.octave.org/Octave_for_macOS) for MacOS)
 
 ### Octave
-L'installation du logiciel Octave est simple.
-Si vous êtes *root* et sous un système *Ubuntu*, vous avez simplement à effectuer les commandes:
+Installation steps are pretty simple.
+
+If you are a *root* user and on an *Ubuntu* system, you only need to run the following commands:
 ```bash
 apt install octave
 apt install liboctave-dev # development files
 ```
-Pour un système Linux différent, vous trouverez la démarche sur la page [wiki d'Octave](https://wiki.octave.org/Octave_for_GNU/Linux)
+For a different Linux distribution, one can find the steps on the [Octave wiki](https://wiki.octave.org/Octave_for_GNU/Linux).
 
-Dans le cas où vous n'êtes pas *root*, il est possible d'installer Octave en tant que distribution indépendante au sein d'Anaconda.
-Dans ce cas vous pouvez l'ajouter à l'environnement conda `v_wf_ntp` (de sorte à avoir un environnement unique) où bien dans un second environnement conda.
-Une fois l'environnement activé, effectué la commande:
+If you are not a *root* user, it is possible to install Octave as an independent distribution within Anaconda.
+In that case, you can either add it to your conda virtual environment `v_wf_ntp` or create a new virtual environment.
+
+Once you have chosen the virtual environment in which to add Octave, activate it and run the command:
 ```bash
 conda install -c conda-forge octave
 ```
-Il est également possible d'installer Octave avec `flatpak`:
+
+As a last option, you can install Octave with `flatpak`:
 ```bash
 flatpak install flathub org.octave.Octave
 ```
 
-L'installation d'Octave est complète. Vous pouvez le lancer via votre centre d'applications.
+Octave installation should be complete. You can run it from your application center.
 
-### Installation et exécution de CeleST
+### CeleST modification
 
 CeleST source code can be found in the repository Elegant-Elegans for simplicity. It has been slighly modified to work with recent version of Octave or Matlab. Thus it should work directly.
 
 !!! warning
     `CeleST` code is in Matlab and has been written few years ago. So the code is obsolete.
 
-If one want tot execute CeleST from the orignal source code, here are the steps you have to follow to make it work:
+If one wants to use CeleST from the original source code, here are the steps to follow to make it work:
 1. unzip the file `'source code.zip'`:
    ```bash
    unzip 'source code.zip' -d /path/to/the/desired/directory/celest
    ```
 2. You will find 2 folders within `CeleST`:
    * `__MACOSX` (code source for MacOS)
    * `source code` (code source for Linux)
    1. **Linux Installation**: In the folder (`source code`) one have to make the following modification::
       ```bash
       # CeleST.m file - line 174
       tableVideos = uitable(...,'ColumnEditable',[],...);
       # replacing the square bracket by:
       tableVideos = uitable(...,'ColumnEditable',false,...);
       ```
    2. **MacOS Installation**: #TODO: should be quite similar to Linux.
 
 `CeleST` should be ready to be execute with `Octave`.
 
 #### Execution
 ##### Graphical interface
 
 1. Launch `Octave`
 2. Modify the `current folder` to be the folder containing the source code (see picture).
 ![octave change directory](.assets/octave_change_directory.png)
 3. right click on the file `CeleST.m` and select `Run`:
 ![CeleST run selection](.assets/CeleST_m_run_selection.png)
 
 `CeleST` interface should be opening, you can use `CelesT` with Octave.
 
-##### interface ligne de commande
+##### Command Line Interface (CLI)
 
 1. `cd source_code`
 2. `octave --eval "run(CeleST.m)"`

From ac8a686ccc6ff798591044e6cd1c3846036c6618 Mon Sep 17 00:00:00 2001
From: madvid
Date: Sat, 6 Aug 2022 16:12:46 +0200
Subject: [PATCH 17/25] fix (bug with converter): maybe this is the bug fix,
 need to get more info about the images sequence leading to bug

---
 converter/convert.py | 2 +-
 converter/parser.py  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/converter/convert.py b/converter/convert.py
index d51336d..938ec18 100644
--- a/converter/convert.py
+++ b/converter/convert.py
@@ -52,7 +52,7 @@ def tiff_images_to_video(dir_path, video_name, format, metadata):
     # Loop through each image and add them to the VideoWriter object
     frames_list = metadata.get("frame_names")
     for file_name in frames_list:
-        img = cv2.VideoCapture(dir_path + file_name)
+        img = cv2.VideoCapture(f"{dir_path}/{file_name}")
         ret, frame = img.read()
         if ret:
             video.write(frame)
diff --git a/converter/parser.py b/converter/parser.py
index 884cc77..01b0e40 100644
--- a/converter/parser.py
+++ b/converter/parser.py
@@ -30,8 +30,8 @@ def parser() -> dict:
         help="Name of the converted video (without the video format).",
     )
     parser.add_argument(
-        "-f",
         "--format",
+        "-f",
         type=str,
         required=False,
         default="mp4",
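
The converter fix above replaces bare string concatenation with an f-string so that a path separator is guaranteed between the directory and the file name. `os.path.join` expresses the same intent and stays platform-safe; the helper below is only an illustrative sketch, not part of the converter:

```python
import os

import cv2


def read_frame(dir_path, file_name):
    """Read a single tif frame; cv2.imread returns None on failure,
    mirroring the `ret` check done on cv2.VideoCapture in convert.py."""
    full_path = os.path.join(dir_path, file_name)  # no missing-separator bug
    img = cv2.imread(full_path)
    if img is None:
        raise FileNotFoundError(full_path)
    return img
```
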
From c926fae5b98ccc00e58f7a9ac1426c7e46993733 Mon Sep 17 00:00:00 2001
From: cnstll
Date: Wed, 3 Aug 2022 17:54:43 +0200
Subject: =?UTF-8?q?Calculation=20of=20Activity=20Index=20|?=
 =?UTF-8?q?=C2=A0Printing=20of=20global=20statistics?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 WF_NTP/WF_NTP/WF_NTP_script.py | 35 ++++++++++++++++++++++++++++------
 1 file changed, 29 insertions(+), 6 deletions(-)

diff --git a/WF_NTP/WF_NTP/WF_NTP_script.py b/WF_NTP/WF_NTP/WF_NTP_script.py
index b344cde..8b5aca0 100755
--- a/WF_NTP/WF_NTP/WF_NTP_script.py
+++ b/WF_NTP/WF_NTP/WF_NTP_script.py
@@ -11,19 +11,31 @@
 import os
 import pickle
 import sys
+import pickle
+import sys
 import time
 import traceback
 import warnings
 from collections import Counter, defaultdict
+import cv2
+import traceback
+import warnings
+from collections import Counter, defaultdict
 
 import cv2
 import mahotas as mh
 import matplotlib.cm as cm
 import matplotlib.path as mplPath
 import matplotlib.pyplot as plt
 import numpy as np
+import matplotlib.cm as cm
+import matplotlib.path as mplPath
+import matplotlib.pyplot as plt
+import numpy as np
 import pandas as pd
 import skimage.draw
+import skimage.draw
 import trackpy as tp
 from scipy import interpolate, ndimage
 from scipy.signal import savgol_filter
@@ -196,6 +208,7 @@ def release(self):
 
 def track_all_locations(video, settings, stdout_queue):
     """Track and get all locations."""
+
     def get_Z_brightness(zi):
         if settings["keep_paralyzed_method"]:
             return find_Z_with_paralyzed(video, settings, *zi)
@@ -249,9 +262,9 @@ def process_frame(settings, Z, mean_brightness, nframes, args=None, return_plot=
     frame = np.array(frameorig, dtype=np.float64)
     frame = np.abs(frame - Z) * settings["all_regions"]
     if (frame > 1.1).any():
-        frame /= 255.0
+        frame /= 255.00
 
-    thresholded = frame > (settings["threshold"] / 255.0)
+    thresholded = frame > (settings["threshold"] / 255.00)
     opening = settings["opening"]
     closing = settings["closing"]
     save_folder = settings["save_as"]
@@ -295,7 +308,7 @@ def process_frame(settings, Z, mean_brightness, nframes, args=None, return_plot=
     if settings["do_full_prune"]:
         skel_labeled = prune_fully(skel_labeled)
 
-    skel_props = 
measure.regionprops(skel_labeled, coordinates="xy") for j in range(len(skel_props)): prop_list[j]["length"] = skel_props[j].area prop_list[j]["eccentricity"] = skel_props[j].eccentricity @@ -458,17 +471,20 @@ def extract_data(track, settings): # Velocity particle_dataframe.at[p, "Speed"] = extract_velocity( T[P == p], X[P == p], Y[P == p], settings + ) # Max velocity: 90th percentile to avoid skewed results due to tracking # inefficiency particle_dataframe.at[p, "Max speed"] = extract_max_speed( T[P == p], X[P == p], Y[P == p], settings + ) # Move per bend particle_dataframe.at[p, "Dist per bend"] = extract_move_per_bend( bl, T[P == p], X[P == p], Y[P == p], px_to_mm + ) particle_dataframe.at[p, "bends"] = bl @@ -491,11 +507,16 @@ def extract_data(track, settings): particle_dataframe.at[index, "bends_in_movie"] = ( last_bend / np.ptp(T[P == index]) * x * fps ) + particle_dataframe.at[index, "activity_index"] = ( + particle_dataframe.at[index, "Area"] * particle_dataframe.at[index, "BPM"] / 120 + + ) particle_dataframe.at[index, "activity_index"] = ( particle_dataframe.at[index, "Area"] * particle_dataframe.at[index, "BPM"] / 120 ) particle_dataframe.at[index, "Appears in frames"] = len( particle_dataframe.at[index, "bends"] + ) # Cut off-tool for skewed statistics @@ -560,6 +581,7 @@ def extract_data(track, settings): particle_dataframe.loc[:, "Moving"] = np.logical_or( particle_dataframe.loc[:, "BPM"] > settings["maximum_bpm"], particle_dataframe.loc[:, "Speed"] > settings["maximum_velocity"], + , ) return dict( @@ -576,13 +598,14 @@ def extract_data(track, settings): # ============================================================================= + def find_Z(video, settings, i0, i1): """Get thresholded image.""" # Adjust brightness: frame = video[(i0 + i1) // 2] mean_brightness = np.mean(frame) if mean_brightness > 1: - mean_brightness /= 255.0 + mean_brightness /= 255.00 Z = np.zeros_like(frame, dtype=np.float64) if settings["darkfield"]: minv = np.zeros_like(frame, dtype=np.float64) + 256 @@ -632,6 +655,7 @@ def find_skel_endpoints(skel): np.array([[1, 2, 0], [2, 1, 0], [0, 0, 0]]), np.array([[2, 0, 0], [1, 1, 0], [2, 0, 0]]), np.array([[0, 0, 0], [2, 1, 0], [1, 2, 0]]), + , ] ep = 0 @@ -1023,7 +1047,6 @@ def write_particles(settings, particles_dataframe, filename): :, [ "BPM", - "activity_index", "bends_in_movie", "Speed", "Max speed", @@ -1040,7 +1063,6 @@ def write_particles(settings, particles_dataframe, filename): x = settings["limit_images_to"] / settings["fps"] df.columns = [ "BPM", - "Activity Index", f"Bends per {x:.2f} s", "Speed", "Max speed", @@ -1162,6 +1184,7 @@ def small_imshow(settings, img, *args, **kwargs): np.asarray(img, float), (int(img.shape[0] * factor), int(img.shape[1] * factor)), preserve_range=True, + , ) plt.clf() plt.imshow(img, *args, extent=[0, original_shape[1], original_shape[0], 0], **kwargs) From 095b651aff75df274c44c2e182a6753b8c27e3b1 Mon Sep 17 00:00:00 2001 From: COTHSC Date: Thu, 11 Aug 2022 17:26:15 +0200 Subject: [PATCH 19/25] added csv of available activity index values for celest and wfntp --- .../CeleST_vs_WFNTP_activity_index_comparison.csv | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 experiments/activity_index/CeleST_vs_WFNTP_activity_index_comparison.csv diff --git a/experiments/activity_index/CeleST_vs_WFNTP_activity_index_comparison.csv b/experiments/activity_index/CeleST_vs_WFNTP_activity_index_comparison.csv new file mode 100644 index 0000000..68b026a --- /dev/null +++ 
b/experiments/activity_index/CeleST_vs_WFNTP_activity_index_comparison.csv @@ -0,0 +1,9 @@ +video_src,CeleST_activity_index_10_normalized,Celest_activity_index_median,Celest_activity_index_median_normalized,WF_NTP_activity_index_normalized +sample05,-1.30947885393232,140.453948,-0.735046359658527,-1.28607487396111 +sample05,0.366508953118392,170.811533,0.207815446789699,-0.248306412784322 +sample05,1.06576611343826,206.966828,1.33074557302894,0.99869915010498 +sample05,-0.12279621262433,138.249455,-0.803514660160113,0.535682136640454 +sample01,-0.636284514236627,55.244184,-0.703842871953718,-0.428269899834041 +sample01,-0.230149081290255,78.927102,-0.0532987065501446,1.11493457446069 +sample01,1.71021625284489,132.961965,1.43098058931329,-0.566481289642593 +sample01,-0.060401910118327,56.336468,-0.673839010809424,-1.11966300038972 From cd3845e3305ba9d62c102a86fe09911b0b59bb07 Mon Sep 17 00:00:00 2001 From: COTHSC Date: Wed, 24 Aug 2022 23:40:37 +0200 Subject: [PATCH 20/25] implemented more accurate activity index --- WF_NTP/WF_NTP/WF_NTP_script.py | 73 ++++++++++++++++++++++++++++------ 1 file changed, 60 insertions(+), 13 deletions(-) diff --git a/WF_NTP/WF_NTP/WF_NTP_script.py b/WF_NTP/WF_NTP/WF_NTP_script.py index 8b5aca0..4794ab8 100755 --- a/WF_NTP/WF_NTP/WF_NTP_script.py +++ b/WF_NTP/WF_NTP/WF_NTP_script.py @@ -17,11 +17,7 @@ import traceback import warnings from collections import Counter, defaultdict - -import cv2 -import traceback -import warnings -from collections import Counter, defaultdict +from tkinter import Image import cv2 import mahotas as mh @@ -37,6 +33,8 @@ import skimage.draw import skimage.draw import trackpy as tp +from PIL import Image +from pylab import imshow, show from scipy import interpolate, ndimage from scipy.signal import savgol_filter from skimage import io, measure, morphology @@ -105,6 +103,7 @@ def run_tracker(settings, stdout_queue=None): all_regions = im > 0.1 else: all_regions = np.zeros_like(video[0]) + for key, d in list(regions.items()): im = np.zeros_like(video[0]) rr, cc = skimage.draw.polygon(np.array(d["y"]), np.array(d["x"])) @@ -131,6 +130,8 @@ def run_tracker(settings, stdout_queue=None): if settings["stop_after_example_output"]: return print_data, None track = form_trajectories(locations, settings) + print("this is what track looks like") + print(track) results = extract_data(track, settings) if not check_for_worms(results["particle_dataframe"].index, settings): @@ -224,7 +225,6 @@ def get_Z_brightness(zi): for i, j in apply_indeces ] - # Get frames0 print material Z, mean_brightness = get_Z_brightness(Z_indeces[0]) print_data = process_frame( settings, Z, mean_brightness, len(video), args=(0, video[0]), return_plot=True @@ -290,6 +290,8 @@ def process_frame(settings, Z, mean_brightness, nframes, args=None, return_plot= labeled_removed, n_left = mh.labeled.relabel(labeled_removed) props = measure.regionprops(labeled_removed) + frame_width = frame_after_close.shape[1] + # this is where the magic happens prop_list = [ { "area": props[j].area, @@ -297,9 +299,12 @@ def process_frame(settings, Z, mean_brightness, nframes, args=None, return_plot= "eccentricity": props[j].eccentricity, "area_eccentricity": props[j].eccentricity, "minor_axis_length": props[j].minor_axis_length / (props[j].major_axis_length + 0.001), + "coords": props[j].coords, + "frame_width": frame_width, } for j in range(len(props)) ] + if settings["skeletonize"]: skeletonized_frame = morphology.skeletonize(frame_after_close) skeletonized_frame = prune(skeletonized_frame, 
settings["prune_size"]) @@ -353,6 +358,37 @@ def args(): return map(func, args()) +def coords_to_one_d(array, frame_width): + one_d = [] + # TODO get the video dimentions somehow + for arr in array: + one_d.append(arr[1] * frame_width + arr[0]) + return one_d + + +def activity_index(data): + test = data + activity_indices = [] + frame_width = 696 + test["coords"] = test["coords"].apply(coords_to_one_d, frame_width=frame_width) + # get the last bend number + last_bend = data["bends"].max() + bend = 0 + while bend <= last_bend: + if bend % 2: + array = test[["frame", "coords", "bends"]][test["bends"].between(bend - 1, bend)] + sum = list(set(array["coords"].sum())) + total_area = len(sum) + array["area"] = array["coords"].apply(lambda x: len(x)) + average_area = array["area"].sum() / len(array["area"]) + activity_index = total_area - average_area + activity_index = activity_index / average_area + activity_index = 1 - (average_area / (total_area - average_area)) + activity_indices.append(activity_index) + bend += 1 + return activity_indices + + def form_trajectories(loc, settings): """Form worm trajectories.""" print("Forming worm trajectories...", end=" ") @@ -364,6 +400,8 @@ def form_trajectories(loc, settings): "area": [], "minor_axis_length": [], "area_eccentricity": [], + "frame_width": [], + "coords": [], } for t, l in enumerate(loc): data["x"] += [d["centroid"][0] for d in l] @@ -373,7 +411,10 @@ def form_trajectories(loc, settings): data["minor_axis_length"] += [d["minor_axis_length"] for d in l] data["area"] += [d["area"] for d in l] data["frame"] += [t] * len(l) + data["frame_width"] += [d["frame_width"] for d in l] + data["coords"] += [d["coords"] for d in l] data = pd.DataFrame(data) + data.head() try: track = tp.link_df(data, search_range=settings["max_dist_move"], memory=settings["memory"]) except tp.linking.SubnetOversizeException: @@ -458,6 +499,16 @@ def extract_data(track, settings): else: bl = np.array([0.0] * len(T[P == p])) + coords = track[["coords", "frame_width"]][P == p] + coords = coords.reset_index() + coords["bends"] = 0 + coords["bends"] = bl + activity_indices = activity_index(coords) + if len(activity_indices): + particle_dataframe.at[p, "activity_index"] = np.median(activity_indices) + else: + particle_dataframe.at[p, "activity_index"] = 0 + px_to_mm = settings["px_to_mm"] # Area if settings["skeletonize"]: @@ -507,13 +558,9 @@ def extract_data(track, settings): particle_dataframe.at[index, "bends_in_movie"] = ( last_bend / np.ptp(T[P == index]) * x * fps ) - particle_dataframe.at[index, "activity_index"] = ( - particle_dataframe.at[index, "Area"] * particle_dataframe.at[index, "BPM"] / 120 - - ) - particle_dataframe.at[index, "activity_index"] = ( - particle_dataframe.at[index, "Area"] * particle_dataframe.at[index, "BPM"] / 120 - ) + # particle_dataframe.at[index, "activity_index"] = ( + # particle_dataframe.at[index, "Area"] * particle_dataframe.at[index, "BPM"] / 120 + # ) particle_dataframe.at[index, "Appears in frames"] = len( particle_dataframe.at[index, "bends"] From 3df382a4f4a06742b38e791fb6a8c9cf00ed8653 Mon Sep 17 00:00:00 2001 From: COTHSC Date: Wed, 24 Aug 2022 23:41:54 +0200 Subject: [PATCH 21/25] implemented more accurate activity index --- WF_NTP/WF_NTP/WF_NTP_script.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/WF_NTP/WF_NTP/WF_NTP_script.py b/WF_NTP/WF_NTP/WF_NTP_script.py index 4794ab8..dac5828 100755 --- a/WF_NTP/WF_NTP/WF_NTP_script.py +++ b/WF_NTP/WF_NTP/WF_NTP_script.py @@ -382,8 +382,9 @@ def 
activity_index(data): array["area"] = array["coords"].apply(lambda x: len(x)) average_area = array["area"].sum() / len(array["area"]) activity_index = total_area - average_area - activity_index = activity_index / average_area - activity_index = 1 - (average_area / (total_area - average_area)) + # this is where we want to play around with theories as to how exactly CeleST calculates the activity_intex + # activity_index = activity_index / average_area + # activity_index = 1 - (average_area / (total_area - average_area)) activity_indices.append(activity_index) bend += 1 return activity_indices From d2e762e24fc946226afbbab766d0787d11cf1f3a Mon Sep 17 00:00:00 2001 From: COTHSC Date: Wed, 31 Aug 2022 13:35:10 +0200 Subject: [PATCH 22/25] removed hardcoded image width --- WF_NTP/WF_NTP/WF_NTP_script.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/WF_NTP/WF_NTP/WF_NTP_script.py b/WF_NTP/WF_NTP/WF_NTP_script.py index dac5828..bde0536 100755 --- a/WF_NTP/WF_NTP/WF_NTP_script.py +++ b/WF_NTP/WF_NTP/WF_NTP_script.py @@ -369,7 +369,7 @@ def coords_to_one_d(array, frame_width): def activity_index(data): test = data activity_indices = [] - frame_width = 696 + frame_width = data["frame_width"].iloc[0] test["coords"] = test["coords"].apply(coords_to_one_d, frame_width=frame_width) # get the last bend number last_bend = data["bends"].max() @@ -382,8 +382,7 @@ def activity_index(data): array["area"] = array["coords"].apply(lambda x: len(x)) average_area = array["area"].sum() / len(array["area"]) activity_index = total_area - average_area - # this is where we want to play around with theories as to how exactly CeleST calculates the activity_intex - # activity_index = activity_index / average_area + activity_index = activity_index / average_area # activity_index = 1 - (average_area / (total_area - average_area)) activity_indices.append(activity_index) bend += 1 @@ -505,6 +504,7 @@ def extract_data(track, settings): coords["bends"] = 0 coords["bends"] = bl activity_indices = activity_index(coords) + if len(activity_indices): particle_dataframe.at[p, "activity_index"] = np.median(activity_indices) else: From b7998b30e735fecf7565003b50f17cc7e7c50688 Mon Sep 17 00:00:00 2001 From: cnstll Date: Mon, 12 Sep 2022 23:04:37 +0200 Subject: [PATCH 23/25] Normed auditor files --- auditor/__main__.py | 34 +++++++++ auditor/auditor.py | 46 ++++++++++++ auditor/parser.py | 176 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 256 insertions(+) create mode 100644 auditor/__main__.py create mode 100644 auditor/auditor.py create mode 100644 auditor/parser.py diff --git a/auditor/__main__.py b/auditor/__main__.py new file mode 100644 index 0000000..e0b3077 --- /dev/null +++ b/auditor/__main__.py @@ -0,0 +1,34 @@ +from os import stat + +from auditor.auditor import path_checker +from auditor.parser import audit_images, load_metadata, parser + +# ########################################################################## # +# FUNCTIONS # +# ########################################################################## # + + +def main(): + # parsing the argument(s) + args = parser() + + # print(args) + dir_path = args.path + + # checker of the path + path_checker(dir_path) + + # load the json metadata file + metadata = load_metadata(dir_path) + + # auditor of the tiff images based on metadata. 
+    # Retrieving some info about frames
+    stat_frames = audit_images(metadata, dir_path)
+
+
+# ########################################################################## #
+#                                    MAIN                                    #
+# ########################################################################## #
+
+if __name__ == "__main__":
+    main()
diff --git a/auditor/auditor.py b/auditor/auditor.py
new file mode 100644
index 0000000..0daf67f
--- /dev/null
+++ b/auditor/auditor.py
@@ -0,0 +1,46 @@
+import os
+
+NB_LIMIT = 10
+
+# Checker related to the argument parsed.
+def path_checker(path: str):
+    """Check the existence and access of a directory.
+
+    Arguments:
+        path (str): path to the input directory (containing the images).
+
+    Raises:
+        NotADirectoryError: directory doesn't exist or is not a directory.
+        PermissionError: user doesn't have access to the directory.
+    """
+    if not os.path.isdir(path):
+        raise NotADirectoryError(path + " is not a directory.")
+    if not os.access(path, os.R_OK | os.W_OK):
+        raise PermissionError("Permission denied to " + path)
+
+
+def path_inside_checker(dir_path: str):
+    """Check that the directory only contains .tif files, .json files or
+    metadata.txt, and that a metadata.txt file is present.
+
+    Arguments:
+        dir_path (str): path to the directory to check.
+
+    Raises:
+        FileNotFoundError: no metadata.txt file was found.
+        Exception: a file other than .tif, .json or metadata.txt was found.
+    """
+    number_tif_files = 0
+    metadata_file = False
+    for file in os.listdir(dir_path):
+        if file[-4:] == ".tif":
+            number_tif_files += 1
+        elif file == "metadata.txt":
+            metadata_file = True
+        elif file[-5:] == ".json":
+            continue
+        else:
+            raise Exception("File other than .tif or .json or metadata.txt found")
+    if metadata_file is False:
+        raise FileNotFoundError("No metadata file found")
diff --git a/auditor/parser.py b/auditor/parser.py
new file mode 100644
index 0000000..a978976
--- /dev/null
+++ b/auditor/parser.py
@@ -0,0 +1,176 @@
+import argparse
+import json
+from cmath import exp
+from curses import meta
+from json import JSONDecodeError
+from os import F_OK, R_OK, access, path
+from os.path import exists as file_exists
+from pathlib import Path
+
+import cv2 as cv
+import pandas as pd
+
+
+# ########################################################################### #
+#                        Parsing of the inputs of auditor                     #
+# ########################################################################### #
+# Parser related to the arguments of the auditor program
+def parser() -> dict:
+    """Parse arguments to get the directory to audit as input.
+
+    Return:
+        An argparse Namespace containing the path to the input directory:
+        Namespace(path='')
+    """
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--path", type=str, required=True, help="path of the directory containing the images."
+    )
+    return parser.parse_args()
+
+
+# ########################################################################### #
+#                       Loading of metadata and audit data                    #
+# ########################################################################### #
+
+
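For context, a hypothetical end-to-end invocation of the auditor, mirroring `auditor/__main__.py` (the sample path is made up):

```python
# Hypothetical shell invocation:  python -m auditor --path /data/raw/some_acquisition/Default
from auditor.auditor import path_checker
from auditor.parser import load_metadata, parser

if __name__ == "__main__":
    args = parser()                        # Namespace(path="...")
    path_checker(args.path)                # validate existence and permissions
    metadata = load_metadata(args.path)    # parse metadata.txt (JSON content)
    print(metadata["Summary"]["Interval_ms"])  # expected frame interval in ms
```
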
+def load_metadata(directoryPath: str) -> dict:
+    """Load the metadata file into a Python dictionary.
+
+    Args:
+        directoryPath (str): path to the directory containing metadata.txt.
+
+    Raises:
+        FileNotFoundError: file [directoryPath]/metadata.txt does not exist.
+        Exception: [directoryPath]/metadata.txt is not readable by the user.
+        JSONDecodeError: issue when loading the metadata from the file.
+
+    Returns:
+        dict: the loaded metadata.
+    """
+    metadata_file = directoryPath + "/metadata.txt"
+    if not access(metadata_file, F_OK):
+        raise FileNotFoundError(f"File {metadata_file} does not exist.")
+    if not access(metadata_file, R_OK):
+        raise Exception(f"File {metadata_file} is not readable by the user.")
+    with open(file=metadata_file, mode="r") as file:
+        metadata = json.load(file)
+    return metadata
+
+
+def load_dataframe(file_path):
+    """Load the audit dataframe from a .csv file, creating an empty one if none exists.
+
+    Args:
+        file_path (str): path to the audit .csv file.
+
+    Returns:
+        pd.DataFrame: the loaded audit data, or an empty dataframe with the
+        audit columns when no file exists yet.
+    """
+    if file_exists(file_path):
+        df = pd.read_csv(file_path, index_col=False)
+    else:
+        df = pd.DataFrame(
+            columns=[
+                "video_name",
+                "expected_frames",
+                "number_of_actual_frames",
+                "expected_interval",
+                "average_interval",
+                "stdev_interval",
+                "actual_length_seconds",
+                "avg_fps",
+            ]
+        )
+    return df
+
+
+def audit_images(metadata: dict, directoryPath: str) -> dict:
+    """Audit the video frames and save the results to a .csv file.
+
+    Args:
+        metadata: the metadata resulting from load_metadata
+        directoryPath: the directory where the images are located
+
+    Returns:
+        dict: a dictionary with the audited metadata
+    """
+    video_name = directoryPath.rsplit("/", 1)[-1]
+    total_time_ms = 0
+    expect_frame_no = 0
+    expected_frames = metadata["Summary"]["Frames"]
+    theoretical_interval = metadata["Summary"]["Interval_ms"]
+    filenames_list = []
+    intervals_list = []
+    missing_frames = 0
+    tmp_total_time_ms = 0
+    audit_out_file = "./audit.csv"
+    # Loop over each frame obj in the metadata file
+    for obj in metadata:
+        if obj.startswith("Metadata-Default"):
+            filename = obj.rsplit("/", 1)[-1]
+            if filename != "Summary":
+                cv_img = cv.imread(directoryPath + "/" + filename)
+                if cv_img is None:
+                    missing_frames += 1
+                    expect_frame_no = expect_frame_no + 1
+                    continue
+                filenames_list.append(filename)
+
+                # Check the actual image shape against the expected shape
+                actual_height, actual_width = cv_img.shape[0], cv_img.shape[1]
+                expected_width = metadata[obj]["Width"]
+                expected_height = metadata[obj]["Height"]
+                if (actual_height != expected_height) or (actual_width != expected_width):
+                    raise Exception(f"Mismatched image size: frame: {directoryPath}/{filename}")
+
+                currentFrame = metadata[obj]["Frame"]
+                if currentFrame == 0:
+                    time_to_first_image = metadata[obj]["ElapsedTime-ms"]
+
+                # Check for missing frames
+                if currentFrame != expect_frame_no:
+                    missing_frames = missing_frames + 1
+
+                # Record the interval between two consecutive frames
+                total_time_ms = metadata[obj]["ElapsedTime-ms"] - time_to_first_image
+                intervals_list.append(total_time_ms - tmp_total_time_ms)
+                tmp_total_time_ms = total_time_ms
+                expect_frame_no = expect_frame_no + 1
+
+    if expect_frame_no != expected_frames:
+        missing_frames = expect_frame_no - expected_frames
+    df = pd.DataFrame(intervals_list, columns=["intervals"])
+    df2 = load_dataframe(audit_out_file)
+    data = [
+        video_name,
+        expected_frames,
+        expected_frames - missing_frames,
+        theoretical_interval,
+        df["intervals"].mean(),
+        df["intervals"].std(),
+        total_time_ms / 1000,
+        expect_frame_no / (total_time_ms / 1000),
+    ]
+    # If video name not found create a new entry, else update data
+    if video_name not in 
df2.values: + df2.loc[len(df2.index)] = data + else: + df2.loc[df2["video_name"] == video_name] = data + df2.reset_index(drop=True, inplace=True) + + print(df2) + df2.to_csv(audit_out_file, index=False) + return { + "number_of_expected_frames": expected_frames, + "number_of_actual_frames": expected_frames - missing_frames, + "expected_interval": theoretical_interval, + "average_interval": df["intervals"].mean(), + "stdev_interval": df["intervals"].std(), + "actual_length_seconds": total_time_ms / 1000, + "avg_fps": expect_frame_no / (total_time_ms / 1000), + } From cb9143f8d0fd8322a7c196ab76bb40844729a497 Mon Sep 17 00:00:00 2001 From: cnstll Date: Mon, 12 Sep 2022 23:05:00 +0200 Subject: [PATCH 24/25] audit result for aws buckets images --- audit.csv | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 audit.csv diff --git a/audit.csv b/audit.csv new file mode 100644 index 0000000..026086a --- /dev/null +++ b/audit.csv @@ -0,0 +1,16 @@ +video_name,expected_frames,number_of_actual_frames,expected_interval,average_interval,stdev_interval,actual_length_seconds,avg_fps +220427_BF_RC7_30ms_11.3x-crawl-ACR125_50ms_1,300,300,50.0,264.76,165.13202096451826,79.428,3.777005589968273 +220427_BF_RC7_30ms_11.3x-crawl-ACR125_50ms_2_1,300,300,50.0,283.46666666666664,199.99634333335013,85.04,3.527751646284101 +220427_BF_RC7_30ms_11.3x-crawl-ACR125_burst_1,100,100,1.0,36.308,11.82934222067049,3.6308,27.54213947339429 +220427_BF_RC7_30ms_11.3x-crawl-ACR125_burst_3_1,100,100,1.0,37.5768,13.263488798619749,3.75768,26.612164952843244 +220427_BF_RC7_30ms_11.3x-swim-acr125_burst_1,100,100,1.0,37.6839,17.963933726759127,3.76839,26.53653151611166 +220427_BF_RC7_30ms_11.3x-swim-acr125_burst_2,100,107,1.0,34.70451612903226,9.860474293660149,3.22752,28.814693634741225 +220427_BF_RC7_30ms_11.3x-swim-acr125_burst_3,100,100,1.0,35.348600000000005,10.821602508338788,3.53486,28.28966352274206 +220427_BF_RC7_30ms_11.3x-swim-acr125_burst_4_2,100,100,1.0,34.4214,13.222972665063253,3.44214,29.051694585345164 +raw-220503_TPS_ACR085-crawling_zoom113_10ms_100img,100,100,10.0,33.0665,7.184647404941543,3.3066500000000003,30.24208791374956 +raw-220503_TPS_ACR085-trashing_zoom113_10ms_100img,100,100,10.0,33.1473,7.744858816718934,3.31473,30.1683696711346 +raw-220503_TPS_ACR125-crawling_zoom113_10ms_100img,100,100,10.0,32.8245,8.158093010358304,3.28245,30.465048972566223 +raw-220503_TPS_ACR125-thrashing_zoom113_10ms_100img,100,100,10.0,33.0078,6.778871450426971,3.30078,30.295869461157665 +raw-220503_TPS_N2-zoom113_20ms_2,500,747,20.0,250.1225296442688,159.04675365295503,63.281,3.998040486085871 +raw-220503_TPS_N2-zoom197_20ms_1,500,781,20.0,261.75342465753425,141.24246647006768,57.324,3.8203893657106973 +raw-220503_TPS_N2-zoom197_20ms_2,NaN,NaN,NaN,NaN,NaN,NaN,NaN From 4c97179a037d8ab2767eb3fd3b02217868294330 Mon Sep 17 00:00:00 2001 From: cnstll Date: Mon, 19 Sep 2022 15:13:51 +0200 Subject: [PATCH 25/25] Script to download and make videos from provided tif images --- experiments/activity_index/bucket_path.txt | 19 ++++ .../activity_index/dl_and_make_videos.py | 103 ++++++++++++++++++ 2 files changed, 122 insertions(+) create mode 100644 experiments/activity_index/bucket_path.txt create mode 100644 experiments/activity_index/dl_and_make_videos.py diff --git a/experiments/activity_index/bucket_path.txt b/experiments/activity_index/bucket_path.txt new file mode 100644 index 0000000..56ab650 --- /dev/null +++ b/experiments/activity_index/bucket_path.txt @@ -0,0 +1,19 @@ 
+#s3://lab-nematode/raw/220503_TPS_ACR085/crawling_zoom113_10ms_100img/Default/
+#s3://lab-nematode/raw/220503_TPS_ACR085/trashing_zoom113_10ms_100img/Default/
+#s3://lab-nematode/raw/220503_TPS_ACR125/crawling_zoom113_10ms_100img/Default/
+#s3://lab-nematode/raw/220503_TPS_ACR125/thrashing_zoom113_10ms_100img/Default/
+#s3://lab-nematode/raw/220503_TPS_N2/zoom113_20ms_2/Default/
+#s3://lab-nematode/raw/220503_TPS_N2/zoom197_20ms_1/Default/
+#s3://lab-nematode/raw/220503_TPS_N2/zoom197_20ms_2/Default/
+#s3://lab-nematode/220427_BF_RC7_30ms_11.3x/crawl/ACR125_50ms_1/Default/
+#s3://lab-nematode/220427_BF_RC7_30ms_11.3x/crawl/ACR125_50ms_2_1/Default/
+#s3://lab-nematode/220427_BF_RC7_30ms_11.3x/crawl/ACR125_burst_1/Default/
+#s3://lab-nematode/220427_BF_RC7_30ms_11.3x/crawl/ACR125_burst_3_1/Default/
+#s3://lab-nematode/220427_BF_RC7_30ms_11.3x/swim/acr125_burst_1/Default/
+#s3://lab-nematode/220427_BF_RC7_30ms_11.3x/swim/acr125_burst_2/Default/
+#s3://lab-nematode/220427_BF_RC7_30ms_11.3x/swim/acr125_burst_3/Default/
+#s3://lab-nematode/220427_BF_RC7_30ms_11.3x/swim/acr125_burst_4_2/Default/
+#s3://lab-nematode/raw/dt100ms_11.3x_RC8_BF_expo33ms/Default/
+#s3://lab-nematode/raw/dt10ms_11.3x_RC8_BF_expo33ms/Default/
+s3://lab-nematode/raw/dt50ms_11.3x_RC8_BF_expo33ms/Default/
+s3://lab-nematode/raw/stream_11.3x_RC8_BF_expo_33ms/Default/
diff --git a/experiments/activity_index/dl_and_make_videos.py b/experiments/activity_index/dl_and_make_videos.py
new file mode 100644
index 0000000..15c7e0a
--- /dev/null
+++ b/experiments/activity_index/dl_and_make_videos.py
@@ -0,0 +1,103 @@
+import json
+import os
+
+
+def extract_bucket_names(abs_file_path: str) -> list:
+    """Extract AWS bucket paths from a text file into a list.
+
+    Empty lines and lines commented out with a leading hashtag are skipped.
+    Args:
+        abs_file_path (str): text file containing the list of bucket paths
+    Returns:
+        list: list of bucket paths
+    """
+    bucket_list = []
+    with open(abs_file_path, "r") as f:
+        for line in f:
+            bucket_name = line.strip()
+            if bucket_name and not bucket_name.startswith("#"):
+                bucket_list.append(bucket_name)
+    return bucket_list
+
+
+def load_metadata(directoryPath: str) -> dict:
+    """Load the metadata file into a Python dictionary.
+
+    Args:
+        directoryPath (str): directory containing the metadata.txt file to load.
+
+    Raises:
+        FileNotFoundError: file [directoryPath]/metadata.txt does not exist.
+        Exception: [directoryPath]/metadata.txt is not readable by the user.
+        JSONDecodeError: issue when loading the metadata from file.
+    Returns:
+        dict: the loaded metadata.
+    """
+    metadata_file = directoryPath + "/metadata.txt"
+    if not os.access(metadata_file, os.F_OK):
+        raise FileNotFoundError(f"File {metadata_file} does not exist.")
+    if not os.access(metadata_file, os.R_OK):
+        raise Exception(f"File {metadata_file} is not readable by the user.")
+    with open(file=metadata_file, mode="r") as file:
+        metadata = json.load(file)
+    return metadata
+
+
+def calculate_expected_fps(metadata_path=".") -> float:
+    """Use the provided metadata file to retrieve the theoretical fps.
+
+    Args:
+        metadata_path (str, optional): directory holding the metadata file.
+            Defaults to ".".
+
+    Returns:
+        float: the theoretical fps derived from the metadata interval
+    """
+    metadata = load_metadata(metadata_path)
+    theoretical_interval = metadata["Summary"]["Interval_ms"]
+    expected_fps = 1000 / theoretical_interval
+    return expected_fps
+
+
+def makeVideo(expected_fps: float, dir_name: str) -> None:
+    """Use ffmpeg to make a video from the tif images in the current dir.
+
+    Args:
+        expected_fps (float): input framerate used to assemble the video
+        dir_name (str): name of the dir holding the tif images, reused as the video name
+    """
+    ffmpeg_cmd = (
+        f"ffmpeg -loglevel 1 -framerate {expected_fps}"
+        f" -i img_channel000_position000_time%09d_z000.tif {dir_name}.mp4"
+    )
+    print(ffmpeg_cmd)
+    os.system(ffmpeg_cmd)
+
+
+if __name__ == "__main__":
+    working_dir = os.path.dirname(__file__)
+    bucket_names_file = os.path.join(working_dir, "bucket_path.txt")
+    bucket_list = extract_bucket_names(bucket_names_file)
+    # Remember where we started so each iteration can return here
+    initial_dir = os.getcwd()
+    # Loop over each bucket to download images and convert them into a video
+    for bucket_path in bucket_list:
+        # Set up a new dir, named after the bucket path, and cd to it
+        dir_name = (
+            bucket_path.replace("/Default/", "")
+            .replace("s3://lab-nematode/", "")
+            .replace("/", "-")
+        )
+        video_dir = os.path.join(working_dir, dir_name)
+        if os.path.exists(video_dir):
+            raise FileExistsError(f"Directory {video_dir} already exists")
+        os.mkdir(video_dir)
+        os.chdir(video_dir)
+        # Download all tif images from the aws bucket
+        aws_cmd = f"aws s3 cp {bucket_path} . --recursive"
+        os.system(aws_cmd)
+        # Create a video from the freshly downloaded images
+        expected_fps = calculate_expected_fps()
+        makeVideo(expected_fps, dir_name)
+        # Return to the starting dir; the previous hard-coded "../../../"
+        # only worked when launching the script from the repository root
+        os.chdir(initial_dir)
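+
+# Usage sketch (an assumption for illustration, not tested as part of this
+# patch): with the AWS CLI configured for read access to the lab-nematode
+# bucket and ffmpeg available on the PATH, run for example:
+#
+#   python experiments/activity_index/dl_and_make_videos.py
+#
+# Each uncommented s3 path in bucket_path.txt then yields one <dir_name>
+# folder of downloaded tif frames plus a <dir_name>.mp4 assembled at the
+# fps derived from the metadata "Interval_ms" field.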