Merge pull request #34 from LINCellularNeuroscience/vame-1.0-alpha
Vame 1.0 alpha -> Vame 1.0 RC1
Pavol Bauer authored Mar 22, 2021
2 parents d8558bc + 8712959 commit be72970
Showing 26 changed files with 32,364 additions and 487 deletions.
3 changes: 3 additions & 0 deletions VAME.yaml
@@ -24,3 +24,6 @@ dependencies:
- pyyaml
- opencv-python-headless
- h5py
+- umap-learn
+- networkx
+- tqdm
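
The three new dependencies line up with the RC1 analysis features added below: umap-learn presumably backs the UMAP visualization, networkx the community/graph analysis, and tqdm the progress reporting. A quick sanity check that a rebuilt environment resolves them (note that umap-learn installs as the module umap):

# Hedged import check for the new RC1 dependencies; the only non-obvious
# package-to-module mapping is umap-learn -> umap.
import umap
import networkx
import tqdm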
71 changes: 55 additions & 16 deletions examples/demo.py
@@ -1,9 +1,12 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
-Created on Thu Oct 31 16:50:23 2019
+Variational Animal Motion Embedding 1.0-alpha Toolbox
+© K. Luxem & P. Bauer, Department of Cellular Neuroscience
+Leibniz Institute for Neurobiology, Magdeburg, Germany
-@author: luxemk
+https://github.com/LINCellularNeuroscience/VAME
+Licensed under GNU General Public License v3.0
"""

import vame
@@ -14,34 +17,70 @@
videos = ['/directory/to/your/video-1','/directory/to/your/video-2','...']

# Initialize your project
-# Step 1:
-config = vame.init_new_project(project=project, videos=videos, working_directory=working_directory)
+# Step 1.1:
+config = vame.init_new_project(project=project, videos=videos, working_directory=working_directory, videotype='.mp4')

# After the initial creation of your project you can always access the config.yaml file
# by specifying the path to your project
config = '/YOUR/WORKING/DIRECTORY/Your-VAME-Project-Apr14-2020/config.yaml'

+# Step 1.2:
+# Align your behavior videos egocentrically and create the training dataset:
+# Make sure the resulting file ends up in the data folder of every video. Its name is the video name + -PE-seq.npy:
+# E.g.: /Your-VAME-Project/data/video-1/video-1-PE-seq.npy
+# pose_ref_index: list of reference coordinate indices for alignment
+# Example: 0: snout, 1: forehand_left, 2: forehand_right, 3: hindleft, 4: hindright, 5: tail
+vame.egocentric_alignment(config, pose_ref_index=[0,5])

+# If your experiment is egocentric by design (e.g. a head-fixed experiment on a treadmill),
+# you can use the following to convert your .csv to a .npy array, ready to train VAME on it
+vame.csv_to_numpy(config, datapath='C:\\Research\\VAME\\vame_alpha_release-Mar16-2021\\videos\\pose_estimation\\')

+# Step 1.3:
+# Create the training set for the VAME model
vame.create_trainset(config)

# Step 2:
-# Train rnn model:
-vame.rnn_model(config, model_name='VAME', pretrained_weights=False, pretrained_model='pretrained')
+# Train VAME:
+vame.train_model(config)

# Step 3:
# Evaluate model
-vame.evaluate_model(config, model_name='VAME')
+vame.evaluate_model(config)

# Step 4:
-# Quantify Behavior
-vame.behavior_segmentation(config, model_name='VAME', cluster_method='kmeans', n_cluster=[30])
+# Segment motifs/pose
+vame.pose_segmentation(config)

+#------------------------------------------------------------------------------
+#------------------------------------------------------------------------------
+# The following are optional choices to create motif videos, communities/hierarchies of behavior,
+# and community videos

+# OPTIONAL: Create motif videos to get insights into the fine-grained poses
+vame.motif_videos(config, videoType='.mp4')

+# OPTIONAL: Create behavioural hierarchies via community detection
+vame.community(config, umap_vis=False, cut_tree=2)

+# OPTIONAL: Create community videos to get insights into behavior on a hierarchical scale
+vame.community_videos(config)

+# OPTIONAL: Down-projection of latent vectors and visualization via UMAP
+vame.visualization(config, label=None)  # options: label: None, "motif", "community"

+# OPTIONAL: Use the generative model (reconstruction decoder) to sample from
+# the learned data distribution, reconstruct random real samples, or visualize
+# the cluster centers for validation
+vame.generative_model(config, mode="centers")  # options: mode: "sampling", "reconstruction", "centers"

+# OPTIONAL: Create a video of an egocentrically aligned mouse + path through
+# the community space (similar to our gif on GitHub) to learn more about your representation
+# and have something cool to show around ;)
+# Note: This function is currently very slow. Once the frames are saved you can create a video
+# or gif via e.g. ImageJ or other tools
+vame.gif(config, pose_ref_index=[0,5], subtract_background=True, start=None,
+         length=500, max_lag=30, label='community', file_format='.mp4', crop_size=(300,300))

-# Step 5:
-# Get behavioral transition matrix, model usage and graph
-vame.behavior_quantification(config, model_name='VAME', cluster_method='kmeans', n_cluster=30)

-# Get motif videos:
-vame.motif_videos(config, model_name='VAME', cluster_method="kmeans", n_cluster=[30])
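Taken together, the RC1 demo reduces the required pipeline to config-driven calls. A minimal end-to-end sketch using only the functions shown in this diff (paths are hypothetical placeholders, not VAME defaults):

import vame

# Placeholder paths for illustration; substitute your own layout.
working_directory = '/path/to/workdir'
project = 'Your-VAME-Project'
videos = ['/path/to/video-1.mp4']

config = vame.init_new_project(project=project, videos=videos,
                               working_directory=working_directory, videotype='.mp4')
vame.egocentric_alignment(config, pose_ref_index=[0,5])  # e.g. 0: snout, 5: tail
vame.create_trainset(config)
vame.train_model(config)
vame.evaluate_model(config)
vame.pose_segmentation(config)
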
Binary file removed examples/video-1-PE-seq.npy
Binary file not shown.
30,000 changes: 30,000 additions & 0 deletions examples/video-1.csv

Large diffs are not rendered by default.

12 changes: 9 additions & 3 deletions vame/__init__.py
@@ -13,10 +13,16 @@

from vame.initialize_project import init_new_project
from vame.model import create_trainset
-from vame.model import rnn_model
+from vame.model import train_model
from vame.model import evaluate_model
-from vame.analysis import behavior_segmentation
-from vame.analysis import behavior_quantification
+from vame.analysis import pose_segmentation
from vame.analysis import motif_videos
+from vame.analysis import community
+from vame.analysis import community_videos
+from vame.analysis import visualization
+from vame.analysis import generative_model
+from vame.analysis import gif
+from vame.util.csv_to_npy import csv_to_numpy
+from vame.util.align_egocentrical import egocentric_alignment
from vame.util import auxiliary

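For scripts written against the alpha API, the import changes above are mostly renames, with the extra arguments folded into the project configuration. A hypothetical compatibility shim, for illustration only (not part of VAME; call the aliases with just the config path):

import vame

# Alpha-era names aliased to the RC1 functions introduced in this diff.
# The RC1 versions take only the config path; model_name, cluster_method
# and n_cluster presumably live in config.yaml now, so drop the old kwargs.
rnn_model = vame.train_model                    # was vame.rnn_model(config, ...)
behavior_segmentation = vame.pose_segmentation  # was vame.behavior_segmentation(config, ...)
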
9 changes: 6 additions & 3 deletions vame/analysis/__init__.py
@@ -11,7 +11,10 @@
import sys
sys.dont_write_bytecode = True

-from vame.analysis.segment_behavior import behavior_segmentation
-from vame.analysis.behavior_structure import behavior_quantification
-from vame.analysis.videowriter import motif_videos
+from vame.analysis.pose_segmentation import pose_segmentation
+from vame.analysis.videowriter import motif_videos, community_videos
+from vame.analysis.community_analysis import community
+from vame.analysis.umap_visualization import visualization
+from vame.analysis.generative_functions import generative_model
+from vame.analysis.gif_creator import gif

137 changes: 0 additions & 137 deletions vame/analysis/behavior_structure.py

This file was deleted.

