Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

remove unused functions #28

Merged
merged 1 commit into from
Jan 21, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 8 additions & 9 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -71,14 +71,6 @@ python3 -m pip install ensemble-kalman-smoother
Note that you will not have access to the example data or example scripts with the pip install
option.

### Note: Using GPU for fast parallel-scan

As of now, EKS singlecam features a jitted parallel scan implementation for quickly optimizing the
smoothing parameter (notably for larger datasets of 10,000+ frames). In order to utilize parallel scan,
you will need to have a cuda environment with jax enabled. Further instructions can be found in the [jax
docs](https://jax.readthedocs.io/en/latest/installation.html).


## Example scripts

We provide several example datasets and fitting scripts to illustrate use of the package. See
Expand All @@ -103,8 +95,15 @@ implementations, including fast smoothing parameter auto-tuning using GPU-driven
[Here](docs/singlecam_overview.md) is a detailed overview of the workflow.

### Multi-camera datasets
Coming soon!
The `multicam_example.py` script demonstrates how to run the EKS code for multi-camera
setups where the pose predictions for a given model are all stored in a separate csv file per camera.
We provide example data in the `data/mirror-mouse-separate` directory inside this repo,
for a two-view video of a mouse with cameras named `top` and `bot`.
To run the EKS on the example data provided, execute the following command from inside this repo:

```console
python scripts/multicam_example.py --input-dir ./data/mirror-mouse-separate --bodypart-list paw1LH paw2LF paw3RF paw4RH --camera-names top bot
```

### Mirrored multi-camera datasets
The `mirrored_multicam_example.py` script demonstrates how to run the EKS code for multi-camera
Expand Down
1 change: 0 additions & 1 deletion docs/singlecam_overview.md
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,6 @@ Smoother (EKS).

5. **Save Smoothed Results**:
- For each body part, convert the resulting DataFrames to CSV files.
- Use `populate_output_dataframe` to integrate the results into the output DataFrame.
- Save the output DataFrame as a CSV file in the specified directory.

6. **Plot Results**:
Expand Down
2 changes: 1 addition & 1 deletion eks/__init__.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
from eks import *

__version__ = '3.0.0'
__version__ = '4.0.0'
2 changes: 1 addition & 1 deletion eks/ibl_pupil_smoother.py
Original file line number Diff line number Diff line change
Expand Up @@ -113,7 +113,7 @@ def fit_eks_pupil(
"""

# Load and format input files
input_dfs_list, _, keypoint_names = format_data(input_source)
input_dfs_list, keypoint_names = format_data(input_source)

print(f"Input data loaded for keypoints: {keypoint_names}")

Expand Down
7 changes: 3 additions & 4 deletions eks/multicam_smoother.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
forward_pass,
)
from eks.ibl_paw_multiview_smoother import pca, remove_camera_means
from eks.utils import crop_frames, format_data, make_dlc_pandas_index, populate_output_dataframe
from eks.utils import crop_frames, format_data, make_dlc_pandas_index


def fit_eks_mirrored_multicam(
Expand Down Expand Up @@ -53,7 +53,7 @@ def fit_eks_mirrored_multicam(
bodypart_list (list): List of body parts used.
"""
# Load and format input files
input_dfs_list, _, keypoint_names = format_data(input_source)
input_dfs_list, keypoint_names = format_data(input_source)
if bodypart_list is None:
bodypart_list = keypoint_names

Expand Down Expand Up @@ -136,8 +136,7 @@ def fit_eks_multicam(
"""
# Load and format input files
# NOTE: input_dfs_list is a list of camera-specific lists of Dataframes
input_dfs_list, _, keypoint_names = format_data(input_source,
camera_names=camera_names)
input_dfs_list, keypoint_names = format_data(input_source, camera_names=camera_names)
if bodypart_list is None:
bodypart_list = keypoint_names
print(f'Input data loaded for keypoints:\n{bodypart_list}')
Expand Down
2 changes: 1 addition & 1 deletion eks/singlecam_smoother.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ def fit_eks_singlecam(

"""
# Load and format input files using the unified format_data function
input_dfs_list, _, keypoint_names = format_data(input_source)
input_dfs_list, keypoint_names = format_data(input_source)

if bodypart_list is None:
bodypart_list = keypoint_names
Expand Down
108 changes: 12 additions & 96 deletions eks/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,9 +20,8 @@ def convert_lp_dlc(df_lp, keypoint_names, model_name=None):
for feat2 in ['x', 'y', 'likelihood']:
try:
if model_name is None:
col_tuple = (feat, feat2)
else:
col_tuple = (model_name, feat, feat2)
model_name = df_lp.columns[0][0]
col_tuple = (model_name, feat, feat2)

# Skip columns with any unnamed level
if any(level.startswith('Unnamed') for level in col_tuple if
Expand Down Expand Up @@ -92,7 +91,6 @@ def format_data(input_source, camera_names=None):

Returns:
input_dfs_list (list): List of formatted DataFrames (List of Lists for un-mirrored sets).
output_df (DataFrame): Empty DataFrame for storing results.
keypoint_names (list): List of keypoint names.

"""
Expand All @@ -114,17 +112,15 @@ def format_data(input_source, camera_names=None):
if camera_names is None:
for file_path in file_paths:
if file_path.endswith('.slp'):
markers_curr = convert_slp_dlc(os.path.dirname(file_path),
os.path.basename(file_path))
markers_curr = convert_slp_dlc(
os.path.dirname(file_path), os.path.basename(file_path),
)
keypoint_names = [c[1] for c in markers_curr.columns[::3]]
markers_curr_fmt = markers_curr
elif file_path.endswith('.csv'):
markers_curr = pd.read_csv(file_path, header=[0, 1, 2], index_col=0)
keypoint_names = [c[1] for c in markers_curr.columns[::3]]
model_name = markers_curr.columns[0][0]
markers_curr_fmt = convert_lp_dlc(markers_curr,
keypoint_names,
model_name=model_name)
markers_curr_fmt = convert_lp_dlc(markers_curr, keypoint_names)
else:
continue
input_dfs_list.append(markers_curr_fmt)
Expand All @@ -136,17 +132,15 @@ def format_data(input_source, camera_names=None):
continue
else: # file_path matches the camera name, proceed with processing
if file_path.endswith('.slp'):
markers_curr = convert_slp_dlc(os.path.dirname(file_path),
os.path.basename(file_path))
markers_curr = convert_slp_dlc(
os.path.dirname(file_path), os.path.basename(file_path),
)
keypoint_names = [c[1] for c in markers_curr.columns[::3]]
markers_curr_fmt = markers_curr
elif file_path.endswith('.csv'):
markers_curr = pd.read_csv(file_path, header=[0, 1, 2], index_col=0)
keypoint_names = [c[1] for c in markers_curr.columns[::3]]
model_name = markers_curr.columns[0][0]
markers_curr_fmt = convert_lp_dlc(markers_curr,
keypoint_names,
model_name=model_name)
markers_curr_fmt = convert_lp_dlc(markers_curr, keypoint_names)
else:
continue
markers_for_this_camera.append(markers_curr_fmt)
Expand All @@ -157,84 +151,7 @@ def format_data(input_source, camera_names=None):
if len(input_dfs_list) == 0:
raise FileNotFoundError(f'No valid marker input files found in {input_source}')

# Create an empty output DataFrame using the last processed DataFrame as a template
if camera_names is None:
last_df = input_dfs_list[0]
else: # multicam
last_df = input_dfs_list[0][0]
output_df = make_output_dataframe(last_df)

return input_dfs_list, output_df, keypoint_names


def make_output_dataframe(markers_curr):
    """Make an empty DataFrame for EKS output, including x_var and y_var.

    Parameters:
        markers_curr (pd.DataFrame): Template DataFrame whose index and column
            layout is reused. Columns are either a 3-level MultiIndex
            (scorer, bodyparts, coords) or flat names of the form
            '<instance>_<keypoint>_<feature>'.

    Returns:
        pd.DataFrame: Copy of the template with the scorer level set to
            'ensemble-kalman_tracker', likelihood columns initialized to 1.0,
            and all other columns (x, y, x_var, y_var, ...) set to NaN.
    """
    markers_eks = markers_curr.copy()

    if isinstance(markers_eks.columns, pd.MultiIndex):
        # Rename the scorer (first) level to the EKS tracker name.
        # NOTE(review): set_levels assumes level 0 holds a single scorer value;
        # a multi-scorer template would raise here — confirm against callers.
        markers_eks.columns = markers_eks.columns.set_levels(
            ['ensemble-kalman_tracker'], level=0)
    else:
        # Flat column names: rebuild as a 3-level MultiIndex
        # (scorer, bodyparts, coords) from '<instance>_<keypoint>_<feature>'.
        new_columns = []
        for col in markers_eks.columns:
            parts = col.split('_')
            instance_num = parts[0]
            keypoint_name = '_'.join(parts[1:-1])  # keypoint may itself contain '_'
            if keypoint_name != '':
                keypoint_name = f'_{keypoint_name}'
            feature = parts[-1]
            new_columns.append(
                ('ensemble-kalman_tracker', f'{instance_num}{keypoint_name}', feature))
        markers_eks.columns = pd.MultiIndex.from_tuples(
            new_columns, names=['scorer', 'bodyparts', 'coords'])

    # Initialize values: likelihood columns to 1.0, everything else to NaN.
    # The original had separate but identical branches for ['x_var', 'y_var']
    # and the default case; they are merged here. Direct column assignment
    # replaces the in-place `.values[:]` mutation, which can silently break
    # under pandas copy-on-write.
    for col in markers_eks.columns:
        if col[-1] == 'likelihood':
            markers_eks[col] = 1.0
        else:
            markers_eks[col] = np.nan

    return markers_eks


def dataframe_to_csv(df, filename):
    """Write a DataFrame to a CSV file, best-effort.

    Any failure (bad path, permissions, etc.) is reported on stdout rather
    than propagated, so a single failed write does not abort the caller.

    Parameters:
        df (pandas.DataFrame): The DataFrame to be converted.
        filename (str): The name of the CSV file to be created.

    Returns:
        None
    """
    try:
        df.to_csv(filename, index=False)
    except Exception as err:
        # Swallow and report, matching the package's best-effort save behavior.
        print("Error:", err)


def populate_output_dataframe(keypoint_df, keypoint_ensemble, output_df, key_suffix=''):
    """Copy EKS results for one keypoint into the aggregate output DataFrame.

    Transfers the 'x', 'y', 'zscore', 'nll', 'x_var', and 'y_var' columns for
    `keypoint_ensemble` from `keypoint_df` into `output_df`, writing them under
    the keypoint name plus `key_suffix`.

    Parameters:
        keypoint_df (pd.DataFrame): Source with ('ensemble-kalman_tracker',
            keypoint, coord) MultiIndex columns.
        keypoint_ensemble (str): Name of the keypoint to transfer.
        output_df (pd.DataFrame): Destination DataFrame, modified in place.
        key_suffix (str): Optional suffix appended to the destination keypoint
            name (e.g. a camera tag).

    Returns:
        pd.DataFrame: The updated `output_df` (same object, for chaining).
    """
    scorer = 'ensemble-kalman_tracker'
    dst_key = keypoint_ensemble + key_suffix
    for coord in ('x', 'y', 'zscore', 'nll', 'x_var', 'y_var'):
        src = keypoint_df.loc[:, (scorer, keypoint_ensemble, coord)]
        output_df.loc[:, (scorer, dst_key, coord)] = src

    return output_df
return input_dfs_list, keypoint_names


def plot_results(
Expand Down Expand Up @@ -284,8 +201,7 @@ def plot_results(

plt.suptitle(f'EKS results for {key}, smoothing = {s_final}', fontsize=14)
plt.tight_layout()
save_file = os.path.join(save_dir,
f'{smoother_type}_{key}.pdf')
save_file = os.path.join(save_dir, f'{smoother_type}_{key}.pdf')
plt.savefig(save_file)
plt.close()
print(f'see example EKS output at {save_file}')
Expand Down
3 changes: 1 addition & 2 deletions scripts/ibl_paw_multiview_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,8 +31,7 @@
markers_curr = pd.read_csv(
os.path.join(input_dir, filename), header=[0, 1, 2], index_col=0)
keypoint_names = [c[1] for c in markers_curr.columns[::3]]
model_name = markers_curr.columns[0][0]
markers_curr_fmt = convert_lp_dlc(markers_curr, keypoint_names, model_name=model_name)
markers_curr_fmt = convert_lp_dlc(markers_curr, keypoint_names)
if 'left' in filename:
markers_list_left.append(markers_curr_fmt)
else:
Expand Down
Loading