From 4c156003e42bee35d049bd5f1a94c8fb6fcb8ace Mon Sep 17 00:00:00 2001
From: Payam Jome Yazdian
Date: Wed, 1 Feb 2023 10:41:50 -0800
Subject: [PATCH] Add files via upload

---
 scripts/Clustering.py                                | 1624 +++++++++++++++++
 scripts/creat_human-study.py                         |  126 ++
 .../data_preprocessor.cpython-37.pyc                 |  Bin 0 -> 7591 bytes
 .../lmdb_data_loader.cpython-37.pyc                  |  Bin 0 -> 19298 bytes
 scripts/data_loader/data_preprocessor.py             |  402 ++++
 scripts/data_loader/gesture_labels.txt               |  411 +++++
 scripts/data_loader/lmdb_data_loader.py              |  746 ++++++++
 scripts/inference.py                                 |  189 ++
 scripts/inference_Autoencoder.py                     |  564 ++++++
 scripts/inference_DAE.py                             |  659 +++++++
 scripts/inference_cluster2gesture.py                 |  215 +++
 scripts/inference_text2embedding.py                  |  774 ++++++++
 scripts/inference_text2embedding_GENEA.py            |  690 +++++++
 scripts/model/Autoencoder_VQVAE_model.py             | 1383 ++++++++++++++
 scripts/model/Autoencoder_model.py                   |  388 ++++
 scripts/model/DAE_model.py                           |  470 +++++
 scripts/model/Helper_models.py                       |  667 +++++++
 .../Autoencoder_VQVAE_model.cpython-37.pyc           |  Bin 0 -> 28281 bytes
 .../Autoencoder_model.cpython-37.pyc                 |  Bin 0 -> 9284 bytes
 .../__pycache__/DAE_model.cpython-37.pyc             |  Bin 0 -> 8951 bytes
 .../__pycache__/Helper_models.cpython-37.pyc         |  Bin 0 -> 15447 bytes
 .../__pycache__/seq2seq_net.cpython-37.pyc           |  Bin 0 -> 7713 bytes
 .../seq2seq_with_cluster_model.cpython-37.pyc        |  Bin 0 -> 2283 bytes
 .../text2embedding_GAN_model.cpython-37.pyc          |  Bin 0 -> 17664 bytes
 .../text2embedding_model.cpython-37.pyc              |  Bin 0 -> 14326 bytes
 .../model/__pycache__/vocab.cpython-37.pyc           |  Bin 0 -> 3856 bytes
 scripts/model/autoencoder_backup.py                  |   38 +
 scripts/model/seq2seq_net.py                         |  257 +++
 scripts/model/seq2seq_with_cluster_model.py          |   71 +
 scripts/model/tcn.py                                 |   64 +
 scripts/model/text2embedding_GAN_model.py            |  636 +++++++
 scripts/model/text2embedding_model.py                |  597 ++++++
 .../model/text2embedding_transformer_model.py        |    0
 scripts/model/vocab.py                               |  130 ++
 scripts/pymo/__init__.py                             |    0
 .../pymo/__pycache__/__init__.cpython-37.pyc         |  Bin 0 -> 211 bytes
 scripts/pymo/__pycache__/data.cpython-37.pyc         |  Bin 0 -> 2748 bytes
 .../pymo/__pycache__/parsers.cpython-37.pyc          |  Bin 0 -> 7821 bytes
 .../__pycache__/preprocessing.cpython-37.pyc         |  Bin 0 -> 30154 bytes
 .../pymo/__pycache__/viz_tools.cpython-37.pyc        |  Bin 0 -> 6993 bytes
 .../pymo/__pycache__/writers.cpython-37.pyc          |  Bin 0 -> 2634 bytes
 scripts/pymo/data.py                                 |   53 +
 scripts/pymo/features.py                             |   43 +
 scripts/pymo/parsers.py                              |  260 +++
 scripts/pymo/preprocessing.py                        |  975 ++++++++++
 scripts/pymo/rotation_tools.py                       |  220 +++
 scripts/pymo/viz_tools.py                            |  235 +++
 scripts/pymo/writers.py                              |   70 +
 scripts/save_clustered_fast.py                       |  201 ++
 scripts/test_pymo.py                                 |  138 ++
 .../test_transformers/transformers_model.py          | 1092 +++++++++++
 scripts/train.py                                     |  189 ++
 scripts/train_Autoencoder.py                         |  568 ++++++
 scripts/train_DAE.py                                 |  475 +++++
 scripts/train_autoencoder_VQVAE.py                   |  505 +++++
 scripts/train_cluster2gesture.py                     |  192 ++
 .../__pycache__/train_seq2seq.cpython-37.pyc         |  Bin 0 -> 8352 bytes
 scripts/train_eval/train_seq2seq.py                  |  489 +++++
 scripts/train_gan.py                                 |  240 +++
 scripts/train_stream_cluster.py                      |  189 ++
 .../train_text2embedding(stream_gesture_).py         |  193 ++
 scripts/train_text2embedding.py                      |  332 ++++
 scripts/trinity_data_to_lmdb.py                      |  161 ++
 scripts/twh_dataset_to_lmdb.py                       |  279 +++
 scripts/utils/Unityfier.py                           |   37 +
 .../__pycache__/average_meter.cpython-37.pyc         |  Bin 0 -> 1207 bytes
 .../__pycache__/data_utils.cpython-37.pyc            |  Bin 0 -> 2073 bytes
 .../__pycache__/data_utils_twh.cpython-37.pyc        |  Bin 0 -> 2227 bytes
 .../__pycache__/train_utils.cpython-37.pyc           |  Bin 0 -> 2772 bytes
 .../__pycache__/vocab_utils.cpython-37.pyc           |  Bin 0 -> 1580 bytes
 scripts/utils/average_meter.py                       |   23 +
 scripts/utils/data_utils.py                          |   48 +
 scripts/utils/data_utils_twh.py                      |   65 +
 scripts/utils/train_utils.py                         |   85 +
 scripts/utils/vocab_utils.py                         |   57 +
 75 files changed, 18515 insertions(+)
 create mode 100644 scripts/Clustering.py
 create mode 100644 scripts/creat_human-study.py
 create mode 100644 scripts/data_loader/__pycache__/data_preprocessor.cpython-37.pyc
 create mode 100644 scripts/data_loader/__pycache__/lmdb_data_loader.cpython-37.pyc
 create mode 100644 scripts/data_loader/data_preprocessor.py
 create mode 100644 scripts/data_loader/gesture_labels.txt
 create mode 100644 scripts/data_loader/lmdb_data_loader.py
 create mode 100644 scripts/inference.py
 create mode 100644 scripts/inference_Autoencoder.py
 create mode 100644 scripts/inference_DAE.py
 create mode 100644 scripts/inference_cluster2gesture.py
 create mode 100644 scripts/inference_text2embedding.py
 create mode 100644 scripts/inference_text2embedding_GENEA.py
 create mode 100644 scripts/model/Autoencoder_VQVAE_model.py
 create mode 100644 scripts/model/Autoencoder_model.py
 create mode 100644 scripts/model/DAE_model.py
 create mode 100644 scripts/model/Helper_models.py
 create mode 100644 scripts/model/__pycache__/Autoencoder_VQVAE_model.cpython-37.pyc
 create mode 100644 scripts/model/__pycache__/Autoencoder_model.cpython-37.pyc
 create mode 100644 scripts/model/__pycache__/DAE_model.cpython-37.pyc
 create mode 100644 scripts/model/__pycache__/Helper_models.cpython-37.pyc
 create mode 100644 scripts/model/__pycache__/seq2seq_net.cpython-37.pyc
 create mode 100644 scripts/model/__pycache__/seq2seq_with_cluster_model.cpython-37.pyc
 create mode 100644 scripts/model/__pycache__/text2embedding_GAN_model.cpython-37.pyc
 create mode 100644 scripts/model/__pycache__/text2embedding_model.cpython-37.pyc
 create mode 100644 scripts/model/__pycache__/vocab.cpython-37.pyc
 create mode 100644 scripts/model/autoencoder_backup.py
 create mode 100644 scripts/model/seq2seq_net.py
 create mode 100644 scripts/model/seq2seq_with_cluster_model.py
 create mode 100644 scripts/model/tcn.py
 create mode 100644 scripts/model/text2embedding_GAN_model.py
 create mode 100644 scripts/model/text2embedding_model.py
 create mode 100644 scripts/model/text2embedding_transformer_model.py
 create mode 100644 scripts/model/vocab.py
 create mode 100644 scripts/pymo/__init__.py
 create mode 100644 scripts/pymo/__pycache__/__init__.cpython-37.pyc
 create mode 100644 scripts/pymo/__pycache__/data.cpython-37.pyc
 create mode 100644 scripts/pymo/__pycache__/parsers.cpython-37.pyc
 create mode 100644 scripts/pymo/__pycache__/preprocessing.cpython-37.pyc
 create mode 100644 scripts/pymo/__pycache__/viz_tools.cpython-37.pyc
 create mode 100644 scripts/pymo/__pycache__/writers.cpython-37.pyc
 create mode 100644 scripts/pymo/data.py
 create mode 100644 scripts/pymo/features.py
 create mode 100644 scripts/pymo/parsers.py
 create mode 100644 scripts/pymo/preprocessing.py
 create mode 100644 scripts/pymo/rotation_tools.py
 create mode 100644 scripts/pymo/viz_tools.py
 create mode 100644 scripts/pymo/writers.py
 create mode 100644 scripts/save_clustered_fast.py
 create mode 100644 scripts/test_pymo.py
 create mode 100644 scripts/test_transformers/transformers_model.py
 create mode 100644 scripts/train.py
 create mode 100644 scripts/train_Autoencoder.py
 create mode 100644 scripts/train_DAE.py
 create mode 100644 scripts/train_autoencoder_VQVAE.py
 create mode 100644 scripts/train_cluster2gesture.py
 create mode 100644 scripts/train_eval/__pycache__/train_seq2seq.cpython-37.pyc
 create mode 100644 scripts/train_eval/train_seq2seq.py
 create mode 100644 scripts/train_gan.py
 create mode 100644 scripts/train_stream_cluster.py
 create mode 100644 scripts/train_text2embedding(stream_gesture_).py
 create mode 100644 scripts/train_text2embedding.py
 create mode 100644 scripts/trinity_data_to_lmdb.py
 create mode 100644 scripts/twh_dataset_to_lmdb.py
 create mode 100644 scripts/utils/Unityfier.py
 create mode 100644 scripts/utils/__pycache__/average_meter.cpython-37.pyc
 create mode 100644 scripts/utils/__pycache__/data_utils.cpython-37.pyc
 create mode 100644 scripts/utils/__pycache__/data_utils_twh.cpython-37.pyc
 create mode 100644 scripts/utils/__pycache__/train_utils.cpython-37.pyc
 create mode 100644 scripts/utils/__pycache__/vocab_utils.cpython-37.pyc
 create mode 100644 scripts/utils/average_meter.py
 create mode 100644 scripts/utils/data_utils.py
 create mode 100644 scripts/utils/data_utils_twh.py
 create mode 100644 scripts/utils/train_utils.py
 create mode 100644 scripts/utils/vocab_utils.py

diff --git a/scripts/Clustering.py b/scripts/Clustering.py
new file mode 100644
index 0000000..bb3a390
--- /dev/null
+++ b/scripts/Clustering.py
@@ -0,0 +1,1624 @@
+import argparse
+import glob
+import math
+import os.path
+import pickle
+import pprint
+import random
+import time
+from collections import Counter
+from pathlib import Path
+
+import joblib as jl
+import matplotlib.pyplot as plt
+import numpy as np
+import seaborn as sns
+import torch
+from mpl_toolkits.mplot3d import Axes3D
+from numpy import string_
+from scipy import stats
+from scipy.ndimage.filters import gaussian_filter
+from scipy.signal import savgol_filter
+from scipy.spatial.distance import pdist
+from sklearn import metrics
+from sklearn.cluster import DBSCAN, KMeans, AgglomerativeClustering
+from sklearn.datasets import make_blobs
+from sklearn.decomposition import PCA
+from sklearn.metrics import silhouette_score
+from sklearn.preprocessing import StandardScaler
+from tqdm import tqdm
+from tslearn.clustering import TimeSeriesKMeans
+from openTSNE import TSNE
+# from kneed import KneeLocator
+
+import utils
+from pymo.preprocessing import *
+from pymo.viz_tools import *
+from pymo.writers import *
+from utils.data_utils import SubtitleWrapper, normalize_string
+from utils.train_utils import set_logger
+from data_loader.data_preprocessor import DataPreprocessor
+from trinity_data_to_lmdb import process_bvh as process_bvh_trinity
+from twh_dataset_to_lmdb import process_bvh_rot_only_Taras
+from twh_dataset_to_lmdb import process_bvh_test1 as process_bvh_rot_test1
+from inference_DAE import make_bvh_Trinity, feat2bvh
+
+device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+DATA_TYPE = 'TWH'
+
+
+def generate_gestures_latent_dataset(args, DAE, rnn, bvh_file):
+    # Load the BVH motion. The Trinity pipeline also returns a mirrored copy;
+    # the TWH pipeline does not, so the original poses are reused there.
+    if DATA_TYPE == 'Trinity':
+        poses, poses_mirror = process_bvh_trinity(bvh_file)
+    elif DATA_TYPE == 'TWH':
+        poses = process_bvh_rot_test1(bvh_file)
+        poses_mirror = poses
+
+    # Standardize frames with the dataset mean/std; clip std to avoid
+    # division by near-zero values.
+    mean = np.array(args.data_mean).squeeze()
+    std = np.array(args.data_std).squeeze()
+    std = np.clip(std, a_min=0.01, a_max=None)
+    out_poses = (np.copy(poses_mirror) - mean) / std
+
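+    # Move the standardized sequence onto the compute device and run it
+    # through the frame-wise DAE encoder (when one is configured) to obtain
+    # the latent features that the subsequent clustering operates on.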
+    target = torch.from_numpy(out_poses)
+    # target = torch.unsqueeze(target, 2)
+    target = target.to(device).float()
+    reconstructed = []
+    # for i in range(len(out_poses)):
+    #     input = torch.unsqueeze(target[i], 0)
+    #     current_out = pose_decoder(input)
+    #     reconstructed.append(current_out)
+    if DAE.encoder is None:
+        encoded = target
+    else:
+        encoded = DAE.encoder(target)
+    # encoded = torch.squeeze(encoded, 2)
+    # encoded = encoded.to('cpu')
+    # encoded = encoded.detach().numpy()
+
+    # Slice the encoded sequence into overlapping windows of n_poses frames,
+    # advancing by subdivision_stride; each window becomes one latent sample.
+    all_frames_from_rnn = None
+    all_sequences_poses_latent = []
+    for i in range(0, len(encoded), args.subdivision_stride):
+        current_dict = dict()
+        input_seq = encoded[i:i + args.n_poses]
+        if len(input_seq)