"""
Implementation of the Deep Temporal Clustering model
Dataset loading functions
@author Florent Forest (FlorentF9)
"""
import sys

import numpy as np
from tslearn.datasets import UCR_UEA_datasets
from tslearn.preprocessing import TimeSeriesScalerMeanVariance
from sklearn.preprocessing import LabelEncoder

# UCR/UEA univariate and multivariate datasets
ucr = UCR_UEA_datasets()
all_ucr_datasets = ucr.list_datasets()

def load_ucr(dataset='CBF'):
    X_train, y_train, X_test, y_test = ucr.load_dataset(dataset)
    X = np.concatenate((X_train, X_test))
    y = np.concatenate((y_train, y_test))
    if dataset == 'HandMovementDirection':  # this one has special labels
        y = [yy[0] for yy in y]
    y = LabelEncoder().fit_transform(y)  # sometimes labels are strings or start from 1
    assert y.min() == 0  # assert labels are integers and start from 0
    # preprocess data (standardization)
    X_scaled = TimeSeriesScalerMeanVariance().fit_transform(X)
    return X_scaled, y
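
# Illustrative note (an addition, not part of the original module): what load_ucr
# is expected to return. Shapes assume tslearn's (n_samples, sz, d) array layout;
# 'CBF' is only an example dataset name.
# >>> X, y = load_ucr('CBF')
# >>> X.shape   # (n_samples, sz, d), each series z-normalized (zero mean, unit variance)
# >>> y         # integer labels in {0, ..., n_classes - 1}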

def load_data(dataset_name):
    if dataset_name in all_ucr_datasets:
        return load_ucr(dataset_name)
    else:
        print('Dataset {} not available! Available datasets are UCR/UEA univariate and multivariate datasets.'.format(dataset_name))
        sys.exit(1)  # exit with a non-zero code to signal the error
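
# Minimal usage sketch (an addition for illustration, not part of the original
# pipeline): load a dataset by name and inspect its shape. Assumes tslearn can
# fetch the UCR/UEA archive; 'CBF' is only an example dataset name.
if __name__ == '__main__':
    X, y = load_data('CBF')
    print('X shape: {}, y shape: {}'.format(X.shape, y.shape))
    print('Number of classes: {}'.format(len(np.unique(y))))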