diff --git a/Model.py b/Model.py
new file mode 100644
index 0000000..d9937ed
--- /dev/null
+++ b/Model.py
@@ -0,0 +1,185 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Sep 17 11:16:34 2019
+
+@author: anne marie delaney
+         eoin brophy
+
+Module of the GAN model for time series synthesis.
+
+"""
+
+import torch
+import torch.nn as nn
+
+device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
+
+
+"""
+NN Definitions
+--------------
+Defining the neural network classes to be evaluated in this notebook.
+
+Minibatch Discrimination
+------------------------
+A module for minibatch discrimination, used to avoid mode collapse, as described in:
+https://arxiv.org/pdf/1606.03498.pdf
+https://torchgan.readthedocs.io/en/latest/modules/layers.html#minibatch-discrimination
+
+"""
+
+class MinibatchDiscrimination(nn.Module):
+    def __init__(self, input_features, output_features, minibatch_normal_init, hidden_features=16):
+        super(MinibatchDiscrimination, self).__init__()
+
+        self.input_features = input_features
+        self.output_features = output_features
+        self.hidden_features = hidden_features
+        self.T = nn.Parameter(torch.randn(self.input_features, self.output_features, self.hidden_features))
+        if minibatch_normal_init:
+            nn.init.normal_(self.T, 0, 1)  # in-place init (nn.init.normal is deprecated)
+
+    def forward(self, x):
+        M = torch.mm(x, self.T.view(self.input_features, -1))
+        M = M.view(-1, self.output_features, self.hidden_features).unsqueeze(0)
+        M_t = M.permute(1, 0, 2, 3)
+        # Broadcasting reduces the matrix subtraction to the form desired in the paper
+        out = torch.sum(torch.exp(-(torch.abs(M - M_t).sum(3))), dim=0) - 1
+        return torch.cat([x, out], 1)
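+
+# Illustrative shape check (a sketch kept as a comment so nothing runs on
+# import): for a batch of flattened discriminator features of shape
+# (N, input_features), the layer appends output_features batch-similarity
+# statistics to each sample:
+#
+#   mb = MinibatchDiscrimination(input_features=440, output_features=10,
+#                                minibatch_normal_init=True)
+#   x = torch.randn(32, 440)
+#   mb(x).shape   # -> torch.Size([32, 450]), i.e. 440 + 10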
+
+"""
+Discriminator Class
+-------------------
+The discriminator takes a parameter num_cv that lets the user choose between
+one and two convolutional layers.
+
+"""
+
+class Discriminator(nn.Module):
+    def __init__(self, seq_length, batch_size, minibatch_normal_init, n_features=1, num_cv=1, minibatch=0, cv1_out=10, cv1_k=3, cv1_s=4, p1_k=3, p1_s=3, cv2_out=10, cv2_k=3, cv2_s=3, p2_k=3, p2_s=3):
+        super(Discriminator, self).__init__()
+        self.n_features = n_features
+        self.seq_length = seq_length
+        self.batch_size = batch_size
+        self.num_cv = num_cv
+        self.minibatch = minibatch
+        # Output lengths after each conv + max-pool stage: floor((L - k)/s + 1) per layer
+        self.cv1_dims = int((((((seq_length - cv1_k)/cv1_s) + 1) - p1_k)/p1_s) + 1)
+        self.cv2_dims = int((((((self.cv1_dims - cv2_k)/cv2_s) + 1) - p2_k)/p2_s) + 1)
+        self.cv1_out = cv1_out
+        self.cv2_out = cv2_out
+
+        # Input should be of size (batch_size, n_features, seq_length) for the convolutional layer
+        self.CV1 = nn.Sequential(
+            nn.Conv1d(in_channels=self.n_features, out_channels=int(cv1_out), kernel_size=int(cv1_k), stride=int(cv1_s)),
+            nn.ReLU(),
+            nn.MaxPool1d(kernel_size=int(p1_k), stride=int(p1_s))
+        )
+
+        # 2 convolutional layers
+        if self.num_cv > 1:
+            self.CV2 = nn.Sequential(
+                nn.Conv1d(in_channels=int(cv1_out), out_channels=int(cv2_out), kernel_size=int(cv2_k), stride=int(cv2_s)),
+                nn.ReLU(),
+                nn.MaxPool1d(kernel_size=int(p2_k), stride=int(p2_s))
+            )
+
+            # Adding a minibatch discrimination layer, which exposes batch-level
+            # statistics to the discriminator so that the generator is pushed to
+            # produce sequences that differ from each other.
+            if self.minibatch > 0:
+                self.mb1 = MinibatchDiscrimination(self.cv2_dims*cv2_out, self.minibatch, minibatch_normal_init)
+                self.out = nn.Sequential(nn.Linear(int(self.cv2_dims*cv2_out) + self.minibatch, 1), nn.Sigmoid())  # to make sure the output is between 0 and 1
+            else:
+                self.out = nn.Sequential(nn.Linear(int(self.cv2_dims*cv2_out), 1), nn.Sigmoid())  # to make sure the output is between 0 and 1
+
+        # 1 convolutional layer
+        else:
+            # Adding a minibatch discrimination layer, which exposes batch-level
+            # statistics to the discriminator so that the generator is pushed to
+            # produce sequences that differ from each other.
+            if self.minibatch > 0:
+                self.mb1 = MinibatchDiscrimination(int(self.cv1_dims*cv1_out), self.minibatch, minibatch_normal_init)
+                self.out = nn.Sequential(nn.Linear(int(self.cv1_dims*cv1_out) + self.minibatch, 1), nn.Dropout(0.2), nn.Sigmoid())  # to make sure the output is between 0 and 1
+            else:
+                self.out = nn.Sequential(nn.Linear(int(self.cv1_dims*cv1_out), 1), nn.Sigmoid())
+
+    def forward(self, x):
+        # print("Calculated Output dims after CV1: "+str(self.cv1_dims))
+        # print("input: "+str(x.size()))
+        x = self.CV1(x.view(self.batch_size, 1, self.seq_length))
+        # print("CV1 Output: "+str(x.size()))
+
+        # 2 convolutional layers
+        if self.num_cv > 1:
+            x = self.CV2(x)
+            x = x.view(self.batch_size, -1)
+            # print("CV2 Output: "+str(x.size()))
+            if self.minibatch > 0:
+                x = self.mb1(x.squeeze())
+                # print("minibatch output: "+str(x.size()))
+                x = self.out(x.squeeze())
+            else:
+                x = self.out(x.squeeze())
+
+        # 1 convolutional layer
+        else:
+            x = x.view(self.batch_size, -1)
+            # 1 convolutional layer with minibatch discrimination
+            if self.minibatch > 0:
+                x = self.mb1(x)
+                x = self.out(x)
+            # 1 convolutional layer without minibatch discrimination
+            else:
+                x = self.out(x)
+
+        return x
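+
+# Worked size check (a sketch using the settings from train.py, where
+# seq_length=187 for MIT-BIH beats, cv1_k=3, cv1_s=1, p1_k=3, p1_s=2 and the
+# same kernel/stride settings for the second stage):
+#   conv1: (187 - 3)/1 + 1 = 185    pool1: (185 - 3)/2 + 1 = 92  -> cv1_dims
+#   conv2: (92 - 3)/1 + 1  = 90     pool2: (90 - 3)/2 + 1  = 44  -> cv2_dims (floored)
+# so the flattened feature vector entering the output layer has
+# cv2_dims * cv2_out = 44 * 10 = 440 entries per sample.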
+
+"""
+Generator Class
+---------------
+This defines the Generator for evaluation. The Generator consists of two LSTM
+layers followed by a final fully connected layer.
+
+"""
+
+class Generator(nn.Module):
+    def __init__(self, seq_length, batch_size, n_features=1, hidden_dim=50,
+                 num_layers=2, tanh_output=False):
+        super(Generator, self).__init__()
+        self.n_features = n_features
+        self.hidden_dim = hidden_dim
+        self.num_layers = num_layers
+        self.seq_length = seq_length
+        self.batch_size = batch_size
+        self.tanh_output = tanh_output
+
+        self.layer1 = nn.LSTM(input_size=self.n_features, hidden_size=self.hidden_dim,
+                              num_layers=self.num_layers, batch_first=True)  # dropout=0.2 could be re-enabled here
+        if self.tanh_output:
+            self.out = nn.Sequential(nn.Linear(self.hidden_dim, 1), nn.Tanh())  # Tanh bounds the output to (-1, 1)
+        else:
+            self.out = nn.Linear(self.hidden_dim, 1)
+
+    def init_hidden(self):
+        weight = next(self.parameters()).data
+        hidden = (weight.new(self.num_layers, self.batch_size, self.hidden_dim).zero_().to(device),
+                  weight.new(self.num_layers, self.batch_size, self.hidden_dim).zero_().to(device))
+        return hidden
+
+    def forward(self, x, hidden):
+        x, hidden = self.layer1(x.view(self.batch_size, self.seq_length, 1), hidden)
+        x = self.out(x)
+        return x  # return hidden as well if the caller needs the LSTM state
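+
+# Usage sketch (kept as a comment, nothing runs on import): the generator maps
+# a (batch, seq_length) noise matrix to a (batch, seq_length, 1) series, one
+# value per time step.
+#   g = Generator(seq_length=187, batch_size=32).to(device)
+#   z = torch.randn(32, 187).to(device)
+#   out = g(z, g.init_hidden())   # -> torch.Size([32, 187, 1])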
diff --git a/data.py b/data.py
new file mode 100644
index 0000000..48765ef
--- /dev/null
+++ b/data.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Sep 17 11:09:33 2019
+
+@author: anne marie delaney
+         eoin brophy
+
+Data Loading module for GAN training
+------------------------------------
+
+Creating the training set.
+
+Creating the PyTorch Dataset class for use with the DataLoader to enable
+batch training of the GAN.
+"""
+import torch
+from torch.utils.data import Dataset
+import pandas as pd
+
+device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
+
+class ECGData(Dataset):
+    # This is the class for the ECG data that we need to load, transform and then use in the DataLoader.
+    def __init__(self, source_file, class_id, transform=None):
+        self.source_file = source_file
+        data = pd.read_csv(source_file, header=None)
+        class_data = data[data[187] == class_id]
+        self.data = class_data.drop(columns=187)  # column 187 holds the class label, so drop it from the samples
+        self.transform = transform
+        self.class_id = class_id
+
+    def __len__(self):
+        return self.data.shape[0]
+
+    def __getitem__(self, idx):
+        sample = self.data.iloc[idx]
+        if self.transform:
+            sample = self.transform(sample)
+        return sample
+
+"""Including the function that will transform the dataframe to a pytorch tensor"""
+
+class PD_to_Tensor(object):
+    def __call__(self, sample):
+        return torch.tensor(sample.values).to(device)
+
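+# Usage sketch (assumes the MIT-BIH CSV layout used in train.py: 187 signal
+# columns followed by a class label in column 187):
+#   from torchvision import transforms
+#   dataset = ECGData('./mitbih_train.csv', class_id=0,
+#                     transform=transforms.Compose([PD_to_Tensor()]))
+#   dataset[0]      # a 187-element tensor on `device`
+#   len(dataset)    # number of class-0 beats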
diff --git a/train.py b/train.py
new file mode 100644
index 0000000..5789d0d
--- /dev/null
+++ b/train.py
@@ -0,0 +1,494 @@
+# -*- coding: utf-8 -*-
+"""
+GAN with Generator: LSTM, Discriminator: Convolutional NN with ECG Data
+
+  Introduction
+  ------------
+  The aim of this script is to use a convolutional neural network with
+  a max pooling layer in the discriminator. This setup was found to work
+  well with the PhysioNet ECG data in a paper. That paper used two
+  convolutional layers, so we compare the series generated with a single
+  convolutional layer in the discriminator against two layers, to see
+  whether the extra layer improves the quality of the generated series.
+
+"""
+"""
+Bringing in required dependencies as defined in the GitHub repo:
+  https://github.com/josipd/torch-two-sample/blob/master/torch_two_sample/permutation_test.pyx"""
+from __future__ import division
+
+import torch
+from tqdm import tqdm
+import numpy as np
+from matplotlib import pyplot as plt
+import seaborn as sns
+
+from torchvision import transforms
+from torch.autograd.variable import Variable
+sns.set(rc={'figure.figsize':(11, 4)})
+
+import datetime
+from datetime import date
+today = date.today()
+
+import random
+import json as js
+import pickle
+import os
+
+from data import ECGData, PD_to_Tensor
+from Model import Generator, Discriminator
+
+device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
+
+if device.type == 'cuda':  # comparing the torch.device object to a string is always False
+    print('Using GPU : ')
+    print(torch.cuda.get_device_name(device))
+else:
+    print('Using CPU')
+
+
+"""#MMD Evaluation Metric Definition
+Using MMD to determine the similarity between distributions
+
+PDIST code comes from the torch-two-sample utils code:
+  https://github.com/josipd/torch-two-sample/blob/master/torch_two_sample/util.py
+"""
+
+def pdist(sample_1, sample_2, norm=2, eps=1e-5):
+    r"""Compute the matrix of all squared pairwise distances.
+
+    Arguments
+    ---------
+    sample_1 : torch.Tensor or Variable
+        The first sample, should be of shape ``(n_1, d)``.
+    sample_2 : torch.Tensor or Variable
+        The second sample, should be of shape ``(n_2, d)``.
+    norm : float
+        The l_p norm to be used.
+
+    Returns
+    -------
+    torch.Tensor or Variable
+        Matrix of shape (n_1, n_2). The [i, j]-th entry is equal to
+        ``|| sample_1[i, :] - sample_2[j, :] ||_p``."""
+    n_1, n_2 = sample_1.size(0), sample_2.size(0)
+    norm = float(norm)
+
+    if norm == 2.:
+        norms_1 = torch.sum(sample_1**2, dim=1, keepdim=True)
+        norms_2 = torch.sum(sample_2**2, dim=1, keepdim=True)
+        norms = (norms_1.expand(n_1, n_2) +
+                 norms_2.transpose(0, 1).expand(n_1, n_2))
+        distances_squared = norms - 2 * sample_1.mm(sample_2.t())
+        return torch.sqrt(eps + torch.abs(distances_squared))
+    else:
+        dim = sample_1.size(1)
+        expanded_1 = sample_1.unsqueeze(1).expand(n_1, n_2, dim)
+        expanded_2 = sample_2.unsqueeze(0).expand(n_1, n_2, dim)
+        differences = torch.abs(expanded_1 - expanded_2) ** norm
+        inner = torch.sum(differences, dim=2, keepdim=False)
+        return (eps + inner) ** (1. / norm)
+
+def permutation_test_mat(matrix,
+                         n_1, n_2, n_permutations,
+                         a00=1, a11=1, a01=0):
+    r"""Compute the p-value of the following statistic (rejects when high):
+
+        \sum_{i,j} a_{\pi(i), \pi(j)} matrix[i, j]
+    """
+    n = n_1 + n_2
+    pi = np.zeros(n, dtype=np.int8)
+    pi[n_1:] = 1
+
+    larger = 0.
+    count = 0
+
+    for sample_n in range(1 + n_permutations):
+        count = 0.
+        for i in range(n):
+            for j in range(i, n):
+                mij = matrix[i, j] + matrix[j, i]
+                if pi[i] == pi[j] == 0:
+                    count += a00 * mij
+                elif pi[i] == pi[j] == 1:
+                    count += a11 * mij
+                else:
+                    count += a01 * mij
+        if sample_n == 0:
+            statistic = count
+        elif statistic <= count:
+            larger += 1
+
+        np.random.shuffle(pi)
+
+    return larger / n_permutations
+
+"""Code from Torch-Two-Samples at https://torch-two-sample.readthedocs.io/en/latest/#"""
+
+class MMDStatistic:
+    r"""The *unbiased* MMD test of :cite:`gretton2012kernel`.
+
+    The kernel used is equal to:
+
+    .. math ::
+        k(x, x') = \sum_{j=1}^k e^{-\alpha_j\|x - x'\|^2},
+
+    for the :math:`\alpha_j` provided in :py:meth:`~.MMDStatistic.__call__`.
+
+    Arguments
+    ---------
+    n_1: int
+        The number of points in the first sample.
+    n_2: int
+        The number of points in the second sample."""
+
+    def __init__(self, n_1, n_2):
+        self.n_1 = n_1
+        self.n_2 = n_2
+
+        # The three constants used in the test.
+        self.a00 = 1. / (n_1 * (n_1 - 1))
+        self.a11 = 1. / (n_2 * (n_2 - 1))
+        self.a01 = - 1. / (n_1 * n_2)
+
+    def __call__(self, sample_1, sample_2, alphas, ret_matrix=False):
+        r"""Evaluate the statistic.
+
+        The kernel used is
+
+        .. math::
+
+            k(x, x') = \sum_{j=1}^k e^{-\alpha_j \|x - x'\|^2},
+
+        for the provided ``alphas``.
+
+        Arguments
+        ---------
+        sample_1: :class:`torch:torch.autograd.Variable`
+            The first sample, of size ``(n_1, d)``.
+        sample_2: variable of shape (n_2, d)
+            The second sample, of size ``(n_2, d)``.
+        alphas : list of :class:`float`
+            The kernel parameters.
+        ret_matrix: bool
+            If set, the call will also return a second variable.
+
+            This variable can then be used to compute a p-value using
+            :py:meth:`~.MMDStatistic.pval`.
+
+        Returns
+        -------
+        :class:`float`
+            The test statistic.
+        :class:`torch:torch.autograd.Variable`
+            Returned only if ``ret_matrix`` was set to true."""
+        sample_12 = torch.cat((sample_1, sample_2), 0)
+        distances = pdist(sample_12, sample_12, norm=2)
+
+        kernels = None
+        for alpha in alphas:
+            kernels_a = torch.exp(- alpha * distances ** 2)
+            if kernels is None:
+                kernels = kernels_a
+            else:
+                kernels = kernels + kernels_a
+
+        k_1 = kernels[:self.n_1, :self.n_1]
+        k_2 = kernels[self.n_1:, self.n_1:]
+        k_12 = kernels[:self.n_1, self.n_1:]
+
+        mmd = (2 * self.a01 * k_12.sum() +
+               self.a00 * (k_1.sum() - torch.trace(k_1)) +
+               self.a11 * (k_2.sum() - torch.trace(k_2)))
+        if ret_matrix:
+            return mmd, kernels
+        else:
+            return mmd
+
+    def pval(self, distances, n_permutations=1000):
+        r"""Compute a p-value using a permutation test.
+
+        Arguments
+        ---------
+        distances: :class:`torch:torch.autograd.Variable`
+            The matrix computed using :py:meth:`~.MMDStatistic.__call__`.
+        n_permutations: int
+            The number of random draws from the permutation null.
+
+        Returns
+        -------
+        float
+            The estimated p-value."""
+        if isinstance(distances, Variable):
+            distances = distances.data
+        return permutation_test_mat(distances.cpu().numpy(),
+                                    self.n_1, self.n_2,
+                                    n_permutations,
+                                    a00=self.a00, a11=self.a11, a01=self.a01)
+
+"""
+
+This paper
+https://arxiv.org/pdf/1611.04488.pdf says that the most common way to
+choose sigma is to use the median pairwise distance between the joint data.
+
+"""
+
+def pairwisedistances(X, Y, norm=2):
+    dist = pdist(X, Y, norm)
+    return np.median(dist.cpu().numpy())
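+
+# Usage sketch (toy data; the median pairwise distance is passed directly as
+# the kernel parameter, mirroring how sigma is used in the training loop below):
+#   X = torch.randn(100, 187).double()
+#   Y = torch.randn(120, 187).double()
+#   sigma = [pairwisedistances(X, Y)]
+#   mmd_stat = MMDStatistic(100, 120)
+#   mmd_stat(X, Y, sigma)   # small values indicate similar distributions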
+
+"""
+
+Function for loading ECG Data
+
+"""
+def GetECGData(source_file, class_id):
+    compose = transforms.Compose([PD_to_Tensor()])
+    return ECGData(source_file, class_id=class_id, transform=compose)
+
+"""
+
+Creating the training set of sine/ECG signals
+
+"""
+
+# Taking normal ECG data for now
+source_filename = './mitbih_train.csv'
+ecg_data = GetECGData(source_file=source_filename, class_id=0)
+
+sample_size = 119  # batch size needed for the DataLoader and the noise creation function
+
+# Create a loader with the data, so that we can iterate over it.
+# drop_last=True because the model hard-codes batch_size in its view() calls,
+# so a smaller final batch would crash the forward pass.
+data_loader = torch.utils.data.DataLoader(ecg_data, batch_size=sample_size, shuffle=True, drop_last=True)
+# Num batches
+num_batches = len(data_loader)
+print(num_batches)
+
+"""Creating the Test Set"""
+test_filename = './mitbih_test.csv'
+
+ecg_data_test = GetECGData(source_file=test_filename, class_id=0)
+
+data_loader_test = torch.utils.data.DataLoader(ecg_data_test[:18088], batch_size=sample_size, shuffle=True)
+
+
+"""##Defining the noise creation function"""
+
+def noise(batch_size, features):
+    noise_vec = torch.randn(batch_size, features).to(device)
+    return noise_vec
+
+"""#Initialising Parameters"""
+
+seq_length = ecg_data[0].size()[0]  # number of time steps per sequence (187 for MIT-BIH beats)
+
+# Params for the generator
+hidden_nodes_g = 50
+layers = 2
+tanh_layer = False
+
+# No. of training rounds per batch
+D_rounds = 3
+G_rounds = 1
+num_epoch = 35
+learning_rate = 0.0002
+
+# Params for the Discriminator
+minibatch_layer = 0
+minibatch_normal_init_ = True
+num_cvs = 2
+cv1_out = 10
+cv1_k = 3
+cv1_s = 1
+p1_k = 3
+p1_s = 2
+cv2_out = 10
+cv2_k = 3
+cv2_s = 1
+p2_k = 3
+p2_s = 2
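+
+# Shape smoke test (a sketch mirroring how the networks are constructed in the
+# training loop below; kept as a comment, nothing here is executed):
+#   g = Generator(seq_length, sample_size, hidden_dim=hidden_nodes_g).to(device)
+#   d = Discriminator(seq_length, sample_size, minibatch_normal_init=True, num_cv=num_cvs,
+#                     cv1_s=cv1_s, p1_s=p1_s, cv2_s=cv2_s, p2_s=p2_s).to(device)
+#   fake = g(noise(sample_size, seq_length), g.init_hidden())   # (119, 187, 1)
+#   d(fake).shape                                               # (119, 1)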
+
+"""# Evaluation of GAN with 2 CNN Layers in the Discriminator
+
+##Generator and Discriminator training phase
+"""
+
+minibatch_out = [0, 3, 5, 8, 10]
+for minibatch_layer in minibatch_out:
+    path = ".../your_path/Run_"+str(today.strftime("%d_%m_%Y"))+"_"+str(datetime.datetime.now().time()).split('.')[0]
+    os.mkdir(path)
+
+    settings = {'data': source_filename,
+                'sample_size': sample_size,
+                'seq_length': seq_length,
+                'num_layers': layers,
+                'tanh_layer': tanh_layer,
+                'hidden_dims_generator': hidden_nodes_g,
+                'minibatch_layer': minibatch_layer,
+                'minibatch_normal_init_': minibatch_normal_init_,
+                'num_cvs': num_cvs,
+                'cv1_out': cv1_out,
+                'cv1_k': cv1_k,
+                'cv1_s': cv1_s,
+                'p1_k': p1_k,
+                'p1_s': p1_s,
+                'cv2_out': cv2_out,
+                'cv2_k': cv2_k,
+                'cv2_s': cv2_s,
+                'p2_k': p2_k,
+                'p2_s': p2_s,
+                'num_epoch': num_epoch,
+                'D_rounds': D_rounds,
+                'G_rounds': G_rounds,
+                'learning_rate': learning_rate
+                }
+
+    settings_json = js.dumps(settings)
+    f = open(path+"/settings.json", "w")
+    f.write(settings_json)
+    f.close()
+
+    generator_1 = Generator(seq_length, sample_size, hidden_dim=hidden_nodes_g, tanh_output=tanh_layer).to(device)
+    discriminator_1 = Discriminator(seq_length, sample_size, minibatch_normal_init=minibatch_normal_init_, minibatch=minibatch_layer, num_cv=num_cvs, cv1_out=cv1_out, cv1_k=cv1_k, cv1_s=cv1_s, p1_k=p1_k, p1_s=p1_s, cv2_out=cv2_out, cv2_k=cv2_k, cv2_s=cv2_s, p2_k=p2_k, p2_s=p2_s).to(device)
+    # Loss function
+    loss_1 = torch.nn.BCELoss()
+
+    generator_1.train()
+    discriminator_1.train()
+
+    d_optimizer_1 = torch.optim.Adam(discriminator_1.parameters(), lr=learning_rate)
+    g_optimizer_1 = torch.optim.Adam(generator_1.parameters(), lr=learning_rate)
+
+    G_losses = []
+    D_losses = []
+    mmd_list = []
+    series_list = np.zeros((1, seq_length))
+
+    for n in tqdm(range(num_epoch)):
+        for n_batch, sample_data in enumerate(data_loader):
+            ### TRAIN DISCRIMINATOR ON FAKE DATA
+            for d in range(D_rounds):
+                discriminator_1.zero_grad()
+
+                h_g = generator_1.init_hidden()
+
+                # Generating the noise for the fake data
+                noise_sample = Variable(noise(len(sample_data), seq_length))
+
+                # Use this line if the generator outputs hidden states: dis_fake_data, (h_g_n, c_g_n) = generator.forward(noise_sample, h_g)
+                dis_fake_data = generator_1.forward(noise_sample, h_g).detach()
+
+                y_pred_fake = discriminator_1(dis_fake_data)
+
+                loss_fake = loss_1(y_pred_fake, torch.zeros([len(sample_data), 1]).to(device))
+                loss_fake.backward()
+
+                # Train discriminator on real data
+                real_data = Variable(sample_data.float()).to(device)
+                y_pred_real = discriminator_1.forward(real_data)
+
+                loss_real = loss_1(y_pred_real, torch.ones([len(sample_data), 1]).to(device))
+                loss_real.backward()
+
+                d_optimizer_1.step()  # updating the weights using the gradients from both the real and fake batches
+
+            # Train Generator
+            for g in range(G_rounds):
+                generator_1.zero_grad()
+                h_g = generator_1.init_hidden()
+
+                noise_sample = Variable(noise(len(sample_data), seq_length))
+
+                # Use this line if the generator outputs hidden states: gen_fake_data, (h_g_n, c_g_n) = generator.forward(noise_sample, h_g)
+                gen_fake_data = generator_1.forward(noise_sample, h_g)
+                y_pred_gen = discriminator_1(gen_fake_data)
+
+                error_gen = loss_1(y_pred_gen, torch.ones([len(sample_data), 1]).to(device))
+                error_gen.backward()
+                g_optimizer_1.step()
+
+            if n_batch == (num_batches - 1):
+                G_losses.append(error_gen.item())
+                D_losses.append((loss_real + loss_fake).item())
+
+                torch.save(generator_1.state_dict(), path+'/generator_state_'+str(n)+'.pt')
+                torch.save(discriminator_1.state_dict(), path+'/discriminator_state_'+str(n)+'.pt')
+
+                # Check how the generator is doing by saving G's output on a fresh noise sample
+                with torch.no_grad():
+                    h_g = generator_1.init_hidden()
+                    fake = generator_1(noise(len(sample_data), seq_length), h_g).detach().cpu()
+                    generated_sample = torch.zeros(1, seq_length).to(device)
+
+                    for batch_idx in range(0, int(len(ecg_data_test[:18088])/sample_size)):
+                        noise_sample_test = noise(sample_size, seq_length)
+                        h_g = generator_1.init_hidden()
+                        generated_data = generator_1.forward(noise_sample_test, h_g).detach().squeeze()
+                        generated_sample = torch.cat((generated_sample, generated_data), dim=0)
+
+                    # Getting the MMD statistic for each training epoch
+                    generated_sample = generated_sample[1:]  # drop the zero row used for initialisation
+                    sigma = [pairwisedistances(ecg_data_test[:18088].type(torch.DoubleTensor), generated_sample.type(torch.DoubleTensor).squeeze())]
+                    mmd = MMDStatistic(len(ecg_data_test[:18088]), generated_sample.size(0))
+                    mmd_eval = mmd(ecg_data_test[:18088].type(torch.DoubleTensor), generated_sample.type(torch.DoubleTensor).squeeze(), sigma, ret_matrix=False)
+                    mmd_list.append(mmd_eval.item())
+
+                series_list = np.append(series_list, fake[0].numpy().reshape((1, seq_length)), axis=0)
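+
+    # To reload a saved epoch-n checkpoint later (a sketch; the networks must
+    # first be re-created with the same constructor arguments as above):
+    #   generator_1.load_state_dict(torch.load(path+'/generator_state_'+str(n)+'.pt'))
+    #   discriminator_1.load_state_dict(torch.load(path+'/discriminator_state_'+str(n)+'.pt'))
+    #   generator_1.eval()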
+
+    # Dumping the errors and MMD evaluations for each training epoch.
+    with open(path+'/generator_losses.txt', 'wb') as fp:
+        pickle.dump(G_losses, fp)
+    with open(path+'/discriminator_losses.txt', 'wb') as fp:
+        pickle.dump(D_losses, fp)
+    with open(path+'/mmd_list.txt', 'wb') as fp:
+        pickle.dump(mmd_list, fp)
+
+    # Plotting the error graph
+    plt.plot(G_losses, '-r', label='Generator Error')
+    plt.plot(D_losses, '-b', label='Discriminator Error')
+    plt.title('GAN Errors in Training')
+    plt.legend()
+    plt.savefig(path+'/GAN_errors.png')
+    plt.close()
+
+    # Plot a figure for every three training epochs with the MMD value in the title
+    i = 0
+    while i < num_epoch:
+        fig, ax = plt.subplots(3, 1, constrained_layout=True)
+        fig.suptitle("Generated fake data")
+        for j in range(0, 3):
+            if i >= num_epoch:  # guards the final figure when num_epoch is not a multiple of 3
+                break
+            # series_list[0] is the zero row used for initialisation, so the
+            # sample saved for epoch i sits at index i + 1
+            ax[j].plot(series_list[i + 1][:])
+            ax[j].set_title('Epoch '+str(i)+', MMD: %.4f' % mmd_list[i])
+            i = i + 1
+        plt.savefig(path+'/Training_Epoch_Samples_MMD_'+str(i)+'.png')
+        plt.close(fig)
+
+    # Checking the diversity of the samples:
+    generator_1.eval()
+    h_g = generator_1.init_hidden()
+    test_noise_sample = noise(sample_size, seq_length)
+    gen_data = generator_1.forward(test_noise_sample, h_g).detach()
+
+    plt.title("Generated ECG Waves")
+    plt.plot(gen_data[random.randint(0, sample_size-1)].tolist(), '-b')
+    plt.plot(gen_data[random.randint(0, sample_size-1)].tolist(), '-r')
+    plt.plot(gen_data[random.randint(0, sample_size-1)].tolist(), '-g')
+    plt.plot(gen_data[random.randint(0, sample_size-1)].tolist(), '-', color='orange')
+    plt.savefig(path+'/Generated_Data_Sample1.png')
+    plt.close()
\ No newline at end of file