Commit c2df57b: Compute_CheckLabels.m
Manuel D. Morales committed Apr 20, 2021 (initial commit, 0 parents)

Showing 22 changed files with 2,879 additions and 0 deletions.
70 changes: 70 additions & 0 deletions Compute_CNNcfg.m
@@ -0,0 +1,70 @@
function cfg = Compute_CNNcfg(Nsamples)
% Compute_CNNcfg  Build a structure of CNN training options.
% Nsamples: number of training samples, used below to derive the
% validation frequency



% ----------------------------------------
% 1) Initialize structure
%
cfg = [];



% ----------------------------------------
% 2) Iteration and epochs:
%
% Recall that an iteration is one step taken in the gradient descent
% algorithm towards minimizing the loss function using a mini-batch. In
% other words, each iteration is an estimation of the gradient and an
% update of the network parameters
%
cfg.MiniBatchSize = 128; % Subset of the training set that is used to evaluate the gradient of the loss function and update the weights
cfg.MaxEpochs = 100; % An epoch is the full pass of the training algorithm over the entire training set
cfg.Shuffle = 'once'; % (never|once|every-epoch) When to shuffle the data: 'once' shuffles the training data once before training; 'every-epoch' also reshuffles before each epoch and shuffles the validation data before each network validation
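%
% Illustrative arithmetic (the sample count is an assumption, not from this
% commit): with Nsamples = 12800 and MiniBatchSize = 128, one epoch is
% floor(12800/128) = 100 iterations, so MaxEpochs = 100 allows up to
% 10,000 parameter updates in total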



% ----------------------------------------
% 3) Validation process
%
% A validation set is used to test learning and generalization during the
% training process.
% By default, if the validation loss is larger than or equal to the
% previously smallest loss five times in a row, then network training
% stops. To change the number of times that the validation loss is allowed
% to not decrease before training stops, use the 'ValidationPatience'
% option. You can add additional stopping criteria using output functions
%
cfg.ValidationData = []; % ({Xval,Yval}) Used to validate the network at regular intervals during training.
% cfg.ValidationFrequency = 20; % Number of iterations between evaluations of validation metrics. A suggestion is to choose this value so that the network is validated once/twice/.. per epoch.
cfg.NumValPerEpoch = 1; % Number of validations per epoch. This is not a "trainingOptions" parameter, but it is used to conveniently compute "ValidationFrequency" below
cfg.ValidationFrequency = floor(Nsamples/cfg.MiniBatchSize/cfg.NumValPerEpoch);
cfg.ValidationPatience = 5; % (scalar|Inf) Turn off the built-in validation stopping criterion (which uses the loss) by setting the 'ValidationPatience' value to Inf.
cfg.OutputFcn = @(info)stopIfAccuracyNotImproving(info,3); % The training calls the specified function once before the start of training, after each iteration, and once after training has finished, each time passing a structure with information about the current training state
%cfg.OutputFcn = @(info)savetrainingplot(info);
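%
% A minimal sketch of the stopping function referenced above, modeled on the
% MathWorks documentation example of the same name (an assumption; the
% function itself is not part of this commit). It stops training once the
% validation accuracy has failed to improve N validations in a row:
%
% function stop = stopIfAccuracyNotImproving(info,N)
% persistent bestValAccuracy valLag
% stop = false;
% if info.State == "start"
%     bestValAccuracy = 0;  % best validation accuracy seen so far
%     valLag = 0;           % validations since the last improvement
% elseif ~isempty(info.ValidationAccuracy)
%     if info.ValidationAccuracy > bestValAccuracy
%         bestValAccuracy = info.ValidationAccuracy;
%         valLag = 0;
%     else
%         valLag = valLag + 1;
%         stop = valLag >= N;
%     end
% end
% end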

% ----------------------------------------
% 4) Learning rate
%
cfg.InitialLearnRate = 0.01; % (default:0.01) If the learning rate is too low, then training takes a long time. If the learning rate is too high, then training might reach a suboptimal result
cfg.LearnRateSchedule = 'none'; % (none|piecewise) Option for dropping the learning rate during training. The software updates the learning rate every certain number of epochs by multiplying with a certain factor.
cfg.LearnRateDropFactor = 0.1; % (default:0.1) Factor for dropping the learning rate. Multiplicative factor to apply to the learning rate every time a certain number of epochs passes. Valid only when the value of LearnRateSchedule is 'piecewise'.
cfg.LearnRateDropPeriod = 4; % (default:10) Number of epochs for dropping the learning rate. Valid only when the value of LearnRateSchedule is 'piecewise'.



% ----------------------------------------
% Other parameters
%
% cfg.CheckpointPath = % Path for saving checkpoint networks
% cfg.ExecutionEnvironment = % (default:auto) (auto|cpu|gpu|multi-gpu|parallel) Hardware resource for training network
% cfg.L2Regularization = % (default:0.0001) Factor for L2 regularizer (weight decay). You can specify a multiplier for the L2 regularizer for network layers with learnable parameters.
% cfg.Momentum = % (default:0.9) Contribution of the gradient step from the previous iteration to the current iteration of the training. A value of 0 means no contribution from the previous step, whereas a value of 1 means maximal contribution from the previous step.



% ----------------------------------------
% Progress visualization
%
cfg.Plots = 'none'; % (none|training-progress)
cfg.Verbose = 1; % Indicator to display training progress information in the command window
cfg.VerboseFrequency = cfg.ValidationFrequency; % Frequency of verbose printing, which is the number of iterations between printing to the command window
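
A hedged sketch of how this cfg structure might be consumed (the field names
mirror MATLAB's trainingOptions arguments; the 'sgdm' solver and the
layers/XTrain/YTrain variables are assumptions, not part of this commit):

opts = trainingOptions('sgdm', ...
    'MiniBatchSize',cfg.MiniBatchSize, ...
    'MaxEpochs',cfg.MaxEpochs, ...
    'Shuffle',cfg.Shuffle, ...
    'ValidationFrequency',cfg.ValidationFrequency, ...
    'ValidationPatience',cfg.ValidationPatience, ...
    'InitialLearnRate',cfg.InitialLearnRate, ...
    'OutputFcn',cfg.OutputFcn, ...
    'Plots',cfg.Plots, ...
    'Verbose',cfg.Verbose, ...
    'VerboseFrequency',cfg.VerboseFrequency);
net = trainNetwork(XTrain,YTrain,layers,opts); % layers: a CNN layer array

The remaining cfg fields (learning-rate schedule, L2 regularization,
momentum, checkpoint path, execution environment) pass through the same way.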
21 changes: 21 additions & 0 deletions Compute_CNNconstructXY.m
@@ -0,0 +1,21 @@
function Data = Compute_CNNconstructXY(Data)
% Compute_CNNconstructXY  Reshape TFR images into CNN input format and
% split the label matrix into class labels (Y) and metadata (YInfo).

% Convert Xtfr to X: the CNN expects X with size (16 x 32 x 1 x Nsamples)
for i=1:size(Data.Xtfr,1)
Data.X(:,:,1,i) = squeeze(Data.Xtfr(i,:,:));
end
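% An equivalent vectorized form (a sketch; assumes Xtfr is Nsamples x 16 x 32)
% would avoid the loop:
%   Data.X = permute(Data.Xtfr,[2 3 4 1]); % -> 16 x 32 x 1 x Nsamples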
Data = rmfield(Data,'Xtfr');

% Save current Y and create the class label Y vector
YALL = Data.Y;
Data = rmfield(Data,'Y');

Data.Y = YALL(:,1);
Data.YInfo = YALL(:,2:end);
% Data.M1  = YALL(:,2);
% Data.M2  = YALL(:,3);
% Data.D   = YALL(:,4);
% Data.SNR = YALL(:,5);

% Clear garbage
clear ans doplot IFO Nsamples IndRan IndTrai IndTest Nlabels i YALL
61 changes: 61 additions & 0 deletions Compute_CNNeliminateSNR.m
@@ -0,0 +1,61 @@
function Data = Compute_CNNeliminateSNR(Data,SNR2ELIM)
% Compute_CNNeliminateSNR  Remove samples whose SNR is at or below SNR2ELIM.

% SNR2ELIM: SNR threshold; GW samples with SNR <= SNR2ELIM (and their
% paired noise samples) are eliminated

% ----------------------------------------
% Get data for class noise: 1
Ind_c0 = Data.Y(:,1)==1;
Y_c0 = Data.Y(Ind_c0,:);
TFR_c0 = Data.Xtfr(Ind_c0,:,:);

% ----------------------------------------
% Get data for class gw: 2
Ind_c1 = Data.Y(:,1)==2;
Y_c1 = Data.Y(Ind_c1,:);
TFR_c1 = Data.Xtfr(Ind_c1,:,:);

% ----------------------------------------
% Eliminate data for which SNR <= SNR2ELIM (the SNR is stored in column 5
% of Y). The same rows are removed from the noise class, which assumes the
% noise and GW samples are paired one-to-one
Ind2Eli = Y_c1(:,5)<=SNR2ELIM;
Y_c0(Ind2Eli,:) = [];
Y_c1(Ind2Eli,:) = [];
TFR_c0(Ind2Eli,:,:) = [];
TFR_c1(Ind2Eli,:,:) = [];

% ----------------------------------------
% Construct data to keep
Data.Y = [Y_c0 ; Y_c1 ];
Data.Xtfr = [TFR_c0 ; TFR_c1];

% ----------------------------------------
% Clear garbage
clear ans Ind2Eli Ind_c1 Ind_c0 TFR_c0 TFR_c1 Y_c0 Y_c1


%% PLOT FOR DEBUGGING

% if (0)
%
% % ----------------------------------------
% for i=1:1:size(Data.Y,1)/2
% figure(1)
%
% subplot(2,1,1)
% imagesc(Data.t,Data.f,squeeze(Data.Xtfr(i,:,:)))
% xlabel('Time (s)'), ylabel('Frequency (Hz)'), title('n(t)')
% colormap jet, view(0,90), box on, grid on, set(gca,'YDir','normal')
%
% subplot(2,1,2)
% imagesc(Data.t,Data.f,squeeze(Data.Xtfr(i+size(Data.Y,1)/2,:,:)))
% xlabel('Time (s)'), ylabel('Frequency (Hz)'), title(['n(t)+h(t) | SNR=' num2str(Data.Y(i+size(Data.Y,1)/2,5))])
% colormap jet, view(0,90), box on, grid on, set(gca,'YDir','normal')
%
% pause(0.2)
%
% end % for i=1:1:size(Data.Y,1)/2
% clear ans i
%
% % ----------------------------------------
% % Return
% return
% end % if (0)
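
A short usage sketch (the threshold value of 10 is an assumption for
illustration, not part of this commit):

Data = Compute_CNNeliminateSNR(Data,10); % drop GW samples with SNR <= 10 and their paired noise samples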
13 changes: 13 additions & 0 deletions Compute_ClassificationApply.m
@@ -0,0 +1,13 @@
function [YEsti, YProb] = Compute_ClassificationApply(XTest,Model)
% Compute_ClassificationApply  Apply a trained CNN (Model.net) to new data.



%% APPLY CLASSIFIER: APPLY MODEL TO NEW DATA

% Apply classifier. Get predicted class and the probability for each class
[YEsti,YProb] = classify(Model.net,XTest);

% Convert predicted class from categorical to double
if iscategorical(YEsti)
YEsti = grp2idx(YEsti);
end
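
A usage sketch (variable names are assumptions): with Model.net returned by
trainNetwork and XTest sized 16 x 32 x 1 x Ntest,

[YEsti,YProb] = Compute_ClassificationApply(XTest,Model);
% YEsti: class indices (presumably 1 = noise, 2 = gw, per the convention in
% Compute_CNNeliminateSNR); YProb: one score column per class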
26 changes: 26 additions & 0 deletions Compute_ClassificationApplyNB.m
@@ -0,0 +1,26 @@
function [YEstiNB, YProbNB, CostNB] = Compute_ClassificationApplyNB(XTest,Model)
% Compute_ClassificationApplyNB  Apply a trained naive Bayes model
% (Model.NBModel) to new data.



%% APPLY CLASSIFIER: APPLY MODEL TO NEW DATA

% Size of input data
Sx = size(XTest,1);
Sy = size(XTest,2);
Sz = size(XTest,3);
Sn = size(XTest,4);

% i) flatten the images in XTest, ii) remove the singleton dimension, iii) transpose so each row is one flattened sample
XTest_flat = transpose(squeeze(reshape(XTest,[Sx*Sy,Sz,Sn])));

% Apply classifier. Get predicted class and the probability for each class
[YEstiNB,YProbNB,CostNB] = predict(Model.NBModel,XTest_flat);


% Convert the predicted class to double (predict returns labels with the
% same type as the training labels)
if iscategorical(YEstiNB)
    YEstiNB = grp2idx(YEstiNB);
elseif iscell(YEstiNB)
    % Convert cell array of label strings to a double array
    YEstiNB = str2num(cell2mat(YEstiNB)); %#ok<ST2NM>
end
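
A hedged sketch of how Model.NBModel might have been trained (fitcnb is
MATLAB's naive Bayes trainer; the flattening mirrors the apply step above,
and the variable names are assumptions, not part of this commit):

Sx = size(XTrain,1); Sy = size(XTrain,2); Sz = size(XTrain,3); Sn = size(XTrain,4);
XTrain_flat = transpose(squeeze(reshape(XTrain,[Sx*Sy,Sz,Sn]))); % one flattened image per row
Model.NBModel = fitcnb(XTrain_flat,YTrain);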
26 changes: 26 additions & 0 deletions Compute_ClassificationApplySVM.m
@@ -0,0 +1,26 @@
function [YEstiSVM, YScoreSVM] = Compute_ClassificationApplySVM(XTest,Model)
% Compute_ClassificationApplySVM  Apply a trained SVM (Model.SVMModel) to new data.



%% APPLY CLASSIFIER: APPLY MODEL TO NEW DATA

% Size of input data
Sx = size(XTest,1);
Sy = size(XTest,2);
Sz = size(XTest,3);
Sn = size(XTest,4);

% i) flatten the images in XTest, ii) remove the singleton dimension, iii) transpose so each row is one flattened sample
XTest_flat = transpose(squeeze(reshape(XTest,[Sx*Sy,Sz,Sn])));

% Apply classifier. Get the predicted class and the score for each class
[YEstiSVM,YScoreSVM] = predict(Model.SVMModel,XTest_flat);


% Convert the predicted class to double (predict returns labels with the
% same type as the training labels)
if iscategorical(YEstiSVM)
    YEstiSVM = grp2idx(YEstiSVM);
elseif iscell(YEstiSVM)
    % Convert cell array of label strings to a double array
    YEstiSVM = str2num(cell2mat(YEstiSVM)); %#ok<ST2NM>
end
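
Similarly, a sketch of how Model.SVMModel might have been trained (fitcsvm
fits a two-class SVM, matching the noise/gw labels; the variable names and
kernel choice are assumptions, not part of this commit):

XTrain_flat = transpose(squeeze(reshape(XTrain,[Sx*Sy,Sz,Sn])));
Model.SVMModel = fitcsvm(XTrain_flat,YTrain,'KernelFunction','linear');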