Add section docs from OS's word file to source files
nobias committed May 29, 2018
1 parent 2f05769 commit 61e0741
Showing 9 changed files with 232 additions and 117 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -93,3 +93,4 @@ ENV/
*.asv
*.log

*.sublime-workspace
48 changes: 30 additions & 18 deletions src/LS_nnls.m
@@ -1,27 +1,27 @@
function [X, G] = LS_nnls(A,Y,opts,G,x)
% LS_NNLS is a solver for large non-negative least squares problems.
function [X, G] = LS_nnls(A, Y, opts, G, x)
%% LS_NNLS Solver for large non-negative least-squares problems
%
% argmin_{x>=0}||y-A(x)||_2^2
% Solves argmin_{x>=0}||y-A(x)||_2^2
%
% Input:
% A... Matrix corresponding to A in the formula above.
% x... Matrix of solution vectros of the above problem. LS_nnls
% A Matrix corresponding to A in the formula above.
% x            Matrix of solution vectors of the above problem. LS_nnls
% solves multiple nnls problems in parallel.
% Y... Matrix of inhomogenities, each row represents one nnls
% Y            Matrix of inhomogeneities, each row represents one nnls
%              problem.
% struct opts
% opts.display... boolean, if true messages will be printed in console.
% opts.lambda... lagrangian multiplier for L1 regularization
% opts.gpu_id... ID of GPU to be used if GPU support is available.
% opts.use_std... calculate least standard deviation instead of L2-norm
% opts.sample... Read about convergence check below!
% opts.tol... -
% opts.tol_... -
% opts.display boolean; if true, messages will be printed to the console.
% opts.lambda  Lagrange multiplier for L1 regularization
% opts.gpu_id  ID of the GPU to be used, if GPU support is available.
% opts.use_std minimize the standard deviation instead of the L2-norm
% opts.sample  Read about the convergence check below!
% opts.tol     -
% opts.tol_    -
%
% Output:
% X... Matrix of approximations of the solutions to the
% X Matrix of approximations of the solutions to the
% nnls problems. Each row is one solution.
% G... Gram-matrix.
% G Gramian matrix.
%
% Convergence check:
% LS_nnls checks for each nnls subproblem whether a certain accuracy is
@@ -34,7 +34,8 @@
% this information to accurately estimate the true error. If the true error
% is below opts.tol the algorithm stops for the nnls-sub-problem in
% question and puts the current solution into the output array.
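%
% A minimal sketch of this idea (an illustration only, not necessarily the
% exact estimator implemented below; err_history is a hypothetical vector of
% recorded errors): if the consecutive decrements of the error decay roughly
% geometrically, their logarithms fall on a straight line, and the remaining
% error is the tail of a geometric series:
%
%   d = -diff(err_history);              % consecutive error decrements
%   p = polyfit(1:numel(d), log(d), 1);  % straight-line fit of log(d)
%   q = exp(p(1));                       % estimated geometric rate
%   err_remaining = d(end)*q/(1-q);      % geometric-series tail
%   done = err_remaining < opts.tol;     % stop this sub-problem if accurate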
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%% Set default values for parameters not set by user
if nargin<3
opts = struct;
end
@@ -71,8 +72,10 @@
opts.max_iter = 2000;
end

%% Compute the first term of the gradient that won’t change throughout the function
h = A'*Y - opts.lambda;
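% Up to a constant factor, the gradient of ||Y - A*x||^2 + lambda*||x||_1 on
% the non-negative orthant is (A'*A)*x - (A'*Y - lambda), so h is the constant
% part of the gradient and only needs to be computed once.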

%% Assert that the dimensions of the input correspond to an NNLS problem
if nargin<5
x = zeros(size(A,2),size(Y,2));
if nargin<4
@@ -94,20 +97,24 @@
end
rds=1:size(Y,2);

%% Compute the second term (the Gramian matrix) that does not change throughout the function
if opts.use_std
G = @(x) G*x - (G*sum(x,2))/size(Y,2);
h = h - A'*sum(Y,2)/size(Y,2);
else
G = @(x) G*x;
end
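% For opts.use_std, both gradient terms are centered by their mean over the
% size(Y,2) columns, implementing the least-standard-deviation variant
% described in the function help.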

%% Loop until all sub-problems have been solved to within desired accuracy
iter = 0;
test=[];
dn=[[1:2:2*opts.sample]', ones(opts.sample,1)];
Xi=inv(dn'*dn)*dn';
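% dn pairs the odd iteration indices 1,3,...,2*opts.sample-1 with a column of
% ones, so Xi = inv(dn'*dn)*dn' is the least-squares operator that fits a
% straight line (slope and offset) through opts.sample consecutive error
% samples (see the convergence check in the help); it is precomputed here and
% applied repeatedly inside the loop.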
tic
while ~isempty(x)
iter = iter + 1;

%% Projected gradient descent with exact line search update
x_ = gather(x);
df = -h + G(x);
passive=max(x>0,df<0);
@@ -117,10 +124,12 @@
x = x - df_.*alpha;
x(x<0) = 0;
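% The passive set marks coordinates that are either strictly positive or have
% a negative partial derivative; the step is restricted to this set, and the
% projection x(x<0) = 0 keeps the iterate inside the non-negative orthant.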

%% If the max. number of iterations has not been reached, log the consecutive error history (see function help)
% Wait for two times opts.sample iterations, then use the error estimator.
if iter>opts.max_iter
ids=true(1,size(x,2));
if opts.display
disp('max number of iterations is reached');
disp('Max. number of iterations has been reached.');
end
elseif ~opts.use_std
if mod(iter,2)==1
@@ -144,6 +153,8 @@
ids = [];
end

%% If an nnls sub-problem k is finished (ids(k)==true), transfer its solution from the GPU to the output array
% and delete it from the current job description
if max(ids(:))
if ~isempty(opts.gpu_id)
X(:,rds(ids)) = gather(x(:,ids));
@@ -159,8 +170,9 @@
disp(iter);
end
end

if opts.display
toc
end

end
end
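
For reference, a minimal usage sketch of the updated interface (hypothetical data; any opts fields left unset fall back to the defaults assigned above):

A = rand(200, 50);                 % random dictionary (hypothetical)
X_true = max(randn(50, 1000), 0);  % non-negative ground truth
Y = A*X_true;                      % stack of nnls right-hand sides
opts = struct('display', true, 'lambda', 0, 'sample', 100);
X = LS_nnls(A, Y, opts);           % X approximates X_true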
29 changes: 24 additions & 5 deletions src/S_update.m
@@ -1,31 +1,44 @@
function [S,T]=S_update(Y,S,T,opts)
function [S, T] = S_update(Y, S, T, opts)
%% S_update Perform a gradient-descent update with exact line search for the variables in S

T(isnan(T))=0;
S(isnan(S))=0;

%% Compute the components of the gradient of the squared 2-norm of Y-S*T
Q_T = T*T';
q_S = Y*T';
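% For the Frobenius objective ||Y - S*T||^2, the gradient with respect to S is
% (up to a constant factor) S*(T*T') - Y*T' = S*Q_T - q_S, which is how df_S
% is assembled further below.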

%% Adjust the gradient components for the least-standard-deviation objective
if opts.use_std
opts.T = sum(T,2);
Q_T = Q_T - opts.T*opts.T'/size(T,2);
Q_T = Q_T - opts.T * opts.T'/size(T,2);
q_S = q_S - opts.Y.*opts.T'/size(T,2);
end

%% Modify matrix Q_T to include the contribution of the 1-norm orthogonality regularizer
if opts.lamb_orth_L1
Q_T = Q_T + opts.lamb_orth_L1*(opts.hilf);
end

%% Generate a function handle that computes the contribution of the spatial Total Variation regularizer
if opts.lamb_spat_TV
lb_spat_TV =@(X) opts.lamb_spat_TV*reshape(convn(reshape(X',opts.rank,opts.size(1),...
opts.size(2)),laplace,'same'),opts.rank,[])';
else
lb_spat_TV =@(X) 0;
end
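% The handle applies a Laplacian stencil (the variable laplace, assumed to
% hold the discrete Laplacian kernel) to each of the opts.rank spatial
% components after reshaping them onto the opts.size(1)-by-opts.size(2) pixel
% grid; this is the gradient contribution of a quadratic spatial smoothness
% (TV-type) penalty.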

%% Assemble gradient from its components
df_S = -q_S + S*Q_T + lb_spat_TV(S) + opts.lamb_spat;

%% Generate the direction v in which the update shall be performed
% v is generated from the gradient by projecting along the normalization constraint
% that is imposed when using an orthogonality regularizer,
% and by projecting onto the surface of the non-negativity constraint.
if (opts.lamb_orth_L1 + opts.lamb_orth_L2)
if opts.lamb_orth_L2
% Final assembly of the gradient for the 2-norm orthogonality regularizer
% (indirectly, via modification of the direction v)
v = df_S + opts.lamb_orth_L2*(S*(opts.hilf*(S'*S)));
else
v = df_S;
@@ -38,7 +51,11 @@
passive_S = max(S>0,df_S<0);
v = passive_S.*df_S;
end


%% Exact line search for the direction v
% In the case of the 2-norm orthogonality regularizer, the exact line search is approximated by an exact line search for the residual gradient only.
% If this results in a step that points in the opposite direction of the negative gradient, the learning rate is instead set to a fixed value of 1e-6.
% This is necessary because that regularizer makes the problem non-quadratic.
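% For the quadratic part of the objective, the optimal step along v has the
% closed form
%
%   alpha_S = <v, df_S> / <v, v*Q_T + lb_spat_TV(v)>,
%
% evaluated row-wise when opts.pointwise is set and as a single scalar
% otherwise, as implemented below.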
if opts.pointwise
alpha_S = sum(v.*df_S,2)./sum(v.*(v*Q_T + lb_spat_TV(v)),2);
else
@@ -51,8 +68,10 @@
alpha_S(isnan(alpha_S))=0;
alpha_S(isinf(alpha_S))=0;

%% Update S
S = S - alpha_S.*v;

%% Project onto constraints
S(S<0)=0;

if opts.lamb_orth_L1 + opts.lamb_orth_L2
@@ -61,6 +80,7 @@
S = S./platz;
end

%% Output diagnostic info
if opts.diagnostic
figure(1);
clf('reset')
@@ -74,5 +94,4 @@
drawnow expose
end


end
end
