## Copyright (C) 2012 Leonardo Araujo <[email protected]>
##
## This program is free software; you can redistribute it and/or modify it under
## the terms of the GNU General Public License as published by the Free Software
## Foundation; either version 3 of the License, or (at your option) any later
## version.
##
## This program is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
## FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
## details.
##
## You should have received a copy of the GNU General Public License along with
## this program; if not, see <http://www.gnu.org/licenses/>.
## -*- texinfo -*-
## @deftypefn {Function File} {@var{predictor} =} dpcmopt (@var{training_set}, @var{ord})
## @deftypefnx {Function File} {[@var{predictor}, @var{partition}, @var{codebook}] =} dpcmopt (@var{training_set}, @var{ord}, @var{cb})
## Optimize the DPCM parameters and codebook.
##
## It uses the Levinson-Durbin algorithm to compute the coefficients of an
## all-pole (IIR) predictor from the autocorrelation sequence of the training
## signal.  Once the best predictor is found, it uses the Lloyd algorithm
## (@code{lloyds}) to find the best codebook and partition for the resulting
## prediction error.
##
## @table @code
## @item predictor = dpcmopt (training_set, ord)
## Optimize the DPCM parameters using the Levinson-Durbin algorithm.
## The predictor vector describes an m-th order prediction of the
## output according to the following equation:
## y(k) = p(1)sig(k-1) + p(2)sig(k-2) + ... + p(m-1)sig(k-m+1) + p(m)sig(k-m)
## where the predictor vector is given by
## predictor = [0, p(1), p(2), p(3),..., p(m-1), p(m)].
##
## training_set is the training data used to find the best predictor.
##
## ord is the order of the desired prediction model.
##
## @item [predictor, partition, codebook] = dpcmopt (training_set, ord, cb)
## Optimize the DPCM parameters and also use the Lloyd algorithm to find
## the best codebook and partition for the given training signal.
##
## cb may be either an initial codebook for @code{lloyds} or the number of
## levels of the desired codebook (see the example below).
##
## @end table
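##
## For example, assuming a training signal @var{x}, a first-order predictor,
## or additionally a 16-level codebook and partition, could be obtained with
##
## @example
## predictor = dpcmopt (x, 1);
## [predictor, partition, codebook] = dpcmopt (x, 1, 16);
## @end example
##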
## @seealso{dpcmenco, dpcmdeco, levinson, lloyds}
## @end deftypefn
function [predictor, partition, codebook] = dpcmopt (training_set, ord, cb)

  if (nargin < 2 || nargin > 3)
    print_usage ();
  endif

  training_set = training_set(:);
  L = length (training_set);
  corr_tr = xcorr (training_set');                  # autocorrelation
  ncorr_tr = corr_tr(L:L+ord+1) ./ (L - [1:ord+2]); # normalize

  ## use the Levinson-Durbin recursion to solve the Yule-Walker equations
  a = levinson (ncorr_tr, ord);
  predictor = [0 -a(2:end)];

  if (nargin > 2 && nargout > 1)
    ## prediction error of the optimal predictor over the training set
    e = [];
    for i = ord+1 : L
      e(i-ord) = training_set(i) - fliplr (predictor) * training_set(i-ord:i);
    endfor

    ## find the best codebook and partition table for the prediction error
    if (length (cb) == 1)
      len = cb;
      [partition, codebook] = lloyds (e, len);
    else
      initcodebook = cb;
      [partition, codebook] = lloyds (e, initcodebook);
    endif
  endif

endfunction

%% Test input validation
%!error dpcmopt ()
%!error dpcmopt (1)
%!error dpcmopt (1, 2, 3, 4)
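
%% Minimal usage sketch (assumes the companion functions dpcmenco and
%% dpcmdeco from this package): fit a first-order predictor and a 16-level
%% codebook to a sine wave, then round-trip the signal through DPCM
%% encoding and decoding.
%!demo
%! x = sin (2*pi*(0:0.01:2));
%! [predictor, partition, codebook] = dpcmopt (x, 1, 16);
%! idx = dpcmenco (x, codebook, partition, predictor);
%! y = dpcmdeco (idx, codebook, predictor);
%! plot (x); hold on; plot (y, "r--"); hold off;
%! legend ("original", "DPCM decoded");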