Skip to content

Commit

Permalink
Add DynamicFactor and a criterion for number of factors (#4)
Browse files Browse the repository at this point in the history
  • Loading branch information
junyuan-chen committed Apr 20, 2024
1 parent ad2e843 commit 7462cbf
Show file tree
Hide file tree
Showing 17 changed files with 930 additions and 190 deletions.
2 changes: 1 addition & 1 deletion data/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ that generates these files from original data.

| Name | Source | File | Note |
| :--: | :----: | :--: | :--- |
| lpw_est_data.mat | [Li, Plagborg-Møller and Wolf (2024)](https://doi.org/10.1016/j.jeconom.2024.105722) | `DFM/Subroutines/SW_DFM_Estimation/data/hom_fac_1.xlsx` | Data are processed with [`lpw_savedata.m`](src/lpw_savedata.m) and used in [Stock and Watson (2016)](https://doi.org/10.1016/bs.hesmac.2016.04.002) |
| lpw_data.mat | [Li, Plagborg-Møller and Wolf (2024)](https://doi.org/10.1016/j.jeconom.2024.105722) | `DFM/Subroutines/SW_DFM_Estimation/data/hom_fac_1.xlsx` | Data are processed with [`lpw_savedata.m`](src/lpw_savedata.m) and used in [Stock and Watson (2016)](https://doi.org/10.1016/bs.hesmac.2016.04.002) |

## References

Expand Down
Binary file added data/lpw_data.mat
Binary file not shown.
Binary file removed data/lpw_est_data.mat
Binary file not shown.
83 changes: 82 additions & 1 deletion data/src/lpw_savedata.m
Original file line number Diff line number Diff line change
Expand Up @@ -262,6 +262,87 @@
lsout.r2vec = r2vec; lsout.nobs = nobs;
lsout.nt = nt; lsout.ns = ns;


%% Content from factor_estimation_ls_full.m
% Post-processing of the least-squares factor estimates in lsout:
% (1) optionally cumulate differenced factors back to levels,
% (2) re-estimate factor loadings series-by-series, imposing any linear
%     restrictions of the form R*lambda = r, and
% (3) fit univariate AR models to the idiosyncratic errors.

if levels
    lsout.fac_diff = lsout.fac;
    lsout.fac = cumsum_nan(lsout.fac(2:end,:)); % If data was differenced, cumulate factors
end


% Compute estimates of factor loadings;
n_lc = 0; % number of constraints placed on lambda
lambda_constraints_full = est_par.fac_par.lambda_constraints_full;
if size(lambda_constraints_full,2) > 1;
    lam_c_index = lambda_constraints_full(:,1); % Which row of lambda: Constraints are then R*lambda = r
    lam_c_R = lambda_constraints_full(:,2:end-1); % R matrix
    lam_c_r = lambda_constraints_full(:,end); % r value
    n_lc = size(lambda_constraints_full,1);
end;

% Preallocate per-series outputs; NaN marks series that are skipped below
% because too few complete observations survive packr.
lam_mat = NaN(n_series,est_par.fac_par.nfac.total);
ismpl = smpl(calvec,nfirst,nlast,nper);
uar_coef_mat = NaN(n_series,n_uarlag);
uar_ser_mat = NaN(n_series,1);
uar_resid_mat = NaN(n_series,size(calvec,1));
res_mat = NaN(n_series,size(calvec,1));
r2_mat = NaN(n_series,1); % R-squared value
trend_tmp = (1:1:size(calvec,1))';
for is = 1:n_series;
    % packr drops rows with missing values; the appended trend column doubles
    % as a record of which time indices survive (recovered in itmp).
    tmp = packr([data(ismpl==1,is) lsout.fac(ismpl==1,:) trend_tmp(ismpl==1)]);
    itmp = tmp(:,end);
    tmp = tmp(:,1:end-1+levels); % Include time trend if data is in levels
    if size(tmp,1) >= ntmin;
        y = tmp(:,1);
        x = [tmp(:,2:end), ones(size(tmp,1),1)];
        xxi = inv(x'*x);
        bols = xxi*(x'*y);
        b = bols;
        % Check for restrictions and impose;
        if n_lc > 0;
            ii = lam_c_index == is;
            if sum(ii) > 0;
                R = [lam_c_R(ii==1,:), zeros(sum(ii),1)]; % No constraints on constant term
                r = lam_c_r(ii==1,:);
                % Restricted least squares:
                % b = bols - (X'X)^(-1) R' (R (X'X)^(-1) R')^(-1) (R*bols - r)
                tmp1 = xxi*R';
                tmp2 = inv(R*tmp1);
                b = bols - tmp1*tmp2*(R*bols-r);
            end;
        end;
        lam_mat(is,:) = b(1:end-1-levels)';
        u = y - x*b;
        % Compute R-squared
        ssr = sum(u.^2);
        ym = y - mean(y);
        tss = sum(ym.^2);
        r2_mat(is) = 1-(ssr/tss);
        % Compute AR model for errors
        if r2_mat(is) < 0.9999;
            if levels==1 % If variables are in levels...
                [~,arcoef,ser2] = VAR_CorrectBias(u(~isnan(u)),n_uarlag); % Bias-corrected AR coefficients
                arcoef = arcoef(:)';
                ser = sqrt(ser2);
                ar_resid = NaN(size(u,1),1); % We don't really need the AR residuals later
            else
                [arcoef, ser, ar_resid] = uar(u,n_uarlag); % AR Coefficients and ser
            end
        else;
            % Near-perfect fit: treat the idiosyncratic error as numerically zero
            arcoef = zeros(n_uarlag,1);
            ser = 0.0;
            ar_resid = NaN(size(u,1),1);
        end;
        uar_coef_mat(is,:) = arcoef';
        uar_ser_mat(is,1) = ser;
        uar_resid_mat(is,itmp) = ar_resid';
        res_mat(is,itmp) = u';
    end;
end;

% Fit a VAR to the estimated factors
varout = varest(lsout.fac,est_par.var_par,est_par.smpl_par,levels,coint_rank);


%% Save the data
% NOTE(review): est_data is not defined anywhere in this visible chunk —
% presumably constructed earlier in the script; confirm before running standalone.
save("../lpw_est_data.mat", 'est_data')
dataout = data(:,inclcode==1);
save("../lpw_data.mat", 'dataout')

22 changes: 18 additions & 4 deletions src/AutoregressiveModels.jl
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ export VARProcess,
histvar!,
histvar,

OLS,
VAROLS,
intercept,
coefcorrected,
residvcov,
Expand All @@ -51,14 +51,28 @@ export VARProcess,

ARMAProcess,

Factor
AbstractNFactorCriterion,
ICp2penalty,
BaiNg,
criterion,
nfactor,
Factor,

AbstractDetrend,
NoTrend,
FirstDiff,
nskip,
detrend!,
invdetrend!,
DynamicFactor

include("lapack.jl")
include("utils.jl")
include("process.jl")
include("estimation.jl")
include("varprocess.jl")
include("varestimation.jl")
include("bootstrap.jl")
include("arma.jl")
include("factor.jl")
include("dfm.jl")

end # module AutoregressiveModels
59 changes: 1 addition & 58 deletions src/bootstrap.jl
Original file line number Diff line number Diff line change
@@ -1,60 +1,3 @@
# An unsafe in-place version of OLS for bootstrap
# "Unsafe" because it reuses the preallocated buffers stored in `m` without
# any validation, and overwrites `m.resid` (used on entry as the response
# matrix Y) with the residuals.
function _fit!(m::OLS{TF}) where TF
    # On entry m.resid holds the response matrix Y; it is clobbered below.
    Y = m.resid
    X = m.X
    coef = m.coef
    # Solve the normal equations (X'X) * coef = X'Y via in-place Cholesky,
    # reusing crossXcache to hold X'X.
    mul!(coef, X', Y)
    mul!(m.crossXcache, X', X)
    ldiv!(cholesky!(m.crossXcache), coef)
    # Split the stacked coefficient matrix into slope block and intercept;
    # when an intercept is present it occupies the first row of coef.
    if m.intercept === nothing
        copyto!(m.coefB, coef')
    else
        copyto!(m.coefB, view(coef, 2:size(coef,1), :)')
        copyto!(m.intercept, view(coef, 1, :))
    end
    # Y .= Y - X*coef: overwrite the response with the residuals (5-arg mul!)
    mul!(Y, X, coef, -1.0, 1.0)
    # Residual variance-covariance matrix, scaled by the residual dof
    mul!(m.residvcov, Y', Y)
    rdiv!(m.residvcov, m.dofr)
    residchol = m.residchol
    residcholL = m.residcholL
    if residchol !== nothing
        # Refresh the cached Cholesky factorization of residvcov in place by
        # writing directly into the factorization's internal buffer.
        Cfactors = getfield(residchol, :factors)
        Cuplo = getfield(residchol, :uplo)
        copyto!(Cfactors, m.residvcov)
        # residchol could involve changes in immutable objects but it is not used
        residchol = cholesky!(Cfactors)
        N = size(m.residvcov, 1)
        # Materialize the lower-triangular factor into residcholL, zeroing the
        # strict upper triangle; transpose on read when the stored uplo is 'U'.
        @inbounds for j in 1:N
            for i in 1:j-1
                residcholL[i,j] = zero(TF)
            end
            for i in j:N
                residcholL[i,j] = Cuplo === 'U' ? Cfactors[j,i] : Cfactors[i,j]
            end
        end
    end
    return nothing
end

# An unsafe version for bootstrap
# Rebuilds the VAR response and lag-design matrices of `m` from `data` in
# place (no validation, esample ignored), then refits via `_fit!`.
function _fitvar!(data::Matrix, m::OLS, nlag, nocons)
    # Y in OLS is left untouched but use resid in-place
    resid = residuals(m)
    design = modelmatrix(m)
    nvar = size(resid, 2)
    # With an intercept, column 1 of the design matrix is reserved for it.
    offset = nocons ? 0 : 1
    @views for k in 1:nvar
        series = data[:, k]
        T = length(series)
        # Response: observations nlag+1..T of series k
        copyto!(resid[:, k], series[nlag+1:T])
        # Regressors: the same series shifted back by each lag
        for lag in 1:nlag
            copyto!(design[:, offset+(lag-1)*nvar+k], series[nlag+1-lag:T-lag])
        end
    end
    return _fit!(m)
end

randomindex(r::VectorAutoregression) = sample(1:size(residuals(r),1))

function wilddraw!(out, r::VectorAutoregression)
Expand Down Expand Up @@ -103,7 +46,7 @@ function _bootstrap!(ks, stats, r::VectorAutoregression, var, initialindex, draw
var(view(bootdata, t, :), _reshape(view(bootdata, t-1:-1:t-nlag, :)', N*nlag))
end
keepbootdata && (allbootdata[k] = copy(bootdata))
estimatevar && _fitvar!(bootdata, m, nlag, !hasintercept(r))
estimatevar && fit!(m, bootdata; Yinresid=true)
for stat in stats
nt = (out=selectdim(stat[1], ndims(stat[1]), k), data=bootdata, r=rk)
stat[2](nt)
Expand Down
Loading

0 comments on commit 7462cbf

Please sign in to comment.