
Commit

Merge branch 'fs-devel' into devel
fabian-s committed Jun 20, 2016
2 parents f5b7860 + 4fabbfa commit d599a5c
Showing 3 changed files with 115 additions and 35 deletions.
89 changes: 64 additions & 25 deletions R/fpca.ssvd.R
@@ -12,8 +12,10 @@
#'points.
#'
#'@param Y data matrix (rows: observations; columns: grid of eval. points)
#'@param argvals the argument values where functions are evaluated. It is
#' not implemented yet.
#'@param ydata a data frame \code{ydata} representing
#' irregularly observed functions. NOT IMPLEMENTED for this method.
#'@param argvals the argument values of the function evaluations in \code{Y},
#' defaults to an equidistant grid from 0 to 1. See Details.
#'@param npc how many smooth SVs to try to extract, if \code{NA} (the default)
#' the hard thresholding rule of Donoho, Gavish (2013) is used (see Details,
#' References).
@@ -33,14 +35,24 @@
#'@param upper.alpha upper limit for smoothing parameter if \code{!gridsearch}
#'@param verbose generate graphical summary of progress and diagnostic messages?
#' defaults to \code{FALSE}
#' @param integration ignored, see Details.
#'@details Note that \code{fpca.ssvd} computes smoothed orthonormal eigenvectors
#' of the supplied function evaluations (and associated scores), not (!)
#' evaluations of the smoothed orthonormal eigenfunctions. The smoothed
#' orthonormal eigenvectors are then rescaled by the length of the domain
#' defined by \code{argvals} so that their squared integral is approximately
#' equal to one (instead of their crossproduct being equal to one), so that
#' they approximate the behavior of smooth eigenfunctions. If \code{argvals}
#' is not equidistant, \code{fpca.ssvd} will simply return the smoothed
#' eigenvectors without rescaling, with a warning.
#'@return an \code{fpca} object like that returned from \code{\link{fpca.sc}},
#' with entries \code{Yhat}, the smoothed trajectories, \code{Y}, the observed
#' data, \code{scores}, the estimated FPC loadings, \code{mu}, the column means
#' of \code{Y} (or a vector of zeroes if \code{!center}), \code{efunctions},
#' the estimated smooth FPCs (note that these are orthonormal vectors, not
#' evaluations of orthonormal functions...), \code{evalues}, their associated
#' eigenvalues, and \code{npc}, the number of smooth components that were
#' extracted.
#' evaluations of orthonormal functions if \code{argvals} is not equidistant),
#' \code{evalues}, their associated eigenvalues, and \code{npc}, the number of
#' smooth components that were extracted.
#'@seealso \code{\link{fpca.sc}} and \code{\link{fpca.face}} for FPCA based on
#' smoothing a covariance estimate; \code{\link{fpca2s}} for a faster SVD-based
#' approach.
@@ -83,11 +95,35 @@
#' matplot(1:m, t(smoothSV$Yhat), xlab="", ylab="",
#' type="l", lty=1,col=clrs, main="estimated smooth Y", bty="n")
#'@export
fpca.ssvd <- function(Y, argvals = NULL, npc = NA, center = TRUE, maxiter = 15,
fpca.ssvd <- function(Y=NULL, ydata = NULL, argvals = NULL, npc = NA, center = TRUE, maxiter = 15,
tol = 1e-4, diffpen = 3, gridsearch = TRUE, alphagrid = 1.5^(-20:40),
lower.alpha = 1e-5, upper.alpha = 1e7, verbose = FALSE){
lower.alpha = 1e-5, upper.alpha = 1e7, verbose = FALSE, integration = "trapezoidal"){

stopifnot(!is.null(Y))
if(any(is.na(Y))) stop("No missing values in <Y> allowed.")
m <- ncol(Y)
n <- nrow(Y)
if(!is.null(ydata)) {
stop(paste("<ydata> argument for irregular data is not supported,",
"please use fpca.sc instead."))
}
irregular <- FALSE
if(!is.null(argvals)) {
stopifnot(is.numeric(argvals),
length(argvals) == m,
all(!is.na(argvals)))
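# flag the grid as irregular if any spacing deviates from the mean spacing by more than 5%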
if(any(diff(argvals)/mean(diff(argvals)) > 1.05 |
diff(argvals)/mean(diff(argvals)) < 0.95)) {
warning(paste("non-equidistant <argvals>-grid detected:",
"fpca.ssvd will return orthonormal eigenvectors of the function evaluations",
"not evaluations of the orthonormal eigenvectors.",
"Use fpca.sc() for the latter instead."))
irregular <- TRUE
}
} else {
argvals <- seq(0, 1, length = m)
}

if(!is.null(argvals)) warning("<argvals> is not supported and will be ignored.")

#GCV criterion from eq. (10), App. C:
gcv <- function(alpha, w, m, lambda){
@@ -103,11 +139,6 @@ fpca.ssvd <- function(Y, argvals = NULL, npc = NA, center = TRUE, maxiter = 15,
}
}


if(any(is.na(Y))) stop("No missing values in <Y> allowed.")
m <- ncol(Y)
n <- nrow(Y)

if(is.na(npc)){
npc <- getNPC.DonohoGavish(Y)
}
@@ -141,6 +172,7 @@ fpca.ssvd <- function(Y, argvals = NULL, npc = NA, center = TRUE, maxiter = 15,
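# containers for the smoothed singular vectors V and their singular values d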
V <- matrix(NA, nrow=m, ncol=npc)
d <- rep(NA, npc)

Yorig <- Y
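# Yorig keeps an unmodified copy of the data; it is returned as Y in the output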
if(center){
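# estimate a smooth mean function by spline-smoothing the column means, then center Y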
meanY <- predict(smooth.spline(x=1:m, y=colMeans(Y)), x=1:m)$y
Y <- t(t(Y) - meanY)
@@ -212,20 +244,27 @@
}# end for(k)
if(length(uhoh)) warning("First SV for remaining un-smooth signal larger than ",
"SV found for smooth signal for component(s) ", paste(uhoh, collapse=","))

# return(list(smooth=list(d=d, u=U, v=V),
# noise=svd(Ynow, nu=min(dim(Y))-npc, nv=min(dim(Y))-npc),
# mean=meanY))
scores <- U%*%(d*diag(length(d)))
if(!irregular){
# scale smooth eigenvectors so they're scaled as realizations of orthonormal
# eigenfunctions i.e. so that colSums(diff(argvals) * V^2) == 1 instead of
# crossprod(V) == diag(npc)
scale <- sqrt(mean(diff(argvals)))
V <- V / scale
d <- d * scale
} else {
scale <- 1
}
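# scores are the left singular vectors scaled by the (possibly rescaled) singular values, i.e. U %*% diag(d)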
scores <- U%*%(d * diag(length(d)))

ret = list(
Yhat= t(meanY + t(scores%*%t(V))),
Y = Y,
scores=scores,
mu=meanY,
efunctions=V,
evalues=d^2,
npc=npc)
Yhat = t(meanY + t(scores%*%t(V))),
Y = Yorig,
scores = scores,
mu = meanY,
efunctions = V,
#FIXME: this should be d^2 but the scaling is way off....
evalues = diag(var(scores)),
npc = npc)
class(ret) = "fpca"
return(ret)

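To illustrate the rescaling performed above (an editorial sketch, not part of the commit): on an equidistant grid the returned efunctions are the orthonormal smoothed eigenvectors divided by sqrt(mean(diff(argvals))), so that mean(diff(argvals)) * colSums(efunctions^2) is approximately a vector of ones. A minimal sketch, assuming this is the refund package with the fpca.ssvd shown above:

library(refund)  # assumption: refund provides the fpca.ssvd version from this commit
set.seed(1)
n <- 50
m <- 101
argvals <- seq(0, 1, length.out = m)
efcts <- poly(argvals, 2)                     # two smooth signal components
Y <- outer(2 * drop(scale(rnorm(n))), efcts[, 1]) +
  outer(drop(scale(rnorm(n))), efcts[, 2])
fit <- fpca.ssvd(Y, argvals = argvals, npc = 2)
# eigenvectors are rescaled so their squared "integral" over [0, 1] is about 1 ...
mean(diff(argvals)) * colSums(fit$efunctions^2)
# ... rather than having unit crossproduct:
crossprod(fit$efunctions)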
33 changes: 25 additions & 8 deletions man/fpca.ssvd.Rd

(Generated documentation file; diff not rendered.)

28 changes: 26 additions & 2 deletions tests/testthat/test-fpca.R
@@ -8,6 +8,16 @@ efcts <- poly(t, 2)
Y <- outer(2 * drop(scale(rnorm(n))), efcts[, 1]) +
outer(drop(scale(rnorm(n))), efcts[, 2])

flip_efunctions <- function(ef1, ef2) {
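# eigenvectors are only identified up to sign: flip columns of ef2 to best match ef1 before comparing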
squared_diff <- function(x,y) crossprod(x - y)
for(i in 1:ncol(ef1)){
if(squared_diff(ef1[,i], ef2[,i]) > squared_diff(ef1[,i], - ef2[,i])){
ef2[,i] <- -ef2[,i]
}
}
ef2
}

test_that("all fpca functions agree on toy example", {
sc <- fpca.sc(Y)
face <- fpca.face(Y)
@@ -18,13 +28,15 @@ test_that("all fpca functions agree on toy example", {
expect_equal(sc$Yhat, ssvd$Yhat, tolerance=.01)
expect_equal(sc$Yhat, twos$Yhat, tolerance=.01)

ssvd$efunctions <- flip_efunctions(sc$efunctions, ssvd$efunctions)
expect_equal(sc$efunctions, ssvd$efunctions, tolerance=.1)
expect_equal(sc$evalues, ssvd$evalues, tolerance=.1)

if(FALSE){
##TODO: - fix quadrature weights first
## - flip sign of efunctions if necessary
expect_equal(sc$efunctions, face$efunctions, tolerance=.01)
expect_equal(sc$efunctions, ssvd$efunctions, tolerance=.01)
expect_equal(sc$efunctions, twos$efunctions, tolerance=.01)

expect_equal(sc$evalues, face$evalues, tolerance=.01)
expect_equal(sc$evalues, ssvd$evalues, tolerance=.01)
expect_equal(sc$evalues, twos$evalues, tolerance=.01)
@@ -41,3 +53,15 @@ test_that("fpca.sc options work", {
expect_equal(sc$Yhat, sc_sym$Yhat, tolerance=.01)
expect_equal(sc$Yhat, sc_int$Yhat, tolerance=.01)
})


test_that("fpca.ssvd options work", {
expect_error(fpca.ssvd(Y = 1:10, ydata=data.frame()), "irregular data")
expect_warning(fpca.ssvd(Y = Y, argvals=sqrt(t)), "non-equidistant")
ssvd <- fpca.ssvd(Y)
ssvd_npc1 <- fpca.ssvd(Y, npc=1)
ssvd_d2 <- fpca.ssvd(Y, diffpen = 2)
expect_equal(ssvd_npc1$efunctions[,1], ssvd$efunctions[,1])
expect_true(ncol(ssvd_npc1$efunctions) == 1)
expect_equal(ssvd_d2$efunctions, ssvd$efunctions, tol=.01)
})
