diff --git a/.github/workflows/html-5-check.yaml b/.github/workflows/html-5-check.yaml
index 1439a3228..f25b4eeaf 100644
--- a/.github/workflows/html-5-check.yaml
+++ b/.github/workflows/html-5-check.yaml
@@ -6,8 +6,8 @@ on:
pull_request:
branches: [main, master]
-name: HTML5 check
+name: html-5-check
jobs:
- HTML5-check:
+ html-5-check:
uses: easystats/workflows/.github/workflows/html-5-check.yaml@main
diff --git a/DESCRIPTION b/DESCRIPTION
index 3b95351d9..933df479a 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,7 +1,7 @@
Type: Package
Package: performance
Title: Assessment of Regression Models Performance
-Version: 0.10.5.4
+Version: 0.10.5.6
Authors@R:
c(person(given = "Daniel",
family = "Lüdecke",
@@ -117,6 +117,7 @@ Suggests:
mgcv,
mlogit,
multimode,
+ nestedLogit,
nlme,
nonnest2,
ordinal,
@@ -149,4 +150,4 @@ Config/Needs/website:
r-lib/pkgdown,
easystats/easystatstemplate
Config/rcmdcheck/ignore-inconsequential-notes: true
-Remotes: easystats/see, easystats/parameters
+Remotes: easystats/see, easystats/parameters, easystats/insight
diff --git a/NAMESPACE b/NAMESPACE
index b5c3ee92b..e573d5ce0 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -180,6 +180,7 @@ S3method(model_performance,model_fit)
S3method(model_performance,multinom)
S3method(model_performance,negbinirr)
S3method(model_performance,negbinmfx)
+S3method(model_performance,nestedLogit)
S3method(model_performance,plm)
S3method(model_performance,poissonirr)
S3method(model_performance,poissonmfx)
@@ -371,6 +372,7 @@ S3method(r2,model_fit)
S3method(r2,multinom)
S3method(r2,negbinirr)
S3method(r2,negbinmfx)
+S3method(r2,nestedLogit)
S3method(r2,ols)
S3method(r2,phylolm)
S3method(r2,plm)
@@ -413,6 +415,7 @@ S3method(r2_coxsnell,mclogit)
S3method(r2_coxsnell,multinom)
S3method(r2_coxsnell,negbinirr)
S3method(r2_coxsnell,negbinmfx)
+S3method(r2_coxsnell,nestedLogit)
S3method(r2_coxsnell,poissonirr)
S3method(r2_coxsnell,poissonmfx)
S3method(r2_coxsnell,polr)
@@ -468,6 +471,7 @@ S3method(r2_nagelkerke,mclogit)
S3method(r2_nagelkerke,multinom)
S3method(r2_nagelkerke,negbinirr)
S3method(r2_nagelkerke,negbinmfx)
+S3method(r2_nagelkerke,nestedLogit)
S3method(r2_nagelkerke,poissonirr)
S3method(r2_nagelkerke,poissonmfx)
S3method(r2_nagelkerke,polr)
@@ -479,6 +483,8 @@ S3method(r2_posterior,BFBayesFactor)
S3method(r2_posterior,brmsfit)
S3method(r2_posterior,stanmvreg)
S3method(r2_posterior,stanreg)
+S3method(r2_tjur,default)
+S3method(r2_tjur,nestedLogit)
S3method(residuals,BFBayesFactor)
S3method(residuals,check_normality_numeric)
S3method(residuals,iv_robust)
diff --git a/NEWS.md b/NEWS.md
index 40e57d346..06eee8f47 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -1,5 +1,9 @@
# performance (development version)
+## General
+
+* Support for `nestedLogit` models.
+
## Changes to functions
* `check_outliers()` for method `"ics"` now detects number of available cores
@@ -7,6 +11,11 @@
the previous method, which used `parallel::detectCores()`. Now you should
set the number of cores via `options(mc.cores = 4)`.
+## Bug fixes
+
+* Fixed issues in `check_model()` for models that used data sets with
+ variables of class `"haven_labelled"`.
+
# performance 0.10.5
## Changes to functions
diff --git a/R/check_itemscale.R b/R/check_itemscale.R
index 8d8b71082..5aa704618 100644
--- a/R/check_itemscale.R
+++ b/R/check_itemscale.R
@@ -40,8 +40,6 @@
#' - Briggs SR, Cheek JM (1986) The role of factor analysis in the development
#' and evaluation of personality scales. Journal of Personality, 54(1),
#' 106-148. doi: 10.1111/j.1467-6494.1986.tb00391.x
-#' - Trochim WMK (2008) Types of Reliability.
-#' ([web](https://conjointly.com/kb/types-of-reliability/))
#'
#' @examplesIf require("parameters") && require("psych")
#' # data generation from '?prcomp', slightly modified
@@ -82,9 +80,9 @@ check_itemscale <- function(x) {
Mean = vapply(items, mean, numeric(1), na.rm = TRUE),
SD = vapply(items, stats::sd, numeric(1), na.rm = TRUE),
Skewness = vapply(items, function(i) as.numeric(datawizard::skewness(i)), numeric(1)),
- "Difficulty" = item_difficulty(items)$Difficulty,
- "Discrimination" = .item_discr,
- "alpha if deleted" = .item_alpha,
+ Difficulty = item_difficulty(items)$Difficulty,
+ Discrimination = .item_discr,
+ `alpha if deleted` = .item_alpha,
stringsAsFactors = FALSE,
check.names = FALSE
)
diff --git a/R/check_model_diagnostics.R b/R/check_model_diagnostics.R
index 3b86799cc..46a64f9c2 100644
--- a/R/check_model_diagnostics.R
+++ b/R/check_model_diagnostics.R
@@ -153,7 +153,7 @@
# prepare data for normality of residuals plot ----------------------------------
.diag_norm <- function(model, verbose = TRUE) {
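+  # coerce residuals to plain numeric; classes such as "haven_labelled" can
+  # otherwise break the diagnostic plots (see bug fix in NEWS)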
- r <- try(stats::residuals(model), silent = TRUE)
+ r <- try(as.numeric(stats::residuals(model)), silent = TRUE)
if (inherits(r, "try-error")) {
insight::format_alert(sprintf("Non-normality of residuals could not be computed. Cannot extract residuals from objects of class '%s'.", class(model)[1]))
@@ -174,16 +174,16 @@
if (inherits(model, "lm", which = TRUE) == 1) {
cook_levels <- round(stats::qf(0.5, s$fstatistic[2], s$fstatistic[3]), 2)
- } else if (!is.null(threshold)) {
- cook_levels <- threshold
- } else {
+ } else if (is.null(threshold)) {
cook_levels <- c(0.5, 1)
+ } else {
+ cook_levels <- threshold
}
n_params <- tryCatch(model$rank, error = function(e) insight::n_parameters(model))
infl <- stats::influence(model, do.coef = FALSE)
- resid <- insight::get_residuals(model)
+ resid <- as.numeric(insight::get_residuals(model))
std_resid <- tryCatch(stats::rstandard(model, infl), error = function(e) resid)
@@ -212,8 +212,8 @@
ncv <- tryCatch(
{
data.frame(
- x = stats::fitted(model),
- y = stats::residuals(model)
+ x = as.numeric(stats::fitted(model)),
+ y = as.numeric(stats::residuals(model))
)
},
error = function(e) {
diff --git a/R/model_performance.lm.R b/R/model_performance.lm.R
index e55253125..e8e5b07cf 100644
--- a/R/model_performance.lm.R
+++ b/R/model_performance.lm.R
@@ -125,10 +125,10 @@ model_performance.lm <- function(model, metrics = "all", verbose = TRUE, ...) {
if (("LOGLOSS" %in% toupper(metrics)) && isTRUE(info$is_binomial)) {
out$Log_loss <- .safe({
.logloss <- performance_logloss(model, verbose = verbose)
- if (!is.na(.logloss)) {
- .logloss
- } else {
+ if (is.na(.logloss)) {
NULL
+ } else {
+ .logloss
}
})
}
@@ -253,6 +253,22 @@ model_performance.zeroinfl <- model_performance.lm
#' @export
model_performance.zerotrunc <- model_performance.lm
+#' @export
+model_performance.nestedLogit <- function(model, metrics = "all", verbose = TRUE, ...) {
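+  # a nestedLogit model is a set of binary logit submodels (one per dichotomy);
+  # compute the performance metrics for each submodel and stack the results
+  # row-wise, with a "Response" column identifying the dichotomy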
+ mp <- lapply(model$models, model_performance.lm, metrics = metrics, verbose = verbose, ...)
+ out <- cbind(
+ data.frame(Response = names(mp), stringsAsFactors = FALSE),
+ do.call(rbind, mp)
+ )
+ # need to handle R2 separately
+ if (any(c("ALL", "R2") %in% toupper(metrics))) {
+ out$R2 <- unlist(r2_tjur(model))
+ }
+
+ row.names(out) <- NULL
+ class(out) <- unique(c("performance_model", class(out)))
+ out
+}
diff --git a/R/r2.R b/R/r2.R
index 26a16f9e6..c844f22cd 100644
--- a/R/r2.R
+++ b/R/r2.R
@@ -49,11 +49,8 @@ r2 <- function(model, ...) {
}
-
-
# Default models -----------------------------------------------
-
#' @rdname r2
#' @export
r2.default <- function(model, ci = NULL, verbose = TRUE, ...) {
@@ -115,6 +112,8 @@ r2.lm <- function(model, ci = NULL, ...) {
#' @export
r2.phylolm <- r2.lm
+# helper -------------
+
.r2_lm <- function(model_summary, ci = NULL) {
out <- list(
R2 = model_summary$r.squared,
@@ -140,7 +139,6 @@ r2.phylolm <- r2.lm
}
-
#' @export
r2.summary.lm <- function(model, ci = NULL, ...) {
if (!is.null(ci) && !is.na(ci)) {
@@ -150,7 +148,6 @@ r2.summary.lm <- function(model, ci = NULL, ...) {
}
-
#' @export
r2.systemfit <- function(model, ...) {
out <- lapply(summary(model)$eq, function(model_summary) {
@@ -173,11 +170,11 @@ r2.systemfit <- function(model, ...) {
#' @export
r2.lm_robust <- function(model, ...) {
out <- list(
- "R2" = tryCatch(
+ R2 = tryCatch(
model[["r.squared"]],
error = function(e) NULL
),
- "R2_adjusted" = tryCatch(
+ R2_adjusted = tryCatch(
model[["adj.r.squared"]],
error = function(e) NULL
)
@@ -198,8 +195,6 @@ r2.ols <- function(model, ...) {
structure(class = "r2_generic", out)
}
-
-
#' @export
r2.lrm <- r2.ols
@@ -207,7 +202,6 @@ r2.lrm <- r2.ols
r2.cph <- r2.ols
-
#' @export
r2.mhurdle <- function(model, ...) {
resp <- insight::get_response(model, verbose = FALSE)
@@ -230,7 +224,6 @@ r2.mhurdle <- function(model, ...) {
}
-
#' @export
r2.aov <- function(model, ci = NULL, ...) {
if (!is.null(ci) && !is.na(ci)) {
@@ -252,7 +245,6 @@ r2.aov <- function(model, ci = NULL, ...) {
}
-
#' @export
r2.mlm <- function(model, ...) {
model_summary <- summary(model)
@@ -276,7 +268,6 @@ r2.mlm <- function(model, ...) {
}
-
#' @export
r2.glm <- function(model, ci = NULL, verbose = TRUE, ...) {
if (!is.null(ci) && !is.na(ci)) {
@@ -291,7 +282,7 @@ r2.glm <- function(model, ci = NULL, verbose = TRUE, ...) {
if (info$family %in% c("gaussian", "inverse.gaussian")) {
out <- r2.default(model, ...)
} else if (info$is_logit && info$is_bernoulli) {
- out <- list("R2_Tjur" = r2_tjur(model, ...))
+ out <- list(R2_Tjur = r2_tjur(model, model_info = info, ...))
attr(out, "model_type") <- "Logistic"
names(out$R2_Tjur) <- "Tjur's R2"
class(out) <- c("r2_pseudo", class(out))
@@ -301,7 +292,7 @@ r2.glm <- function(model, ci = NULL, verbose = TRUE, ...) {
}
out <- NULL
} else {
- out <- list("R2_Nagelkerke" = r2_nagelkerke(model, ...))
+ out <- list(R2_Nagelkerke = r2_nagelkerke(model, ...))
names(out$R2_Nagelkerke) <- "Nagelkerke's R2"
attr(out, "model_type") <- "Generalized Linear"
class(out) <- c("r2_pseudo", class(out))
@@ -313,6 +304,14 @@ r2.glm <- function(model, ci = NULL, verbose = TRUE, ...) {
r2.glmx <- r2.glm
+#' @export
+r2.nestedLogit <- function(model, ci = NULL, verbose = TRUE, ...) {
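+  # Tjur's R2, computed for each binary submodel (dichotomy)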
+ out <- list(R2_Tjur = r2_tjur(model, ...))
+ attr(out, "model_type") <- "Logistic"
+ class(out) <- c("r2_pseudo", class(out))
+ out
+}
+
# mfx models ---------------------
@@ -358,7 +357,7 @@ r2.model_fit <- r2.logitmfx
#' @export
r2.BBreg <- function(model, ...) {
- out <- list("R2_CoxSnell" = r2_coxsnell(model))
+ out <- list(R2_CoxSnell = r2_coxsnell(model))
names(out$R2_CoxSnell) <- "Cox & Snell's R2"
class(out) <- c("r2_pseudo", class(out))
out
@@ -378,7 +377,7 @@ r2.bayesx <- r2.BBreg
#' @export
r2.censReg <- function(model, ...) {
- out <- list("R2_Nagelkerke" = r2_nagelkerke(model))
+ out <- list(R2_Nagelkerke = r2_nagelkerke(model))
names(out$R2_Nagelkerke) <- "Nagelkerke's R2"
class(out) <- c("r2_pseudo", class(out))
out
@@ -507,8 +506,8 @@ r2.wbm <- function(model, tolerance = 1e-5, ...) {
names(r2_marginal) <- "Marginal R2"
out <- list(
- "R2_conditional" = r2_conditional,
- "R2_marginal" = r2_marginal
+ R2_conditional = r2_conditional,
+ R2_marginal = r2_marginal
)
attr(out, "model_type") <- "Fixed Effects"
@@ -531,8 +530,8 @@ r2.sem <- function(model, ...) {
structure(
class = "r2_nakagawa",
list(
- "R2_conditional" = r2_conditional,
- "R2_marginal" = r2_marginal
+ R2_conditional = r2_conditional,
+ R2_marginal = r2_marginal
)
)
}
@@ -563,12 +562,10 @@ r2.gam <- function(model, ...) {
# gamlss inherits from gam, and summary.gamlss prints results automatically
printout <- utils::capture.output(s <- summary(model)) # nolint
- if (!is.null(s$r.sq)) {
- list(
- R2 = c(`Adjusted R2` = s$r.sq)
- )
- } else {
+ if (is.null(s$r.sq)) {
NextMethod()
+ } else {
+ list(R2 = c(`Adjusted R2` = s$r.sq))
}
}
@@ -603,7 +600,7 @@ r2.rma <- function(model, ...) {
#' @export
r2.feis <- function(model, ...) {
out <- list(
- R2 = c(`R2` = model$r2),
+ R2 = c(R2 = model$r2),
R2_adjusted = c(`adjusted R2` = model$adj.r2)
)
@@ -654,7 +651,7 @@ r2.fixest_multi <- function(model, ...) {
r2.felm <- function(model, ...) {
model_summary <- summary(model)
out <- list(
- R2 = c(`R2` = model_summary$r2),
+ R2 = c(R2 = model_summary$r2),
R2_adjusted = c(`adjusted R2` = model_summary$r2adj)
)
@@ -668,7 +665,7 @@ r2.felm <- function(model, ...) {
#' @export
r2.iv_robust <- function(model, ...) {
out <- list(
- R2 = c(`R2` = model$r.squared),
+ R2 = c(R2 = model$r.squared),
R2_adjusted = c(`adjusted R2` = model$adj.r.squared)
)
@@ -682,7 +679,7 @@ r2.iv_robust <- function(model, ...) {
r2.ivreg <- function(model, ...) {
model_summary <- summary(model)
out <- list(
- R2 = c(`R2` = model_summary$r.squared),
+ R2 = c(R2 = model_summary$r.squared),
R2_adjusted = c(`adjusted R2` = model_summary$adj.r.squared)
)
@@ -694,7 +691,7 @@ r2.ivreg <- function(model, ...) {
#' @export
r2.bigglm <- function(model, ...) {
- out <- list("R2_CoxSnell" = summary(model)$rsq)
+ out <- list(R2_CoxSnell = summary(model)$rsq)
names(out$R2_CoxSnell) <- "Cox & Snell's R2"
class(out) <- c("r2_pseudo", class(out))
out
@@ -728,7 +725,7 @@ r2.biglm <- function(model, ...) {
r2.lmrob <- function(model, ...) {
model_summary <- summary(model)
out <- list(
- R2 = c(`R2` = model_summary$r.squared),
+ R2 = c(R2 = model_summary$r.squared),
R2_adjusted = c(`adjusted R2` = model_summary$adj.r.squared)
)
@@ -749,10 +746,10 @@ r2.mmclogit <- function(model, ...) {
#' @export
r2.Arima <- function(model, ...) {
- if (!requireNamespace("forecast", quietly = TRUE)) {
- list(R2 = NA)
- } else {
+ if (requireNamespace("forecast", quietly = TRUE)) {
list(R2 = stats::cor(stats::fitted(model), insight::get_data(model, verbose = FALSE))^2)
+ } else {
+ list(R2 = NA)
}
}
@@ -762,8 +759,8 @@ r2.Arima <- function(model, ...) {
r2.plm <- function(model, ...) {
model_summary <- summary(model)
out <- list(
- "R2" = c(`R2` = model_summary$r.squared[1]),
- "R2_adjusted" = c(`adjusted R2` = model_summary$r.squared[2])
+ R2 = c(R2 = model_summary$r.squared[1]),
+ R2_adjusted = c(`adjusted R2` = model_summary$r.squared[2])
)
attr(out, "model_type") <- "Panel Data"
@@ -779,8 +776,8 @@ r2.selection <- function(model, ...) {
return(NULL)
}
out <- list(
- "R2" = c(`R2` = model_summary$rSquared$R2),
- "R2_adjusted" = c(`adjusted R2` = model_summary$rSquared$R2adj)
+ R2 = c(R2 = model_summary$rSquared$R2),
+ R2_adjusted = c(`adjusted R2` = model_summary$rSquared$R2adj)
)
attr(out, "model_type") <- "Tobit 2"
@@ -795,7 +792,7 @@ r2.svyglm <- function(model, ...) {
rsq.adjust <- 1 - ((1 - rsq) * (model$df.null / model$df.residual))
out <- list(
- R2 = c(`R2` = rsq),
+ R2 = c(R2 = rsq),
R2_adjusted = c(`adjusted R2` = rsq.adjust)
)
@@ -807,7 +804,7 @@ r2.svyglm <- function(model, ...) {
#' @export
r2.vglm <- function(model, ...) {
- out <- list("R2_McKelvey" = r2_mckelvey(model))
+ out <- list(R2_McKelvey = r2_mckelvey(model))
names(out$R2_McKelvey) <- "McKelvey's R2"
class(out) <- c("r2_pseudo", class(out))
out
@@ -820,7 +817,7 @@ r2.vgam <- r2.vglm
#' @export
r2.DirichletRegModel <- function(model, ...) {
- out <- list("R2_Nagelkerke" = r2_nagelkerke(model))
+ out <- list(R2_Nagelkerke = r2_nagelkerke(model))
names(out$R2_Nagelkerke) <- "Nagelkerke's R2"
class(out) <- c("r2_pseudo", class(out))
out
diff --git a/R/r2_coxsnell.R b/R/r2_coxsnell.R
index c151136d4..cdb73dea7 100644
--- a/R/r2_coxsnell.R
+++ b/R/r2_coxsnell.R
@@ -88,6 +88,26 @@ r2_coxsnell.glm <- function(model, verbose = TRUE, ...) {
#' @export
r2_coxsnell.BBreg <- r2_coxsnell.glm
+
+#' @export
+r2_coxsnell.nestedLogit <- function(model, ...) {
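+  # Cox & Snell's R2, 1 - exp((deviance - null.deviance) / n), is computed
+  # separately for each binary submodel, using the number of observations
+  # that enter the respective dichotomy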
+ n <- insight::n_obs(model, disaggregate = TRUE)
+ stats::setNames(
+ lapply(names(model$models), function(i) {
+ m <- model$models[[i]]
+ # if no deviance, return NA
+ if (is.null(m$deviance)) {
+ return(NA)
+ }
+ r2_coxsnell <- (1 - exp((m$deviance - m$null.deviance) / n[[i]]))
+ names(r2_coxsnell) <- "Cox & Snell's R2"
+ r2_coxsnell
+ }),
+ names(model$models)
+ )
+}
+
+
#' @export
r2_coxsnell.mclogit <- function(model, ...) {
insight::check_if_installed("mclogit", reason = "to calculate R2")
diff --git a/R/r2_nagelkerke.R b/R/r2_nagelkerke.R
index bb6230f22..85bcc6e8f 100644
--- a/R/r2_nagelkerke.R
+++ b/R/r2_nagelkerke.R
@@ -77,6 +77,26 @@ r2_nagelkerke.glm <- function(model, verbose = TRUE, ...) {
#' @export
r2_nagelkerke.BBreg <- r2_nagelkerke.glm
+
+#' @export
+r2_nagelkerke.nestedLogit <- function(model, ...) {
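+  # Nagelkerke's R2 rescales Cox & Snell's R2 by its maximum attainable value,
+  # 1 - exp(-null.deviance / n), separately for each binary submodel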
+ n <- insight::n_obs(model, disaggregate = TRUE)
+ stats::setNames(
+ lapply(names(model$models), function(i) {
+ m <- model$models[[i]]
+ # if no deviance, return NA
+ if (is.null(m$deviance)) {
+ return(NA)
+ }
+ r2_nagelkerke <- (1 - exp((m$deviance - m$null.deviance) / n[[i]])) / (1 - exp(-m$null.deviance / n[[i]]))
+ names(r2_nagelkerke) <- "Nagelkerke's R2"
+ r2_nagelkerke
+ }),
+ names(model$models)
+ )
+}
+
+
#' @export
r2_nagelkerke.bife <- function(model, ...) {
r2_nagelkerke <- r2_coxsnell(model) / (1 - exp(-model$null_deviance / insight::n_obs(model)))
diff --git a/R/r2_tjur.R b/R/r2_tjur.R
index e0aa6117c..6a044e299 100644
--- a/R/r2_tjur.R
+++ b/R/r2_tjur.R
@@ -23,6 +23,11 @@
#'
#' @export
r2_tjur <- function(model, ...) {
+ UseMethod("r2_tjur")
+}
+
+#' @export
+r2_tjur.default <- function(model, ...) {
info <- list(...)$model_info
if (is.null(info)) {
info <- suppressWarnings(insight::model_info(model, verbose = FALSE))
@@ -50,3 +55,29 @@ r2_tjur <- function(model, ...) {
names(tjur_d) <- "Tjur's R2"
tjur_d
}
+
+#' @export
+r2_tjur.nestedLogit <- function(model, ...) {
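+  # Tjur's R2 (coefficient of discrimination) is the absolute difference
+  # between the mean predicted probabilities in the two observed outcome
+  # groups, computed here for each binary submodel (dichotomy)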
+ resp <- insight::get_response(model, dichotomies = TRUE, verbose = FALSE)
+
+ stats::setNames(
+ lapply(names(model$models), function(i) {
+ y <- resp[[i]]
+ m <- model$models[[i]]
+ pred <- stats::predict(m, type = "response")
+ # delete pred for cases with missing residuals
+ if (anyNA(stats::residuals(m))) {
+ pred <- pred[!is.na(stats::residuals(m))]
+ }
+ categories <- unique(y)
+ m1 <- mean(pred[which(y == categories[1])], na.rm = TRUE)
+ m2 <- mean(pred[which(y == categories[2])], na.rm = TRUE)
+
+ tjur_d <- abs(m2 - m1)
+
+ names(tjur_d) <- "Tjur's R2"
+ tjur_d
+ }),
+ names(model$models)
+ )
+}
diff --git a/man/check_itemscale.Rd b/man/check_itemscale.Rd
index 7fa487ab5..7f790b1d2 100644
--- a/man/check_itemscale.Rd
+++ b/man/check_itemscale.Rd
@@ -61,7 +61,5 @@ check_itemscale(pca)
\item Briggs SR, Cheek JM (1986) The role of factor analysis in the development
and evaluation of personality scales. Journal of Personality, 54(1),
106-148. doi: 10.1111/j.1467-6494.1986.tb00391.x
-\item Trochim WMK (2008) Types of Reliability.
-(\href{https://conjointly.com/kb/types-of-reliability/}{web})
}
}
diff --git a/tests/testthat/_snaps/check_distribution.md b/tests/testthat/_snaps/check_distribution.md
new file mode 100644
index 000000000..11187a23b
--- /dev/null
+++ b/tests/testthat/_snaps/check_distribution.md
@@ -0,0 +1,21 @@
+# check_distribution
+
+ Code
+ print(out)
+ Output
+ # Distribution of Model Family
+
+ Predicted Distribution of Residuals
+
+ Distribution Probability
+ cauchy 94%
+ lognormal 3%
+ weibull 3%
+
+ Predicted Distribution of Response
+
+ Distribution Probability
+ lognormal 47%
+ gamma 44%
+ beta-binomial 3%
+
diff --git a/tests/testthat/_snaps/nestedLogit.md b/tests/testthat/_snaps/nestedLogit.md
new file mode 100644
index 000000000..f5c9a0bdd
--- /dev/null
+++ b/tests/testthat/_snaps/nestedLogit.md
@@ -0,0 +1,12 @@
+# model_performance
+
+ Code
+ model_performance(mnl)
+ Output
+ # Indices of model performance
+
+ Response | AIC | BIC | RMSE | Sigma | R2
+ ----------------------------------------------------
+ work | 325.733 | 336.449 | 0.456 | 1.000 | 0.138
+ full | 110.495 | 118.541 | 0.398 | 1.000 | 0.333
+
diff --git a/tests/testthat/test-binned_residuals.R b/tests/testthat/test-binned_residuals.R
new file mode 100644
index 000000000..4aa69e0ec
--- /dev/null
+++ b/tests/testthat/test-binned_residuals.R
@@ -0,0 +1,67 @@
+test_that("binned_residuals", {
+ data(mtcars)
+ model <- glm(vs ~ wt + mpg, data = mtcars, family = "binomial")
+ result <- binned_residuals(model)
+ expect_named(
+ result,
+ c("xbar", "ybar", "n", "x.lo", "x.hi", "se", "ci_range", "CI_low", "CI_high", "group")
+ )
+ expect_equal(
+ result$xbar,
+ c(0.03786, 0.09514, 0.25911, 0.47955, 0.71109, 0.97119),
+ tolerance = 1e-4
+ )
+ expect_equal(
+ result$ybar,
+ c(-0.03786, -0.09514, 0.07423, -0.07955, 0.28891, -0.13786),
+ tolerance = 1e-4
+ )
+})
+
+
+test_that("binned_residuals, n_bins", {
+ data(mtcars)
+ model <- glm(vs ~ wt + mpg, data = mtcars, family = "binomial")
+ result <- binned_residuals(model, n_bins = 10)
+ expect_named(
+ result,
+ c("xbar", "ybar", "n", "x.lo", "x.hi", "se", "ci_range", "CI_low", "CI_high", "group")
+ )
+ expect_equal(
+ result$xbar,
+ c(
+ 0.02373, 0.06301, 0.08441, 0.17907, 0.29225, 0.44073, 0.54951,
+ 0.69701, 0.9168, 0.99204
+ ),
+ tolerance = 1e-4
+ )
+ expect_equal(
+ result$ybar,
+ c(
+ -0.02373, -0.06301, -0.08441, -0.17907, 0.20775, -0.1074, 0.11715,
+ 0.30299, -0.25014, 0.00796
+ ),
+ tolerance = 1e-4
+ )
+})
+
+
+test_that("binned_residuals, terms", {
+ data(mtcars)
+ model <- glm(vs ~ wt + mpg, data = mtcars, family = "binomial")
+ result <- binned_residuals(model, term = "mpg")
+ expect_named(
+ result,
+ c("xbar", "ybar", "n", "x.lo", "x.hi", "se", "ci_range", "CI_low", "CI_high", "group")
+ )
+ expect_equal(
+ result$xbar,
+ c(12.62, 15.34, 18.1, 20.9, 22.875, 30.06667),
+ tolerance = 1e-4
+ )
+ expect_equal(
+ result$ybar,
+ c(-0.05435, -0.07866, 0.13925, -0.11861, 0.27763, -0.13786),
+ tolerance = 1e-4
+ )
+})
diff --git a/tests/testthat/test-check_autocorrelation.R b/tests/testthat/test-check_autocorrelation.R
new file mode 100644
index 000000000..b389f3985
--- /dev/null
+++ b/tests/testthat/test-check_autocorrelation.R
@@ -0,0 +1,7 @@
+test_that("check_autocorrelation", {
+ data(mtcars)
+ m <- lm(mpg ~ wt + cyl + gear + disp, data = mtcars)
+ set.seed(123)
+ out <- check_autocorrelation(m)
+  expect_equal(as.vector(out), 0.316, ignore_attr = TRUE, tolerance = 1e-2)
+})
diff --git a/tests/testthat/test-check_distribution.R b/tests/testthat/test-check_distribution.R
new file mode 100644
index 000000000..e8ab2835f
--- /dev/null
+++ b/tests/testthat/test-check_distribution.R
@@ -0,0 +1,36 @@
+test_that("check_distribution", {
+ skip_if_not_installed("lme4")
+ skip_if_not_installed("randomForest")
+ data(sleepstudy, package = "lme4")
+ model <<- lme4::lmer(Reaction ~ Days + (Days | Subject), sleepstudy)
+ out <- check_distribution(model)
+
+ expect_identical(
+ out$Distribution,
+ c(
+ "bernoulli", "beta", "beta-binomial", "binomial", "cauchy",
+ "chi", "exponential", "F", "gamma", "half-cauchy", "inverse-gamma",
+ "lognormal", "neg. binomial (zero-infl.)", "negative binomial",
+ "normal", "pareto", "poisson", "poisson (zero-infl.)", "tweedie",
+ "uniform", "weibull"
+ )
+ )
+ expect_equal(
+ out$p_Residuals,
+ c(
+ 0, 0, 0, 0, 0.9375, 0, 0, 0, 0, 0, 0, 0.03125, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0.03125
+ ),
+ tolerance = 1e-4
+ )
+ expect_equal(
+ out$p_Response,
+ c(
+ 0, 0, 0.03125, 0, 0, 0, 0, 0, 0.4375, 0.03125, 0, 0.46875,
+ 0.03125, 0, 0, 0, 0, 0, 0, 0, 0
+ ),
+ tolerance = 1e-4
+ )
+
+ expect_snapshot(print(out))
+})
diff --git a/tests/testthat/test-check_heterogeneity_bias.R b/tests/testthat/test-check_heterogeneity_bias.R
new file mode 100644
index 000000000..7abc6af30
--- /dev/null
+++ b/tests/testthat/test-check_heterogeneity_bias.R
@@ -0,0 +1,33 @@
+test_that("check_heterogeneity_bias", {
+ data(iris)
+ set.seed(123)
+ iris$ID <- sample(1:4, nrow(iris), replace = TRUE) # fake-ID
+ out <- check_heterogeneity_bias(iris, select = c("Sepal.Length", "Petal.Length"), group = "ID")
+ expect_equal(out, c("Sepal.Length", "Petal.Length"), ignore_attr = TRUE)
+ expect_output(print(out), "Possible heterogeneity bias due to following predictors: Sepal\\.Length, Petal\\.Length")
+
+ out <- check_heterogeneity_bias(iris, select = ~ Sepal.Length + Petal.Length, group = ~ID)
+ expect_equal(out, c("Sepal.Length", "Petal.Length"), ignore_attr = TRUE)
+ expect_output(print(out), "Possible heterogeneity bias due to following predictors: Sepal\\.Length, Petal\\.Length")
+
+ m <- lm(Sepal.Length ~ Petal.Length + Petal.Width + Species + ID, data = iris)
+ expect_error(
+ check_heterogeneity_bias(m, select = c("Sepal.Length", "Petal.Length"), group = "ID"),
+ regex = "no mixed model"
+ )
+
+ skip_if_not_installed("lme4")
+ m <- lme4::lmer(Sepal.Length ~ Petal.Length + Petal.Width + Species + (1 | ID), data = iris)
+ out <- check_heterogeneity_bias(m, select = c("Sepal.Length", "Petal.Length"), group = "ID")
+ expect_equal(out, c("Petal.Length", "Petal.Width", "Species"), ignore_attr = TRUE)
+ expect_output(
+ print(out),
+ "Possible heterogeneity bias due to following predictors: Petal\\.Length, Petal\\.Width, Species"
+ )
+ out <- check_heterogeneity_bias(m, select = ~ Sepal.Length + Petal.Length, group = ~ID)
+ expect_equal(out, c("Petal.Length", "Petal.Width", "Species"), ignore_attr = TRUE)
+ expect_output(
+ print(out),
+ "Possible heterogeneity bias due to following predictors: Petal\\.Length, Petal\\.Width, Species"
+ )
+})
diff --git a/tests/testthat/test-nestedLogit.R b/tests/testthat/test-nestedLogit.R
new file mode 100644
index 000000000..47a852b04
--- /dev/null
+++ b/tests/testthat/test-nestedLogit.R
@@ -0,0 +1,66 @@
+skip_on_os(c("mac", "linux"))
+skip_if(packageVersion("insight") <= "0.19.5.10")
+skip_if_not_installed("nestedLogit")
+skip_if_not_installed("carData")
+
+data("Womenlf", package = "carData")
+comparisons <- nestedLogit::logits(
+ work = nestedLogit::dichotomy("not.work", working = c("parttime", "fulltime")),
+ full = nestedLogit::dichotomy("parttime", "fulltime")
+)
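+# nested dichotomies: "work" contrasts not working vs. working (part- or
+# full-time); "full" contrasts part-time vs. full-time among those who work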
+mnl <- nestedLogit::nestedLogit(
+ partic ~ hincome + children,
+ dichotomies = comparisons,
+ data = Womenlf
+)
+
+test_that("r2", {
+ out <- r2(mnl)
+ expect_equal(
+ out,
+ list(R2_Tjur = list(
+ work = c(`Tjur's R2` = 0.137759452521642),
+ full = c(`Tjur's R2` = 0.332536937208286)
+ )),
+ ignore_attr = TRUE,
+ tolerance = 1e-4
+ )
+
+ out <- r2_tjur(mnl)
+ expect_equal(
+ out,
+ list(
+ work = c(`Tjur's R2` = 0.137759452521642),
+ full = c(`Tjur's R2` = 0.332536937208286)
+ ),
+ ignore_attr = TRUE,
+ tolerance = 1e-4
+ )
+
+ out <- r2_coxsnell(mnl)
+ expect_equal(
+ out,
+ list(
+ work = c(`Cox & Snell's R2` = 0.129313084315599),
+ full = c(`Cox & Snell's R2` = 0.308541455410686)
+ ),
+ ignore_attr = TRUE,
+ tolerance = 1e-4
+ )
+
+ out <- r2_nagelkerke(mnl)
+ expect_equal(
+ out,
+ list(
+ work = c(`Nagelkerke's R2` = 0.174313365512442),
+ full = c(`Nagelkerke's R2` = 0.418511411473948)
+ ),
+ ignore_attr = TRUE,
+ tolerance = 1e-4
+ )
+})
+
+
+test_that("model_performance", {
+ expect_snapshot(model_performance(mnl))
+})