From 50a0443c3ca2a218934c456a30aa077c4b32f1e2 Mon Sep 17 00:00:00 2001 From: Daniel Date: Sun, 29 Oct 2023 14:45:39 +0100 Subject: [PATCH] posterior predictive check for binomial glm with matrix response (#645) --- DESCRIPTION | 3 +- NEWS.md | 13 +++ R/binned_residuals.R | 12 +- R/check_model.R | 2 +- R/check_predictions.R | 40 ++++--- R/looic.R | 2 + R/r2_bayes.R | 142 ++++++++++++------------ man/binned_residuals.Rd | 3 + man/looic.Rd | 2 + man/r2_bayes.Rd | 102 +++++++++-------- tests/testthat/test-binned_residuals.R | 19 ++++ tests/testthat/test-check_predictions.R | 50 +++++++++ 12 files changed, 246 insertions(+), 144 deletions(-) diff --git a/DESCRIPTION b/DESCRIPTION index d8dff46a6..0612d8264 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,7 +1,7 @@ Type: Package Package: performance Title: Assessment of Regression Models Performance -Version: 0.10.7.1 +Version: 0.10.7.2 Authors@R: c(person(given = "Daniel", family = "Lüdecke", @@ -153,3 +153,4 @@ Config/Needs/website: r-lib/pkgdown, easystats/easystatstemplate Config/rcmdcheck/ignore-inconsequential-notes: true +Remotes: easystats/insight diff --git a/NEWS.md b/NEWS.md index 2bb37d162..6209f55b8 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,3 +1,16 @@ +# performance 0.10.8 + +## Changes + +* Changed behaviour of `check_predictions()` for models from binomial family, + to get comparable plots for different ways of outcome specification. Now, + if the outcome is a proportion, or defined as matrix of trials and successes, + the produced plots are the same (because the models should be the same, too). + +## Bug fixes + +* Fixed CRAN check errors. + # performance 0.10.7 ## Breaking changes diff --git a/R/binned_residuals.R b/R/binned_residuals.R index 8b5533bb0..370f33c20 100644 --- a/R/binned_residuals.R +++ b/R/binned_residuals.R @@ -29,6 +29,7 @@ #' time-consuming. By default, `show_dots = NULL`. In this case `binned_residuals()` #' tries to guess whether performance will be poor due to a very large model #' and thus automatically shows or hides dots. +#' @param verbose Toggle warnings and messages. #' @param ... Currently not used. #' #' @return A data frame representing the data that is mapped in the accompanying @@ -83,11 +84,20 @@ binned_residuals <- function(model, ci_type = c("exact", "gaussian", "boot"), residuals = c("deviance", "pearson", "response"), iterations = 1000, + verbose = TRUE, ...) { # match arguments ci_type <- match.arg(ci_type) residuals <- match.arg(residuals) + # for non-bernoulli models, `"exact"` doesn't work + if (isFALSE(insight::model_info(model)$is_bernoulli)) { + ci_type <- "gaussian" + if (verbose) { + insight::format_alert("Using `ci_type = \"gaussian\"` because model is not bernoulli.") + } + } + fitted_values <- stats::fitted(model) mf <- insight::get_data(model, verbose = FALSE) @@ -186,7 +196,7 @@ binned_residuals <- function(model, } out <- out / n - quant <- stats::quantile(out, c((1 - ci) / 2, (1 + ci) / 2)) + quant <- stats::quantile(out, c((1 - ci) / 2, (1 + ci) / 2), na.rm = TRUE) c(CI_low = quant[1L], CI_high = quant[2L]) } diff --git a/R/check_model.R b/R/check_model.R index 58757468f..e261704a5 100644 --- a/R/check_model.R +++ b/R/check_model.R @@ -374,7 +374,7 @@ check_model.model_fit <- function(x, dat$INFLUENTIAL <- .influential_obs(model, threshold = threshold) dat$PP_CHECK <- .safe(check_predictions(model, ...)) if (isTRUE(model_info$is_binomial)) { - dat$BINNED_RESID <- binned_residuals(model, ...) + dat$BINNED_RESID <- binned_residuals(model, verbose = verbose, ...) 
} if (isTRUE(model_info$is_count)) { dat$OVERDISPERSION <- .diag_overdispersion(model) diff --git a/R/check_predictions.R b/R/check_predictions.R index 832f9cc87..ac4eaf45c 100644 --- a/R/check_predictions.R +++ b/R/check_predictions.R @@ -197,10 +197,10 @@ pp_check.lm <- function(object, out <- .check_re_formula(out, object, iterations, re_formula, verbose, ...) # save information about model - if (!is.null(model_info)) { - minfo <- model_info - } else { + if (is.null(model_info)) { minfo <- insight::model_info(object) + } else { + minfo <- model_info } # glmmTMB returns column matrix for bernoulli @@ -215,9 +215,10 @@ pp_check.lm <- function(object, } if (is.null(out)) { - insight::format_error( - sprintf("Could not simulate responses. Maybe there is no `simulate()` for objects of class `%s`?", class(object)[1]) - ) + insight::format_error(sprintf( + "Could not simulate responses. Maybe there is no `simulate()` for objects of class `%s`?", + class(object)[1] + )) } # get response data, and response term, to check for transformations @@ -263,7 +264,7 @@ pp_check.glm <- function(object, out <- tryCatch( { matrix_sim <- stats::simulate(object, nsim = iterations, re.form = re_formula, ...) - as.data.frame(sapply(matrix_sim, function(i) i[, 1] / i[, 2], simplify = TRUE)) + as.data.frame(sapply(matrix_sim, function(i) i[, 1] / rowSums(i, na.rm = TRUE), simplify = TRUE)) }, error = function(e) { NULL @@ -274,9 +275,10 @@ pp_check.glm <- function(object, out <- .check_re_formula(out, object, iterations, re_formula, verbose, ...) if (is.null(out)) { - insight::format_error( - sprintf("Could not simulate responses. Maybe there is no `simulate()` for objects of class `%s`?", class(object)[1]) - ) + insight::format_error(sprintf( + "Could not simulate responses. Maybe there is no `simulate()` for objects of class `%s`?", + class(object)[1] + )) } # get response data, and response term @@ -285,13 +287,13 @@ pp_check.glm <- function(object, ) resp_string <- insight::find_terms(object)$response - out$y <- response[, 1] / response[, 2] + out$y <- response[, 1] / rowSums(response, na.rm = TRUE) # safe information about model - if (!is.null(model_info)) { - minfo <- model_info - } else { + if (is.null(model_info)) { minfo <- insight::model_info(object) + } else { + minfo <- model_info } attr(out, "check_range") <- check_range @@ -363,14 +365,20 @@ print.performance_pp_check <- function(x, verbose = TRUE, ...) { if (is.numeric(original)) { if (min(replicated) > min(original)) { insight::print_color( - insight::format_message("Warning: Minimum value of original data is not included in the replicated data.", "Model may not capture the variation of the data."), + insight::format_message( + "Warning: Minimum value of original data is not included in the replicated data.", + "Model may not capture the variation of the data." + ), "red" ) } if (max(replicated) < max(original)) { insight::print_color( - insight::format_message("Warning: Maximum value of original data is not included in the replicated data.", "Model may not capture the variation of the data."), + insight::format_message( + "Warning: Maximum value of original data is not included in the replicated data.", + "Model may not capture the variation of the data." + ), "red" ) } diff --git a/R/looic.R b/R/looic.R index 8f0a0c66e..4ded6ccd7 100644 --- a/R/looic.R +++ b/R/looic.R @@ -12,6 +12,7 @@ #' @return A list with four elements, the ELPD, LOOIC and their standard errors. 
#' #' @examplesIf require("rstanarm") +#' \donttest{ #' model <- suppressWarnings(rstanarm::stan_glm( #' mpg ~ wt + cyl, #' data = mtcars, @@ -20,6 +21,7 @@ #' refresh = 0 #' )) #' looic(model) +#' } #' @export looic <- function(model, verbose = TRUE) { insight::check_if_installed("loo") diff --git a/R/r2_bayes.R b/R/r2_bayes.R index fc007489c..24e8269ba 100644 --- a/R/r2_bayes.R +++ b/R/r2_bayes.R @@ -30,67 +30,63 @@ #' `r2_posterior()` is the actual workhorse for `r2_bayes()` and #' returns a posterior sample of Bayesian R2 values. #' -#' @examples +#' @examplesIf require("rstanarm") && require("rstantools") && require("BayesFactor") && require("brms") #' library(performance) -#' if (require("rstanarm") && require("rstantools")) { -#' model <- suppressWarnings(stan_glm( -#' mpg ~ wt + cyl, -#' data = mtcars, -#' chains = 1, -#' iter = 500, -#' refresh = 0, -#' show_messages = FALSE -#' )) -#' r2_bayes(model) +#' \donttest{ +#' model <- suppressWarnings(rstanarm::stan_glm( +#' mpg ~ wt + cyl, +#' data = mtcars, +#' chains = 1, +#' iter = 500, +#' refresh = 0, +#' show_messages = FALSE +#' )) +#' r2_bayes(model) #' -#' model <- suppressWarnings(stan_lmer( -#' Petal.Length ~ Petal.Width + (1 | Species), -#' data = iris, -#' chains = 1, -#' iter = 500, -#' refresh = 0 -#' )) -#' r2_bayes(model) +#' model <- suppressWarnings(rstanarm::stan_lmer( +#' Petal.Length ~ Petal.Width + (1 | Species), +#' data = iris, +#' chains = 1, +#' iter = 500, +#' refresh = 0 +#' )) +#' r2_bayes(model) #' } #' -#' if (require("BayesFactor")) { -#' BFM <- generalTestBF(mpg ~ qsec + gear, data = mtcars, progress = FALSE) -#' FM <- lmBF(mpg ~ qsec + gear, data = mtcars) +#' BFM <- BayesFactor::generalTestBF(mpg ~ qsec + gear, data = mtcars, progress = FALSE) +#' FM <- BayesFactor::lmBF(mpg ~ qsec + gear, data = mtcars) #' -#' r2_bayes(FM) -#' r2_bayes(BFM[3]) -#' r2_bayes(BFM, average = TRUE) # across all models +#' r2_bayes(FM) +#' r2_bayes(BFM[3]) +#' r2_bayes(BFM, average = TRUE) # across all models #' -#' # with random effects: -#' mtcars$gear <- factor(mtcars$gear) -#' model <- lmBF( -#' mpg ~ hp + cyl + gear + gear:wt, -#' mtcars, -#' progress = FALSE, -#' whichRandom = c("gear", "gear:wt") -#' ) +#' # with random effects: +#' mtcars$gear <- factor(mtcars$gear) +#' model <- BayesFactor::lmBF( +#' mpg ~ hp + cyl + gear + gear:wt, +#' mtcars, +#' progress = FALSE, +#' whichRandom = c("gear", "gear:wt") +#' ) #' -#' r2_bayes(model) -#' } +#' r2_bayes(model) #' #' \donttest{ -#' if (require("brms")) { -#' model <- suppressWarnings(brms::brm( -#' mpg ~ wt + cyl, -#' data = mtcars, -#' silent = 2, -#' refresh = 0 -#' )) -#' r2_bayes(model) +#' model <- suppressWarnings(brms::brm( +#' mpg ~ wt + cyl, +#' data = mtcars, +#' silent = 2, +#' refresh = 0 +#' )) +#' r2_bayes(model) #' -#' model <- suppressWarnings(brms::brm( -#' Petal.Length ~ Petal.Width + (1 | Species), -#' data = iris, -#' silent = 2, -#' refresh = 0 -#' )) -#' r2_bayes(model) -#' } +#' model <- suppressWarnings(brms::brm( +#' Petal.Length ~ Petal.Width + (1 | Species), +#' data = iris, +#' silent = 2, +#' refresh = 0 +#' )) +#' r2_bayes(model) #' } #' @references #' Gelman, A., Goodrich, B., Gabry, J., and Vehtari, A. (2018). @@ -114,7 +110,7 @@ r2_bayes <- function(model, robust = TRUE, ci = 0.95, verbose = TRUE, ...) { mean(i) } }), - "SE" = rapply(r2_bayesian, function(i) { + SE = rapply(r2_bayesian, function(i) { if (robust) { stats::mad(i) } else { @@ -122,9 +118,9 @@ r2_bayes <- function(model, robust = TRUE, ci = 0.95, verbose = TRUE, ...) 
{ } }), # "Estimates" = rapply(r2_bayesian, bayestestR::point_estimate, centrality = "all", dispersion = TRUE), - "CI" = rapply(r2_bayesian, bayestestR::hdi, ci = ci), - "ci_method" = "HDI", - "robust" = robust + CI = rapply(r2_bayesian, bayestestR::hdi, ci = ci), + ci_method = "HDI", + robust = robust ) } else { structure( @@ -136,17 +132,17 @@ r2_bayes <- function(model, robust = TRUE, ci = 0.95, verbose = TRUE, ...) { mean(i) } }), - "SE" = lapply(r2_bayesian, function(i) { + SE = lapply(r2_bayesian, function(i) { if (robust) { stats::mad(i) } else { stats::sd(i) } }), - # "Estimates" = lapply(r2_bayesian, bayestestR::point_estimate, centrality = "all", dispersion = TRUE), - "CI" = lapply(r2_bayesian, bayestestR::hdi, ci = ci), - "ci_method" = "HDI", - "robust" = robust + # Estimates = lapply(r2_bayesian, bayestestR::point_estimate, centrality = "all", dispersion = TRUE), + CI = lapply(r2_bayesian, bayestestR::hdi, ci = ci), + ci_method = "HDI", + robust = robust ) } } @@ -178,13 +174,13 @@ r2_posterior.brmsfit <- function(model, verbose = TRUE, ...) { res <- insight::find_response(model) if (mi[[1]]$is_mixed) { br2_mv <- list( - "R2_Bayes" = rstantools::bayes_R2( + R2_Bayes = rstantools::bayes_R2( model, re.form = NULL, re_formula = NULL, summary = FALSE ), - "R2_Bayes_marginal" = rstantools::bayes_R2( + R2_Bayes_marginal = rstantools::bayes_R2( model, re.form = NA, re_formula = NA, @@ -193,28 +189,28 @@ r2_posterior.brmsfit <- function(model, verbose = TRUE, ...) { ) br2 <- lapply(seq_along(res), function(x) { list( - "R2_Bayes" = unname(as.vector(br2_mv$R2_Bayes[, x])), - "R2_Bayes_marginal" = unname(as.vector(br2_mv$R2_Bayes_marginal[, x])) + R2_Bayes = unname(as.vector(br2_mv$R2_Bayes[, x])), + R2_Bayes_marginal = unname(as.vector(br2_mv$R2_Bayes_marginal[, x])) ) }) names(br2) <- res } else { - br2_mv <- list("R2_Bayes" = rstantools::bayes_R2(model, summary = FALSE)) + br2_mv <- list(R2_Bayes = rstantools::bayes_R2(model, summary = FALSE)) br2 <- lapply(seq_along(res), function(x) { - list("R2_Bayes" = unname(as.vector(br2_mv$R2_Bayes[, x]))) + list(R2_Bayes = unname(as.vector(br2_mv$R2_Bayes[, x]))) }) names(br2) <- res } } else { if (mi$is_mixed) { br2 <- list( - "R2_Bayes" = as.vector(rstantools::bayes_R2( + R2_Bayes = as.vector(rstantools::bayes_R2( model, re.form = NULL, re_formula = NULL, summary = FALSE )), - "R2_Bayes_marginal" = as.vector(rstantools::bayes_R2( + R2_Bayes_marginal = as.vector(rstantools::bayes_R2( model, re.form = NA, re_formula = NA, @@ -224,7 +220,7 @@ r2_posterior.brmsfit <- function(model, verbose = TRUE, ...) { names(br2$R2_Bayes) <- rep("Conditional R2", length(br2$R2_Bayes)) names(br2$R2_Bayes_marginal) <- rep("Marginal R2", length(br2$R2_Bayes)) } else { - br2 <- list("R2_Bayes" = as.vector(rstantools::bayes_R2(model, summary = FALSE))) + br2 <- list(R2_Bayes = as.vector(rstantools::bayes_R2(model, summary = FALSE))) names(br2$R2_Bayes) <- rep("R2", length(br2$R2_Bayes)) } } @@ -339,10 +335,10 @@ r2_posterior.BFBayesFactor <- function(model, # Compute posterior model probabilities - if (!is.null(prior_odds)) { - prior_odds <- c(1, prior_odds) - } else { + if (is.null(prior_odds)) { prior_odds <- rep(1, nrow(BFMods)) + } else { + prior_odds <- c(1, prior_odds) } posterior_odds <- prior_odds * BFMods$BF posterior_odds <- posterior_odds[-1] / posterior_odds[1] @@ -416,7 +412,7 @@ as.data.frame.r2_bayes <- function(x, ...) 
{ if (utils::packageVersion("BayesFactor") < package_version("0.9.12.4.3")) { insight::format_error("R2 for BayesFactor models with random effects requires BayesFactor v0.9.12.4.3 or higher.") } - insight::format_error("Woops, you seem to have stumbled on some weird edge case. Please file an issue at {.url https://github.com/easystats/performance/issues}") + insight::format_error("Woops, you seem to have stumbled on some weird edge case. Please file an issue at {.url https://github.com/easystats/performance/issues}") # nolint } out <- list( diff --git a/man/binned_residuals.Rd b/man/binned_residuals.Rd index 33e710f11..632e2a050 100644 --- a/man/binned_residuals.Rd +++ b/man/binned_residuals.Rd @@ -13,6 +13,7 @@ binned_residuals( ci_type = c("exact", "gaussian", "boot"), residuals = c("deviance", "pearson", "response"), iterations = 1000, + verbose = TRUE, ... ) } @@ -51,6 +52,8 @@ available.} \item{iterations}{Integer, the number of iterations to use for the bootstrap method. Only used if \code{ci_type = "boot"}.} +\item{verbose}{Toggle warnings and messages.} + \item{...}{Currently not used.} } \value{ diff --git a/man/looic.Rd b/man/looic.Rd index 742ac3482..7f985f4a2 100644 --- a/man/looic.Rd +++ b/man/looic.Rd @@ -22,6 +22,7 @@ indicative of a better fit. } \examples{ \dontshow{if (require("rstanarm")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} +\donttest{ model <- suppressWarnings(rstanarm::stan_glm( mpg ~ wt + cyl, data = mtcars, @@ -30,5 +31,6 @@ model <- suppressWarnings(rstanarm::stan_glm( refresh = 0 )) looic(model) +} \dontshow{\}) # examplesIf} } diff --git a/man/r2_bayes.Rd b/man/r2_bayes.Rd index 63b082e08..edc78150e 100644 --- a/man/r2_bayes.Rd +++ b/man/r2_bayes.Rd @@ -62,67 +62,65 @@ R2 takes both the fixed and random effects into account. returns a posterior sample of Bayesian R2 values. 
} \examples{ +\dontshow{if (require("rstanarm") && require("rstantools") && require("BayesFactor") && require("brms")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} library(performance) -if (require("rstanarm") && require("rstantools")) { - model <- suppressWarnings(stan_glm( - mpg ~ wt + cyl, - data = mtcars, - chains = 1, - iter = 500, - refresh = 0, - show_messages = FALSE - )) - r2_bayes(model) - - model <- suppressWarnings(stan_lmer( - Petal.Length ~ Petal.Width + (1 | Species), - data = iris, - chains = 1, - iter = 500, - refresh = 0 - )) - r2_bayes(model) +\donttest{ +model <- suppressWarnings(rstanarm::stan_glm( + mpg ~ wt + cyl, + data = mtcars, + chains = 1, + iter = 500, + refresh = 0, + show_messages = FALSE +)) +r2_bayes(model) + +model <- suppressWarnings(rstanarm::stan_lmer( + Petal.Length ~ Petal.Width + (1 | Species), + data = iris, + chains = 1, + iter = 500, + refresh = 0 +)) +r2_bayes(model) } -if (require("BayesFactor")) { - BFM <- generalTestBF(mpg ~ qsec + gear, data = mtcars, progress = FALSE) - FM <- lmBF(mpg ~ qsec + gear, data = mtcars) +BFM <- BayesFactor::generalTestBF(mpg ~ qsec + gear, data = mtcars, progress = FALSE) +FM <- BayesFactor::lmBF(mpg ~ qsec + gear, data = mtcars) - r2_bayes(FM) - r2_bayes(BFM[3]) - r2_bayes(BFM, average = TRUE) # across all models +r2_bayes(FM) +r2_bayes(BFM[3]) +r2_bayes(BFM, average = TRUE) # across all models - # with random effects: - mtcars$gear <- factor(mtcars$gear) - model <- lmBF( - mpg ~ hp + cyl + gear + gear:wt, - mtcars, - progress = FALSE, - whichRandom = c("gear", "gear:wt") - ) +# with random effects: +mtcars$gear <- factor(mtcars$gear) +model <- BayesFactor::lmBF( + mpg ~ hp + cyl + gear + gear:wt, + mtcars, + progress = FALSE, + whichRandom = c("gear", "gear:wt") +) - r2_bayes(model) -} +r2_bayes(model) \donttest{ -if (require("brms")) { - model <- suppressWarnings(brms::brm( - mpg ~ wt + cyl, - data = mtcars, - silent = 2, - refresh = 0 - )) - r2_bayes(model) - - model <- suppressWarnings(brms::brm( - Petal.Length ~ Petal.Width + (1 | Species), - data = iris, - silent = 2, - refresh = 0 - )) - r2_bayes(model) -} +model <- suppressWarnings(brms::brm( + mpg ~ wt + cyl, + data = mtcars, + silent = 2, + refresh = 0 +)) +r2_bayes(model) + +model <- suppressWarnings(brms::brm( + Petal.Length ~ Petal.Width + (1 | Species), + data = iris, + silent = 2, + refresh = 0 +)) +r2_bayes(model) } +\dontshow{\}) # examplesIf} } \references{ Gelman, A., Goodrich, B., Gabry, J., and Vehtari, A. (2018). 
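A note on the central fix in R/check_predictions.R above: for a binomial GLM fitted with a cbind(successes, failures) response, stats::simulate() returns each replicate as a two-column matrix of successes and failures, so the simulated outcome has to be turned into a proportion by dividing the first column by the row total (the number of trials), not by the second column. The following is a minimal sketch of that computation under the same data-generating setup as the new tests below; object names such as sims and prop_sims are illustrative only, not package internals.

    # binomial GLM with a matrix response: cbind(successes, failures)
    set.seed(1)
    tot <- rep(10, 100)                          # trials per observation
    suc <- rbinom(100, size = tot, prob = 0.9)   # observed successes
    dat <- data.frame(suc = suc, fail = tot - suc)

    mod <- glm(cbind(suc, fail) ~ 1, family = binomial, data = dat)

    # each simulated draw is a two-column matrix (successes, failures);
    # the proportion is successes divided by the row sum, i.e. by the
    # number of trials - this mirrors the patched pp_check.glm() code
    sims <- stats::simulate(mod, nsim = 50)
    prop_sims <- as.data.frame(
      sapply(sims, function(i) i[, 1] / rowSums(i, na.rm = TRUE))
    )
    obs_prop <- dat$suc / (dat$suc + dat$fail)

Dividing by i[, 2] instead, as the previous code did, gives successes over failures rather than successes over trials, which is why posterior predictive plots for matrix-response models did not line up with those from the equivalent proportion-plus-weights specification.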
diff --git a/tests/testthat/test-binned_residuals.R b/tests/testthat/test-binned_residuals.R index 6f0155092..e23404452 100644 --- a/tests/testthat/test-binned_residuals.R +++ b/tests/testthat/test-binned_residuals.R @@ -155,3 +155,22 @@ test_that("binned_residuals, bootstrapped CI", { tolerance = 1e-4 ) }) + +test_that("binned_residuals, msg for non-bernoulli", { + skip_on_cran() + tot <- rep(10, 100) + suc <- rbinom(100, prob = 0.9, size = tot) + + dat <- data.frame(tot, suc) + dat$prop <- suc / tot + dat$x1 <- as.factor(sample(1:5, 100, replace = TRUE)) + + mod <- glm(prop ~ x1, + family = binomial, + data = dat, + weights = tot + ) + + expect_message(binned_residuals(mod), regex = "Using `ci_type = \"gaussian\"`") + expect_silent(binned_residuals(mod, verbose = FALSE)) +}) diff --git a/tests/testthat/test-check_predictions.R b/tests/testthat/test-check_predictions.R index 61d97f9b9..a840894dc 100644 --- a/tests/testthat/test-check_predictions.R +++ b/tests/testthat/test-check_predictions.R @@ -78,3 +78,53 @@ test_that("check_predictions, glmmTMB", { ) expect_true(attributes(out)$model_info$is_bernoulli) }) + + +test_that("check_predictions, glm, binomial", { + skip_if(packageVersion("insight") <= "0.19.6") + data(mtcars) + set.seed(1) + tot <- rep(10, 100) + suc <- rbinom(100, prob = 0.9, size = tot) + dat <- data.frame(tot, suc) + dat$prop <- suc / tot + + mod1 <- glm(cbind(suc, tot - suc) ~ 1, + family = binomial, + data = dat + ) + + mod2 <- glm(prop ~ 1, + family = binomial, + data = dat, + weights = tot + ) + + mod3 <- glm(cbind(suc, tot) ~ 1, + family = binomial, + data = dat + ) + + mod4 <- glm(am ~ 1, + family = binomial, + data = mtcars + ) + + set.seed(1) + out1 <- check_predictions(mod1) + set.seed(1) + out2 <- check_predictions(mod2) + set.seed(1) + out3 <- check_predictions(mod3) + set.seed(1) + out4 <- check_predictions(mod4) + + expect_equal(head(out1$sim_1), c(1, 0.9, 0.9, 0.8, 1, 0.8), tolerance = 1e-4) + expect_false(attributes(out1)$model_info$is_bernoulli) + expect_equal(head(out2$sim_1), c(1, 0.9, 0.9, 0.8, 1, 0.8), tolerance = 1e-4) + expect_false(attributes(out2)$model_info$is_bernoulli) + expect_equal(head(out3$sim_1), c(0.4, 0.42105, 0.47368, 0.61111, 0.4, 0.61111), tolerance = 1e-3) + expect_false(attributes(out3)$model_info$is_bernoulli) + expect_equal(head(out4$sim_1), c(0, 0, 0, 1, 0, 1), tolerance = 1e-4) + expect_true(attributes(out4)$model_info$is_bernoulli) +})
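The two new test files exercise both behavioural changes: identical posterior predictive checks for the different binomial outcome specifications, and the Gaussian fallback in binned_residuals() for non-Bernoulli models. A short interactive sketch along the same lines as test-binned_residuals.R (object names are illustrative; the message text is the one added in R/binned_residuals.R):

    library(performance)

    set.seed(1)
    tot <- rep(10, 100)
    suc <- rbinom(100, size = tot, prob = 0.9)
    dat <- data.frame(tot = tot, suc = suc)
    dat$prop <- dat$suc / dat$tot
    dat$x1 <- as.factor(sample(1:5, 100, replace = TRUE))

    # proportion outcome with `weights` = number of trials ...
    mod_prop <- glm(prop ~ x1, family = binomial, data = dat, weights = tot)
    # ... and the equivalent matrix-of-successes-and-failures specification
    mod_mat <- glm(cbind(suc, tot - suc) ~ x1, family = binomial, data = dat)
    all.equal(fitted(mod_prop), fitted(mod_mat))  # TRUE: same model, so the plots should match

    # for non-Bernoulli binomial models, binned_residuals() now switches to
    # ci_type = "gaussian", because the "exact" type doesn't work here
    binned_residuals(mod_prop)                   # message: Using `ci_type = "gaussian"` because model is not bernoulli.
    binned_residuals(mod_prop, verbose = FALSE)  # same result, message suppressed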