Skip to content

Commit

Permalink
Formatting probability of direction objects fails (#608)
Browse files Browse the repository at this point in the history
* Formatting probability of direction objects fails
Fixes #603

* news, desc

* tests

* fix test

* fix

* lintr

* docs

* skip on mac

* skip example

* styler

* vig
  • Loading branch information
strengejacke committed Jun 29, 2023
1 parent efb0cab commit ed5eb94
Show file tree
Hide file tree
Showing 11 changed files with 120 additions and 31 deletions.
2 changes: 1 addition & 1 deletion DESCRIPTION
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
Type: Package
Package: bayestestR
Title: Understand and Describe Bayesian Models and Posterior Distributions
Version: 0.13.1.1
Version: 0.13.1.2
Authors@R:
c(person(given = "Dominique",
family = "Makowski",
Expand Down
2 changes: 2 additions & 0 deletions NAMESPACE
Original file line number Diff line number Diff line change
Expand Up @@ -220,12 +220,14 @@ S3method(format,bayestestR_eti)
S3method(format,bayestestR_hdi)
S3method(format,bayestestR_si)
S3method(format,describe_posterior)
S3method(format,equivalence_test)
S3method(format,map_estimate)
S3method(format,p_direction)
S3method(format,p_map)
S3method(format,p_rope)
S3method(format,p_significance)
S3method(format,point_estimate)
S3method(format,rope)
S3method(hdi,BFBayesFactor)
S3method(hdi,BGGM)
S3method(hdi,MCMCglmm)
Expand Down
5 changes: 5 additions & 0 deletions NEWS.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,11 @@

* Retrieving models from the environment was improved.

## Bug fixes

* Fixed issues in various `format()` methods, which did not work properly for
  a few functions (like `p_direction()`).

# bayestestR 0.13.1

## Changes
Expand Down
6 changes: 3 additions & 3 deletions R/bayesfactor_parameters.R
Original file line number Diff line number Diff line change
Expand Up @@ -136,13 +136,13 @@
#' bayesfactor_parameters(group_diff, prior = stan_model, verbose = FALSE)
#'
#' # Or
#' group_diff_prior <- pairs(emmeans(unupdate(stan_model), ~group))
#' bayesfactor_parameters(group_diff, prior = group_diff_prior, verbose = FALSE)
#' # group_diff_prior <- pairs(emmeans(unupdate(stan_model), ~group))
#' # bayesfactor_parameters(group_diff, prior = group_diff_prior, verbose = FALSE)
#' }
#'
#' # brms models
#' # -----------
#' if (require("brms")) {
#' if (require("brms") && require("logspline")) {
#'   contrasts(sleep$group) <- contr.equalprior_pairs # see vignette
#' my_custom_priors <-
#' set_prior("student_t(3, 0, 1)", class = "b") +
Expand Down
12 changes: 8 additions & 4 deletions R/equivalence_test.R
Original file line number Diff line number Diff line change
Expand Up @@ -118,13 +118,17 @@ equivalence_test.numeric <- function(x, range = "default", ci = 0.95, verbose =
out <- as.data.frame(rope_data)

if (all(ci < 1)) {
out$ROPE_Equivalence <- ifelse(out$ROPE_Percentage == 0, "Rejected",
ifelse(out$ROPE_Percentage == 1, "Accepted", "Undecided")
out$ROPE_Equivalence <- datawizard::recode_into(
out$ROPE_Percentage == 0 ~ "Rejected",
out$ROPE_Percentage == 1 ~ "Accepted",
default = "Undecided"
)
} else {
# Related to guidelines for full rope (https://easystats.github.io/bayestestR/articles/4_Guidelines.html)
out$ROPE_Equivalence <- ifelse(out$ROPE_Percentage < 0.025, "Rejected",
ifelse(out$ROPE_Percentage > 0.975, "Accepted", "Undecided")
out$ROPE_Equivalence <- datawizard::recode_into(
out$ROPE_Percentage < 0.025 ~ "Rejected",
out$ROPE_Percentage > 0.975 ~ "Accepted",
default = "Undecided"
)
}

Expand Down
5 changes: 5 additions & 0 deletions R/format.R
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,11 @@ format.bayestestR_eti <- format.describe_posterior
#' @export
format.bayestestR_si <- format.describe_posterior

#' @export
format.equivalence_test <- format.describe_posterior

#' @export
format.rope <- format.describe_posterior



Expand Down
24 changes: 14 additions & 10 deletions R/unupdate.R
Original file line number Diff line number Diff line change
Expand Up @@ -102,15 +102,17 @@ unupdate.brmsfit_multiple <- function(model,
insight::format_alert("Sampling priors, please wait...")
}

utils::capture.output({model_prior <-
try(suppressMessages(suppressWarnings(
stats::update(
model,
sample_prior = "only",
newdata = newdata,
refresh = 0
)
)), silent = TRUE)})
utils::capture.output({
model_prior <-
try(suppressMessages(suppressWarnings(
stats::update(
model,
sample_prior = "only",
newdata = newdata,
refresh = 0
)
)), silent = TRUE)
})

if (methods::is(model_prior, "try-error")) {
if (grepl("proper priors", model_prior, fixed = TRUE)) {
Expand Down Expand Up @@ -143,7 +145,9 @@ unupdate.blavaan <- function(model, verbose = TRUE, ...) {

cl$prisamp <- TRUE
suppressMessages(suppressWarnings(
utils::capture.output({model_prior <- eval(cl)})
utils::capture.output({
model_prior <- eval(cl)
})
))

model_prior
Expand Down
6 changes: 3 additions & 3 deletions man/bayesfactor_parameters.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

20 changes: 11 additions & 9 deletions tests/testthat/test-check_prior.R
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
skip_on_os(os = "mac")

test_that("check_prior - stanreg", {
skip_on_cran()
skip_on_os(os = "windows")
skip_on_os(os = c("windows", "mac"))
skip_if_offline()
skip_if_not_or_load_if_installed("rstanarm")
skip_if_not_or_load_if_installed("BH")
Expand All @@ -10,12 +12,12 @@ test_that("check_prior - stanreg", {
set.seed(333)
model1 <- insight::download_model("stanreg_lm_1")

expect_equal(
expect_identical(
check_prior(model1)$Prior_Quality,
c("informative", "uninformative")
)

expect_equal(
expect_identical(
check_prior(model1, method = "lakeland")$Prior_Quality,
c("informative", "informative")
)
Expand Down Expand Up @@ -43,15 +45,15 @@ test_that("check_prior - brms (linux)", {
)
})

expect_warning(expect_equal(
expect_warning(expect_identical(
check_prior(model2)$Prior_Quality,
c(
"uninformative", "informative", "informative", "uninformative",
"uninformative", "not determinable", "not determinable", "not determinable"
)
))

expect_warning(expect_equal(
expect_warning(expect_identical(
check_prior(model2, method = "lakeland")$Prior_Quality,
c(
"informative", "informative", "informative", "informative",
Expand Down Expand Up @@ -88,7 +90,7 @@ test_that("check_prior - brms (linux)", {
# ))


expect_warning(expect_equal(
expect_warning(expect_identical(
check_prior(model2, method = "lakeland")$Prior_Quality,
c(
"informative", "informative", "informative", "informative",
Expand All @@ -99,7 +101,7 @@ test_that("check_prior - brms (linux)", {

test_that("check_prior - brms (not linux or windows)", {
skip_on_cran()
skip_on_os(os = c("linux", "windows"))
skip_on_os(os = c("linux", "windows", "mac"))
skip_if_offline()
skip_if_not_or_load_if_installed("rstanarm")
skip_if_not_or_load_if_installed("BH")
Expand All @@ -117,15 +119,15 @@ test_that("check_prior - brms (not linux or windows)", {
)
})

expect_warning(expect_equal(
expect_warning(expect_identical(
check_prior(model2)$Prior_Quality,
c(
"uninformative", "uninformative", "informative", "uninformative",
"uninformative", "not determinable", "not determinable", "not determinable"
)
))

expect_warning(expect_equal(
expect_warning(expect_identical(
check_prior(model2, method = "lakeland")$Prior_Quality,
c(
"informative", "informative", "informative", "informative",
Expand Down
59 changes: 59 additions & 0 deletions tests/testthat/test-format.R
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
# Regression tests for the format() methods of bayestestR objects
# (formatting p_direction objects used to fail, see #603). Each call
# formats the object produced from the same fixed random sample and
# compares against the expected character data frame. `ignore_attr =
# TRUE` is used because row names, column names mangled by
# `check.names`, and class attributes are irrelevant to these checks.
test_that("format() methods for bayestestR objects", {
  # fixed seed so the formatted point estimates below are reproducible
  set.seed(333)
  x <- rnorm(100)
  expect_equal(
    format(point_estimate(x)),
    data.frame(Median = "0.05", Mean = "-0.02", MAP = "0.13", stringsAsFactors = FALSE),
    ignore_attr = TRUE
  )
  expect_equal(
    format(ci(x)),
    data.frame(`95% CI` = "[-1.93, 1.77]", stringsAsFactors = FALSE),
    ignore_attr = TRUE
  )
  expect_equal(
    format(p_rope(x)),
    data.frame(ROPE = "[-0.10, 0.10]", `p (ROPE)` = "0.100", stringsAsFactors = FALSE),
    ignore_attr = TRUE
  )
  expect_equal(
    format(map_estimate(x)),
    data.frame(x = "0.13", stringsAsFactors = FALSE),
    ignore_attr = TRUE
  )
  expect_equal(
    format(p_direction(x)),
    data.frame(x = "0.51", stringsAsFactors = FALSE),
    ignore_attr = TRUE
  )
  expect_equal(
    format(p_map(x)),
    data.frame(x = "0.97", stringsAsFactors = FALSE),
    ignore_attr = TRUE
  )
  expect_equal(
    format(p_significance(x)),
    data.frame(x = "0.46", stringsAsFactors = FALSE),
    ignore_attr = TRUE
  )
  expect_equal(
    format(rope(x)),
    data.frame(CI = "0.95", ROPE = "[-0.10, 0.10]", `% in ROPE` = "10.64%", stringsAsFactors = FALSE),
    ignore_attr = TRUE
  )
  expect_equal(
    format(equivalence_test(x)),
    data.frame(
      CI = "0.95", ROPE = "[-0.10, 0.10]", `% in ROPE` = "10.64%",
      `Equivalence (ROPE)` = "Undecided", HDI_low = "-1.93", HDI_high = "1.77",
      stringsAsFactors = FALSE
    ),
    ignore_attr = TRUE
  )
  # bayesfactor_parameters() needs the optional logspline package
  skip_if_not_installed("logspline")
  expect_equal(
    format(bayesfactor_parameters(x, verbose = FALSE)),
    data.frame(BF = "1.00", stringsAsFactors = FALSE),
    ignore_attr = TRUE
  )
})
10 changes: 9 additions & 1 deletion vignettes/bayes_factors.Rmd
Original file line number Diff line number Diff line change
Expand Up @@ -920,14 +920,22 @@ posterior distribution, and estimate the HDI.

In `bayestestR`, we can do this with the `weighted_posteriors()` function:

```{r}
```{r eval=FALSE}
BMA_draws <- weighted_posteriors(mod, mod_carb, verbose = FALSE)
BMA_hdi <- hdi(BMA_draws, ci = 0.95)
BMA_hdi
plot(BMA_hdi)
```
```{r echo=FALSE}
BMA_draws <- weighted_posteriors(mod, mod_carb, verbose = FALSE)
BMA_hdi <- hdi(BMA_draws, ci = 0.95)
BMA_hdi
plot(BMA_hdi, data = BMA_draws)
```

We can see that across both models under consideration, the posterior of the
`carb` effect is almost equally weighted between the alternative model and the
Expand Down

0 comments on commit ed5eb94

Please sign in to comment.