|
### FROG

# Transition matrix: from each of the 3 lily pads the frog jumps to one of
# the other two pads with equal probability (it never stays put).
q <- matrix(rep(0, 9), nrow = 3)
q[1,] <- c(0.0, 0.5, 0.5)
q[2,] <- c(0.5, 0.0, 0.5)
q[3,] <- c(0.5, 0.5, 0.0)

m <- 10000

set.seed(12345)
# Preallocate the chain instead of growing it with c() on every iteration
# (appending in a loop copies the whole vector each time, O(m^2) work).
x <- numeric(m + 1)
x[1] <- 1  # starting state
for (i in seq_len(m)) {
  # sample the next state from the row of q for the current state
  x[i + 1] <- sample(1:3, 1, prob = q[x[i], ])
}

# empirical distribution over states; for large m this approaches the
# stationary distribution (1/3, 1/3, 1/3)
table(x) / (m + 1)
| 18 | + |
### FLY
m <- 10000

set.seed(1010101)
# Preallocate the chain instead of growing it with c() each iteration
# (avoids O(m^2) copying).
x <- numeric(m + 1)
x[1] <- 0.5  # starting position on [0, 1]
for (i in seq_len(m)) {
  # propose next state: uniform random-walk step of size at most 0.5
  x_new <- x[i] + runif(1, -0.5, +0.5)
  # reflect proposals that leave [0, 1] back into the interval
  if (x_new < 0) x_new <- 0 - x_new
  if (x_new > 1) x_new <- 2 - x_new
  # add new state
  x[i + 1] <- x_new
}

hist(x, breaks = 20)
| 35 | + |
| 36 | + |
### 0-1
# Transition matrix of a 2-state chain: both states equally likely
# regardless of the current state (i.e. i.i.d. fair coin flips).
q <- matrix(rep(0, 4), nrow = 2)
q[1,] <- c(0.5, 0.5)
q[2,] <- c(0.5, 0.5)

m <- 10000

set.seed(12345)
# Preallocate the chain instead of growing it with c() on every iteration.
x <- numeric(m + 1)
x[1] <- 1  # starting state
for (i in seq_len(m)) {
  # sample next state
  x[i + 1] <- sample(1:2, 1, prob = q[x[i], ])
}

x <- x - 1 # relabel states: 0-1 instead of 1-2
table(x) / (m + 1)
| 52 | + |
| 53 | + |
| 54 | + |
### Bayesian posterior

# A simple Bernoulli-Beta conjugate model:
#   prior:     Beta(1, 1)
#   data:      12 observations, 9 ones and 3 zeroes
#   posterior: Beta(10, 4)
# We estimate the posterior probability that the parameter exceeds 0.75.
library(mcmcse)
library(ggplot2)
a0 <- 1
b0 <- 1
y <- rep(c(1, 0), times = c(9, 3))
n <- length(y)

## 1. Computation based on deriving that the posterior is Beta(10, 4)
x <- seq(0, 1, by = 0.01)
xx <- data.frame(
  x = x,
  y = dbeta(x, a0 + sum(y), b0 + n - sum(y)),
  type = "posterior"
)
ggplot(xx, aes(x = x, y = y, group = type, colour = type)) +
  geom_line() +
  xlab("theta") +
  ylab("density")
1 - pbeta(0.75, a0 + sum(y), b0 + n - sum(y))
| 73 | + |
## 2. Computation based on just integrating the non-normalized
## posterior and normalizing

# Unnormalized posterior density: binomial likelihood of the observed
# number of successes times the Beta prior. theta may be a vector
# (dbinom and dbeta are vectorized), which integrate() requires of
# its integrand.
prop_posterior <- function(theta, a0, b0, y) {
  successes <- sum(y)
  trials <- length(y)
  dbinom(successes, trials, theta) * dbeta(theta, a0, b0)
}
# Integrate the unnormalized posterior over (0.75, 1] and [0, 0.75];
# the ratio p1 / (p1 + p2) is the normalized posterior probability.
# Extra integrand arguments are passed by name: unnamed trailing
# arguments to integrate() are captured by its `...` only by position,
# which is fragile.
p1 <- integrate(prop_posterior, lower = 0.75, upper = 1.00, a0 = a0, b0 = b0, y = y)$value
p2 <- integrate(prop_posterior, lower = 0.00, upper = 0.75, a0 = a0, b0 = b0, y = y)$value
p1 / (p1 + p2) # normalization
| 82 | + |
## 3. Computation based on Monte Carlo approximation
## with actual distribution

# Draw directly from the known Beta posterior; the proportion of draws
# above 0.75 estimates P(theta > 0.75), and mcse() also reports the
# Monte Carlo standard error of that estimate.
set.seed(0)
m <- 1000
x <- rbeta(m, shape1 = a0 + sum(y), shape2 = b0 + n - sum(y))
mcse(x > 0.75)
| 89 | + |
## 4. Computation based on Monte Carlo approximation
## with prop_posterior and Metropolis algorithm (MCMC)
set.seed(0)
m <- 10000
# Preallocate the chain instead of growing it with c() on every
# iteration (appending in a loop is O(m^2) in copies).
x <- numeric(m + 1)
x[1] <- 0.5  # starting position
for (i in seq_len(m)) {
  # propose a new state: uniform random-walk step, reflected at the
  # [0, 1] boundaries so proposals stay inside the parameter space
  x_new <- x[i] + runif(1, -0.5, +0.5)
  if (x_new < 0) x_new <- 0 - x_new
  if (x_new > 1) x_new <- 2 - x_new

  # Metropolis correction: accept with probability
  # min(1, p(new) / p(current)); the normalizing constant cancels,
  # so the unnormalized posterior suffices
  p_curr <- prop_posterior(x[i], a0, b0, y)
  p_new <- prop_posterior(x_new, a0, b0, y)
  acceptance_prob <- min(p_new / p_curr, 1.0)
  if (runif(1) > acceptance_prob) {
    # reject: the chain stays at the current state
    x_new <- x[i]
  }

  # record the state
  x[i + 1] <- x_new
}
hist(x)
mcse(x > 0.75)
| 115 | + |
## 5. Computation using Stan
library(rstan)

# Compile the Stan model once and cache the compiled object on disk;
# subsequent runs just reload the cache.
if (!file.exists("../Lecture03 - Probabilistic programming/Temp/bernoulli.compiled")) {
  bernoulli_compiled <- stan_model("../Lecture03 - Probabilistic programming/bernoulli.stan")
  # make sure the cache directory exists, otherwise saveRDS() fails
  # on a fresh checkout
  if (!dir.exists("../Lecture03 - Probabilistic programming/Temp")) {
    dir.create("../Lecture03 - Probabilistic programming/Temp", recursive = TRUE)
  }
  saveRDS(bernoulli_compiled, file = "../Lecture03 - Probabilistic programming/Temp/bernoulli.compiled")
} else {
  bernoulli_compiled <- readRDS("../Lecture03 - Probabilistic programming/Temp/bernoulli.compiled")
}

stan_data <- list(y = y,
                  n = length(y))

# one chain, 1200 iterations, first 200 discarded as warmup
samples <- sampling(bernoulli_compiled, data = stan_data, # we supply the model and data
                    chains = 1, iter = 1200, warmup = 200)
x <- extract(samples)$theta
hist(x)
mcse(x > 0.75)
| 133 | + |
0 commit comments