diff --git a/previews/PR513/.documenter-siteinfo.json b/previews/PR513/.documenter-siteinfo.json
new file mode 100644
index 000000000..10fd30b93
--- /dev/null
+++ b/previews/PR513/.documenter-siteinfo.json
@@ -0,0 +1 @@
+{"documenter":{"julia_version":"1.11.0","generation_timestamp":"2024-10-24T02:43:44","documenter_version":"1.7.0"}}
\ No newline at end of file
diff --git a/previews/PR513/assets/documenter.js b/previews/PR513/assets/documenter.js
new file mode 100644
index 000000000..235cb2e5d
--- /dev/null
+++ b/previews/PR513/assets/documenter.js
@@ -0,0 +1,1065 @@
+// Generated by Documenter.jl
+requirejs.config({
+ paths: {
+ 'highlight-julia': 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.8.0/languages/julia.min',
+ 'headroom': 'https://cdnjs.cloudflare.com/ajax/libs/headroom/0.12.0/headroom.min',
+ 'jqueryui': 'https://cdnjs.cloudflare.com/ajax/libs/jqueryui/1.13.2/jquery-ui.min',
+ 'jquery': 'https://cdnjs.cloudflare.com/ajax/libs/jquery/3.7.0/jquery.min',
+ 'headroom-jquery': 'https://cdnjs.cloudflare.com/ajax/libs/headroom/0.12.0/jQuery.headroom.min',
+ 'highlight': 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.8.0/highlight.min',
+ 'highlight-julia-repl': 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/11.8.0/languages/julia-repl.min',
+ },
+ shim: {
+ "highlight-julia": {
+ "deps": [
+ "highlight"
+ ]
+ },
+ "headroom-jquery": {
+ "deps": [
+ "jquery",
+ "headroom"
+ ]
+ },
+ "highlight-julia-repl": {
+ "deps": [
+ "highlight"
+ ]
+ }
+}
+});
+////////////////////////////////////////////////////////////////////////////////
+require([], function() {
+window.MathJax = {
+ "tex": {
+ "packages": [
+ "base",
+ "ams",
+ "autoload",
+ "mhchem"
+ ],
+ "inlineMath": [
+ [
+ "$",
+ "$"
+ ],
+ [
+ "\\(",
+ "\\)"
+ ]
+ ],
+ "tags": "ams"
+ },
+ "options": {
+ "ignoreHtmlClass": "tex2jax_ignore",
+ "processHtmlClass": "tex2jax_process"
+ }
+}
+;
+
+(function () {
+ var script = document.createElement('script');
+ script.src = 'https://cdnjs.cloudflare.com/ajax/libs/mathjax/3.2.2/es5/tex-svg-full.js';
+ script.async = true;
+ document.head.appendChild(script);
+})();
+
+})
+////////////////////////////////////////////////////////////////////////////////
+require(['jquery', 'highlight', 'highlight-julia', 'highlight-julia-repl'], function($) {
+$(document).ready(function() {
+ hljs.highlightAll();
+})
+
+})
+////////////////////////////////////////////////////////////////////////////////
+require(['jquery'], function($) {
+
+let timer = 0;
+var isExpanded = true;
+
+$(document).on(
+ "click",
+ ".docstring .docstring-article-toggle-button",
+ function () {
+ let articleToggleTitle = "Expand docstring";
+ const parent = $(this).parent();
+
+ debounce(() => {
+ if (parent.siblings("section").is(":visible")) {
+ parent
+ .find("a.docstring-article-toggle-button")
+ .removeClass("fa-chevron-down")
+ .addClass("fa-chevron-right");
+ } else {
+ parent
+ .find("a.docstring-article-toggle-button")
+ .removeClass("fa-chevron-right")
+ .addClass("fa-chevron-down");
+
+ articleToggleTitle = "Collapse docstring";
+ }
+
+ parent
+ .children(".docstring-article-toggle-button")
+ .prop("title", articleToggleTitle);
+ parent.siblings("section").slideToggle();
+ });
+ }
+);
+
+$(document).on("click", ".docs-article-toggle-button", function (event) {
+ let articleToggleTitle = "Expand docstring";
+ let navArticleToggleTitle = "Expand all docstrings";
+ let animationSpeed = event.noToggleAnimation ? 0 : 400;
+
+ debounce(() => {
+ if (isExpanded) {
+ $(this).removeClass("fa-chevron-up").addClass("fa-chevron-down");
+ $("a.docstring-article-toggle-button")
+ .removeClass("fa-chevron-down")
+ .addClass("fa-chevron-right");
+
+ isExpanded = false;
+
+ $(".docstring section").slideUp(animationSpeed);
+ } else {
+ $(this).removeClass("fa-chevron-down").addClass("fa-chevron-up");
+ $("a.docstring-article-toggle-button")
+ .removeClass("fa-chevron-right")
+ .addClass("fa-chevron-down");
+
+ isExpanded = true;
+ articleToggleTitle = "Collapse docstring";
+ navArticleToggleTitle = "Collapse all docstrings";
+
+ $(".docstring section").slideDown(animationSpeed);
+ }
+
+ $(this).prop("title", navArticleToggleTitle);
+ $(".docstring-article-toggle-button").prop("title", articleToggleTitle);
+ });
+});
+
+function debounce(callback, timeout = 300) {
+ if (Date.now() - timer > timeout) {
+ callback();
+ }
+
+ clearTimeout(timer);
+
+ timer = Date.now();
+}
+
+})
+////////////////////////////////////////////////////////////////////////////////
+require([], function() {
+function addCopyButtonCallbacks() {
+ for (const el of document.getElementsByTagName("pre")) {
+ const button = document.createElement("button");
+ button.classList.add("copy-button", "fa-solid", "fa-copy");
+ button.setAttribute("aria-label", "Copy this code block");
+ button.setAttribute("title", "Copy");
+
+ el.appendChild(button);
+
+ const success = function () {
+ button.classList.add("success", "fa-check");
+ button.classList.remove("fa-copy");
+ };
+
+ const failure = function () {
+ button.classList.add("error", "fa-xmark");
+ button.classList.remove("fa-copy");
+ };
+
+ button.addEventListener("click", function () {
+ copyToClipboard(el.innerText).then(success, failure);
+
+ setTimeout(function () {
+ button.classList.add("fa-copy");
+ button.classList.remove("success", "fa-check", "fa-xmark");
+ }, 5000);
+ });
+ }
+}
+
+function copyToClipboard(text) {
+ // clipboard API is only available in secure contexts
+ if (window.navigator && window.navigator.clipboard) {
+ return window.navigator.clipboard.writeText(text);
+ } else {
+ return new Promise(function (resolve, reject) {
+ try {
+ const el = document.createElement("textarea");
+ el.textContent = text;
+ el.style.position = "fixed";
+ el.style.opacity = 0;
+ document.body.appendChild(el);
+ el.select();
+ document.execCommand("copy");
+
+ resolve();
+ } catch (err) {
+ reject(err);
+ } finally {
+ document.body.removeChild(el);
+ }
+ });
+ }
+}
+
+if (document.readyState === "loading") {
+ document.addEventListener("DOMContentLoaded", addCopyButtonCallbacks);
+} else {
+ addCopyButtonCallbacks();
+}
+
+})
+////////////////////////////////////////////////////////////////////////////////
+require(['jquery', 'headroom', 'headroom-jquery'], function($, Headroom) {
+
+// Manages the top navigation bar (hides it when the user starts scrolling down on the
+// mobile).
+window.Headroom = Headroom; // work around buggy module loading?
+$(document).ready(function () {
+ $("#documenter .docs-navbar").headroom({
+ tolerance: { up: 10, down: 10 },
+ });
+});
+
+})
+////////////////////////////////////////////////////////////////////////////////
+require(['jquery'], function($) {
+
+$(document).ready(function () {
+ let meta = $("div[data-docstringscollapsed]").data();
+
+ if (meta?.docstringscollapsed) {
+ $("#documenter-article-toggle-button").trigger({
+ type: "click",
+ noToggleAnimation: true,
+ });
+ }
+});
+
+})
+////////////////////////////////////////////////////////////////////////////////
+require(['jquery'], function($) {
+
+/*
+To get an in-depth look at the thought process you can refer to: https://hetarth02.hashnode.dev/series/gsoc
+
+PSEUDOCODE:
+
+Searching happens automatically as the user types or adjusts the selected filters.
+To preserve responsiveness, as much as possible of the slow parts of the search are done
+in a web worker. Searching and result generation are done in the worker, and filtering and
+DOM updates are done in the main thread. The filters are in the main thread as they should
+be very quick to apply. This lets filters be changed without re-searching with minisearch
+(which is possible even if filtering is on the worker thread) and also lets filters be
+changed _while_ the worker is searching and without message passing (neither of which are
+possible if filtering is on the worker thread)
+
+SEARCH WORKER:
+
+Import minisearch
+
+Build index
+
+On message from main thread
+ run search
+ find the first 200 unique results from each category, and compute their divs for display
+ note that this is necessary and sufficient information for the main thread to find the
+ first 200 unique results from any given filter set
+ post results to main thread
+
+MAIN:
+
+Launch worker
+
+Declare nonconstant globals (worker_is_running, last_search_text, unfiltered_results)
+
+On text update
+ if worker is not running, launch_search()
+
+launch_search
+ set worker_is_running to true, set last_search_text to the search text
+ post the search query to worker
+
+on message from worker
+ if last_search_text is not the same as the text in the search field,
+ the latest search result is not reflective of the latest search query, so update again
+ launch_search()
+ otherwise
+ set worker_is_running to false
+
+ regardless, display the new search results to the user
+ save the unfiltered_results as a global
+ update_search()
+
+on filter click
+ adjust the filter selection
+ update_search()
+
+update_search
+ apply search filters by looping through the unfiltered_results and finding the first 200
+ unique results that match the filters
+
+ Update the DOM
+*/
+
+/////// SEARCH WORKER ///////
+
+function worker_function(documenterSearchIndex, documenterBaseURL, filters) {
+ importScripts(
+ "https://cdn.jsdelivr.net/npm/minisearch@6.1.0/dist/umd/index.min.js"
+ );
+
+ let data = documenterSearchIndex.map((x, key) => {
+ x["id"] = key; // minisearch requires a unique id for each object
+ return x;
+ });
+
+ // list below is the lunr 2.1.3 list minus the intersect with names(Base)
+ // (all, any, get, in, is, only, which) and (do, else, for, let, where, while, with)
+ // ideally we'd just filter the original list but it's not available as a variable
+ const stopWords = new Set([
+ "a",
+ "able",
+ "about",
+ "across",
+ "after",
+ "almost",
+ "also",
+ "am",
+ "among",
+ "an",
+ "and",
+ "are",
+ "as",
+ "at",
+ "be",
+ "because",
+ "been",
+ "but",
+ "by",
+ "can",
+ "cannot",
+ "could",
+ "dear",
+ "did",
+ "does",
+ "either",
+ "ever",
+ "every",
+ "from",
+ "got",
+ "had",
+ "has",
+ "have",
+ "he",
+ "her",
+ "hers",
+ "him",
+ "his",
+ "how",
+ "however",
+ "i",
+ "if",
+ "into",
+ "it",
+ "its",
+ "just",
+ "least",
+ "like",
+ "likely",
+ "may",
+ "me",
+ "might",
+ "most",
+ "must",
+ "my",
+ "neither",
+ "no",
+ "nor",
+ "not",
+ "of",
+ "off",
+ "often",
+ "on",
+ "or",
+ "other",
+ "our",
+ "own",
+ "rather",
+ "said",
+ "say",
+ "says",
+ "she",
+ "should",
+ "since",
+ "so",
+ "some",
+ "than",
+ "that",
+ "the",
+ "their",
+ "them",
+ "then",
+ "there",
+ "these",
+ "they",
+ "this",
+ "tis",
+ "to",
+ "too",
+ "twas",
+ "us",
+ "wants",
+ "was",
+ "we",
+ "were",
+ "what",
+ "when",
+ "who",
+ "whom",
+ "why",
+ "will",
+ "would",
+ "yet",
+ "you",
+ "your",
+ ]);
+
+ let index = new MiniSearch({
+ fields: ["title", "text"], // fields to index for full-text search
+ storeFields: ["location", "title", "text", "category", "page"], // fields to return with results
+ processTerm: (term) => {
+ let word = stopWords.has(term) ? null : term;
+ if (word) {
+ // custom trimmer that doesn't strip @ and !, which are used in julia macro and function names
+ word = word
+ .replace(/^[^a-zA-Z0-9@!]+/, "")
+ .replace(/[^a-zA-Z0-9@!]+$/, "");
+
+ word = word.toLowerCase();
+ }
+
+ return word ?? null;
+ },
+ // add . as a separator, because otherwise "title": "Documenter.Anchors.add!", would not
+ // find anything if searching for "add!", only for the entire qualification
+ tokenize: (string) => string.split(/[\s\-\.]+/),
+ // options which will be applied during the search
+ searchOptions: {
+ prefix: true,
+ boost: { title: 100 },
+ fuzzy: 2,
+ },
+ });
+
+ index.addAll(data);
+
+ /**
+ * Used to map characters to HTML entities.
+ * Refer: https://github.com/lodash/lodash/blob/main/src/escape.ts
+ */
+ const htmlEscapes = {
+ "&": "&amp;",
+ "<": "&lt;",
+ ">": "&gt;",
+ '"': "&quot;",
+ "'": "&#39;",
+ };
+
+ /**
+ * Used to match HTML entities and HTML characters.
+ * Refer: https://github.com/lodash/lodash/blob/main/src/escape.ts
+ */
+ const reUnescapedHtml = /[&<>"']/g;
+ const reHasUnescapedHtml = RegExp(reUnescapedHtml.source);
+
+ /**
+ * Escape function from lodash
+ * Refer: https://github.com/lodash/lodash/blob/main/src/escape.ts
+ */
+ function escape(string) {
+ return string && reHasUnescapedHtml.test(string)
+ ? string.replace(reUnescapedHtml, (chr) => htmlEscapes[chr])
+ : string || "";
+ }
+
+ /**
+ * RegX escape function from MDN
+ * Refer: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping
+ */
+ function escapeRegExp(string) {
+ return string.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string
+ }
+
+ /**
+ * Make the result component given a minisearch result data object and the value
+ * of the search input as queryString. To view the result object structure, refer:
+ * https://lucaong.github.io/minisearch/modules/_minisearch_.html#searchresult
+ *
+ * @param {object} result
+ * @param {string} querystring
+ * @returns string
+ */
+ function make_search_result(result, querystring) {
+ let search_divider = `
`;
+ let display_link =
+ result.location.slice(Math.max(0), Math.min(50, result.location.length)) +
+ (result.location.length > 30 ? "..." : ""); // To cut-off the link because it messes with the overflow of the whole div
+
+ if (result.page !== "") {
+ display_link += ` (${result.page})`;
+ }
+ searchstring = escapeRegExp(querystring);
+ let textindex = new RegExp(`${searchstring}`, "i").exec(result.text);
+ let text =
+ textindex !== null
+ ? result.text.slice(
+ Math.max(textindex.index - 100, 0),
+ Math.min(
+ textindex.index + querystring.length + 100,
+ result.text.length
+ )
+ )
+ : ""; // cut-off text before and after from the match
+
+ text = text.length ? escape(text) : "";
+
+ let display_result = text.length
+ ? "..." +
+ text.replace(
+ new RegExp(`${escape(searchstring)}`, "i"), // For first occurrence
+ '$&'
+ ) +
+ "..."
+ : ""; // highlights the match
+
+ let in_code = false;
+ if (!["page", "section"].includes(result.category.toLowerCase())) {
+ in_code = true;
+ }
+
+ // We encode the full url to escape some special characters which can lead to broken links
+ let result_div = `
+
+
The purpose of this page is to collate a series of checklists for commonly performed changes to the source code of EpiAware. It has been adapted from Documenter.jl.
In each case, copy the checklist into the description of the pull request.
In preparation for a release, use the following checklist. These steps should be performed on a branch with an open pull request, either for a topic branch, or for a new branch release-1.y.z ("Release version 1.y.z") if multiple changes have accumulated on the master branch since the last release.
## Pre-release
+
+ - [ ] Change the version number in `Project.toml`
+ * If the release is breaking, increment MAJOR
+ * If the release adds a new user-visible feature, increment MINOR
+ * Otherwise (bug-fixes, documentation improvements), increment PATCH
+ - [ ] Update `CHANGELOG.md`, following the existing style (in particular, make sure that the change log for this version has the correct version number and date).
+ - [ ] Run `make changelog`, to make sure that all the issue references in `CHANGELOG.md` are up to date.
+ - [ ] Check that the commit messages in this PR do not contain `[ci skip]`
+ - [ ] Run https://github.com/JuliaDocs/Documenter.jl/actions/workflows/regression-tests.yml
+ using a `workflow_dispatch` trigger to check for any changes that broke extensions.
+
+## The release
+
+ - [ ] After merging the pull request, tag the release. There are two options for this:
+
+ 1. [Comment `[at]JuliaRegistrator register` on the GitHub commit.](https://github.com/JuliaRegistries/Registrator.jl#via-the-github-app)
+ 2. Use [JuliaHub's package registration feature](https://help.juliahub.com/juliahub/stable/contribute/#registrator) to trigger the registration.
+
+ Either of those should automatically publish a new version to the Julia registry.
+ - Once registered, the `TagBot.yml` workflow should create a tag, and rebuild the documentation for this tag.
+ - These steps can take quite a bit of time (1 hour or more), so don't be surprised if the new documentation takes a while to appear.
release-* branches are used for tagged minor versions of this package. This follows the same approach used in the main Julia repository, albeit on a much more modest scale.
Please open pull requests against the master branch rather than any of the release-* branches whenever possible.
Bug fixes are backported to the release-* branches using git cherry-pick -x by an EpiAware member and will become available in point releases of that particular minor version of the package.
Feel free to nominate commits that should be backported by opening an issue. Requests for new point releases to be tagged in METADATA.jl can also be made in the same way.
Each new minor version x.y.0 gets a branch called release-x.y (a protected branch).
New versions are usually tagged only from the release-x.y branches.
For patch releases, changes get backported to the release-x.y branch via a single PR with the standard name "Backports for x.y.z" and label "Type: Backport". The PR message links to all the PRs that are providing commits to the backport. The PR gets merged as a merge commit (i.e. not squashed).
The old release-* branches may be removed once they have outlived their usefulness.
Patch version milestones are used to keep track of which PRs get backported etc.
Follow the style of the surrounding text when making changes. When adding new features please try to stick to the following points whenever applicable. This project follows the SciML style guide.
Tests that build example package docs from source and inspect the results (end to end tests) are located in /test/examples. The main entry points are test/examples/make.jl for building and test/examples/test.jl for doing some basic checks on the generated outputs.
Some of the showcase examples in EpiAware/docs/src/showcase use Pluto.jl notebooks for the underlying computation. The output of the notebooks is rendered into HTML for inclusion in the documentation in two steps:
PlutoStaticHTML.jl converts the notebook with output into a machine-readable .md format.
Documenter.jl renders the .md file into HTML for inclusion in the documentation during the build process.
For other examples of using Pluto to generate documentation see the examples shown here.
Committing changes to the Pluto.jl notebooks in the EpiAware documentation is the same as committing changes to any other part of the repository. However, please note that we expect the following features for the environment management of the notebooks:
Use the environment determined by the Project.toml file in the EpiAware/docs directory. If you want extra packages, add them to this environment.
Ensure that the version of EpiAware used in these notebooks is the version of EpiAware on the branch being pull-requested into main. To do this, use the Pkg.develop function.
To do this you can use the following code snippet in the Pluto notebook:
# Determine the relative path to the `EpiAware/docs` directory
+docs_dir = dirname(dirname(dirname(dirname(@__DIR__))))
+# Determine the relative path to the `EpiAware` package directory
+pkg_dir = dirname(docs_dir)
+
+using Pkg: Pkg
+Pkg.activate(docs_dir)
+Pkg.develop(; path = pkg_dir)
+Pkg.instantiate()
Adding a new Pluto.jl notebook to the EpiAware documentation is the same as adding any other file to the repository. However, in addition to following the guidelines for modifying an existing notebook, please ensure that the new notebook is added to the set of notebook builds using build in the EpiAware/docs/make.jl file. This will generate a .md file of the same name as the notebook, which can be rendered when makedocs is run. For this document to be added to the overall documentation, the path to the .md file must be added to the Pages array defined in EpiAware/docs/pages.jl.
This section contains a series of explainers that provide a detailed overview of the EpiAware platform and its features. These explainers are designed to help you understand the platform and its capabilities, and to provide you with the information you need to get started using EpiAware. See the sidebar for the list of explainers.
We support two primary workflows for using the package:
EpiProblem: A high-level interface for defining and fitting models to data. This is the recommended way to use the package.
Turing interface: A lower-level interface for defining and fitting models to data. This is the more flexible way to use the package and may also be more familiar to users of Turing.jl.
See the getting started section for tutorials on each of these workflows.
Each module of the overall epidemiological model we are interested in is a Turing model in its own right. In this section, we compose the individual models into the full epidemiological model using the EpiProblem struct.
The constructor for an EpiProblem requires:
An epi_model.
A latent_model.
An observation_model.
A tspan.
The tspan sets the range of the time index for the models.
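As a hedged sketch, assuming epi, latent and obs are previously constructed epidemic, latent and observation model components (the names are illustrative):
using EpiAware
# Compose the individual components into the full epidemiological model
epi_prob = EpiProblem(epi_model = epi,
    latent_model = latent,
    observation_model = obs,
    tspan = (1, 100))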
The Turing interface is a lower-level interface for defining and fitting models to data. This is the more flexible way to use the package and may also be more familiar to users of Turing.jl.
diff --git a/previews/PR513/getting-started/explainers/intro/index.html b/previews/PR513/getting-started/explainers/intro/index.html
new file mode 100644
index 000000000..303af7057
--- /dev/null
+++ b/previews/PR513/getting-started/explainers/intro/index.html
@@ -0,0 +1,63 @@
+
+Introduction to EpiAware · EpiAware.jl
Julia is a programming language aimed at technical computing. This guide is aimed at helping you set up Julia on your system and pointing towards resources for learning more.
[!NOTE] If you are familiar with other languages with tooling for technical computing (e.g. R, MATLAB, Python), these noteworthy differences may be useful.
This isn't a guide to learning the Julia programming language. Instead, we provide an opinionated guide to setting up your system to use Julia effectively in project workflows, aimed at people who have some familiarity with Julia but have perhaps only developed projects in other languages (e.g. R, MATLAB, Python).
If you want to learn more about the Julia programming language, we recommend the following resources:
Download Juliaup: This is a cross-platform installer/updater for the Julia programming language. It simplifies the process of installing and managing Julia versions. Go to the Juliaup GitHub repository or to the official Julia website for installation instructions.
Verify Installation: Open a terminal (or Command Prompt on Windows) and type julia to start the Julia REPL (Read-Eval-Print Loop). You should see a Julia prompt julia>.
Juliaup is a tool for managing Julia installations on your system. It allows you to install, update, and switch between different versions of Julia. Details are available at the Juliaup GitHub repository, but here are some examples of common commands:
By default, Juliaup installs the latest release version of Julia. To install a specific version, use the add command followed by the version number. For example, to install Julia version 1.9.3, use the following command:
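% juliaup add 1.9.3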
To switch between different versions of Julia, add +<version> after the julia command. For example, to use Julia version 1.9.3, use the following command:
% julia +1.9.3
This will use the specified version of Julia for the current REPL session. In general, adding the +<version> flag after the julia command will execute using the specified version of Julia.
The environment of a Julia project determines which packages, and their versions, are available to the project. This is useful when you want to ensure that a project uses a specific version of a package, or when you want to isolate the project from other projects on your system. As in other languages, Julia environments are useful for managing dependencies and ensuring reproducibility.
The most common usage of environments is to create a new explicit environment for a project in a directory. This creates a Project.toml file in the directory that specifies the dependencies for the project and a Manifest.toml file that specifies the exact versions of the dependencies, and their underlying dependencies. We'll discuss how to set up a new environment for a project in the REPL section.
Julia environments can be stacked. This means that you can have a primary environment embedded in the stacked environment, along with secondary environment(s) that define common packages to be available to many projects.
From a project development point of view, the project environment will most commonly be the primary environment, isolated from other project environments, and the environment of the Julia version installation (e.g. the @v1.10 env) will be a secondary environment because it is in the default LOAD_PATH Julia environment variable. You can add packages that you want to be available to all projects to the Julia version environment, as we'll show in the REPL section. See the section Recommended packages for the primary Julia environment for our recommendations.
The Julia REPL (Read-Eval-Print Loop) is an interactive programming environment that takes single user inputs (i.e., single expressions), evaluates them, and returns the result to the user.
Julia has a built-in package manager called Pkg, which is documented briefly here and in more detail here. The package manager is used to install, update, and manage Julia packages and environments.
You can use Pkg programmatically as a normal Julia package, which is often done in scripts. For example, if we wanted to install the OrdinaryDiffEq package as part of executing a julia script, we would add the following lines to the script:
using Pkg
+Pkg.add("OrdinaryDiffEq")
However, you can also use the package manager interactively from the REPL. In our opinion, this is the more common usage of package management in Julia project development.
For example, to install the OrdinaryDiffEq package from the REPL you can switch to package mode by typing ] and then type add OrdinaryDiffEq. To exit package mode, type backspace.
julia> ]
+(@v1.10) pkg> add OrdinaryDiffEq
This workflow is often more convenient than the programmatic interface, especially when adding packages to the environment of your Julia installation, e.g. the @v1.10 environment for Julia 1.10.
By default, the environment for a julia installation is stacked as a primary environment, so that the packages you install in the julia installation environment are available to all projects.
To set a new active project environment, you can use the Pkg package manager from the REPL with the activate command and a local directory path. The project environment is named after the directory hosting the Project.toml file. After activating the project environment, you can add and manage packages in the project environment, as well as use packages from the primary stacked environment as described above.
Here is an example of how you can create a new environment for a project when the REPL working directory is in some directory /myproject, and then add OrdinaryDiffEq to the project environment:
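julia> ]
(@v1.10) pkg> activate .
(myproject) pkg> add OrdinaryDiffEq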
It is quite common to want to experiment with new Julia packages and code snippets. A convenient way to do this without setting up a new project environment or adding dependencies to the primary environment is to use a temporary environment. To do this:
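julia> ]
(@v1.10) pkg> activate --temp
(jl_ABC123) pkg> add OrdinaryDiffEq
(The jl_... name in the prompt is a randomly named temporary project; the exact name will differ.)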
This will create a temporary environment, stacked with the primary environment, that is not saved to disk, and you can add packages to this environment without affecting the primary environment or any project environments. When you exit the REPL, the temporary environment will be deleted.
Revise and Term are useful to have available in every Julia session. It is convenient to have these packages loaded automatically when you start a Julia session by adding a startup.jl file. This file should be located in the ~/.julia/config directory. Here is an example of a startup.jl file that loads Revise and Term:
atreplinit() do repl
+ # Load Revise if it is installed
+ try
+ @eval using Revise
+ catch e
+ @warn "error while importing Revise" e
+ end
+ # Load Term if it is installed
+ try
+ @eval using Term
+ @eval install_term_repr()
+ @eval install_term_stacktrace()
+ catch e
+ @warn "error while importing Term" e
+ end
+end
+
Visual Studio Code (VS-Code) is a popular code editor that supports Julia development. The Julia extension for VS-Code provides an interactive development environment that will be familiar to users of other scientific IDEs (e.g. developing R projects in RStudio or using the MATLAB application).
Julia REPL: The Julia extension provides an integrated REPL in the TERMINAL pane that allows you to interact with Julia code directly from the editor. For example, you can run highlighted code snippets, or code blocks delimited by ## comments, directly from your scripts.
Plotting: By default, plots generated by featured plotting packages (e.g. Plots.jl) will be displayed in a Plot pane generated by the VS-Code editor.
Julia Tab: The Julia extension provides a Julia tab with the following sub-tabs:
Workspace: This allows you to inspect the modules, functions and variables in your current REPL session. For variables that can be understood as a Table, you can view them in a tabular format from the workspace tab.
Documentation: This allows you to view the documentation for functions and types in the Julia standard library and any packages you have installed.
Plot Navigator: This allows you to navigate the plots generated by the featured plotting packages.
Testing: The Julia extension connects the Testing tab in VS-Code with Julia tests defined using the @testitem macro from the TestItems package and run with TestItemRunner.
Other standard IDE features include code completion, code linting, code formatting, debugging, and profiling.
The settings of the Julia extension can be found by accessing Preferences: Open User Settings from the command palette in VS-Code and then searching for Julia.
We recommend the following workspace settings, saved in a file .vscode/settings.json relative to your working directory:
These settings set basic code formatting and whitespace settings for Julia files, as well as setting the path to the test file for the project and the path to the project directory for the environment.
The VS-Code command Julia: Start REPL will start a REPL in TERMINAL tab in the editor with the environment set to the project directory and the Testing tab will detect the defined tests for the project.
It's common to develop technical computing projects using a literate programming style, where code and documentation are interwoven. Julia supports this style of programming through a number of packages. In EpiAware we recommend the following:
Pluto: A native Julia notebook for interactive development. Pluto notebooks are reactive, meaning that the output of all cells are updated as input changes. Installation instructions are available here. Pluto notebook files have the extension .jl and can be run as scripts.
Quarto: A literate programming tool that allows you to write documents in markdown with embedded Julia code. Installation instructions are available here. Quarto files have the extension .qmd.
We use Pluto for interactive development and Quarto for generating reports and academic articles. Both tools are useful for developing reproducible workflows.
This page contains a list of frequently asked questions about the EpiAware package. If you have a question that is not answered here, please open a discussion on the GitHub repository.
In some of the showcase examples in EpiAware/docs/src/showcase we use Pluto.jl notebooks for the underlying computation. As well as reading the code blocks and output of the notebooks in this documentation, you can also run these notebooks by cloning EpiAware and running the notebooks with Pluto.jl (for further details see developer notes).
It should be noted that Pluto.jl notebooks are reactive, meaning that they re-run downstream code after changes, with the downstream dependencies determined by a tree of dependent code blocks. This is different from the standard Julia REPL and some other notebook formats (e.g. .ipynb). In Pluto, each code block is either a single line of code or is encapsulated by let ... end or begin ... end. The difference between let ... end blocks and begin ... end blocks is that a let ... end block only adds the final output/return value of the block to scope, like an anonymous function, whereas begin ... end executes each line and adds all defined variables to scope.
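For example, a minimal illustration of the scoping difference:
x = let
    a = 1
    a + 1   # only the value of the last expression escapes (as x); a stays local
end

begin
    b = 1
    c = b + 1   # both b and c are added to the notebook's scope
end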
For installation instructions and more information and documentation on Pluto.jl see the Pluto.jl documentation.
One of the key features of EpiAware is the ability to specify models as components of a larger model. This is useful for specifying models that are shared across multiple EpiProblems or for specifying models that are used in multiple methods. You can see examples of this approach in our showcases.
An alternative to modular model construction is to remake models with different parameters. This can be useful for comparing models with different parameters or for comparing models with different priors. Whilst we don't have a built-in function for this, we recommend the Accessors.jl package for this purpose. For examples of how to use this package see the documentation.
Whilst Turing.jl is the front end of the Turing ecosystem, it is not the only package that can be used to work with Turing.jl models. DynamicPPL.jl is the part of the ecosystem that deals with defining, running, and manipulating models.
DynamicPPL supports the condition function (aliased with |) to fix values as known observations in the model (i.e. fixing values on the left-hand side of ~ definitions). This is useful for fixing parameters to known values or for conditioning the model on data. The decondition function can be used to remove these conditions. Internally, this is what apply_method(::EpiProblem, ...) does to condition the user-supplied EpiProblem on data. See more here.
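As a minimal sketch with a hypothetical toy model (assuming a recent Turing version, which re-exports condition and decondition from DynamicPPL):
using Turing

@model function toy_model()
    σ ~ truncated(Normal(0, 1); lower = 0)
    y ~ Normal(0, σ)
end

mdl = toy_model()
conditioned_mdl = mdl | (y = 1.5,)           # treat y as an observed value
original_mdl = decondition(conditioned_mdl)  # remove the conditioning again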
Similarly to conditioning and deconditioning models, DynamicPPL supports fixing and unfixing models via the fix and unfix functions. Fixing is essentially saying that variables are constants (i.e. replacing the right-hand side of ~ with a value and changing the ~ to a =). A common use of this would be to simplify a prespecified model, for example making the variance of a random walk known rather than estimated from the data. We also use this functionality in apply_method(::EpiProblem, ...) to allow users to simplify EpiProblems on the fly. See more here.
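Continuing the hypothetical toy model above (again assuming fix and unfix are re-exported by Turing):
fixed_mdl = fix(toy_model(), (σ = 0.5,))   # σ is now a constant rather than sampled
unfixed_mdl = unfix(fixed_mdl)             # σ is sampled from its prior again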
MCMCChains.jl is the package from which the MCMCChains module is imported. It provides a number of useful functions for working with Chains objects. These include functions for summarising, plotting, and manipulating chains. Below is a list of some of the most useful functions.
plot: Plots trace and density plots for each parameter in the chain object.
histogram: Plots histograms for each parameter in the chain object by chain.
get: Accesses the values of a parameter/s in the chain object.
DataFrames.DataFrame: Converts a chain into a wide-format DataFrame.
describe: Prints the summary statistics of the chain object.
There are many more functions available in the MCMCChains.jl package. For a full list of functions, see the documentation.
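A minimal sketch of these functions, assuming chn is a Chains object returned by Turing's sample and mu is a parameter name in that chain (StatsPlots provides the plotting recipes):
using MCMCChains, DataFrames, StatsPlots

describe(chn)            # summary statistics and quantiles per parameter
plot(chn)                # trace and density plots
histogram(chn)           # histograms per parameter, split by chain
df = DataFrame(chn)      # wide-format DataFrame of the draws
mu_draws = get(chn, :mu) # access the draws for the (hypothetical) parameter mu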
An alternative to MCMCChain.jl is the ArviZ.jl package. ArviZ.jl is a Julia meta-package for exploratory analysis of Bayesian models. It is part of the ArviZ project, which also includes a related Python package.
ArviZ.jl uses an InferenceData object to store the results of a Bayesian analysis. This object can be created from an MCMCChains object using the from_mcmcchains function. The InferenceData object can then be used to create a range of plots and summaries of the model. This is particularly useful as it allows you to specify the indexes of your parameters (for example, you could use dates for time parameters).
In addition to this, from_mcmcchains can also be used to combine posterior predictions with prior predictions, prior information, and the log likelihood of the model (see here for an example). This unlocks a range of useful diagnostics and plots that can be used to assess the model.
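A hedged sketch, where posterior_chn, prior_chn and posterior_pred_chn are hypothetical Chains objects from Turing:
using ArviZ

# Bundle posterior, prior and posterior-predictive draws into one InferenceData
idata = from_mcmcchains(
    posterior_chn;
    prior = prior_chn,
    posterior_predictive = posterior_pred_chn,
    library = "Turing"
)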
There is a lot of functionality in ArviZ.jl and it is worth exploring the documentation to see what is available.
Note that this section of the documentation is still under construction. Please see replications for the most up-to-date information. Please feel free to contribute to the documentation by submitting a pull request.
Welcome to the EpiAware documentation! This section is designed to help you get started with the package. It includes a frequently asked questions (FAQ) section, a series of explainers that provide a detailed overview of the platform and its features, and tutorials that will help you get started with EpiAware for specific tasks. See the sidebar for the list of topics.
Eventually, EpiAware is likely to be added to the Julia registry. Until then, you can install it from the /EpiAware sub-directory of this repository by running the following command in the Julia REPL:
using Pkg; Pkg.add(url="https://github.com/CDCgov/Rt-without-renewal", subdir="EpiAware")
Get up and running with EpiAware in just a few minutes using this quickstart guide.
diff --git a/previews/PR513/getting-started/tutorials/censored-obs.jl b/previews/PR513/getting-started/tutorials/censored-obs.jl
new file mode 100644
index 000000000..92a40ebdb
--- /dev/null
+++ b/previews/PR513/getting-started/tutorials/censored-obs.jl
@@ -0,0 +1,373 @@
+### A Pluto.jl notebook ###
+# v0.20.0
+
+using Markdown
+using InteractiveUtils
+
+# ╔═╡ a2624404-48b1-4faa-abbe-6d78b8e04f2b
+let
+ docs_dir = dirname(dirname(dirname(@__DIR__)))
+ using Pkg: Pkg
+ Pkg.activate(docs_dir)
+ Pkg.instantiate()
+end
+
+# ╔═╡ 5baa8d2e-bcf8-4e3b-b007-175ad3e2ca95
+begin
+ using EpiAware.EpiAwareUtils: censored_pmf, censored_cdf, ∫F
+ using Random, Distributions, StatsBase #utilities for random events
+ using DataFramesMeta #Data wrangling
+ using CairoMakie, PairPlots #plotting
+ using Turing #PPL
+end
+
+# ╔═╡ 8de5c5e0-6e95-11ef-1693-bfd465c8d919
+md"
+# Fitting distributions using `EpiAware` and Turing PPL
+
+## Introduction
+
+### What are we going to do in this Vignette
+
+In this vignette, we'll demonstrate how to use the CDF function for censored delay distributions `EpiAwareUtils.∫F`, which underlies `EpiAwareUtils.censored_pmf` in conjunction with the Turing PPL for Bayesian inference of epidemiological delay distributions. We'll cover the following key points:
+
+1. Simulating censored delay distribution data
+2. Fitting a naive model using Turing
+3. Evaluating the naive model's performance
+4. Fitting an improved model using censored delay functionality from `EpiAware`.
+5. Comparing the censored delay model's performance to the naive model
+
+### What might I need to know before starting
+
+This note builds on the concepts introduced in the R/stan package [`primarycensoreddist`](https://github.com/epinowcast/primarycensoreddist), especially the [Fitting distributions using primarycensoreddist and cmdstan](https://primarycensoreddist.epinowcast.org/articles/fitting-dists-with-stan.html) vignette and assumes familiarity with using Turing tools as covered in the [Turing documentation](https://turinglang.org/).
+
+This note is generated using the `EpiAware` package locally via `Pkg.develop`, in the `EpiAware/docs` environment. It is also possible to install `EpiAware` using
+
+```julia
+Pkg.add(url=\"https://github.com/CDCgov/Rt-without-renewal\", subdir=\"EpiAware\")
+```
+### Packages used in this vignette
+As well as `EpiAware` and `Turing` we will use `Makie` ecosystem packages for plotting and `DataFramesMeta` for data manipulation.
+"
+
+# ╔═╡ 30dd9af4-b64f-42b1-8439-a890752f68e3
+md"
+The other dependencies are as follows:
+"
+
+# ╔═╡ c5704f67-208d-4c2e-8513-c07c6b94ca99
+md"
+## Simulating censored and truncated delay distribution data
+
+We'll start by simulating some censored and truncated delay distribution data. We’ll define a `rpcens` function for generating data.
+"
+
+# ╔═╡ aed124c7-b4ba-4c97-a01f-ff553f376c86
+Random.seed!(123) # For reproducibility
+
+# ╔═╡ ec5ed3e9-6ea9-4cfe-afd2-82aabbbe8130
+md"Define the true distribution parameters"
+
+# ╔═╡ 105b9594-36ce-4ae8-87a8-5c81867b1ce3
+n = 2000
+
+# ╔═╡ 8aa9f9c1-d3c4-49f3-be18-a400fc71e8f7
+meanlog = 1.5
+
+# ╔═╡ 84bb3999-9f2b-4eaa-9c2d-776a86677eaf
+sdlog = 0.75
+
+# ╔═╡ 2bf6677e-ebe9-4aa8-aa91-f631e99669bb
+true_dist = LogNormal(meanlog, sdlog)
+
+# ╔═╡ f4083aea-8106-401a-b60f-383d0b94102a
+md"Generate varying pwindow, swindow, and obs_time lengths
+"
+
+# ╔═╡ aea8b28e-fffe-4aa6-b51e-8199a7c7975c
+pwindows = rand(1:2, n)
+
+# ╔═╡ 4d3a853d-0b8d-402a-8309-e9f6da2b7a8c
+swindows = rand(1:2, n)
+
+# ╔═╡ 7522f05b-1750-4983-8947-ef70f4298d06
+obs_times = rand(8:10, n)
+
+# ╔═╡ 5eac2f60-8cec-4460-9d10-6bade7f0f406
+md"
+We recreate the primary censored sampling function from `primarycensoreddist`, c.f. documentation [here](https://primarycensoreddist.epinowcast.org/reference/rprimarycensoreddist.html).
+"
+
+# ╔═╡ 9443b893-9e22-4267-9a1f-319a3adb8c0d
+"""
+ function rpcens(dist; pwindow = 1, swindow = 1, D = Inf, max_tries = 1000)
+
+Does a truncated censored sample from `dist` with a uniform primary time on `[0, pwindow]`.
+"""
+function rpcens(dist; pwindow = 1, swindow = 1, D = Inf, max_tries = 1000)
+ T = zero(eltype(dist))
+ invalid_sample = true
+ attempts = 1
+ while (invalid_sample && attempts <= max_tries)
+ X = rand(dist)
+ U = rand() * pwindow
+ T = X + U
+ attempts += 1
+ if X + U < D
+ invalid_sample = false
+ end
+ end
+
+ @assert !invalid_sample "censored value not found in $max_tries attempts"
+
+ return (T ÷ swindow) * swindow
+end
+
+# ╔═╡ a4f5e9b6-ff3a-48fa-aa51-0abccb9c7bed
+#Sample secondary time relative to beginning of primary censor window respecting the right-truncation
+samples = map(pwindows, swindows, obs_times) do pw, sw, ot
+ rpcens(true_dist; pwindow = pw, swindow = sw, D = ot)
+end
+
+# ╔═╡ 2a9da9e5-0925-4ae0-8b70-8db90903cb0b
+md"
+Aggregate to unique combinations and count occurrences
+"
+
+# ╔═╡ 0b5e96eb-9312-472e-8a88-d4509a4f25d0
+delay_counts = mapreduce(vcat, pwindows, swindows, obs_times, samples) do pw, sw, ot, s
+ DataFrame(
+ pwindow = pw,
+ swindow = sw,
+ obs_time = ot,
+ observed_delay = s,
+ observed_delay_upper = s + sw
+ )
+end |>
+ df -> @groupby(df, :pwindow, :swindow, :obs_time, :observed_delay,
+ :observed_delay_upper) |>
+ gd -> @combine(gd, :n=length(:pwindow))
+
+# ╔═╡ c0cce80f-dec7-4a55-aefd-339ef863f854
+md"
+Compare the samples with and without secondary censoring to the true distribution and calculate empirical CDF
+"
+
+# ╔═╡ a7bff47d-b61f-499e-8631-206661c2bdc0
+empirical_cdf = ecdf(samples)
+
+# ╔═╡ 16bcb80a-970f-4633-aca2-261fa04172f7
+empirical_cdf_obs = ecdf(delay_counts.observed_delay, weights = delay_counts.n)
+
+# ╔═╡ 60711c3c-266e-42b5-acc6-6624db294f24
+x_seq = range(minimum(samples), maximum(samples), 100)
+
+# ╔═╡ 1f1bcee4-8e0d-46fb-9a6f-41998bf54957
+theoretical_cdf = x_seq |> x -> cdf(true_dist, x)
+
+# ╔═╡ 59bb2a18-eaf4-438a-9359-341efadfe897
+let
+ f = Figure()
+ ax = Axis(f[1, 1],
+ title = "Comparison of Observed vs Theoretical CDF",
+ ylabel = "Cumulative Probability",
+ xlabel = "Delay"
+ )
+ lines!(
+ ax, x_seq, empirical_cdf_obs, label = "Empirical CDF", color = :blue, linewidth = 2)
+ lines!(ax, x_seq, theoretical_cdf, label = "Theoretical CDF",
+ color = :black, linewidth = 2)
+ vlines!(ax, [mean(samples)], color = :blue, linestyle = :dash,
+ label = "Empirical mean", linewidth = 2)
+ vlines!(ax, [mean(true_dist)], linestyle = :dash,
+ label = "Theoretical mean", color = :black, linewidth = 2)
+ axislegend(position = :rb)
+
+ f
+end
+
+# ╔═╡ f66d4b2e-ed66-423e-9cba-62bff712862b
+md"
+We've aggregated the data to unique combinations of `pwindow`, `swindow`, and `obs_time` and counted the number of occurrences of each `observed_delay` for each combination. This is the data we will use to fit our model.
+"
+
+# ╔═╡ 010ebe37-782b-4a35-bf5c-dca6dc0fee45
+md"
+## Fitting a naive model using Turing
+
+We'll start by fitting a naive model using NUTS from `Turing`. We define the model in the `Turing` PPL.
+"
+
+# ╔═╡ d9d14c48-8700-42b5-89b4-7fc51d0f577c
+@model function naive_model(N, y, n)
+ mu ~ Normal(1.0, 1.0)
+ sigma ~ truncated(Normal(0.5, 1.0); lower = 0.0)
+ d = LogNormal(mu, sigma)
+
+ for i in eachindex(y)
+ Turing.@addlogprob! n[i] * logpdf(d, y[i])
+ end
+end
+
+# ╔═╡ 8a7cd9ec-5640-4f5f-84c3-ae3f465ca68b
+md"
+Now let's instantiate this model with data
+"
+
+# ╔═╡ 028ade5c-17bd-4dfc-8433-23aaff02c181
+naive_mdl = naive_model(
+ size(delay_counts, 1),
+ delay_counts.observed_delay .+ 1e-6, # Add a small constant to avoid log(0)
+ delay_counts.n)
+
+# ╔═╡ 04b4eefb-f0f9-4887-8db0-7cbb7f3b169b
+md"
+and now let's fit the compiled model.
+"
+
+# ╔═╡ 21655344-d12b-4e47-a9a9-d06bd909f6ea
+naive_fit = sample(naive_mdl, NUTS(), MCMCThreads(), 500, 4)
+
+# ╔═╡ 3b89fe00-6aaf-4764-8b29-e71479f1e641
+summarize(naive_fit)
+
+# ╔═╡ 8e09d931-fca7-4ac2-81f7-2bc36b0174f3
+let
+ f = pairplot(naive_fit)
+ vlines!(f[1, 1], [meanlog], linewidth = 4)
+ vlines!(f[2, 2], [sdlog], linewidth = 4)
+ f
+end
+
+# ╔═╡ 43eac8dd-8f1d-440e-b1e8-85db9e740651
+md"
+We see that the model has converged and the diagnostics look good. However, just from the model posterior summary we see that we might not be very happy with the fit. `mu` is smaller than the target $(meanlog) and `sigma` is larger than the target $(sdlog).
+
+"
+
+# ╔═╡ b2efafab-8849-4a7a-bb64-ac9ce126ca75
+md"
+## Fitting an improved model using censoring utilities
+
+We'll now fit an improved model using the `∫F` function from `EpiAware.EpiAwareUtils` for calculating the CDF of the _total delay_ from the beginning of the primary window to the secondary event time. This includes both the delay distribution we are making inference on and the time between the start of the primary censor window and the primary event.
+The `∫F` function underlies `censored_pmf` function from the `EpiAware.EpiAwareUtils` submodule.
+
+Using the `∫F` function we can write a log-pmf function `primary_censored_dist_lpmf` that accounts for:
+- The primary and secondary censoring windows, which can vary in length.
+- The effect of right truncation in biasing our observations.
+
+This is the analog function to the function of the same name in `primarycensoreddist`: it calculates the log-probability of the secondary event occurring in the secondary censoring window conditional on the primary event occurring in the primary censoring window by calculating the increase in the CDF over the secondary window and rescaling by the probability of the secondary event occurring within the maximum observation time `D`.
+"
+
+# ╔═╡ 348fc3b4-073b-4997-ae50-58ede5d6d0c9
+function primary_censored_dist_lpmf(dist, y, pwindow, y_upper, D)
+ if y == 0.0
+ return log(∫F(dist, y_upper, pwindow)) - log(∫F(dist, D, pwindow))
+ else
+ return log(∫F(dist, y_upper, pwindow) - ∫F(dist, y, pwindow)) -
+ log(∫F(dist, D, pwindow))
+ end
+end
+
+# ╔═╡ cefb5d56-fecd-4de7-bd0e-156be91c705c
+md"
+We make a new `Turing` model that now uses `primary_censored_dist_lpmf` rather than the naive uncensored and untruncated `logpdf`.
+"
+
+# ╔═╡ ef40112b-f23e-4d4b-8a7d-3793b786f472
+@model function primarycensoreddist_model(y, y_upper, n, pws, Ds)
+ mu ~ Normal(1.0, 1.0)
+ sigma ~ truncated(Normal(0.5, 0.5); lower = 0.0)
+ dist = LogNormal(mu, sigma)
+
+ for i in eachindex(y)
+ Turing.@addlogprob! n[i] * primary_censored_dist_lpmf(
+ dist, y[i], pws[i], y_upper[i], Ds[i])
+ end
+end
+
+# ╔═╡ b823d824-419d-41e9-9ac9-2c45ef190acf
+md"
+Let's instantiate this model with data
+"
+
+# ╔═╡ 93bca93a-5484-47fa-8424-7315eef15e37
+primarycensoreddist_mdl = primarycensoreddist_model(
+ delay_counts.observed_delay,
+ delay_counts.observed_delay_upper,
+ delay_counts.n,
+ delay_counts.pwindow,
+ delay_counts.obs_time
+)
+
+# ╔═╡ d5144247-eb57-48bf-8e32-fd71167ecbc8
+md"Now let’s fit the compiled model."
+
+# ╔═╡ 7ae6c61d-0e33-4af8-b8d2-e31223a15a7c
+primarycensoreddist_fit = sample(
+ primarycensoreddist_mdl, NUTS(), MCMCThreads(), 1000, 4)
+
+# ╔═╡ 1210443f-480f-4e9f-b195-d557e9e1fc31
+summarize(primarycensoreddist_fit)
+
+# ╔═╡ b2376beb-dd7b-442d-9ff5-ac864e75366b
+let
+ f = pairplot(primarycensoreddist_fit)
+ CairoMakie.vlines!(f[1, 1], [meanlog], linewidth = 3)
+ CairoMakie.vlines!(f[2, 2], [sdlog], linewidth = 3)
+ f
+end
+
+# ╔═╡ 673b47ec-b333-45e8-9557-9e65ad425c35
+md"
+We see that the model has converged and the diagnostics look good. We also see that the posterior means are very near the true parameters and the 90% credible intervals include the true parameters.
+"
+
+# ╔═╡ Cell order:
+# ╟─8de5c5e0-6e95-11ef-1693-bfd465c8d919
+# ╠═a2624404-48b1-4faa-abbe-6d78b8e04f2b
+# ╟─30dd9af4-b64f-42b1-8439-a890752f68e3
+# ╠═5baa8d2e-bcf8-4e3b-b007-175ad3e2ca95
+# ╟─c5704f67-208d-4c2e-8513-c07c6b94ca99
+# ╠═aed124c7-b4ba-4c97-a01f-ff553f376c86
+# ╟─ec5ed3e9-6ea9-4cfe-afd2-82aabbbe8130
+# ╠═105b9594-36ce-4ae8-87a8-5c81867b1ce3
+# ╠═8aa9f9c1-d3c4-49f3-be18-a400fc71e8f7
+# ╠═84bb3999-9f2b-4eaa-9c2d-776a86677eaf
+# ╠═2bf6677e-ebe9-4aa8-aa91-f631e99669bb
+# ╟─f4083aea-8106-401a-b60f-383d0b94102a
+# ╠═aea8b28e-fffe-4aa6-b51e-8199a7c7975c
+# ╠═4d3a853d-0b8d-402a-8309-e9f6da2b7a8c
+# ╠═7522f05b-1750-4983-8947-ef70f4298d06
+# ╟─5eac2f60-8cec-4460-9d10-6bade7f0f406
+# ╠═9443b893-9e22-4267-9a1f-319a3adb8c0d
+# ╠═a4f5e9b6-ff3a-48fa-aa51-0abccb9c7bed
+# ╟─2a9da9e5-0925-4ae0-8b70-8db90903cb0b
+# ╠═0b5e96eb-9312-472e-8a88-d4509a4f25d0
+# ╟─c0cce80f-dec7-4a55-aefd-339ef863f854
+# ╠═a7bff47d-b61f-499e-8631-206661c2bdc0
+# ╠═16bcb80a-970f-4633-aca2-261fa04172f7
+# ╠═60711c3c-266e-42b5-acc6-6624db294f24
+# ╠═1f1bcee4-8e0d-46fb-9a6f-41998bf54957
+# ╠═59bb2a18-eaf4-438a-9359-341efadfe897
+# ╟─f66d4b2e-ed66-423e-9cba-62bff712862b
+# ╠═010ebe37-782b-4a35-bf5c-dca6dc0fee45
+# ╠═d9d14c48-8700-42b5-89b4-7fc51d0f577c
+# ╟─8a7cd9ec-5640-4f5f-84c3-ae3f465ca68b
+# ╠═028ade5c-17bd-4dfc-8433-23aaff02c181
+# ╟─04b4eefb-f0f9-4887-8db0-7cbb7f3b169b
+# ╠═21655344-d12b-4e47-a9a9-d06bd909f6ea
+# ╠═3b89fe00-6aaf-4764-8b29-e71479f1e641
+# ╠═8e09d931-fca7-4ac2-81f7-2bc36b0174f3
+# ╟─43eac8dd-8f1d-440e-b1e8-85db9e740651
+# ╟─b2efafab-8849-4a7a-bb64-ac9ce126ca75
+# ╠═348fc3b4-073b-4997-ae50-58ede5d6d0c9
+# ╟─cefb5d56-fecd-4de7-bd0e-156be91c705c
+# ╠═ef40112b-f23e-4d4b-8a7d-3793b786f472
+# ╟─b823d824-419d-41e9-9ac9-2c45ef190acf
+# ╠═93bca93a-5484-47fa-8424-7315eef15e37
+# ╟─d5144247-eb57-48bf-8e32-fd71167ecbc8
+# ╠═7ae6c61d-0e33-4af8-b8d2-e31223a15a7c
+# ╠═1210443f-480f-4e9f-b195-d557e9e1fc31
+# ╠═b2376beb-dd7b-442d-9ff5-ac864e75366b
+# ╟─673b47ec-b333-45e8-9557-9e65ad425c35
diff --git a/previews/PR513/getting-started/tutorials/censored-obs/index.html b/previews/PR513/getting-started/tutorials/censored-obs/index.html
new file mode 100644
index 000000000..816e59523
--- /dev/null
+++ b/previews/PR513/getting-started/tutorials/censored-obs/index.html
@@ -0,0 +1,351 @@
+
+Fitting distributions with censored data · EpiAware.jl
Fitting distributions using EpiAware and Turing PPL
Introduction
What are we going to do in this Vignette
In this vignette, we'll demonstrate how to use the CDF function for censored delay distributions EpiAwareUtils.∫F, which underlies EpiAwareUtils.censored_pmf in conjunction with the Turing PPL for Bayesian inference of epidemiological delay distributions. We'll cover the following key points:
Simulating censored delay distribution data
Fitting a naive model using Turing
Evaluating the naive model's performance
Fitting an improved model using censored delay functionality from EpiAware.
Comparing the censored delay model's performance to the naive model
This note is generated using the EpiAware package locally via Pkg.develop, in the EpiAware/docs environment. It is also possible to install EpiAware using
As well as EpiAware and Turing we will use Makie ecosystem packages for plotting and DataFramesMeta for data manipulation.
+
+
let
+ docs_dir = dirname(dirname(dirname(@__DIR__)))
+ using Pkg: Pkg
+ Pkg.activate(docs_dir)
+ Pkg.instantiate()
+end
+
+
+
+
The other dependencies are as follows:
+
+
begin
+ using EpiAware.EpiAwareUtils: censored_pmf, censored_cdf, ∫F
+ using Random, Distributions, StatsBase #utilities for random events
+ using DataFramesMeta #Data wrangling
+ using CairoMakie, PairPlots #plotting
+ using Turing #PPL
+end
We recreate the primary censored sampling function from primarycensoreddist, c.f. documentation here.
+
+
"""
+ function rpcens(dist; pwindow = 1, swindow = 1, D = Inf, max_tries = 1000)
+
+Does a truncated censored sample from `dist` with a uniform primary time on `[0, pwindow]`.
+"""
+function rpcens(dist; pwindow = 1, swindow = 1, D = Inf, max_tries = 1000)
+ T = zero(eltype(dist))
+ invalid_sample = true
+ attempts = 1
+ while (invalid_sample && attempts <= max_tries)
+ X = rand(dist)
+ U = rand() * pwindow
+ T = X + U
+ attempts += 1
+ if X + U < D
+ invalid_sample = false
+ end
+ end
+
+ @assert !invalid_sample "censored value not found in $max_tries attempts"
+
+ return (T ÷ swindow) * swindow
+end
+
+
+
#Sample secondary time relative to beginning of primary censor window respecting the right-truncation
+samples = map(pwindows, swindows, obs_times) do pw, sw, ot
+ rpcens(true_dist; pwindow = pw, swindow = sw, D = ot)
+end
We've aggregated the data to unique combinations of pwindow, swindow, and obs_time and counted the number of occurrences of each observed_delay for each combination. This is the data we will use to fit our model.
We'll start by fitting a naive model using NUTS from Turing. We define the model in the Turing PPL.
+
+
@model function naive_model(N, y, n)
+ mu ~ Normal(1.0, 1.0)
+ sigma ~ truncated(Normal(0.5, 1.0); lower = 0.0)
+ d = LogNormal(mu, sigma)
+
+ for i in eachindex(y)
+ Turing.@addlogprob! n[i] * logpdf(d, y[i])
+ end
+end
+
naive_model (generic function with 2 methods)
+
+
+
Now let's instantiate this model with data
+
+
naive_mdl = naive_model(
+ size(delay_counts, 1),
+ delay_counts.observed_delay .+ 1e-6, # Add a small constant to avoid log(0)
+ delay_counts.n)
let
+ f = pairplot(naive_fit)
+ vlines!(f[1, 1], [meanlog], linewidth = 4)
+ vlines!(f[2, 2], [sdlog], linewidth = 4)
+ f
+end
+
+
+
+
We see that the model has converged and the diagnostics look good. However, just from the model posterior summary we see that we might not be very happy with the fit. mu is smaller than the target 1.5 and sigma is larger than the target 0.75.
We'll now fit an improved model using the ∫F function from EpiAware.EpiAwareUtils for calculating the CDF of the total delay from the beginning of the primary window to the secondary event time. This includes both the delay distribution we are making inference on and the time between the start of the primary censor window and the primary event. The ∫F function underlies censored_pmf function from the EpiAware.EpiAwareUtils submodule.
Using the ∫F function we can write a log-pmf function primary_censored_dist_lpmf that accounts for:
The primary and secondary censoring windows, which can vary in length.
The effect of right truncation in biasing our observations.
This is the analog function to the function of the same name in primarycensoreddist: it calculates the log-probability of the secondary event occurring in the secondary censoring window conditional on the primary event occurring in the primary censoring window by calculating the increase in the CDF over the secondary window and rescaling by the probability of the secondary event occurring within the maximum observation time D.
+
+
function primary_censored_dist_lpmf(dist, y, pwindow, y_upper, D)
+ if y == 0.0
+ return log(∫F(dist, y_upper, pwindow)) - log(∫F(dist, D, pwindow))
+ else
+ return log(∫F(dist, y_upper, pwindow) - ∫F(dist, y, pwindow)) -
+ log(∫F(dist, D, pwindow))
+ end
+end
+
primary_censored_dist_lpmf (generic function with 1 method)
+
+
+
We make a new Turing model that now uses primary_censored_dist_lpmf rather than the naive uncensored and untruncated logpdf.
+
+
@model function primarycensoreddist_model(y, y_upper, n, pws, Ds)
+ mu ~ Normal(1.0, 1.0)
+ sigma ~ truncated(Normal(0.5, 0.5); lower = 0.0)
+ dist = LogNormal(mu, sigma)
+
+ for i in eachindex(y)
+ Turing.@addlogprob! n[i] * primary_censored_dist_lpmf(
+ dist, y[i], pws[i], y_upper[i], Ds[i])
+ end
+end
+
primarycensoreddist_model (generic function with 2 methods)
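Before plotting, we need the fitted object primarycensoreddist_fit. A sketch of instantiating and sampling the model (here the upper bound of each observed delay is assumed to be the observed delay plus its secondary window width, and the sampler settings are assumptions):
primarycensoreddist_mdl = primarycensoreddist_model(
+    delay_counts.observed_delay,
+    delay_counts.observed_delay .+ delay_counts.swindow, # upper bound of each delay
+    delay_counts.n,
+    delay_counts.pwindow,
+    delay_counts.obs_time)
+
+primarycensoreddist_fit = sample(
+    primarycensoreddist_mdl, NUTS(), MCMCThreads(), 500, 4)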
let
+ f = pairplot(primarycensoreddist_fit)
+ CairoMakie.vlines!(f[1, 1], [meanlog], linewidth = 3)
+ CairoMakie.vlines!(f[2, 2], [sdlog], linewidth = 3)
+ f
+end
+
+
+
+
We see that the model has converged and the diagnostics look good. We also see that the posterior means are very near the true parameters and the 90% credible intervals include the true parameters.
+
+
EpiMethod represents a method for performing EpiAware inference and/or generative modelling, which combines a sequence of optimization steps to pass initialisation information to a sampler method.
Fields
pre_sampler_steps::Vector{O} where O<:AbstractEpiOptMethod: Pre-sampler optimization steps.
Wrap the _apply_method function by calling it with the given model, method, data, and optional keyword arguments (kwargs). The resulting solution is then passed to the generated_observables function, along with the model and input data, to compute the generated observables.
Arguments
model: The model to apply the method to.
method: The method to apply to the model.
data: The data to pass to the apply_method function.
kwargs: Optional keyword arguments to pass to the apply_method function.
Returns
The generated observables computed from the solution.
Run the EpiAware algorithm to estimate the parameters of an epidemiological model.
Arguments
epiproblem::EpiProblem: An EpiProblem object specifying the epidemiological problem.
method::EpiMethod: An EpiMethod object specifying the inference method.
data: The observed data used for inference.
Keyword Arguments
fix_parameters::NamedTuple: A NamedTuple of fixed parameters for the model.
condition_parameters::NamedTuple: A NamedTuple of conditioned parameters for the model.
kwargs...: Additional keyword arguments passed to the inference methods.
Returns
A NamedTuple with a samples field which is the output of applying methods and a model field with the model used. Optionally, a gens field with the generated quantities from the model if that makes sense with the inference method.
broadcast_n(
+ broadcast_rule::AbstractBroadcastRule,
+ latent,
+ n,
+ period
+)
+
This function is used to define the behavior of broadcasting for a specific type of AbstractBroadcastRule.
The broadcast_n function returns the length of the latent periods to generate using the given broadcast_rule. Which model of broadcasting is implemented is set by the type of broadcast_rule. If no implementation is defined for the given broadcast_rule, then EpiAware will issue a warning and return nothing.
broadcast_rule(
+ broadcast_rule::AbstractBroadcastRule,
+ n,
+ period
+)
+
This function is used to define the behavior of broadcasting for a specific type of AbstractBroadcastRule.
The broadcast_rule function implements a model of broadcasting a latent process. Which model of broadcasting is implemented is set by the type of broadcast_rule. If no implementation is defined for the given broadcast_rule, then EpiAware will issue a warning and return nothing.
generate_latent(latent_model::AbstractLatentModel, n) -> Any
+
Constructor function for a latent process path $Z_t$ of length n.
The generate_latent function implements a model of generating a latent process. Which model for generating the latent process is implemented is set by the type of latent_model. If no implementation is defined for the type of latent_model, then EpiAware will issue a warning and return nothing.
Interface to Turing.jl probabilistic programming language (PPL)
Apart from the no implementation fallback method, the generate_latent implementation function should return a constructor function for a DynamicPPL.Model object. Sample paths of $Z_t$ are generated quantities of the constructed model. Priors for model parameters are fields of latent_model.
generate_latent_infs(
+ epi_model::AbstractEpiModel,
+ Z_t
+) -> Any
+
Constructor function for unobserved/latent infections based on the type of epi_model <: AbstractEpiModel and a latent process path $Z_t$.
The generate_latent_infs function implements a model of generating unobserved/latent infections conditional on a latent process. Which model of generating unobserved/latent infections is implemented is set by the type of epi_model. If no implementation is defined for the given epi_model, then EpiAware will issue a warning and return nothing.
Interface to Turing.jl probabilistic programming language (PPL)
Apart from the no implementation fallback method, the generate_latent_infs implementation function returns a constructor function for a DynamicPPL.Model object where the unobserved/latent infections are a generated quantity. Priors for model parameters are fields of epi_model.
Constructor function for generating observations based on the given observation model.
The generate_observations function implements a model of generating observations based on the given observation model. Which model of generating observations is implemented is set by the type of obs_model. If no implementation is defined for the given obs_model, then EpiAware will issue a warning and return nothing.
Apply steps defined by an EpiMethod to a model object.
This function applies the steps defined by an EpiMethod object to a Model object. It iterates over the pre-sampler steps defined in the EpiMethod object and recursively applies them to the model. Finally, it applies the sampler step defined in the EpiMethod object to the model. The prev_result argument is used to pass the result obtained from applying the previous steps, if any.
Arguments
method::EpiMethod: The EpiMethod object containing the steps to be applied.
model::Model: The model object to which the steps will be applied.
prev_result: The previous result obtained from applying the steps. Defaults to nothing.
kwargs...: Additional keyword arguments that can be passed to the steps.
Returns
prev_result: The result obtained after applying the steps.
n_samples::Union{Nothing, Int64}: Number of samples from a model. If an integer is provided, the model is sampled n_samples times using Turing.Prior(), returning an MCMCChains.Chains object. If nothing, the model is sampled once, returning a NamedTuple object of the sampled random variables along with the generated quantities.
Create a Negative binomial distribution with the specified mean that avoids InExactError when the mean is too large.
Parameterisation:
We are using a mean and cluster factorization of the negative binomial distribution such that the variance to mean relationship is:
\[\sigma^2 = \mu + \alpha^2 \mu^2\]
The reason for this parameterisation is that at sufficiently large mean values (i.e. μ > 1/α) α is approximately equal to the standard fluctuation of the distribution, e.g. if α = 0.05 we expect typical fluctuations of samples from the negative binomial to be about 5% of the mean when the mean is notably larger than 20. Otherwise, we expect approximately Poisson noise. In our opinion, this parameterisation is useful for specifying the distribution in a way that makes it easier to reason about priors for α.
Arguments:
r: The number of successes, although this can be extended to a continuous number.
p: Success rate.
Returns:
A SafeNegativeBinomial distribution with the specified mean.
Examples:
using EpiAware, Distributions
+
+bigμ = exp(48.0) #Large value of μ
+σ² = bigμ + 0.05 * bigμ^2 #Large variance
+
+# We can calculate the success rate from the mean to variance relationship
+p = bigμ / σ²
+r = bigμ * p / (1 - p)
+d = SafeNegativeBinomial(r, p)
+# output
+EpiAware.EpiAwareUtils.SafeNegativeBinomial{Float64}(r=20.0, p=2.85032816548187e-20)
Apply the `accumulate` function to the `AbstractAccumulationStep` object.
+This is effectively an optimised version of a for loop that applies the
+`AbstractAccumulationStep` object to the input data in a single pass.
+
+# Arguments
+- `acc_step::AbstractAccumulationStep`: The accumulation step function.
+- `initial_state`: The initial state of the accumulation.
+- `ϵ_t::AbstractVector{<:Real}`: The input data.
+
+# Returns
+- `state::AbstractVector{<:Real}`: The accumulated state as returned by the
+`get_state` function from the output of the `accumulate` function.
+
+# Examples
+```julia
+using EpiAware
+struct TestStep <: AbstractAccumulationStep
+ a::Float64
+end
+
+function (step::TestStep)(state, ϵ)
+ new_state = step.a * ϵ
+ return new_state
+end
+
+acc_step = TestStep(0.5)
+initial_state = zeros(3)
+
+accumulate_scan(acc_step, initial_state, [1.0, 2.0, 3.0])
+
+function get_state(acc_step::TestStep, initial_state, state)
+ return state
+end
+
+accumulate_scan(acc_step, initial_state, [1.0, 2.0, 3.0])
+```
Create a discrete probability cumulative distribution function (CDF) from a given distribution, assuming a uniform distribution over primary event times with censoring intervals of width Δd for both primary and secondary events.
NB: censored_cdf returns the non-truncated CDF, i.e. the CDF without conditioning on the secondary event occurring either before or after some time.
Arguments
dist: The distribution from which to create the PMF.
Δd: The step size for discretizing the domain. Default is 1.0.
D: The upper bound of the domain. Must be greater than Δd. Default D = nothing
indicates that the distribution should be truncated at its upperth percentile, rounded to the nearest multiple of Δd.
Returns
A vector representing the CDF with 0.0 appended at the beginning.
Raises
AssertionError if the minimum value of dist is negative.
Create a discrete probability mass function (PMF) from a given distribution, assuming that the primary event happens at primary_approximation_point * Δd within an initial censoring interval. Common single-censoring approximations are primary_approximation_point = 0 (left-hand approximation), primary_approximation_point = 1 (right-hand) and primary_approximation_point = 0.5 (midpoint).
Arguments
dist: The distribution from which to create the PMF.
::Val{:single_censored}: A dummy argument used to dispatch to this method. The purpose of the Val type argument is to make using the single-censored approximation an active decision.
primary_approximation_point: An approximation point for the primary time in its censoring interval.
Default is 0.5 for midpoint approximation.
Δd: The step size for discretizing the domain. Default is 1.0.
D: The upper bound of the domain. Must be greater than Δd.
Returns
A vector representing the PMF.
Raises:
AssertionError if the minimum value of dist is negative.
Create a discrete probability mass function (PMF) from a given distribution, assuming a uniform distribution over primary event times with censoring intervals of width Δd for both primary and secondary events. The CDF for the time from the left edge of the interval containing the primary event to the secondary event is created by direct numerical integration (quadrature) of the convolution of the CDF of dist with the uniform density on [0,Δd), using the censored_cdf function. The discrete PMF for double censored delays is then found using simple differencing on the CDF.
NB: censored_pmf returns a right-truncated PMF, i.e. the PMF conditioned on the secondary event occurring before or on the final secondary censoring window.
Arguments
dist: The distribution from which to create the PMF.
Δd: The step size for discretizing the domain. Default is 1.0.
D: The upper bound of the domain. Must be greater than Δd. Default D = nothing
indicates that the distribution should be truncated at its upperth percentile, rounded to the nearest multiple of Δd.
Returns
A vector representing the PMF.
Raises
AssertionError if the minimum value of dist is negative.
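A usage sketch (assuming the keyword interface for Δd and D implied by the argument list above):
using Distributions
+using EpiAware.EpiAwareUtils: censored_pmf
+
+# Double-censored, right-truncated daily PMF for a Gamma delay, truncated at 10 days
+pmf = censored_pmf(Gamma(2.0, 2.0); Δd = 1.0, D = 10.0)
+sum(pmf) # should be approximately 1, since the returned PMF is right-truncated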
get_state(
+ acc_step::AbstractAccumulationStep,
+ initial_state,
+ state
+) -> Any
+
Processes the output of the `accumulate` function to return the final state.
+
+# Arguments
+- `acc_step::AbstractAccumulationStep`: The accumulation step function.
+- `initial_state`: The initial state of the accumulation.
+- `state`: The output of the `accumulate` function.
+
+# Returns
+- `state`: The combination of the initial state and the last element of
+ each accumulated state.
Apply f to each element of xs and accumulate the results.
f must be a callable on a sub-type of AbstractModel.
Design note
scan is restricted to AbstractModel sub-types to ensure: 1. that compiler specialization is activated, and 2. to avoid potential compiler overhead from specialisation on f <: Function.
Arguments
f: A callable/functor that takes two arguments, carry and x, and returns a new carry and a result y.
init: The initial value for the carry variable.
xs: An iterable collection of elements.
Returns
ys: An array containing the results of applying f to each element of xs.
carry: The final value of the carry variable after processing all elements of xs.
Examples
```jldoctest
using EpiAware

struct Adder <: EpiAwareBase.AbstractModel end

function (a::Adder)(carry, x)
    carry + x, carry + x
end
```
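As a usage sketch (assuming the argument order f, init, xs and the (ys, carry) return order documented above):
ys, carry = scan(Adder(), 0.0, 1:5)
+# Under these assumptions: ys == [1.0, 3.0, 6.0, 10.0, 15.0] and carry == 15.0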
generate_latent_infs(
+ epi_model::AbstractTuringRenewal,
+ _Rt
+) -> Any
+
Implement the generate_latent_infs function for the Renewal model.
Example usage with Renewal type of model for unobserved infection process
generate_latent_infs can be used to construct a Turing model for the latent infections conditional on the sample path of a latent process. In this example, we generate a sample of a white noise latent process.
First, we construct a Renewal struct with an EpiData object, an initialisation prior and a transformation function.
using Distributions, Turing, EpiAware
+gen_int = [0.2, 0.3, 0.5]
+g = exp
+
+# Create an EpiData object
+data = EpiData(gen_int, g)
+
+# Create an Renewal model
+renewal_model = Renewal(data; initialisation_prior = Normal())
Then, we can use generate_latent_infs to construct a Turing model for the unobserved infection generation model set by the type of renewal_model.
# Construct a Turing model
+Z_t = randn(100) * 0.05
+latent_inf = generate_latent_infs(renewal_model, Z_t)
Now we can use the Turing PPL API to sample underlying parameters and generate the unobserved infections.
# Sample from the unobserved infections model
+
+#Sample random parameters from prior
+θ = rand(latent_inf)
+#Get unobserved infections as a generated quantities from the model
+I_t = generated_quantities(latent_inf, θ)
generate_latent_infs(
+ epi_model::DirectInfections,
+ Z_t
+) -> Any
+
Implement the generate_latent_infs function for the DirectInfections model.
Example usage with DirectInfections type of model for unobserved infection process
First, we construct a DirectInfections struct with an EpiData object, an initialisation prior and a transformation function.
using Distributions, Turing, EpiAware
+gen_int = [0.2, 0.3, 0.5]
+g = exp
+
+# Create an EpiData object
+data = EpiData(gen_int, g)
+
+# Create a DirectInfections model
+direct_inf_model = DirectInfections(data = data, initialisation_prior = Normal())
Then, we can use generate_latent_infs to construct a Turing model for the unobserved infection generation model set by the type of direct_inf_model.
# Construct a Turing model
+Z_t = randn(100)
+latent_inf = generate_latent_infs(direct_inf_model, Z_t)
Now we can use the Turing PPL API to sample underlying parameters and generate the unobserved infections.
# Sample from the unobserved infections model
+
+#Sample random parameters from prior
+θ = rand(latent_inf)
+#Get unobserved infections as a generated quantities from the model
+I_t = generated_quantities(latent_inf, θ)
generate_latent_infs(epi_model::ExpGrowthRate, rt) -> Any
+
Implement the generate_latent_infs function for the ExpGrowthRate model.
Example usage with ExpGrowthRate type of model for unobserved infection process
generate_latent_infs can be used to construct a Turing model for the latent infections conditional on the sample path of a latent process. In this example, we generate a sample of a white noise latent process.
First, we construct an ExpGrowthRate struct with an EpiData object, an initialisation prior and a transformation function.
using Distributions, Turing, EpiAware
+gen_int = [0.2, 0.3, 0.5]
+g = exp
+
+# Create an EpiData object
+data = EpiData(gen_int, g)
+
+# Create an ExpGrowthRate model
+exp_growth_model = ExpGrowthRate(data = data, initialisation_prior = Normal())
Then, we can use generate_latent_infs to construct a Turing model for the unobserved infection generation model set by the type of exp_growth_model.
# Construct a Turing model
+Z_t = randn(100) * 0.05
+latent_inf = generate_latent_infs(exp_growth_model, Z_t)
Now we can use the Turing PPL API to sample underlying parameters and generate the unobserved infections.
# Sample from the unobserved infections model
+
+#Sample random parameters from prior
+θ = rand(latent_inf)
+#Get unobserved infections as a generated quantities from the model
+I_t = generated_quantities(latent_inf, θ)
Model unobserved/latent infections as a transformation on a sampled latent process.
Mathematical specification
If $Z_t$ is a realisation of the latent model, then the unobserved/latent infections are given by
\[I_t = g(\hat{I}_0 + Z_t).\]
where $g$ is a transformation function and the unconstrained initial infections $\hat{I}_0$ are sampled from a prior distribution.
DirectInfections are constructed by passing an EpiData object data and an initialisation_prior for the prior distribution of $\hat{I}_0$. The default initialisation_prior is Normal().
Constructors
DirectInfections(; data, initialisation_prior)
Example usage with generate_latent_infs
generate_latent_infs can be used to construct a Turing model for the latent infections conditional on the sample path of a latent process. In this example, we generate a sample of a white noise latent process.
First, we construct a DirectInfections struct with an EpiData object, an initialisation prior and a transformation function.
using Distributions, Turing, EpiAware
+gen_int = [0.2, 0.3, 0.5]
+g = exp
+
+# Create an EpiData object
+data = EpiData(gen_int, g)
+
+# Create a DirectInfections model
+direct_inf_model = DirectInfections(data = data, initialisation_prior = Normal())
Then, we can use generate_latent_infs to construct a Turing model for the unobserved infection generation model set by the type of direct_inf_model.
# Construct a Turing model
+Z_t = randn(100)
+latent_inf = generate_latent_infs(direct_inf_model, Z_t)
Now we can use the Turing PPL API to sample underlying parameters and generate the unobserved infections.
# Sample from the unobserved infections model
+
+#Sample random parameters from prior
+θ = rand(latent_inf)
+#Get unobserved infections as a generated quantities from the model
+I_t = generated_quantities(latent_inf, θ)
Fields
data::EpiData: Epidata object.
initialisation_prior::Distributions.Sampleable: Prior distribution for the initialisation of the infections. Default is Normal().
Constructs an EpiData object with double interval censoring discretisation of the continuous next generation interval distribution gen_distribution, with additional right truncation at D_gen. Δd sets the interval width (default = 1.0). transformation sets the transformation function.
Examples
Constructing directly from a discrete generation interval and a transformation function (this mirrors the EpiData usage shown in the examples above):
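using EpiAware
+gen_int = [0.2, 0.3, 0.5]
+g = exp
+
+# Create an EpiData object directly from the discrete generation interval and transformation
+data = EpiData(gen_int, g)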
Model unobserved/latent infections as due to time-varying exponential growth rate $r_t$ which is generated by a latent process.
Mathematical specification
If $Z_t$ is a realisation of the latent model, then the unobserved/latent infections are given by
\[I_t = g(\hat{I}_0) \exp(Z_t).\]
where $g$ is a transformation function and the unconstrained initial infections $\hat{I}_0$ are sampled from a prior distribution.
ExpGrowthRate are constructed by passing an EpiData object data and an initialisation_prior for the prior distribution of $\hat{I}_0$. The default initialisation_prior is Normal().
Constructor
ExpGrowthRate(; data, initialisation_prior).
Example usage with generate_latent_infs
generate_latent_infs can be used to construct a Turing model for the latent infections conditional on the sample path of a latent process. In this example, we generate a sample of a white noise latent process.
First, we construct an ExpGrowthRate struct with an EpiData object, an initialisation prior and a transformation function.
using Distributions, Turing, EpiAware
+gen_int = [0.2, 0.3, 0.5]
+g = exp
+
+# Create an EpiData object
+data = EpiData(gen_int, g)
+
+# Create an ExpGrowthRate model
+exp_growth_model = ExpGrowthRate(data = data, initialisation_prior = Normal())
Then, we can use generate_latent_infs to construct a Turing model for the unobserved infection generation model set by the type of exp_growth_model.
# Construct a Turing model
+Z_t = randn(100) * 0.05
+latent_inf = generate_latent_infs(exp_growth_model, Z_t)
Now we can use the Turing PPL API to sample underlying parameters and generate the unobserved infections.
# Sample from the unobserved infections model
+
+#Sample random parameters from prior
+θ = rand(latent_inf)
+#Get unobserved infections as a generated quantities from the model
+I_t = generated_quantities(latent_inf, θ)
Model unobserved/latent infections as due to time-varying Renewal model with reproduction number $\mathcal{R}_t$ which is generated by a latent process.
Mathematical specification
If $Z_t$ is a realisation of the latent model, then the unobserved/latent infections are given by
where $g$ is a transformation function and the unconstrained initial infections $\hat{I}_0$ are sampled from a prior distribution. The discrete generation interval is given by $g_i$.
$r(\mathcal{R}_1)$ is the exponential growth rate implied by $\mathcal{R}_1$ using the implicit relationship between the exponential growth rate and the reproduction number.
\[\mathcal{R} \sum_{j \geq 1} g_j \exp(- r j)= 1.\]
Renewal are constructed by passing an EpiData object data and an initialisation_prior for the prior distribution of $\hat{I}_0$. The default initialisation_prior is Normal().
Constructors
Renewal(; data, initialisation_prior). Construct a Renewal model with default update steps.
Renewal(data; initialisation_prior). Construct a Renewal model with default update steps.
Renewal(data, initialisation_prior, recurrent_step) Construct a Renewal model with recurrent_step update step function.
Example usage with generate_latent_infs
generate_latent_infs can be used to construct a Turing model for the latent infections conditional on the sample path of a latent process. In this example, we generate a sample of a white noise latent process.
First, we construct a Renewal struct with an EpiData object, an initialisation prior and a transformation function.
using Distributions, Turing, EpiAware
+gen_int = [0.2, 0.3, 0.5]
+g = exp
+
+# Create an EpiData object
+data = EpiData(gen_int, g)
+
+# Create an Renewal model
+renewal_model = Renewal(data; initialisation_prior = Normal())
Then, we can use generate_latent_infs to construct a Turing model for the unobserved infection generation model set by the type of renewal_model.
# Construct a Turing model
+Z_t = randn(100) * 0.05
+latent_inf = generate_latent_infs(renewal_model, Z_t)
Now we can use the Turing PPL API to sample underlying parameters and generate the unobserved infections.
# Sample from the unobserved infections model
+
+#Sample random parameters from prior
+θ = rand(latent_inf)
+#Get unobserved infections as a generated quantities from the model
+I_t = generated_quantities(latent_inf, θ)
This function computes an approximation to the exponential growth rate r given the reproductive ratio R₀ and the discretized generation interval w with discretized interval width Δd. This is based on the implicit solution of
\[G(r) - {1 \over R_0} = 0.\]
where
\[G(r) = \sum_{i=1}^n w_i e^{-r i}.\]
is the negative moment generating function (MGF) of the generation interval distribution.
The two-step approximation is based on: 1. direct solution of the implicit equation under a small-r approximation; 2. improving the approximation using Newton's method for a fixed number of steps newton_steps.
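A generic sketch of this two-step scheme (illustrative only, not the package's implementation; the function name r_approx is hypothetical):
function r_approx(R₀, w; newton_steps = 4)
+    G(r) = sum(w[i] * exp(-r * i) for i in eachindex(w))
+    dG(r) = sum(-i * w[i] * exp(-r * i) for i in eachindex(w))
+    mean_gi = sum(i * w[i] for i in eachindex(w))
+    r = (R₀ - 1) / (R₀ * mean_gi)    # 1. small-r (first-order) approximation
+    for _ in 1:newton_steps
+        r -= (G(r) - 1 / R₀) / dG(r) # 2. Newton updates on G(r) - 1/R₀ = 0
+    end
+    return r
+end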
Apply a ManyPathfinder method to a DynamicPPL.Model object.
If prev_result is a vector of real numbers, then the ManyPathfinder method is applied with the initial values set to prev_result. Otherwise, the ManyPathfinder method is run with default initial values generated.
A function that returns the length of the latent periods to generate using the RepeatBlock rule, which is equal to n divided by the period, rounded up to the nearest integer.
generate_latent(latent_model::DiffLatentModel, n) -> Any
+
Generate a Turing model for n-step latent process $Z_t$ using a differenced latent model defined by latent_model.
Arguments
latent_model::DiffLatentModel: The differential latent model.
n: The length of the latent variables.
Turing model specifications
Sampled random variables
latent_init: The initial latent process variables.
Other random variables defined by model<:AbstractTuringLatentModel field of the undifferenced model.
Generated quantities
A tuple containing the generated latent process as its first argument and a NamedTuple of sampled auxiliary variables as second argument.
Example usage with DiffLatentModel model constructor
generate_latent can be used to construct a Turing model for the differenced latent process. In this example, the underlying undifferenced process is a RandomWalk model.
First, we construct a RandomWalk struct with an initial value prior and a step size standard deviation prior.
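using Distributions, EpiAware
+rw = RandomWalk(Normal(0.0, 1.0), truncated(Normal(0.0, 0.05), 0.0, Inf))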
Then, we can use DiffLatentModel to construct a DiffLatentModel for d-fold differenced process with rw as the undifferenced latent process.
We have two constructor options for DiffLatentModel. The first option is to supply a common prior distribution for the initial terms and specify d as follows:
diff_model = DiffLatentModel(rw, Normal(); d = 2)
Or we can supply a vector of priors for the initial terms and d is inferred as follows:
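diff_model2 = DiffLatentModel(;undiffmodel = rw, init_priors = [Normal(), Normal()])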
Then, we can use generate_latent to construct a Turing model for the differenced latent process generating a length n process,
# Construct a Turing model
+n = 100
+difference_mdl = generate_latent(diff_model, n)
Now we can use the Turing PPL API to sample underlying parameters and generate the unobserved latent process.
#Sample random parameters from prior
+θ = rand(difference_mdl)
+#Get a sampled latent process as a generated quantity from the model
+(Z_t, _) = generated_quantities(difference_mdl, θ)
+Z_t
generate_latent(latent_model::RandomWalk, n) -> Any
+
Implement the generate_latent function for the RandomWalk model.
Example usage of generate_latent with RandomWalk type of latent process model
using Distributions, Turing, EpiAware
+
+# Create a RandomWalk model
+rw = RandomWalk(init_prior = Normal(2., 1.),
+ std_prior = HalfNormal(0.1))
Then, we can use generate_latent to construct a Turing model for a 10 step random walk.
# Construct a Turing model
+rw_model = generate_latent(rw, 10)
Now we can use the Turing PPL API to sample underlying parameters and generate the unobserved infections.
#Sample random parameters from prior
+θ = rand(rw_model)
+#Get random walk sample path as a generated quantities from the model
+Z_t, _ = generated_quantities(rw_model, θ)
AR(damp_prior::Distribution, std_prior::Distribution, init_prior::Distribution; p::Int = 1): Constructs an AR model with the specified prior distributions for damping coefficients, standard deviation, and initial conditions. The order of the AR model can also be specified.
AR(; damp_priors::Vector{D} = [truncated(Normal(0.0, 0.05))], std_prior::Distribution = truncated(Normal(0.0, 0.05), 0.0, Inf), init_priors::Vector{I} = [Normal()]) where {D <: Distribution, I <: Distribution}: Constructs an AR model with the specified prior distributions for damping coefficients, standard deviation, and initial conditions. The order of the AR model is determined by the length of the damp_priors vector.
AR(damp_prior::Distribution, std_prior::Distribution, init_prior::Distribution, p::Int): Constructs an AR model with the specified prior distributions for damping coefficients, standard deviation, and initial conditions. The order of the AR model is explicitly specified.
The BroadcastLatentModel struct represents a latent model that supports broadcasting of latent periods.
Constructors
BroadcastLatentModel(; model::M, period::Int, broadcast_rule::B): Constructs a BroadcastLatentModel with the given model, period, and broadcast_rule.
BroadcastLatentModel(model::M, period::Int, broadcast_rule::B): An alternative constructor that allows the model, period, and broadcast_rule to be specified without keyword arguments.
This struct is used to combine multiple latent models into a single latent model. If a prefix is supplied wraps each model with PrefixLatentModel.
Constructors
CombineLatentModels(models::M, prefixes::P) where {M <: AbstractVector{<:AbstractTuringLatentModel}, P <: AbstractVector{<:String}}: Constructs a CombineLatentModels instance with specified models and prefixes, ensuring that there are at least two models and the number of models and prefixes are equal.
CombineLatentModels(models::M) where {M <: AbstractVector{<:AbstractTuringLatentModel}}: Constructs a CombineLatentModels instance with specified models, automatically generating prefixes for each model. The
automatic prefixes are of the form Combine.1, Combine.2, etc.
This struct is used to concatenate multiple latent models into a single latent model.
Constructors
ConcatLatentModels(models::M, no_models::I, dimension_adaptor::F, prefixes::P) where {M <: AbstractVector{<:AbstractTuringLatentModel}, I <: Int, F <: Function, P <: AbstractVector{String}}: Constructs a ConcatLatentModels instance with specified models, number of models, dimension adaptor, and prefixes.
ConcatLatentModels(models::M, dimension_adaptor::F; prefixes::P = "Concat." * string.(1:length(models))) where {M <: AbstractVector{<:AbstractTuringLatentModel}, F <: Function}: Constructs a ConcatLatentModels instance with specified models and dimension adaptor. The number of models is automatically determined as are the prefixes (of the form Concat.1, Concat.2, etc.) by default.
ConcatLatentModels(models::M; dimension_adaptor::Function, prefixes::P) where {M <: AbstractVector{<:AbstractTuringLatentModel}, P <: AbstractVector{String}}: Constructs a ConcatLatentModels instance with specified models, dimension adaptor, prefixes, and automatically determines the number of models. The default dimension adaptor is equal_dimensions. The default prefixes are of the form Concat.1, Concat.2, etc.
ConcatLatentModels(; models::M, dimension_adaptor::Function, prefixes::P) where {M <: AbstractVector{<:AbstractTuringLatentModel}, P <: AbstractVector{String}}: Constructs a ConcatLatentModels instance with specified models, dimension adaptor, prefixes, and automatically determines the number of models. The default dimension adaptor is equal_dimensions. The default prefixes are of the form Concat.1, Concat.2, etc.
models::AbstractVector{<:AbstractTuringLatentModel}: A vector of latent models
no_models::Int64: The number of models in the collection
dimension_adaptor::Function: The dimension function for the latent variables. By default this divides the number of latent variables by the number of models and returns a vector of dimensions rounding up the first element and rounding down the rest.
prefixes::AbstractVector{<:String}: A vector of prefixes for the latent models
Model the latent process as a d-fold differenced version of another process.
Mathematical specification
Let $\Delta$ be the differencing operator. If $\tilde{Z}_t$ is a realisation of the undifferenced latent model supplied to DiffLatentModel, then the differenced process is given by,
\[\Delta^{(d)} Z_t = \tilde{Z}_t, \quad t = d+1, \ldots.\]
We can recover $Z_t$ by applying the inverse differencing operator $\Delta^{-1}$, which corresponds to the cumulative sum operator cumsum in Julia, d-times. The d initial terms $Z_1, \ldots, Z_d$ are inferred.
Constructors
DiffLatentModel(latent_model, init_prior_distribution::Distribution; d::Int) Constructs a DiffLatentModel for d-fold differencing with latent_model as the undifferenced latent process. All initial terms have common prior init_prior_distribution.
DiffLatentModel(;model, init_priors::Vector{D} where {D <: Distribution}) Constructs a DiffLatentModel for d-fold differencing with latent_model as the undifferenced latent process. The d initial terms have priors given by the vector init_priors, therefore length(init_priors) sets d.
Example usage with generate_latent
generate_latent can be used to construct a Turing model for the differenced latent process. In this example, the underlying undifferenced process is a RandomWalk model.
First, we construct a RandomWalk struct with an initial value prior and a step size standard deviation prior.
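using Distributions, EpiAware
+rw = RandomWalk(Normal(0.0, 1.0), truncated(Normal(0.0, 0.05), 0.0, Inf))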
Then, we can use DiffLatentModel to construct a DiffLatentModel for d-fold differenced process with rw as the undifferenced latent process.
We have two constructor options for DiffLatentModel. The first option is to supply a common prior distribution for the initial terms and specify d as follows:
diff_model = DiffLatentModel(rw, Normal(); d = 2)
Or we can supply a vector of priors for the initial terms and d is inferred as follows:
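diff_model2 = DiffLatentModel(;undiffmodel = rw, init_priors = [Normal(), Normal()])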
Then, we can use generate_latent to construct a Turing model for the differenced latent process generating a length n process,
# Construct a Turing model
+n = 100
+difference_mdl = generate_latent(diff_model, n)
Now we can use the Turing PPL API to sample underlying parameters and generate the unobserved latent process.
#Sample random parameters from prior
+θ = rand(difference_mdl)
+#Get a sampled latent process as a generated quantity from the model
+(Z_t, _) = generated_quantities(difference_mdl, θ)
+Z_t
Fields
model::AbstractTuringLatentModel: Underlying latent model for the differenced process
init_prior::Distributions.Distribution: The prior distribution for the initial latent variables.
The HierarchicalNormal struct represents a non-centered hierarchical normal distribution.
Constructors
HierarchicalNormal(mean, std_prior): Constructs a HierarchicalNormal instance with the specified mean and standard deviation prior.
HierarchicalNormal(; mean = 0.0, std_prior = truncated(Normal(0,1), 0, Inf)): Constructs a HierarchicalNormal instance with the specified mean and standard deviation prior using named arguments and with default values.
Generate a latent model with a prefix. A lightweight wrapper around `EpiAwareUtils.prefix_submodel`.
+
+# Constructors
+- `PrefixLatentModel(model::M, prefix::P)`: Create a `PrefixLatentModel` with the latent model `model` and the prefix `prefix`.
+- `PrefixLatentModel(; model::M, prefix::P)`: Create a `PrefixLatentModel` with the latent model `model` and the prefix `prefix`.
+
+# Examples
+```julia
+using EpiAware
+latent_model = PrefixLatentModel(model = HierarchicalNormal(), prefix = "Test")
+mdl = generate_latent(latent_model, 10)
+rand(mdl)
+```
Fields
model::AbstractTuringLatentModel: The latent model
The random walk $Z_t$ is specified as a parametric transformation of the white noise sequence $(\epsilon_t)_{t\geq 1}$,
\[Z_t = Z_0 + \sigma \sum_{i = 1}^t \epsilon_i\]
Constructing a random walk requires specifying:
An init_prior as a prior for $Z_0$. Default is Normal().
A std_prior for $\sigma$. The default is HalfNormal with a mean of 0.25.
Constructors
RandomWalk(; init_prior, std_prior)
Example usage with generate_latent
generate_latent can be used to construct a Turing model for the random walk $Z_t$.
First, we construct a RandomWalk struct with priors,
using Distributions, Turing, EpiAware
+
+# Create a RandomWalk model
+rw = RandomWalk(init_prior = Normal(2., 1.),
+ std_prior = HalfNormal(0.1))
Then, we can use generate_latent to construct a Turing model for a 10 step random walk.
# Construct a Turing model
+rw_model = generate_latent(rw, 10)
Now we can use the Turing PPL API to sample underlying parameters and generate the unobserved infections.
#Sample random parameters from prior
+θ = rand(rw_model)
+#Get random walk sample path as a generated quantities from the model
+Z_t, _ = generated_quantities(rw_model, θ)
Record a variable (using the Turing := syntax) in a latent model.
# Fields
+- `model::AbstractTuringLatentModel`: The latent model to dispatch to.
+
+# Constructors
+
+- `RecordExpectedLatent(model::AbstractTuringLatentModel)`: Record the expected latent vector from the model as `exp_latent`.
+
+# Examples
+
+```julia
+using EpiAware, Turing
+mdl = RecordExpectedLatent(FixedIntercept(0.1))
+gen_latent = generate_latent(mdl, 1)
+sample(gen_latent, Prior(), 10)
+```
RepeatBlock is a struct that represents a broadcasting rule. It is a subtype of AbstractBroadcastRule.
It repeats the latent process in blocks of size period. An example of this rule is to repeat the latent process in blocks of size 7 to model a weekly process (though for this we also provide the broadcast_weekly helper function).
RepeatEach is a struct that represents a broadcasting rule. It is a subtype of AbstractBroadcastRule.
It repeats the latent process at each period. An example of this rule is to repeat the latent process at each day of the week (though for this we also provide the dayofweek helper function).
Examples
using EpiAware
+rule = RepeatEach()
+latent = [1, 2]
+n = 10
+period = 2
+broadcast_rule(rule, latent, n, period)
The TransformLatentModel struct represents a latent model that applies a transformation function to the latent variables generated by another latent model.
Constructors
TransformLatentModel(model, trans_function): Constructs a TransformLatentModel instance with the specified latent model and transformation function.
TransformLatentModel(; model, trans_function): Constructs a TransformLatentModel instance with the specified latent model and transformation function using named arguments.
Example
using EpiAware, Distributions
+trans = TransformLatentModel(Intercept(Normal(2, 0.2)), x -> x .|> exp)
+trans_model = generate_latent(trans, 5)
+trans_model()
Fields
model::AbstractTuringLatentModel: The latent model to transform.
trans_function::Function: The transformation function.
Return a vector of dimensions that are equal or as close as possible, given the total number of elements n and the number of dimensions m. The default dimension adaptor for ConcatLatentModels.
Arguments
n::Int: The total number of elements.
m::Int: The number of dimensions.
Returns
dims::AbstractVector{Int}: A vector of dimensions, where the first element is the ceiling of n / m and the remaining elements are the floor of n / m.
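For illustration (a sketch assuming equal_dimensions is exported and called positionally with n then m, as in the argument list above):
using EpiAware
+equal_dimensions(10, 3) # expected: [4, 3, 3], i.e. ceil(10 / 3) then floor(10 / 3) repeated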
Generates observations from an observation error model. It provides support for missing values in observations (y_t), and expected observations (Y_t) that are shorter than observations. When this is the case it assumes that the expected observations are the last length(Y_t) elements of y_t. It also pads the expected observations with a small value (1e-6) to mitigate potential numerical issues.
It dispatches to the observation_error function to generate the observation error distribution which uses priors generated by generate_observation_error_priors submodel. For most observation error models specific implementations of observation_error and generate_observation_error_priors are required but a specific implementation of generate_observations is not required.
Aggregates observations over a specified time period. For efficiency it also only passes the aggregated observations to the submodel. The aggregation vector is internally broadcasted to the length of the observations and the present vector is broadcasted to the length of the aggregation vector using broadcast_n.
Fields
model::AbstractTuringObservationModel: The submodel to use for the aggregated observations.
aggregation::AbstractVector{<: Int}: The number of time periods to aggregate over.
present::AbstractVector{<: Bool}: A vector of booleans indicating whether the observation is present or not.
Constructors
Aggregate(model, aggregation): Constructs an Aggregate object and automatically sets the present field.
Aggregate(; model, aggregation): Constructs an Aggregate object and automatically sets the present field using named keyword arguments
The Ascertainment struct represents an observation model that incorporates an ascertainment model. If a latent_prefix is supplied, the latent_model is wrapped in a call to PrefixLatentModel.
Constructors
Ascertainment(model::M, latent_model::T, transform::F, latent_prefix::P) where {M <: AbstractTuringObservationModel, T <: AbstractTuringLatentModel, F <: Function, P <: String}: Constructs an Ascertainment instance with the specified observation model, latent model, transform function, and latent prefix.
Ascertainment(; model::M, latent_model::T, transform::F = (Y_t, x) -> xexpy.(Y_t, x), latent_prefix::P = "Ascertainment") where {M <: AbstractTuringObservationModel, T <: AbstractTuringLatentModel, F <: Function, P <: String}: Constructs an Ascertainment instance with the specified observation model, latent model, optional transform function (default: (Y_t, x) -> xexpy.(Y_t, x)), and optional latent prefix (default: "Ascertainment").
The LatentDelay struct represents an observation model that introduces a latent delay in the observations. It is a subtype of AbstractTuringObservationModel.
Note that the LatentDelay observation model shortens the expected observation vector by the length of the delay distribution and this is then passed to the underlying observation model. This is to prevent fitting to partially observed data.
Fields
model::M: The underlying observation model.
rev_pmf::T: The probability mass function (PMF) representing the delay distribution reversed.
Constructors
LatentDelay(model::M, distribution::C; D = nothing, Δd = 1.0) where {M <: AbstractTuringObservationModel, C <: ContinuousDistribution}: Constructs a LatentDelay object with the given underlying observation model and continuous distribution. The D parameter specifies the right truncation of the distribution, with default D = nothing indicates that the distribution should be truncated at its 99th percentile rounded to nearest multiple of Δd. The Δd parameter specifies the width of each delay interval.
LatentDelay(model::M, pmf::T) where {M <: AbstractTuringObservationModel, T <: AbstractVector{<:Real}}: Constructs a LatentDelay object with the given underlying observation model and delay PMF.
The NegativeBinomialError struct represents an observation model for negative binomial errors. It is a subtype of AbstractTuringObservationModel.
Constructors
NegativeBinomialError(; cluster_factor_prior::Distribution = HalfNormal(0.1)): Constructs a NegativeBinomialError object with default values for the cluster factor prior.
NegativeBinomialError(cluster_factor_prior::Distribution): Constructs a NegativeBinomialError object with a specified cluster factor prior.
Generate an observation model with a prefix. A lightweight wrapper around `EpiAwareUtils.prefix_submodel`.
+
+# Constructors
+- `PrefixObservationModel(model::M, prefix::P)`: Create a `PrefixObservationModel` with the observation model `model` and the prefix `prefix`.
+- `PrefixObservationModel(; model::M, prefix::P)`: Create a `PrefixObservationModel` with the observation model `model` and the prefix `prefix`.
+
+# Examples
+```julia
+using EpiAware
+observation_model = PrefixObservationModel(Poisson(), "Test")
+obs = generate_observations(observation_model, 10)
+rand(obs)
+```
Fields
model::AbstractTuringObservationModel: The observation model
prefix::String: The prefix for the observation model
Record a variable (using the Turing := syntax) in the observation model.
# Fields
+- `model::AbstractTuringObservationModel`: The observation model to dispatch to.
+
+# Constructors
+
+- `RecordExpectedObs(model::AbstractTuringObservationModel)`: Record the expected observation from the model as `exp_y_t`.
+
+# Examples
+
+```julia
+using EpiAware, Turing
+mdl = RecordExpectedObs(NegativeBinomialError())
+gen_obs = generate_observations(mdl, missing, fill(100, 10))
+sample(gen_obs, Prior(), 10)
+```
A stack of observation models that are looped over to generate observations for each model in the stack. Note that the model names are used to prefix the parameters in each model (so if I have a model named cases and a parameter y_t, the parameter in the model will be cases.y_t). Inside the constructor PrefixObservationModel is wrapped around each observation model.
Constructors
StackObservationModels(models::Vector{<:AbstractTuringObservationModel}, model_names::Vector{<:AbstractString}): Construct a StackObservationModels object with a vector of observation models and a vector of model names.
StackObservationModels(models::NamedTuple{names, T}): Construct a StackObservationModels object with a named tuple of observation models. The model names are automatically generated from the keys of the named tuple.
Generates priors for the observation error model. This should return a named tuple containing the priors required for generating the observation error distribution.
Generates observation error priors based on the NegativeBinomialError observation model. This function generates the cluster factor prior for the negative binomial error model.
The observation error distribution for the observation error model. This function should return the distribution for the observation error given the expected observation value Y_t and the priors generated by generate_observation_error_priors.
This function generates the observation error model based on the negative binomial error model with a positive shift. It dispatches to the NegativeBinomialMeanClust distribution.
Welcome to the EpiAware API reference! This section is designed to help you understand the API of the package which is split into submodules.
The EpiAware package itself contains no functions or types. Instead, it re-exports the functions and types from its submodules. See the sidebar for the list of submodules.
diff --git a/previews/PR513/lib/internals/index.html b/previews/PR513/lib/internals/index.html
new file mode 100644
index 000000000..afd69923c
--- /dev/null
+++ b/previews/PR513/lib/internals/index.html
@@ -0,0 +1,2 @@
+
+Internal API · EpiAware.jl
EpiAware is not a standard toolkit for infectious disease modelling.
It seeks to be highly modular and composable for advanced users whilst still providing opinionated workflows for those who are new to the field. Developed by the authors behind other widely used infectious disease modelling packages such as EpiNow2, epinowcast, and epidist, alongside experts in infectious disease modelling in Julia, EpiAware is designed to go beyond the capabilities of these packages by providing a more flexible and extensible framework for modelling and inference of infectious disease dynamics.
EpiAware.jl is a wrapper around a series of submodules, each of which provides a different aspect of the package's functionality (much like the tidyverse in R). The package is designed to be modular, with a clear separation between modules and between modules and data. Currently included modules are:
EpiAwareBase: The core module, which provides the underlying abstract types and functions for the package.
EpiAwareUtils: A utility module, which provides a series of utility functions for working with the package.
EpiInference: An inference module, which provides a series of functions for fitting models to data. Builds on top of Turing.jl.
EpiInfModels: Provides tools for composing models of the disease transmission process. Builds on top of Turing.jl, in particular the DynamicPPL.jl interface.
EpiLatentModels: Provides tools for composing latent models such as random walks, autoregressive models, etc. Builds on top of DynamicPPL.jl. Used by all other modelling modules to define latent processes.
EpiObsModels: Provides tools for composing observation models, such as Poisson, Binomial, etc. Builds on top of DynamicPPL.jl.
We support two primary workflows for using the package:
EpiProblem: A high-level interface for defining and fitting models to data. This is the recommended way to use the package.
Turing interface: A lower-level interface for defining and fitting models to data. This is the more flexible way to use the package and may also be more familiar to users of Turing.jl.
See the getting started section for tutorials on each of these workflows.
diff --git a/previews/PR513/search_index.js b/previews/PR513/search_index.js
new file mode 100644
index 000000000..8ef4be35b
--- /dev/null
+++ b/previews/PR513/search_index.js
@@ -0,0 +1,3 @@
+var documenterSearchIndex = {"docs":
+[{"location":"getting-started/installation/#Installation","page":"Installation","title":"Installation","text":"","category":"section"},{"location":"getting-started/installation/","page":"Installation","title":"Installation","text":"Eventually, EpiAware is likely to be added to the Julia registry. Until then, you can install it from the /EpiAware sub-directory of this repository by running the following command in the Julia REPL:","category":"page"},{"location":"getting-started/installation/","page":"Installation","title":"Installation","text":"using Pkg; Pkg.add(url=\"https://github.com/CDCgov/Rt-without-renewal\", subdir=\"EpiAware\")","category":"page"},{"location":"lib/EpiInfModels/#EpiInfModels.jl","page":"Overview","title":"EpiInfModels.jl","text":"","category":"section"},{"location":"lib/EpiInfModels/","page":"Overview","title":"Overview","text":"This package provides infectious disease transmission models for the EpiAware ecosystem.","category":"page"},{"location":"lib/EpiInfModels/#API","page":"Overview","title":"API","text":"","category":"section"},{"location":"lib/EpiInfModels/","page":"Overview","title":"Overview","text":"Pages = [\"lib/EpiInfModels/public.md\", \"lib/EpiInfModels/internals.md\"]","category":"page"},{"location":"getting-started/quickstart/#Quickstart","page":"Quickstart","title":"Quickstart","text":"","category":"section"},{"location":"getting-started/quickstart/","page":"Quickstart","title":"Quickstart","text":"Get up and running with EpiAware in just a few minutes using this quickstart guide.","category":"page"},{"location":"lib/EpiLatentModels/#EpiLatentModels.jl","page":"Overview","title":"EpiLatentModels.jl","text":"","category":"section"},{"location":"lib/EpiLatentModels/","page":"Overview","title":"Overview","text":"This package provides latent variable models for the EpiAware ecosystem.","category":"page"},{"location":"lib/EpiLatentModels/#API","page":"Overview","title":"API","text":"","category":"section"},{"location":"lib/EpiLatentModels/","page":"Overview","title":"Overview","text":"Pages = [\"lib/EpiLatentModels/public.md\", \"lib/EpiLatentModels/internals.md\"]","category":"page"},{"location":"lib/#api-reference","page":"Overview","title":"API reference","text":"","category":"section"},{"location":"lib/","page":"Overview","title":"Overview","text":"Welcome to the EpiAware API reference! This section is designed to help you understand the API of the package which is split into submodules.","category":"page"},{"location":"lib/","page":"Overview","title":"Overview","text":"The EpiAware package itself contains no functions or types. Instead, it re-exports the functions and types from its submodules. 
See the sidebar for the list of submodules.","category":"page"},{"location":"lib/EpiLatentModels/internals/#Internal-Documentation","page":"Internal API","title":"Internal Documentation","text":"","category":"section"},{"location":"lib/EpiLatentModels/internals/","page":"Internal API","title":"Internal API","text":"Documentation for EpiLatentModels.jl's internal interface.","category":"page"},{"location":"lib/EpiLatentModels/internals/#Contents","page":"Internal API","title":"Contents","text":"","category":"section"},{"location":"lib/EpiLatentModels/internals/","page":"Internal API","title":"Internal API","text":"Pages = [\"internals.md\"]\nDepth = 2:2","category":"page"},{"location":"lib/EpiLatentModels/internals/#Index","page":"Internal API","title":"Index","text":"","category":"section"},{"location":"lib/EpiLatentModels/internals/","page":"Internal API","title":"Internal API","text":"Pages = [\"internals.md\"]","category":"page"},{"location":"lib/EpiLatentModels/internals/#Internal-API","page":"Internal API","title":"Internal API","text":"","category":"section"},{"location":"lib/EpiLatentModels/internals/","page":"Internal API","title":"Internal API","text":"Modules = [EpiAware.EpiLatentModels]\nPublic = false","category":"page"},{"location":"lib/EpiLatentModels/internals/#EpiAware.EpiLatentModels.ARStep","page":"Internal API","title":"EpiAware.EpiLatentModels.ARStep","text":"struct ARStep{D<:(AbstractVector{<:Real})} <: AbstractAccumulationStep\n\nThe autoregressive (AR) step function struct\n\n\n\nFields\n\ndamp_AR::AbstractVector{<:Real}\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiLatentModels/internals/#EpiAware.EpiLatentModels.ARStep-Tuple{Any, Any}","page":"Internal API","title":"EpiAware.EpiLatentModels.ARStep","text":"The autoregressive (AR) step function for use with accumulate_scan.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiLatentModels/internals/#EpiAware.EpiAwareBase.broadcast_n-Tuple{RepeatBlock, Any, Any}","page":"Internal API","title":"EpiAware.EpiAwareBase.broadcast_n","text":"broadcast_n(_::RepeatBlock, n, period) -> Any\n\n\nA function that returns the length of the latent periods to generate using the RepeatBlock rule which is equal n divided by the period and rounded up to the nearest integer.\n\nArguments\n\nrule::RepeatBlock: The broadcasting rule.\nn: The number of samples to generate.\nperiod: The period of the broadcast.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiLatentModels/internals/#EpiAware.EpiAwareBase.broadcast_n-Tuple{RepeatEach, Any, Any}","page":"Internal API","title":"EpiAware.EpiAwareBase.broadcast_n","text":"broadcast_n(_::RepeatEach, n, period) -> Any\n\n\nA function that returns the length of the latent periods to generate using the RepeatEach rule which is equal to the period.\n\nArguments\n\nrule::RepeatEach: The broadcasting rule.\nn: The number of samples to generate.\nperiod: The period of the broadcast.\n\nReturns\n\nm: The length of the latent periods to generate.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiLatentModels/internals/#EpiAware.EpiAwareBase.generate_latent-Tuple{AR, Any}","page":"Internal API","title":"EpiAware.EpiAwareBase.generate_latent","text":"generate_latent(latent_model::AR, n) -> Any\n\n\nGenerate a latent AR series.\n\nArguments\n\nlatent_model::AR: The AR model.\nn::Int: The length of the AR series.\n\nReturns\n\nar::Vector{Float64}: The generated AR series.\n\nNotes\n\nThe length of damp_prior and init_prior must be the same.\nn must be longer than the order of the 
autoregressive process.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiLatentModels/internals/#EpiAware.EpiAwareBase.generate_latent-Tuple{BroadcastLatentModel, Any}","page":"Internal API","title":"EpiAware.EpiAwareBase.generate_latent","text":"generate_latent(model::BroadcastLatentModel, n) -> Any\n\n\nGenerates latent periods using the specified model and n number of samples.\n\nArguments\n\nmodel::BroadcastLatentModel: The broadcast latent model.\nn::Any: The number of samples to generate.\n\nReturns\n\nbroadcasted_latent: The generated broadcasted latent periods.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiLatentModels/internals/#EpiAware.EpiAwareBase.generate_latent-Tuple{CombineLatentModels, Any}","page":"Internal API","title":"EpiAware.EpiAwareBase.generate_latent","text":"generate_latent(\n latent_models::CombineLatentModels,\n n\n) -> Any\n\n\nGenerate latent variables using a combination of multiple latent models.\n\nArguments\n\nlatent_models::CombineLatentModels: An instance of the CombineLatentModels type representing the collection of latent models.\nn: The number of latent variables to generate.\n\nReturns\n\nThe combined latent variables generated from all the models.\n\nExample\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiLatentModels/internals/#EpiAware.EpiAwareBase.generate_latent-Tuple{ConcatLatentModels, Any}","page":"Internal API","title":"EpiAware.EpiAwareBase.generate_latent","text":"generate_latent(latent_models::ConcatLatentModels, n) -> Any\n\n\nGenerate latent variables by concatenating multiple latent models.\n\nArguments\n\nlatent_models::ConcatLatentModels: An instance of the ConcatLatentModels type representing the collection of latent models.\nn: The number of latent variables to generate.\n\nReturns\n\nconcatenated_latents: The combined latent variables generated from all the models.\nlatent_aux: A tuple containing the auxiliary latent variables generated from each individual model.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiLatentModels/internals/#EpiAware.EpiAwareBase.generate_latent-Tuple{DiffLatentModel, Any}","page":"Internal API","title":"EpiAware.EpiAwareBase.generate_latent","text":"generate_latent(latent_model::DiffLatentModel, n) -> Any\n\n\nGenerate a Turing model for n-step latent process Z_t using a differenced latent model defined by latent_model.\n\nArguments\n\nlatent_model::DiffLatentModel: The differential latent model.\nn: The length of the latent variables.\n\nTuring model specifications\n\nSampled random variables\n\nlatent_init: The initial latent process variables.\nOther random variables defined by model<:AbstractTuringLatentModel field of the undifferenced model.\n\nGenerated quantities\n\nA tuple containing the generated latent process as its first argument and a NamedTuple of sampled auxiliary variables as second argument.\n\nExample usage with DiffLatentModel model constructor\n\ngenerate_latent can be used to construct a Turing model for the differenced latent process. In this example, the underlying undifferenced process is a RandomWalk model.\n\nFirst, we construct a RandomWalk struct with an initial value prior and a step size standard deviation prior.\n\nusing Distributions, EpiAware\nrw = RandomWalk(Normal(0.0, 1.0), truncated(Normal(0.0, 0.05), 0.0, Inf))\n\nThen, we can use DiffLatentModel to construct a DiffLatentModel for d-fold differenced process with rw as the undifferenced latent process.\n\nWe have two constructor options for DiffLatentModel. 
The first option is to supply a common prior distribution for the initial terms and specify d as follows:\n\ndiff_model = DiffLatentModel(rw, Normal(); d = 2)\n\nOr we can supply a vector of priors for the initial terms and d is inferred as follows:\n\ndiff_model2 = DiffLatentModel(;undiffmodel = rw, init_priors = [Normal(), Normal()])\n\nThen, we can use generate_latent to construct a Turing model for the differenced latent process generating a length n process,\n\n# Construct a Turing model\nn = 100\ndifference_mdl = generate_latent(diff_model, n)\n\nNow we can use the Turing PPL API to sample underlying parameters and generate the unobserved latent process.\n\n#Sample random parameters from prior\nθ = rand(difference_mdl)\n#Get a sampled latent process as a generated quantity from the model\n(Z_t, _) = generated_quantities(difference_mdl, θ)\nZ_t\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiLatentModels/internals/#EpiAware.EpiAwareBase.generate_latent-Tuple{FixedIntercept, Any}","page":"Internal API","title":"EpiAware.EpiAwareBase.generate_latent","text":"generate_latent(latent_model::FixedIntercept, n) -> Any\n\n\nGenerate a latent intercept series with a fixed intercept value.\n\nArguments\n\nlatent_model::FixedIntercept: The fixed intercept latent model.\nn: The number of latent variables to generate.\n\nReturns\n\nlatent_vars: An array of length n filled with the fixed intercept value.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiLatentModels/internals/#EpiAware.EpiAwareBase.generate_latent-Tuple{HierarchicalNormal, Any}","page":"Internal API","title":"EpiAware.EpiAwareBase.generate_latent","text":"generate_latent(obs_model::HierarchicalNormal, n) -> Any\n\n\nfunction EpiAwareBase.generate_latent(obs_model::HierarchicalNormal, n)\n\nGenerate latent variables from the hierarchical normal distribution.\n\nArguments\n\nobs_model::HierarchicalNormal: The hierarchical normal distribution model.\nn: Number of latent variables to generate.\n\nReturns\n\nη_t: Generated latent variables.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiLatentModels/internals/#EpiAware.EpiAwareBase.generate_latent-Tuple{Intercept, Any}","page":"Internal API","title":"EpiAware.EpiAwareBase.generate_latent","text":"generate_latent(latent_model::Intercept, n) -> Any\n\n\nGenerate a latent intercept series.\n\nArguments\n\nlatent_model::Intercept: The intercept model.\nn::Int: The length of the intercept series.\n\nReturns\n\nintercept::Vector{Float64}: The generated intercept series.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiLatentModels/internals/#EpiAware.EpiAwareBase.generate_latent-Tuple{RandomWalk, Any}","page":"Internal API","title":"EpiAware.EpiAwareBase.generate_latent","text":"generate_latent(latent_model::RandomWalk, n) -> Any\n\n\nImplement the generate_latent function for the RandomWalk model.\n\nExample usage of generate_latent with RandomWalk type of latent process model\n\nusing Distributions, Turing, EpiAware\n\n# Create a RandomWalk model\nrw = RandomWalk(init_prior = Normal(2., 1.),\n std_prior = HalfNormal(0.1))\n\nThen, we can use generate_latent to construct a Turing model for a 10 step random walk.\n\n# Construct a Turing model\nrw_model = generate_latent(rw, 10)\n\nNow we can use the Turing PPL API to sample underlying parameters and generate the unobserved infections.\n\n#Sample random parameters from prior\nθ = rand(rw_model)\n#Get random walk sample path as a generated quantities from the model\nZ_t, _ = generated_quantities(rw_model, 
θ)\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiLatentModels/internals/#EpiAware.EpiAwareBase.generate_latent-Tuple{TransformLatentModel, Any}","page":"Internal API","title":"EpiAware.EpiAwareBase.generate_latent","text":"generate_latent(model::TransformLatentModel, n) -> Any\n\n\ngenerate_latent(model::TransformLatentModel, n)\n\nGenerate latent variables using the specified TransformLatentModel.\n\nArguments\n\nmodel::TransformLatentModel: The TransformLatentModel to generate latent variables from.\nn: The number of latent variables to generate.\n\nReturns\n\nThe transformed latent variables.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiObsModels/public/#Public-Documentation","page":"Public API","title":"Public Documentation","text":"","category":"section"},{"location":"lib/EpiObsModels/public/","page":"Public API","title":"Public API","text":"Documentation for EpiObsModels.jl's public interface.","category":"page"},{"location":"lib/EpiObsModels/public/","page":"Public API","title":"Public API","text":"See the Internals section of the manual for internal package docs covering all submodules.","category":"page"},{"location":"lib/EpiObsModels/public/#Contents","page":"Public API","title":"Contents","text":"","category":"section"},{"location":"lib/EpiObsModels/public/","page":"Public API","title":"Public API","text":"Pages = [\"public.md\"]\nDepth = 2:2","category":"page"},{"location":"lib/EpiObsModels/public/#Index","page":"Public API","title":"Index","text":"","category":"section"},{"location":"lib/EpiObsModels/public/","page":"Public API","title":"Public API","text":"Pages = [\"public.md\"]","category":"page"},{"location":"lib/EpiObsModels/public/#Public-API","page":"Public API","title":"Public API","text":"","category":"section"},{"location":"lib/EpiObsModels/public/","page":"Public API","title":"Public API","text":"Modules = [EpiAware.EpiObsModels]\nPrivate = false","category":"page"},{"location":"lib/EpiObsModels/public/#EpiAware.EpiObsModels","page":"Public API","title":"EpiAware.EpiObsModels","text":"Module for defining observation models.\n\n\n\n\n\n","category":"module"},{"location":"lib/EpiObsModels/public/#EpiAware.EpiObsModels.Aggregate","page":"Public API","title":"EpiAware.EpiObsModels.Aggregate","text":"struct Aggregate{M<:AbstractTuringObservationModel, I<:(AbstractVector{<:Int64}), J<:(AbstractVector{<:Bool})} <: AbstractTuringObservationModel\n\nAggregates observations over a specified time period. For efficiency it also only passes the aggregated observations to the submodel. 
The aggregation vector is internally broadcasted to the length of the observations and the present vector is broadcasted to the length of the aggregation vector using broadcast_n.\n\nFields\n\nmodel::AbstractTuringObservationModel: The submodel to use for the aggregated observations.\naggregation::AbstractVector{<: Int}: The number of time periods to aggregate over.\npresent::AbstractVector{<: Bool}: A vector of booleans indicating whether the observation is present or not.\n\nConstructors\n\nAggregate(model, aggregation): Constructs an Aggregate object and automatically sets the present field.\nAggregate(; model, aggregation): Constructs an Aggregate object and automatically sets the present field using named keyword arguments.\n\nExamples\n\nusing EpiAware\nweekly_agg = Aggregate(PoissonError(), [0, 0, 0, 0, 7, 0, 0])\ngen_obs = generate_observations(weekly_agg, missing, fill(1, 28))\ngen_obs()\n\n\n\nFields\n\nmodel::AbstractTuringObservationModel\naggregation::AbstractVector{<:Int64}\npresent::AbstractVector{<:Bool}\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiObsModels/public/#EpiAware.EpiObsModels.Ascertainment","page":"Public API","title":"EpiAware.EpiObsModels.Ascertainment","text":"struct Ascertainment{M<:AbstractTuringObservationModel, T<:AbstractTuringLatentModel, F<:Function, P<:String} <: AbstractTuringObservationModel\n\nThe Ascertainment struct represents an observation model that incorporates an ascertainment model. If a latent_prefix is supplied, the latent_model is wrapped in a call to PrefixLatentModel.\n\nConstructors\n\nAscertainment(model::M, latent_model::T, transform::F, latent_prefix::P) where {M <: AbstractTuringObservationModel, T <: AbstractTuringLatentModel, F <: Function, P <: String}: Constructs an Ascertainment instance with the specified observation model, latent model, transform function, and latent prefix.\nAscertainment(; model::M, latent_model::T, transform::F = (Y_t, x) -> xexpy.(Y_t, x), latent_prefix::P = \"Ascertainment\") where {M <: AbstractTuringObservationModel, T <: AbstractTuringLatentModel, F <: Function, P <: String}: Constructs an Ascertainment instance with the specified observation model, latent model, optional transform function (default: (Y_t, x) -> xexpy.(Y_t, x)), and optional latent prefix (default: \"Ascertainment\").\n\nExamples\n\nusing EpiAware, Turing\nobs = Ascertainment(model = NegativeBinomialError(), latent_model = FixedIntercept(0.1))\ngen_obs = generate_observations(obs, missing, fill(100, 10))\nrand(gen_obs)\n\n\n\nFields\n\nmodel::AbstractTuringObservationModel: The underlying observation model.\nlatent_model::AbstractTuringLatentModel: The latent model.\ntransform::Function: The function used to transform Y_t and the latent model output.\nlatent_prefix::String\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiObsModels/public/#EpiAware.EpiObsModels.LatentDelay","page":"Public API","title":"EpiAware.EpiObsModels.LatentDelay","text":"struct LatentDelay{M<:AbstractTuringObservationModel, T<:(AbstractVector{<:Real})} <: AbstractTuringObservationModel\n\nThe LatentDelay struct represents an observation model that introduces a latent delay in the observations. It is a subtype of AbstractTuringObservationModel.\n\nNote that the LatentDelay observation model shortens the expected observation vector by the length of the delay distribution and this is then passed to the underlying observation model. 
This is to prevent fitting to partially observed data.\n\nFields\n\nmodel::M: The underlying observation model.\nrev_pmf::T: The probability mass function (PMF) representing the delay distribution reversed.\n\nConstructors\n\nLatentDelay(model::M, distribution::C; D = nothing, Δd = 1.0) where {M <: AbstractTuringObservationModel, C <: ContinuousDistribution}: Constructs a LatentDelay object with the given underlying observation model and continuous distribution. The D parameter specifies the right truncation of the distribution, with default D = nothing indicates that the distribution should be truncated at its 99th percentile rounded to nearest multiple of Δd. The Δd parameter specifies the width of each delay interval.\nLatentDelay(model::M, pmf::T) where {M <: AbstractTuringObservationModel, T <: AbstractVector{<:Real}}: Constructs a LatentDelay object with the given underlying observation model and delay PMF.\n\nExamples\n\nusing Distributions, Turing, EpiAware\nobs = LatentDelay(NegativeBinomialError(), truncated(Normal(5.0, 2.0), 0.0, Inf))\nobs_model = generate_observations(obs, missing, fill(10, 30))\nobs_model()\n\n\n\nFields\n\nmodel::AbstractTuringObservationModel\nrev_pmf::AbstractVector{<:Real}\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiObsModels/public/#EpiAware.EpiObsModels.NegativeBinomialError","page":"Public API","title":"EpiAware.EpiObsModels.NegativeBinomialError","text":"struct NegativeBinomialError{S<:Distributions.Sampleable} <: AbstractTuringObservationErrorModel\n\nThe NegativeBinomialError struct represents an observation model for negative binomial errors. It is a subtype of AbstractTuringObservationModel.\n\nConstructors\n\nNegativeBinomialError(; cluster_factor_prior::Distribution = HalfNormal(0.1)): Constructs a NegativeBinomialError object with default values for the cluster factor prior.\nNegativeBinomialError(cluster_factor_prior::Distribution): Constructs a NegativeBinomialError object with a specified cluster factor prior.\n\nExamples\n\nusing Distributions, Turing, EpiAware\nnb = NegativeBinomialError()\nnb_model = generate_observations(nb, missing, fill(10, 10))\nrand(nb_model)\n\n\n\nFields\n\ncluster_factor_prior::Distributions.Sampleable: The prior distribution for the cluster factor.\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiObsModels/public/#EpiAware.EpiObsModels.PoissonError","page":"Public API","title":"EpiAware.EpiObsModels.PoissonError","text":"struct PoissonError <: AbstractTuringObservationErrorModel\n\nThe PoissonError struct represents an observation model for Poisson errors. It is a subtype of AbstractTuringObservationErrorModel.\n\nConstructors\n\nPoissonError(): Constructs a PoissonError object.\n\nExamples\n\nusing Distributions, Turing, EpiAware\npoi = PoissonError()\npoi_model = generate_observations(poi, missing, fill(10, 10))\nrand(poi_model)\n\n\n\nFields\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiObsModels/public/#EpiAware.EpiObsModels.PrefixObservationModel","page":"Public API","title":"EpiAware.EpiObsModels.PrefixObservationModel","text":"struct PrefixObservationModel{M<:AbstractTuringObservationModel, P<:String} <: AbstractTuringObservationModel\n\nGenerate an observation model with a prefix. 
A lightweight wrapper around `EpiAwareUtils.prefix_submodel`.\n\n# Constructors\n- `PrefixObservationModel(model::M, prefix::P)`: Create a `PrefixObservationModel` with the observation model `model` and the prefix `prefix`.\n- `PrefixObservationModel(; model::M, prefix::P)`: Create a `PrefixObservationModel` with the observation model `model` and the prefix `prefix`.\n\n# Examples\n```julia\nusing EpiAware\nobservation_model = PrefixObservationModel(Poisson(), \"Test\")\nobs = generate_observations(observation_model, 10)\nrand(obs)\n```\n\n\n\nFields\n\nmodel::AbstractTuringObservationModel: The observation model\nprefix::String: The prefix for the observation model\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiObsModels/public/#EpiAware.EpiObsModels.RecordExpectedObs","page":"Public API","title":"EpiAware.EpiObsModels.RecordExpectedObs","text":"struct RecordExpectedObs{M<:AbstractTuringObservationModel} <: AbstractTuringObservationModel\n\nRecord a variable (using the Turing := syntax) in the observation model.\n\n# Fields\n- `model::AbstractTuringObservationModel`: The observation model to dispatch to.\n\n# Constructors\n\n- `RecordExpectedObs(model::AbstractTuringObservationModel)`: Record the expected observation from the model as `exp_y_t`.\n\n# Examples\n\n```julia\nusing EpiAware, Turing\nmdl = RecordExpectedObs(NegativeBinomialError())\ngen_obs = generate_observations(mdl, missing, fill(100, 10))\nsample(gen_obs, Prior(), 10)\n```\n\n\n\nFields\n\nmodel::AbstractTuringObservationModel\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiObsModels/public/#EpiAware.EpiObsModels.StackObservationModels","page":"Public API","title":"EpiAware.EpiObsModels.StackObservationModels","text":"struct StackObservationModels{M<:(AbstractVector{<:AbstractTuringObservationModel}), N<:(AbstractVector{<:AbstractString})} <: AbstractTuringObservationModel\n\nA stack of observation models that are looped over to generate observations for each model in the stack. Note that the model names are used to prefix the parameters in each model (so if I have a model named cases and a parameter y_t, the parameter in the model will be cases.y_t). Inside the constructor PrefixObservationModel is wrapped around each observation model.\n\nConstructors\n\nStackObservationModels(models::Vector{<:AbstractTuringObservationModel}, model_names::Vector{<:AbstractString}): Construct a StackObservationModels object with a vector of observation models and a vector of model names.\n`StackObservationModels(; models::Vector{<:AbstractTuringObservationModel},\nmodel_names::Vector{<:AbstractString}): Construct aStackObservationModels` object with a vector of observation models and a vector of model names.\nStackObservationModels(models::NamedTuple{names, T}): Construct a StackObservationModels object with a named tuple of observation models. 
The model names are automatically generated from the keys of the named tuple.\n\nExample\n\nusing EpiAware, Turing\n\nobs = StackObservationModels(\n (cases = PoissonError(), deaths = NegativeBinomialError())\n)\ny_t = (cases = missing, deaths = missing)\nobs_model = generate_observations(obs, y_t, fill(10, 10))\nrand(obs_model)\nsamples = sample(obs_model, Prior(), 100; progress = false)\n\ncases_y_t = group(samples, \"cases.y_t\")\ncases_y_t\n\ndeaths_y_t = group(samples, \"deaths.y_t\")\ndeaths_y_t\n\n\n\nFields\n\nmodels::AbstractVector{<:AbstractTuringObservationModel}: A vector of observation models.\nmodel_names::AbstractVector{<:AbstractString}: A vector of observation model names\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiObsModels/public/#EpiAware.EpiObsModels.ascertainment_dayofweek-Tuple{AbstractTuringObservationModel}","page":"Public API","title":"EpiAware.EpiObsModels.ascertainment_dayofweek","text":"ascertainment_dayofweek(\n model::AbstractTuringObservationModel;\n latent_model,\n transform,\n latent_prefix\n) -> Ascertainment{M, AbstractTuringLatentModel, EpiAware.EpiObsModels.var\"#18#20\", String} where M<:AbstractTuringObservationModel\n\n\nCreate an Ascertainment object that models the ascertainment process based on the day of the week.\n\nArguments\n\nmodel::AbstractTuringObservationModel: The observation model to be used.\nlatent_model::AbstractTuringLatentModel: The latent model to be used. Default is HierarchicalNormal() which is a hierarchical normal distribution.\ntransform: The transform function to be used. Default is (x, y) -> x .* y.\n\nThis function is used to transform the latent model after broadcasting to periodic weekly has been applied.\n\nlatent_prefix: The prefix to be used for the latent model. Default is \"DayofWeek\".\n\nReturns\n\nAscertainment: The Ascertainment object that models the ascertainment process based on the day of the week.\n\nExamples\n\nusing EpiAware\nobs = ascertainment_dayofweek(PoissonError())\ngen_obs = generate_observations(obs, missing, fill(100, 14))\ngen_obs()\nrand(gen_obs)\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiObsModels/public/#EpiAware.EpiObsModels.generate_observation_error_priors-Tuple{AbstractTuringObservationErrorModel, Any, Any}","page":"Public API","title":"EpiAware.EpiObsModels.generate_observation_error_priors","text":"generate_observation_error_priors(\n obs_model::AbstractTuringObservationErrorModel,\n y_t,\n Y_t\n) -> Any\n\n\nGenerates priors for the observation error model. This should return a named tuple containing the priors required for generating the observation error distribution.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiObsModels/public/#EpiAware.EpiObsModels.generate_observation_error_priors-Tuple{NegativeBinomialError, Any, Any}","page":"Public API","title":"EpiAware.EpiObsModels.generate_observation_error_priors","text":"generate_observation_error_priors(\n obs_model::NegativeBinomialError,\n Y_t,\n y_t\n) -> Any\n\n\nGenerates observation error priors based on the NegativeBinomialError observation model. 
This function generates the cluster factor prior for the negative binomial error model.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiObsModels/public/#EpiAware.EpiObsModels.observation_error-Tuple{AbstractTuringObservationErrorModel, Any}","page":"Public API","title":"EpiAware.EpiObsModels.observation_error","text":"observation_error(\n obs_model::AbstractTuringObservationErrorModel,\n Y_t\n) -> SafePoisson\n\n\nThe observation error distribution for the observation error model. This function should return the distribution for the observation error given the expected observation value Y_t and the priors generated by generate_observation_error_priors.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiObsModels/public/#EpiAware.EpiObsModels.observation_error-Tuple{NegativeBinomialError, Any, Any}","page":"Public API","title":"EpiAware.EpiObsModels.observation_error","text":"observation_error(\n obs_model::NegativeBinomialError,\n Y_t,\n sq_cluster_factor\n) -> SafeNegativeBinomial\n\n\nThis function generates the observation error model based on the negative binomial error model with a positive shift. It dispatches to the NegativeBinomialMeanClust distribution.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiObsModels/public/#EpiAware.EpiObsModels.observation_error-Tuple{PoissonError, Any}","page":"Public API","title":"EpiAware.EpiObsModels.observation_error","text":"observation_error(\n obs_model::PoissonError,\n Y_t\n) -> SafePoisson\n\n\nThe observation error model for Poisson errors. This function generates the observation error model based on the Poisson error model.\n\n\n\n\n\n","category":"method"},{"location":"getting-started/explainers/julia/#Julia-for-EpiAware","page":"Working with Julia","title":"Julia for EpiAware","text":"","category":"section"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"Julia is a programming language aimed at technical computing. This guide is aimed at helping you set up Julia on your system and pointing towards resources for learning more.","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"[!NOTE] If you are familiar with other languages with tooling for technical computing (e.g. R, MATLAB, Python), these noteworthy differences may be useful.","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"Pages = [\"getting-started/tutorials/julia.md\"]\nDepth = 3","category":"page"},{"location":"getting-started/explainers/julia/#What-this-guide-is-and-isn't","page":"Working with Julia","title":"What this guide is and isn't","text":"","category":"section"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"This isn't a guide to learning the Julia programming language. Instead, we provide an opinionated guide to setting up your system to use Julia effectively in project workflows, aimed at people who have some familiarity with Julia but may have only developed projects in other languages (e.g. 
R, MATLAB, Python).","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"If you want to learn more about the Julia programming language, we recommend the following resources:","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"Julia Documentation - getting started.\nJulia Academy.\nJulia learning resources.\nJuliaHub.\nJulia Discourse.\nJulia Slack.","category":"page"},{"location":"getting-started/explainers/julia/#Julia-Installation-with-Juliaup","page":"Working with Julia","title":"Julia Installation with Juliaup","text":"","category":"section"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"Download Juliaup: This is a cross-platform installer/updater for the Julia programming language. It simplifies the process of installing and managing Julia versions. Go to the Juliaup GitHub repository or to the official Julia website for installation instructions.\nVerify Installation: Open a terminal (or Command Prompt on Windows) and type julia to start the Julia REPL (Read-Eval-Print Loop). You should see a Julia prompt julia>.","category":"page"},{"location":"getting-started/explainers/julia/#Basic-usage-of-Juliaup","page":"Working with Julia","title":"Basic usage of Juliaup","text":"","category":"section"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"Juliaup is a tool for managing Julia installations on your system. It allows you to install, update, and switch between different versions of Julia. Details are available at the Juliaup GitHub repository, but here are some examples of common commands:","category":"page"},{"location":"getting-started/explainers/julia/#Add-a-specific-version-of-Julia","page":"Working with Julia","title":"Add a specific version of Julia","text":"","category":"section"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"Juliaup default installs the latest release version of Julia. To install a specific version, use the add command followed by the version number. For example, to install Julia version 1.9.3, use the following command:","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"% juliaup add 1.9.3","category":"page"},{"location":"getting-started/explainers/julia/#Use-a-specific-version-of-Julia","page":"Working with Julia","title":"Use a specific version of Julia","text":"","category":"section"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"To switch between different versions of Julia, use + julia-version after the julia command. For example, to use Julia version 1.9.3, use the following command:","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"% julia +1.9.3","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"This will use the specified version of Julia for the current REPL. 
In general, adding the + julia-version flag after the julia command will execute using the specified version of Julia.","category":"page"},{"location":"getting-started/explainers/julia/#Check-versions-of-Julia-installed","page":"Working with Julia","title":"Check versions of Julia installed","text":"","category":"section"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"To see a list of all the versions of Julia installed on your system, use the following command:","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"% juliaup list","category":"page"},{"location":"getting-started/explainers/julia/#Update-Julia-(all-versions-installed)","page":"Working with Julia","title":"Update Julia (all versions installed)","text":"","category":"section"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"This will update all versions of Julia installed on your system to their latest release versions.","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"% juliaup update","category":"page"},{"location":"getting-started/explainers/julia/#Usage-of-Julia-environments","page":"Working with Julia","title":"Usage of Julia environments","text":"","category":"section"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"The environment of a Julia project determines which packages, and their version, are available to the project. This is useful when you want to ensure that a project uses a specific version of a package, or when you want to isolate the project from other projects on your system. As per other languages, Julia environments are useful for managing dependencies and ensuring reproducibility.","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"The most common usage of environments is to create a new explicit environment for a project in a directory. This creates a Project.toml file in the directory that specifies the dependencies for the project and a Manifest.toml file that specifies the exact versions of the dependencies, and their underlying dependencies. We'll discuss how to set up a new environment for a project in the REPL section.","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"Julia environments can be stacked. This means that you can have a primary environment embedded in the stacked environment, along with secondary environment(s) that define common packages to be available to many projects.","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"From a project development point of view, most commonly the project environment will be the primary environment, isolated from other project environments. And the environment of the Julia version installation (e.g. the @v1.10 env) will be a secondary environment because its in the default LOAD_PATH Julia environmental variable. You can add packages to the Julia version environment that you want to be available to all projects as we'll show in the REPL section. 
See section Recommended packages for the primary Julia environment for our recommendations.","category":"page"},{"location":"getting-started/explainers/julia/#Using-the-Julia-REPL-in-projects","page":"Working with Julia","title":"Using the Julia REPL in projects","text":"","category":"section"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"The Julia REPL (Read-Eval-Print Loop) is an interactive programming environment that takes single user inputs (i.e., single expressions), evaluates them, and returns the result to the user.","category":"page"},{"location":"getting-started/explainers/julia/#Package-management-programmatically-and-from-REPL","page":"Working with Julia","title":"Package management programmatically and from REPL","text":"","category":"section"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"Julia has a built-in package manager called Pkg, which is documented briefly here and in more detail here. The package manager is used to install, update, and manage Julia packages and environments.","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"You can use Pkg programmatically as a normal Julia package, which is often done in scripts. For example, if we wanted to install the OrdinaryDiffEq package as part of executing a julia script, we would add the following lines to the script:","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"using Pkg\nPkg.add(\"OrdinaryDiffEq\")","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"However, you can also use the package manager interactively from the REPL. In our opinion, this is the more common usage of package management in Julia project development.","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"For example, to install the OrdinaryDiffEq package from the REPL you can switch to package mode by typing ] and then type add OrdinaryDiffEq. 
To exit package mode, type backspace.","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"julia> ]\n(@v1.10) pkg> add OrdinaryDiffEq","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"This workflow is often more convenient than the programmatic interface, especially when setting packages you want to install to the environment for your julia installation, e.g the @v1.10 environment for julia 1.10.","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"By default, the environment for a julia installation is stacked as a primary environment, so that the packages you install in the julia installation environment are available to all projects.","category":"page"},{"location":"getting-started/explainers/julia/#Using-the-Julia-REPL-to-set-up-active-project-environments","page":"Working with Julia","title":"Using the Julia REPL to set up active project environments","text":"","category":"section"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"To set a new active project environment, you can use the Pkg package manager from the REPL with the command activate with a local directory path. The project environment is named after the directory hosting the Project.toml file. After activating the project environment, you can manage packages to the project environment, as well as use packages from the primary stacked environment as described above.","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"Here is an example of how you can create a new environment for a project when the REPL working directory is in some directory /myproject, and then add OrdinaryDiffEq to the project environment:","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"julia> pwd() #Check your directory\n# \"path/to/myproject\"\njulia> ]\n(@v1.10) pkg> activate .\n(myproject) pkg> add OrdinaryDiffEq","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"Note that if the project directory doesn't have a Project.toml file, one will be created when you add the first package to the project environment.","category":"page"},{"location":"getting-started/explainers/julia/#Experimenting-with-Julia-from-REPL-using-a-temporary-environment","page":"Working with Julia","title":"Experimenting with Julia from REPL using a temporary environment","text":"","category":"section"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"It is quite common to want to experiment with new Julia packages and code snippets. A convenient way to do this without setting up a new project environment or adding dependencies to the primary environment is to use a temporary environment. 
To do this:","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"julia> ]\n(@v1.10) pkg> activate --temp\n(jl_FTIz6j) pkg> add InterestingPackage","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"This will create a temporary environment, stacked with the primary environment, that is not saved to disk, and you can add packages to this environment without affecting the primary environment or any project environments. When you exit the REPL, the temporary environment will be deleted.","category":"page"},{"location":"getting-started/explainers/julia/#Recommended-packages-for-the-\"global\"-Julia-version-environment","page":"Working with Julia","title":"Recommended packages for the \"global\" Julia version environment","text":"","category":"section"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"In our view these packages are useful for your Julia version environment, e.g. the v1.10 env, which will be available to other environments.","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"Revise: For modifying package code and using the changes without restarting the Julia session.\nTerm: For pretty and stylized REPL output (including error messages).\nJuliaFormatter: For code formatting.\nDocumenter: For local documentation generation.\nPluto: A native Julia notebook for interactive development.\nTestEnv: For easy use of test environments for package testing.\nUnicodePlots: For simple and quick plotting in the REPL without needing to install a fully featured plotting package.","category":"page"},{"location":"getting-started/explainers/julia/#startup.jl-recommendation","page":"Working with Julia","title":"startup.jl recommendation","text":"","category":"section"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"Revise and Term are useful to have available in every Julia session. It is convenient to have these packages loaded automatically when you start a Julia session by adding a startup.jl file. This file should be located in the ~/.julia/config directory. Here is an example of a startup.jl file that loads Revise and Term:","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"atreplinit() do repl\n # Load Revise if it is installed\n try\n @eval using Revise\n catch e\n @warn \"error while importing Revise\" e\n end\n # Load Term if it is installed\n try\n @eval using Term\n @eval install_term_repr()\n @eval install_term_stacktrace()\n catch e\n @warn \"error while importing Term\" e\n end\nend\n","category":"page"},{"location":"getting-started/explainers/julia/#Developing-a-EpiAware-project-from-VS-Code","page":"Working with Julia","title":"Developing a EpiAware-project from VS-Code","text":"","category":"section"},{"location":"getting-started/explainers/julia/#Julia-extension-for-VS-Code","page":"Working with Julia","title":"Julia extension for VS-Code","text":"","category":"section"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"Visual Studio Code (VS-Code) is a popular code editor that supports Julia development. 
The Julia extension for VS-Code provides an interactive development environment that will be familiar to users of other scientific IDEs (e.g. developing R projects in RStudio or using the MATLAB application).","category":"page"},{"location":"getting-started/explainers/julia/#Features-of-the-Julia-extension-for-VS-Code","page":"Working with Julia","title":"Features of the Julia extension for VS-Code","text":"","category":"section"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"It is worth reading both the VS-Code documentation and the Julia extension documentation, however, here are some highlights:","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"Julia REPL: The Julia extension provides an integrated REPL in the TERMINAL pane that allows you to interact with Julia code directly from the editor. For example, you can run code snippets from highlighting or code blocks defined by ## comments in the scripts.\nPlotting: By default, plots generated by featured plotting packages (e.g. Plots.jl) will be displayed in a Plot pane generated by the VS-Code editor.\nJulia Tab: The Julia extension provides a Julia tab with the following sub-tabs:\nWorkspace: This allows you to inspect the modules, functions and variables in your current REPL session. For variables that can be understood as a Table, you can view them in a tabular format from the workspace tab.\nDocumentation: This allows you to view the documentation for functions and types in the Julia standard library and any packages you have installed.\nPlot Navigator: This allows you to navigate the plots generated by the featured plotting packages.\nTesting: The Julia extension provides interaction between the Testing tab in VS-Code with Julia tests defined using the Julia package TestItems macro @testitem run with TestItemRunner.","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"Other standard IDE features are Code completion, Code linting, Code formatting, Debugging, and Profiling.","category":"page"},{"location":"getting-started/explainers/julia/#Recommended-settings-for-the-Julia-extension-in-VS-Code","page":"Working with Julia","title":"Recommended settings for the Julia extension in VS-Code","text":"","category":"section"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"The settings of the Julia extension can be found by accessing Preferences: Open User Settings from the command palette in VS-Code and then searching for Julia.","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"We recommend the following workplace settings saved in a file .vscode/settings.json relative to your working directory:","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"{\n \"[julia]\": {\n \"editor.detectIndentation\": false,\n \"editor.insertSpaces\": true,\n \"editor.tabSize\": 4,\n \"files.insertFinalNewline\": true,\n \"files.trimFinalNewlines\": true,\n \"files.trimTrailingWhitespace\": true,\n \"editor.rulers\": [80],\n \"files.eol\": \"\\n\"\n },\n \"julia.liveTestFile\": \"path/to/runtests.jl\",\n \"julia.environmentPath\": 
\"path/to/project/directory\",\n}","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"These settings set basic code formatting and whitespace settings for Julia files, as well as setting the path to the test file for the project and the path to the project directory for the environment.","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"The VS-Code command Julia: Start REPL will start a REPL in TERMINAL tab in the editor with the environment set to the project directory and the Testing tab will detect the defined tests for the project.","category":"page"},{"location":"getting-started/explainers/julia/#Literate-programming-with-Julia-in-EpiAware","page":"Working with Julia","title":"Literate programming with Julia in EpiAware","text":"","category":"section"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"Its common to develop technical computing projects using a literate programming style, where code and documentation are interwoven. Julia supports this style of programming through a number of packages. In EpiAware we recommend the following:","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"Pluto: A native Julia notebook for interactive development. Pluto notebooks are reactive, meaning that the output of all cells are updated as input changes. Installation instructions are available here. Pluto notebook files have the extension .jl and can be run as scripts.\nQuarto: A literate programming tool that allows you to write documents in markdown with embedded Julia code. Installation instructions are available here. Quarto files have the extension .qmd.","category":"page"},{"location":"getting-started/explainers/julia/","page":"Working with Julia","title":"Working with Julia","text":"We use Pluto for interactive development and Quarto for generating reports and academic articles. 
Both tools are useful for developing reproducible workflows.","category":"page"},{"location":"getting-started/explainers/inference/#Inference","page":"Inference","title":"Inference","text":"","category":"section"},{"location":"lib/EpiInference/public/#Public-Documentation","page":"Public API","title":"Public Documentation","text":"","category":"section"},{"location":"lib/EpiInference/public/","page":"Public API","title":"Public API","text":"Documentation for EpiInference.jl's public interface.","category":"page"},{"location":"lib/EpiInference/public/","page":"Public API","title":"Public API","text":"See the Internals section of the manual for internal package docs covering all submodules.","category":"page"},{"location":"lib/EpiInference/public/#Contents","page":"Public API","title":"Contents","text":"","category":"section"},{"location":"lib/EpiInference/public/","page":"Public API","title":"Public API","text":"Pages = [\"public.md\"]\nDepth = 2:2","category":"page"},{"location":"lib/EpiInference/public/#Index","page":"Public API","title":"Index","text":"","category":"section"},{"location":"lib/EpiInference/public/","page":"Public API","title":"Public API","text":"Pages = [\"public.md\"]","category":"page"},{"location":"lib/EpiInference/public/#Public-API","page":"Public API","title":"Public API","text":"","category":"section"},{"location":"lib/EpiInference/public/","page":"Public API","title":"Public API","text":"Modules = [EpiAware.EpiInference]\nPrivate = false","category":"page"},{"location":"lib/EpiInference/public/#EpiAware.EpiInference","page":"Public API","title":"EpiAware.EpiInference","text":"Module for defining inference methods.\n\n\n\n\n\n","category":"module"},{"location":"lib/EpiInference/public/#EpiAware.EpiInference.ManyPathfinder","page":"Public API","title":"EpiAware.EpiInference.ManyPathfinder","text":"struct ManyPathfinder <: AbstractEpiOptMethod\n\nA variational inference method that runs manypathfinder.\n\n\n\nFields\n\nndraws::Int64: Number of draws per pathfinder run.\nnruns::Int64: Number of many pathfinder runs.\nmaxiters::Int64: Maximum number of optimization iterations for each run.\nmax_tries::Int64: Maximum number of tries if all runs fail.\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiInference/public/#EpiAware.EpiInference.NUTSampler","page":"Public API","title":"EpiAware.EpiInference.NUTSampler","text":"struct NUTSampler{A<:ADTypes.AbstractADType, E<:AbstractMCMC.AbstractMCMCEnsemble, M} <: AbstractEpiSamplingMethod\n\nA NUTS method for sampling from a DynamicPPL.Model object.\n\nThe NUTSampler struct represents using the No-U-Turn Sampler (NUTS) to sample from the distribution defined by a DynamicPPL.Model.\n\n\n\nFields\n\ntarget_acceptance::Float64: The target acceptance rate for the sampler.\nadtype::ADTypes.AbstractADType: The automatic differentiation type used for computing gradients.\nmcmc_parallel::AbstractMCMC.AbstractMCMCEnsemble: The parallelization strategy for the MCMC sampler.\nnchains::Int64: The number of MCMC chains to run.\nmax_depth::Int64: Tree depth limit for the NUTS sampler.\nΔ_max::Float64: Divergence threshold for the NUTS sampler.\ninit_ϵ::Float64: The initial step size for the NUTS sampler.\nndraws::Int64: The number of samples to draw from each chain.\nmetricT::Any: The metric type to use for the HMC sampler.\nnadapts::Int64: number of adaptation steps\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiInference/public/#EpiAware.EpiInference.manypathfinder-Tuple{DynamicPPL.Model, Any}","page":"Public 
API","title":"EpiAware.EpiInference.manypathfinder","text":"manypathfinder(\n mdl::DynamicPPL.Model,\n ndraws;\n nruns,\n maxiters,\n max_tries,\n kwargs...\n) -> Any\n\n\nRun multiple instances of the pathfinder algorithm and returns the pathfinder run with the largest ELBO estimate.\n\nArguments\n\nmdl::DynamicPPL.Model: The model to perform inference on.\nnruns::Int: The number of pathfinder runs to perform.\nndraws::Int: The number of draws per pathfinder run, readjusted to be at least as large as the number of chains.\nnchains::Int: The number of chains that will be initialised by pathfinder draws.\nmaxiters::Int: The maximum number of optimizer iterations per pathfinder run.\nmax_tries::Int: The maximum number of extra tries to find a valid pathfinder result.\nkwargs...: Additional keyword arguments passed to pathfinder.\n\nReturns\n\nbest_pfs::PathfinderResult: Best pathfinder result by estimated ELBO.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiLatentModels/public/#Public-Documentation","page":"Public API","title":"Public Documentation","text":"","category":"section"},{"location":"lib/EpiLatentModels/public/","page":"Public API","title":"Public API","text":"Documentation for EpiLatentModels.jl's public interface.","category":"page"},{"location":"lib/EpiLatentModels/public/","page":"Public API","title":"Public API","text":"See the Internals section of the manual for internal package docs covering all submodules.","category":"page"},{"location":"lib/EpiLatentModels/public/#Contents","page":"Public API","title":"Contents","text":"","category":"section"},{"location":"lib/EpiLatentModels/public/","page":"Public API","title":"Public API","text":"Pages = [\"public.md\"]\nDepth = 2:2","category":"page"},{"location":"lib/EpiLatentModels/public/#Index","page":"Public API","title":"Index","text":"","category":"section"},{"location":"lib/EpiLatentModels/public/","page":"Public API","title":"Public API","text":"Pages = [\"public.md\"]","category":"page"},{"location":"lib/EpiLatentModels/public/#Public-API","page":"Public API","title":"Public API","text":"","category":"section"},{"location":"lib/EpiLatentModels/public/","page":"Public API","title":"Public API","text":"Modules = [EpiAware.EpiLatentModels]\nPrivate = false","category":"page"},{"location":"lib/EpiLatentModels/public/#EpiAware.EpiLatentModels","page":"Public API","title":"EpiAware.EpiLatentModels","text":"Module for defining latent models.\n\n\n\n\n\n","category":"module"},{"location":"lib/EpiLatentModels/public/#EpiAware.EpiLatentModels.AR","page":"Public API","title":"EpiAware.EpiLatentModels.AR","text":"struct AR{D<:Distributions.Sampleable, S<:Distributions.Sampleable, I<:Distributions.Sampleable, P<:Int64} <: AbstractTuringLatentModel\n\nThe autoregressive (AR) model struct.\n\nConstructors\n\nAR(damp_prior::Distribution, std_prior::Distribution, init_prior::Distribution; p::Int = 1): Constructs an AR model with the specified prior distributions for damping coefficients, standard deviation, and initial conditions. The order of the AR model can also be specified.\nAR(; damp_priors::Vector{D} = [truncated(Normal(0.0, 0.05))], std_prior::Distribution = truncated(Normal(0.0, 0.05), 0.0, Inf), init_priors::Vector{I} = [Normal()]) where {D <: Distribution, I <: Distribution}: Constructs an AR model with the specified prior distributions for damping coefficients, standard deviation, and initial conditions. 
The order of the AR model is determined by the length of the damp_priors vector.\nAR(damp_prior::Distribution, std_prior::Distribution, init_prior::Distribution, p::Int): Constructs an AR model with the specified prior distributions for damping coefficients, standard deviation, and initial conditions. The order of the AR model is explicitly specified.\n\nExamples\n\nusing Distributions\nusing EpiAware\nar = AR()\nar_model = generate_latent(ar, 10)\nrand(ar_model)\n\n\n\nFields\n\ndamp_prior::Distributions.Sampleable: Prior distribution for the damping coefficients.\nstd_prior::Distributions.Sampleable: Prior distribution for the standard deviation.\ninit_prior::Distributions.Sampleable: Prior distribution for the initial conditions\np::Int64: Order of the AR model.\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiLatentModels/public/#EpiAware.EpiLatentModels.BroadcastLatentModel","page":"Public API","title":"EpiAware.EpiLatentModels.BroadcastLatentModel","text":"struct BroadcastLatentModel{M<:AbstractTuringLatentModel, P<:Integer, B<:AbstractBroadcastRule} <: AbstractTuringLatentModel\n\nThe BroadcastLatentModel struct represents a latent model that supports broadcasting of latent periods.\n\nConstructors\n\nBroadcastLatentModel(;model::M; period::Int, broadcast_rule::B): Constructs a BroadcastLatentModel with the given model, period, and broadcast_rule.\nBroadcastLatentModel(model::M, period::Int, broadcast_rule::B): An alternative constructor that allows the model, period, and broadcast_rule to be specified without keyword arguments.\n\nExamples\n\nusing EpiAware, Turing\neach_model = BroadcastLatentModel(RandomWalk(), 7, RepeatEach())\ngen_each_model = generate_latent(each_model, 10)\nrand(gen_each_model)\n\nblock_model = BroadcastLatentModel(RandomWalk(), 3, RepeatBlock())\ngen_block_model = generate_latent(block_model, 10)\nrand(gen_block_model)\n\n\n\nFields\n\nmodel::AbstractTuringLatentModel: The underlying latent model.\nperiod::Integer: The period of the broadcast.\nbroadcast_rule::AbstractBroadcastRule: The broadcast rule to be applied.\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiLatentModels/public/#EpiAware.EpiLatentModels.CombineLatentModels","page":"Public API","title":"EpiAware.EpiLatentModels.CombineLatentModels","text":"struct CombineLatentModels{M<:(AbstractVector{<:AbstractTuringLatentModel}), P<:(AbstractVector{<:String})} <: AbstractTuringLatentModel\n\nThe CombineLatentModels struct.\n\nThis struct is used to combine multiple latent models into a single latent model. If a prefix is supplied wraps each model with PrefixLatentModel.\n\nConstructors\n\nCombineLatentModels(models::M, prefixes::P) where {M <: AbstractVector{<:AbstractTuringLatentModel}, P <: AbstractVector{<:String}}: Constructs a CombineLatentModels instance with specified models and prefixes, ensuring that there are at least two models and the number of models and prefixes are equal.\nCombineLatentModels(models::M) where {M <: AbstractVector{<:AbstractTuringLatentModel}}: Constructs a CombineLatentModels instance with specified models, automatically generating prefixes for each model. 
The\n\nautomatic prefixes are of the form Combine.1, Combine.2, etc.\n\nExamples\n\nusing EpiAware, Distributions\ncombined_model = CombineLatentModels([Intercept(Normal(2, 0.2)), AR()])\nlatent_model = generate_latent(combined_model, 10)\nlatent_model()\n\n\n\nFields\n\nmodels::AbstractVector{<:AbstractTuringLatentModel}: A vector of latent models\nprefixes::AbstractVector{<:String}: A vector of prefixes for the latent models\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiLatentModels/public/#EpiAware.EpiLatentModels.ConcatLatentModels","page":"Public API","title":"EpiAware.EpiLatentModels.ConcatLatentModels","text":"struct ConcatLatentModels{M<:(AbstractVector{<:AbstractTuringLatentModel}), N<:Int64, F<:Function, P<:(AbstractVector{<:String})} <: AbstractTuringLatentModel\n\nThe ConcatLatentModels struct.\n\nThis struct is used to concatenate multiple latent models into a single latent model.\n\nConstructors\n\nConcatLatentModels(models::M, no_models::I, dimension_adaptor::F, prefixes::P) where {M <: AbstractVector{<:AbstractTuringLatentModel}, I <: Int, F <: Function, P <: AbstractVector{String}}: Constructs a ConcatLatentModels instance with specified models, number of models, dimension adaptor, and prefixes.\nConcatLatentModels(models::M, dimension_adaptor::F; prefixes::P = \"Concat.\" * string.(1:length(models))) where {M <: AbstractVector{<:AbstractTuringLatentModel}, F <: Function}: Constructs a ConcatLatentModels instance with specified models and dimension adaptor. The number of models is automatically determined as are the prefixes (of the form Concat.1, Concat.2, etc.) by default.\nConcatLatentModels(models::M; dimension_adaptor::Function, prefixes::P) where {M <: AbstractVector{<:AbstractTuringLatentModel}, P <: AbstractVector{String}}: Constructs a ConcatLatentModels instance with specified models, dimension adaptor, prefixes, and automatically determines the number of models.The default dimension adaptor is equal_dimensions. The default prefixes are of the form Concat.1, Concat.2, etc.\nConcatLatentModels(; models::M, dimension_adaptor::Function, prefixes::P) where {M <: AbstractVector{<:AbstractTuringLatentModel}, P <: AbstractVector{String}}: Constructs a ConcatLatentModels instance with specified models, dimension adaptor, prefixes, and automatically determines the number of models. The default dimension adaptor is equal_dimensions. The default prefixes are of the form Concat.1, Concat.2, etc.\n\nExamples\n\nusing EpiAware, Distributions\ncombined_model = ConcatLatentModels([Intercept(Normal(2, 0.2)), AR()])\nlatent_model = generate_latent(combined_model, 10)\nlatent_model()\n\n\n\nFields\n\nmodels::AbstractVector{<:AbstractTuringLatentModel}: A vector of latent models\nno_models::Int64: The number of models in the collection\ndimension_adaptor::Function: The dimension function for the latent variables. 
By default this divides the number of latent variables by the number of models and returns a vector of dimensions rounding up the first element and rounding down the rest.\nprefixes::AbstractVector{<:String}: A vector of prefixes for the latent models\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiLatentModels/public/#EpiAware.EpiLatentModels.DiffLatentModel","page":"Public API","title":"EpiAware.EpiLatentModels.DiffLatentModel","text":"struct DiffLatentModel{M<:AbstractTuringLatentModel, P<:Distributions.Distribution} <: AbstractTuringLatentModel\n\nModel the latent process as a d-fold differenced version of another process.\n\nMathematical specification\n\nLet Delta be the differencing operator. If tildeZ_t is a realisation of undifferenced latent model supplied to DiffLatentModel, then the differenced process is given by,\n\nDelta^(d) Z_t = tildeZ_t quad t = d+1 ldots\n\nWe can recover Z_t by applying the inverse differencing operator Delta^-1, which corresponds to the cumulative sum operator cumsum in Julia, d-times. The d initial terms Z_1 ldots Z_d are inferred.\n\nConstructors\n\nDiffLatentModel(latent_model, init_prior_distribution::Distribution; d::Int) Constructs a DiffLatentModel for d-fold differencing with latent_model as the undifferenced latent process. All initial terms have common prior init_prior_distribution.\nDiffLatentModel(;model, init_priors::Vector{D} where {D <: Distribution}) Constructs a DiffLatentModel for d-fold differencing with latent_model as the undifferenced latent process. The d initial terms have priors given by the vector init_priors, therefore length(init_priors) sets d.\n\nExample usage with generate_latent\n\ngenerate_latent can be used to construct a Turing model for the differenced latent process. In this example, the underlying undifferenced process is a RandomWalk model.\n\nFirst, we construct a RandomWalk struct with an initial value prior and a step size standard deviation prior.\n\nusing Distributions, EpiAware\nrw = RandomWalk(Normal(0.0, 1.0), truncated(Normal(0.0, 0.05), 0.0, Inf))\n\nThen, we can use DiffLatentModel to construct a DiffLatentModel for d-fold differenced process with rw as the undifferenced latent process.\n\nWe have two constructor options for DiffLatentModel. 
The first option is to supply a common prior distribution for the initial terms and specify d as follows:\n\ndiff_model = DiffLatentModel(rw, Normal(); d = 2)\n\nOr we can supply a vector of priors for the initial terms and d is inferred as follows:\n\ndiff_model2 = DiffLatentModel(;undiffmodel = rw, init_priors = [Normal(), Normal()])\n\nThen, we can use generate_latent to construct a Turing model for the differenced latent process generating a length n process,\n\n# Construct a Turing model\nn = 100\ndifference_mdl = generate_latent(diff_model, n)\n\nNow we can use the Turing PPL API to sample underlying parameters and generate the unobserved latent process.\n\n#Sample random parameters from prior\nθ = rand(difference_mdl)\n#Get a sampled latent process as a generated quantity from the model\n(Z_t, _) = generated_quantities(difference_mdl, θ)\nZ_t\n\n\n\nFields\n\nmodel::AbstractTuringLatentModel: Underlying latent model for the differenced process\ninit_prior::Distributions.Distribution: The prior distribution for the initial latent variables.\nd::Int64: Number of times differenced.\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiLatentModels/public/#EpiAware.EpiLatentModels.FixedIntercept","page":"Public API","title":"EpiAware.EpiLatentModels.FixedIntercept","text":"struct FixedIntercept{F<:Real} <: AbstractTuringIntercept\n\nA variant of the Intercept struct that represents a fixed intercept value for a latent model.\n\nConstructors\n\nFixedIntercept(intercept) : Constructs a FixedIntercept instance with the specified intercept value.\nFixedIntercept(; intercept) : Constructs a FixedIntercept instance with the specified intercept value using named arguments.\n\nExamples\n\nusing EpiAware\nfi = FixedIntercept(2.0)\nfi_model = generate_latent(fi, 10)\nfi_model()\n\n\n\nFields\n\nintercept::Real\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiLatentModels/public/#EpiAware.EpiLatentModels.HierarchicalNormal","page":"Public API","title":"EpiAware.EpiLatentModels.HierarchicalNormal","text":"struct HierarchicalNormal{R<:Real, D<:Distributions.Sampleable} <: AbstractTuringLatentModel\n\nThe HierarchicalNormal struct represents a non-centered hierarchical normal distribution.\n\nConstructors\n\nHierarchicalNormal(mean, std_prior): Constructs a HierarchicalNormal instance with the specified mean and standard deviation prior.\nHierarchicalNormal(; mean = 0.0, std_prior = truncated(Normal(0,1), 0, Inf)): Constructs a HierarchicalNormal instance with the specified mean and standard deviation prior using named arguments and with default values.\n\nExamples\n\nusing Distributions, EpiAware\nhnorm = HierarchicalNormal(0.0, truncated(Normal(0, 1), 0, Inf))\nhnorm_model = generate_latent(hnorm, 10)\nhnorm_model()\n\n\n\nFields\n\nmean::Real\nstd_prior::Distributions.Sampleable\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiLatentModels/public/#EpiAware.EpiLatentModels.Intercept","page":"Public API","title":"EpiAware.EpiLatentModels.Intercept","text":"struct Intercept{D<:Distributions.Sampleable} <: AbstractTuringIntercept\n\nThe Intercept struct is used to model the intercept of a latent process. 
It broadcasts a single intercept value to a length n latent process.\n\nConstructors\n\nIntercept(intercept_prior)\nIntercept(; intercept_prior)\n\nExamples\n\nusing Distributions, Turing, EpiAware\nint = Intercept(Normal(0, 1))\nint_model = generate_latent(int, 10)\nrand(int_model)\nint_model()\n\n\n\nFields\n\nintercept_prior::Distributions.Sampleable: Prior distribution for the intercept.\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiLatentModels/public/#EpiAware.EpiLatentModels.PrefixLatentModel","page":"Public API","title":"EpiAware.EpiLatentModels.PrefixLatentModel","text":"struct PrefixLatentModel{M<:AbstractTuringLatentModel, P<:String} <: AbstractTuringLatentModel\n\nGenerate a latent model with a prefix. A lightweight wrapper around `EpiAwareUtils.prefix_submodel`.\n\n# Constructors\n- `PrefixLatentModel(model::M, prefix::P)`: Create a `PrefixLatentModel` with the latent model `model` and the prefix `prefix`.\n- `PrefixLatentModel(; model::M, prefix::P)`: Create a `PrefixLatentModel` with the latent model `model` and the prefix `prefix`.\n\n# Examples\n```julia\nusing EpiAware\nlatent_model = PrefixLatentModel(model = HierarchicalNormal(), prefix = \"Test\")\nmdl = generate_latent(latent_model, 10)\nrand(mdl)\n```\n\n\n\nFields\n\nmodel::AbstractTuringLatentModel: The latent model\nprefix::String: The prefix for the latent model\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiLatentModels/public/#EpiAware.EpiLatentModels.RandomWalk","page":"Public API","title":"EpiAware.EpiLatentModels.RandomWalk","text":"struct RandomWalk{D<:Distributions.Sampleable, S<:Distributions.Sampleable} <: AbstractTuringLatentModel\n\nModel latent process Z_t as a random walk.\n\nMathematical specification\n\nThe random walk Z_t is specified as a parameteric transformation of the white noise sequence (epsilon_t)_tgeq 1,\n\nZ_t = Z_0 + sigma sum_i = 1^t epsilon_t\n\nConstructing a random walk requires specifying:\n\nAn init_prior as a prior for Z_0. Default is Normal().\nA std_prior for sigma. 
The default is HalfNormal with a mean of 0.25.\n\nConstructors\n\nRandomWalk(; init_prior, std_prior)\n\nExample usage with generate_latent\n\ngenerate_latent can be used to construct a Turing model for the random walk Z_t.\n\nFirst, we construct a RandomWalk struct with priors,\n\nusing Distributions, Turing, EpiAware\n\n# Create a RandomWalk model\nrw = RandomWalk(init_prior = Normal(2., 1.),\n std_prior = HalfNormal(0.1))\n\nThen, we can use generate_latent to construct a Turing model for a 10 step random walk.\n\n# Construct a Turing model\nrw_model = generate_latent(rw, 10)\n\nNow we can use the Turing PPL API to sample underlying parameters and generate the unobserved infections.\n\n#Sample random parameters from prior\nθ = rand(rw_model)\n#Get random walk sample path as a generated quantities from the model\nZ_t, _ = generated_quantities(rw_model, θ)\n\n\n\nFields\n\ninit_prior::Distributions.Sampleable\nstd_prior::Distributions.Sampleable\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiLatentModels/public/#EpiAware.EpiLatentModels.RecordExpectedLatent","page":"Public API","title":"EpiAware.EpiLatentModels.RecordExpectedLatent","text":"struct RecordExpectedLatent{M<:AbstractTuringLatentModel} <: AbstractTuringLatentModel\n\nRecord a variable (using the Turing := syntax) in a latent model.\n\n# Fields\n- `model::AbstractTuringLatentModel`: The latent model to dispatch to.\n\n# Constructors\n\n- `RecordExpectedLatent(model::AbstractTuringLatentModel)`: Record the expected latent vector from the model as `exp_latent`.\n\n# Examples\n\n```julia\nusing EpiAware, Turing\nmdl = RecordExpectedLatent(FixedIntercept(0.1))\ngen_latent = generate_latent(mdl, 1)\nsample(gen_latent, Prior(), 10)\n```\n\n\n\nFields\n\nmodel::AbstractTuringLatentModel\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiLatentModels/public/#EpiAware.EpiLatentModels.RepeatBlock","page":"Public API","title":"EpiAware.EpiLatentModels.RepeatBlock","text":"struct RepeatBlock <: AbstractBroadcastRule\n\nRepeatBlock is a struct that represents a broadcasting rule. It is a subtype of AbstractBroadcastRule.\n\nIt repeats the latent process in blocks of size period. An example of this rule is to repeat the latent process in blocks of size 7 to model a weekly process (though for this we also provide the broadcast_weekly helper function).\n\nExamples\n\nusing EpiAware\nrule = RepeatBlock()\nlatent = [1, 2, 3, 4, 5]\nn = 10\nperiod = 2\nbroadcast_rule(rule, latent, n, period)\n\n\n\nFields\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiLatentModels/public/#EpiAware.EpiLatentModels.RepeatEach","page":"Public API","title":"EpiAware.EpiLatentModels.RepeatEach","text":"struct RepeatEach <: AbstractBroadcastRule\n\nRepeatEach is a struct that represents a broadcasting rule. It is a subtype of AbstractBroadcastRule.\n\nIt repeats the latent process at each period. 
An example of this rule is to repeat the latent process at each day of the week (though for this we also provide the dayofweek helper function).\n\nExamples\n\nusing EpiAware\nrule = RepeatEach()\nlatent = [1, 2]\nn = 10\nperiod = 2\nbroadcast_rule(rule, latent, n, period)\n\n\n\nFields\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiLatentModels/public/#EpiAware.EpiLatentModels.TransformLatentModel","page":"Public API","title":"EpiAware.EpiLatentModels.TransformLatentModel","text":"struct TransformLatentModel{M<:AbstractTuringLatentModel, F<:Function} <: AbstractTuringLatentModel\n\nThe TransformLatentModel struct represents a latent model that applies a transformation function to the latent variables generated by another latent model.\n\nConstructors\n\nTransformLatentModel(model, trans_function): Constructs a TransformLatentModel instance with the specified latent model and transformation function.\nTransformLatentModel(; model, trans_function): Constructs a TransformLatentModel instance with the specified latent model and transformation function using named arguments.\n\nExample\n\nusing EpiAware, Distributions\ntrans = TransformLatentModel(Intercept(Normal(2, 0.2)), x -> x .|> exp)\ntrans_model = generate_latent(trans, 5)\ntrans_model()\n\n\n\nFields\n\nmodel::AbstractTuringLatentModel: The latent model to transform.\ntrans_function::Function: The transformation function.\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiLatentModels/public/#EpiAware.EpiAwareBase.broadcast_rule-Tuple{RepeatBlock, Any, Any, Any}","page":"Public API","title":"EpiAware.EpiAwareBase.broadcast_rule","text":"broadcast_rule(_::RepeatBlock, latent, n, period) -> Any\n\n\nbroadcast_rule is a function that applies the RepeatBlock rule to the latent process latent to generate n samples.\n\nArguments\n\nrule::RepeatBlock: The broadcasting rule.\nlatent::Vector: The latent process.\nn: The number of samples to generate.\nperiod: The period of the broadcast.\n\nReturns\n\nlatent: The generated broadcasted latent periods.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiLatentModels/public/#EpiAware.EpiAwareBase.broadcast_rule-Tuple{RepeatEach, Any, Any, Any}","page":"Public API","title":"EpiAware.EpiAwareBase.broadcast_rule","text":"broadcast_rule(_::RepeatEach, latent, n, period) -> Any\n\n\nbroadcast_rule is a function that applies the RepeatEach rule to the latent process latent to generate n samples.\n\nArguments\n\nrule::RepeatEach: The broadcasting rule.\nlatent::Vector: The latent process.\nn: The number of samples to generate.\nperiod: The period of the broadcast.\n\nReturns\n\nlatent: The generated broadcasted latent periods.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiLatentModels/public/#EpiAware.EpiLatentModels.broadcast_dayofweek-Tuple{AbstractTuringLatentModel}","page":"Public API","title":"EpiAware.EpiLatentModels.broadcast_dayofweek","text":"broadcast_dayofweek(\n model::AbstractTuringLatentModel;\n link\n) -> BroadcastLatentModel{TransformLatentModel{M, EpiAware.EpiLatentModels.var\"#42#44\"}, Int64, RepeatEach} where M<:AbstractTuringLatentModel\n\n\nConstructs a BroadcastLatentModel appropriate for modelling the day of the week for a given AbstractTuringLatentModel.\n\nArguments\n\nmodel::AbstractTuringLatentModel: The latent model to be repeated.\nlink::Function: The link function to transform the latent model before broadcasting\n\nto periodic weekly. 
Default is x -> 7 * softmax(x) which implements constraint of the sum week effects to be 7.\n\nReturns\n\nBroadcastLatentModel: The broadcast latent model.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiLatentModels/public/#EpiAware.EpiLatentModels.broadcast_weekly-Tuple{AbstractTuringLatentModel}","page":"Public API","title":"EpiAware.EpiLatentModels.broadcast_weekly","text":"broadcast_weekly(\n model::AbstractTuringLatentModel\n) -> BroadcastLatentModel{<:AbstractTuringLatentModel, Int64, RepeatBlock}\n\n\nConstructs a BroadcastLatentModel appropriate for modelling piecewise constant weekly processes for a given AbstractTuringLatentModel.\n\nArguments\n\nmodel::AbstractTuringLatentModel: The latent model to be repeated.\n\nReturns\n\nBroadcastLatentModel: The broadcast latent model.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiLatentModels/public/#EpiAware.EpiLatentModels.equal_dimensions-Tuple{Int64, Int64}","page":"Public API","title":"EpiAware.EpiLatentModels.equal_dimensions","text":"equal_dimensions(n::Int64, m::Int64) -> Vector{Int64}\n\n\nReturn a vector of dimensions that are equal or as close as possible, given the total number of elements n and the number of dimensions m. The default dimension adaptor for ConcatLatentModels.\n\nArguments\n\nn::Int: The total number of elements.\nm::Int: The number of dimensions.\n\nReturns\n\ndims::AbstractVector{Int}: A vector of dimensions, where the first element is the ceiling of n / m and the remaining elements are the floor of n / m.\n\n\n\n\n\n","category":"method"},{"location":"getting-started/faq/#Frequently-asked-questions","page":"Frequently asked questions","title":"Frequently asked questions","text":"","category":"section"},{"location":"getting-started/faq/","page":"Frequently asked questions","title":"Frequently asked questions","text":"This page contains a list of frequently asked questions about the EpiAware package. If you have a question that is not answered here, please open a discussion on the GitHub repository.","category":"page"},{"location":"getting-started/faq/","page":"Frequently asked questions","title":"Frequently asked questions","text":"Pages = [\"lib/getting-started/faq.md\"]","category":"page"},{"location":"getting-started/faq/#Pluto-notebooks","page":"Frequently asked questions","title":"Pluto notebooks","text":"","category":"section"},{"location":"getting-started/faq/","page":"Frequently asked questions","title":"Frequently asked questions","text":"In some of the showcase examples in EpiAware/docs/src/showcase we use Pluto.jl notebooks for the underlying computation. As well as reading the code blocks and output of the notebooks in this documentation, you can also run these notebooks by cloning EpiAware and running the notebooks with Pluto.jl (for further details see developer notes).","category":"page"},{"location":"getting-started/faq/","page":"Frequently asked questions","title":"Frequently asked questions","text":"It should be noted that Pluto.jl notebooks are reactive, meaning that they re-run downstream code after changes with downstreaming determined by a tree of dependent code blocks. This is different from the standard Julia REPL, and some other notebook formats (e.g. .ipynb). In Pluto each code block is a single lines of code or encapsulated by let ... end and begin ... end. The difference between let ... end blocks and begin ... end blocks are that the let ... 
end type of code block only adds the final output/return value of the block to scope, like an anonymous function, whereas begin ... end executes each line and adds defined variables to scope.","category":"page"},{"location":"getting-started/faq/","page":"Frequently asked questions","title":"Frequently asked questions","text":"For installation instructions and more information and documentation on Pluto.jl see the Pluto.jl documentation.","category":"page"},{"location":"getting-started/faq/#Manipulating-EpiAware-model-specifications","page":"Frequently asked questions","title":"Manipulating EpiAware model specifications","text":"","category":"section"},{"location":"getting-started/faq/#Modular-model-construction","page":"Frequently asked questions","title":"Modular model construction","text":"","category":"section"},{"location":"getting-started/faq/","page":"Frequently asked questions","title":"Frequently asked questions","text":"One of the key features of EpiAware is the ability to specify models as components of a larger model. This is useful for specifying models that are shared across multiple EpiProblems or for specifying models that are used in multiple methods. You can see an examples of this approach in our showcases.","category":"page"},{"location":"getting-started/faq/#Remaking-models","page":"Frequently asked questions","title":"Remaking models","text":"","category":"section"},{"location":"getting-started/faq/","page":"Frequently asked questions","title":"Frequently asked questions","text":"An alternative to modular model construction is to remake models with different parameters. This can be useful for comparing models with different parameters or for comparing models with different priors. Whilst we don't have a built in function for this, we recommend the Accessors.jl package for this purpose. For examples of how to use this package see the documentation.","category":"page"},{"location":"getting-started/faq/#Working-with-Turing.jl-models","page":"Frequently asked questions","title":"Working with Turing.jl models","text":"","category":"section"},{"location":"getting-started/faq/#[DynamicPPL.jl](https://github.com/TuringLang/DynamicPPL.jl)","page":"Frequently asked questions","title":"DynamicPPL.jl","text":"","category":"section"},{"location":"getting-started/faq/","page":"Frequently asked questions","title":"Frequently asked questions","text":"Whilst Turing.jl is the front end of the Turing.jl ecosystem, it is not the only package that can be used to work with Turing.jl models. DynamicPPL.jl is the part of the ecosytem that deals with defining, running, and manipulating models.","category":"page"},{"location":"getting-started/faq/#Conditioning-and-deconditioning-models","page":"Frequently asked questions","title":"Conditioning and deconditioning models","text":"","category":"section"},{"location":"getting-started/faq/","page":"Frequently asked questions","title":"Frequently asked questions","text":"DynamicPPL supports the condition (alased with |) to fix values as known observations in the model (i.e fixing values on the left hand side of ~ definitions). This is useful for fixing parameters to known values or for conditioning the model on data. The decondition function can be used to remove these conditions. Internally this is what apply_method(::EpiProblem, ...) does to condition the user supplied EpiProblem to data. 
See more here.","category":"page"},{"location":"getting-started/faq/#Fixing-and-unfixing-models","page":"Frequently asked questions","title":"Fixing and unfixing models","text":"","category":"section"},{"location":"getting-started/faq/","page":"Frequently asked questions","title":"Frequently asked questions","text":"Similarly to conditioning and deconditioning models, DynamicPPL supports fixing and unfixing models via the fix and unfix functions. Fixing is essentially saying that variables are constants (i.e replacing the right hand side of ~ with a value and changing the ~ to a =). A common use of this would be to simplify a prespecified model, for example to make the variance of a random walk be known versus estimated from the data. We also use this functionality in apply_method(::EpiProblem, ...) to allow users to simplify EpiProblems on the fly. See more here.","category":"page"},{"location":"getting-started/faq/#Tools-for-working-with-MCMCChain-objects","page":"Frequently asked questions","title":"Tools for working with MCMCChain objects","text":"","category":"section"},{"location":"getting-started/faq/#[MCMCChain.jl](https://turinglang.org/MCMCChains.jl/stable/)","page":"Frequently asked questions","title":"MCMCChain.jl","text":"","category":"section"},{"location":"getting-started/faq/","page":"Frequently asked questions","title":"Frequently asked questions","text":"MCMCChain.jl is the package from which MCMCChains is imported. It provides a number of useful functions for working with MCMCChain objects. These include functions for summarising, plotting, and manipulating chains. Below is a list of some of the most useful functions.","category":"page"},{"location":"getting-started/faq/","page":"Frequently asked questions","title":"Frequently asked questions","text":"plot: Plots trace and density plots for each parameter in the chain object.\nhistogram: Plots histograms for each parameter in the chain object by chain.\nget: Accesses the values of a parameter/s in the chain object.\nDataFrames.DataFrame converts a chain into a wide format DataFrame.\ndescribe: Prints the summary statistics of the chain object.","category":"page"},{"location":"getting-started/faq/","page":"Frequently asked questions","title":"Frequently asked questions","text":"There are many more functions available in the MCMCChain.jl package. For a full list of functions, see the documentation.","category":"page"},{"location":"getting-started/faq/#[Arviz.jl](https://julia.arviz.org/ArviZ/stable/)","page":"Frequently asked questions","title":"Arviz.jl","text":"","category":"section"},{"location":"getting-started/faq/","page":"Frequently asked questions","title":"Frequently asked questions","text":"An alternative to MCMCChain.jl is the ArviZ.jl package. ArviZ.jl is a Julia meta-package for exploratory analysis of Bayesian models. It is part of the ArviZ project, which also includes a related Python package.","category":"page"},{"location":"getting-started/faq/","page":"Frequently asked questions","title":"Frequently asked questions","text":"ArviZ.jl uses a InferenceData object to store the results of a Bayesian analysis. This object can be created from a MCMCChain object using the from_mcmcchains function. The InferenceData object can then be used to create a range of plots and summaries of the model. 
This is particularly useful as it allows you to specify the indexes of your parameters (for example you could use dates for time parameters).","category":"page"},{"location":"getting-started/faq/","page":"Frequently asked questions","title":"Frequently asked questions","text":"In addition to this useful functionality from_mcmcchains can also be used to combine posterior predictions with prior predictions, prior information and the log likelihood of the model (see here for an example of this). This unlocks a range of useful diagnostics and plots that can be used to assess the model.","category":"page"},{"location":"getting-started/faq/","page":"Frequently asked questions","title":"Frequently asked questions","text":"There is a lot of functionality in ArviZ.jl and it is worth exploring the documentation to see what is available.","category":"page"},{"location":"showcase/replications/chatzilena-2019/","page":"Statistical inference for ODE-based infectious disease models","title":"Statistical inference for ODE-based infectious disease models","text":"\n\n\n\n\n\n\n\n
Example: Statistical inference for ODE-based infectious disease models
Defining the deterministic ODE model from Chatzilena et al section 2.2.2 using SciML ODE functionality and an EpiAware observation model.
Building on this to define the stochastic ODE model from Chatzilena et al section 2.2.3 using an EpiAware observation model.
Fitting the deterministic ODE model to data from an Influenza outbreak in an English boarding school.
Fitting the stochastic ODE model to data from an Influenza outbreak in an English boarding school.
What might I need to know before starting?
This vignette builds on concepts from EpiAware observation models, and familiarity with the SciML and Turing ecosystems would be useful but not essential.
Packages used in this vignette
Alongside the EpiAware package we will use the OrdinaryDiffEq and SciMLSensitivity packages for interfacing with the SciML ecosystem; this is a lower-dependency usage of DifferentialEquations.jl in which the two packages expose, respectively, ODE solvers and adjoint methods for ODE solvers, that is, methods for propagating parameter derivatives through functions containing ODE solutions. Bayesian inference will be done with NUTS from the Turing ecosystem. We will also use the CairoMakie package for plotting and DataFramesMeta for data manipulation.
\n\n
using EpiAware
\n\n\n
using Turing
\n\n\n
using OrdinaryDiffEq, SciMLSensitivity #ODE solvers and adjoint methods
\n\n\n
using Distributions, Statistics, LogExpFunctions #Statistics and special func packages
\n\n\n
using CSV, DataFramesMeta #Data wrangling
\n\n\n
using CairoMakie, PairPlots
\n\n\n
using ReverseDiff #Automatic differentiation backend
\n\n\n
begin #Date utility and set Random seed\n using Dates\n using Random\n Random.seed!(1234)\nend
\n
TaskLocalRNG()
\n\n\n
Single population SIR model
As mentioned in Chatzilena et al, disease spread is frequently modelled in terms of ODE-based models. The study population is divided into compartments, each representing a specific stage of epidemic status; in this case, susceptible, infected, and recovered individuals.
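In compartmental form, and consistent with the sir! implementation further below (where any \(1/N\) scaling is folded into the transmission rate), the dynamics can be written as $$\frac{dS}{dt} = -\beta S I, \qquad \frac{dI}{dt} = \beta S I - \gamma I, \qquad \frac{dR}{dt} = \gamma I.$$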
where S(t) represents the number of susceptible, I(t) the number of infected and R(t) the number of recovered individuals at time t. The total population size is denoted by N (with N = S(t) + I(t) + R(t)), β denotes the transmission rate and γ denotes the recovery rate.
\n\n\n
We can interface to the SciML ecosystem by writing a function with the signature:
(du, u, p, t) -> nothing
Where:
du is the vector field of the ODE problem, e.g. \\({dS \\over dt}\\), \\({dI \\over dt}\\) etc. This is calculated in-place (commonly denoted using ! in function names in Julia).
u is the state of the ODE problem, e.g. \\(S\\), \\(I\\), etc.
p is an object that represents the parameters of the ODE problem, e.g. \\(\\beta\\), \\(\\gamma\\).
t is the time of the ODE problem.
We do this for the SIR model described above in a function called sir!:
\n\n
function sir!(du, u, p, t)\n S, I, R = u\n β, γ = p\n du[1] = -β * I * S\n du[2] = β * I * S - γ * I\n du[3] = γ * I\n\n return nothing\nend
\n
sir! (generic function with 1 method)
\n\n\n
We combine the vector field function sir! with an initial condition u0 and the integration period tspan to make an ODEProblem. We do not define the parameters here; these will be sampled within the inference approach.
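A minimal sketch of this construction, using the initial condition and timespan shown in the printed output below (the notebook's own cell may differ), is:
u0 = [755.37, 7.63, 0.0] # initial S(0), I(0), R(0) taken from the printed output\ntspan = (0.0, 14.0) # 14 day observation window\nsir_prob = ODEProblem(sir!, u0, tspan)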
ODEProblem with uType Vector{Float64} and tType Float64. In-place: true\ntimespan: (0.0, 14.0)\nu0: 3-element Vector{Float64}:\n 755.37\n 7.63\n 0.0
\n\n\n
Note that this is analogous to the EpiProblem approach we expose from EpiAware, as used in the Mishra et al replication. The difference is that here we are going to use ODE solvers from the SciML ecosystem to generate the dynamics of the underlying infections. In the linked example, we use latent process generation exposed by EpiAware as the generative process for the underlying dynamics.
\n\n","category":"page"},{"location":"showcase/replications/chatzilena-2019/#Data-for-inference","page":"Statistical inference for ODE-based infectious disease models","title":"Data for inference","text":"","category":"section"},{"location":"showcase/replications/chatzilena-2019/","page":"Statistical inference for ODE-based infectious disease models","title":"Statistical inference for ODE-based infectious disease models","text":"
\n
There was a brief, but intense, outbreak of influenza within the (semi-)closed community of a boarding school, reported to the British Medical Journal in 1978. The outbreak lasted from 22nd January to 4th February, and it is reported that one infected child started the epidemic, which then spread rapidly. Of the 763 children at the boarding school, 512 became ill.
We downloaded the data of this outbreak using the R package outbreaks, which is maintained as part of the R Epidemics Consortium (RECON).
\n\n\n","category":"page"},{"location":"showcase/replications/chatzilena-2019/#Inference-for-the-deterministic-SIR-model","page":"Statistical inference for ODE-based infectious disease models","title":"Inference for the deterministic SIR model","text":"","category":"section"},{"location":"showcase/replications/chatzilena-2019/","page":"Statistical inference for ODE-based infectious disease models","title":"Statistical inference for ODE-based infectious disease models","text":"
\n
The boarding school data gives the number of children \"in bed\" and \"convalescent\" on each of 14 days from 22nd Jan to 4th Feb 1978. We follow Chatzilena et al and treat the number \"in bed\" as a proxy for the number of children in the infectious (I) compartment in the ODE model.
NB: Chatzilena et al give \(\lambda_t = \int_0^t \left( \beta \frac{I(s)}{N} S(s) - \gamma I(s) \right) ds = I(t) - I(0).\) However, this doesn't match their underlying Stan code.
\n\n\n
From EpiAware, we have the PoissonError struct which defines the probabilistic structure of this observation error model.
\n\n
obs = PoissonError()
\n
PoissonError()
\n\n\n
Now we can write the probabilistic model using the Turing PPL. Note that instead of using \(I(t)\) directly we apply the softplus transform to \(I(t)\), implemented by LogExpFunctions.log1pexp. The reason is that the solver can return small negative numbers; the softplus transform smoothly maintains positivity whilst being very close to \(I(t)\) when \(I(t) > 2\).
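A rough sketch of what such a model can look like is given below; the priors on β and γ, the argument names, and the saveat grid are illustrative assumptions rather than the notebook's actual choices.
@model function deterministic_ode_mdl(y_t, sir_prob, obs; solver = AutoTsit5(Rosenbrock23()))\n β ~ LogNormal(0.0, 1.0) # assumed prior\n γ ~ LogNormal(0.0, 1.0) # assumed prior\n sol = solve(sir_prob, solver; p = [β, γ], saveat = 1.0)\n I_t = [u[2] for u in sol.u] # infectious compartment from the ODE solution\n λ_t = log1pexp.(I_t) # softplus keeps the expected counts positive\n @submodel y = generate_observations(obs, y_t, λ_t)\n return (; λ_t, y)\nend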
deterministic_ode_mdl (generic function with 2 methods)
\n\n\n
We instantiate the model in two ways:
deterministic_mdl: This conditions the generative model on the data observation. We can sample from this model to find the posterior distribution of the parameters.
deterministic_uncond_mdl: This doesn't condition on the data. This is useful for prior and posterior predictive modelling.
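Using the hypothetical deterministic_ode_mdl signature sketched above, and assuming the observed counts are stored in a data.in_bed column (an assumption about the data frame), these two instantiations might look like:
deterministic_mdl = deterministic_ode_mdl(data.in_bed, sir_prob, obs) # conditioned on the observed counts\ndeterministic_uncond_mdl = deterministic_ode_mdl(missing, sir_prob, obs) # unconditioned, for predictive checks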
Here we construct the Turing model directly; in the Mishra et al replication we use the EpiProblem functionality to build a Turing model under the hood. Because in this note we are using a mix of functionality from SciML and EpiAware, we construct the model to sample from directly.
The prior predictive checking suggests that a priori our parameter beliefs are very far from the data. Approaching the inference naively can lead to poor fits.
We do three things to mitigate this:
We choose a composite ODE solver that switches between explicit (Tsit5) and implicit (Rosenbrock23) solvers. This helps avoid the ODE solver failing when the sampler tries extreme parameter values. This is the default solver = AutoTsit5(Rosenbrock23()) above.
We locate the maximum likelihood point (that is, we ignore the influence of the priors) and use it as a starting point for NUTS.
2-element Vector{ChainDataFrame}:\n Summary Statistics (3 x 8)\n Quantiles (3 x 6)
\n\n
pairplot(chn)
\n\n\n\n
Posterior predictive plotting
\n\n
let\n gens = generated_quantities(deterministic_uncond_mdl, chn)\n plot_predYt(data, gens;\n title = \"Fitted deterministic model\",\n ylabel = \"Number of Infected students\"\n )\nend
\n\n\n","category":"page"},{"location":"showcase/replications/chatzilena-2019/#Inference-for-the-Stochastic-SIR-model","page":"Statistical inference for ODE-based infectious disease models","title":"Inference for the Stochastic SIR model","text":"","category":"section"},{"location":"showcase/replications/chatzilena-2019/","page":"Statistical inference for ODE-based infectious disease models","title":"Statistical inference for ODE-based infectious disease models","text":"
\n
Chatzilena et al present an auto-regressive model for connecting the outcome of the ODE model to illness observations. The argument is that the stochastic component of the model can absorb the noise generated by a possible mis-specification of the model.
In their approach they consider \\(\\kappa_t = \\log \\lambda_t\\) where \\(\\kappa_t\\) evolves according to an Ornstein-Uhlenbeck process:
We will use the AR struct from EpiAware to define the auto-regressive process in this model; it has a direct parameterisation of the AR model.
To convert from the formulation above we sample from the priors, and define HalfNormal priors based on the sampled prior means of \\(e^{-\\phi}\\) and \\({\\sigma^2 \\over 2 \\phi} \\left(1 - e^{-2\\phi} \\right)\\). We also add a strong prior that \\(\\kappa_1 \\approx 0\\).
We define the AR(1) process by matching the means of HalfNormal prior distributions for the damping and standard deviation parameters to the prior means calculated from the Chatzilena et al definition.
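A minimal sketch of this moment matching is given below; the priors placed on φ and σ here are placeholders rather than the notebook's choices.
φs = rand(truncated(Normal(0, 1), 0, Inf), 10_000) # placeholder prior draws for φ\nσs = rand(HalfNormal(0.5), 10_000) # placeholder prior draws for σ\ndamp_mean = mean(exp.(-φs)) # prior mean of the AR(1) damping e^(-φ)\nstd_mean = mean(sqrt.(σs .^ 2 ./ (2 .* φs) .* (1 .- exp.(-2 .* φs)))) # prior mean of the innovation standard deviation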
We can sample directly from the behaviour specified by the ar struct to do prior predictive checking on the AR(1) process.
\n\n
let\n nobs = size(data, 1)\n ar_mdl = generate_latent(ar, nobs)\n fig = Figure()\n ax = Axis(fig[1, 1],\n xticks = (data.ts[1:3:end], data.date[1:3:end] .|> string),\n ylabel = \"exp(kt)\",\n title = \"Prior predictive sampling for relative residual in mean pred.\"\n )\n for i in 1:500\n lines!(ax, ar_mdl() .|> exp, color = (:grey, 0.15))\n end\n fig\nend
\n\n\n\n
We see that the choice of priors implies an a priori belief that the extra observation noise on the mean prediction of the ODE model is fairly small, approximately 10% relative to the mean prediction.
\n\n\n
We can now define the probabilistic model. The stochastic model assumes a (random) time-varying ascertainment, which we implement using the Ascertainment struct from EpiAware. Note that instead of applying an ascertainment factor exp.(κₜ) directly, which can be numerically unstable for large values, by default Ascertainment uses the LogExpFunctions.xexpy function, which computes \(x\exp(y)\) stably for a wide range of values.
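As a small illustration with arbitrary values, xexpy computes the product \(x\exp(y)\) directly:
xexpy(2.0, 3.0) ≈ 2.0 * exp(3.0) # returns true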
\n\n\n
To distinguish random variables sampled by the various sub-processes, EpiAware process types create prefixes. The default for Ascertainment is just the string \"Ascertainment\", but in this case we use the less verbose \"va\" for \"varying ascertainment\".
\n\n
mdl_prefix = \"va\"
\n
\"va\"
\n\n\n
Now we can construct our time-varying ascertainment model. The main keyword arguments here are model and latent_model. model sets the connection between the expected observation and the actual observation; in this case, we reuse our PoissonError model from above. latent_model sets the modification model on the expected values; in this case, we use the AR process we defined above.
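A sketch of this construction is below; model and latent_model are the keyword arguments described above, while the name of the prefix keyword is an assumption here.
obs_model = Ascertainment(model = obs, latent_model = ar, latent_prefix = mdl_prefix) # latent_prefix keyword name is an assumption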
The prior predictive checking again shows misaligned prior beliefs; for example, a priori (without data) we would not expect the median prediction of the number of ill children to be about 600 out of 763 after just one day.
The latent process for the log-residuals \(\kappa_t\) doesn't make much sense without its priors, so rather than a maximum likelihood point we look for a reasonable MAP point to start NUTS from. We do this by first making an initial guess which is a mixture of:
The posterior averages from the deterministic model.
The prior averages of the structure parameters of the AR(1) process.
Zero for the time-varying noise underlying the AR(1) process.
let\n vars = mapreduce(vcat, 1:13) do i\n Symbol(mdl_prefix * \".ϵ_t[$i]\")\n end\n pairplot(chn2[vars])\nend
\n\n\n
let\n gens = generated_quantities(stochastic_uncond_mdl, chn2)\n plot_predYt(data, gens;\n title = \"Fitted stochastic model\",\n ylabel = \"Number of Infected students\"\n )\nend
\n\n\n","category":"page"},{"location":"showcase/replications/chatzilena-2019/","page":"Statistical inference for ODE-based infectious disease models","title":"Statistical inference for ODE-based infectious disease models","text":"EditURL = \"https://github.com/CDCgov/Rt-without-renewal/blob/main/docs/src/showcase/replications/chatzilena-2019/index.jl\"","category":"page"},{"location":"getting-started/explainers/latent-models/#Latent-models","page":"Latent models","title":"Latent models","text":"","category":"section"},{"location":"lib/internals/#Internal-Documentation","page":"Internal API","title":"Internal Documentation","text":"","category":"section"},{"location":"lib/internals/","page":"Internal API","title":"Internal API","text":"Documentation for EpiAware.jl's internal interface.","category":"page"},{"location":"lib/internals/#Contents","page":"Internal API","title":"Contents","text":"","category":"section"},{"location":"lib/internals/","page":"Internal API","title":"Internal API","text":"Pages = [\"internals.md\"]\nDepth = 2:2","category":"page"},{"location":"lib/internals/#Index","page":"Internal API","title":"Index","text":"","category":"section"},{"location":"lib/internals/","page":"Internal API","title":"Internal API","text":"Pages = [\"internals.md\"]","category":"page"},{"location":"lib/internals/#Internal-API","page":"Internal API","title":"Internal API","text":"","category":"section"},{"location":"lib/internals/","page":"Internal API","title":"Internal API","text":"Modules = [EpiAware]\nPublic = false","category":"page"},{"location":"getting-started/explainers/intro/#Introduction","page":"Introduction to EpiAware","title":"Introduction","text":"","category":"section"},{"location":"getting-started/explainers/intro/","page":"Introduction to EpiAware","title":"Introduction to EpiAware","text":"The diagram below shows the relationship between the modules in the package for a typical workflow.","category":"page"},{"location":"getting-started/explainers/intro/","page":"Introduction to EpiAware","title":"Introduction to EpiAware","text":"flowchart LR\n\nA[\"Underlying GI\nBijector\"]\n\nEpiModel[\"AbstractTuringEpiModel\n----------------------\nChoice of target\nfor latent process:\n\nDirectInfections\n ExpGrowthRate\n Renewal\"]\n\nInitModel[\"Priors for\ninitial scale of incidence\"]\n\nDataW[Data wrangling and QC]\n\n\nObsData[\"Observational Data\n---------------------\nObs. cases y_t\"]\n\nLatentProcPriors[\"Latent process priors\"]\n\nLatentProc[\"AbstractTuringLatentModel\n---------------------\nRandomWalk\"]\n\nObsModelPriors[\"Observation model priors\nchoice of delayed obs. 
model\"]\n\nObsModel[\"AbstractObservationModel\n---------------------\nDelayObservations\"]\n\nE[\"Turing model constructor\n---------------------\ngenerate_epiaware\"]\n\nG[Posterior draws]\nH[Posterior checking]\nI[Post-processing]\n\n\n\nA --> EpiData\nEpiData --> EpiModel\nInitModel --> EpiModel\nEpiModel -->E\nObsData-->E\nDataW-.->ObsData\nLatentProcPriors-->LatentProc\nLatentProc-->E\nObsModelPriors-->ObsModel\nObsModel-->E\n\n\nE-->|sample...NUTS...| G\nG-->H\nH-->I","category":"page"},{"location":"lib/EpiAwareBase/internals/#Internal-Documentation","page":"Internal API","title":"Internal Documentation","text":"","category":"section"},{"location":"lib/EpiAwareBase/internals/","page":"Internal API","title":"Internal API","text":"Documentation for EpiAwareBase.jl's internal interface.","category":"page"},{"location":"lib/EpiAwareBase/internals/#Contents","page":"Internal API","title":"Contents","text":"","category":"section"},{"location":"lib/EpiAwareBase/internals/#Contents-2","page":"Internal API","title":"Contents","text":"","category":"section"},{"location":"lib/EpiAwareBase/internals/","page":"Internal API","title":"Internal API","text":"Pages = [\"internals.md\"]\nDepth = 2:2","category":"page"},{"location":"lib/EpiAwareBase/internals/#Index","page":"Internal API","title":"Index","text":"","category":"section"},{"location":"lib/EpiAwareBase/internals/","page":"Internal API","title":"Internal API","text":"Pages = [\"internals.md\"]","category":"page"},{"location":"lib/EpiAwareBase/internals/","page":"Internal API","title":"Internal API","text":"Modules = [EpiAware.EpiAwareBase]\nPublic = false","category":"page"},{"location":"lib/EpiInfModels/internals/#Internal-Documentation","page":"Internal API","title":"Internal Documentation","text":"","category":"section"},{"location":"lib/EpiInfModels/internals/","page":"Internal API","title":"Internal API","text":"Documentation for EpiInfModels.jl's internal interface.","category":"page"},{"location":"lib/EpiInfModels/internals/#Contents","page":"Internal API","title":"Contents","text":"","category":"section"},{"location":"lib/EpiInfModels/internals/","page":"Internal API","title":"Internal API","text":"Pages = [\"internals.md\"]\nDepth = 2:2","category":"page"},{"location":"lib/EpiInfModels/internals/#Index","page":"Internal API","title":"Index","text":"","category":"section"},{"location":"lib/EpiInfModels/internals/","page":"Internal API","title":"Internal API","text":"Pages = [\"internals.md\"]","category":"page"},{"location":"lib/EpiInfModels/internals/#Internal-API","page":"Internal API","title":"Internal API","text":"","category":"section"},{"location":"lib/EpiInfModels/internals/","page":"Internal API","title":"Internal API","text":"Modules = [EpiAware.EpiInfModels]\nPublic = false","category":"page"},{"location":"lib/EpiInfModels/internals/#EpiAware.EpiInfModels.AbstractConstantRenewalStep","page":"Internal API","title":"EpiAware.EpiInfModels.AbstractConstantRenewalStep","text":"abstract type AbstractConstantRenewalStep <: AbstractAccumulationStep\n\nAbstract type representing an accumulation iteration/step for a Renewal model with a constant generation interval.\n\n\n\nFields\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiInfModels/internals/#EpiAware.EpiInfModels.ConstantRenewalStep","page":"Internal API","title":"EpiAware.EpiInfModels.ConstantRenewalStep","text":"struct ConstantRenewalStep{T} <: EpiAware.EpiInfModels.AbstractConstantRenewalStep\n\nThe renewal process iteration/step function struct with constant 
generation interval.\n\nNote that the generation interval is stored in reverse order.\n\n\n\nFields\n\nrev_gen_int::Vector\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiInfModels/internals/#EpiAware.EpiInfModels.ConstantRenewalStep-Tuple{Any, Any}","page":"Internal API","title":"EpiAware.EpiInfModels.ConstantRenewalStep","text":"function (recurrent_step::ConstantRenewalStep)(recent_incidence, Rt)\n\nImplement the Renewal model iteration/step function, with constant generation interval.\n\nMathematical specification\n\nThe new incidence is given by\n\nI_t = R_t sum_i=1^n-1 I_t-i g_i\n\nwhere I_t is the new incidence, R_t is the reproduction number, I_{t-i} is the recent incidence and g_i is the generation interval.\n\nArguments\n\nrecent_incidence: Array of recent incidence values order least recent to most recent.\nRt: Reproduction number.\n\nReturns\n\nUpdated incidence array.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiInfModels/internals/#EpiAware.EpiInfModels.ConstantRenewalWithPopulationStep","page":"Internal API","title":"EpiAware.EpiInfModels.ConstantRenewalWithPopulationStep","text":"struct ConstantRenewalWithPopulationStep{T} <: EpiAware.EpiInfModels.AbstractConstantRenewalStep\n\nThe renewal process iteration/step function struct with constant generation interval and a fixed population size.\n\nNote that the generation interval is stored in reverse order.\n\n\n\nFields\n\nrev_gen_int::Vector\npop_size::Any\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiInfModels/internals/#EpiAware.EpiInfModels.ConstantRenewalWithPopulationStep-Tuple{Any, Any}","page":"Internal API","title":"EpiAware.EpiInfModels.ConstantRenewalWithPopulationStep","text":"function (recurrent_step::ConstantRenewalWithPopulationStep)(recent_incidence_and_available_sus, Rt)\n\nCallable on a RenewalWithPopulation struct for compute new incidence based on recent incidence, Rt and depletion of susceptibles.\n\nMathematical specification\n\nThe new incidence is given by\n\nI_t = S_t-1 N R_t sum_i=1^n-1 I_t-i g_i\n\nwhere I_t is the new incidence, R_t is the reproduction number, I_{t-i} is the recent incidence and g_i is the generation interval.\n\nArguments\n\nrecent_incidence_and_available_sus: A tuple with an array of recent incidence\n\nvalues and the remaining susceptible/available individuals.\n\nRt: Reproduction number.\n\nReturns\n\nVector containing the updated incidence array and the new recent_incidence_and_available_sus\n\nvalue.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiInfModels/internals/#EpiAware.EpiAwareBase.generate_latent_infs-Tuple{AbstractTuringRenewal, Any}","page":"Internal API","title":"EpiAware.EpiAwareBase.generate_latent_infs","text":"generate_latent_infs(\n epi_model::AbstractTuringRenewal,\n _Rt\n) -> Any\n\n\nImplement the generate_latent_infs function for the Renewal model.\n\nExample usage with Renewal type of model for unobserved infection process\n\ngenerate_latent_infs can be used to construct a Turing model for the latent infections conditional on the sample path of a latent process. 
In this example, we generate a sample of a white noise latent process.\n\nFirst, we construct an Renewal struct with an EpiData object, an initialisation prior and a transformation function.\n\nusing Distributions, Turing, EpiAware\ngen_int = [0.2, 0.3, 0.5]\ng = exp\n\n# Create an EpiData object\ndata = EpiData(gen_int, g)\n\n# Create an Renewal model\nrenewal_model = Renewal(data; initialisation_prior = Normal())\n\nThen, we can use generate_latent_infs to construct a Turing model for the unobserved infection generation model set by the type of renewal_model.\n\n# Construct a Turing model\nZ_t = randn(100) * 0.05\nlatent_inf = generate_latent_infs(renewal_model, Z_t)\n\nNow we can use the Turing PPL API to sample underlying parameters and generate the unobserved infections.\n\n# Sample from the unobserved infections model\n\n#Sample random parameters from prior\nθ = rand(latent_inf)\n#Get unobserved infections as a generated quantities from the model\nI_t = generated_quantities(latent_inf, θ)\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiInfModels/internals/#EpiAware.EpiAwareBase.generate_latent_infs-Tuple{DirectInfections, Any}","page":"Internal API","title":"EpiAware.EpiAwareBase.generate_latent_infs","text":"generate_latent_infs(\n epi_model::DirectInfections,\n Z_t\n) -> Any\n\n\nImplement the generate_latent_infs function for the DirectInfections model.\n\nExample usage with DirectInfections type of model for unobserved infection process\n\nFirst, we construct a DirectInfections struct with an EpiData object, an initialisation prior and a transformation function.\n\nusing Distributions, Turing, EpiAware\ngen_int = [0.2, 0.3, 0.5]\ng = exp\n\n# Create an EpiData object\ndata = EpiData(gen_int, g)\n\n# Create a DirectInfections model\ndirect_inf_model = DirectInfections(data = data, initialisation_prior = Normal())\n\nThen, we can use generate_latent_infs to construct a Turing model for the unobserved infection generation model set by the type of direct_inf_model.\n\n# Construct a Turing model\nZ_t = randn(100)\nlatent_inf = generate_latent_infs(direct_inf_model, Z_t)\n\nNow we can use the Turing PPL API to sample underlying parameters and generate the unobserved infections.\n\n# Sample from the unobserved infections model\n\n#Sample random parameters from prior\nθ = rand(latent_inf)\n#Get unobserved infections as a generated quantities from the model\nI_t = generated_quantities(latent_inf, θ)\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiInfModels/internals/#EpiAware.EpiAwareBase.generate_latent_infs-Tuple{ExpGrowthRate, Any}","page":"Internal API","title":"EpiAware.EpiAwareBase.generate_latent_infs","text":"generate_latent_infs(epi_model::ExpGrowthRate, rt) -> Any\n\n\nImplement the generate_latent_infs function for the ExpGrowthRate model.\n\nExample usage with ExpGrowthRate type of model for unobserved infection process\n\ngenerate_latent_infs can be used to construct a Turing model for the latent infections conditional on the sample path of a latent process. 
In this example, we generate a sample of a white noise latent process.\n\nFirst, we construct an ExpGrowthRate struct with an EpiData object, an initialisation prior and a transformation function.\n\nusing Distributions, Turing, EpiAware\ngen_int = [0.2, 0.3, 0.5]\ng = exp\n\n# Create an EpiData object\ndata = EpiData(gen_int, g)\n\n# Create an ExpGrowthRate model\nexp_growth_model = ExpGrowthRate(data = data, initialisation_prior = Normal())\n\nThen, we can use generate_latent_infs to construct a Turing model for the unobserved infection generation model set by the type of direct_inf_model.\n\n# Construct a Turing model\nZ_t = randn(100) * 0.05\nlatent_inf = generate_latent_infs(exp_growth_model, Z_t)\n\nNow we can use the Turing PPL API to sample underlying parameters and generate the unobserved infections.\n\n# Sample from the unobserved infections model\n\n#Sample random parameters from prior\nθ = rand(latent_inf)\n#Get unobserved infections as a generated quantities from the model\nI_t = generated_quantities(latent_inf, θ)\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiInfModels/internals/#EpiAware.EpiAwareUtils.get_state-Tuple{EpiAware.EpiInfModels.ConstantRenewalStep, Any, Any}","page":"Internal API","title":"EpiAware.EpiAwareUtils.get_state","text":"get_state(\n acc_step::EpiAware.EpiInfModels.ConstantRenewalStep,\n initial_state,\n state\n) -> Any\n\n\nMethod to get the state of the accumulation for a ConstantRenewalStep object.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiInfModels/internals/#EpiAware.EpiAwareUtils.get_state-Tuple{EpiAware.EpiInfModels.ConstantRenewalWithPopulationStep, Any, Any}","page":"Internal API","title":"EpiAware.EpiAwareUtils.get_state","text":"get_state(\n acc_step::EpiAware.EpiInfModels.ConstantRenewalWithPopulationStep,\n initial_state,\n state\n) -> Any\n\n\nMethod to get the state of the accumulation for a ConstantRenewalWithPopulationStep object.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiInfModels/internals/#EpiAware.EpiInfModels.make_renewal_init-Tuple{Renewal, Any, Any}","page":"Internal API","title":"EpiAware.EpiInfModels.make_renewal_init","text":"make_renewal_init(epi_model::Renewal, I₀, Rt₀) -> Any\n\n\nCreate the initial state of the Renewal model.\n\nArguments\n\nepi_model::Renewal: The Renewal model.\nI₀: The initial number of infected individuals.\nRt₀: The initial time-varying reproduction number.\n\nReturns\n\nThe initial vector of infected individuals.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiInfModels/internals/#EpiAware.EpiInfModels.neg_MGF-Tuple{Any, AbstractVector}","page":"Internal API","title":"EpiAware.EpiInfModels.neg_MGF","text":"neg_MGF(r, w::AbstractVector) -> Any\n\n\nCompute the negative moment generating function (MGF) for a given rate r and weights w.\n\nArguments\n\nr: The rate parameter.\nw: An abstract vector of weights.\n\nReturns\n\nThe value of the negative MGF.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiInfModels/internals/#EpiAware.EpiInfModels.oneexpy-Tuple{T} where T","page":"Internal API","title":"EpiAware.EpiInfModels.oneexpy","text":"oneexpy(y) -> Any\n\n\nVersion of LogExpFunctions.xexpy that takes a single argument y and returns exp(y).\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiInfModels/internals/#EpiAware.EpiInfModels.renewal_init_state-Tuple{EpiAware.EpiInfModels.ConstantRenewalStep, Any, Any, Any}","page":"Internal API","title":"EpiAware.EpiInfModels.renewal_init_state","text":"renewal_init_state(\n 
recurrent_step::EpiAware.EpiInfModels.ConstantRenewalStep,\n I₀,\n r_approx,\n len_gen_int\n) -> Any\n\n\nConstructs the initial conditions for a renewal model with ConstantRenewalStep type of step function.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiInfModels/internals/#EpiAware.EpiInfModels.renewal_init_state-Tuple{EpiAware.EpiInfModels.ConstantRenewalWithPopulationStep, Any, Any, Any}","page":"Internal API","title":"EpiAware.EpiInfModels.renewal_init_state","text":"renewal_init_state(\n recurrent_step::EpiAware.EpiInfModels.ConstantRenewalWithPopulationStep,\n I₀,\n r_approx,\n len_gen_int\n) -> Any\n\n\nConstructs the initial conditions for a renewal model with ConstantRenewalWithPopulationStep type of step function.\n\n\n\n\n\n","category":"method"},{"location":"getting-started/explainers/interfaces/#Interfaces","page":"Interfaces","title":"Interfaces","text":"","category":"section"},{"location":"getting-started/explainers/interfaces/","page":"Interfaces","title":"Interfaces","text":"We support two primary workflows for using the package:","category":"page"},{"location":"getting-started/explainers/interfaces/","page":"Interfaces","title":"Interfaces","text":"EpiProblem: A high-level interface for defining and fitting models to data. This is the recommended way to use the package.\nTuring interface: A lower-level interface for defining and fitting models to data. This is the more flexible way to use the package and may also be more familiar to users of Turing.jl.","category":"page"},{"location":"getting-started/explainers/interfaces/","page":"Interfaces","title":"Interfaces","text":"See the getting started section for tutorials on each of these workflows.","category":"page"},{"location":"getting-started/explainers/interfaces/#EpiProblem","page":"Interfaces","title":"EpiProblem","text":"","category":"section"},{"location":"getting-started/explainers/interfaces/","page":"Interfaces","title":"Interfaces","text":"Each module of the overall epidemiological model we are interested in is a Turing Model in its own right. In this section, we compose the individual models into the full epidemiological model using the EpiProblem struct.","category":"page"},{"location":"getting-started/explainers/interfaces/","page":"Interfaces","title":"Interfaces","text":"The constructor for an EpiProblem requires:","category":"page"},{"location":"getting-started/explainers/interfaces/","page":"Interfaces","title":"Interfaces","text":"An epi_model.\nA latent_model.\nAn observation_model.\nA tspan.","category":"page"},{"location":"getting-started/explainers/interfaces/","page":"Interfaces","title":"Interfaces","text":"The tspan set the range of the time index for the models.","category":"page"},{"location":"getting-started/explainers/interfaces/#Turing-interface","page":"Interfaces","title":"Turing interface","text":"","category":"section"},{"location":"getting-started/explainers/interfaces/","page":"Interfaces","title":"Interfaces","text":"The Turing interface is a lower-level interface for defining and fitting models to data. This is the more flexible way to use the package and may also be more familiar to users of Turing.jl.","category":"page"},{"location":"getting-started/tutorials/#Tutorials","page":"Overview","title":"Tutorials","text":"","category":"section"},{"location":"getting-started/tutorials/","page":"Overview","title":"Overview","text":"This section contains tutorials that will help you get started with EpiAware for specific tasks. 
See the sidebar for the list of tutorials.","category":"page"},{"location":"overview/#overview","page":"Overview","title":"Overview of the EpiAware Software Ecosystem","text":"","category":"section"},{"location":"overview/","page":"Overview","title":"Overview","text":"EpiAware is not a standard toolkit for infectious disease modelling.","category":"page"},{"location":"overview/","page":"Overview","title":"Overview","text":"It seeks to be highly modular and composable for advanced users whilst still providing opinionated workflows for those who are new to the field. Developed by the authors behind other widely used infectious disease modelling packages such as EpiNow2, epinowcast, and epidist, alongside experts in infectious disease modelling in Julia,EpiAware is designed to go beyond the capabilities of these packages by providing a more flexible and extensible framework for modelling and inference of infectious disease dynamics.","category":"page"},{"location":"overview/#Package-Features","page":"Overview","title":"Package Features","text":"","category":"section"},{"location":"overview/","page":"Overview","title":"Overview","text":"Flexible: The package is designed to be flexible and extensible, and to provide a consistent interface for fitting and simulating models.\nModular: The package is designed to be modular, with a clear separation between the model and the data.\nExtensible: The package is designed to be extensible, with a clear separation between the model and the data.\nConsistent: The package is designed to provide a consistent interface for fitting and simulating models.\nEfficient: The package is designed to be efficient, with a clear separation between the model and the data.","category":"page"},{"location":"overview/#Package-structure","page":"Overview","title":"Package structure","text":"","category":"section"},{"location":"overview/","page":"Overview","title":"Overview","text":"EpiAware.jl is a wrapper around a series of submodules, each of which provides a different aspect of the package's functionality (much like the tidyverse in R). The package is designed to be modular, with a clear separation between modules and between modules and data. Currently included modules are:","category":"page"},{"location":"overview/","page":"Overview","title":"Overview","text":"EpiAwareBase: The core module, which provides the underlying abstract types and functions for the package.\nEpiAwareUtils: A utility module, which provides a series of utility functions for working with the package.\nEpiInference: An inference module, which provides a series of functions for fitting models to data. Builds on top of Turing.jl.\nEpiInfModels: Provides tools for composing models of the disease transmission process. Builds on top of Turing.jl, in particular the DynamicPPL.jl interface.\nEpiLatentModels: Provides tools for composing latent models such as random walks, autoregressive models, etc. Builds on top of DynamicPPL.jl. Used by all other modelling modules to define latent processes.\nEpiObsModels: Provides tools for composing observation models, such as Poisson, Binomial, etc. 
Builds on top of DynamicPPL.jl.","category":"page"},{"location":"overview/#Using-the-package","page":"Overview","title":"Using the package","text":"","category":"section"},{"location":"overview/","page":"Overview","title":"Overview","text":"We support two primary workflows for using the package:","category":"page"},{"location":"overview/","page":"Overview","title":"Overview","text":"EpiProblem: A high-level interface for defining and fitting models to data. This is the recommended way to use the package.\nTuring interface: A lower-level interface for defining and fitting models to data. This is the more flexible way to use the package and may also be more familiar to users of Turing.jl.","category":"page"},{"location":"overview/","page":"Overview","title":"Overview","text":"See the getting started section for tutorials on each of these workflows.","category":"page"},{"location":"lib/EpiAwareUtils/internals/#Internal-Documentation","page":"Internal API","title":"Internal Documentation","text":"","category":"section"},{"location":"lib/EpiAwareUtils/internals/","page":"Internal API","title":"Internal API","text":"Documentation for EpiAwareUtils.jl's internal interface.","category":"page"},{"location":"lib/EpiAwareUtils/internals/#Contents","page":"Internal API","title":"Contents","text":"","category":"section"},{"location":"lib/EpiAwareUtils/internals/","page":"Internal API","title":"Internal API","text":"Pages = [\"internals.md\"]\nDepth = 2:2","category":"page"},{"location":"lib/EpiAwareUtils/internals/#Index","page":"Internal API","title":"Index","text":"","category":"section"},{"location":"lib/EpiAwareUtils/internals/","page":"Internal API","title":"Internal API","text":"Pages = [\"internals.md\"]","category":"page"},{"location":"lib/EpiAwareUtils/internals/#Internal-API","page":"Internal API","title":"Internal API","text":"","category":"section"},{"location":"lib/EpiAwareUtils/internals/","page":"Internal API","title":"Internal API","text":"Modules = [EpiAware.EpiAwareUtils]\nPublic = false","category":"page"},{"location":"lib/EpiAwareUtils/internals/#EpiAware.EpiAwareBase._apply_method","page":"Internal API","title":"EpiAware.EpiAwareBase._apply_method","text":"_apply_method(\n model::DynamicPPL.Model,\n method::AbstractEpiMethod;\n ...\n) -> Any\n_apply_method(\n model::DynamicPPL.Model,\n method::AbstractEpiMethod,\n prev_result;\n kwargs...\n) -> Any\n\n\nApply the inference/generative method method to the Model object mdl.\n\nArguments\n\nmodel::AbstractEpiModel: The model to apply the method to.\nmethod::AbstractEpiMethod: The epidemiological method to apply.\nprev_result: The previous result of the method.\nkwargs: Additional keyword arguments passed to the method.\n\nReturns\n\nnothing: If no concrete implementation is defined for the given method.\n\n\n\n\n\n","category":"function"},{"location":"lib/EpiAwareUtils/internals/#EpiAware.EpiAwareBase._apply_method-2","page":"Internal API","title":"EpiAware.EpiAwareBase._apply_method","text":"_apply_method(\n model::DynamicPPL.Model,\n method::DirectSample;\n ...\n) -> Any\n_apply_method(\n model::DynamicPPL.Model,\n method::DirectSample,\n prev_result;\n kwargs...\n) -> Any\n\n\nImplements direct sampling from a Turing model.\n\n\n\n\n\n","category":"function"},{"location":"lib/EpiAwareUtils/internals/#EpiAware.EpiAwareBase._apply_method-Tuple{DynamicPPL.Model, EpiMethod, Any}","page":"Internal API","title":"EpiAware.EpiAwareBase._apply_method","text":"_apply_method(\n model::DynamicPPL.Model,\n method::EpiMethod,\n prev_result;\n 
kwargs...\n) -> Any\n\n\nApply steps defined by an EpiMethod to a model object.\n\nThis function applies the steps defined by an EpiMethod object to a Model object. It iterates over the pre-sampler steps defined in the EpiMethod object and recursively applies them to the model. Finally, it applies the sampler step defined in the EpiMethod object to the model. The prev_result argument is used to pass the result obtained from applying the previous steps, if any.\n\nArguments\n\nmethod::EpiMethod: The EpiMethod object containing the steps to be applied.\nmodel::Model: The model object to which the steps will be applied.\nprev_result: The previous result obtained from applying the steps. Defaults to nothing.\nkwargs...: Additional keyword arguments that can be passed to the steps.\n\nReturns\n\nprev_result: The result obtained after applying the steps.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiAwareUtils/internals/#EpiAware.EpiAwareBase._apply_method-Tuple{DynamicPPL.Model, EpiMethod}","page":"Internal API","title":"EpiAware.EpiAwareBase._apply_method","text":"_apply_method(\n model::DynamicPPL.Model,\n method::EpiMethod;\n kwargs...\n) -> Any\n\n\nApply a method to a mode without previous results\n\nArguments\n\nmodel::Model: The model to apply the method to.\nmethod::EpiMethod: The method to apply.\nkwargs...: Additional keyword arguments.\n\nReturns\n\nThe result of applying the method to the model.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiAwareUtils/internals/#EpiAware.EpiAwareBase.condition_model-Tuple{DynamicPPL.Model, NamedTuple, NamedTuple}","page":"Internal API","title":"EpiAware.EpiAwareBase.condition_model","text":"condition_model(\n model::DynamicPPL.Model,\n fix_parameters::NamedTuple,\n condition_parameters::NamedTuple\n) -> Any\n\n\nApply the condition to the model by fixing the specified parameters and conditioning on the others.\n\nArguments\n\nmodel::Model: The model to be conditioned.\nfix_parameters::NamedTuple: The parameters to be fixed.\ncondition_parameters::NamedTuple: The parameters to be conditioned on.\n\nReturns\n\n_model: The conditioned model.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiAwareUtils/internals/#EpiAware.EpiAwareBase.generate_epiaware-Tuple{Any, Any, AbstractTuringEpiModel}","page":"Internal API","title":"EpiAware.EpiAwareBase.generate_epiaware","text":"generate_epiaware(\n y_t,\n time_steps,\n epi_model::AbstractTuringEpiModel;\n latent_model,\n observation_model\n)\n\n\nGenerate an epi-aware model given the observed data and model specifications.\n\nArguments\n\ny_t: Observed data.\ntime_steps: Number of time steps.\nepi_model: A Turing Epi model specification.\nlatent_model: A Turing Latent model specification.\nobservation_model: A Turing Observation model specification.\n\nReturns\n\nA DynamicPPPL.Model object.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiAwareUtils/internals/#EpiAware.EpiAwareBase.generated_observables-Tuple{DynamicPPL.Model, Any, Union{NamedTuple, MCMCChains.Chains}}","page":"Internal API","title":"EpiAware.EpiAwareBase.generated_observables","text":"generated_observables(\n model::DynamicPPL.Model,\n data,\n solution::Union{NamedTuple, MCMCChains.Chains}\n) -> EpiAwareObservables\n\n\nGenerate observables from a given model and solution including generated quantities.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiAwareUtils/internals/#EpiAware.EpiAwareUtils._apply_direct_sample-Tuple{Any, Any, Int64}","page":"Internal 
API","title":"EpiAware.EpiAwareUtils._apply_direct_sample","text":"_apply_direct_sample(\n model,\n method,\n n_samples::Int64;\n kwargs...\n) -> Any\n\n\nSample the model directly using Turing.Prior() and a NamedTuple of the sampled random variables along with generated quantities.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiAwareUtils/internals/#EpiAware.EpiAwareUtils._apply_direct_sample-Tuple{Any, Any, Nothing}","page":"Internal API","title":"EpiAware.EpiAwareUtils._apply_direct_sample","text":"_apply_direct_sample(\n model,\n method,\n n_samples::Nothing\n) -> Any\n\n\nSample the model directly using rand and return a single set of sampled random variables.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiAwareUtils/internals/#EpiAware.EpiAwareUtils._check_and_give_ts-Tuple{Distributions.Distribution, Any, Any, Any}","page":"Internal API","title":"EpiAware.EpiAwareUtils._check_and_give_ts","text":"_check_and_give_ts(\n dist::Distributions.Distribution,\n Δd,\n D,\n upper\n) -> Any\n\n\nInternal function to check censored_pmf arguments and return the time steps of the rightmost limits of the censor intervals.\n\n\n\n\n\n","category":"method"},{"location":"#EpiAware.jl","page":"EpiAware.jl: Real-time infectious disease monitoring","title":"EpiAware.jl","text":"","category":"section"},{"location":"","page":"EpiAware.jl: Real-time infectious disease monitoring","title":"EpiAware.jl: Real-time infectious disease monitoring","text":"Infectious disease situational awareness modelling toolkit for Julia.","category":"page"},{"location":"#Where-to-start","page":"EpiAware.jl: Real-time infectious disease monitoring","title":"Where to start","text":"","category":"section"},{"location":"","page":"EpiAware.jl: Real-time infectious disease monitoring","title":"EpiAware.jl: Real-time infectious disease monitoring","text":"Want to get started running code? Check out the Getting Started Tutorials.\nWhat is EpiAware? Check out our Overview.\nWant to see some end-to-end examples? Check out our EpiAware showcase.\nWant to understand the API? Check out our API Reference.\nWant to chat with someone about EpiAware? Post on our GitHub Discussions.\nWant to contribute to EpiAware? Check out our Developer documentation.\nWant to see our code? Check out our GitHub Repository.","category":"page"},{"location":"getting-started/tutorials/censored-obs/","page":"Fitting distributions with censored data","title":"Fitting distributions with censored data","text":"\n\n\n\n\n
Fitting distributions using EpiAware and Turing PPL
Introduction
What are we going to do in this Vignette
In this vignette, we'll demonstrate how to use EpiAwareUtils.∫F, the CDF function for censored delay distributions that underlies EpiAwareUtils.censored_pmf, in conjunction with the Turing PPL for Bayesian inference of epidemiological delay distributions. We'll cover the following key points:
Simulating censored delay distribution data
Fitting a naive model using Turing
Evaluating the naive model's performance
Fitting an improved model using censored delay functionality from EpiAware.
Comparing the censored delay model's performance to the naive model
This note is generated using the EpiAware package locally via Pkg.develop, in the EpiAware/docs environment. It is also possible to install EpiAware directly from its GitHub repository.
As well as EpiAware and Turing, we will use Makie ecosystem packages for plotting and DataFramesMeta for data manipulation.
\n\n
let\n docs_dir = dirname(dirname(dirname(@__DIR__)))\n using Pkg: Pkg\n Pkg.activate(docs_dir)\n Pkg.instantiate()\nend
\n\n\n\n
The other dependencies are as follows:
\n\n
begin\n using EpiAware.EpiAwareUtils: censored_pmf, censored_cdf, ∫F\n using Random, Distributions, StatsBase #utilities for random events\n using DataFramesMeta #Data wrangling\n using CairoMakie, PairPlots #plotting\n using Turing #PPL\nend
\n\n\n","category":"page"},{"location":"getting-started/tutorials/censored-obs/#Simulating-censored-and-truncated-delay-distribution-data","page":"Fitting distributions with censored data","title":"Simulating censored and truncated delay distribution data","text":"","category":"section"},{"location":"getting-started/tutorials/censored-obs/","page":"Fitting distributions with censored data","title":"Fitting distributions with censored data","text":"
\n
We'll start by simulating some censored and truncated delay distribution data. We'll define a function, rpcens, for generating the data.
\n\n
Random.seed!(123) # For reproducibility
\n
TaskLocalRNG()
\n\n\n
Define the true distribution parameters
\n\n
n = 2000
\n
2000
\n\n
meanlog = 1.5
\n
1.5
\n\n
sdlog = 0.75
\n
0.75
\n\n
true_dist = LogNormal(meanlog, sdlog)
\n
Distributions.LogNormal{Float64}(μ=1.5, σ=0.75)
\n\n\n
Generate varying pwindow, swindow, and obs_time lengths
We recreate the primary censored sampling function from primarycensoreddist; see its documentation for details.
\n\n
\"\"\"\n function rpcens(dist; pwindow = 1, swindow = 1, D = Inf, max_tries = 1000)\n\nDoes a truncated censored sample from `dist` with a uniform primary time on `[0, pwindow]`.\n\"\"\"\nfunction rpcens(dist; pwindow = 1, swindow = 1, D = Inf, max_tries = 1000)\n T = zero(eltype(dist))\n invalid_sample = true\n attempts = 1\n while (invalid_sample && attempts <= max_tries)\n X = rand(dist)\n U = rand() * pwindow\n T = X + U\n attempts += 1\n if X + U < D\n invalid_sample = false\n end\n end\n\n @assert !invalid_sample \"censored value not found in $max_tries attempts\"\n\n return (T ÷ swindow) * swindow\nend
\n\n\n
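The code that generates the varying censoring windows and observation times is not included in this extract; a minimal sketch, with illustrative (hypothetical) values rather than the notebook's exact choices, is:

# Hypothetical window and observation-time vectors of length n (values are illustrative)
pwindows = rand(1:2, n)    # primary censoring window lengths
swindows = rand(1:2, n)    # secondary censoring window lengths
obs_times = rand(8:10, n)  # right-truncation (maximum observation) times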
#Sample secondary time relative to beginning of primary censor window respecting the right-truncation\nsamples = map(pwindows, swindows, obs_times) do pw, sw, ot\n rpcens(true_dist; pwindow = pw, swindow = sw, D = ot)\nend
We've aggregated the data to unique combinations of pwindow, swindow, and obs_time and counted the number of occurrences of each observed_delay for each combination. This is the data we will use to fit our model.
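The aggregation code itself is not shown here; a minimal sketch of how a delay_counts table could be built with the DataFrames tools re-exported by DataFramesMeta (the table layout, and all column names other than observed_delay and n, are assumptions):

# Collect the simulated data and count occurrences of each observed delay
df = DataFrame(pwindow = pwindows, swindow = swindows,
    obs_time = obs_times, observed_delay = samples)
delay_counts = combine(
    groupby(df, [:pwindow, :swindow, :obs_time, :observed_delay]), nrow => :n)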
\n\n","category":"page"},{"location":"getting-started/tutorials/censored-obs/#Fitting-a-naive-model-using-Turing","page":"Fitting distributions with censored data","title":"Fitting a naive model using Turing","text":"","category":"section"},{"location":"getting-started/tutorials/censored-obs/","page":"Fitting distributions with censored data","title":"Fitting distributions with censored data","text":"
\n
We'll start by fitting a naive model using NUTS from Turing. We define the model in the Turing PPL.
\n\n
@model function naive_model(N, y, n)\n mu ~ Normal(1.0, 1.0)\n sigma ~ truncated(Normal(0.5, 1.0); lower = 0.0)\n d = LogNormal(mu, sigma)\n\n for i in eachindex(y)\n Turing.@addlogprob! n[i] * logpdf(d, y[i])\n end\nend
\n
naive_model (generic function with 2 methods)
\n\n\n
Now let's instantiate this model with data.
\n\n
naive_mdl = naive_model(\n size(delay_counts, 1),\n delay_counts.observed_delay .+ 1e-6, # Add a small constant to avoid log(0)\n delay_counts.n)
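The sampling call is not included in this extract; a minimal sketch, assuming default NUTS settings and four parallel chains (draw and chain counts are illustrative):

# Fit the naive model with NUTS and summarise the chains
naive_fit = sample(naive_mdl, NUTS(), MCMCThreads(), 500, 4)
summarize(naive_fit)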
We see that the model has converged and the diagnostics look good. However, the posterior summary alone suggests that we might not be very happy with the fit: mu is smaller than the target 1.5 and sigma is larger than the target 0.75.
\n\n","category":"page"},{"location":"getting-started/tutorials/censored-obs/#Fitting-an-improved-model-using-censoring-utilities","page":"Fitting distributions with censored data","title":"Fitting an improved model using censoring utilities","text":"","category":"section"},{"location":"getting-started/tutorials/censored-obs/","page":"Fitting distributions with censored data","title":"Fitting distributions with censored data","text":"
\n
We'll now fit an improved model using the ∫F function from EpiAware.EpiAwareUtils for calculating the CDF of the total delay from the beginning of the primary window to the secondary event time. This includes both the delay distribution we are making inference on and the time between the start of the primary censor window and the primary event. The ∫F function underlies the censored_pmf function from the EpiAware.EpiAwareUtils submodule.
Using the ∫F function we can write a log-pmf function primary_censored_dist_lpmf that accounts for:
The primary and secondary censoring windows, which can vary in length.
The effect of right truncation in biasing our observations.
This is the analogue of the function of the same name in primarycensoreddist: it calculates the log-probability of the secondary event occurring in the secondary censoring window, conditional on the primary event occurring in the primary censoring window, by calculating the increase in the CDF over the secondary window and rescaling by the probability of the secondary event occurring within the maximum observation time D.
\n\n
function primary_censored_dist_lpmf(dist, y, pwindow, y_upper, D)\n if y == 0.0\n return log(∫F(dist, y_upper, pwindow)) - log(∫F(dist, D, pwindow))\n else\n return log(∫F(dist, y_upper, pwindow) - ∫F(dist, y, pwindow)) -\n log(∫F(dist, D, pwindow))\n end\nend
\n
primary_censored_dist_lpmf (generic function with 1 method)
\n\n\n
We make a new Turing model that now uses primary_censored_dist_lpmf rather than the naive uncensored and untruncated logpdf.
\n\n
@model function primarycensoreddist_model(y, y_upper, n, pws, Ds)\n mu ~ Normal(1.0, 1.0)\n sigma ~ truncated(Normal(0.5, 0.5); lower = 0.0)\n dist = LogNormal(mu, sigma)\n\n for i in eachindex(y)\n Turing.@addlogprob! n[i] * primary_censored_dist_lpmf(\n dist, y[i], pws[i], y_upper[i], Ds[i])\n end\nend
\n
primarycensoreddist_model (generic function with 2 methods)
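The instantiation and fitting of this model are not included in this extract; a minimal sketch, assuming the aggregated delay_counts table also records the secondary window length and observation time for each row (these column names, and the window-based upper bound, are assumptions):

# Upper bound of each secondary censoring interval: observed delay plus the window width
censored_mdl = primarycensoreddist_model(
    delay_counts.observed_delay,
    delay_counts.observed_delay .+ delay_counts.swindow,
    delay_counts.n,
    delay_counts.pwindow,
    delay_counts.obs_time)
censored_fit = sample(censored_mdl, NUTS(), MCMCThreads(), 500, 4)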
We see that the model has converged and the diagnostics look good. We also see that the posterior means are very near the true parameters and the 90% credible intervals include the true parameters.
\n\n","category":"page"},{"location":"getting-started/tutorials/censored-obs/","page":"Fitting distributions with censored data","title":"Fitting distributions with censored data","text":"EditURL = \"https://github.com/CDCgov/Rt-without-renewal/blob/main/docs/src/getting-started/tutorials/censored-obs.jl\"","category":"page"},{"location":"lib/EpiInfModels/public/#Public-Documentation","page":"Public API","title":"Public Documentation","text":"","category":"section"},{"location":"lib/EpiInfModels/public/","page":"Public API","title":"Public API","text":"Documentation for EpiInfModels.jl's public interface.","category":"page"},{"location":"lib/EpiInfModels/public/","page":"Public API","title":"Public API","text":"See the Internals section of the manual for internal package docs covering all submodules.","category":"page"},{"location":"lib/EpiInfModels/public/#Contents","page":"Public API","title":"Contents","text":"","category":"section"},{"location":"lib/EpiInfModels/public/","page":"Public API","title":"Public API","text":"Pages = [\"public.md\"]\nDepth = 2:2","category":"page"},{"location":"lib/EpiInfModels/public/#Index","page":"Public API","title":"Index","text":"","category":"section"},{"location":"lib/EpiInfModels/public/","page":"Public API","title":"Public API","text":"Pages = [\"public.md\"]","category":"page"},{"location":"lib/EpiInfModels/public/#Public-API","page":"Public API","title":"Public API","text":"","category":"section"},{"location":"lib/EpiInfModels/public/","page":"Public API","title":"Public API","text":"Modules = [EpiAware.EpiInfModels]\nPrivate = false","category":"page"},{"location":"lib/EpiInfModels/public/#EpiAware.EpiInfModels","page":"Public API","title":"EpiAware.EpiInfModels","text":"Module for defining epidemiological models.\n\n\n\n\n\n","category":"module"},{"location":"lib/EpiInfModels/public/#EpiAware.EpiInfModels.DirectInfections","page":"Public API","title":"EpiAware.EpiInfModels.DirectInfections","text":"struct DirectInfections{S<:Distributions.Sampleable} <: AbstractTuringEpiModel\n\nModel unobserved/latent infections as a transformation on a sampled latent process.\n\nMathematical specification\n\nIf Z_t is a realisation of the latent model, then the unobserved/latent infections are given by\n\nI_t = g(hatI_0 + Z_t)\n\nwhere g is a transformation function and the unconstrained initial infections hatI_0 are sampled from a prior distribution.\n\nDirectInfections are constructed by passing an EpiData object data and an initialisation_prior for the prior distribution of hatI_0. The default initialisation_prior is Normal().\n\nConstructors\n\nDirectInfections(; data, initialisation_prior)\n\nExample usage with generate_latent_infs\n\ngenerate_latent_infs can be used to construct a Turing model for the latent infections conditional on the sample path of a latent process. 
In this example, we generate a sample of a white noise latent process.\n\nFirst, we construct a DirectInfections struct with an EpiData object, an initialisation prior and a transformation function.\n\nusing Distributions, Turing, EpiAware\ngen_int = [0.2, 0.3, 0.5]\ng = exp\n\n# Create an EpiData object\ndata = EpiData(gen_int, g)\n\n# Create a DirectInfections model\ndirect_inf_model = DirectInfections(data = data, initialisation_prior = Normal())\n\nThen, we can use generate_latent_infs to construct a Turing model for the unobserved infection generation model set by the type of direct_inf_model.\n\n# Construct a Turing model\nZ_t = randn(100)\nlatent_inf = generate_latent_infs(direct_inf_model, Z_t)\n\nNow we can use the Turing PPL API to sample underlying parameters and generate the unobserved infections.\n\n# Sample from the unobserved infections model\n\n#Sample random parameters from prior\nθ = rand(latent_inf)\n#Get unobserved infections as a generated quantities from the model\nI_t = generated_quantities(latent_inf, θ)\n\n\n\nFields\n\ndata::EpiData: Epidata object.\ninitialisation_prior::Distributions.Sampleable: Prior distribution for the initialisation of the infections. Default is Normal().\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiInfModels/public/#EpiAware.EpiInfModels.EpiData","page":"Public API","title":"EpiAware.EpiInfModels.EpiData","text":"struct EpiData{T<:Real, F<:Function}\n\nThe EpiData struct represents epidemiological data used in infectious disease modeling.\n\nConstructors\n\nEpiData(gen_int, transformation::Function). Constructs an EpiData object with discrete\n\ngeneration interval gen_int and transformation function transformation.\n\nEpiData(;gen_distribution::ContinuousDistribution, D_gen, Δd = 1.0, transformation::Function = exp).\n\nConstructs an EpiData object with double interval censoring discretisation of the continuous next generation interval distribution gen_distribution with additional right truncation at D_gen. Δd sets the interval width (default = 1.0). transformation sets the transformation function\n\nExamples\n\nConstruction direct from discrete generation interval and transformation function:\n\nusing EpiAware\ngen_int = [0.2, 0.3, 0.5]\ng = exp\ndata = EpiData(gen_int, g)\n\nConstruction from continuous distribution for generation interval.\n\nusing Distributions\n\ngen_distribution = Uniform(0.0, 10.0)\n\ndata = EpiData(;gen_distribution\n D_gen = 10.0)\n\n\n\nFields\n\ngen_int::Vector{T} where T<:Real: Discrete generation interval.\nlen_gen_int::Integer: Length of the discrete generation interval.\ntransformation::Function: Transformation function defining constrained and unconstrained domain bijections.\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiInfModels/public/#EpiAware.EpiInfModels.ExpGrowthRate","page":"Public API","title":"EpiAware.EpiInfModels.ExpGrowthRate","text":"struct ExpGrowthRate{S<:Distributions.Sampleable} <: AbstractTuringEpiModel\n\nModel unobserved/latent infections as due to time-varying exponential growth rate r_t which is generated by a latent process.\n\nMathematical specification\n\nIf Z_t is a realisation of the latent model, then the unobserved/latent infections are given by\n\nI_t = g(hatI_0) exp(Z_t)\n\nwhere g is a transformation function and the unconstrained initial infections hatI_0 are sampled from a prior distribution.\n\nExpGrowthRate are constructed by passing an EpiData object data and an initialisation_prior for the prior distribution of hatI_0. 
The default initialisation_prior is Normal().\n\nConstructor\n\nExpGrowthRate(; data, initialisation_prior).\n\nExample usage with generate_latent_infs\n\ngenerate_latent_infs can be used to construct a Turing model for the latent infections conditional on the sample path of a latent process. In this example, we generate a sample of a white noise latent process.\n\nFirst, we construct an ExpGrowthRate struct with an EpiData object, an initialisation prior and a transformation function.\n\nusing Distributions, Turing, EpiAware\ngen_int = [0.2, 0.3, 0.5]\ng = exp\n\n# Create an EpiData object\ndata = EpiData(gen_int, g)\n\n# Create an ExpGrowthRate model\nexp_growth_model = ExpGrowthRate(data = data, initialisation_prior = Normal())\n\nThen, we can use generate_latent_infs to construct a Turing model for the unobserved infection generation model set by the type of direct_inf_model.\n\n# Construct a Turing model\nZ_t = randn(100) * 0.05\nlatent_inf = generate_latent_infs(exp_growth_model, Z_t)\n\nNow we can use the Turing PPL API to sample underlying parameters and generate the unobserved infections.\n\n# Sample from the unobserved infections model\n\n#Sample random parameters from prior\nθ = rand(latent_inf)\n#Get unobserved infections as a generated quantities from the model\nI_t = generated_quantities(latent_inf, θ)\n\n\n\nFields\n\ndata::EpiData\ninitialisation_prior::Distributions.Sampleable\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiInfModels/public/#EpiAware.EpiInfModels.Renewal","page":"Public API","title":"EpiAware.EpiInfModels.Renewal","text":"struct Renewal{E, S<:Distributions.Sampleable, A} <: AbstractTuringRenewal\n\nModel unobserved/latent infections as due to time-varying Renewal model with reproduction number mathcalR_t which is generated by a latent process.\n\nMathematical specification\n\nIf Z_t is a realisation of the latent model, then the unobserved/latent infections are given by\n\nbeginalign\nmathcalR_t = g(Z_t)\nI_t = mathcalR_t sum_i=1^n-1 I_t-i g_i qquad t geq 1 \nI_t = g(hatI_0) exp(r(mathcalR_1) t) qquad t leq 0\nendalign\n\nwhere g is a transformation function and the unconstrained initial infections hatI_0 are sampled from a prior distribution. The discrete generation interval is given by g_i.\n\nr(mathcalR_1) is the exponential growth rate implied by mathcalR_1) using the implicit relationship between the exponential growth rate and the reproduction number.\n\nmathcalR sum_j geq 1 g_j exp(- r j)= 1\n\nRenewal are constructed by passing an EpiData object data and an initialisation_prior for the prior distribution of hatI_0. The default initialisation_prior is Normal().\n\nConstructors\n\nRenewal(; data, initialisation_prior). Construct a Renewal model with default update steps.\nRenewal(data; initialisation_prior). Construct a Renewal model with default update steps.\nRenewal(data, initialisation_prior, recurrent_step) Construct a Renewal model with recurrent_step update step function.\n\nExample usage with generate_latent_infs\n\ngenerate_latent_infs can be used to construct a Turing model for the latent infections conditional on the sample path of a latent process. 
In this example, we generate a sample of a white noise latent process.\n\nFirst, we construct an Renewal struct with an EpiData object, an initialisation prior and a transformation function.\n\nusing Distributions, Turing, EpiAware\ngen_int = [0.2, 0.3, 0.5]\ng = exp\n\n# Create an EpiData object\ndata = EpiData(gen_int, g)\n\n# Create an Renewal model\nrenewal_model = Renewal(data; initialisation_prior = Normal())\n\nThen, we can use generate_latent_infs to construct a Turing model for the unobserved infection generation model set by the type of direct_inf_model.\n\n# Construct a Turing model\nZ_t = randn(100) * 0.05\nlatent_inf = generate_latent_infs(renewal_model, Z_t)\n\nNow we can use the Turing PPL API to sample underlying parameters and generate the unobserved infections.\n\n# Sample from the unobserved infections model\n\n#Sample random parameters from prior\nθ = rand(latent_inf)\n#Get unobserved infections as a generated quantities from the model\nI_t = generated_quantities(latent_inf, θ)\n\n\n\nFields\n\ndata::Any\ninitialisation_prior::Distributions.Sampleable\nrecurrent_step::Any\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiInfModels/public/#EpiAware.EpiInfModels.R_to_r-Union{Tuple{T}, Tuple{Any, Vector{T}}} where T<:AbstractFloat","page":"Public API","title":"EpiAware.EpiInfModels.R_to_r","text":"R_to_r(\n R₀,\n w::Array{T<:AbstractFloat, 1};\n newton_steps,\n Δd\n) -> Any\n\n\nThis function computes an approximation to the exponential growth rate r given the reproductive ratio R₀ and the discretized generation interval w with discretized interval width Δd. This is based on the implicit solution of\n\nG(r) - 1 over R_0 = 0\n\nwhere\n\nG(r) = sum_i=1^n w_i e^-r i\n\nis the negative moment generating function (MGF) of the generation interval distribution.\n\nThe two step approximation is based on: 1. Direct solution of implicit equation for a small r approximation. 2. 
Improving the approximation using Newton's method for a fixed number of steps newton_steps.\n\nReturns:\n\nThe approximate value of r.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiInfModels/public/#EpiAware.EpiInfModels.expected_Rt-Tuple{EpiData, Vector{<:Real}}","page":"Public API","title":"EpiAware.EpiInfModels.expected_Rt","text":"expected_Rt(\n data::EpiData,\n infections::Vector{<:Real}\n) -> Any\n\n\nCalculate the expected Rt values based on the given EpiData object and infections.\n\nR_t = fracI_tsum_i=1^n I_t-i g_i\n\nArguments\n\ndata::EpiData: An instance of the EpiData type containing generation interval data.\ninfections::Vector{<:Real}: A vector of infection data.\n\nReturns\n\nexp_Rt::Vector{Float64}: A vector of expected Rt values.\n\nExamples\n\nusing EpiAware\n\ndata = EpiData([0.2, 0.3, 0.5], exp)\ninfections = [100, 200, 300, 400, 500]\nexpected_Rt(data, infections)\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiInfModels/public/#EpiAware.EpiInfModels.r_to_R-Tuple{Any, AbstractVector}","page":"Public API","title":"EpiAware.EpiInfModels.r_to_R","text":"r_to_R(r, w::AbstractVector) -> Any\n\n\nr_to_R(r, w)\n\nCompute the reproductive ratio given exponential growth rate r and discretized generation interval w.\n\nArguments\n\nr: The exponential growth rate.\nw: discretized generation interval.\n\nReturns\n\nThe reproductive ratio.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiAwareUtils/#EpiAwareUtils.jl","page":"Overview","title":"EpiAwareUtils.jl","text":"","category":"section"},{"location":"lib/EpiAwareUtils/","page":"Overview","title":"Overview","text":"This package provides utility functions for the EpiAware ecosystem.","category":"page"},{"location":"lib/EpiAwareUtils/#API","page":"Overview","title":"API","text":"","category":"section"},{"location":"lib/EpiAwareUtils/","page":"Overview","title":"Overview","text":"Pages = [\"lib/EpiAwareUtils/public.md\", \"lib/EpiAwareUtils/internals.md\"]","category":"page"},{"location":"lib/EpiInference/#EpiInference.jl","page":"Overview","title":"EpiInference.jl","text":"","category":"section"},{"location":"lib/EpiInference/","page":"Overview","title":"Overview","text":"This package provides inference algorithms for the EpiAware ecosystem.","category":"page"},{"location":"lib/EpiInference/#API","page":"Overview","title":"API","text":"","category":"section"},{"location":"lib/EpiInference/","page":"Overview","title":"Overview","text":"Pages = [\"lib/EpiInference/public.md\", \"lib/EpiInference/internals.md\"]","category":"page"},{"location":"lib/public/#Public-Documentation","page":"Public API","title":"Public Documentation","text":"","category":"section"},{"location":"lib/public/","page":"Public API","title":"Public API","text":"Documentation for EpiAware.jl's public interface.","category":"page"},{"location":"lib/public/","page":"Public API","title":"Public API","text":"See the Internals section of the manual for internal package docs covering all submodules.","category":"page"},{"location":"lib/public/#Contents","page":"Public API","title":"Contents","text":"","category":"section"},{"location":"lib/public/","page":"Public API","title":"Public API","text":"Pages = [\"public.md\"]\nDepth = 2:2","category":"page"},{"location":"lib/public/#Index","page":"Public API","title":"Index","text":"","category":"section"},{"location":"lib/public/","page":"Public API","title":"Public API","text":"Pages = [\"public.md\"]","category":"page"},{"location":"lib/public/#Public-API","page":"Public API","title":"Public 
API","text":"","category":"section"},{"location":"lib/public/","page":"Public API","title":"Public API","text":"Modules = [EpiAware]\nPrivate = false","category":"page"},{"location":"showcase/replications/mishra-2020/","page":"On the derivation of the renewal equation from an age-dependent branching process: an epidemic modelling perspective","title":"On the derivation of the renewal equation from an age-dependent branching process: an epidemic modelling perspective","text":"\n\n\n\n\n
The time-varying reproductive number is modelled as an AR(2) process on the log scale, \(\log R_t \sim \text{AR(2)}\).
The latent infection (\\(I_t\\)) generating process is a renewal model (note that we leave out external infections in this note):
$$I_t = R_t \\sum_{s\\geq 1} I_{t-s} g_s.$$
The discrete generation interval \\(g_t\\) is a daily discretisation of the probability mass function of an estimated serial interval distribution for SARS-CoV-2:
$$G \\sim \\text{Gamma}(6.5,0.62).$$
Observed cases \\(C_t\\) are distributed around latent infections with negative binomial errors:
In the examples below we are going to largely recreate the Mishra et al model, whilst emphasising that each component of the overall epidemiological model is, itself, a stand-alone model that can be sampled from.
\n\n\n\n\n","category":"page"},{"location":"showcase/replications/mishra-2020/#Dependencies-for-this-notebook","page":"On the derivation of the renewal equation from an age-dependent branching process: an epidemic modelling perspective","title":"Dependencies for this notebook","text":"","category":"section"},{"location":"showcase/replications/mishra-2020/","page":"On the derivation of the renewal equation from an age-dependent branching process: an epidemic modelling perspective","title":"On the derivation of the renewal equation from an age-dependent branching process: an epidemic modelling perspective","text":"
\n
Now we want to import these dependencies into scope. If evaluating these code lines/blocks in the REPL, the REPL will offer to install any missing dependencies. Alternatively, you can add them to your active environment using Pkg.add.
\n\n
using EpiAware
\n\n\n
using Turing, DynamicPPL #Underlying Turing ecosystem packages to interact with models
\n\n\n
using Distributions, Statistics #Statistics packages
\n\n\n
using CSV, DataFramesMeta #Data wrangling
\n\n\n
using CairoMakie, PairPlots, TimeSeries #Plotting backend
\n\n\n
using ReverseDiff #Automatic differentiation backend
\n\n\n
begin #Date utility and set Random seed\n using Dates\n using Random\n Random.seed!(1)\nend
\n
TaskLocalRNG()
\n\n","category":"page"},{"location":"showcase/replications/mishra-2020/#Load-early-SARS-2-case-data-for-South-Korea","page":"On the derivation of the renewal equation from an age-dependent branching process: an epidemic modelling perspective","title":"Load early SARS-2 case data for South Korea","text":"","category":"section"},{"location":"showcase/replications/mishra-2020/","page":"On the derivation of the renewal equation from an age-dependent branching process: an epidemic modelling perspective","title":"On the derivation of the renewal equation from an age-dependent branching process: an epidemic modelling perspective","text":"
\n
First, we make sure that the data we want to analyse is in scope by downloading it from where we have saved a copy in the EpiAware repository.
NB: The case data is curated by the covidregionaldata package. We accessed the South Korean case data using a short R script. It is possible to interface directly from a Julia session using the RCall.jl package, but we do not do so in this notebook to reduce the number of underlying dependencies.
Time-varying reproduction number as an AbstractLatentModel type
EpiAware exposes an AbstractLatentModel abstract type, the purpose of which is to group stochastic processes that can be interpreted as generating time-varying parameters/quantities of interest, which we call latent process models.
In the Mishra et al model, the log of the time-varying reproductive number, \(Z_t\), is assumed to evolve as an auto-regressive process of order 2, AR(2).
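The AR(2) recursion is not rendered in this extract; reconstructed from the parameter description that follows, it reads

$$Z_t = \rho_1 Z_{t-1} + \rho_2 Z_{t-2} + \epsilon_t.$$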
Where \\(\\rho_1,\\rho_2\\), which are the parameters of AR process, and \\(\\epsilon_t\\) is a white noise process with standard deviation \\(\\sigma^*\\).
\n\n\n
In EpiAware we determine the behaviour of a latent process by choosing a concrete subtype (i.e. a struct) of AbstractLatentModel which has fields that set the priors of the various parameters required for the latent process.
The AR process has the struct AR <: AbstractLatentModel. The user can supply the priors for \\(\\rho_1,\\rho_2\\) in the field damp_priors, for \\(\\sigma^*\\) in the field std_prior, and the initial values \\(Z_1, Z_2\\) in the field init_priors.
\n\n\n
We choose priors based on Mishra et al using the Distributions.jl interface to probability distributions. Note that we truncate the priors for the AR parameters to \([0,1]\), as in Mishra et al, using the truncated function.
In Mishra et al, the standard deviation of the stationary distribution of \(Z_t\) has a standard normal prior conditioned to be positive, \(\sigma \sim \mathcal{N}^+(0,1)\). The value \(\sigma^*\) was determined from a nonlinear function of the sampled \(\sigma, ~\rho_1, ~\rho_2\) values. Since Mishra et al give sharply informative priors for \(\rho_1,~\rho_2\) (see below), we simplify by calculating \(\sigma^*\) at the prior mode of \(\rho_1,~\rho_2\). This results in a \(\sigma^* \sim \mathcal{N}^+(0, 0.5)\) prior.
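A sketch of constructing the AR latent model with these fields is given below; the specific prior parameters shown are illustrative assumptions rather than the notebook's exact choices.

# AR(2) latent model: damping priors truncated to [0,1], positive prior for σ*
ar = AR(
    damp_priors = [truncated(Normal(0.8, 0.05), 0, 1),
        truncated(Normal(0.1, 0.05), 0, 1)],
    std_prior = truncated(Normal(0.0, 0.5); lower = 0.0),
    init_priors = [Normal(-1.0, 0.1), Normal(-1.0, 0.1)]
)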
As mentioned above, we can use this instance of the AR latent model to construct a Turing model object which implements the probabilistic behaviour determined by ar. We do this with the constructor function exposed by EpiAware: generate_latent, which combines an AbstractLatentModel subtype struct with the number of time steps for which we want to generate the latent process.
As a refresher, recall that the Turing.Model object has the following properties:
The model object parameters are sampleable using rand; that is we can generate parameters from the specified priors e.g. θ = rand(mdl).
The model object is generative as a callable; that is we can sample instances of \\(Z_t\\) e.g. Z_t = mdl().
The model object can construct new model objects by conditioning parameters using the DynamicPPL.jl syntax, e.g. conditional_mdl = mdl | (σ_AR = 1.0, ).
As a concrete example, we create a model object for 50 time steps of the AR(2) process we specified above.
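A minimal sketch of this call, assuming the ar model constructed above:

ar_mdl = generate_latent(ar, 50)  # Turing model for 50 steps of the AR(2) latent process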
Ultimately, this will only be one component of the full epidemiological model. However, it is useful to visualise its probabilistic behaviour for model diagnostics and prior predictive checking.
We can spaghetti plot generative samples from the AR(2) process with the priors specified above.
\n\n
plt_ar_sample = let\n n_samples = 100\n ar_mdl_samples = mapreduce(hcat, 1:n_samples) do _\n ar_mdl() .|> exp #Sample Z_t trajectories for the model\n end\n\n fig = Figure()\n ax = Axis(fig[1, 1];\n yscale = log10,\n ylabel = \"Time varying Rₜ\",\n title = \"$(n_samples) draws from the prior Rₜ model\"\n )\n for col in eachcol(ar_mdl_samples)\n lines!(ax, col, color = (:grey, 0.1))\n end\n fig\nend
\n\n\n\n
This suggests that, a priori, we believe there is a few percent chance of achieving very high \(R_t\) values, i.e. \(R_t \sim 10-1000\) is not excluded by our priors.
\n\n\n
The Renewal model as an AbstractEpiModel type
The abstract type exposed by EpiAware for models that generate infections is called AbstractEpiModel. As with latent models, different concrete subtypes of AbstractEpiModel define different classes of infection generating process. In this case we want to implement a renewal model.
The Renewal <: AbstractEpiModel type of struct needs two fields:
Data about the generation interval of the infectious disease so it can construct \\(g_t\\).
A prior for the initial numbers of infected.
Mishra et al use an estimate of the serial interval of SARS-CoV-2 as an estimate of the generation interval.
\n\n
truth_GI = Gamma(6.5, 0.62)
\n
Distributions.Gamma{Float64}(α=6.5, θ=0.62)
\n\n\n
This is a continuous representation of the generation interval distribution, whereas the infection process will be formulated in discrete daily time steps. By default, EpiAware performs double interval censoring to convert our continuous estimate of the generation interval into a discretized version \(g_t\), whilst also applying left truncation such that \(g_0 = 0\) and normalising so that \(\sum_t g_t = 1.\)
The constructor for converting a continuous estimate of the generation interval distribution into a usable discrete time estimate is EpiData.
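The construction itself is not shown in this extract; a minimal sketch, where the right-truncation point D_gen is an illustrative assumption:

# Discretise the continuous serial interval estimate with right truncation at D_gen days
model_data = EpiData(; gen_distribution = truth_GI, D_gen = 10.0)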
We can compare the discretized generation interval with the continuous estimate, which in this example is the serial interval estimate.
\n\n
let\n fig = Figure()\n ax = Axis(fig[1, 1];\n xticks = 0:14,\n xlabel = \"Days\",\n title = \"Continuous and discrete generation intervals\"\n )\n barplot!(ax, model_data.gen_int;\n label = \"Discretized next gen pmf\"\n )\n lines!(truth_GI;\n label = \"Continuous serial interval\",\n color = :green\n )\n axislegend(ax)\n fig\nend
\n\n\n\n
The user also needs to specify a prior for the log incidence at time zero, \\(\\log I_0\\). The initial history of latent infections \\(I_{-1}, I_{-2},\\dots\\) is constructed as
$$I_t = e^{rt} I_0,\\qquad t = 0, -1, -2,...$$
The exponential growth rate \(r\) is determined by the initial reproductive number \(R_1\) via the solution to an implicit equation.
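The equation itself is not rendered in this extract; from the Renewal model documentation it takes the form

$$R_1 \sum_{j \geq 1} g_j e^{-r j} = 1,$$

where \(g_j\) is the discrete generation interval.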
NB: We don't implement a background infection rate in this model.
\n\n\n
Turing model interface to Renewal process
As mentioned above, we can use this instance of the Renewal latent infection model to construct a Turing Model which implements the probabilistic behaviour determined by epi. We do this with the constructor function generate_latent_infs, which combines epi with a provided \(\log R_t\) time series.
Here we choose an example where \\(R_t\\) decreases from \\(R_t = 3\\) to \\(R_t = 0.5\\) over the course of 50 days.
\n\n
R_t_fixed = [0.5 + 2.5 / (1 + exp(t - 15)) for t in 1:50]
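The construction of the latent infection model sampled in the plot below is not included in this extract; a minimal sketch, assuming epi is the Renewal model constructed earlier:

# Turing model for latent infections driven by the fixed log R_t trajectory
latent_inf_mdl = generate_latent_infs(epi, log.(R_t_fixed))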
plt_epi = let\n n_samples = 100\n #Sample unconditionally the underlying parameters of the model\n epi_mdl_samples = mapreduce(hcat, 1:n_samples) do _\n latent_inf_mdl()\n end\n fig = Figure()\n ax1 = Axis(fig[1, 1];\n title = \"$(n_samples) draws from renewal model with chosen Rt\",\n ylabel = \"Latent infections\"\n )\n ax2 = Axis(fig[2, 1];\n ylabel = \"Rt\"\n )\n for col in eachcol(epi_mdl_samples)\n lines!(ax1, col;\n color = (:grey, 0.1)\n )\n end\n lines!(ax2, R_t_fixed;\n linewidth = 2\n )\n fig\nend
\n\n\n\n
Negative Binomial Observations as an ObservationModel type
In Mishra et al, latent infections were assumed to occur on their observation day with negative binomial errors; this motivates using the serial interval (the time between onset of symptoms of a primary and secondary case) rather than the generation interval distribution (the time between the infection times of a primary and secondary case).
Observation models are set in EpiAware as concrete subtypes of an ObservationModel. The negative binomial error model without observation delays is set with a NegativeBinomialError struct. In Mishra et al, the overdispersion parameter \(\phi\) sets the relationship between the mean and variance of the negative binomial errors.
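That relationship is not rendered in this extract; it is the standard negative binomial mean-variance relationship,

$$\text{Var}[C_t] = \mathbb{E}[C_t] + \frac{\mathbb{E}[C_t]^2}{\phi}.$$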
In EpiAware, we default to a prior on \(\sqrt{1/\phi}\) because this quantity is approximately the coefficient of variation of the observation noise and is, therefore, easier to reason about a priori. We call this quantity the cluster factor.
A prior for \\(\\phi\\) was not specified in Mishra et al, we select one below but we will condition a value in analysis below.
Turing model interface to the NegativeBinomialError model
We can construct a NegativeBinomialError model implementation as a Turing Model using the EpiAware generate_observations function.
Turing uses missing arguments to indicate variables that are to be sampled. We use this to create a forward model that samples observations conditional on an underlying expected observation time series.
\n\n\n
First, we set an artificial expected cases curve.
\n\n
expected_cases = [1000 * exp(-(t - 15)^2 / (2 * 4)) for t in 1:30]
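The construction of the observation model sampled below is not shown in this extract; a minimal sketch, assuming the default NegativeBinomialError priors:

obs = NegativeBinomialError()
# `missing` marks the observations as variables to be sampled by the forward model
obs_mdl = generate_observations(obs, missing, expected_cases)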
plt_obs = let\n n_samples = 100\n obs_mdl_samples = mapreduce(hcat, 1:n_samples) do _\n θ = obs_mdl() #Sample unconditionally the underlying parameters of the model\n end\n fig = Figure()\n ax = Axis(fig[1, 1];\n title = \"$(n_samples) draws from neg. bin. obs model\",\n ylabel = \"Observed cases\"\n )\n for col in eachcol(obs_mdl_samples)\n scatter!(ax, col;\n color = (:grey, 0.2)\n )\n end\n lines!(ax, expected_cases;\n color = :red,\n linewidth = 3,\n label = \"Expected cases\"\n )\n axislegend(ax)\n fig\nend
\n\n\n\n
Composing models into an EpiProblem
Mishra et al follows a common pattern of having an infection generation process driven by a latent process with an observation model that links the infection process to a discrete valued time series of incidence data.
In EpiAware we provide an EpiProblem constructor for this common epidemiological model pattern.
The constructor for an EpiProblem requires:
An epi_model.
A latent_model.
An observation_model.
A tspan.
The tspan field sets the range of the time index for the models, as sketched below.
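A sketch of this composition, assuming the epi, ar and obs components constructed earlier and an illustrative tspan:

# Compose the infection, latent and observation models into one problem definition
epi_prob = EpiProblem(
    epi_model = epi,
    latent_model = ar,
    observation_model = obs,
    tspan = (1, 50)
)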
\n\n","category":"page"},{"location":"showcase/replications/mishra-2020/#Inference-Methods","page":"On the derivation of the renewal equation from an age-dependent branching process: an epidemic modelling perspective","title":"Inference Methods","text":"","category":"section"},{"location":"showcase/replications/mishra-2020/","page":"On the derivation of the renewal equation from an age-dependent branching process: an epidemic modelling perspective","title":"On the derivation of the renewal equation from an age-dependent branching process: an epidemic modelling perspective","text":"
\n
We make inferences on the unobserved quantities, such as \(R_t\), by sampling from the model conditioned on the observed data. We generate the posterior samples using the No-U-Turn Sampler (NUTS).
To make NUTS more robust we provide manypathfinder, which is built on pathfinder variational inference from Pathfinder.jl. manypathfinder runs nruns pathfinder processes on the inference problem and returns the pathfinder run with the maximum estimated ELBO.
This composition, in which variational inference runs as a pre-sampler step whose result is passed to NUTS for initialisation, is defined using the EpiMethod struct, where a sequence of pre-sampler steps can be specified.
EpiMethod also allows the specification of NUTS parameters, such as the type of automatic differentiation, the type of parallelism, and the number of parallel chains to sample.
\n\n","category":"page"},{"location":"showcase/replications/mishra-2020/#Inference-and-analysis","page":"On the derivation of the renewal equation from an age-dependent branching process: an epidemic modelling perspective","title":"Inference and analysis","text":"","category":"section"},{"location":"showcase/replications/mishra-2020/","page":"On the derivation of the renewal equation from an age-dependent branching process: an epidemic modelling perspective","title":"On the derivation of the renewal equation from an age-dependent branching process: an epidemic modelling perspective","text":"
\n
We supply the data as a NamedTuple with the y_t field containing the observed data, shortened to fit the chosen tspan of epi_prob.
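A hypothetical sketch of this step (the name of the vector holding the case counts is an assumption):

# NamedTuple with the observed cases restricted to the problem's tspan
south_korea_data = (y_t = daily_cases[first(epi_prob.tspan):last(epi_prob.tspan)],)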
In the epidemiological model it is hard to distinguish between the AR parameters, such as the standard deviation of the AR process, and the cluster factor of the negative binomial observation model. The reason for this identifiability problem is that the model assumes no delay between infection and observation. Therefore, on any day the data could be explained by either \(R_t\) changing or observation noise, and it is not easy to disentangle greater volatility in \(R_t\) from higher noise in the observations.
In models with latent delays, changes in \(R_t\) impact the observed cases over several days, which means that it is easier to disentangle trend effects from observation-to-observation fluctuations.
To counteract this problem, we condition the model on a fixed cluster factor value.
\n\n
fixed_cluster_factor = 0.25
\n
0.25
\n\n\n
EpiAware has the generate_epiaware function, which joins an EpiProblem object with the data to produce a Turing model. This Turing model composes the three unit Turing models defined above: the Renewal infection generating process, the AR latent process for \(\log R_t\), and the negative binomial observation model. Therefore, we can condition on variables as with any other Turing model.
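A hypothetical sketch of this step; both the way the data is passed and the name of the conditioned cluster-factor variable are assumptions, not the package's confirmed API:

mdl = generate_epiaware(epi_prob, south_korea_data)
# Condition on the fixed cluster factor using DynamicPPL's `|` syntax (variable name assumed)
cond_mdl = mdl | (var"obs.cluster_factor" = fixed_cluster_factor,)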
To assess the quality of the inference visually, we can plot predictive quantiles for generated case data from the version of the model that has not been conditioned on case data, using posterior parameters inferred from the version that has been conditioned on observed data. For this purpose, we add a generated_quantiles utility function. This kind of visualisation is known as posterior predictive checking and is a useful diagnostic tool for Bayesian inference.
We also plot the inferred \(R_t\) estimates from the model. We find that the EpiAware model recovers the main finding in Mishra et al: that the \(R_t\) in South Korea peaked at a very high value (\(R_t \sim 10\) at peak) before rapidly dropping below 1 in early March 2020.
Note that, in reality, the peak \(R_t\) found here and in Mishra et al is unrealistically high; this might be due to a combination of:
A mis-estimated generation interval/serial interval distribution.
An ascertainment rate that was, in reality, changing over time.
In a future note, we'll demonstrate having a time-varying ascertainment rate.
\n\n
function generated_quantiles(gens, quantity, qs; transformation = x -> x)\n mapreduce(hcat, gens) do gen #loop over sampled generated quantities\n getfield(gen, quantity) |> transformation\n end |> mat -> mapreduce(hcat, qs) do q #Loop over matrix row to condense into qs\n map(eachrow(mat)) do row\n if any(ismissing, row)\n return missing\n else\n quantile(row, q)\n end\n end\n end\nend
\n
generated_quantiles (generic function with 1 method)
\n\n\n","category":"page"},{"location":"showcase/replications/mishra-2020/","page":"On the derivation of the renewal equation from an age-dependent branching process: an epidemic modelling perspective","title":"On the derivation of the renewal equation from an age-dependent branching process: an epidemic modelling perspective","text":"EditURL = \"https://github.com/CDCgov/Rt-without-renewal/blob/main/docs/src/showcase/replications/mishra-2020/index.jl\"","category":"page"},{"location":"lib/EpiAwareBase/public/#Public-Documentation","page":"Public API","title":"Public Documentation","text":"","category":"section"},{"location":"lib/EpiAwareBase/public/","page":"Public API","title":"Public API","text":"Documentation for EpiAwareBae.jl's public interface.","category":"page"},{"location":"lib/EpiAwareBase/public/","page":"Public API","title":"Public API","text":"See the Internals section of the manual for internal package docs covering all submodules.","category":"page"},{"location":"lib/EpiAwareBase/public/#Contents","page":"Public API","title":"Contents","text":"","category":"section"},{"location":"lib/EpiAwareBase/public/","page":"Public API","title":"Public API","text":"Pages = [\"public.md\"]\nDepth = 2:2","category":"page"},{"location":"lib/EpiAwareBase/public/#Index","page":"Public API","title":"Index","text":"","category":"section"},{"location":"lib/EpiAwareBase/public/","page":"Public API","title":"Public API","text":"Pages = [\"public.md\"]","category":"page"},{"location":"lib/EpiAwareBase/public/#Public-API","page":"Public API","title":"Public API","text":"","category":"section"},{"location":"lib/EpiAwareBase/public/","page":"Public API","title":"Public API","text":"Modules = [EpiAware.EpiAwareBase]\nPrivate = false","category":"page"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase","page":"Public API","title":"EpiAware.EpiAwareBase","text":"Module for defining abstract epidemiological types.\n\n\n\n\n\n","category":"module"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.AbstractAccumulationStep","page":"Public API","title":"EpiAware.EpiAwareBase.AbstractAccumulationStep","text":"abstract type AbstractAccumulationStep\n\nAbstract type for all accumulation steps\n\n\n\nFields\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.AbstractBroadcastRule","page":"Public API","title":"EpiAware.EpiAwareBase.AbstractBroadcastRule","text":"abstract type AbstractBroadcastRule\n\nAn abstract type representing a broadcast rule.\n\n\n\nFields\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.AbstractEpiMethod","page":"Public API","title":"EpiAware.EpiAwareBase.AbstractEpiMethod","text":"abstract type AbstractEpiMethod\n\nAbstract supertype for all EpiAware inference/generative modelling methods.\n\n\n\nFields\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.AbstractEpiModel","page":"Public API","title":"EpiAware.EpiAwareBase.AbstractEpiModel","text":"abstract type AbstractEpiModel <: AbstractModel\n\nThe abstract supertype for all structs that define a model for generating unobserved/latent infections.\n\n\n\nFields\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.AbstractEpiOptMethod","page":"Public API","title":"EpiAware.EpiAwareBase.AbstractEpiOptMethod","text":"abstract type AbstractEpiOptMethod <: AbstractEpiMethod\n\nAbstract supertype for infence/generative methods that are based on optimization, 
e.g. MAP estimation or variational inference.\n\n\n\nFields\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.AbstractEpiProblem","page":"Public API","title":"EpiAware.EpiAwareBase.AbstractEpiProblem","text":"abstract type AbstractEpiProblem\n\nAbstract supertype for all EpiAware problems.\n\n\n\nFields\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.AbstractEpiSamplingMethod","page":"Public API","title":"EpiAware.EpiAwareBase.AbstractEpiSamplingMethod","text":"abstract type AbstractEpiSamplingMethod <: AbstractEpiMethod\n\nAbstract supertype for infence/generative methods that are based on sampling from the posterior distribution, e.g. NUTS.\n\n\n\nFields\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.AbstractLatentModel","page":"Public API","title":"EpiAware.EpiAwareBase.AbstractLatentModel","text":"abstract type AbstractLatentModel <: AbstractModel\n\nThe abstract supertype for all structs that define a model for generating a latent process used in EpiAware models.\n\n\n\nFields\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.AbstractObservationModel","page":"Public API","title":"EpiAware.EpiAwareBase.AbstractObservationModel","text":"abstract type AbstractObservationModel <: AbstractModel\n\nA type representing an abstract observation model that is a subtype of AbstractModel.\n\n\n\nFields\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.AbstractTuringEpiModel","page":"Public API","title":"EpiAware.EpiAwareBase.AbstractTuringEpiModel","text":"abstract type AbstractTuringEpiModel <: AbstractEpiModel\n\nA abstract type representing a Turing-based epidemiological model.\n\n\n\nFields\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.AbstractTuringIntercept","page":"Public API","title":"EpiAware.EpiAwareBase.AbstractTuringIntercept","text":"abstract type AbstractTuringIntercept <: AbstractTuringLatentModel\n\nA abstract type used to define the common interface for intercept models.\n\n\n\nFields\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.AbstractTuringLatentModel","page":"Public API","title":"EpiAware.EpiAwareBase.AbstractTuringLatentModel","text":"abstract type AbstractTuringLatentModel <: AbstractLatentModel\n\nA abstract type representing a Turing-based Latent model.\n\n\n\nFields\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.AbstractTuringObservationErrorModel","page":"Public API","title":"EpiAware.EpiAwareBase.AbstractTuringObservationErrorModel","text":"abstract type AbstractTuringObservationErrorModel <: AbstractTuringObservationModel\n\nThe abstract supertype for all structs that defines a Turing-based model for generating observation errors.\n\n\n\nFields\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.AbstractTuringObservationModel","page":"Public API","title":"EpiAware.EpiAwareBase.AbstractTuringObservationModel","text":"abstract type AbstractTuringObservationModel <: AbstractObservationModel\n\nA abstract type representing a Turing-based observation model.\n\n\n\nFields\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.AbstractTuringRenewal","page":"Public API","title":"EpiAware.EpiAwareBase.AbstractTuringRenewal","text":"abstract type 
AbstractTuringRenewal <: AbstractTuringEpiModel\n\nAbstract type for all Turing-based Renewal infection generating models.\n\n\n\nFields\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.EpiAwareObservables","page":"Public API","title":"EpiAware.EpiAwareBase.EpiAwareObservables","text":"struct EpiAwareObservables\n\nThe EpiAwareObservables struct represents the observables used in the EpiAware model.\n\nFields\n\nmodel: The model used for the observables.\ndata: The data used for the observables.\nsamples: Samples from the posterior distribution.\ngenerated: The generated observables.\n\n\n\nFields\n\nmodel::Any\ndata::Any\nsamples::Any\ngenerated::Any\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.EpiMethod","page":"Public API","title":"EpiAware.EpiAwareBase.EpiMethod","text":"struct EpiMethod{O<:AbstractEpiOptMethod, S<:AbstractEpiSamplingMethod} <: AbstractEpiMethod\n\nEpiMethod represents a method for performing EpiAware inference and/or generative modelling, which combines a sequence of optimization steps to pass initialisation information to a sampler method.\n\n\n\nFields\n\npre_sampler_steps::Vector{O} where O<:AbstractEpiOptMethod: Pre-sampler optimization steps.\nsampler::AbstractEpiSamplingMethod: Sampler method.\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.EpiProblem","page":"Public API","title":"EpiAware.EpiAwareBase.EpiProblem","text":"struct EpiProblem{E<:AbstractEpiModel, L<:AbstractLatentModel, O<:AbstractObservationModel} <: AbstractEpiProblem\n\nDefines an inference/generative modelling problem for case data.\n\nEpiProblem wraps the underlying components of an epidemiological model:\n\nepi_model: An epidemiological model for unobserved infections.\nlatent_model: A latent model for underlying latent process.\nobservation_model: An observation model for observed cases.\n\nAlong with a tspan tuple for the time span of the case data.\n\n\n\nFields\n\nepi_model::AbstractEpiModel: Epidemiological model for unobserved infections.\nlatent_model::AbstractLatentModel: Latent model for underlying latent process.\nobservation_model::AbstractObservationModel: Observation model for observed cases.\ntspan::Tuple{Int64, Int64}: Time span for either inference or generative modelling of case time series.\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase._apply_method","page":"Public API","title":"EpiAware.EpiAwareBase._apply_method","text":"_apply_method(\n model::AbstractEpiModel,\n method::AbstractEpiMethod;\n ...\n)\n_apply_method(\n model::AbstractEpiModel,\n method::AbstractEpiMethod,\n prev_result;\n kwargs...\n)\n\n\nApply the inference/generative method method to the AbstractEpiModel object mdl.\n\nArguments\n\nmodel::AbstractEpiModel: The model to apply the method to.\nmethod::AbstractEpiMethod: The epidemiological method to apply.\nprev_result: The previous result of the method.\nkwargs: Additional keyword arguments passed to the method.\n\nReturns\n\nnothing: If no concrete implementation is defined for the given method.\n\n\n\n\n\n","category":"function"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.apply_method-Tuple{Any, Any, Any}","page":"Public API","title":"EpiAware.EpiAwareBase.apply_method","text":"apply_method(\n model,\n method,\n data;\n kwargs...\n) -> EpiAwareObservables\n\n\nWrap the _apply_method function by calling it with the given model, method, data, and optional keyword 
arguments (kwargs). The resulting solution is then passed to the generated_observables function, along with the model and input data, to compute the generated observables.\n\nArguments\n\nmodel: The model to apply the method to.\nmethod: The method to apply to the model.\ndata: The data to pass to the apply_method function.\nkwargs: Optional keyword arguments to pass to the apply_method function.\n\nReturns\n\nThe generated observables computed from the solution.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.apply_method-Tuple{Any, Any}","page":"Public API","title":"EpiAware.EpiAwareBase.apply_method","text":"apply_method(\n model,\n method;\n kwargs...\n) -> EpiAwareObservables\n\n\nCalls wrap_apply_method setting the data argument to nothing.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.apply_method-Tuple{EpiProblem, AbstractEpiMethod, Any}","page":"Public API","title":"EpiAware.EpiAwareBase.apply_method","text":"apply_method(\n epiproblem::EpiProblem,\n method::AbstractEpiMethod,\n data;\n fix_parameters,\n condition_parameters,\n kwargs...\n) -> EpiAwareObservables\n\n\nRun the EpiAware algorithm to estimate the parameters of an epidemiological model.\n\nArguments\n\nepiproblem::EpiProblem: An EpiProblem object specifying the epidemiological problem.\nmethod::EpiMethod: An EpiMethod object specifying the inference method.\ndata: The observed data used for inference.\n\nKeyword Arguments\n\nfix_parameters::NamedTuple: A NamedTuple of fixed parameters for the model.\ncondition_parameters::NamedTuple: A NamedTuple of conditioned parameters for the model.\nkwargs...: Additional keyword arguments passed to the inference methods.\n\nReturns\n\nA NamedTuple with a samples field which is the output of applying methods and a model field with the model used. Optionally, a gens field with the generated quantities from the model if that makes sense with the inference method.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.broadcast_n-Tuple{AbstractBroadcastRule, Any, Any, Any}","page":"Public API","title":"EpiAware.EpiAwareBase.broadcast_n","text":"broadcast_n(\n broadcast_rule::AbstractBroadcastRule,\n latent,\n n,\n period\n)\n\n\nThis function is used to define the behavior of broadcasting for a specific type of AbstractBroadcastRule.\n\nThe broadcast_n function returns the length of the latent periods to generate using the given broadcast_rule. Which model of broadcasting to be implemented is set by the type of broadcast_rule. If no implemention is defined for the given broadcast_rule, then EpiAware will return a warning and return nothing.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.broadcast_rule-Tuple{AbstractBroadcastRule, Any, Any}","page":"Public API","title":"EpiAware.EpiAwareBase.broadcast_rule","text":"broadcast_rule(\n broadcast_rule::AbstractBroadcastRule,\n n,\n period\n)\n\n\nThis function is used to define the behavior of broadcasting for a specific type of AbstractBroadcastRule.\n\nThe broadcast_rule function implements a model of broadcasting a latent process. Which model of broadcasting to be implemented is set by the type of broadcast_rule. 
If no implemention is defined for the given broadcast_rule, then EpiAware will return a warning and return nothing.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.condition_model-Tuple{Any, Any, Any}","page":"Public API","title":"EpiAware.EpiAwareBase.condition_model","text":"condition_model(\n model,\n fix_parameters,\n condition_parameters\n) -> Any\n\n\nCondition a model on fixed (i.e to a value) and conditioned (i.e to data) parameters.\n\nReturns\n\nmodel: The conditioned model.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.generate_epiaware-Tuple{Any, Any, AbstractEpiModel, AbstractLatentModel, AbstractObservationModel}","page":"Public API","title":"EpiAware.EpiAwareBase.generate_epiaware","text":"generate_epiaware(\n y_t,\n time_step,\n epi_model::AbstractEpiModel,\n latent_model::AbstractLatentModel,\n observation_model::AbstractObservationModel\n)\n\n\nCreate an epi-aware model using the specified epimodel, latentmodel, and observation_model.\n\nArguments\n\ny_t: The observed data.\ntime_steps: The time steps.\nepi_model: An abstract epi model.\nlatent_model: An abstract latent model.\nobservation_model: An abstract observation model.\n\nReturns\n\nnothing\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.generate_epiaware-Tuple{EpiProblem, Any}","page":"Public API","title":"EpiAware.EpiAwareBase.generate_epiaware","text":"generate_epiaware(epiproblem::EpiProblem, data) -> Any\n\n\nGenerate an epi-aware model given an EpiProblem and data.\n\nArguments\n\nepiproblem: Epi problem specification.\ndata: Observed data.\n\nReturns\n\nA tuple containing the generated quantities of the epi-aware model.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.generate_latent-Tuple{AbstractLatentModel, Any}","page":"Public API","title":"EpiAware.EpiAwareBase.generate_latent","text":"generate_latent(latent_model::AbstractLatentModel, n) -> Any\n\n\nConstructor function for a latent process path Z_t of length n.\n\nThe generate_latent function implements a model of generating a latent process. Which model for generating the latent process infections is implemented is set by the type of latent_model. If no implemention is defined for the type of latent_model, then EpiAware will pass a warning and return nothing.\n\nInterface to Turing.jl probablilistic programming language (PPL)\n\nApart from the no implementation fallback method, the generate_latent implementation function should return a constructor function for a DynamicPPL.Model object. Sample paths of Z_t are generated quantities of the constructed model. Priors for model parameters are fields of epi_model.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.generate_latent_infs-Tuple{AbstractEpiModel, Any}","page":"Public API","title":"EpiAware.EpiAwareBase.generate_latent_infs","text":"generate_latent_infs(\n epi_model::AbstractEpiModel,\n Z_t\n) -> Any\n\n\nConstructor function for unobserved/latent infections based on the type of epi_model <: AbstractEpimodel and a latent process path Z_t.\n\nThe generate_latent_infs function implements a model of generating unobserved/latent infections conditional on a latent process. Which model of generating unobserved/latent infections to be implemented is set by the type of epi_model. 
If no implemention is defined for the given epi_model, then EpiAware will return a warning and return nothing.\n\nInterface to Turing.jl probablilistic programming language (PPL)\n\nApart from the no implementation fallback method, the generate_latent_infs implementation function returns a constructor function for a DynamicPPL.Model object where the unobserved/latent infections are a generated quantity. Priors for model parameters are fields of epi_model.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.generate_observations-Tuple{AbstractObservationModel, Any, Any}","page":"Public API","title":"EpiAware.EpiAwareBase.generate_observations","text":"generate_observations(\n obs_model::AbstractObservationModel,\n y_t,\n Y_t\n) -> Any\n\n\nConstructor function for generating observations based on the given observation model.\n\nThe generate_observations function implements a model of generating observations based on the given observation model. Which model of generating observations to be implemented is set by the type of obs_model. If no implemention is defined for the given obs_model, then EpiAware will return a warning and return nothing.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiAwareBase/public/#EpiAware.EpiAwareBase.generated_observables-Tuple{Any, Any, Any}","page":"Public API","title":"EpiAware.EpiAwareBase.generated_observables","text":"generated_observables(\n model,\n data,\n solution\n) -> EpiAwareObservables\n\n\nGenerate observables from a given model and solution and return them as a EpiAwareObservables struct.\n\nArguments\n\nmodel: The model used for generating observables.\ndata: The data used for generating observables.\nsolution: The solution used for generating observables.\n\nReturns\n\nAn instance of EpiAwareObservables struct with the provided model, data, solution, and the generated observables if specified\n\n\n\n\n\n","category":"method"},{"location":"getting-started/explainers/#Explainers","page":"Overview","title":"Explainers","text":"","category":"section"},{"location":"getting-started/explainers/","page":"Overview","title":"Overview","text":"This section contains a series of explainers that provide a detailed overview of the EpiAware platform and its features. These explainers are designed to help you understand the platform and its capabilities, and to provide you with the information you need to get started using EpiAware. 
See the sidebar for the list of explainers.","category":"page"},{"location":"lib/EpiObsModels/internals/#Internal-Documentation","page":"Internal API","title":"Internal Documentation","text":"","category":"section"},{"location":"lib/EpiObsModels/internals/","page":"Internal API","title":"Internal API","text":"Documentation for EpiObsModels.jl's internal interface.","category":"page"},{"location":"lib/EpiObsModels/internals/#Contents","page":"Internal API","title":"Contents","text":"","category":"section"},{"location":"lib/EpiObsModels/internals/","page":"Internal API","title":"Internal API","text":"Pages = [\"internals.md\"]\nDepth = 2:2","category":"page"},{"location":"lib/EpiObsModels/internals/#Index","page":"Internal API","title":"Index","text":"","category":"section"},{"location":"lib/EpiObsModels/internals/","page":"Internal API","title":"Internal API","text":"Pages = [\"internals.md\"]","category":"page"},{"location":"lib/EpiObsModels/internals/#Internal-API","page":"Internal API","title":"Internal API","text":"","category":"section"},{"location":"lib/EpiObsModels/internals/","page":"Internal API","title":"Internal API","text":"Modules = [EpiAware.EpiObsModels]\nPublic = false","category":"page"},{"location":"lib/EpiObsModels/internals/#EpiAware.EpiObsModels.LDStep","page":"Internal API","title":"EpiAware.EpiObsModels.LDStep","text":"struct LDStep{D<:(AbstractVector{<:Real})} <: AbstractAccumulationStep\n\nThe LatentDelay step function struct\n\n\n\nFields\n\nrev_pmf::AbstractVector{<:Real}\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiObsModels/internals/#EpiAware.EpiObsModels.LDStep-Tuple{Any, Any}","page":"Internal API","title":"EpiAware.EpiObsModels.LDStep","text":"The LatentDelay step function method for accumulate_scan.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiObsModels/internals/#EpiAware.EpiAwareBase.generate_observations-Tuple{AbstractTuringObservationErrorModel, Any, Any}","page":"Internal API","title":"EpiAware.EpiAwareBase.generate_observations","text":"generate_observations(\n obs_model::AbstractTuringObservationErrorModel,\n y_t,\n Y_t\n) -> Any\n\n\nGenerates observations from an observation error model. It provides support for missing values in observations (y_t), and expected observations (Y_t) that are shorter than observations. When this is the case it assumes that the expected observations are the last length(Y_t) elements of y_t. It also pads the expected observations with a small value (1e-6) to mitigate potential numerical issues.\n\nIt dispatches to the observation_error function to generate the observation error distribution which uses priors generated by generate_observation_error_priors submodel. 
For most observation error models specific implementations of observation_error and generate_observation_error_priors are required but a specific implementation of generate_observations is not required.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiObsModels/internals/#EpiAware.EpiAwareBase.generate_observations-Tuple{Ascertainment, Any, Any}","page":"Internal API","title":"EpiAware.EpiAwareBase.generate_observations","text":"generate_observations(\n obs_model::Ascertainment,\n y_t,\n Y_t\n) -> Any\n\n\nGenerates observations based on the LatentDelay observation model.\n\nArguments\n\nobs_model::Ascertainment: The Ascertainment model.\ny_t: The current state of the observations.\nY_t` : The expected observations.\n\nReturns\n\ny_t: The updated observations.\nexpected_aux: Additional expected observation-related variables.\nobs_aux: Additional observation-related variables.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiObsModels/internals/#EpiAware.EpiAwareBase.generate_observations-Tuple{LatentDelay, Any, Any}","page":"Internal API","title":"EpiAware.EpiAwareBase.generate_observations","text":"generate_observations(\n obs_model::LatentDelay,\n y_t,\n Y_t\n) -> Any\n\n\nGenerates observations based on the LatentDelay observation model.\n\nArguments\n\nobs_model::LatentDelay: The LatentDelay observation model.\ny_t: The current observations.\nI_t: The current infection indicator.\n\nReturns\n\ny_t: The updated observations.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiObsModels/internals/#EpiAware.EpiAwareBase.generate_observations-Tuple{StackObservationModels, NamedTuple, AbstractVector}","page":"Internal API","title":"EpiAware.EpiAwareBase.generate_observations","text":"generate_observations(\n obs_model::StackObservationModels,\n y_t::NamedTuple,\n Y_t::AbstractVector\n) -> Any\n\n\nGenerate observations from a stack of observation models. Maps Y_t to a NamedTuple of the same length as y_t assuming a 1 to many mapping.\n\nArguments\n\nobs_model::StackObservationModels: The stack of observation models.\ny_t::NamedTuple: The observed values.\nY_t::AbstractVector: The expected values.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiObsModels/internals/#EpiAware.EpiAwareBase.generate_observations-Tuple{StackObservationModels, NamedTuple, NamedTuple}","page":"Internal API","title":"EpiAware.EpiAwareBase.generate_observations","text":"generate_observations(\n obs_model::StackObservationModels,\n y_t::NamedTuple,\n Y_t::NamedTuple\n) -> Any\n\n\nGenerate observations from a stack of observation models. 
Assumes a 1 to 1 mapping between y_t and Y_t.\n\nArguments\n\nobs_model::StackObservationModels: The stack of observation models.\ny_t::NamedTuple: The observed values.\nY_t::NamedTuple: The expected values.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiObsModels/internals/#EpiAware.EpiAwareUtils.get_state-Tuple{EpiAware.EpiObsModels.LDStep, Any, Any}","page":"Internal API","title":"EpiAware.EpiAwareUtils.get_state","text":"get_state(\n acc_step::EpiAware.EpiObsModels.LDStep,\n initial_state,\n state\n) -> Any\n\n\nThe LatentDelay step function method for get_state.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiObsModels/internals/#EpiAware.EpiObsModels.NegativeBinomialMeanClust-Tuple{Any, Any}","page":"Internal API","title":"EpiAware.EpiObsModels.NegativeBinomialMeanClust","text":"NegativeBinomialMeanClust(μ, α) -> SafeNegativeBinomial\n\n\nCompute the mean-cluster factor negative binomial distribution.\n\nArguments\n\nμ: The mean of the distribution.\nα: The clustering factor parameter.\n\nReturns\n\nA NegativeBinomial distribution object.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiObsModels/internals/#EpiAware.EpiObsModels.generate_observation_kernel-Tuple{Any, Any}","page":"Internal API","title":"EpiAware.EpiObsModels.generate_observation_kernel","text":"generate_observation_kernel(\n delay_int,\n time_horizon;\n partial\n) -> Any\n\n\nGenerate an observation kernel matrix based on the given delay interval and time horizon.\n\nArguments\n\ndelay_int::Vector{Float64}: The delay PMF vector.\ntime_horizon::Int: The number of time steps of the observation period.\npartial::Bool: Whether to generate a partial observation kernel matrix.\n\nReturns\n\nK::SparseMatrixCSC{Float64, Int}: The observation kernel matrix.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiInference/internals/#Internal-Documentation","page":"Internal API","title":"Internal Documentation","text":"","category":"section"},{"location":"lib/EpiInference/internals/","page":"Internal API","title":"Internal API","text":"Documentation for EpInference.jl's internal interface.","category":"page"},{"location":"lib/EpiInference/internals/#Contents","page":"Internal API","title":"Contents","text":"","category":"section"},{"location":"lib/EpiInference/internals/","page":"Internal API","title":"Internal API","text":"Pages = [\"internals.md\"]\nDepth = 2:2","category":"page"},{"location":"lib/EpiInference/internals/#Index","page":"Internal API","title":"Index","text":"","category":"section"},{"location":"lib/EpiInference/internals/","page":"Internal API","title":"Internal API","text":"Pages = [\"internals.md\"]","category":"page"},{"location":"lib/EpiInference/internals/#Internal-API","page":"Internal API","title":"Internal API","text":"","category":"section"},{"location":"lib/EpiInference/internals/","page":"Internal API","title":"Internal API","text":"Modules = [EpiAware.EpiInference]\nPublic = false","category":"page"},{"location":"lib/EpiInference/internals/#EpiAware.EpiAwareBase._apply_method","page":"Internal API","title":"EpiAware.EpiAwareBase._apply_method","text":"_apply_method(\n model::DynamicPPL.Model,\n method::ManyPathfinder;\n ...\n) -> Any\n_apply_method(\n model::DynamicPPL.Model,\n method::ManyPathfinder,\n prev_result;\n kwargs...\n) -> Any\n\n\nApply a ManyPathfinder method to a DynamicPPL.Model object.\n\nIf prev_result is a vector of real numbers, then the ManyPathfinder method is applied with the initial values set to prev_result. 
Otherwise, the ManyPathfinder method is run with default initial values generated.\n\n\n\n\n\n","category":"function"},{"location":"lib/EpiInference/internals/#EpiAware.EpiAwareBase._apply_method-2","page":"Internal API","title":"EpiAware.EpiAwareBase._apply_method","text":"_apply_method(\n model::DynamicPPL.Model,\n method::NUTSampler;\n ...\n) -> Any\n_apply_method(\n model::DynamicPPL.Model,\n method::NUTSampler,\n prev_result;\n kwargs...\n) -> Any\n\n\nApply NUTS sampling to a DynamicPPL.Model object with prev_result representing any initial results to use for sampler initialisation.\n\n\n\n\n\n","category":"function"},{"location":"lib/EpiInference/internals/#EpiAware.EpiInference._apply_nuts-Tuple{Any, Any, Any}","page":"Internal API","title":"EpiAware.EpiInference._apply_nuts","text":"_apply_nuts(model, method, prev_result; kwargs...) -> Any\n\n\nNo initialisation NUTS.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiInference/internals/#EpiAware.EpiInference._apply_nuts-Tuple{Any, Any, Pathfinder.PathfinderResult}","page":"Internal API","title":"EpiAware.EpiInference._apply_nuts","text":"_apply_nuts(\n model,\n method,\n prev_result::Pathfinder.PathfinderResult;\n kwargs...\n) -> Any\n\n\nInitialise NUTS with initial parameters from a Pathfinder result.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiInference/internals/#EpiAware.EpiInference._continue_manypathfinder!-Tuple{Any, DynamicPPL.Model}","page":"Internal API","title":"EpiAware.EpiInference._continue_manypathfinder!","text":"_continue_manypathfinder!(\n pfs,\n mdl::DynamicPPL.Model;\n max_tries,\n nruns,\n kwargs...\n)\n\n\nContinue running the pathfinder algorithm until a pathfinder succeeds or the maximum number of tries is reached.\n\nArguments\n\npfs: An array of pathfinder objects.\nmdl::DynamicPPL.Model: The model to perform inference on.\nmax_tries: The maximum number of tries to run the pathfinder algorithm. Default is Inf.\nnruns: The number of times to run the pathfinder function.\nkwargs...: Additional keyword arguments passed to pathfinder.\n\nReturns\n\npfs: The updated array of pathfinder objects.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiInference/internals/#EpiAware.EpiInference._get_best_elbo_pathfinder-Tuple{Any}","page":"Internal API","title":"EpiAware.EpiInference._get_best_elbo_pathfinder","text":"_get_best_elbo_pathfinder(pfs) -> Any\n\n\nSelects the pathfinder with the highest ELBO estimate from a list of pathfinders.\n\nArguments\n\npfs: A list of pathfinders results or Symbol values indicating failure.\n\nReturns\n\nThe pathfinder with the highest ELBO estimate.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiInference/internals/#EpiAware.EpiInference._run_manypathfinder-Tuple{DynamicPPL.Model}","page":"Internal API","title":"EpiAware.EpiInference._run_manypathfinder","text":"_run_manypathfinder(mdl::DynamicPPL.Model; nruns, kwargs...)\n\n\nRun pathfinder multiple times and store the results in an array. 
Fails safely.\n\nArguments\n\nmdl::DynamicPPL.Model: The Turing model to be used for inference.\nnruns: The number of times to run the pathfinder function.\nkwargs...: Additional keyword arguments passed to pathfinder.\n\nReturns\n\nAn array of PathfinderResult objects or Symbol values indicating success or failure.\n\n\n\n\n\n","category":"method"},{"location":"getting-started/explainers/modelling-infections/#Modelling-infections","page":"Modelling infections","title":"Modelling infections","text":"","category":"section"},{"location":"developer/contributing/#Contributing","page":"Contributing","title":"Contributing","text":"","category":"section"},{"location":"developer/contributing/","page":"Contributing","title":"Contributing","text":"This page details the some of the guidelines that should be followed when contributing to this package. It is adapted from Documenter.jl.","category":"page"},{"location":"developer/contributing/#Branches","page":"Contributing","title":"Branches","text":"","category":"section"},{"location":"developer/contributing/","page":"Contributing","title":"Contributing","text":"release-* branches are used for tagged minor versions of this package. This follows the same approach used in the main Julia repository, albeit on a much more modest scale.","category":"page"},{"location":"developer/contributing/","page":"Contributing","title":"Contributing","text":"Please open pull requests against the master branch rather than any of the release-* branches whenever possible.","category":"page"},{"location":"developer/contributing/#Backports","page":"Contributing","title":"Backports","text":"","category":"section"},{"location":"developer/contributing/","page":"Contributing","title":"Contributing","text":"Bug fixes are backported to the release-* branches using git cherry-pick -x by a EpiAware member and will become available in point releases of that particular minor version of the package.","category":"page"},{"location":"developer/contributing/","page":"Contributing","title":"Contributing","text":"Feel free to nominate commits that should be backported by opening an issue. Requests for new point releases to be tagged in METADATA.jl can also be made in the same way.","category":"page"},{"location":"developer/contributing/#release-*-branches","page":"Contributing","title":"release-* branches","text":"","category":"section"},{"location":"developer/contributing/","page":"Contributing","title":"Contributing","text":"Each new minor version x.y.0 gets a branch called release-x.y (a protected branch).\nNew versions are usually tagged only from the release-x.y branches.\nFor patch releases, changes get backported to the release-x.y branch via a single PR with the standard name \"Backports for x.y.z\" and label \"Type: Backport\". The PR message links to all the PRs that are providing commits to the backport. The PR gets merged as a merge commit (i.e. not squashed).\nThe old release-* branches may be removed once they have outlived their usefulness.\nPatch version milestones are used to keep track of which PRs get backported etc.","category":"page"},{"location":"developer/contributing/#Style-Guide","page":"Contributing","title":"Style Guide","text":"","category":"section"},{"location":"developer/contributing/","page":"Contributing","title":"Contributing","text":"Follow the style of the surrounding text when making changes. When adding new features please try to stick to the following points whenever applicable. 
This project follows the SciML style guide.","category":"page"},{"location":"developer/contributing/#Tests","page":"Contributing","title":"Tests","text":"","category":"section"},{"location":"developer/contributing/#Unit-tests","page":"Contributing","title":"Unit tests","text":"","category":"section"},{"location":"developer/contributing/","page":"Contributing","title":"Contributing","text":"As is conventional for Julia packages, unit tests are located at test/*.jl with the entrypoint test/runtests.jl.","category":"page"},{"location":"developer/contributing/#End-to-end-testing","page":"Contributing","title":"End to end testing","text":"","category":"section"},{"location":"developer/contributing/","page":"Contributing","title":"Contributing","text":"Tests that build example package docs from source and inspect the results (end to end tests) are located in /test/examples. The main entry points are test/examples/make.jl for building and test/examples/test.jl for doing some basic checks on the generated outputs.","category":"page"},{"location":"developer/contributing/#Pluto-usage-in-showcase-documentation","page":"Contributing","title":"Pluto usage in showcase documentation","text":"","category":"section"},{"location":"developer/contributing/","page":"Contributing","title":"Contributing","text":"Some of the showcase examples in EpiAware/docs/src/showcase use Pluto.jl notebooks for the underlying computation. The output of the notebooks is rendered into HTML for inclusion in the documentation in two steps:","category":"page"},{"location":"developer/contributing/","page":"Contributing","title":"Contributing","text":"PlutoStaticHTML.jl converts the notebook with output into a machine-readable .md format.\nDocumenter.jl renders the .md file into HTML for inclusion in the documentation during the build process.","category":"page"},{"location":"developer/contributing/","page":"Contributing","title":"Contributing","text":"For other examples of using Pluto to generate documentation see the examples shown here.","category":"page"},{"location":"developer/contributing/#Running-Pluto-notebooks-from-EpiAware-locally","page":"Contributing","title":"Running Pluto notebooks from EpiAware locally","text":"","category":"section"},{"location":"developer/contributing/","page":"Contributing","title":"Contributing","text":"To run the Pluto.jl scripts in the EpiAware documentation directly from the source code you can do these steps:","category":"page"},{"location":"developer/contributing/","page":"Contributing","title":"Contributing","text":"Install Pluto.jl locally. 
We recommend using the version of Pluto that is pinned in the Project.toml file defining the documentation environment.\nClone the EpiAware repository.\nStart Pluto.jl either from REPL (see the Pluto.jl documentation) or from the command line with the shell script EpiAware/docs/pluto-scripts.sh.\nFrom the Pluto.jl interface, navigate to the Pluto.jl script you want to run.","category":"page"},{"location":"developer/contributing/#Contributing-to-Pluto-notebooks-in-EpiAware-documentation","page":"Contributing","title":"Contributing to Pluto notebooks in EpiAware documentation","text":"","category":"section"},{"location":"developer/contributing/#Modifying-an-existing-Pluto-notebook","page":"Contributing","title":"Modifying an existing Pluto notebook","text":"","category":"section"},{"location":"developer/contributing/","page":"Contributing","title":"Contributing","text":"Committing changes to the Pluto.jl notebooks in the EpiAware documentation is the same as committing changes to any other part of the repository. However, please note that we expect the following features for the environment management of the notebooks:","category":"page"},{"location":"developer/contributing/","page":"Contributing","title":"Contributing","text":"Use the environment determined by the Project.toml file in the EpiAware/docs directory. If you want extra packages, add them to this environment.\nUse the version of EpiAware that is used in these notebooks to be the version of EpiAware on the branch being pull requested into main. To do this use the Pkg.develop function.","category":"page"},{"location":"developer/contributing/","page":"Contributing","title":"Contributing","text":"To do this you can use the following code snippet in the Pluto notebook:","category":"page"},{"location":"developer/contributing/","page":"Contributing","title":"Contributing","text":"# Determine the relative path to the `EpiAware/docs` directory\ndocs_dir = dirname(dirname(dirname(dirname(@__DIR__))))\n# Determine the relative path to the `EpiAware` package directory\npkg_dir = dirname(docs_dir)\n\nusing Pkg: Pkg\nPkg.activate(docs_dir)\nPkg.develop(; path = pkg_dir)\nPkg.instantiate()","category":"page"},{"location":"developer/contributing/#Adding-a-new-Pluto-notebook","page":"Contributing","title":"Adding a new Pluto notebook","text":"","category":"section"},{"location":"developer/contributing/","page":"Contributing","title":"Contributing","text":"Adding a new Pluto.jl notebook to the EpiAware documentation is the same as adding any other file to the repository. However, in addition to following the guidelines for modifying an existing notebook, please note that the new notebook is added to the set of notebook builds using build in the EpiAware/docs/make.jl file. This will generate an .md of the same name as the notebook which can be rendered when makedocs is run. For this document to be added to the overall documentation the path to the .md file must be added to the Pages array defined in EpiAware/docs/pages.jl.","category":"page"},{"location":"developer/checklist/#Checklists","page":"Release checklist","title":"Checklists","text":"","category":"section"},{"location":"developer/checklist/","page":"Release checklist","title":"Release checklist","text":"The purpose of this page is to collate a series of checklists for commonly performed changes to the source code of EpiAware. 
It has been adapted from Documenter.jl.","category":"page"},{"location":"developer/checklist/","page":"Release checklist","title":"Release checklist","text":"In each case, copy the checklist into the description of the pull request.","category":"page"},{"location":"developer/checklist/#Making-a-release","page":"Release checklist","title":"Making a release","text":"","category":"section"},{"location":"developer/checklist/","page":"Release checklist","title":"Release checklist","text":"In preparation for a release, use the following checklist. These steps should be performed on a branch with an open pull request, either for a topic branch, or for a new branch release-1.y.z (\"Release version 1.y.z\") if multiple changes have accumulated on the master branch since the last release.","category":"page"},{"location":"developer/checklist/","page":"Release checklist","title":"Release checklist","text":"## Pre-release\n\n - [ ] Change the version number in `Project.toml`\n * If the release is breaking, increment MAJOR\n * If the release adds a new user-visible feature, increment MINOR\n * Otherwise (bug-fixes, documentation improvements), increment PATCH\n - [ ] Update `CHANGELOG.md`, following the existing style (in particular, make sure that the change log for this version has the correct version number and date).\n - [ ] Run `make changelog`, to make sure that all the issue references in `CHANGELOG.md` are up to date.\n - [ ] Check that the commit messages in this PR do not contain `[ci skip]`\n - [ ] Run https://github.com/JuliaDocs/Documenter.jl/actions/workflows/regression-tests.yml\n using a `workflow_dispatch` trigger to check for any changes that broke extensions.\n\n## The release\n\n - [ ] After merging the pull request, tag the release. There are two options for this:\n\n 1. [Comment `[at]JuliaRegistrator register` on the GitHub commit.](https://github.com/JuliaRegistries/Registrator.jl#via-the-github-app)\n 2. Use [JuliaHub's package registration feature](https://help.juliahub.com/juliahub/stable/contribute/#registrator) to trigger the registration.\n\n Either of those should automatically publish a new version to the Julia registry.\n - Once registered, the `TagBot.yml` workflow should create a tag, and rebuild the documentation for this tag.\n - These steps can take quite a bit of time (1 hour or more), so don't be surprised if the new documentation takes a while to appear.","category":"page"},{"location":"lib/EpiObsModels/#EpiObsModels.jl","page":"Overview","title":"EpiObsModels.jl","text":"","category":"section"},{"location":"lib/EpiObsModels/","page":"Overview","title":"Overview","text":"This package provides observation models for the EpiAware ecosystem.","category":"page"},{"location":"lib/EpiObsModels/#API","page":"Overview","title":"API","text":"","category":"section"},{"location":"lib/EpiObsModels/","page":"Overview","title":"Overview","text":"Pages = [\"lib/EpiObsModels/public.md\", \"lib/EpiObsModels/internals.md\"]","category":"page"},{"location":"lib/EpiAwareBase/#EpiAwareBase.jl","page":"Overview","title":"EpiAwareBase.jl","text":"","category":"section"},{"location":"lib/EpiAwareBase/","page":"Overview","title":"Overview","text":"This package provides the core functionality for the EpiAware ecosystem. 
It is a dependency of all other EpiAware packages.","category":"page"},{"location":"lib/EpiAwareBase/#API","page":"Overview","title":"API","text":"","category":"section"},{"location":"lib/EpiAwareBase/","page":"Overview","title":"Overview","text":"Pages = [\"lib/EpiAwareBase/public.md\", \"lib/EpiAwareBase/internals.md\"]","category":"page"},{"location":"developer/#developer","page":"Overview","title":"Developer documentation","text":"","category":"section"},{"location":"developer/","page":"Overview","title":"Overview","text":"Welcome to the EpiAware developer documentation! This section is designed to help you get started with developing the package.","category":"page"},{"location":"lib/EpiAwareUtils/public/#Public-Documentation","page":"Public API","title":"Public Documentation","text":"","category":"section"},{"location":"lib/EpiAwareUtils/public/","page":"Public API","title":"Public API","text":"Documentation for EpiAwareBae.jl's public interface.","category":"page"},{"location":"lib/EpiAwareUtils/public/","page":"Public API","title":"Public API","text":"See the Internals section of the manual for internal package docs covering all submodules.","category":"page"},{"location":"lib/EpiAwareUtils/public/#Contents","page":"Public API","title":"Contents","text":"","category":"section"},{"location":"lib/EpiAwareUtils/public/","page":"Public API","title":"Public API","text":"Pages = [\"public.md\"]\nDepth = 2:2","category":"page"},{"location":"lib/EpiAwareUtils/public/#Index","page":"Public API","title":"Index","text":"","category":"section"},{"location":"lib/EpiAwareUtils/public/","page":"Public API","title":"Public API","text":"Pages = [\"public.md\"]","category":"page"},{"location":"lib/EpiAwareUtils/public/#Public-API","page":"Public API","title":"Public API","text":"","category":"section"},{"location":"lib/EpiAwareUtils/public/","page":"Public API","title":"Public API","text":"Modules = [EpiAware.EpiAwareUtils]\nPrivate = false","category":"page"},{"location":"lib/EpiAwareUtils/public/#EpiAware.EpiAwareUtils","page":"Public API","title":"EpiAware.EpiAwareUtils","text":"Module for defining utility functions.\n\n\n\n\n\n","category":"module"},{"location":"lib/EpiAwareUtils/public/#EpiAware.EpiAwareUtils.DirectSample","page":"Public API","title":"EpiAware.EpiAwareUtils.DirectSample","text":"struct DirectSample <: AbstractEpiSamplingMethod\n\nSample directly from a Turing model.\n\n\n\nFields\n\nn_samples::Union{Nothing, Int64}: Number of samples from a model. If an integer is provided, the model is sampled n_samples times using Turing.Prior() returning an MCMChains. Chain object. 
If nothing, the model is sampled once returning a NamedTuple object of the sampled random variables along with generated quantities\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiAwareUtils/public/#EpiAware.EpiAwareUtils.HalfNormal","page":"Public API","title":"EpiAware.EpiAwareUtils.HalfNormal","text":"struct HalfNormal{T<:Real} <: Distributions.Distribution{Distributions.Univariate, Distributions.Continuous}\n\nCreate a half-normal prior distribution with the specified mean.\n\nArguments:\n\nμ: The mean of the half-normal distribution.\n\nReturns:\n\nA HalfNormal distribution with the specified mean.\n\nExamples:\n\nusing EpiAware, Distributions\n\nhn = HalfNormal(1.0)\n# output\nEpiAware.EpiAwareUtils.HalfNormal{Float64}(μ=1.0)\n\nfilter out all the values that are less than 0\n\nrand(hn)\n# output\n0.4508533245229199\n\ncdf(hn, 2)\n# output\n0.8894596502772643\n\nquantile(hn, 0.5)\n# output\n0.8453475393951495\n\nlogpdf(hn, 2)\n# output\n-3.1111166111445083\n\nmean(hn)\n# output\n1.0\n\nvar(hn)\n# output\n0.5707963267948966\n\n\n\nFields\n\nμ::Real\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiAwareUtils/public/#EpiAware.EpiAwareUtils.SafeNegativeBinomial","page":"Public API","title":"EpiAware.EpiAwareUtils.SafeNegativeBinomial","text":"struct SafeNegativeBinomial{T<:Real} <: Distributions.Distribution{Distributions.Univariate, Distributions.Discrete}\n\nCreate a Negative binomial distribution with the specified mean that avoids InExactError when the mean is too large.\n\nParameterisation:\n\nWe are using a mean and cluster factorization of the negative binomial distribution such that the variance to mean relationship is:\n\nsigma^2 = mu + alpha^2 mu^2\n\nThe reason for this parameterisation is that at sufficiently large mean values (i.e. r > 1 / p) p is approximately equal to the standard fluctuation of the distribution, e.g. if p = 0.05 we expect typical fluctuations of samples from the negative binomial to be about 5% of the mean when the mean is notably larger than 20. Otherwise, we expect approximately Poisson noise. 
In our opinion, this parameterisation is useful for specifying the distribution in a way that is easier to reason on priors for p.\n\nArguments:\n\nr: The number of successes, although this can be extended to a continous number.\np: Success rate.\n\nReturns:\n\nA SafeNegativeBinomial distribution with the specified mean.\n\nExamples:\n\nusing EpiAware, Distributions\n\nbigμ = exp(48.0) #Large value of μ\nσ² = bigμ + 0.05 * bigμ^2 #Large variance\n\n# We can calculate the success rate from the mean to variance relationship\np = bigμ / σ²\nr = bigμ * p / (1 - p)\nd = SafeNegativeBinomial(r, p)\n# output\nEpiAware.EpiAwareUtils.SafeNegativeBinomial{Float64}(r=20.0, p=2.85032816548187e-20)\n\ncdf(d, 100)\n# output\n0.0\n\nlogpdf(d, 100)\n# output\n-850.1397180331871\n\nmean(d)\n# output\n7.016735912097631e20\n\nvar(d)\n# output\n2.4617291430060293e40\n\n\n\nFields\n\nr::Real\np::Real\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiAwareUtils/public/#EpiAware.EpiAwareUtils.SafePoisson","page":"Public API","title":"EpiAware.EpiAwareUtils.SafePoisson","text":"struct SafePoisson{T<:Real} <: Distributions.Distribution{Distributions.Univariate, Distributions.Discrete}\n\nCreate a Poisson distribution with the specified mean that avoids InExactError when the mean is too large.\n\nArguments:\n\nλ: The mean of the Poisson distribution.\n\nReturns:\n\nA SafePoisson distribution with the specified mean.\n\nExamples:\n\nusing EpiAware, Distributions\n\nbigλ = exp(48.0) #Large value of λ\nd = SafePoisson(bigλ)\n# output\nEpiAware.EpiAwareUtils.SafePoisson{Float64}(λ=7.016735912097631e20)\n\ncdf(d, 2)\n# output\n0.0\n\nlogpdf(d, 100)\n# output\n-7.016735912097631e20\n\nmean(d)\n# output\n7.016735912097631e20\n\nvar(d)\n# output\n7.016735912097631e20\n\n\n\nFields\n\nλ::Real\n\n\n\n\n\n","category":"type"},{"location":"lib/EpiAwareUtils/public/#EpiAware.EpiAwareUtils.accumulate_scan-Tuple{AbstractAccumulationStep, Any, Any}","page":"Public API","title":"EpiAware.EpiAwareUtils.accumulate_scan","text":"accumulate_scan(\n acc_step::AbstractAccumulationStep,\n initial_state,\n ϵ_t\n) -> Any\n\n\nApply the `accumulate` function to the `AbstractAccumulationStep` object.\nThis is effectively a optimised version of a for loop that applies the\n`AbstractAccumulationStep` object to the input data in a single pass.\n\n# Arguments\n- `acc_step::AbstractAccumulationStep: The accumulation step function.\n- `initial_state`: The initial state of the accumulation.\n- `ϵ_t::AbstractVector{<:Real}`: The input data.\n\n# Returns\n- `state::AbstractVector{<:Real}`: The accumulated state as returned by the\n`get_state` function from the output of the `accumulate` function.\n\n# Examples\n```julia\nusing EpiAware\nstruct TestStep <: AbstractAccumulationStep\n a::Float64\nend\n\nfunction (step::TestStep)(state, ϵ)\n new_state = step.a * ϵ\n return new_state\nend\n\nacc_step = TestStep(0.5)\ninitial_state = zeros(3)\n\naccumulate_scan(acc_step, initial_state, [1.0, 2.0, 3.0])\n\nfunction get_state(acc_step::TestStep, initial_state, state)\n return state\nend\n\naccumulate_scan(acc_step, initial_state, [1.0, 2.0, 3.0])\n```\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiAwareUtils/public/#EpiAware.EpiAwareUtils.censored_cdf-Tuple{Distributions.Distribution}","page":"Public API","title":"EpiAware.EpiAwareUtils.censored_cdf","text":"censored_cdf(\n dist::Distributions.Distribution;\n Δd,\n D,\n upper\n) -> Any\n\n\nCreate a discrete probability cumulative distribution function (CDF) from a given distribution, assuming a 
uniform distribution over primary event times with censoring intervals of width Δd for both primary and secondary events.\n\nNB: censored_cdf returns the non-truncated CDF, i.e. the CDF without conditioning on the secondary event occuring either before or after some time.\n\nArguments\n\ndist: The distribution from which to create the PMF.\nΔd: The step size for discretizing the domain. Default is 1.0.\nD: The upper bound of the domain. Must be greater than Δd. Default D = nothing\n\nindicates that the distribution should be truncated at its upperth percentile rounded to nearest multiple of Δd.\n\nReturns\n\nA vector representing the CDF with 0.0 appended at the beginning.\n\nRaises\n\nAssertionError if the minimum value of dist is negative.\nAssertionError if Δd is not positive.\nAssertionError if D is shorter than Δd.\nAssertionError if D is not a multiple of Δd.\n\nExamples\n\nusing Distributions\nusing EpiAware.EpiAwareUtils\n\ndist = Exponential(1.0)\n\ncensored_cdf(dist; D = 10) |>\n p -> round.(p, digits=3)\n\n# output\n11-element Vector{Float64}:\n 0.0\n 0.368\n 0.767\n 0.914\n 0.969\n 0.988\n 0.996\n 0.998\n 0.999\n 1.0\n 1.0\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiAwareUtils/public/#EpiAware.EpiAwareUtils.censored_pmf-Tuple{Distributions.Distribution, Val{:single_censored}}","page":"Public API","title":"EpiAware.EpiAwareUtils.censored_pmf","text":"censored_pmf(\n dist::Distributions.Distribution,\n ::Val{:single_censored};\n primary_approximation_point,\n Δd,\n D\n)\n\n\nCreate a discrete probability mass function (PMF) from a given distribution, assuming that the primary event happens at primary_approximation_point * Δd within an intial censoring interval. Common single-censoring approximations are primary_approximation_point = 0 (left-hand approximation), primary_approximation_point = 1 (right-hand) and primary_approximation_point = 0.5 (midpoint).\n\nArguments\n\ndist: The distribution from which to create the PMF.\n::Val{:single_censored}: A dummy argument to dispatch to this method. The purpose of the Val\n\ntype argument is that to use single-censored approximation is an active decision.\n\nprimary_approximation_point: A approximation point for the primary time in its censoring interval.\n\nDefault is 0.5 for midpoint approximation.\n\nΔd: The step size for discretizing the domain. Default is 1.0.\nD: The upper bound of the domain. Must be greater than Δd.\n\nReturns\n\nA vector representing the PMF.\n\nRaises:\n\nAssertionError if the minimum value of dist is negative.\nAssertionError if Δd is not positive.\nAssertionError if D is not greater than Δd.\n\nExamples\n\nusing Distributions\nusing EpiAware.EpiAwareUtils\n\ndist = Exponential(1.0)\n\ncensored_pmf(dist, Val(:single_censored); D = 10) |>\n p -> round.(p, digits=3)\n\n# output\n10-element Vector{Float64}:\n 0.393\n 0.383\n 0.141\n 0.052\n 0.019\n 0.007\n 0.003\n 0.001\n 0.0\n 0.0\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiAwareUtils/public/#EpiAware.EpiAwareUtils.censored_pmf-Tuple{Distributions.Distribution}","page":"Public API","title":"EpiAware.EpiAwareUtils.censored_pmf","text":"censored_pmf(\n dist::Distributions.Distribution;\n Δd,\n D,\n upper\n) -> Any\n\n\nCreate a discrete probability mass function (PMF) from a given distribution, assuming a uniform distribution over primary event times with censoring intervals of width Δd for both primary and secondary events. 
The CDF for the time from the left edge of the interval containing the primary event to the secondary event is created by direct numerical integration (quadrature) of the convolution of the CDF of dist with the uniform density on [0,Δd), using the censored_cdf function. The discrete PMF for double censored delays is then found using simple differencing on the CDF.\n\nNB: censored_pmf returns a right-truncated PMF, i.e. the PMF conditioned on the secondary event occurring before or on the final secondary censoring window.\n\nArguments\n\ndist: The distribution from which to create the PMF.\nΔd: The step size for discretizing the domain. Default is 1.0.\nD: The upper bound of the domain. Must be greater than Δd. Default D = nothing\n\nindicates that the distribution should be truncated at its upperth percentile rounded to nearest multiple of Δd.\n\nReturns\n\nA vector representing the PMF.\n\nRaises\n\nAssertionError if the minimum value of dist is negative.\nAssertionError if Δd is not positive.\nAssertionError if D is shorter than Δd.\nAssertionError if D is not a multiple of Δd.\n\nExamples\n\nusing Distributions\nusing EpiAware.EpiAwareUtils\n\ndist = Exponential(1.0)\n\ncensored_pmf(dist; D = 10) |>\n p -> round.(p, digits=3)\n\n# output\n10-element Vector{Float64}:\n 0.368\n 0.4\n 0.147\n 0.054\n 0.02\n 0.007\n 0.003\n 0.001\n 0.0\n 0.0\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiAwareUtils/public/#EpiAware.EpiAwareUtils.get_param_array-Tuple{MCMCChains.Chains}","page":"Public API","title":"EpiAware.EpiAwareUtils.get_param_array","text":"get_param_array(chn::MCMCChains.Chains) -> Any\n\n\nExtract a parameter array from a Chains object chn that matches the shape of number of sample and chain pairs in chn.\n\nArguments\n\nchn::Chains: The Chains object containing the MCMC samples.\n\nReturns\n\nparam_array: An array of parameter samples, where each element corresponds to a single\n\nMCMC sample as a NamedTuple.\n\nExample\n\nSampling from a simple model which has both scalar and vector quantity random variables across 4 chains.\n\nusing Turing, MCMCChains, EpiAware\n\n@model function testmodel()\n y ~ Normal()\nend\nmdl = testmodel()\nchn = sample(mdl, Prior(), MCMCSerial(), 2, 1, progress=false)\n\nA = get_param_array(chn)\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiAwareUtils/public/#EpiAware.EpiAwareUtils.get_state-Tuple{AbstractAccumulationStep, Any, Any}","page":"Public API","title":"EpiAware.EpiAwareUtils.get_state","text":"get_state(\n acc_step::AbstractAccumulationStep,\n initial_state,\n state\n) -> Any\n\n\nProcesses the output of the `accumulate` function to return the final state.\n\n# Arguments\n- `acc_step::AbstractAccumulationStep`: The accumulation step function.\n- `initial_state`: The initial state of the accumulation.\n- `state`: The output of the `accumulate` function.\n\n# Returns\n- `state`: The combination of the initial state and the last element of\n each accumulated state.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiAwareUtils/public/#EpiAware.EpiAwareUtils.prefix_submodel-Tuple{AbstractModel, Function, String, Vararg{Any}}","page":"Public API","title":"EpiAware.EpiAwareUtils.prefix_submodel","text":"prefix_submodel(\n model::AbstractModel,\n fn::Function,\n prefix::String,\n kwargs...\n) -> Any\n\n\nGenerate a submodel with an optional prefix. 
A lightweight wrapper around the @submodel macro from DynamicPPL.jl.\n\nArguments\n\nmodel::AbstractModel: The model to be used.\nfn::Function: The Turing @model function to be applied to the model.\nprefix::String: The prefix to be used. If the prefix is an empty string, the submodel is created without a prefix.\n\nReturns\n\nsubmodel: The returns from the submodel are passed through.\n\nExamples\n\nusing EpiAware, DynamicPPL\n\nsubmodel = prefix_submodel(FixedIntercept(0.1), generate_latent, string(1), 2)\nsubmodel\n# output\nModel{typeof(prefix_submodel), (:model, :fn, :prefix, Symbol(\"#splat#kwargs\")), (), (), Tuple{FixedIntercept{Float64}, typeof(generate_latent), String, Tuple{Int64}}, Tuple{}, DefaultContext}(EpiAware.EpiAwareUtils.prefix_submodel, (model = FixedIntercept{Float64}(0.1), fn = EpiAware.EpiAwareBase.generate_latent, prefix = \"1\", var\"#splat#kwargs\" = (2,)), NamedTuple(), DefaultContext())\n\nWe can now draw a sample from the submodel.\n\nrand(submodel)\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiAwareUtils/public/#EpiAware.EpiAwareUtils.scan-Union{Tuple{F}, Tuple{F, Any, Any}} where F<:AbstractModel","page":"Public API","title":"EpiAware.EpiAwareUtils.scan","text":"scan(f::AbstractModel, init, xs) -> Tuple{Any, Any}\n\n\nApply f to each element of xs and accumulate the results.\n\nf must be a callable on a sub-type of AbstractModel.\n\nDesign note\n\nscan is being restricted to AbstractModel sub-types to ensure: 1. That compiler specialization is activated 2. Also avoids potential compiler overhead from specialisation on f<: Function.\n\nArguments\n\nf: A callable/functor that takes two arguments, carry and x, and returns a new carry and a result y.\ninit: The initial value for the carry variable.\nxs: An iterable collection of elements.\n\nReturns\n\nys: An array containing the results of applying f to each element of xs.\ncarry: The final value of the carry variable after processing all elements of xs.\n\nExamples\n\n```jldoctest using EpiAware\n\nstruct Adder <: EpiAwareBase.AbstractModel end function (a::Adder)(carry, x) carry + x, carry + x end\n\nscan(Adder(), 0, 1:5) #output ([1, 3, 6, 10, 15], 15)\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiAwareUtils/public/#EpiAware.EpiAwareUtils.spread_draws-Tuple{MCMCChains.Chains}","page":"Public API","title":"EpiAware.EpiAwareUtils.spread_draws","text":"spread_draws(chn::MCMCChains.Chains) -> DataFrames.DataFrame\n\n\nspread_draws(chn::Chains)\n\nConverts a Chains object into a DataFrame in tidybayes format.\n\nArguments\n\nchn::Chains: The Chains object to be converted.\n\nReturns\n\ndf::DataFrame: The converted DataFrame.\n\n\n\n\n\n","category":"method"},{"location":"lib/EpiAwareUtils/public/#EpiAware.EpiAwareUtils.∫F-Tuple{Any, Any, Any}","page":"Public API","title":"EpiAware.EpiAwareUtils.∫F","text":"∫F(dist, t, Δd) -> Any\n\n\nCalculate the CDF of the random variable X + U where X has cumulative distriubtion function F and U is a uniform random variable on [0, Δd).\n\nThis is used in solving for censored CDFs and PMFs using numerical quadrature.\n\n\n\n\n\n","category":"method"},{"location":"release-notes/","page":"Release notes","title":"Release notes","text":"EditURL = \"https://github.com/JuliaDocs/Documenter.jl/blob/master/CHANGELOG.md\"","category":"page"},{"location":"release-notes/#Release-notes","page":"Release notes","title":"Release notes","text":"","category":"section"},{"location":"release-notes/","page":"Release notes","title":"Release notes","text":"The format is based on Keep 
a Changelog, and this project adheres to Semantic Versioning.","category":"page"},{"location":"release-notes/#Unreleased","page":"Release notes","title":"Unreleased","text":"","category":"section"},{"location":"release-notes/#Added","page":"Release notes","title":"Added","text":"","category":"section"},{"location":"release-notes/#Changed","page":"Release notes","title":"Changed","text":"","category":"section"},{"location":"release-notes/#Fixed","page":"Release notes","title":"Fixed","text":"","category":"section"},{"location":"getting-started/#getting-started","page":"Overview","title":"Getting started","text":"","category":"section"},{"location":"getting-started/","page":"Overview","title":"Overview","text":"Note that this section of the documentation is still under construction. Please see replications for the most up-to-date information. Please feel free to contribute to the documentation by submitting a pull request.","category":"page"},{"location":"getting-started/","page":"Overview","title":"Overview","text":"Welcome to the EpiAware documentation! This section is designed to help you get started with the package. It includes a frequently asked questions (FAQ) section, a series of explainers that provide a detailed overview of the platform and its features, and tutorials that will help you get started with EpiAware for specific tasks. See the sidebar for the list of topics.","category":"page"},{"location":"getting-started/explainers/observation-models/#Observation-models","page":"Observation models","title":"Observation models","text":"","category":"section"},{"location":"showcase/#showcase","page":"Overview","title":"EpiAware Showcase","text":"","category":"section"},{"location":"showcase/","page":"Overview","title":"Overview","text":"Here we showcase the capabilities of EpiAware in action. If you have a showcase you would like to add, please submit a pull request.","category":"page"}]
+}
diff --git a/previews/PR513/showcase/index.html b/previews/PR513/showcase/index.html
new file mode 100644
index 000000000..6a3ec9961
--- /dev/null
+++ b/previews/PR513/showcase/index.html
@@ -0,0 +1,2 @@
+
+Overview · EpiAware.jl
Defining the deterministic ODE model from Chatzilena et al section 2.2.2 using SciML ODE functionality and an EpiAware observation model.
Build on this to define the stochastic ODE model from Chatzilena et al section 2.2.3 using an EpiAware observation model.
Fitting the deterministic ODE model to data from an Influenza outbreak in an English boarding school.
Fitting the stochastic ODE model to data from an Influenza outbreak in an English boarding school.
What might I need to know before starting
This vignette builds on concepts from EpiAware observation models; familiarity with the SciML and Turing ecosystems would be useful but not essential.
Packages used in this vignette
Alongside the EpiAware package we will use the OrdinaryDiffEq and SciMLSensitivity packages for interfacing with the SciML ecosystem; these are a lower-dependency usage of DifferentialEquations.jl that, respectively, expose ODE solvers and adjoint methods for ODE solutions, that is, the method of propagating parameter derivatives through functions containing ODE solutions. Bayesian inference will be done with NUTS from the Turing ecosystem. We will also use the CairoMakie package for plotting and DataFramesMeta for data manipulation.
+
+
using EpiAware
+
+
+
using Turing
+
+
+
using OrdinaryDiffEq, SciMLSensitivity #ODE solvers and adjoint methods
+
+
+
using Distributions, Statistics, LogExpFunctions #Statistics and special func packages
+
+
+
using CSV, DataFramesMeta #Data wrangling
+
+
+
using CairoMakie, PairPlots
+
+
+
using ReverseDiff #Automatic differentiation backend
+
+
+
begin #Date utility and set Random seed
+ using Dates
+ using Random
+ Random.seed!(1234)
+end
+
TaskLocalRNG()
+
+
+
Single population SIR model
As mentioned in Chatzilena et al disease spread is frequently modelled in terms of ODE-based models. The study population is divided into compartments representing a specific stage of the epidemic status. In this case, susceptible, infected, and recovered individuals.
where S(t) represents the number of susceptible, I(t) the number of infected and R(t) the number of recovered individuals at time t. The total population size is denoted by N (with N = S(t) + I(t) + R(t)), β denotes the transmission rate and γ denotes the recovery rate.
+
+
+
We can interface to the SciML ecosystem by writing a function with the signature:
(du, u, p, t) -> nothing
Where:
du is the vector field of the ODE problem, e.g. \({dS \over dt}\), \({dI \over dt}\) etc. This is calculated in-place (commonly denoted using ! in function names in Julia).
u is the state of the ODE problem, e.g. \(S\), \(I\), etc.
p is an object that represents the parameters of the ODE problem, e.g. \(\beta\), \(\gamma\).
t is the time of the ODE problem.
We do this for the SIR model described above in a function called sir!:
+
+
function sir!(du, u, p, t)
+ S, I, R = u
+ β, γ = p
+ du[1] = -β * I * S
+ du[2] = β * I * S - γ * I
+ du[3] = γ * I
+
+ return nothing
+end
+
sir! (generic function with 1 method)
+
+
+
We combine the vector field function sir! with an initial condition u0 and the integration period tspan to make an ODEProblem. We do not define the parameters; these will be defined within the inference approach.
Note that this is analogous to the EpiProblem approach we expose from EpiAware, as used in the Mishra et al replication. The difference is that here we are going to use ODE solvers from the SciML ecosystem to generate the dynamics of the underlying infections. In the linked example, we use latent process generation exposed by EpiAware as the generative process for the underlying dynamics.
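A minimal sketch of this step, mirroring the construction in the accompanying notebook source (the 99%/1% initial split and the 14-day integration span are taken from there):
N = 763 # school population size
sir_prob = ODEProblem(
    sir!,
    N .* [0.99, 0.01, 0.0], # initial condition: mostly susceptible, 1% infected
    (0.0, (Date(1978, 2, 4) - Date(1978, 1, 22)).value + 1) # integration period in days
)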
There was a brief, but intense, outbreak of Influenza within the (semi-) closed community of a boarding school reported to the British Medical Journal in 1978. The outbreak lasted from 22nd January to 4th February and it is reported that one infected child started the epidemic, which then spread rapidly. Of the 763 children at the boarding school, 512 became ill.
We downloaded the data of this outbreak using the R package outbreaks, which is maintained as part of the R Epidemics Consortium (RECON).
The boarding school data gives the number of children "in bed" and "convalescent" on each of 14 days from 22nd Jan to 4th Feb 1978. We follow Chatzilena et al and treat the number "in bed" as a proxy for the number of children in the infectious (I) compartment in the ODE model.
NB: Chatzilena et al give \(\lambda_t = \int_0^t \beta \frac{I(s)}{N} S(s) - \gamma I(s)ds = I(t) - I(0).\) However, this doesn't match their underlying stan code.
+
+
+
From EpiAware, we have the PoissonError struct which defines the probabilistic structure of this observation error model.
+
+
obs = PoissonError()
+
PoissonError()
+
+
+
Now we can write the probabilistic model using the Turing PPL. Note that instead of using \(I(t)\) directly we apply the softplus transform to \(I(t)\), implemented by LogExpFunctions.log1pexp. The reason is that the solver can return small negative numbers; the softplus transform smoothly maintains positivity while being very close to \(I(t)\) when \(I(t) > 2\).
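The model definition itself does not survive in this text extract; the version from the accompanying notebook source is reproduced here as a reference sketch:
@model function deterministic_ode_mdl(y_t, ts, obs, prob, N;
        solver = AutoTsit5(Rosenbrock23())
)
    ##Priors##
    β ~ LogNormal(0.0, 1.0)
    γ ~ Gamma(0.004, 1 / 0.002)
    S₀ ~ Beta(0.5, 0.5)

    ##Remake ODE model with sampled parameters##
    _prob = remake(prob; u0 = [S₀, 1 - S₀, 0.0], p = [β, γ])

    ##Solve the remade ODE model##
    sol = solve(_prob, solver; saveat = ts, verbose = false)

    ##Log-likelihood accumulation using obs##
    λt = log1pexp.(N * sol[2, :]) # expected I(t), softplus-transformed
    @submodel generated_y_t = generate_observations(obs, y_t, λt)

    ##Generated quantities##
    return (; sol, generated_y_t, R0 = β / γ)
end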
deterministic_ode_mdl (generic function with 2 methods)
+
+
+
We instantiate the model in two ways:
deterministic_mdl: This conditions the generative model on the data observation. We can sample from this model to find the posterior distribution of the parameters.
deterministic_uncond_mdl: This doesn't condition on the data. This is useful for prior and posterior predictive modelling.
Here we construct the Turing model directly; in the Mishra et al replication we use the EpiProblem functionality to build a Turing model under the hood. Because in this note we are using a mix of functionality from SciML and EpiAware, we construct the model to sample from directly.
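As in the notebook source, the two instantiations are:
deterministic_mdl = deterministic_ode_mdl(data.in_bed, data.ts, obs, sir_prob, N);
deterministic_uncond_mdl = deterministic_ode_mdl(
    fill(missing, length(data.in_bed)), data.ts, obs, sir_prob, N);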
The prior predictive checking suggests that a priori our parameter beliefs are very far from the data. Approaching the inference naively can lead to poor fits.
We do two things to mitigate this, as sketched below:
We choose a switching ODE solver which switches between explicit (Tsit5) and implicit (Rosenbrock23) solvers. This helps avoid the ODE solver failing when the sampler tries extreme parameter values. This is the default solver = AutoTsit5(Rosenbrock23()) above.
We locate the maximum likelihood point (that is, ignoring the influence of the priors) as a useful starting point for NUTS.
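A sketch of this warm-start strategy, following the notebook source (100 repeated MLE attempts; the best one seeds four NUTS chains):
nmle_tries = 100
mle_fit = map(1:nmle_tries) do _
    try
        maximum_likelihood(deterministic_mdl)
    catch
        (lp = -Inf,) # failed attempts are discarded via a -Inf log-probability
    end
end |>
    fits -> fits[findmax(fit -> fit.lp, fits)[2]] # keep the attempt with the highest log-probability

chn = sample(
    deterministic_mdl, NUTS(), MCMCThreads(), 1000, 4;
    initial_params = fill(mle_fit.values.array, 4)
)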
In Chatzilena et al, they present an auto-regressive model for connecting the outcome of the ODE model to illness observations. The argument is that the stochastic component of the model can absorb the noise generated by a possible mis-specification of the model.
In their approach they consider \(\kappa_t = \log \lambda_t\) where \(\kappa_t\) evolves according to an Ornstein-Uhlenbeck process:
We will use the AR struct from EpiAware to define the auto-regressive process in this model; the AR struct gives a direct parameterisation of the AR model.
To convert from the formulation above we sample from the priors, and define HalfNormal priors based on the sampled prior means of \(e^{-\phi}\) and \({\sigma^2 \over 2 \phi} \left(1 - e^{-2\phi} \right)\). We also add a strong prior that \(\kappa_1 \approx 0\).
We define the AR(1) process by matching the means of HalfNormal prior distributions for the damp parameter and standard deviation parameter to the prior means calculated from the Chatzilena et al definition.
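A sketch of the prior-matching computation from the notebook source:
ϕs = rand(truncated(Normal(0, 100), lower = 0.0), 1000) # prior draws of ϕ
σ²s = rand(InverseGamma(0.1, 0.1), 1000) .|> x -> 1 / x # prior draws of σ²
sampled_AR_damps = ϕs .|> ϕ -> exp(-ϕ) # implied AR(1) damp parameters
sampled_AR_stds = map(ϕs, σ²s) do ϕ, σ²
    (1 - exp(-2 * ϕ)) * σ² / (2 * ϕ) # implied AR(1) noise term from the transition density
end

ar = AR(
    damp_priors = [HalfNormal(mean(sampled_AR_damps))],
    std_prior = HalfNormal(mean(sampled_AR_stds)),
    init_priors = [Normal(0, 0.001)] # strong prior that κ₁ ≈ 0
)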
We can sample directly from the behaviour specified by the ar struct to do prior predictive checking on the AR(1) process.
+
+
let
+ nobs = size(data, 1)
+ ar_mdl = generate_latent(ar, nobs)
+ fig = Figure()
+ ax = Axis(fig[1, 1],
+ xticks = (data.ts[1:3:end], data.date[1:3:end] .|> string),
+ ylabel = "exp(kt)",
+ title = "Prior predictive sampling for relative residual in mean pred."
+ )
+ for i in 1:500
+ lines!(ax, ar_mdl() .|> exp, color = (:grey, 0.15))
+ end
+ fig
+end
+
+
+
+
We see that the choice of priors implies an a priori belief that the extra observation noise on the mean prediction of the ODE model is fairly small, approximately 10% relative to the mean prediction.
+
+
+
We can now define the probabilistic model. The stochastic model assumes a (random) time-varying ascertainment, which we implement using the Ascertainment struct from EpiAware. Note that instead of implementing an ascertainment factor exp.(κₜ) directly, which can be unstable for large primal values, by default Ascertainment uses the LogExpFunctions.xexpy function which implements \(x\exp(y)\) stably for a wide range of values.
+
+
+
To distinguish random variables sampled by various sub-processes, EpiAware process types create prefixes. The default for Ascertainment is just the string "Ascertainment", but in this case we use the less verbose "va" for "varying ascertainment".
+
+
mdl_prefix = "va"
+
"va"
+
+
+
Now we can construct our time-varying ascertainment model. The main keyword arguments here are model and latent_model. model sets the connection between the expected observation and the actual observation. In this case, we reuse our PoissonError model from above. latent_model sets the modification model on the expected values. In this case, we use the AR process we defined above.
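As in the notebook source:
varying_ascertainment = Ascertainment(
    model = obs, # the PoissonError defined above
    latent_model = ar, # the AR(1) log-residual process defined above
    latent_prefix = mdl_prefix
)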
The prior predictive checking again shows misaligned prior beliefs; for example, a priori without data we would not expect the median prediction for the number of ill children to be about 600 out of 763 after 1 day.
The latent process for the log-residuals \(\kappa_t\) doesn't make much sense without priors, so we look for a reasonable MAP point to start NUTS from. We do this by first making an initial guess (sketched after this list) which is a mixture of:
The posterior averages from the deterministic model.
The prior averages of the structure parameters of the AR(1) process.
Zero for the time-varying noise underlying the AR(1) process.
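A sketch of the initialisation and sampling steps from the notebook source (the 13 zeros correspond to the time-varying AR noise terms):
initial_guess = [[mean(chn[:β]),
                     mean(chn[:γ]),
                     mean(chn[:S₀]),
                     mean(ar.std_prior),
                     mean(ar.init_prior)[1],
                     mean(ar.damp_prior)[1]]
                 zeros(13)]

map_fit_stoch_mdl = maximum_a_posteriori(stochastic_mdl;
    adtype = AutoReverseDiff(),
    initial_params = initial_guess
)

chn2 = sample(
    stochastic_mdl,
    NUTS(; adtype = AutoReverseDiff(true)),
    MCMCThreads(), 1000, 4;
    initial_params = fill(map_fit_stoch_mdl.values.array, 4)
)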
let
+ vars = mapreduce(vcat, 1:13) do i
+ Symbol(mdl_prefix * ".ϵ_t[$i]")
+ end
+ pairplot(chn2[vars])
+end
+
+
+
let
+ gens = generated_quantities(stochastic_uncond_mdl, chn2)
+ plot_predYt(data, gens;
+ title = "Fitted stochastic model",
+ ylabel = "Number of Infected students"
+ )
+end
+
+
+
diff --git a/previews/PR513/showcase/replications/chatzilena-2019/index.jl b/previews/PR513/showcase/replications/chatzilena-2019/index.jl
new file mode 100644
index 000000000..7ce473618
--- /dev/null
+++ b/previews/PR513/showcase/replications/chatzilena-2019/index.jl
@@ -0,0 +1,708 @@
+### A Pluto.jl notebook ###
+# v0.20.0
+
+using Markdown
+using InteractiveUtils
+
+# ╔═╡ e34cec5a-a173-4e92-a860-340c7a9e9c72
+let
+ docs_dir = dirname(dirname(dirname(dirname(@__DIR__))))
+ using Pkg: Pkg
+ Pkg.activate(docs_dir)
+ Pkg.instantiate()
+end;
+
+# ╔═╡ b1468db3-7ab0-468c-8e27-70013a8f512f
+using EpiAware
+
+# ╔═╡ a4710701-6315-459d-b677-f24b77ff3e80
+using Turing
+
+# ╔═╡ 7263d714-2ce4-4d57-8881-6b60db018dd5
+using OrdinaryDiffEq, SciMLSensitivity #ODE solvers and adjoint methods
+
+# ╔═╡ 261420cd-4650-402b-b126-7a431f93f37e
+using Distributions, Statistics, LogExpFunctions #Statistics and special func packages
+
+# ╔═╡ 9c19a98b-a08b-4560-966d-61ff0ece2ad5
+using CSV, DataFramesMeta #Data wrangling
+
+# ╔═╡ 3897e773-ed07-4860-bb62-35605d0dacb0
+using CairoMakie, PairPlots
+
+# ╔═╡ 14641441-dbea-4fdf-88e0-64a57da60ef7
+using ReverseDiff #Automatic differentiation backend
+
+# ╔═╡ a0d91258-8ab5-4adc-98f2-8f17b4bd685c
+begin #Date utility and set Random seed
+ using Dates
+ using Random
+ Random.seed!(1234)
+end
+
+# ╔═╡ 33384fc6-7cca-11ef-3567-ab7df9200cde
+md"
+# Example: Statistical inference for ODE-based infectious disease models
+# Introduction
+## What are we going to do in this Vignette
+In this vignette, we'll demonstrate how to use `EpiAware` in conjunction with [SciML ecosystem](https://sciml.ai/) for Bayesian inference of infectious disease dynamics.
+The model and data is heavily based on [Contemporary statistical inference for infectious disease models using Stan _Chatzilena et al. 2019_](https://www.sciencedirect.com/science/article/pii/S1755436519300325).
+
+We'll cover the following key points:
+
+1. Defining the deterministic ODE model from Chatzilena et al section 2.2.2 using SciML ODE functionality and an `EpiAware` observation model.
+2. Build on this to define the stochastic ODE model from Chatzilena et al section 2.2.3 using an `EpiAware` observation model.
+3. Fitting the deterministic ODE model to data from an Influenza outbreak in an English boarding school.
+4. Fitting the stochastic ODE model to data from an Influenza outbreak in an English boarding school.
+
+## What might I need to know before starting
+
+This vignette builds on concepts from `EpiAware` observation models; familiarity with the `SciML` and `Turing` ecosystems would be useful but not essential.
+
+## Packages used in this vignette
+
+Alongside the `EpiAware` package we will use the `OrdinaryDiffEq` and `SciMLSensitivity` packages for interfacing with the `SciML` ecosystem; these are a lower-dependency usage of `DifferentialEquations.jl` that, respectively, expose ODE solvers and adjoint methods for ODE solutions; that is, the method of propagating parameter derivatives through functions containing ODE solutions.
+Bayesian inference will be done with `NUTS` from the `Turing` ecosystem. We will also use the `CairoMakie` package for plotting and `DataFramesMeta` for data manipulation.
+"
+
+# ╔═╡ 943b82ec-b4dc-4537-8183-d6c73cd74a37
+md"
+# Single population SIR model
+
+As mentioned in _Chatzilena et al_ disease spread is frequently modelled in terms of ODE-based models.
+The study population is divided into compartments representing a specific stage of the epidemic status.
+In this case, susceptible, infected, and recovered individuals.
+
+```math
+\begin{aligned}
+{dS \over dt} &= - \beta \frac{I(t)}{N} S(t) \\
+{dI \over dt} &= \beta \frac{I(t)}{N} S(t) - \gamma I(t) \\
+{dR \over dt} &= \gamma I(t). \\
+\end{aligned}
+```
+where S(t) represents the number of susceptible, I(t) the number of
+infected and R(t) the number of recovered individuals at time t.
+The total population size is denoted by N (with N = S(t) + I(t) + R(t)), β denotes the transmission rate and γ denotes the recovery rate.
+
+"
+
+# ╔═╡ 0e78285c-d2e8-4c3c-848a-14dae6ead0a4
+md"
+We can interface to the `SciML` ecosystem by writing a function with the signature:
+
+> `(du, u, p, t) -> nothing`
+
+Where:
+- `du` is the _vector field_ of the ODE problem, e.g. ${dS \over dt}$, ${dI \over dt}$ etc. This is calculated _in-place_ (commonly denoted using ! in function names in Julia).
+- `u` is the _state_ of the ODE problem, e.g. $S$, $I$, etc.
+- `p` is an object that represents the parameters of the ODE problem, e.g. $\beta$, $\gamma$.
+- `t` is the time of the ODE problem.
+
+We do this for the SIR model described above in a function called `sir!`:
+"
+
+# ╔═╡ ab4269b1-e292-466f-8bfb-713d917c18f9
+function sir!(du, u, p, t)
+ S, I, R = u
+ β, γ = p
+ du[1] = -β * I * S
+ du[2] = β * I * S - γ * I
+ du[3] = γ * I
+
+ return nothing
+end
+
+# ╔═╡ f16eb00b-2d77-45df-b767-757fe2f5674c
+md"
+We combine the vector field function `sir!` with an initial condition `u0` and the integration period `tspan` to make an `ODEProblem`.
+We do not define the parameters; these will be defined within the inference approach.
+"
+
+# ╔═╡ b5ff95d1-8a6f-4d48-adf2-60d91b3ebebe
+md"
+Note that this is analogous to the `EpiProblem` approach we expose from `EpiAware`, as used in the [Mishra et al replication](https://cdcgov.github.io/Rt-without-renewal/dev/showcase/replications/mishra-2020/).
+The difference is that here we are going to use ODE solvers from the `SciML` ecosystem to generate the dynamics of the underlying infections.
+In the linked example, we use latent process generation exposed by `EpiAware` as the underlying generative process for underlying dynamics.
+"
+
+# ╔═╡ d64388f9-6edd-414d-a191-316f75b35b2c
+md"
+
+## Data for inference
+
+There was a brief, but intense, outbreak of Influenza within the (semi-) closed community of a boarding school reported to the British Medical Journal in 1978.
+The outbreak lasted from 22nd January to 4th February and it is reported that one infected child started the epidemic, which then spread rapidly.
+Of the 763 children at the boarding school, 512 became ill.
+
+We downloaded the data of this outbreak using the R package `outbreaks`, which is maintained as part of the [R Epidemics Consortium (RECON)](http://www.repidemicsconsortium.org).
+
+"
+
+# ╔═╡ 7c9cbbc1-71ef-4d81-b93a-c2b3a8683d53
+data = "https://raw.githubusercontent.com/CDCgov/Rt-without-renewal/refs/heads/main/EpiAware/docs/src/showcase/replications/chatzilena-2019/influenza_england_1978_school.csv2" |>
+ url -> CSV.read(download(url), DataFrame) |>
+ df -> @transform(df,
+ :ts=(:date .- minimum(:date)) .|> d -> d.value + 1.0,)
+
+# ╔═╡ aba3f1db-c290-409c-9b9e-6065935ede54
+N = 763;
+
+# ╔═╡ bb07a580-6d86-48b3-a79f-d2ed9306e87c
+sir_prob = ODEProblem(
+ sir!,
+ N .* [0.99, 0.01, 0.0],
+ (0.0, (Date(1978, 2, 4) - Date(1978, 1, 22)).value + 1)
+)
+
+# ╔═╡ 3f54bb44-76c4-4744-885a-46dedfaffeca
+md"
+## Inference for the deterministic SIR model
+
+The boarding school data gives the number of children \"in bed\" and \"convalescent\" on each of 14 days from 22nd Jan to 4th Feb 1978.
+We follow _Chatzilena et al_ and treat the number \"in bed\" as a proxy for the number of children in the infectious (I) compartment in the ODE model.
+
+The full observation model is:
+
+```math
+\begin{aligned}
+Y_t &\sim \text{Poisson}(\lambda_t)\\
+\lambda_t &= I(t)\\
+\beta &\sim \text{LogNormal}(\text{logmean}=0,\text{logstd}=1) \\
+\gamma & \sim \text{Gamma}(\text{shape} = 0.004, \text{scale} = 50)\\
+S(0) /N &\sim \text{Beta}(0.5, 0.5).
+\end{aligned}
+```
+
+**NB: Chatzilena et al give $\lambda_t = \int_0^t \beta \frac{I(s)}{N} S(s) - \gamma I(s)ds = I(t) - I(0).$
+However, this doesn't match their underlying stan code.**
+"
+
+# ╔═╡ ea1be94b-d722-47ee-8465-982c83dc6838
+md"
+From `EpiAware`, we have the `PoissonError` struct which defines the probabilistic structure of this observation error model.
+"
+
+# ╔═╡ 87509792-e28d-4618-9bf5-e06b2e5dbe8b
+obs = PoissonError()
+
+# ╔═╡ 81501c84-5e1f-4829-a26d-52fe00503958
+md"
+Now we can write the probabilistic model using the `Turing` PPL.
+Note that instead of using $I(t)$ directly we apply the [softplus](https://en.wikipedia.org/wiki/Softplus) transform to $I(t)$, implemented by `LogExpFunctions.log1pexp`.
+The reason is that the solver can return small negative numbers; the softplus transform smoothly maintains positivity while being very close to $I(t)$ when $I(t) > 2$.
+"
+
+# ╔═╡ 1d287c8e-7000-4b23-ae7e-f7008c3e53bd
+@model function deterministic_ode_mdl(y_t, ts, obs, prob, N;
+ solver = AutoTsit5(Rosenbrock23())
+)
+ ##Priors##
+ β ~ LogNormal(0.0, 1.0)
+ γ ~ Gamma(0.004, 1 / 0.002)
+ S₀ ~ Beta(0.5, 0.5)
+
+ ##remake ODE model##
+ _prob = remake(prob;
+ u0 = [S₀, 1 - S₀, 0.0],
+ p = [β, γ]
+ )
+
+ ##Solve remade ODE model##
+
+ sol = solve(_prob, solver;
+ saveat = ts,
+ verbose = false)
+
+ ##log-like accumulation using obs##
+ λt = log1pexp.(N * sol[2, :]) # #expected It
+ @submodel generated_y_t = generate_observations(obs, y_t, λt)
+
+ ##Generated quantities##
+ return (; sol, generated_y_t, R0 = β / γ)
+end
+
+# ╔═╡ e7383885-fa6a-4240-a252-44ae82cae713
+md"
+We instantiate the model in two ways:
+
+1. `deterministic_mdl`: This conditions the generative model on the data observation. We can sample from this model to find the posterior distribution of the parameters.
+2. `deterministic_uncond_mdl`: This _doesn't_ condition on the data. This is useful for prior and posterior predictive modelling.
+
+Here we construct the `Turing` model directly; in the [Mishra et al replication](https://cdcgov.github.io/Rt-without-renewal/dev/showcase/replications/mishra-2020/) we use the `EpiProblem` functionality to build a `Turing` model under the hood.
+Because in this note we are using a mix of functionality from `SciML` and `EpiAware`, we construct the model to sample from directly.
+"
+
+# ╔═╡ dbc1b453-1c29-4f82-bec9-098d67f9e63f
+deterministic_mdl = deterministic_ode_mdl(data.in_bed, data.ts, obs, sir_prob, N);
+
+# ╔═╡ e795c2bf-0861-4e96-9921-db47f41af206
+deterministic_uncond_mdl = deterministic_ode_mdl(
+ fill(missing, length(data.in_bed)), data.ts, obs, sir_prob, N);
+
+# ╔═╡ e848434c-2543-43d1-ae22-5c4241f138bb
+md"
+We add a useful plotting utility.
+"
+
+# ╔═╡ ab8c98d1-d357-4c49-9f5a-f069e05c45f5
+function plot_predYt(data, gens; title::String, ylabel::String)
+ fig = Figure()
+ ga = fig[1, 1:2] = GridLayout()
+
+ ax = Axis(ga[1, 1];
+ title = title,
+ xticks = (data.ts[1:3:end], data.date[1:3:end] .|> string),
+ ylabel = ylabel
+ )
+ pred_Yt = mapreduce(hcat, gens) do gen
+ gen.generated_y_t
+ end |> X -> mapreduce(vcat, eachrow(X)) do row
+ quantile(row, [0.5, 0.025, 0.975, 0.1, 0.9, 0.25, 0.75])'
+ end
+
+ lines!(ax, data.ts, pred_Yt[:, 1]; linewidth = 3, color = :green, label = "Median")
+ band!(
+ ax, data.ts, pred_Yt[:, 2], pred_Yt[:, 3], color = (:green, 0.2), label = "95% CI")
+ band!(
+ ax, data.ts, pred_Yt[:, 4], pred_Yt[:, 5], color = (:green, 0.4), label = "80% CI")
+ band!(
+ ax, data.ts, pred_Yt[:, 6], pred_Yt[:, 7], color = (:green, 0.6), label = "50% CI")
+ scatter!(ax, data.in_bed, label = "data")
+ leg = Legend(ga[1, 2], ax; framevisible = false)
+ hidespines!(ax)
+
+ fig
+end
+
+# ╔═╡ 2c6ac235-e331-4189-8c8c-74de5f98b2c4
+md"
+**Prior predictive sampling**
+"
+
+# ╔═╡ a729f1cd-404c-4a33-a8f9-b2ea6f0adb62
+let
+ prior_chn = sample(deterministic_uncond_mdl, Prior(), 2000)
+ gens = generated_quantities(deterministic_uncond_mdl, prior_chn)
+ plot_predYt(data, gens;
+ title = "Prior predictive: deterministic model",
+ ylabel = "Number of Infected students"
+ )
+end
+
+# ╔═╡ 4c0759fb-76e9-4de5-9206-89e8bfb6c3bb
+md"
+The prior predictive checking suggests that _a priori_ our parameter beliefs are very far from the data.
+Approaching the inference naively can lead to poor fits.
+
+We do two things to mitigate this:
+
+1. We choose a switching ODE solver which switches between explicit (`Tsit5`) and implicit (`Rosenbrock23`) solvers. This helps avoid the ODE solver failing when the sampler tries extreme parameter values. This is the default `solver = AutoTsit5(Rosenbrock23())` above.
+2. We locate the maximum likelihood point (that is, ignoring the influence of the priors) as a useful starting point for `NUTS`.
+"
+
+# ╔═╡ 8d96db67-de3b-4704-9f54-f4ed50a4ecff
+nmle_tries = 100
+
+# ╔═╡ ba35cebd-0d29-43c5-8db7-f550d7f821bc
+mle_fit = map(1:nmle_tries) do _
+ fit = try
+ maximum_likelihood(deterministic_mdl)
+ catch
+ (lp = -Inf,)
+ end
+end |>
+ fits -> (findmax(fit -> fit.lp, fits)[2], fits) |>
+ max_and_fits -> max_and_fits[2][max_and_fits[1]]
+
+# ╔═╡ 0be912c1-22dc-4978-b86a-84273062f5da
+mle_fit.optim_result.retcode
+
+# ╔═╡ a1a34b67-ff4e-4fee-aa30-4c2add3ea8a0
+md"
+Note that we choose the best out of $nmle_tries tries for the MLE estimators.
+
+Now, we sample aiming at 1000 samples for each of 4 chains.
+"
+
+# ╔═╡ 2cf64ba3-ff8d-40b0-9bd8-9e80393156f5
+chn = sample(
+ deterministic_mdl, NUTS(), MCMCThreads(), 1000, 4;
+ initial_params = fill(mle_fit.values.array, 4)
+)
+
+# ╔═╡ b2429b68-dd75-499f-a4e1-1b7d72e209c7
+describe(chn)
+
+# ╔═╡ 1e7f37c5-4cb4-4d06-8f68-55d80f7a00ad
+pairplot(chn)
+
+# ╔═╡ c16b81a0-2d36-4012-aed4-a035af31b4c3
+md"
+**Posterior predictive plotting**
+"
+
+# ╔═╡ 03d1ecf8-543d-444d-b1a3-7a19acd88499
+let
+ gens = generated_quantities(deterministic_uncond_mdl, chn)
+ plot_predYt(data, gens;
+ title = "Fitted deterministic model",
+ ylabel = "Number of Infected students"
+ )
+end
+
+# ╔═╡ e023770d-25f7-4b7a-b509-8a4372f42b76
+md"
+## Inference for the Stochastic SIR model
+
+In _Chatzilena et al_, they present an auto-regressive model for connecting the outcome of the ODE model to illness observations.
+The argument is that the stochastic component of the model can absorb the noise generated by a possible mis-specification of the model.
+
+In their approach they consider $\kappa_t = \log \lambda_t$ where $\kappa_t$ evolves according to an Ornstein-Uhlenbeck process:
+
+```math
+d\kappa_t = \phi(\mu_t - \kappa_t) dt + \sigma dB_t.
+```
+Which has transition density:
+```math
+\kappa_{t+1} | \kappa_t \sim N\Big(\mu_t + \left(\kappa_t - \mu_t\right)e^{-\phi}, {\sigma^2 \over 2 \phi} \left(1 - e^{-2\phi} \right)\Big).
+```
+Where $\mu_t = \log(I(t))$.
+
+We modify this approach since it implies that $\mu_t$ is treated as constant between observation times.
+
+Instead we redefine $\kappa_t$ as the log-residual:
+
+$\kappa_t = \log(\lambda_t / I(t)).$
+
+With the transition density:
+
+```math
+\kappa_{t+1} | \kappa_t \sim N\Big(\kappa_te^{-\phi}, {\sigma^2 \over 2 \phi} \left(1 - e^{-2\phi} \right)\Big).
+```
+
+This is an AR(1) process.
+
+The stochastic model is completed:
+
+```math
+\begin{aligned}
+Y_t &\sim \text{Poisson}(\lambda_t)\\
+\lambda_t &= I(t)\exp(\kappa_t)\\
+\beta &\sim \text{LogNormal}(\text{logmean}=0,\text{logstd}=1) \\
+\gamma & \sim \text{Gamma}(\text{shape} = 0.004, \text{scale} = 50)\\
+S(0) /N &\sim \text{Beta}(0.5, 0.5)\\
+\phi & \sim \text{HalfNormal}(0, 100) \\
+1 / \sigma^2 & \sim \text{InvGamma}(0.1,0.1).
+\end{aligned}
+```
+
+"
+
+# ╔═╡ 69ba59d1-2221-463f-8853-ae172739e512
+md"
+We will use the `AR` struct from `EpiAware` to define the auto-regressive process in this model; the `AR` struct gives a direct parameterisation of the AR model.
+
+To convert from the formulation above we sample from the priors, and define `HalfNormal` priors based on the sampled prior means of $e^{-\phi}$ and ${\sigma^2 \over 2 \phi} \left(1 - e^{-2\phi} \right)$.
+We also add a strong prior that $\kappa_1 \approx 0$.
+"
+
+# ╔═╡ 178e0048-069a-4953-bb24-5116eb81cc41
+ϕs = rand(truncated(Normal(0, 100), lower = 0.0), 1000)
+
+# ╔═╡ e6bcf0c0-3cc4-41f3-ad20-fa11bf2ca37b
+σ²s = rand(InverseGamma(0.1, 0.1), 1000) .|> x -> 1 / x
+
+# ╔═╡ 4f07e8ba-30d0-411f-8c3e-b6d5bc1bb5fa
+sampled_AR_damps = ϕs .|> ϕ -> exp(-ϕ)
+
+# ╔═╡ 48032d21-53fa-4c0a-85cb-c22327b55073
+sampled_AR_stds = map(ϕs, σ²s) do ϕ, σ²
+ (1 - exp(-2 * ϕ)) * σ² / (2 * ϕ)
+end
+
+# ╔═╡ 89c767b8-97a0-45bb-9e9f-821879ddd38b
+md"
+We define the AR(1) process by matching the means of `HalfNormal` prior distributions for the damp parameter and standard deviation parameter to the prior means calculated from the _Chatzilena et al_ definition.
+"
+
+# ╔═╡ 71a26408-1c26-46cf-bc72-c6ba528dfadd
+ar = AR(
+ damp_priors = [HalfNormal(mean(sampled_AR_damps))],
+ std_prior = HalfNormal(mean(sampled_AR_stds)),
+ init_priors = [Normal(0, 0.001)]
+)
+
+# ╔═╡ e1ffdaf6-ca2e-405d-8355-0d8848d005b0
+md"
+We can sample directly from the behaviour specified by the `ar` struct to do prior predictive checking on the `AR(1)` process.
+"
+
+# ╔═╡ de1498fa-8502-40ba-9708-2add74368e73
+let
+ nobs = size(data, 1)
+ ar_mdl = generate_latent(ar, nobs)
+ fig = Figure()
+ ax = Axis(fig[1, 1],
+ xticks = (data.ts[1:3:end], data.date[1:3:end] .|> string),
+ ylabel = "exp(kt)",
+ title = "Prior predictive sampling for relative residual in mean pred."
+ )
+ for i in 1:500
+ lines!(ax, ar_mdl() .|> exp, color = (:grey, 0.15))
+ end
+ fig
+end
+
+# ╔═╡ 9a82c75a-6ea4-48bb-af06-fabaca4c45ee
+md"
+We see that the choice of priors implies an _a priori_ belief that the extra observation noise on the mean prediction of the ODE model is fairly small, approximately 10% relative to the mean prediction.
+"
+
+# ╔═╡ b693a942-c6c7-40f8-997c-0dc8e5548132
+md"
+We can now define the probabilistic model.
+The stochastic model assumes a (random) time-varying ascertainment, which we implement using the `Ascertainment` struct from `EpiAware`.
+Note that instead of implementing an ascertainment factor `exp.(κₜ)` directly, which can be unstable for large primal values, by default `Ascertainment` uses the `LogExpFunctions.xexpy` function which implements $x\exp(y)$ stably for a wide range of values.
+"
+
+# ╔═╡ 8588c45d-c225-4779-b7d0-8a9fd059f30e
+md"
+To distinguish random variables sampled by various sub-processes, `EpiAware` process types create prefixes.
+The default for `Ascertainment` is just the string `\"Ascertainment\"`, but in this case we use the less verbose `\"va\"` for \"varying ascertainment\".
+"
+
+# ╔═╡ f116bb64-0426-4cd5-a01d-d8916d61af6d
+mdl_prefix = "va"
+
+# ╔═╡ 8956b070-3b9a-4a0f-a5a0-ff0b2770d9de
+md"
+Now we can construct our time-varying ascertainment model.
+The main keyword arguments here are `model` and `latent_model`.
+`model` sets the connection between the expected observation and the actual observation.
+In this case, we reuse our `PoissonError` model from above.
+`latent_model` sets the modification model on the expected values.
+In this case, we use the `AR` process we defined above.
+"
+
+# ╔═╡ c7b5d1dd-3e21-4841-b096-917328432c3c
+varying_ascertainment = Ascertainment(
+ model = obs,
+ latent_model = ar,
+ latent_prefix = mdl_prefix
+)
+
+# ╔═╡ 0e2e281c-ef19-4027-a1fa-16ce17b7bdd7
+md"
+Now we can declare the full model in the `Turing` PPL.
+"
+
+# ╔═╡ 9309f7f8-0896-4686-8bfc-b9f82d91bc0f
+@model function stochastic_ode_mdl(y_t, ts, obs, prob, N;
+ solver = AutoTsit5(Rosenbrock23())
+)
+
+ ##Priors##
+ β ~ LogNormal(0.0, 1.0)
+ γ ~ Gamma(0.004, 1 / 0.002)
+ S₀ ~ Beta(0.5, 0.5)
+
+ ##Remake ODE model##
+ _prob = remake(prob;
+ u0 = [S₀, 1 - S₀, 0.0],
+ p = [β, γ]
+ )
+
+ ##Solve ODE model##
+ sol = solve(_prob, solver;
+ saveat = ts,
+ verbose = false
+ )
+ λt = log1pexp.(N * sol[2, :])
+
+ ##Observation##
+ @submodel generated_y_t = generate_observations(obs, y_t, λt)
+
+ ##Generated quantities##
+ return (; sol, generated_y_t, R0 = β / γ)
+end
+
+# ╔═╡ 4330c83f-de39-44c7-bdab-87e5f5830145
+stochastic_mdl = stochastic_ode_mdl(
+ data.in_bed,
+ data.ts,
+ varying_ascertainment,
+ sir_prob,
+ N
+)
+
+# ╔═╡ 8071c92f-9fe8-48cf-b1a0-79d1e34ec7e7
+stochastic_uncond_mdl = stochastic_ode_mdl(
+ fill(missing, length(data.in_bed)),
+ data.ts,
+ varying_ascertainment,
+ sir_prob,
+ N
+)
+
+# ╔═╡ adb9d0ac-d412-4dbc-a601-59fcc33adf43
+md"
+**Prior predictive checking**
+"
+
+# ╔═╡ b44286f9-ba88-4e2b-9a34-f14c0a78824d
+let
+ prior_chn = sample(stochastic_uncond_mdl, Prior(), 2000)
+ gens = generated_quantities(stochastic_uncond_mdl, prior_chn)
+ plot_predYt(data, gens;
+ title = "Prior predictive: stochastic model",
+ ylabel = "Number of Infected students"
+ )
+end
+
+# ╔═╡ f690114f-4dca-4451-8a93-57c9d8d7c20c
+md"
+The prior predictive checking again shows misaligned prior beliefs; for example, _a priori_ without data we would not expect the median prediction for the number of ill children to be about 600 out of $N after 1 day.
+
+The latent process for the log-residuals $\kappa_t$ doesn't make much sense without priors, so we look for a reasonable MAP point to start NUTS from.
+We do this by first making an initial guess which is a mixture of:
+
+1. The posterior averages from the deterministic model.
+2. The prior averages of the structure parameters of the AR(1) process.
+3. Zero for the time-varying noise underlying the AR(1) process.
+"
+
+# ╔═╡ 3491e7e5-6c82-4c50-8feb-d730d1fbe457
+rand(stochastic_mdl)
+
+# ╔═╡ e1d54935-4305-42cf-98c6-ccee9b0813ea
+initial_guess = [[mean(chn[:β]),
+ mean(chn[:γ]),
+ mean(chn[:S₀]),
+ mean(ar.std_prior),
+ mean(ar.init_prior)[1],
+ mean(ar.damp_prior)[1]]
+ zeros(13)]
+
+# ╔═╡ 685221ea-f268-4ddc-937f-e7620d065c28
+md"
+Starting from the initial guess, the MAP point is calculated rapidly in one pass.
+"
+
+# ╔═╡ 6796ae76-bc2d-4895-ba0a-5e2c23c50dfb
+map_fit_stoch_mdl = maximum_a_posteriori(stochastic_mdl;
+ adtype = AutoReverseDiff(),
+ initial_params = initial_guess
+)
+
+# ╔═╡ 62080cc2-3cab-4a22-9b2e-2bff640a17a4
+md"
+Now we can run NUTS, sampling 1000 posterior draws per chain for 4 chains.
+"
+
+# ╔═╡ 156272d7-56c4-4ac4-bf3e-7882f4edc144
+chn2 = sample(
+ stochastic_mdl,
+ NUTS(; adtype = AutoReverseDiff(true)),
+ MCMCThreads(), 1000, 4;
+ initial_params = fill(map_fit_stoch_mdl.values.array, 4)
+)
+
+# ╔═╡ 00b90e6d-732f-41c9-a603-cabe9740e329
+describe(chn2)
+
+# ╔═╡ 37a016d8-8384-41c9-abdd-23e88b1f988d
+pairplot(chn2[[:β, :γ, :S₀, Symbol(mdl_prefix * ".σ_AR"),
+ Symbol(mdl_prefix * ".ar_init[1]"), Symbol(mdl_prefix * ".damp_AR[1]")]])
+
+# ╔═╡ 7df5d669-d3a2-4a66-83c3-f8618e39bec6
+let
+ vars = mapreduce(vcat, 1:13) do i
+ Symbol(mdl_prefix * ".ϵ_t[$i]")
+ end
+ pairplot(chn2[vars])
+end
+
+# ╔═╡ 0e7bbf13-9187-41ea-8b46-294b93be4c6d
+let
+ gens = generated_quantities(stochastic_uncond_mdl, chn2)
+ plot_predYt(data, gens;
+ title = "Fitted stochastic model",
+ ylabel = "Number of Infected students"
+ )
+end
+
+# ╔═╡ Cell order:
+# ╟─e34cec5a-a173-4e92-a860-340c7a9e9c72
+# ╟─33384fc6-7cca-11ef-3567-ab7df9200cde
+# ╠═b1468db3-7ab0-468c-8e27-70013a8f512f
+# ╠═a4710701-6315-459d-b677-f24b77ff3e80
+# ╠═7263d714-2ce4-4d57-8881-6b60db018dd5
+# ╠═261420cd-4650-402b-b126-7a431f93f37e
+# ╠═9c19a98b-a08b-4560-966d-61ff0ece2ad5
+# ╠═3897e773-ed07-4860-bb62-35605d0dacb0
+# ╠═14641441-dbea-4fdf-88e0-64a57da60ef7
+# ╠═a0d91258-8ab5-4adc-98f2-8f17b4bd685c
+# ╟─943b82ec-b4dc-4537-8183-d6c73cd74a37
+# ╟─0e78285c-d2e8-4c3c-848a-14dae6ead0a4
+# ╠═ab4269b1-e292-466f-8bfb-713d917c18f9
+# ╟─f16eb00b-2d77-45df-b767-757fe2f5674c
+# ╠═bb07a580-6d86-48b3-a79f-d2ed9306e87c
+# ╟─b5ff95d1-8a6f-4d48-adf2-60d91b3ebebe
+# ╟─d64388f9-6edd-414d-a191-316f75b35b2c
+# ╠═7c9cbbc1-71ef-4d81-b93a-c2b3a8683d53
+# ╠═aba3f1db-c290-409c-9b9e-6065935ede54
+# ╟─3f54bb44-76c4-4744-885a-46dedfaffeca
+# ╟─ea1be94b-d722-47ee-8465-982c83dc6838
+# ╠═87509792-e28d-4618-9bf5-e06b2e5dbe8b
+# ╟─81501c84-5e1f-4829-a26d-52fe00503958
+# ╠═1d287c8e-7000-4b23-ae7e-f7008c3e53bd
+# ╟─e7383885-fa6a-4240-a252-44ae82cae713
+# ╠═dbc1b453-1c29-4f82-bec9-098d67f9e63f
+# ╠═e795c2bf-0861-4e96-9921-db47f41af206
+# ╟─e848434c-2543-43d1-ae22-5c4241f138bb
+# ╠═ab8c98d1-d357-4c49-9f5a-f069e05c45f5
+# ╟─2c6ac235-e331-4189-8c8c-74de5f98b2c4
+# ╠═a729f1cd-404c-4a33-a8f9-b2ea6f0adb62
+# ╟─4c0759fb-76e9-4de5-9206-89e8bfb6c3bb
+# ╠═8d96db67-de3b-4704-9f54-f4ed50a4ecff
+# ╠═ba35cebd-0d29-43c5-8db7-f550d7f821bc
+# ╠═0be912c1-22dc-4978-b86a-84273062f5da
+# ╟─a1a34b67-ff4e-4fee-aa30-4c2add3ea8a0
+# ╠═2cf64ba3-ff8d-40b0-9bd8-9e80393156f5
+# ╠═b2429b68-dd75-499f-a4e1-1b7d72e209c7
+# ╠═1e7f37c5-4cb4-4d06-8f68-55d80f7a00ad
+# ╟─c16b81a0-2d36-4012-aed4-a035af31b4c3
+# ╠═03d1ecf8-543d-444d-b1a3-7a19acd88499
+# ╟─e023770d-25f7-4b7a-b509-8a4372f42b76
+# ╟─69ba59d1-2221-463f-8853-ae172739e512
+# ╠═178e0048-069a-4953-bb24-5116eb81cc41
+# ╠═e6bcf0c0-3cc4-41f3-ad20-fa11bf2ca37b
+# ╠═4f07e8ba-30d0-411f-8c3e-b6d5bc1bb5fa
+# ╠═48032d21-53fa-4c0a-85cb-c22327b55073
+# ╟─89c767b8-97a0-45bb-9e9f-821879ddd38b
+# ╠═71a26408-1c26-46cf-bc72-c6ba528dfadd
+# ╟─e1ffdaf6-ca2e-405d-8355-0d8848d005b0
+# ╠═de1498fa-8502-40ba-9708-2add74368e73
+# ╟─9a82c75a-6ea4-48bb-af06-fabaca4c45ee
+# ╟─b693a942-c6c7-40f8-997c-0dc8e5548132
+# ╟─8588c45d-c225-4779-b7d0-8a9fd059f30e
+# ╠═f116bb64-0426-4cd5-a01d-d8916d61af6d
+# ╟─8956b070-3b9a-4a0f-a5a0-ff0b2770d9de
+# ╠═c7b5d1dd-3e21-4841-b096-917328432c3c
+# ╟─0e2e281c-ef19-4027-a1fa-16ce17b7bdd7
+# ╠═9309f7f8-0896-4686-8bfc-b9f82d91bc0f
+# ╠═4330c83f-de39-44c7-bdab-87e5f5830145
+# ╠═8071c92f-9fe8-48cf-b1a0-79d1e34ec7e7
+# ╟─adb9d0ac-d412-4dbc-a601-59fcc33adf43
+# ╠═b44286f9-ba88-4e2b-9a34-f14c0a78824d
+# ╟─f690114f-4dca-4451-8a93-57c9d8d7c20c
+# ╠═3491e7e5-6c82-4c50-8feb-d730d1fbe457
+# ╠═e1d54935-4305-42cf-98c6-ccee9b0813ea
+# ╟─685221ea-f268-4ddc-937f-e7620d065c28
+# ╠═6796ae76-bc2d-4895-ba0a-5e2c23c50dfb
+# ╟─62080cc2-3cab-4a22-9b2e-2bff640a17a4
+# ╠═156272d7-56c4-4ac4-bf3e-7882f4edc144
+# ╠═00b90e6d-732f-41c9-a603-cabe9740e329
+# ╠═37a016d8-8384-41c9-abdd-23e88b1f988d
+# ╠═7df5d669-d3a2-4a66-83c3-f8618e39bec6
+# ╠═0e7bbf13-9187-41ea-8b46-294b93be4c6d
diff --git a/previews/PR513/showcase/replications/chatzilena-2019/influenza_england_1978_school.csv2 b/previews/PR513/showcase/replications/chatzilena-2019/influenza_england_1978_school.csv2
new file mode 100644
index 000000000..1eb88145c
--- /dev/null
+++ b/previews/PR513/showcase/replications/chatzilena-2019/influenza_england_1978_school.csv2
@@ -0,0 +1,15 @@
+"","date","in_bed","convalescent"
+"1",1978-01-22,3,0
+"2",1978-01-23,8,0
+"3",1978-01-24,26,0
+"4",1978-01-25,76,0
+"5",1978-01-26,225,9
+"6",1978-01-27,298,17
+"7",1978-01-28,258,105
+"8",1978-01-29,233,162
+"9",1978-01-30,189,176
+"10",1978-01-31,128,166
+"11",1978-02-01,68,150
+"12",1978-02-02,29,85
+"13",1978-02-03,14,47
+"14",1978-02-04,4,20
diff --git a/previews/PR513/showcase/replications/mishra-2020/get_data.R b/previews/PR513/showcase/replications/mishra-2020/get_data.R
new file mode 100644
index 000000000..fada0bf58
--- /dev/null
+++ b/previews/PR513/showcase/replications/mishra-2020/get_data.R
@@ -0,0 +1,19 @@
+install.packages("covidregionaldata",
+ repos = "https://epiforecasts.r-universe.dev"
+)
+
+library(covidregionaldata)
+library(dplyr)
+library(ggplot2)
+library(scales)
+
+start_using_memoise()
+
+# Get data
+
+data <- get_national_data("South Korea", source = "ecdc") |>
+ filter(date <= "2020-7-31") |>
+ select(date, cases_new, deaths_new)
+plot(data$cases_new)
+
+write.csv(data, "EpiAware/docs/src/examples/south_korea_data.csv")
diff --git a/previews/PR513/showcase/replications/mishra-2020/index.html b/previews/PR513/showcase/replications/mishra-2020/index.html
new file mode 100644
index 000000000..dc43bbde7
--- /dev/null
+++ b/previews/PR513/showcase/replications/mishra-2020/index.html
@@ -0,0 +1,438 @@
+
+On the derivation of the renewal equation from an age-dependent branching process: an epidemic modelling perspective · EpiAware.jl
The time varying reproductive number modelled as an AR(2) process on the log-scale \(\log R_t \sim \text{AR(2)}\).
The latent infection (\(I_t\)) generating process is a renewal model (note that we leave out external infections in this note):
$$I_t = R_t \sum_{s\geq 1} I_{t-s} g_s.$$
The discrete generation interval \(g_t\) is a daily discretisation of the probability mass function of an estimated serial interval distribution for SARS-CoV-2:
$$G \sim \text{Gamma}(6.5,0.62).$$
Observed cases \(C_t\) are distributed around latent infections with negative binomial errors:
In the examples below we are going to largely recreate the Mishra et al model, whilst emphasising that each component of the overall epidemiological model is, itself, a stand-alone model that can be sampled from.
Now we want to import these dependencies into scope. If evaluating these code lines/blocks in the REPL, then the REPL will offer to install any missing dependencies. Alternatively, you can add them to your active environment using Pkg.add.
+
+
using EpiAware
+
+
+
using Turing, DynamicPPL #Underlying Turing ecosystem packages to interact with models
+
+
+
using Distributions, Statistics #Statistics packages
+
+
+
using CSV, DataFramesMeta #Data wrangling
+
+
+
using CairoMakie, PairPlots, TimeSeries #Plotting backend
+
+
+
using ReverseDiff #Automatic differentiation backend
+
+
+
begin #Date utility and set Random seed
+ using Dates
+ using Random
+ Random.seed!(1)
+end
First, we make sure that we have the data we want to analyse in scope by downloading it from where we have saved a copy in the EpiAware repository.
NB: The case data is curated by the covidregionaldata package. We accessed the South Korean case data using a short R script. It is possible to interface directly from a Julia session using the RCall.jl package, but we do not do this in this notebook to reduce the number of underlying dependencies required to run this notebook.
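The download step, as it appears in the notebook source:
url = "https://raw.githubusercontent.com/CDCgov/Rt-without-renewal/main/EpiAware/docs/src/showcase/replications/mishra-2020/south_korea_data.csv2"
data = CSV.read(download(url), DataFrame)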
Time-varying reproduction number as an AbstractLatentModel type
EpiAware exposes an AbstractLatentModel abstract type, the purpose of which is to group stochastic processes which can be interpreted as generating time-varying parameters/quantities of interest, which we call latent process models.
In the Mishra et al model the log-time varying reproductive number \(Z_t\) is assumed to evolve as an auto-regressive process, AR(2):
Where \(\rho_1, \rho_2\) are the parameters of the AR process, and \(\epsilon_t\) is a white noise process with standard deviation \(\sigma^*\).
+
+
+
In EpiAware we determine the behaviour of a latent process by choosing a concrete subtype (i.e. a struct) of AbstractLatentModel which has fields that set the priors of the various parameters required for the latent process.
The AR process has the struct AR <: AbstractLatentModel. The user can supply the priors for \(\rho_1,\rho_2\) in the field damp_priors, for \(\sigma^*\) in the field std_prior, and the initial values \(Z_1, Z_2\) in the field init_priors.
+
+
+
We choose priors based on Mishra et al using the Distributions.jl interface to probability distributions. Note that we constrain the AR parameters to \([0,1]\), as in Mishra et al, using the truncated function.
In Mishra et al the standard deviation of the stationary distribution of \(Z_t\) has a standard normal distribution conditioned to be positive, \(\sigma \sim \mathcal{N}^+(0,1)\). The value \(σ^*\) was determined from a nonlinear function of sampled \(\sigma, ~\rho_1, ~\rho_2\) values. Since Mishra et al give sharply informative priors for \(\rho_1,~\rho_2\) (see below), we simplify by calculating \(\sigma^*\) at the prior mode of \(\rho_1,~\rho_2\). This results in a \(\sigma^* \sim \mathcal{N}^+(0, 0.5)\) prior.
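These choices correspond to the following construction from the notebook source:
ar = AR(
    damp_priors = reverse([truncated(Normal(0.8, 0.05), 0, 1),
        truncated(Normal(0.1, 0.05), 0, 1)]),
    std_prior = HalfNormal(0.5),
    init_priors = [Normal(-1.0, 0.1), Normal(-1.0, 0.5)]
)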
As mentioned above, we can use this instance of the AR latent model to construct a Turing model object which implements the probabilistic behaviour determined by ar. We do this with the constructor function exposed by EpiAware: generate_latent, which combines an AbstractLatentModel subtype struct with the number of time steps for which we want to generate the latent process.
As a refresher, we remind that the Turing.Model object has the following properties:
The model object parameters are sampleable using rand; that is we can generate parameters from the specified priors e.g. θ = rand(mdl).
The model object is generative as a callable; that is we can sample instances of \(Z_t\) e.g. Z_t = mdl().
The model object can construct new model objects by conditioning parameters using the DynamicPPL.jl syntax, e.g. conditional_mdl = mdl | (σ_AR = 1.0, ).
As a concrete example we create a model object for the AR(2) process we specified above for 50 time steps:
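A minimal sketch of this step (the model object is named ar_mdl, matching its use in the plotting code below):
ar_mdl = generate_latent(ar, 50)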
Ultimately, this will only be one component of the full epidemiological model. However, it is useful to visualise its probabilistic behaviour for model diagnostic and prior predictive checking.
We can spaghetti plot generative samples from the AR(2) process with the priors specified above.
+
+
plt_ar_sample = let
+ n_samples = 100
+ ar_mdl_samples = mapreduce(hcat, 1:n_samples) do _
+ ar_mdl() .|> exp #Sample Z_t trajectories for the model
+ end
+
+ fig = Figure()
+ ax = Axis(fig[1, 1];
+ yscale = log10,
+ ylabel = "Time varying Rₜ",
+ title = "$(n_samples) draws from the prior Rₜ model"
+ )
+ for col in eachcol(ar_mdl_samples)
+ lines!(ax, col, color = (:grey, 0.1))
+ end
+ fig
+end
+
+
+
+
This suggests that a priori we believe that there is a few percent chance of achieving very high \(R_t\) values, i.e. \(R_t \sim 10-1000\) is not excluded by our priors.
+
+
+
The Renewal model as an AbstractEpiModel type
The abstract type for models that generate infections exposed by EpiAware is called AbstractEpiModel. As with latent models, different concrete subtypes of AbstractEpiModel define different classes of infection generating process. In this case we want to implement a renewal model.
The Renewal <: AbstractEpiModel type of struct needs two fields:
Data about the generation interval of the infectious disease so it can construct \(g_t\).
A prior for the initial numbers of infected.
In Mishra et al they use an estimate of the serial interval of SARS-CoV-2 as an estimate of the generation interval.
+
+
truth_GI = Gamma(6.5, 0.62)
+
Distributions.Gamma{Float64}(α=6.5, θ=0.62)
+
+
+
This is a representation of the generation interval distribution as continuous whereas the infection process will be formulated in discrete daily time steps. By default, EpiAware performs double interval censoring to convert our continuous estimate of the generation interval into a discretized version \(g_t\), whilst also applying left truncation such that \(g_0 = 0\) and normalising \(\sum_t g_t = 1.\)
The constructor for converting a continuous estimate of the generation interval distribution into a usable discrete time estimate is EpiData.
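A sketch of this step; the keyword names and the 14-day right-truncation horizon below are assumptions for illustration rather than values taken from this page, so check the EpiData docstring for the exact signature:
model_data = EpiData(gen_distribution = truth_GI, D_gen = 14.0) # assumed keywords: continuous distribution and truncation horizon in days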
We can compare the discretized generation interval with the continuous estimate, which in this example is the serial interval estimate.
+
+
let
+ fig = Figure()
+ ax = Axis(fig[1, 1];
+ xticks = 0:14,
+ xlabel = "Days",
+ title = "Continuous and discrete generation intervals"
+ )
+ barplot!(ax, model_data.gen_int;
+ label = "Discretized next gen pmf"
+ )
+ lines!(truth_GI;
+ label = "Continuous serial interval",
+ color = :green
+ )
+ axislegend(ax)
+ fig
+end
+
+
+
+
The user also needs to specify a prior for the log incidence at time zero, \(\log I_0\). The initial history of latent infections \(I_{-1}, I_{-2},\dots\) is constructed as
$$I_t = e^{rt} I_0,\qquad t = 0, -1, -2,...$$
Where the exponential growth rate \(r\) is determined by the initial reproductive number \(R_1\) via the solution to the implicit equation,
NB: We don't implement a background infection rate in this model.
+
+
+
Turing model interface to Renewal process
As mentioned above, we can use this instance of the Renewal latent infection model to construct a Turing model which implements the probabilistic behaviour determined by epi, using the constructor function generate_latent_infs which combines epi with a provided \(\log R_t\) time series.
Here we choose an example where \(R_t\) decreases from \(R_t = 3\) to \(R_t = 0.5\) over the course of 50 days.
+
+
R_t_fixed = [0.5 + 2.5 / (1 + exp(t - 15)) for t in 1:50]
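Assuming the Renewal model has been constructed (here called epi, built from model_data and an initialisation prior; its construction is not visible in this extract), a minimal sketch of the constructor described above, producing the latent_inf_mdl used in the plotting code below, is:
latent_inf_mdl = generate_latent_infs(epi, log.(R_t_fixed)) # combine the Renewal model with the log R_t trajectory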
plt_epi = let
+ n_samples = 100
+ #Sample unconditionally the underlying parameters of the model
+ epi_mdl_samples = mapreduce(hcat, 1:n_samples) do _
+ latent_inf_mdl()
+ end
+ fig = Figure()
+ ax1 = Axis(fig[1, 1];
+ title = "$(n_samples) draws from renewal model with chosen Rt",
+ ylabel = "Latent infections"
+ )
+ ax2 = Axis(fig[2, 1];
+ ylabel = "Rt"
+ )
+ for col in eachcol(epi_mdl_samples)
+ lines!(ax1, col;
+ color = (:grey, 0.1)
+ )
+ end
+ lines!(ax2, R_t_fixed;
+ linewidth = 2
+ )
+ fig
+end
+
+
+
+
Negative Binomial Observations as an ObservationModel type
In Mishra et al latent infections were assumed to occur on their observation day with negative binomial errors; this motivates using the serial interval (the time between onset of symptoms of a primary and secondary case) rather than the generation interval distribution (the time between infection times of a primary and secondary case).
Observation models are set in EpiAware as concrete subtypes of an ObservationModel. The negative binomial error model without observation delays is set with a NegativeBinomialError struct. In Mishra et al the overdispersion parameter \(\phi\) sets the relationship between the mean and variance of the negative binomial errors:
In EpiAware, we default to a prior on \(\sqrt{1/\phi}\) because this quantity is approximately the coefficient of variation of the observation noise and, therefore, is easier to reason about in terms of a priori beliefs. We call this quantity the cluster factor.
A prior for \(\phi\) was not specified in Mishra et al; we select one below, but we will condition on a fixed value in the analysis below.
Turing model interface to the NegativeBinomialError model
We can construct a NegativeBinomialError model implementation as a Turing model using the EpiAware generate_observations function.
Turing uses missing arguments to indicate variables that are to be sampled. We use this to obtain a forward model that samples observations, conditional on an underlying expected observation time series.
+
+
+
First, we set an artificial expected cases curve.
+
+
expected_cases = [1000 * exp(-(t - 15)^2 / (2 * 4)) for t in 1:30]
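Assuming obs here is the NegativeBinomialError instance constructed above (its construction is not visible in this extract), the forward observation model used in the plotting code below can be sketched as follows, where passing missing for the data marks it as a quantity to be sampled:
obs_mdl = generate_observations(obs, missing, expected_cases)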
plt_obs = let
+ n_samples = 100
+ obs_mdl_samples = mapreduce(hcat, 1:n_samples) do _
+ θ = obs_mdl() #Sample unconditionally the underlying parameters of the model
+ end
+ fig = Figure()
+ ax = Axis(fig[1, 1];
+ title = "$(n_samples) draws from neg. bin. obs model",
+ ylabel = "Observed cases"
+ )
+ for col in eachcol(obs_mdl_samples)
+ scatter!(ax, col;
+ color = (:grey, 0.2)
+ )
+ end
+ lines!(ax, expected_cases;
+ color = :red,
+ linewidth = 3,
+ label = "Expected cases"
+ )
+ axislegend(ax)
+ fig
+end
+
+
+
+
Composing models into an EpiProblem
Mishra et al follows a common pattern of having an infection generation process driven by a latent process with an observation model that links the infection process to a discrete valued time series of incidence data.
In EpiAware we provide an EpiProblem constructor for this common epidemiological model pattern.
The constructor for an EpiProblem requires:
An epi_model.
A latent_model.
An observation_model.
A tspan.
The tspan sets the range of the time index for the models.
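A sketch using the four fields listed above; the component names (epi, ar, obs) and the tspan value here are assumptions for illustration:
epi_prob = EpiProblem(
    epi_model = epi, # the Renewal infection-generating process
    latent_model = ar, # the AR(2) process for log R_t
    observation_model = obs, # the negative binomial observation model
    tspan = (45, 80) # assumed time-index range for the fitting window
)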
We make inferences on the unobserved quantities, such as \(R_t\) by sampling from the model conditioned on the observed data. We generate the posterior samples using the No U-Turns (NUTS) sampler.
To make NUTS more robust we provide manypathfinder, which is built on pathfinder variational inference from Pathfinder.jl. manypathfinder runs nruns pathfinder processes on the inference problem and returns the pathfinder run with maximum estimated ELBO.
The composition of doing variational inference as a pre-sampler step which gets passed to NUTS initialisation is defined using the EpiMethod struct, where a sequence of pre-sampler steps can be defined.
EpiMethod also allows the specification of NUTS parameters, such as type of automatic differentiation, type of parallelism and number of parallel chains to sample.
In the epidemiological model it is hard to identify between the AR parameters, such as the standard deviation of the AR process, and the cluster factor of the negative binomial observation model. The reason for this identifiability problem is that the model assumes no delay between infection and observation. Therefore, on any day the data could be explained by \(R_t\) changing or by observation noise, and it's not easy to disentangle greater volatility in \(R_t\) from higher noise in the observations.
In models with latent delays, changes in \(R_t\) impact the observed cases over several days, which means that it is easier to disentangle trend effects from observation-to-observation fluctuations.
To counteract this problem we condition the model on a fixed cluster factor value.
+
+
fixed_cluster_factor = 0.25
+
0.25
+
+
+
EpiAware has the generate_epiaware function which joins an EpiProblem object with the data to produce a Turing model. This Turing model composes the three unit Turing models defined above: the Renewal infection generating process, the AR latent process for \(\log R_t\), and the negative binomial observation model. Therefore, we can condition on variables as with any other Turing model.
To assess the quality of the inference visually we can plot predictive quantiles for generated case data from the version of the model which hasn't conditioned on case data using posterior parameters inferred from the version conditioned on observed data. For this purpose, we add a generated_quantiles utility function. This kind of visualisation is known as posterior predictive checking, and is a useful diagnostic tool for Bayesian inference (see here).
We also plot the inferred \(R_t\) estimates from the model. We find that the EpiAware model recovers the main finding in Mishra et al; that the \(R_t\) in South Korea peaked at a very high value (\(R_t \sim 10\) at peak) before rapidly dropping below 1 in early March 2020.
Note that, in reality, the peak \(R_t\) found here and in Mishra et al is unrealistically high, this might be due to a combination of:
A mis-estimated generation interval/serial interval distribution.
An ascertainment rate that was, in reality, changing over time.
In a future note, we'll demonstrate having a time-varying ascertainment rate.
+
+
function generated_quantiles(gens, quantity, qs; transformation = x -> x)
+ mapreduce(hcat, gens) do gen #loop over sampled generated quantities
+ getfield(gen, quantity) |> transformation
+ end |> mat -> mapreduce(hcat, qs) do q #Loop over matrix row to condense into qs
+ map(eachrow(mat)) do row
+ if any(ismissing, row)
+ return missing
+ else
+ quantile(row, q)
+ end
+ end
+ end
+end
+
generated_quantiles (generic function with 1 method)
diff --git a/previews/PR513/showcase/replications/mishra-2020/index.jl b/previews/PR513/showcase/replications/mishra-2020/index.jl
new file mode 100644
index 000000000..28b7a9ed0
--- /dev/null
+++ b/previews/PR513/showcase/replications/mishra-2020/index.jl
@@ -0,0 +1,635 @@
+### A Pluto.jl notebook ###
+# v0.20.0
+
+using Markdown
+using InteractiveUtils
+
+# ╔═╡ 34a06b3b-799b-48c5-bd08-1e57151f51ec
+let
+ docs_dir = dirname(dirname(dirname(dirname(@__DIR__))))
+ using Pkg: Pkg
+ Pkg.activate(docs_dir)
+ Pkg.instantiate()
+end;
+
+# ╔═╡ d63b37f0-9642-4c38-ac01-9ffe48d50441
+using EpiAware
+
+# ╔═╡ 74642759-35a5-4957-9f2b-544712866410
+using Turing, DynamicPPL #Underlying Turing ecosystem packages to interact with models
+
+# ╔═╡ 0c5f413e-d043-448d-8665-f0f6f705d70f
+using Distributions, Statistics #Statistics packages
+
+# ╔═╡ b1e2a759-a52b-4ee5-8db4-cfe848878c92
+using CSV, DataFramesMeta #Data wrangling
+
+# ╔═╡ 9eb03a0b-c6ca-4e23-8109-fb68f87d7fdf
+using CairoMakie, PairPlots, TimeSeries #Plotting backend
+
+# ╔═╡ 97b5374e-7653-4b3b-98eb-d8f73aa30580
+using ReverseDiff #Automatic differentiation backend
+
+# ╔═╡ 1642dbda-4915-4e29-beff-bca592f3ec8d
+begin #Date utility and set Random seed
+ using Dates
+ using Random
+ Random.seed!(1)
+end
+
+# ╔═╡ 8a8d5682-2f89-443b-baf0-d4d3b134d311
+md"
+# Example: Early COVID-19 case data in South Korea
+
+In this example we use `EpiAware` functionality to largely recreate an epidemiological model presented in [On the derivation of the renewal equation from an age-dependent branching process: an epidemic modelling perspective, _Mishra et al_ (2020)](https://arxiv.org/abs/2006.16487). _Mishra et al_ consider test-confirmed cases of COVID-19 in South Korea between January and July 2020. The components of the epidemiological model they consider are:
+
+- The time varying reproductive number modelled as an [AR(2) process](https://en.wikipedia.org/wiki/Autoregressive_model) on the log-scale $\log R_t \sim \text{AR(2)}$.
+- The latent infection ($I_t$) generating process is a renewal model (note that we leave out external infections in this note):
+```math
+I_t = R_t \sum_{s\geq 1} I_{t-s} g_s.
+```
+- The discrete generation interval $g_t$ is a daily discretisation of the probability mass function of an estimated serial interval distribution for SARS-CoV-2:
+```math
+G \sim \text{Gamma}(6.5,0.62).
+```
+- Observed cases $C_t$ are distributed around latent infections with negative binomial errors:
+```math
+C_t \sim \text{NegBin}(\text{mean} = I_t,~ \text{overdispersion} = \phi).
+```
+
+In the examples below we are going to largely recreate the _Mishra et al_ model, whilst emphasising that each component of the overall epidemiological model is, itself, a stand-alone model that can be sampled from.
+"
+
+# ╔═╡ 27d73202-a93e-4471-ab50-d59345304a0b
+md"
+## Dependencies for this notebook
+Now we want to import these dependencies into scope. If evaluating these code lines/blocks in the REPL, then the REPL will offer to install any missing dependencies. Alternatively, you can add them to your active environment using `Pkg.add`.
+"
+
+# ╔═╡ 1d3b9541-80ad-41b5-a5ed-a947f5c0731b
+md"
+## Load early SARS-2 case data for South Korea
+First, we make sure that we have the data we want to analyse in scope by downloading it from where we have saved a copy in the `EpiAware` repository.
+
+NB: The case data is curated by the [`covidregionaldata`](https://github.com/epiforecasts/covidregionaldata) package. We accessed the South Korean case data using a short [R script](https://github.com/CDCgov/Rt-without-renewal/blob/main/EpiAware/docs/src/showcase/replications/mishra-2020/get_data.R). It is possible to interface directly from a Julia session using the `RCall.jl` package, but we do not do this in this notebook to reduce the number of underlying dependencies required to run this notebook.
+"
+
+# ╔═╡ 4e5e0e24-8c55-4cb4-be3a-d28198f81a69
+url = "https://raw.githubusercontent.com/CDCgov/Rt-without-renewal/main/EpiAware/docs/src/showcase/replications/mishra-2020/south_korea_data.csv2"
+
+# ╔═╡ a59d977c-0178-11ef-0063-83e30e0cf9f0
+data = CSV.read(download(url), DataFrame)
+
+# ╔═╡ 104f4d16-7433-4a2d-89e7-288a9b223563
+md"
+## Time-varying reproduction number as an `AbstractLatentModel` type
+
+`EpiAware` exposes an `AbstractLatentModel` abstract type, whose purpose is to group stochastic processes that can be interpreted as generating time-varying parameters/quantities of interest. We call these latent process models.
+
+In the _Mishra et al_ model the log of the time-varying reproductive number, $Z_t$, is assumed to evolve as an auto-regressive process, AR(2):
+
+```math
+\begin{align}
+R_t &= \exp Z_t, \\
+Z_t &= \rho_1 Z_{t-1} + \rho_2 Z_{t-2} + \epsilon_t, \\
+\epsilon_t &\sim \text{Normal}(0, \sigma^*).
+\end{align}
+```
+where $\rho_1, \rho_2$ are the parameters of the AR process and $\epsilon_t$ is a white noise process with standard deviation $\sigma^*$.
+"
+
+# ╔═╡ d753b21f-cf8e-4a25-bab3-46c811c80a78
+md"
+In `EpiAware` we determine the behaviour of a latent process by choosing a concrete subtype (i.e. a struct) of `AbstractLatentModel` which has fields that set the priors of the various parameters required for the latent process.
+
+The AR process has the struct `AR <: AbstractLatentModel`. The user can supply the priors for $\rho_1,\rho_2$ in the field `damp_priors`, for $\sigma^*$ in the field `std_prior`, and the initial values $Z_1, Z_2$ in the field `init_priors`.
+"
+
+# ╔═╡ d201c82b-8efd-41e2-96d7-4f5e0c67088c
+md"
+We choose priors based on _Mishra et al_ using the `Distributions.jl` interface to probability distributions. Note that we constrain the AR parameters to $[0,1]$, as in _Mishra et al_, using the `truncated` function.
+
+In _Mishra et al_ the standard deviation of the _stationary distribution_ of $Z_t$ has a standard normal distribution conditioned to be positive, $\sigma \sim \mathcal{N}^+(0,1)$. The value $\sigma^*$ was then determined as a nonlinear function of the sampled $\sigma, ~\rho_1, ~\rho_2$ values. Since _Mishra et al_ give sharply informative priors for $\rho_1,~\rho_2$ (see below), we simplify by calculating $\sigma^*$ at the prior modes of $\rho_1,~\rho_2$. This results in a $\sigma^* \sim \mathcal{N}^+(0, 0.5)$ prior.
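+
+As an illustrative aside (our own back-of-the-envelope check, assuming the standard AR(2) stationary-variance formula, not the exact calculation in _Mishra et al_), this choice of scale can be motivated as follows:
+
+```julia
+# Stationary variance of an AR(2) process:
+#   Var(Z) = σ*² (1 - ρ₂) / ((1 + ρ₂) ((1 - ρ₂)² - ρ₁²))
+# Rearranged for σ*, with sd(Z) = 1 and evaluated at the prior modes:
+ρ₁, ρ₂ = 0.8, 0.1
+σ_star = sqrt((1 + ρ₂) * ((1 - ρ₂)^2 - ρ₁^2) / (1 - ρ₂))  # ≈ 0.46, i.e. close to 0.5
+```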
+"
+
+# ╔═╡ c88bbbd6-0101-4c04-97c9-c5887ef23999
+ar = AR(
+ damp_priors = reverse([truncated(Normal(0.8, 0.05), 0, 1),
+ truncated(Normal(0.1, 0.05), 0, 1)]),
+ std_prior = HalfNormal(0.5),
+ init_priors = [Normal(-1.0, 0.1), Normal(-1.0, 0.5)]
+)
+
+# ╔═╡ 31ee2757-0409-45df-b193-60c552797a3d
+md"
+### `Turing` model interface to the AR process
+
+As mentioned above, we can use this instance of the `AR` latent model to construct a [`Turing`](https://turinglang.org/) model object which implements the probabilistic behaviour determined by `ar`. We do this with the constructor function `generate_latent`, exposed by `EpiAware`, which combines an `AbstractLatentModel` subtype struct with the number of time steps for which we want to generate the latent process.
+
+As a refresher, recall that a `Turing.Model` object has the following properties:
+
+- The model object parameters are sampleable using `rand`; that is, we can generate parameters from the specified priors, e.g. `θ = rand(mdl)`.
+- The model object is generative as a callable; that is, we can sample instances of $Z_t$, e.g. `Z_t = mdl()`.
+- The model object can construct new model objects by conditioning parameters using the [`DynamicPPL.jl`](https://turinglang.org/DynamicPPL.jl/stable/) syntax, e.g. `conditional_mdl = mdl | (σ_AR = 1.0, )`.
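+
+A minimal sketch of these interactions (where `mdl` stands for any `Turing.Model`, for example the `ar_mdl` object constructed below) might look like:
+
+```julia
+θ = rand(mdl)                    # draw parameters from the specified priors
+Z_t = mdl()                      # generate a sample trajectory from the model
+cond_mdl = mdl | (σ_AR = 1.0,)   # condition on a fixed parameter value
+Z_t_cond = cond_mdl()            # generate with σ_AR fixed at 1.0
+```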
+
+As a concrete example we create a model object for the AR(2) process we specified above for 50 time steps:
+"
+
+# ╔═╡ 2bf22866-b785-4ee0-953d-ac990a197561
+ar_mdl = generate_latent(ar, 50)
+
+# ╔═╡ 25e25125-8587-4451-8600-9b55a04dbcd9
+md"
+Ultimately, this will only be one component of the full epidemiological model. However, it is useful to visualise its probabilistic behaviour for model diagnostics and prior predictive checking.
+
+We can spaghetti plot generative samples from the AR(2) process with the priors specified above.
+"
+
+# ╔═╡ fbe117b7-a0b8-4604-a5dd-e71a0a1a4fc3
+plt_ar_sample = let
+ n_samples = 100
+ ar_mdl_samples = mapreduce(hcat, 1:n_samples) do _
+        ar_mdl() .|> exp #Sample Z_t trajectories and transform to Rₜ
+ end
+
+ fig = Figure()
+ ax = Axis(fig[1, 1];
+ yscale = log10,
+ ylabel = "Time varying Rₜ",
+ title = "$(n_samples) draws from the prior Rₜ model"
+ )
+ for col in eachcol(ar_mdl_samples)
+ lines!(ax, col, color = (:grey, 0.1))
+ end
+ fig
+end
+
+# ╔═╡ 9f84dec1-70f1-442e-8bef-a9494921549e
+md"
+This suggests that _a priori_ we believe that there is a few percent chance of achieving very high $R_t$ values, i.e. $R_t \sim 10-1000$ is not excluded by our priors.
+"
+
+# ╔═╡ 6a9e871f-a2fa-4e41-af89-8b0b3c3b5b4b
+md"
+## The Renewal model as an `AbstractEpiModel` type
+
+The abstract type for models that generate infections exposed by `EpiAware` is called `AbstractEpiModel`. As with latent models, different concrete subtypes of `AbstractEpiModel` define different classes of infection-generating process. In this case we want to implement a renewal model.
+
+The `Renewal <: AbstractEpiModel` struct needs two fields:
+
+- Data about the generation interval of the infectious disease, so that it can construct $g_t$.
+- A prior for the initial number of infected individuals.
+
+In _Mishra et al_ they use an estimate of the serial interval of SARS-CoV-2 as an estimate of the generation interval.
+
+"
+
+# ╔═╡ c1fc1929-0624-45c0-9a89-86c8479b2675
+truth_GI = Gamma(6.5, 0.62)
+
+# ╔═╡ ab0c6bec-1ab7-43d1-aa59-11225dea79eb
+md"
+This represents the generation interval as a continuous distribution, whereas the infection process will be formulated in discrete daily time steps. By default, `EpiAware` performs [double interval censoring](https://www.medrxiv.org/content/10.1101/2024.01.12.24301247v1) to convert our continuous estimate of the generation interval into a discretized version $g_t$, whilst also applying left truncation such that $g_0 = 0$ and normalising $\sum_t g_t = 1.$
+
+The constructor for converting a continuous estimate of the generation interval distribution into a usable discrete time estimate is `EpiData`.
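+
+As a quick sanity check (illustrative only, our own addition), the resulting discrete pmf stored in the `EpiData` object constructed below can be inspected directly:
+
+```julia
+# model_data.gen_int is a vector of daily probabilities g₁, g₂, …
+all(≥(0), model_data.gen_int)   # non-negative probabilities
+sum(model_data.gen_int) ≈ 1     # normalised to sum to one
+```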
+"
+
+# ╔═╡ 99c9ba2c-20a5-4c7f-94d2-272d6c9d5904
+model_data = EpiData(gen_distribution = truth_GI)
+
+# ╔═╡ 3c9849a8-1361-49e7-8b4e-cc4035b3fc70
+md"
+We can compare the discretized generation interval with the continuous estimate, which in this example is the serial interval estimate.
+"
+
+# ╔═╡ 71d08f7e-c409-4fbe-b154-b21d09010683
+let
+ fig = Figure()
+ ax = Axis(fig[1, 1];
+ xticks = 0:14,
+ xlabel = "Days",
+ title = "Continuous and discrete generation intervals"
+ )
+ barplot!(ax, model_data.gen_int;
+ label = "Discretized next gen pmf"
+ )
+ lines!(truth_GI;
+ label = "Continuous serial interval",
+ color = :green
+ )
+ axislegend(ax)
+ fig
+end
+
+# ╔═╡ 4a2b5cf1-623c-4fe7-8365-49fb7972af5a
+md"
+The user also needs to specify a prior for the log incidence at time zero, $\log I_0$. The initial _history_ of latent infections $I_{-1}, I_{-2},\dots$ is constructed as
+```math
+I_t = e^{rt} I_0,\qquad t = 0, -1, -2,...
+```
+where the exponential growth rate $r$ is determined by the initial reproductive number $R_1$ via the solution to the implicit equation
+```math
+R_1 = 1 \Big{/} \sum_{t\geq 1} e^{-rt} g_t
+```
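+
+A minimal sketch of this relationship (a hypothetical helper using bisection, not the internal `EpiAware` routine) might look like:
+
+```julia
+# Hypothetical helper: solve R₁ = 1 / Σₜ exp(-r t) gₜ for r by bisection.
+function growth_rate_from_R1(R1, g; lo = -1.0, hi = 1.0, iters = 60)
+    f(r) = R1 * sum(exp(-r * t) * g[t] for t in eachindex(g)) - 1
+    for _ in 1:iters
+        mid = (lo + hi) / 2
+        f(lo) * f(mid) <= 0 ? (hi = mid) : (lo = mid)
+    end
+    return (lo + hi) / 2
+end
+# e.g. growth_rate_from_R1(2.0, model_data.gen_int) gives r > 0, since R₁ > 1
+```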
+"
+
+# ╔═╡ 9e49d451-946b-430b-bcdb-1ef4bba55a4b
+log_I0_prior = Normal(log(1.0), 1.0)
+
+# ╔═╡ 8487835e-d430-4300-bd7c-e33f5769ee32
+epi = Renewal(model_data; initialisation_prior = log_I0_prior)
+
+# ╔═╡ 2119319f-a2ef-4c96-82c4-3c7eaf40d2e0
+md"
+_NB: We don't implement a background infection rate in this model._
+"
+
+# ╔═╡ 51b5d5b6-3ad3-4967-ad1d-b1caee201fcb
+md"
+### `Turing` model interface to `Renewal` process
+
+As mentioned above, we can use this instance of the `Renewal` latent infection model to construct a `Turing` `Model` which implements the probabilistic behaviour determined by `epi`. We do this with the constructor function `generate_latent_infs`, which combines `epi` with a provided $\log R_t$ time series.
+
+Here we choose an example where $R_t$ decreases from $R_t = 3$ to $R_t = 0.5$ over the course of 50 days.
+"
+
+# ╔═╡ 9e564a6e-f521-41e8-8604-6a9d73af9ba7
+R_t_fixed = [0.5 + 2.5 / (1 + exp(t - 15)) for t in 1:50]
+
+# ╔═╡ 72bdb47d-4967-4f20-9ae5-01f82e7b32c5
+latent_inf_mdl = generate_latent_infs(epi, log.(R_t_fixed))
+
+# ╔═╡ 7a6d4b14-58d3-40c1-81f2-713c830f875f
+plt_epi = let
+ n_samples = 100
+    #Sample latent infection trajectories (parameters drawn from their priors)
+ epi_mdl_samples = mapreduce(hcat, 1:n_samples) do _
+ latent_inf_mdl()
+ end
+ fig = Figure()
+ ax1 = Axis(fig[1, 1];
+ title = "$(n_samples) draws from renewal model with chosen Rt",
+ ylabel = "Latent infections"
+ )
+ ax2 = Axis(fig[2, 1];
+ ylabel = "Rt"
+ )
+ for col in eachcol(epi_mdl_samples)
+ lines!(ax1, col;
+ color = (:grey, 0.1)
+ )
+ end
+ lines!(ax2, R_t_fixed;
+ linewidth = 2
+ )
+ fig
+end
+
+# ╔═╡ c8ef8a60-d087-4ae9-ae92-abeea5afc7ae
+md"
+### Negative Binomial Observations as an `ObservationModel` type
+
+In _Mishra et al_ latent infections were assumed to occur on their observation day, with negative binomial errors; this motivates using the serial interval (the time between symptom onset in a primary and a secondary case) rather than the generation interval distribution (the time between infection of a primary and a secondary case).
+
+Observation models are specified in `EpiAware` as concrete subtypes of `ObservationModel`. The negative binomial error model without observation delays is specified with a `NegativeBinomialError` struct. In _Mishra et al_ the overdispersion parameter $\phi$ sets the relationship between the mean and variance of the negative binomial errors,
+```math
+\text{var} = \text{mean} + {\text{mean}^2 \over \phi}.
+```
+In `EpiAware`, we default to a prior on $\sqrt{1/\phi}$ because this quantity is approximately the coefficient of variation of the observation noise and is, therefore, easier to reason about in terms of _a priori_ beliefs. We call this quantity the cluster factor.
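+
+As a quick numerical illustration of this interpretation (our own arithmetic, not part of the model):
+
+```julia
+# For a large mean, the coefficient of variation of the negative binomial is ≈ √(1/ϕ).
+mean_cases = 100.0
+cluster_factor = 0.25                        # i.e. √(1/ϕ)
+ϕ = 1 / cluster_factor^2                     # ϕ = 16
+var_cases = mean_cases + mean_cases^2 / ϕ    # 100 + 625 = 725
+cv = sqrt(var_cases) / mean_cases            # ≈ 0.27, close to the cluster factor
+```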
+
+A prior for $\phi$ was not specified in _Mishra et al_; we select one below, but we will condition on a fixed value in the analysis further down.
+"
+
+# ╔═╡ 714908a1-dc85-476f-a99f-ec5c95a78b60
+obs = NegativeBinomialError(cluster_factor_prior = HalfNormal(0.1))
+
+# ╔═╡ dacb8094-89a4-404a-8243-525c0dbfa482
+md"
+### `Turing` model interface to the `NegativeBinomialError` model
+
+We can construct a `NegativeBinomialError` model implementation as a `Turing` `Model` using the `EpiAware` `generate_observations` functions.
+
+`Turing` uses `missing` arguments to indicate variables that are to be sampled. We use this to obtain a forward model that samples observations, conditional on an underlying expected observation time series.
+"
+
+# ╔═╡ d45f34e2-64f0-4828-ae0d-7b4cb3a3287d
+md"
+First, we set an artificial expected cases curve.
+"
+
+# ╔═╡ 2e0e8bf3-f34b-44bc-aa2d-046e1db6ee2d
+expected_cases = [1000 * exp(-(t - 15)^2 / (2 * 4)) for t in 1:30]
+
+# ╔═╡ 55c639f6-b47b-47cf-a3d6-547e793c72bc
+obs_mdl = generate_observations(obs, missing, expected_cases)
+
+# ╔═╡ c3a62dda-e054-4c8c-b1b8-ba1b5c4447b3
+plt_obs = let
+ n_samples = 100
+ obs_mdl_samples = mapreduce(hcat, 1:n_samples) do _
+        obs_mdl() #Sample observations from the model (parameters drawn from their priors)
+ end
+ fig = Figure()
+ ax = Axis(fig[1, 1];
+ title = "$(n_samples) draws from neg. bin. obs model",
+ ylabel = "Observed cases"
+ )
+ for col in eachcol(obs_mdl_samples)
+ scatter!(ax, col;
+ color = (:grey, 0.2)
+ )
+ end
+ lines!(ax, expected_cases;
+ color = :red,
+ linewidth = 3,
+ label = "Expected cases"
+ )
+ axislegend(ax)
+ fig
+end
+
+# ╔═╡ a06065e1-0e20-4cf8-8d5a-2d588da20bee
+md"
+## Composing models into an `EpiProblem`
+
+_Mishra et al_ follows a common pattern: an infection-generating process driven by a latent process, with an observation model that links the infection process to a discrete-valued time series of incidence data.
+
+In `EpiAware` we provide an `EpiProblem` constructor for this common epidemiological model pattern.
+
+The constructor for an `EpiProblem` requires:
+- An `epi_model`.
+- A `latent_model`.
+- An `observation_model`.
+- A `tspan`.
+
+The `tspan` sets the range of the time index for the models.
+"
+
+# ╔═╡ eaad5f46-e928-47c2-90ec-2cca3871c75d
+epi_prob = EpiProblem(epi_model = epi,
+ latent_model = ar,
+ observation_model = obs,
+ tspan = (45, 80))
+
+# ╔═╡ 2678f062-36ec-40a3-bd85-7b57a08fd809
+md"
+## Inference Methods
+
+We make inferences on unobserved quantities, such as $R_t$, by sampling from the model conditioned on the observed data. We generate the posterior samples using the No-U-Turn Sampler (NUTS).
+
+To make NUTS more robust we provide `manypathfinder`, which is built on pathfinder variational inference from [Pathfinder.jl](https://mlcolab.github.io/Pathfinder.jl/stable/). `manypathfinder` runs `nruns` pathfinder processes on the inference problem and returns the pathfinder run with maximum estimated ELBO.
+
+The composition of running variational inference as a pre-sampler step, whose result is passed to NUTS initialisation, is defined using the `EpiMethod` struct, in which a sequence of pre-sampler steps can be defined.
+
+`EpiMethod` also allows the specification of NUTS parameters, such as the type of automatic differentiation, the type of parallelism and the number of parallel chains to sample.
+"
+
+# ╔═╡ 58f6f0bd-f1e4-459f-84b0-8d89831c8d7b
+num_threads = min(10, Threads.nthreads())
+
+# ╔═╡ 88b43e23-1e06-4716-b284-76e8afc6171b
+inference_method = EpiMethod(
+ pre_sampler_steps = [ManyPathfinder(nruns = 4, maxiters = 100)],
+ sampler = NUTSampler(
+ adtype = AutoReverseDiff(compile = true),
+ ndraws = 2000,
+ nchains = num_threads,
+ mcmc_parallel = MCMCThreads())
+)
+
+# ╔═╡ 92333a96-5c9b-46e1-9a8f-f1890831066b
+md"
+## Inference and analysis
+We supply the data as a `NamedTuple` with the `y_t` field containing the observed data, shortened to fit the chosen `tspan` of `epi_prob`.
+"
+
+# ╔═╡ c7140b20-e030-4dc4-97bc-0efc0ff59631
+south_korea_data = (y_t = data.cases_new[epi_prob.tspan[1]:epi_prob.tspan[2]],
+ dates = data.date[epi_prob.tspan[1]:epi_prob.tspan[2]])
+
+# ╔═╡ f6c168e5-6933-4bd7-bf71-35a37551d040
+md"
+In this epidemiological model it is hard to identify between parameters such as the standard deviation of the AR process and the cluster factor of the negative binomial observation model. The reason for this identifiability problem is that the model assumes no delay between infection and observation. Therefore, on any day the data could be explained by $R_t$ changing _or_ by observation noise, and it is not easy to disentangle greater volatility in $R_t$ from higher noise in the observations.
+
+In models with latent delays, changes in $R_t$ affect the observed cases over several days, which makes it easier to disentangle trend effects from observation-to-observation fluctuations.
+
+To counteract this problem we condition the model on a fixed cluster factor value.
+"
+
+# ╔═╡ 9cbacc02-9c76-41eb-9c75-fec667b60829
+fixed_cluster_factor = 0.25
+
+# ╔═╡ b2074ff2-562d-44e6-b4b4-7a77c0f85c16
+md"
+`EpiAware` has the `generate_epiaware` function, which joins an `EpiProblem` object with the data to produce a `Turing` model. This `Turing` model composes the three unit `Turing` models defined above: the Renewal infection generating process, the AR latent process for $\log R_t$, and the negative binomial observation model. Therefore, [we can condition on variables as with any other `Turing` model](https://turinglang.org/DynamicPPL.jl/stable/api/#Condition-and-decondition).
+"
+
+# ╔═╡ fe47748e-151b-4819-987a-07cf35e6cc80
+mdl = generate_epiaware(epi_prob, south_korea_data) |
+ (var"obs.cluster_factor" = fixed_cluster_factor,)
+
+# ╔═╡ 9970adfd-ee88-4598-87a3-ffde5297031c
+md"
+### Sampling with `apply_method`
+
+The `apply_method` function combines the elements above:
+- An `EpiProblem` object or `Turing` model.
+- An `EpiMethod` object.
+- Data to condition the model upon.
+
+And returns a collection of results:
+- The epidemiological model as a `Turing` `Model`.
+- Samples from MCMC.
+- Generated quantities of the model.
+"
+
+# ╔═╡ 3d10379a-3bb4-474c-ad20-de767b82d52b
+inference_results = apply_method(mdl,
+ inference_method,
+ south_korea_data
+)
+
+# ╔═╡ 5e6f505b-49fe-4ff4-ac2e-f6adcd445569
+md"
+### Results and Predictive plotting
+
+To assess the quality of the inference visually we can plot predictive quantiles for generated case data from the version of the model _which is not conditioned on case data_, using posterior parameters inferred from the version conditioned on observed data. For this purpose, we add a `generated_quantiles` utility function. This kind of visualisation is known as _posterior predictive checking_, and is a useful diagnostic tool for Bayesian inference (see [here](http://www.stat.columbia.edu/~gelman/book/BDA3.pdf)).
+
+We also plot the inferred $R_t$ estimates from the model. We find that the `EpiAware` model recovers the main finding in _Mishra et al_; that the $R_t$ in South Korea peaked at a very high value ($R_t \sim 10$ at peak) before rapidly dropping below 1 in early March 2020.
+
+Note that the peak $R_t$ found here and in _Mishra et al_ is unrealistically high; this might be due to a combination of:
+- A mis-estimated generation interval/serial interval distribution.
+- An ascertainment rate that was, in reality, changing over time.
+
+In a future note, we'll demonstrate having a time-varying ascertainment rate.
+"
+
+# ╔═╡ aa1d8b72-a3d2-4844-bb43-406b98b2648f
+function generated_quantiles(gens, quantity, qs; transformation = x -> x)
+ mapreduce(hcat, gens) do gen #loop over sampled generated quantities
+ getfield(gen, quantity) |> transformation
+ end |> mat -> mapreduce(hcat, qs) do q #Loop over matrix row to condense into qs
+ map(eachrow(mat)) do row
+ if any(ismissing, row)
+ return missing
+ else
+ quantile(row, q)
+ end
+ end
+ end
+end
+
+# ╔═╡ 8b557bf1-f3dd-4f42-a250-ce965412eb32
+let
+ C = south_korea_data.y_t
+ D = south_korea_data.dates
+
+ #Case unconditional model for posterior predictive sampling
+ mdl_unconditional = generate_epiaware(epi_prob,
+ (y_t = fill(missing, length(C)),)
+ ) | (var"obs.cluster_factor" = fixed_cluster_factor,)
+ posterior_gens = generated_quantities(mdl_unconditional, inference_results.samples)
+
+ #plotting quantiles
+ qs = [0.025, 0.25, 0.5, 0.75, 0.975]
+
+ #Prediction quantiles
+ predicted_y_t = generated_quantiles(posterior_gens, :generated_y_t, qs)
+ predicted_R_t = generated_quantiles(
+ posterior_gens, :Z_t, qs; transformation = x -> exp.(x))
+
+ ts = D .|> d -> d - minimum(D) .|> d -> d.value + 1
+ t_ticks = string.(D)
+ fig = Figure()
+ ax1 = Axis(fig[1, 1];
+ ylabel = "Daily cases",
+ xticks = (ts[1:14:end], t_ticks[1:14:end]),
+ title = "Posterior predictive: Cases"
+ )
+ ax2 = Axis(fig[2, 1];
+ yscale = log10,
+ title = "Prediction: Reproduction number",
+ xticks = (ts[1:14:end], t_ticks[1:14:end])
+ )
+ linkxaxes!(ax1, ax2)
+
+ lines!(ax1, ts, predicted_y_t[:, 3];
+ color = :purple,
+ linewidth = 2,
+ label = "Post. median"
+ )
+ band!(ax1, 1:size(predicted_y_t, 1), predicted_y_t[:, 2], predicted_y_t[:, 4];
+ color = (:purple, 0.4),
+ label = "50%"
+ )
+ band!(ax1, 1:size(predicted_y_t, 1), predicted_y_t[:, 1], predicted_y_t[:, 5];
+ color = (:purple, 0.2),
+ label = "95%"
+ )
+ scatter!(ax1, C;
+ color = :black,
+ label = "Actual cases")
+ axislegend(ax1)
+
+ lines!(ax2, ts, predicted_R_t[:, 3];
+ color = :green,
+ linewidth = 2,
+ label = "Post. median"
+ )
+ band!(ax2, 1:size(predicted_R_t, 1), predicted_R_t[:, 2], predicted_R_t[:, 4];
+ color = (:green, 0.4),
+ label = "50%"
+ )
+ band!(ax2, 1:size(predicted_R_t, 1), predicted_R_t[:, 1], predicted_R_t[:, 5];
+ color = (:green, 0.2),
+ label = "95%"
+ )
+ axislegend(ax2)
+
+ fig
+end
+
+# ╔═╡ c05ed977-7a89-4ac8-97be-7078d69fce9f
+md"
+### Parameter inference
+
+We can interrogate the sampled chains directly from the `samples` field of the `inference_results` object.
+"
+
+# ╔═╡ ff21c9ec-1581-405f-8db1-0f522b5bc296
+let
+ sub_chn = inference_results.samples[inference_results.samples.name_map.parameters[[1:5;
+ end]]]
+ fig = pairplot(sub_chn)
+ lines!(fig[1, 1], ar.std_prior, label = "Prior")
+ lines!(fig[2, 2], ar.init_prior.v[1], label = "Prior")
+ lines!(fig[3, 3], ar.init_prior.v[2], label = "Prior")
+ lines!(fig[4, 4], ar.damp_prior.v[1], label = "Prior")
+ lines!(fig[5, 5], ar.damp_prior.v[2], label = "Prior")
+ lines!(fig[6, 6], epi.initialisation_prior, label = "Prior")
+
+ fig
+end
+
+# ╔═╡ Cell order:
+# ╟─8a8d5682-2f89-443b-baf0-d4d3b134d311
+# ╟─34a06b3b-799b-48c5-bd08-1e57151f51ec
+# ╟─27d73202-a93e-4471-ab50-d59345304a0b
+# ╠═d63b37f0-9642-4c38-ac01-9ffe48d50441
+# ╠═74642759-35a5-4957-9f2b-544712866410
+# ╠═0c5f413e-d043-448d-8665-f0f6f705d70f
+# ╠═b1e2a759-a52b-4ee5-8db4-cfe848878c92
+# ╠═9eb03a0b-c6ca-4e23-8109-fb68f87d7fdf
+# ╠═97b5374e-7653-4b3b-98eb-d8f73aa30580
+# ╠═1642dbda-4915-4e29-beff-bca592f3ec8d
+# ╟─1d3b9541-80ad-41b5-a5ed-a947f5c0731b
+# ╠═4e5e0e24-8c55-4cb4-be3a-d28198f81a69
+# ╠═a59d977c-0178-11ef-0063-83e30e0cf9f0
+# ╟─104f4d16-7433-4a2d-89e7-288a9b223563
+# ╟─d753b21f-cf8e-4a25-bab3-46c811c80a78
+# ╟─d201c82b-8efd-41e2-96d7-4f5e0c67088c
+# ╠═c88bbbd6-0101-4c04-97c9-c5887ef23999
+# ╟─31ee2757-0409-45df-b193-60c552797a3d
+# ╠═2bf22866-b785-4ee0-953d-ac990a197561
+# ╟─25e25125-8587-4451-8600-9b55a04dbcd9
+# ╠═fbe117b7-a0b8-4604-a5dd-e71a0a1a4fc3
+# ╟─9f84dec1-70f1-442e-8bef-a9494921549e
+# ╟─6a9e871f-a2fa-4e41-af89-8b0b3c3b5b4b
+# ╠═c1fc1929-0624-45c0-9a89-86c8479b2675
+# ╟─ab0c6bec-1ab7-43d1-aa59-11225dea79eb
+# ╠═99c9ba2c-20a5-4c7f-94d2-272d6c9d5904
+# ╟─3c9849a8-1361-49e7-8b4e-cc4035b3fc70
+# ╠═71d08f7e-c409-4fbe-b154-b21d09010683
+# ╟─4a2b5cf1-623c-4fe7-8365-49fb7972af5a
+# ╠═9e49d451-946b-430b-bcdb-1ef4bba55a4b
+# ╠═8487835e-d430-4300-bd7c-e33f5769ee32
+# ╟─2119319f-a2ef-4c96-82c4-3c7eaf40d2e0
+# ╟─51b5d5b6-3ad3-4967-ad1d-b1caee201fcb
+# ╠═9e564a6e-f521-41e8-8604-6a9d73af9ba7
+# ╠═72bdb47d-4967-4f20-9ae5-01f82e7b32c5
+# ╠═7a6d4b14-58d3-40c1-81f2-713c830f875f
+# ╟─c8ef8a60-d087-4ae9-ae92-abeea5afc7ae
+# ╠═714908a1-dc85-476f-a99f-ec5c95a78b60
+# ╟─dacb8094-89a4-404a-8243-525c0dbfa482
+# ╟─d45f34e2-64f0-4828-ae0d-7b4cb3a3287d
+# ╠═2e0e8bf3-f34b-44bc-aa2d-046e1db6ee2d
+# ╠═55c639f6-b47b-47cf-a3d6-547e793c72bc
+# ╠═c3a62dda-e054-4c8c-b1b8-ba1b5c4447b3
+# ╟─a06065e1-0e20-4cf8-8d5a-2d588da20bee
+# ╠═eaad5f46-e928-47c2-90ec-2cca3871c75d
+# ╟─2678f062-36ec-40a3-bd85-7b57a08fd809
+# ╠═58f6f0bd-f1e4-459f-84b0-8d89831c8d7b
+# ╠═88b43e23-1e06-4716-b284-76e8afc6171b
+# ╟─92333a96-5c9b-46e1-9a8f-f1890831066b
+# ╠═c7140b20-e030-4dc4-97bc-0efc0ff59631
+# ╟─f6c168e5-6933-4bd7-bf71-35a37551d040
+# ╠═9cbacc02-9c76-41eb-9c75-fec667b60829
+# ╟─b2074ff2-562d-44e6-b4b4-7a77c0f85c16
+# ╠═fe47748e-151b-4819-987a-07cf35e6cc80
+# ╟─9970adfd-ee88-4598-87a3-ffde5297031c
+# ╠═3d10379a-3bb4-474c-ad20-de767b82d52b
+# ╟─5e6f505b-49fe-4ff4-ac2e-f6adcd445569
+# ╠═aa1d8b72-a3d2-4844-bb43-406b98b2648f
+# ╠═8b557bf1-f3dd-4f42-a250-ce965412eb32
+# ╟─c05ed977-7a89-4ac8-97be-7078d69fce9f
+# ╠═ff21c9ec-1581-405f-8db1-0f522b5bc296
diff --git a/previews/PR513/showcase/replications/mishra-2020/south_korea_data.csv2 b/previews/PR513/showcase/replications/mishra-2020/south_korea_data.csv2
new file mode 100644
index 000000000..f71324b90
--- /dev/null
+++ b/previews/PR513/showcase/replications/mishra-2020/south_korea_data.csv2
@@ -0,0 +1,215 @@
+"","date","cases_new","deaths_new"
+"1",2019-12-31,0,0
+"2",2020-01-01,0,0
+"3",2020-01-02,0,0
+"4",2020-01-03,0,0
+"5",2020-01-04,0,0
+"6",2020-01-05,0,0
+"7",2020-01-06,0,0
+"8",2020-01-07,0,0
+"9",2020-01-08,0,0
+"10",2020-01-09,0,0
+"11",2020-01-10,0,0
+"12",2020-01-11,0,0
+"13",2020-01-12,0,0
+"14",2020-01-13,0,0
+"15",2020-01-14,0,0
+"16",2020-01-15,0,0
+"17",2020-01-16,0,0
+"18",2020-01-17,0,0
+"19",2020-01-18,0,0
+"20",2020-01-19,0,0
+"21",2020-01-20,1,0
+"22",2020-01-21,0,0
+"23",2020-01-22,0,0
+"24",2020-01-23,0,0
+"25",2020-01-24,1,0
+"26",2020-01-25,0,0
+"27",2020-01-26,1,0
+"28",2020-01-27,1,0
+"29",2020-01-28,0,0
+"30",2020-01-29,0,0
+"31",2020-01-30,0,0
+"32",2020-01-31,3,0
+"33",2020-02-01,5,0
+"34",2020-02-02,3,0
+"35",2020-02-03,0,0
+"36",2020-02-04,1,0
+"37",2020-02-05,2,0
+"38",2020-02-06,5,0
+"39",2020-02-07,1,0
+"40",2020-02-08,0,0
+"41",2020-02-09,1,0
+"42",2020-02-10,2,0
+"43",2020-02-11,1,0
+"44",2020-02-12,0,0
+"45",2020-02-13,0,0
+"46",2020-02-14,0,0
+"47",2020-02-15,0,0
+"48",2020-02-16,1,0
+"49",2020-02-17,1,0
+"50",2020-02-18,1,0
+"51",2020-02-19,15,0
+"52",2020-02-20,34,0
+"53",2020-02-21,75,1
+"54",2020-02-22,190,1
+"55",2020-02-23,256,3
+"56",2020-02-24,161,2
+"57",2020-02-25,130,1
+"58",2020-02-26,254,3
+"59",2020-02-27,449,1
+"60",2020-02-28,427,1
+"61",2020-02-29,909,3
+"62",2020-03-01,595,1
+"63",2020-03-02,686,5
+"64",2020-03-03,600,6
+"65",2020-03-04,516,4
+"66",2020-03-05,438,3
+"67",2020-03-06,518,7
+"68",2020-03-07,483,2
+"69",2020-03-08,367,6
+"70",2020-03-09,248,1
+"71",2020-03-10,131,3
+"72",2020-03-11,242,6
+"73",2020-03-12,114,6
+"74",2020-03-13,110,1
+"75",2020-03-14,107,5
+"76",2020-03-15,76,3
+"77",2020-03-16,74,0
+"78",2020-03-17,84,6
+"79",2020-03-18,93,5
+"80",2020-03-19,152,5
+"81",2020-03-20,87,9
+"82",2020-03-21,147,3
+"83",2020-03-22,98,1
+"84",2020-03-23,64,9
+"85",2020-03-24,76,7
+"86",2020-03-25,100,6
+"87",2020-03-26,104,5
+"88",2020-03-27,91,8
+"89",2020-03-28,146,5
+"90",2020-03-29,105,8
+"91",2020-03-30,78,6
+"92",2020-03-31,125,5
+"93",2020-04-01,0,0
+"94",2020-04-02,190,6
+"95",2020-04-03,86,5
+"96",2020-04-04,94,3
+"97",2020-04-05,81,6
+"98",2020-04-06,47,3
+"99",2020-04-07,47,6
+"100",2020-04-08,53,8
+"101",2020-04-09,39,4
+"102",2020-04-10,27,4
+"103",2020-04-11,0,0
+"104",2020-04-12,62,6
+"105",2020-04-13,25,3
+"106",2020-04-14,27,5
+"107",2020-04-15,27,3
+"108",2020-04-16,22,4
+"109",2020-04-17,22,1
+"110",2020-04-18,18,2
+"111",2020-04-19,8,2
+"112",2020-04-20,13,2
+"113",2020-04-21,9,1
+"114",2020-04-22,11,1
+"115",2020-04-23,8,2
+"116",2020-04-24,6,0
+"117",2020-04-25,10,0
+"118",2020-04-26,10,2
+"119",2020-04-27,10,1
+"120",2020-04-28,14,1
+"121",2020-04-29,9,2
+"122",2020-04-30,4,1
+"123",2020-05-01,9,1
+"124",2020-05-02,6,2
+"125",2020-05-03,13,0
+"126",2020-05-04,8,2
+"127",2020-05-05,3,2
+"128",2020-05-06,2,1
+"129",2020-05-07,4,1
+"130",2020-05-08,12,0
+"131",2020-05-09,18,0
+"132",2020-05-10,34,0
+"133",2020-05-11,35,0
+"134",2020-05-12,27,2
+"135",2020-05-13,26,1
+"136",2020-05-14,29,1
+"137",2020-05-15,27,0
+"138",2020-05-16,19,2
+"139",2020-05-17,13,0
+"140",2020-05-18,15,1
+"141",2020-05-19,13,0
+"142",2020-05-20,32,0
+"143",2020-05-21,12,1
+"144",2020-05-22,20,0
+"145",2020-05-23,23,2
+"146",2020-05-24,25,0
+"147",2020-05-25,16,1
+"148",2020-05-26,19,2
+"149",2020-05-27,40,0
+"150",2020-05-28,79,0
+"151",2020-05-29,58,0
+"152",2020-05-30,39,0
+"153",2020-05-31,27,1
+"154",2020-06-01,35,1
+"155",2020-06-02,38,1
+"156",2020-06-03,49,1
+"157",2020-06-04,39,0
+"158",2020-06-05,39,0
+"159",2020-06-06,51,0
+"160",2020-06-07,57,0
+"161",2020-06-08,38,0
+"162",2020-06-09,38,1
+"163",2020-06-10,50,2
+"164",2020-06-11,45,0
+"165",2020-06-12,56,1
+"166",2020-06-13,48,0
+"167",2020-06-14,33,0
+"168",2020-06-15,37,0
+"169",2020-06-16,34,1
+"170",2020-06-17,43,1
+"171",2020-06-18,59,1
+"172",2020-06-19,49,0
+"173",2020-06-20,67,0
+"174",2020-06-21,48,0
+"175",2020-06-22,17,0
+"176",2020-06-23,46,1
+"177",2020-06-24,51,0
+"178",2020-06-25,28,1
+"179",2020-06-26,39,0
+"180",2020-06-27,51,0
+"181",2020-06-28,62,0
+"182",2020-06-29,42,0
+"183",2020-06-30,43,0
+"184",2020-07-01,50,0
+"185",2020-07-02,54,0
+"186",2020-07-03,63,0
+"187",2020-07-04,63,1
+"188",2020-07-05,61,0
+"189",2020-07-06,0,0
+"190",2020-07-07,90,2
+"191",2020-07-08,63,0
+"192",2020-07-09,49,2
+"193",2020-07-10,45,1
+"194",2020-07-11,35,0
+"195",2020-07-12,44,1
+"196",2020-07-13,62,0
+"197",2020-07-14,33,0
+"198",2020-07-15,39,0
+"199",2020-07-16,61,2
+"200",2020-07-17,60,2
+"201",2020-07-18,39,1
+"202",2020-07-19,34,1
+"203",2020-07-20,26,1
+"204",2020-07-21,45,0
+"205",2020-07-22,63,1
+"206",2020-07-23,59,0
+"207",2020-07-24,41,1
+"208",2020-07-25,113,0
+"209",2020-07-26,58,0
+"210",2020-07-27,25,1
+"211",2020-07-28,28,1
+"212",2020-07-29,48,0
+"213",2020-07-30,18,0
+"214",2020-07-31,36,1
diff --git a/previews/PR513/siteinfo.js b/previews/PR513/siteinfo.js
new file mode 100644
index 000000000..cd646800c
--- /dev/null
+++ b/previews/PR513/siteinfo.js
@@ -0,0 +1 @@
+var DOCUMENTER_CURRENT_VERSION = "previews/PR513";