diff --git a/cmdstanpy/_version.py b/cmdstanpy/_version.py
index d221d8be..06a2851d 100644
--- a/cmdstanpy/_version.py
+++ b/cmdstanpy/_version.py
@@ -1,3 +1,3 @@
 """PyPi Version"""
-__version__ = '1.2.1'
+__version__ = '1.2.2'
diff --git a/docs/_modules/cmdstanpy/cmdstan_args.html b/docs/_modules/cmdstanpy/cmdstan_args.html
index 92a94c40..9df74f14 100644
--- a/docs/_modules/cmdstanpy/cmdstan_args.html
+++ b/docs/_modules/cmdstanpy/cmdstan_args.html
@@ -5,7 +5,7 @@
- cmdstanpy.cmdstan_args — CmdStanPy 1.2.1 documentation
+ cmdstanpy.cmdstan_args — CmdStanPy 1.2.2 documentation
if not (
isinstance(self.method_args, SamplerArgs)
and self.method_args.num_chains > 1
+ or isinstance(self.method_args, PathfinderArgs)
):
if not os.path.exists(self.inits):
raise ValueError('no such file {}'.format(self.inits))
diff --git a/docs/_modules/cmdstanpy/compilation.html b/docs/_modules/cmdstanpy/compilation.html
index fa38fe5e..69f84faf 100644
--- a/docs/_modules/cmdstanpy/compilation.html
+++ b/docs/_modules/cmdstanpy/compilation.html
@@ -5,7 +5,7 @@
- cmdstanpy.compilation — CmdStanPy 1.2.1 documentation
+ cmdstanpy.compilation — CmdStanPy 1.2.2 documentation
@@ -37,6 +37,7 @@
+
@@ -60,7 +61,7 @@
@@ -182,11 +183,17 @@ Source code for cmdstanpy.compilation
import shutil
import subprocess
from copy import copy
+from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, Union
from cmdstanpy.utils import get_logger
-from cmdstanpy.utils.cmdstan import EXTENSION, cmdstan_path
+from cmdstanpy.utils.cmdstan import (
+ EXTENSION,
+ cmdstan_path,
+ cmdstan_version,
+ cmdstan_version_before,
+)
from cmdstanpy.utils.command import do_command
from cmdstanpy.utils.filesystem import SanitizedOrTmpFilePath
@@ -234,7 +241,7 @@ Source code for cmdstanpy.compilation
# TODO(2.0): can remove add function and other logic
-[docs]class CompilerOptions:
+[docs]class CompilerOptions:
"""
User-specified flags for stanc and C++ compiler.
@@ -273,7 +280,7 @@ Source code for cmdstanpy.compilation
and self._user_header == other.user_header
)
-[docs] def is_empty(self) -> bool:
+[docs] def is_empty(self) -> bool:
"""True if no options specified."""
return (
self._stanc_options == {}
@@ -296,7 +303,7 @@ Source code for cmdstanpy.compilation
"""user header."""
return self._user_header
-[docs] def validate(self) -> None:
+[docs] def validate(self) -> None:
"""
Check compiler args.
Raise ValueError if invalid options are found.
@@ -305,7 +312,7 @@ Source code for cmdstanpy.compilation
self.validate_cpp_opts()
self.validate_user_header()
-[docs] def validate_stanc_opts(self) -> None:
+[docs] def validate_stanc_opts(self) -> None:
"""
Check stanc compiler args and consistency between stanc and C++ options.
Raise ValueError if bad config is found.
@@ -378,7 +385,7 @@ Source code for cmdstanpy.compilation
os.path.abspath(os.path.expanduser(path)) for path in paths
]
-[docs] def validate_cpp_opts(self) -> None:
+[docs] def validate_cpp_opts(self) -> None:
"""
Check cpp compiler args.
Raise ValueError if bad config is found.
@@ -395,7 +402,7 @@ Source code for cmdstanpy.compilation
f' found {val}.'
)
-[docs] def validate_user_header(self) -> None:
+[docs] def validate_user_header(self) -> None:
"""
User header exists.
Raise ValueError if bad config is found.
@@ -433,7 +440,7 @@ Source code for cmdstanpy.compilation
self._cpp_options['USER_HEADER'] = self._user_header
-[docs] def add(self, new_opts: "CompilerOptions") -> None: # noqa: disable=Q000
+[docs] def add(self, new_opts: "CompilerOptions") -> None: # noqa: disable=Q000
"""Adds options to existing set of compiler options."""
if new_opts.stanc_options is not None:
if self._stanc_options is None:
@@ -456,7 +463,7 @@ Source code for cmdstanpy.compilation
if new_opts._user_header != '' and self._user_header == '':
self._user_header = new_opts._user_header
-[docs] def add_include_path(self, path: str) -> None:
+[docs] def add_include_path(self, path: str) -> None:
"""Adds include path to existing set of compiler options."""
path = os.path.abspath(os.path.expanduser(path))
if 'include-paths' not in self._stanc_options:
@@ -488,7 +495,7 @@ Source code for cmdstanpy.compilation
opts.append(f'--{key}')
return opts
-[docs] def compose(self, filename_in_msg: Optional[str] = None) -> List[str]:
+[docs] def compose(self, filename_in_msg: Optional[str] = None) -> List[str]:
"""
Format makefile options as list of strings.
@@ -533,7 +540,7 @@ Source code for cmdstanpy.compilation
return result
-def compile_stan_file(
+[docs]def compile_stan_file(
src: Union[str, Path],
force: bool = False,
stanc_options: Optional[Dict[str, Any]] = None,
@@ -648,7 +655,102 @@ Source code for cmdstanpy.compilation
raise ValueError(
f"Failed to compile Stan model '{src}'. " f"Console:\n{console}"
)
- return str(exe_target)
+ return str(exe_target)
+
+
+[docs]def format_stan_file(
+ stan_file: Union[str, os.PathLike],
+ *,
+ overwrite_file: bool = False,
+ canonicalize: Union[bool, str, Iterable[str]] = False,
+ max_line_length: int = 78,
+ backup: bool = True,
+ stanc_options: Optional[Dict[str, Any]] = None,
+) -> None:
+ """
+ Run stanc's auto-formatter on the model code. Either saves directly
+ back to the file or prints for inspection
+
+ :param stan_file: Path to Stan program file.
+ :param overwrite_file: If True, save the updated code to disk, rather
+ than printing it. By default False
+ :param canonicalize: Whether or not the compiler should 'canonicalize'
+ the Stan model, removing things like deprecated syntax. Default is
+ False. If True, all canonicalizations are run. If it is a list of
+ strings, those options are passed to stanc (new in Stan 2.29)
+ :param max_line_length: Set the wrapping point for the formatter. The
+ default value is 78, which wraps most lines by the 80th character.
+ :param backup: If True, create a stanfile.bak backup before
+ writing to the file. Only disable this if you're sure you have other
+ copies of the file or are using a version control system like Git.
+ :param stanc_options: Additional options to pass to the stanc compiler.
+ """
+ stan_file = Path(stan_file).resolve()
+
+ if not stan_file.exists():
+ raise ValueError(f'File does not exist: {stan_file}')
+
+ try:
+ cmd = (
+ [os.path.join(cmdstan_path(), 'bin', 'stanc' + EXTENSION)]
+ # handle include-paths, allow-undefined etc
+ + CompilerOptions(stanc_options=stanc_options).compose_stanc(None)
+ + [str(stan_file)]
+ )
+
+ if canonicalize:
+ if cmdstan_version_before(2, 29):
+ if isinstance(canonicalize, bool):
+ cmd.append('--print-canonical')
+ else:
+ raise ValueError(
+ "Invalid arguments passed for current CmdStan"
+ + " version({})\n".format(
+ cmdstan_version() or "Unknown"
+ )
+ + "--canonicalize requires 2.29 or higher"
+ )
+ else:
+ if isinstance(canonicalize, str):
+ cmd.append('--canonicalize=' + canonicalize)
+ elif isinstance(canonicalize, Iterable):
+ cmd.append('--canonicalize=' + ','.join(canonicalize))
+ else:
+ cmd.append('--print-canonical')
+
+ # before 2.29, having both --print-canonical
+ # and --auto-format printed twice
+ if not (cmdstan_version_before(2, 29) and canonicalize):
+ cmd.append('--auto-format')
+
+ if not cmdstan_version_before(2, 29):
+ cmd.append(f'--max-line-length={max_line_length}')
+ elif max_line_length != 78:
+ raise ValueError(
+ "Invalid arguments passed for current CmdStan version"
+ + " ({})\n".format(cmdstan_version() or "Unknown")
+ + "--max-line-length requires 2.29 or higher"
+ )
+
+ out = subprocess.run(cmd, capture_output=True, text=True, check=True)
+ if out.stderr:
+ get_logger().warning(out.stderr)
+ result = out.stdout
+ if overwrite_file:
+ if result:
+ if backup:
+ shutil.copyfile(
+ stan_file,
+ str(stan_file)
+ + '.bak-'
+ + datetime.now().strftime("%Y%m%d%H%M%S"),
+ )
+ stan_file.write_text(result)
+ else:
+ print(result)
+
+ except (ValueError, RuntimeError) as e:
+ raise RuntimeError("Stanc formatting failed") from e
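A minimal usage sketch of the new module-level formatter, assuming a local file named bernoulli.stan (canonicalization and --max-line-length require CmdStan 2.29 or newer, matching the version checks above; the 'deprecations' canonicalizer option is an assumption for illustration):

```python
import cmdstanpy

# Print the canonicalized, auto-formatted program for inspection.
cmdstanpy.format_stan_file('bernoulli.stan', canonicalize=True)

# Rewrite the file in place, keeping a timestamped .bak-* copy first.
cmdstanpy.format_stan_file(
    'bernoulli.stan',
    overwrite_file=True,
    canonicalize=['deprecations'],   # assumed stanc canonicalizer option
    max_line_length=100,
    backup=True,
)
```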
diff --git a/docs/_modules/cmdstanpy/model.html b/docs/_modules/cmdstanpy/model.html
index 4f7ec869..8a75af9f 100644
--- a/docs/_modules/cmdstanpy/model.html
+++ b/docs/_modules/cmdstanpy/model.html
@@ -5,7 +5,7 @@
- cmdstanpy.model — CmdStanPy 1.2.1 documentation
+ cmdstanpy.model — CmdStanPy 1.2.2 documentation
@@ -37,6 +37,7 @@
+
@@ -60,7 +61,7 @@
@@ -183,7 +184,6 @@ Source code for cmdstanpy.model
import threading
from collections import OrderedDict
from concurrent.futures import ThreadPoolExecutor
-from datetime import datetime
from io import StringIO
from multiprocessing import cpu_count
from typing import (
@@ -230,9 +230,7 @@ Source code for cmdstanpy.model
from_csv,
)
from cmdstanpy.utils import (
- EXTENSION,
cmdstan_path,
- cmdstan_version,
cmdstan_version_before,
do_command,
get_logger,
@@ -493,6 +491,7 @@ Source code for cmdstanpy.model
return {}
return compilation.src_info(str(self.stan_file), self._compiler_options)
+ # TODO(2.0) remove
[docs] def format(
self,
overwrite_file: bool = False,
@@ -502,6 +501,8 @@ Source code for cmdstanpy.model
backup: bool = True,
) -> None:
"""
+ Deprecated: Use :func:`cmdstanpy.format_stan_file()` instead.
+
Run stanc's auto-formatter on the model code. Either saves directly
back to the file or prints for inspection
@@ -518,72 +519,24 @@ Source code for cmdstanpy.model
writing to the file. Only disable this if you're sure you have other
copies of the file or are using a version control system like Git.
"""
- if self.stan_file is None or not os.path.isfile(self.stan_file):
- raise ValueError("No Stan file found for this module")
- try:
- cmd = (
- [os.path.join(cmdstan_path(), 'bin', 'stanc' + EXTENSION)]
- # handle include-paths, allow-undefined etc
- + self._compiler_options.compose_stanc(None)
- + [str(self.stan_file)]
- )
- if canonicalize:
- if cmdstan_version_before(2, 29):
- if isinstance(canonicalize, bool):
- cmd.append('--print-canonical')
- else:
- raise ValueError(
- "Invalid arguments passed for current CmdStan"
- + " version({})\n".format(
- cmdstan_version() or "Unknown"
- )
- + "--canonicalize requires 2.29 or higher"
- )
- else:
- if isinstance(canonicalize, str):
- cmd.append('--canonicalize=' + canonicalize)
- elif isinstance(canonicalize, Iterable):
- cmd.append('--canonicalize=' + ','.join(canonicalize))
- else:
- cmd.append('--print-canonical')
-
- # before 2.29, having both --print-canonical
- # and --auto-format printed twice
- if not (cmdstan_version_before(2, 29) and canonicalize):
- cmd.append('--auto-format')
-
- if not cmdstan_version_before(2, 29):
- cmd.append(f'--max-line-length={max_line_length}')
- elif max_line_length != 78:
- raise ValueError(
- "Invalid arguments passed for current CmdStan version"
- + " ({})\n".format(cmdstan_version() or "Unknown")
- + "--max-line-length requires 2.29 or higher"
- )
+ get_logger().warning(
+ "CmdStanModel.format() is deprecated and will be "
+ "removed in the next major version.\n"
+ "Use cmdstanpy.format_stan_file() instead."
+ )
- out = subprocess.run(
- cmd, capture_output=True, text=True, check=True
- )
- if out.stderr:
- get_logger().warning(out.stderr)
- result = out.stdout
- if overwrite_file:
- if result:
- if backup:
- shutil.copyfile(
- self.stan_file,
- str(self.stan_file)
- + '.bak-'
- + datetime.now().strftime("%Y%m%d%H%M%S"),
- )
- with open(self.stan_file, 'w') as file_handle:
- file_handle.write(result)
- else:
- print(result)
+ if self.stan_file is None:
+ raise ValueError("No Stan file found for this module")
- except (ValueError, RuntimeError) as e:
- raise RuntimeError("Stanc formatting failed") from e
+ compilation.format_stan_file(
+ self.stan_file,
+ overwrite_file=overwrite_file,
+ max_line_length=max_line_length,
+ canonicalize=canonicalize,
+ backup=backup,
+ stanc_options=self.stanc_options,
+ )
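A short migration sketch for the deprecation above, assuming a local bernoulli.stan; the method and the new function take the same formatting arguments:

```python
from cmdstanpy import CmdStanModel, format_stan_file

# Old, deprecated style: logs a warning, then delegates as shown above.
model = CmdStanModel(stan_file='bernoulli.stan')
model.format(canonicalize=True)

# Preferred replacement: no CmdStanModel (or C++ compilation) required.
format_stan_file('bernoulli.stan', canonicalize=True)
```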
@property
def stanc_options(self) -> Dict[str, Union[bool, int, str]]:
@@ -1808,6 +1761,7 @@ Source code for cmdstanpy.model
refresh: Optional[int] = None,
time_fmt: str = "%Y%m%d%H%M%S",
timeout: Optional[float] = None,
+ num_threads: Optional[int] = None,
) -> CmdStanPathfinder:
"""
Run CmdStan's Pathfinder variational inference algorithm.
@@ -1910,6 +1864,10 @@ Source code for cmdstanpy.model
:param timeout: Duration at which Pathfinder times
out in seconds. Defaults to None.
+ :param num_threads: Number of threads to request for parallel execution.
+ A number other than ``1`` requires the model to have been compiled
+ with STAN_THREADS=True.
+
:return: A :class:`CmdStanPathfinder` object
References
@@ -1936,6 +1894,17 @@ Source code for cmdstanpy.model
"available for CmdStan versions 2.34 and later"
)
+ if num_threads is not None:
+ if (
+ num_threads != 1
+ and exe_info.get('STAN_THREADS', '').lower() != 'true'
+ ):
+ raise ValueError(
+ "Model must be compiled with 'STAN_THREADS=true' to use"
+ " 'num_threads' argument"
+ )
+ os.environ['STAN_NUM_THREADS'] = str(num_threads)
+
if num_paths == 1:
if num_single_draws is None:
num_single_draws = draws
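A hedged sketch of the new num_threads argument; the model and data file names are assumptions, and STAN_THREADS must be baked into the executable for any value other than 1, as enforced above:

```python
from cmdstanpy import CmdStanModel

# Compile with threading support so num_threads != 1 is allowed.
model = CmdStanModel(
    stan_file='bernoulli.stan',
    cpp_options={'STAN_THREADS': True},
)

# CmdStanPy sets STAN_NUM_THREADS=4 in the environment for this run.
fit = model.pathfinder(
    data='bernoulli.data.json',
    num_paths=4,
    num_threads=4,
)
```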
diff --git a/docs/_modules/cmdstanpy/stanfit.html b/docs/_modules/cmdstanpy/stanfit.html
index dc6c6689..396b7a0f 100644
--- a/docs/_modules/cmdstanpy/stanfit.html
+++ b/docs/_modules/cmdstanpy/stanfit.html
@@ -5,7 +5,7 @@
- cmdstanpy.stanfit — CmdStanPy 1.2.1 documentation
+ cmdstanpy.stanfit — CmdStanPy 1.2.2 documentation
@@ -37,6 +37,7 @@
+
@@ -60,7 +61,7 @@
diff --git a/docs/_modules/cmdstanpy/stanfit/gq.html b/docs/_modules/cmdstanpy/stanfit/gq.html
index af7303eb..f03cb89e 100644
--- a/docs/_modules/cmdstanpy/stanfit/gq.html
+++ b/docs/_modules/cmdstanpy/stanfit/gq.html
@@ -5,7 +5,7 @@
- cmdstanpy.stanfit.gq — CmdStanPy 1.2.1 documentation
+ cmdstanpy.stanfit.gq — CmdStanPy 1.2.2 documentation
@@ -37,6 +37,7 @@
+
@@ -60,7 +61,7 @@
diff --git a/docs/_modules/cmdstanpy/stanfit/laplace.html b/docs/_modules/cmdstanpy/stanfit/laplace.html
index 73c13242..548c0131 100644
--- a/docs/_modules/cmdstanpy/stanfit/laplace.html
+++ b/docs/_modules/cmdstanpy/stanfit/laplace.html
@@ -5,7 +5,7 @@
- cmdstanpy.stanfit.laplace — CmdStanPy 1.2.1 documentation
+ cmdstanpy.stanfit.laplace — CmdStanPy 1.2.2 documentation
@@ -37,6 +37,7 @@
+
@@ -60,7 +61,7 @@
diff --git a/docs/_modules/cmdstanpy/stanfit/mcmc.html b/docs/_modules/cmdstanpy/stanfit/mcmc.html
index e8b729b8..232c7e53 100644
--- a/docs/_modules/cmdstanpy/stanfit/mcmc.html
+++ b/docs/_modules/cmdstanpy/stanfit/mcmc.html
@@ -5,7 +5,7 @@
- cmdstanpy.stanfit.mcmc — CmdStanPy 1.2.1 documentation
+ cmdstanpy.stanfit.mcmc — CmdStanPy 1.2.2 documentation
@@ -37,6 +37,7 @@
+
@@ -60,7 +61,7 @@
diff --git a/docs/_modules/cmdstanpy/stanfit/metadata.html b/docs/_modules/cmdstanpy/stanfit/metadata.html
index 53db7e9d..fb1340c7 100644
--- a/docs/_modules/cmdstanpy/stanfit/metadata.html
+++ b/docs/_modules/cmdstanpy/stanfit/metadata.html
@@ -5,7 +5,7 @@
- cmdstanpy.stanfit.metadata — CmdStanPy 1.2.1 documentation
+ cmdstanpy.stanfit.metadata — CmdStanPy 1.2.2 documentation
@@ -37,6 +37,7 @@
+
@@ -60,7 +61,7 @@
diff --git a/docs/_modules/cmdstanpy/stanfit/mle.html b/docs/_modules/cmdstanpy/stanfit/mle.html
index dfeaedf2..929c124b 100644
--- a/docs/_modules/cmdstanpy/stanfit/mle.html
+++ b/docs/_modules/cmdstanpy/stanfit/mle.html
@@ -5,7 +5,7 @@
- cmdstanpy.stanfit.mle — CmdStanPy 1.2.1 documentation
+ cmdstanpy.stanfit.mle — CmdStanPy 1.2.2 documentation
@@ -37,6 +37,7 @@
+
@@ -60,7 +61,7 @@
diff --git a/docs/_modules/cmdstanpy/stanfit/pathfinder.html b/docs/_modules/cmdstanpy/stanfit/pathfinder.html
index dfbcc6b7..0c1c486b 100644
--- a/docs/_modules/cmdstanpy/stanfit/pathfinder.html
+++ b/docs/_modules/cmdstanpy/stanfit/pathfinder.html
@@ -5,7 +5,7 @@
- cmdstanpy.stanfit.pathfinder — CmdStanPy 1.2.1 documentation
+ cmdstanpy.stanfit.pathfinder — CmdStanPy 1.2.2 documentation
@@ -37,6 +37,7 @@
+
@@ -60,7 +61,7 @@
diff --git a/docs/_modules/cmdstanpy/stanfit/runset.html b/docs/_modules/cmdstanpy/stanfit/runset.html
index b7d975ec..2fc8301c 100644
--- a/docs/_modules/cmdstanpy/stanfit/runset.html
+++ b/docs/_modules/cmdstanpy/stanfit/runset.html
@@ -5,7 +5,7 @@
- cmdstanpy.stanfit.runset — CmdStanPy 1.2.1 documentation
+ cmdstanpy.stanfit.runset — CmdStanPy 1.2.2 documentation
@@ -37,6 +37,7 @@
+
@@ -60,7 +61,7 @@
diff --git a/docs/_modules/cmdstanpy/stanfit/vb.html b/docs/_modules/cmdstanpy/stanfit/vb.html
index fee5a8f5..49c5f0c0 100644
--- a/docs/_modules/cmdstanpy/stanfit/vb.html
+++ b/docs/_modules/cmdstanpy/stanfit/vb.html
@@ -5,7 +5,7 @@
- cmdstanpy.stanfit.vb — CmdStanPy 1.2.1 documentation
+ cmdstanpy.stanfit.vb — CmdStanPy 1.2.2 documentation
@@ -37,6 +37,7 @@
+
@@ -60,7 +61,7 @@
diff --git a/docs/_modules/cmdstanpy/utils.html b/docs/_modules/cmdstanpy/utils.html
index 74725c83..ea1958ef 100644
--- a/docs/_modules/cmdstanpy/utils.html
+++ b/docs/_modules/cmdstanpy/utils.html
@@ -5,7 +5,7 @@
- cmdstanpy.utils — CmdStanPy 1.2.1 documentation
+ cmdstanpy.utils — CmdStanPy 1.2.2 documentation
@@ -37,6 +37,7 @@
+
@@ -60,7 +61,7 @@
diff --git a/docs/_modules/cmdstanpy/utils/cmdstan.html b/docs/_modules/cmdstanpy/utils/cmdstan.html
index df7a23af..46683793 100644
--- a/docs/_modules/cmdstanpy/utils/cmdstan.html
+++ b/docs/_modules/cmdstanpy/utils/cmdstan.html
@@ -5,7 +5,7 @@
- cmdstanpy.utils.cmdstan — CmdStanPy 1.2.1 documentation
+ cmdstanpy.utils.cmdstan — CmdStanPy 1.2.2 documentation
@@ -37,6 +37,7 @@
+
@@ -60,7 +61,7 @@
diff --git a/docs/_modules/index.html b/docs/_modules/index.html
index 94648a23..f1dce97b 100644
--- a/docs/_modules/index.html
+++ b/docs/_modules/index.html
@@ -5,7 +5,7 @@
- Overview: module code — CmdStanPy 1.2.1 documentation
+ Overview: module code — CmdStanPy 1.2.2 documentation
@@ -37,6 +37,7 @@
+
@@ -60,7 +61,7 @@
diff --git a/docs/_modules/stanio/json.html b/docs/_modules/stanio/json.html
index 5f89f624..551304fb 100644
--- a/docs/_modules/stanio/json.html
+++ b/docs/_modules/stanio/json.html
@@ -5,7 +5,7 @@
- stanio.json — CmdStanPy 1.2.1 documentation
+ stanio.json — CmdStanPy 1.2.2 documentation
@@ -37,6 +37,7 @@
+
@@ -60,7 +61,7 @@
@@ -174,7 +175,17 @@ Source code for stanio.json
"""
Utilities for writing Stan Json files
"""
-import json
+try:
+ import ujson as json
+
+ uj_version = tuple(map(int, json.__version__.split(".")))
+ if uj_version < (5, 5, 0):
+ raise ImportError("ujson version too old")
+ UJSON_AVAILABLE = True
+except:
+ UJSON_AVAILABLE = False
+ import json
+
from typing import Any, Mapping
import numpy as np
@@ -204,7 +215,17 @@ Source code for stanio.json
or "xarray" in original_module
or "pandas" in original_module
):
- return process_value(np.asanyarray(val).tolist())
+ numpy_val = np.asanyarray(val)
+ # fast paths for numeric types
+ if numpy_val.dtype.kind in "iuf":
+ return numpy_val.tolist()
+ if numpy_val.dtype.kind == "c":
+ return np.stack([numpy_val.real, numpy_val.imag], axis=-1).tolist()
+ if numpy_val.dtype.kind == "b":
+ return numpy_val.astype(int).tolist()
+
+ # should only be object arrays (tuples, etc)
+ return process_value(numpy_val.tolist())
return val
@@ -248,8 +269,11 @@ Source code for stanio.json
copied before type conversion, not modified
"""
with open(path, "w") as fd:
- for chunk in json.JSONEncoder().iterencode(process_dictionary(data)):
- fd.write(chunk)
+ if UJSON_AVAILABLE:
+ json.dump(process_dictionary(data), fd)
+ else:
+ for chunk in json.JSONEncoder().iterencode(process_dictionary(data)):
+ fd.write(chunk)
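To illustrate the fast paths above, a small sketch of what the public write_stan_json entry point produces for each dtype kind (the file and variable names are illustrative):

```python
import numpy as np
from cmdstanpy import write_stan_json

data = {
    "x": np.arange(3),                # integer fast path -> [0, 1, 2]
    "z": np.array([1 + 2j, 3 - 4j]),  # complex -> [[1.0, 2.0], [3.0, -4.0]]
    "flag": np.array([True, False]),  # bool -> [1, 0]
}
# Dumps via ujson when a version >= 5.5.0 is installed, otherwise falls
# back to the standard-library JSONEncoder, per the import guard above.
write_stan_json("demo_data.json", data)
```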
diff --git a/docs/_sources/api.rst.txt b/docs/_sources/api.rst.txt
index 5ec5867c..f527f159 100644
--- a/docs/_sources/api.rst.txt
+++ b/docs/_sources/api.rst.txt
@@ -84,6 +84,17 @@ CmdStanGQ
Functions
*********
+compile_stan_file
+=================
+
+.. autofunction:: cmdstanpy.compile_stan_file
+
+
+format_stan_file
+================
+
+.. autofunction:: cmdstanpy.format_stan_file
+
show_versions
=============
diff --git a/docs/_sources/changes.rst.txt b/docs/_sources/changes.rst.txt
index cd179060..15feeb75 100644
--- a/docs/_sources/changes.rst.txt
+++ b/docs/_sources/changes.rst.txt
@@ -7,6 +7,19 @@ What's New
For full changes, see the `Releases page `_ on GitHub.
+CmdStanPy 1.2.2
+---------------
+
+- Updated Community page to link to the ``bibat`` package.
+- Moved CmdStanPy's metadata to exclusively use the ``pyproject.toml`` file.
+- Fixed an issue where the deprecation of the ``compile=False`` argument to :class:`CmdStanModel` would
+ make it impossible to use the canonicalizer to update old models.
+ The new function :func:`cmdstanpy.format_stan_file` supports this use case.
+- Fixed a bug preventing multiple inits from being used with :meth:`CmdStanModel.pathfinder`.
+- Added a helper argument ``num_threads`` to :meth:`CmdStanModel.pathfinder`.
+
+Reminder: The next non-bugfix release of CmdStanPy will be version 2.0, which will remove all existing deprecations.
+
CmdStanPy 1.2.1
---------------
diff --git a/docs/_sources/community.rst.txt b/docs/_sources/community.rst.txt
index 82ef9373..eabfd829 100644
--- a/docs/_sources/community.rst.txt
+++ b/docs/_sources/community.rst.txt
@@ -9,8 +9,8 @@ Project templates
Templates are a great way to piggy back on other users' work, saving you time
when you start a new project.
-- `cookiecutter-cmdstanpy-analysis `_
- A cookiecutter template for cmdstanpy-based statistical analysis projects.
+- `bibat `_
+ A batteries-included template for Bayesian statistical analysis projects.
- `cookiecutter-cmdstanpy-wrapper `_
A cookiecutter template using Stan models in Python packages, including
diff --git a/docs/_static/documentation_options.js b/docs/_static/documentation_options.js
index 744c587f..809835b6 100644
--- a/docs/_static/documentation_options.js
+++ b/docs/_static/documentation_options.js
@@ -1,6 +1,6 @@
var DOCUMENTATION_OPTIONS = {
URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'),
- VERSION: '1.2.1',
+ VERSION: '1.2.2',
LANGUAGE: 'en',
COLLAPSE_INDEX: false,
BUILDER: 'html',
diff --git a/docs/_static/images/logo_icon.png b/docs/_static/images/logo_icon.png
index a4be80e9..ca139697 100644
Binary files a/docs/_static/images/logo_icon.png and b/docs/_static/images/logo_icon.png differ
diff --git a/docs/_static/images/logo_tm.png b/docs/_static/images/logo_tm.png
index 4a4f06aa..72f17ec0 100644
Binary files a/docs/_static/images/logo_tm.png and b/docs/_static/images/logo_tm.png differ
diff --git a/docs/_static/logo_icon.png b/docs/_static/logo_icon.png
index a4be80e9..ca139697 100644
Binary files a/docs/_static/logo_icon.png and b/docs/_static/logo_icon.png differ
diff --git a/docs/api.html b/docs/api.html
index 3cf6e32b..3ca6aa91 100644
--- a/docs/api.html
+++ b/docs/api.html
@@ -6,7 +6,7 @@
- API Reference — CmdStanPy 1.2.1 documentation
+ API Reference — CmdStanPy 1.2.2 documentation
@@ -38,6 +38,7 @@
+
@@ -63,7 +64,7 @@
@@ -1117,6 +1118,38 @@
Functions
+ -
+
+ compile_stan_file
+
+
+
+ -
+
+ format_stan_file
+
+
+
-
show_versions
@@ -1354,7 +1387,7 @@
CmdStanModel
compile(force=False, stanc_options=None, cpp_options=None, user_header=None, override_options=False, *, _internal=False)[source]¶
Deprecated: To compile a model, use the CmdStanModel
-constructor or cmdstanpy.compile_stan_file()
.
+constructor or cmdstanpy.compile_stan_file()
.
Compile the given Stan program file. Translates the Stan code to
C++, then calls the C++ compiler.
By default, this function compares the timestamps on the source and
@@ -1400,7 +1433,8 @@
CmdStanModel
-
format(overwrite_file=False, canonicalize=False, max_line_length=78, *, backup=True)[source]¶
-Run stanc’s auto-formatter on the model code. Either saves directly
+
Deprecated: Use cmdstanpy.format_stan_file()
instead.
+Run stanc’s auto-formatter on the model code. Either saves directly
back to the file or prints for inspection
- Parameters:
@@ -1585,7 +1619,7 @@ CmdStanModelReturn type:
--
+
-
@@ -1689,7 +1723,7 @@ CmdStanModel
-
-pathfinder(data=None, *, init_alpha=None, tol_obj=None, tol_rel_obj=None, tol_grad=None, tol_rel_grad=None, tol_param=None, history_size=None, num_paths=None, max_lbfgs_iters=None, draws=None, num_single_draws=None, num_elbo_draws=None, psis_resample=True, calculate_lp=True, seed=None, inits=None, output_dir=None, sig_figs=None, save_profile=False, show_console=False, refresh=None, time_fmt='%Y%m%d%H%M%S', timeout=None)[source]¶
+pathfinder(data=None, *, init_alpha=None, tol_obj=None, tol_rel_obj=None, tol_grad=None, tol_rel_grad=None, tol_param=None, history_size=None, num_paths=None, max_lbfgs_iters=None, draws=None, num_single_draws=None, num_elbo_draws=None, psis_resample=True, calculate_lp=True, seed=None, inits=None, output_dir=None, sig_figs=None, save_profile=False, show_console=False, refresh=None, time_fmt='%Y%m%d%H%M%S', timeout=None, num_threads=None)[source]¶
Run CmdStan’s Pathfinder variational inference algorithm.
+num_threads (Optional[int]) – Number of threads to request for parallel execution.
+A number other than 1
requires the model to have been compiled
+with STAN_THREADS=True.
Returns:
@@ -2189,7 +2226,7 @@ CmdStanMCMCReturn type:
-
+
@@ -2212,7 +2249,7 @@ CmdStanMCMCReturn type:
-
+
@@ -2346,7 +2383,7 @@ CmdStanMCMCpandas.DataFrame
Return type:
-
+
@@ -2548,7 +2585,7 @@ CmdStanMLE
-property optimized_iterations_pd: Optional[DataFrame]¶
+property optimized_iterations_pd: Optional[DataFrame]¶
Returns all saved iterations from the optimizer and final estimate
as a pandas.DataFrame which contains all optimizer outputs, i.e.,
the value for lp__ as well as all Stan program variables.
@@ -2571,7 +2608,7 @@ CmdStanMLE
-property optimized_params_pd: DataFrame¶
+property optimized_params_pd: DataFrame¶
Returns all final estimates from the optimizer as a pandas.DataFrame
which contains all optimizer outputs, i.e., the value for lp__
as well as all Stan program variables.
@@ -2615,7 +2652,7 @@ CmdStanLaplacevars (Optional[Union[List[str], str]]) – optional list of variable names.
Return type:
-
+
@@ -3015,7 +3052,7 @@ CmdStanVB
-property variational_params_pd: DataFrame¶
+property variational_params_pd: DataFrame¶
Returns inferred parameter means as pandas DataFrame.
@@ -3027,7 +3064,7 @@ CmdStanVB
-property variational_sample_pd: DataFrame¶
+property variational_sample_pd: DataFrame¶
Returns the set of approximate posterior output draws as
a pandas DataFrame.
@@ -3105,7 +3142,7 @@ CmdStanGQReturn type:
-
+
@@ -3118,7 +3155,7 @@ CmdStanGQ
draws_xr(vars: Optional[Union[str, List[str]]] = None, inc_warmup: bool = False, inc_sample: bool = False) NoReturn [source]¶
-draws_xr(vars: Optional[Union[str, List[str]]] = None, inc_warmup: bool = False, inc_sample: bool = False) Dataset
+draws_xr(vars: Optional[Union[str, List[str]]] = None, inc_warmup: bool = False, inc_sample: bool = False) Dataset
Returns the generated quantities draws as a xarray Dataset.
This method can only be called when the underlying fit was made
through sampling, it cannot be used on MLE or VB outputs.
@@ -3258,6 +3295,70 @@ CmdStanGQ
Functions¶
+
+compile_stan_file¶
+
+-
+cmdstanpy.compile_stan_file(src, force=False, stanc_options=None, cpp_options=None, user_header=None)[source]¶
+Compile the given Stan program file. Translates the Stan code to
+C++, then calls the C++ compiler.
+By default, this function compares the timestamps on the source and
+executable files; if the executable is newer than the source file, it
+will not recompile the file, unless argument force
is True
+or unless the compiler options have been changed.
+
+- Parameters:
+
+
+force (bool) – When True
, always compile, even if the executable file
+is newer than the source file. Used for Stan models which have
+#include
directives in order to force recompilation when changes
+are made to the included files.
+stanc_options (Optional[Dict[str, Any]]) – Options for stanc compiler.
+cpp_options (Optional[Dict[str, Any]]) – Options for C++ compiler.
+user_header (Optional[Union[str, PathLike]]) – A path to a header file to include during C++
+compilation.
+
+
+- Return type:
+-
+
+
+
+
+
+
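A brief sketch of calling the newly documented top-level compile_stan_file; the file name and option values are assumptions:

```python
from cmdstanpy import compile_stan_file

exe = compile_stan_file(
    'bernoulli.stan',
    force=False,                            # reuse the executable if up to date
    stanc_options={'warn-pedantic': True},  # forwarded to stanc
    cpp_options={'STAN_THREADS': True},     # forwarded to the make/C++ step
)
print(exe)  # path to the compiled executable
```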
+format_stan_file¶
+
+-
+cmdstanpy.format_stan_file(stan_file, *, overwrite_file=False, canonicalize=False, max_line_length=78, backup=True, stanc_options=None)[source]¶
+Run stanc’s auto-formatter on the model code. Either saves directly
+back to the file or prints for inspection
+
+- Parameters:
+
+stan_file (Union[str, PathLike]) – Path to Stan program file.
+overwrite_file (bool) – If True, save the updated code to disk, rather
+than printing it. By default False
+canonicalize (Union[bool, str, Iterable[str]]) – Whether or not the compiler should ‘canonicalize’
+the Stan model, removing things like deprecated syntax. Default is
+False. If True, all canonicalizations are run. If it is a list of
+strings, those options are passed to stanc (new in Stan 2.29)
+max_line_length (int) – Set the wrapping point for the formatter. The
+default value is 78, which wraps most lines by the 80th character.
+backup (bool) – If True, create a stanfile.bak backup before
+writing to the file. Only disable this if you’re sure you have other
+copies of the file or are using a version control system like Git.
+stanc_options (Optional[Dict[str, Any]]) – Additional options to pass to the stanc compiler.
+
+
+- Return type:
+None
+
+
+
+
+
show_versions¶
@@ -3430,7 +3531,7 @@ write_stan_jsonnumpy.asarray()
, e.g a
-pandas.Series
.
+pandas.Series
.
Produces a file compatible with the
Json Format for Cmdstan
@@ -3439,7 +3540,7 @@ write_stan_jsonstr) – File path for the created json. Will be overwritten if
already in existence.
data (Mapping[str, Any]) – A mapping from strings to values. This can be a dictionary
-or something more exotic like an xarray.Dataset
. This will be
+or something more exotic like an xarray.Dataset
. This will be
copied before type conversion, not modified
diff --git a/docs/changes.html b/docs/changes.html
index 983ff693..41694480 100644
--- a/docs/changes.html
+++ b/docs/changes.html
@@ -6,7 +6,7 @@
- What’s New — CmdStanPy 1.2.1 documentation
+ What’s New — CmdStanPy 1.2.2 documentation
@@ -38,6 +38,7 @@
+
@@ -63,7 +64,7 @@
@@ -171,6 +172,11 @@
@@ -312,15 +312,7 @@ Maximum Likelihood Estimation
-20:41:35 - cmdstanpy - INFO - Chain [1] start processing
-
-
-
-
-
-
-
-20:41:35 - cmdstanpy - INFO - Chain [1] done processing
+16:29:42 - cmdstanpy - INFO - Chain [1] done processing
@@ -329,7 +321,7 @@ Maximum Likelihood Estimation
('lp__', 'theta')
-OrderedDict([('lp__', -5.00402), ('theta', 0.20001)])
+OrderedDict([('lp__', -5.00402), ('theta', 0.199999)])
@@ -363,7 +355,7 @@ Maximum Likelihood Estimation\n",
" 0 \n",
" -5.00402 \n",
- " 0.20001 \n",
+ " 0.199999 \n",
" \n",
" \n",
"\n",
"
"
],
"text/plain": [
- " lp__ theta\n",
- "0 -5.00402 0.20001"
+ " lp__ theta\n",
+ "0 -5.00402 0.199999"
]
},
"execution_count": 1,
diff --git a/docs/users-guide/examples/Pathfinder.html b/docs/users-guide/examples/Pathfinder.html
index b2cd54ac..c0e27d42 100644
--- a/docs/users-guide/examples/Pathfinder.html
+++ b/docs/users-guide/examples/Pathfinder.html
@@ -6,7 +6,7 @@
- Variational Inference using Pathfinder — CmdStanPy 1.2.1 documentation
+ Variational Inference using Pathfinder — CmdStanPy 1.2.2 documentation
@@ -39,6 +39,7 @@
+
@@ -64,7 +65,7 @@
@@ -293,7 +294,7 @@ Variational Inference using Pathfinderbernoulli.stan¶
The CmdStanModel pathfinder method wraps the CmdStan pathfinder method.
By default, CmdStanPy runs multi-path Pathfinder which returns an importance-resampled set of draws over the outputs of 4 independent single-path Pathfinders.
-
+
[1]:
@@ -302,15 +303,6 @@ Example: variational inference with Pathfinder for model
-
-
-
-
-/opt/hostedtoolcache/Python/3.9.18/x64/lib/python3.9/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html
- from .autonotebook import tqdm as notebook_tqdm
-
-
[2]:
@@ -330,7 +322,7 @@ Example: variational inference with Pathfinder for model
-20:41:37 - cmdstanpy - INFO - Chain [1] start processing
+16:29:44 - cmdstanpy - INFO - Chain [1] start processing
@@ -338,7 +330,7 @@ Example: variational inference with Pathfinder for model
-20:41:37 - cmdstanpy - INFO - Chain [1] done processing
+16:29:44 - cmdstanpy - INFO - Chain [1] done processing
@@ -357,11 +349,11 @@ Example: variational inference with Pathfinder for model
CmdStanPathfinder: model=bernoulli['method=pathfinder']
csv_files:
- /tmp/tmpar1gm37y/bernoulli3ctxpbm0/bernoulli-20240205204137.csv
+ /tmp/tmpimv2ege0/bernoullifbcxwakw/bernoulli-20240326162944.csv
output_files:
- /tmp/tmpar1gm37y/bernoulli3ctxpbm0/bernoulli-20240205204137_0-stdout.txt
+ /tmp/tmpimv2ege0/bernoullifbcxwakw/bernoulli-20240326162944_0-stdout.txt
Metadata:
-{'stan_version_major': 2, 'stan_version_minor': 34, 'stan_version_patch': 1, 'model': 'bernoulli_model', 'start_datetime': '2024-02-05 20:41:37 UTC', 'method': 'pathfinder', 'init_alpha': 0.001, 'tol_obj': 1e-12, 'tol_rel_obj': 10000, 'tol_grad': 1e-08, 'tol_rel_grad': 10000000.0, 'tol_param': 1e-08, 'history_size': 5, 'num_psis_draws': 1000, 'num_paths': 4, 'save_single_paths': 0, 'psis_resample': 1, 'calculate_lp': 1, 'max_lbfgs_iters': 1000, 'num_draws': 1000, 'num_elbo_draws': 25, 'id': 1, 'data_file': '/home/runner/.cmdstan/cmdstan-2.34.1/examples/bernoulli/bernoulli.data.json', 'init': 2, 'seed': 1285, 'diagnostic_file': '', 'refresh': 100, 'sig_figs': -1, 'profile_file': 'profile.csv', 'save_cmdstan_config': 0, 'num_threads': 1, 'stanc_version': 'stanc3 v2.34.0', 'stancflags': '', 'raw_header': 'lp_approx__,lp__,theta', 'column_names': ('lp_approx__', 'lp__', 'theta')}
+{'stan_version_major': 2, 'stan_version_minor': 34, 'stan_version_patch': 1, 'model': 'bernoulli_model', 'start_datetime': '2024-03-26 16:29:44 UTC', 'method': 'pathfinder', 'init_alpha': 0.001, 'tol_obj': 1e-12, 'tol_rel_obj': 10000, 'tol_grad': 1e-08, 'tol_rel_grad': 10000000.0, 'tol_param': 1e-08, 'history_size': 5, 'num_psis_draws': 1000, 'num_paths': 4, 'save_single_paths': 0, 'psis_resample': 1, 'calculate_lp': 1, 'max_lbfgs_iters': 1000, 'num_draws': 1000, 'num_elbo_draws': 25, 'id': 1, 'data_file': '/home/runner/.cmdstan/cmdstan-2.34.1/examples/bernoulli/bernoulli.data.json', 'init': 2, 'seed': 50890, 'diagnostic_file': '', 'refresh': 100, 'sig_figs': -1, 'profile_file': 'profile.csv', 'save_cmdstan_config': 0, 'num_threads': 1, 'stanc_version': 'stanc3 v2.34.0', 'stancflags': '', 'raw_header': 'lp_approx__,lp__,theta', 'column_names': ('lp_approx__', 'lp__', 'theta')}
@@ -439,7 +431,7 @@ Pathfinders as initialization for the MCMC sampler
-[{'theta': array(0.335465)}, {'theta': array(0.370054)}, {'theta': array(0.327206)}, {'theta': array(0.220554)}]
+[{'theta': array(0.278046)}, {'theta': array(0.189515)}, {'theta': array(0.28219)}, {'theta': array(0.174821)}]
The create_inits
takes two arguments:
@@ -461,7 +453,7 @@ Pathfinders as initialization for the MCMC sampler
-[{'theta': array(0.190597)}, {'theta': array(0.268621)}, {'theta': array(0.227258)}]
+[{'theta': array(0.151519)}, {'theta': array(0.156614)}, {'theta': array(0.066908)}]
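A sketch of feeding those Pathfinder draws into the sampler, where fit and model are the objects from this notebook and the chains keyword of create_inits is assumed:

```python
# 'fit' is the CmdStanPathfinder result and 'model' the CmdStanModel above.
inits = fit.create_inits(chains=4)  # one dict of parameter draws per chain
print(inits)                        # e.g. [{'theta': array(0.27)}, ...]

mcmc = model.sample(
    data='bernoulli.data.json',
    chains=4,
    inits=inits,                    # Pathfinder draws as initial values
)
```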
diff --git a/docs/users-guide/examples/Pathfinder.ipynb b/docs/users-guide/examples/Pathfinder.ipynb
index 2b97e57b..daeee884 100644
--- a/docs/users-guide/examples/Pathfinder.ipynb
+++ b/docs/users-guide/examples/Pathfinder.ipynb
@@ -42,22 +42,13 @@
"execution_count": 1,
"metadata": {
"execution": {
- "iopub.execute_input": "2024-02-05T20:41:36.890695Z",
- "iopub.status.busy": "2024-02-05T20:41:36.890218Z",
- "iopub.status.idle": "2024-02-05T20:41:37.188270Z",
- "shell.execute_reply": "2024-02-05T20:41:37.187699Z"
+ "iopub.execute_input": "2024-03-26T16:29:43.878098Z",
+ "iopub.status.busy": "2024-03-26T16:29:43.877555Z",
+ "iopub.status.idle": "2024-03-26T16:29:44.225458Z",
+ "shell.execute_reply": "2024-03-26T16:29:44.224871Z"
}
},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "/opt/hostedtoolcache/Python/3.9.18/x64/lib/python3.9/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
- " from .autonotebook import tqdm as notebook_tqdm\n"
- ]
- }
- ],
+ "outputs": [],
"source": [
"import os\n",
"from cmdstanpy.model import CmdStanModel, cmdstan_path"
@@ -68,10 +59,10 @@
"execution_count": 2,
"metadata": {
"execution": {
- "iopub.execute_input": "2024-02-05T20:41:37.191153Z",
- "iopub.status.busy": "2024-02-05T20:41:37.190684Z",
- "iopub.status.idle": "2024-02-05T20:41:37.276597Z",
- "shell.execute_reply": "2024-02-05T20:41:37.276028Z"
+ "iopub.execute_input": "2024-03-26T16:29:44.228431Z",
+ "iopub.status.busy": "2024-03-26T16:29:44.227964Z",
+ "iopub.status.idle": "2024-03-26T16:29:44.313140Z",
+ "shell.execute_reply": "2024-03-26T16:29:44.312554Z"
}
},
"outputs": [
@@ -79,14 +70,14 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "20:41:37 - cmdstanpy - INFO - Chain [1] start processing\n"
+ "16:29:44 - cmdstanpy - INFO - Chain [1] start processing\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
- "20:41:37 - cmdstanpy - INFO - Chain [1] done processing\n"
+ "16:29:44 - cmdstanpy - INFO - Chain [1] done processing\n"
]
}
],
@@ -105,10 +96,10 @@
"execution_count": 3,
"metadata": {
"execution": {
- "iopub.execute_input": "2024-02-05T20:41:37.279188Z",
- "iopub.status.busy": "2024-02-05T20:41:37.278927Z",
- "iopub.status.idle": "2024-02-05T20:41:37.282778Z",
- "shell.execute_reply": "2024-02-05T20:41:37.282171Z"
+ "iopub.execute_input": "2024-03-26T16:29:44.316072Z",
+ "iopub.status.busy": "2024-03-26T16:29:44.315710Z",
+ "iopub.status.idle": "2024-03-26T16:29:44.319485Z",
+ "shell.execute_reply": "2024-03-26T16:29:44.318839Z"
}
},
"outputs": [
@@ -118,11 +109,11 @@
"text": [
"CmdStanPathfinder: model=bernoulli['method=pathfinder']\n",
" csv_files:\n",
- "\t/tmp/tmpar1gm37y/bernoulli3ctxpbm0/bernoulli-20240205204137.csv\n",
+ "\t/tmp/tmpimv2ege0/bernoullifbcxwakw/bernoulli-20240326162944.csv\n",
" output_files:\n",
- "\t/tmp/tmpar1gm37y/bernoulli3ctxpbm0/bernoulli-20240205204137_0-stdout.txt\n",
+ "\t/tmp/tmpimv2ege0/bernoullifbcxwakw/bernoulli-20240326162944_0-stdout.txt\n",
"Metadata:\n",
- "{'stan_version_major': 2, 'stan_version_minor': 34, 'stan_version_patch': 1, 'model': 'bernoulli_model', 'start_datetime': '2024-02-05 20:41:37 UTC', 'method': 'pathfinder', 'init_alpha': 0.001, 'tol_obj': 1e-12, 'tol_rel_obj': 10000, 'tol_grad': 1e-08, 'tol_rel_grad': 10000000.0, 'tol_param': 1e-08, 'history_size': 5, 'num_psis_draws': 1000, 'num_paths': 4, 'save_single_paths': 0, 'psis_resample': 1, 'calculate_lp': 1, 'max_lbfgs_iters': 1000, 'num_draws': 1000, 'num_elbo_draws': 25, 'id': 1, 'data_file': '/home/runner/.cmdstan/cmdstan-2.34.1/examples/bernoulli/bernoulli.data.json', 'init': 2, 'seed': 1285, 'diagnostic_file': '', 'refresh': 100, 'sig_figs': -1, 'profile_file': 'profile.csv', 'save_cmdstan_config': 0, 'num_threads': 1, 'stanc_version': 'stanc3 v2.34.0', 'stancflags': '', 'raw_header': 'lp_approx__,lp__,theta', 'column_names': ('lp_approx__', 'lp__', 'theta')}\n",
+ "{'stan_version_major': 2, 'stan_version_minor': 34, 'stan_version_patch': 1, 'model': 'bernoulli_model', 'start_datetime': '2024-03-26 16:29:44 UTC', 'method': 'pathfinder', 'init_alpha': 0.001, 'tol_obj': 1e-12, 'tol_rel_obj': 10000, 'tol_grad': 1e-08, 'tol_rel_grad': 10000000.0, 'tol_param': 1e-08, 'history_size': 5, 'num_psis_draws': 1000, 'num_paths': 4, 'save_single_paths': 0, 'psis_resample': 1, 'calculate_lp': 1, 'max_lbfgs_iters': 1000, 'num_draws': 1000, 'num_elbo_draws': 25, 'id': 1, 'data_file': '/home/runner/.cmdstan/cmdstan-2.34.1/examples/bernoulli/bernoulli.data.json', 'init': 2, 'seed': 50890, 'diagnostic_file': '', 'refresh': 100, 'sig_figs': -1, 'profile_file': 'profile.csv', 'save_cmdstan_config': 0, 'num_threads': 1, 'stanc_version': 'stanc3 v2.34.0', 'stancflags': '', 'raw_header': 'lp_approx__,lp__,theta', 'column_names': ('lp_approx__', 'lp__', 'theta')}\n",
"\n"
]
}
@@ -154,10 +145,10 @@
"execution_count": 4,
"metadata": {
"execution": {
- "iopub.execute_input": "2024-02-05T20:41:37.285213Z",
- "iopub.status.busy": "2024-02-05T20:41:37.284877Z",
- "iopub.status.idle": "2024-02-05T20:41:37.292194Z",
- "shell.execute_reply": "2024-02-05T20:41:37.291591Z"
+ "iopub.execute_input": "2024-03-26T16:29:44.322023Z",
+ "iopub.status.busy": "2024-03-26T16:29:44.321649Z",
+ "iopub.status.idle": "2024-03-26T16:29:44.328711Z",
+ "shell.execute_reply": "2024-03-26T16:29:44.328190Z"
}
},
"outputs": [
@@ -181,10 +172,10 @@
"execution_count": 5,
"metadata": {
"execution": {
- "iopub.execute_input": "2024-02-05T20:41:37.294763Z",
- "iopub.status.busy": "2024-02-05T20:41:37.294355Z",
- "iopub.status.idle": "2024-02-05T20:41:37.298410Z",
- "shell.execute_reply": "2024-02-05T20:41:37.297809Z"
+ "iopub.execute_input": "2024-03-26T16:29:44.331257Z",
+ "iopub.status.busy": "2024-03-26T16:29:44.330886Z",
+ "iopub.status.idle": "2024-03-26T16:29:44.335028Z",
+ "shell.execute_reply": "2024-03-26T16:29:44.334351Z"
}
},
"outputs": [
@@ -208,10 +199,10 @@
"execution_count": 6,
"metadata": {
"execution": {
- "iopub.execute_input": "2024-02-05T20:41:37.300935Z",
- "iopub.status.busy": "2024-02-05T20:41:37.300459Z",
- "iopub.status.idle": "2024-02-05T20:41:37.304586Z",
- "shell.execute_reply": "2024-02-05T20:41:37.303933Z"
+ "iopub.execute_input": "2024-03-26T16:29:44.337482Z",
+ "iopub.status.busy": "2024-03-26T16:29:44.337102Z",
+ "iopub.status.idle": "2024-03-26T16:29:44.341146Z",
+ "shell.execute_reply": "2024-03-26T16:29:44.340523Z"
}
},
"outputs": [
@@ -244,10 +235,10 @@
"execution_count": 7,
"metadata": {
"execution": {
- "iopub.execute_input": "2024-02-05T20:41:37.307112Z",
- "iopub.status.busy": "2024-02-05T20:41:37.306736Z",
- "iopub.status.idle": "2024-02-05T20:41:37.310810Z",
- "shell.execute_reply": "2024-02-05T20:41:37.310186Z"
+ "iopub.execute_input": "2024-03-26T16:29:44.343774Z",
+ "iopub.status.busy": "2024-03-26T16:29:44.343296Z",
+ "iopub.status.idle": "2024-03-26T16:29:44.347590Z",
+ "shell.execute_reply": "2024-03-26T16:29:44.346955Z"
}
},
"outputs": [
@@ -255,7 +246,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "[{'theta': array(0.335465)}, {'theta': array(0.370054)}, {'theta': array(0.327206)}, {'theta': array(0.220554)}]\n"
+ "[{'theta': array(0.278046)}, {'theta': array(0.189515)}, {'theta': array(0.28219)}, {'theta': array(0.174821)}]\n"
]
}
],
@@ -279,10 +270,10 @@
"execution_count": 8,
"metadata": {
"execution": {
- "iopub.execute_input": "2024-02-05T20:41:37.313029Z",
- "iopub.status.busy": "2024-02-05T20:41:37.312846Z",
- "iopub.status.idle": "2024-02-05T20:41:37.316790Z",
- "shell.execute_reply": "2024-02-05T20:41:37.316156Z"
+ "iopub.execute_input": "2024-03-26T16:29:44.350084Z",
+ "iopub.status.busy": "2024-03-26T16:29:44.349681Z",
+ "iopub.status.idle": "2024-03-26T16:29:44.353504Z",
+ "shell.execute_reply": "2024-03-26T16:29:44.352850Z"
}
},
"outputs": [
@@ -290,7 +281,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "[{'theta': array(0.190597)}, {'theta': array(0.268621)}, {'theta': array(0.227258)}]\n"
+ "[{'theta': array(0.151519)}, {'theta': array(0.156614)}, {'theta': array(0.066908)}]\n"
]
}
],
diff --git a/docs/users-guide/examples/Run Generated Quantities.html b/docs/users-guide/examples/Run Generated Quantities.html
index af2da5b4..fd0543f3 100644
--- a/docs/users-guide/examples/Run Generated Quantities.html
+++ b/docs/users-guide/examples/Run Generated Quantities.html
@@ -6,7 +6,7 @@
- Generating new quantities of interest. — CmdStanPy 1.2.1 documentation
+ Generating new quantities of interest. — CmdStanPy 1.2.2 documentation
@@ -39,6 +39,7 @@
+
@@ -64,7 +65,7 @@
@@ -312,15 +313,6 @@ Example: add posterior predictive checks to
-
-
-
-
-/opt/hostedtoolcache/Python/3.9.18/x64/lib/python3.9/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html
- from .autonotebook import tqdm as notebook_tqdm
-
-
@@ -379,196 +371,33 @@ Example: add posterior predictive checks to
-20:41:39 - cmdstanpy - INFO - CmdStan start processing
+16:29:46 - cmdstanpy - INFO - CmdStan start processing
[collapsed: ANSI/sphinxVerbatim progress-bar residue for chains 1-4 ("Status" and "Sampling completed" fragments) removed from the regenerated HTML, which now shows Jupyter widget placeholders instead]
@@ -606,8 +435,7 @@ Example: add posterior predictive checks to
-
-20:41:39 - cmdstanpy - INFO - CmdStan done processing.
+16:29:46 - cmdstanpy - INFO - CmdStan done processing.
@@ -664,27 +492,27 @@ Example: add posterior predictive checks to
-20:41:39 - cmdstanpy - INFO - compiling stan file /home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/bernoulli_ppc.stan to exe file /home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/bernoulli_ppc
+16:29:46 - cmdstanpy - INFO - compiling stan file /home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/bernoulli_ppc.stan to exe file /home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/bernoulli_ppc
@@ -713,7 +541,7 @@ Example: add posterior predictive checks to
-20:41:53 - cmdstanpy - INFO - compiled model executable: /home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/bernoulli_ppc
+16:30:00 - cmdstanpy - INFO - compiled model executable: /home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/bernoulli_ppc
@@ -757,7 +585,7 @@ Example: add posterior predictive checks to
-20:41:53 - cmdstanpy - INFO - Chain [1] start processing
+16:30:00 - cmdstanpy - INFO - Chain [1] start processing
@@ -765,7 +593,7 @@ Example: add posterior predictive checks to
-20:41:53 - cmdstanpy - INFO - Chain [2] start processing
+16:30:00 - cmdstanpy - INFO - Chain [2] start processing
@@ -773,7 +601,7 @@ Example: add posterior predictive checks to
-20:41:53 - cmdstanpy - INFO - Chain [1] done processing
+16:30:00 - cmdstanpy - INFO - Chain [1] done processing
@@ -781,7 +609,7 @@ Example: add posterior predictive checks to
-20:41:53 - cmdstanpy - INFO - Chain [3] start processing
+16:30:00 - cmdstanpy - INFO - Chain [3] start processing
@@ -789,7 +617,7 @@ Example: add posterior predictive checks to
-20:41:53 - cmdstanpy - INFO - Chain [2] done processing
+16:30:00 - cmdstanpy - INFO - Chain [2] done processing
@@ -797,7 +625,7 @@ Example: add posterior predictive checks to
-20:41:53 - cmdstanpy - INFO - Chain [4] start processing
+16:30:00 - cmdstanpy - INFO - Chain [4] start processing
@@ -805,7 +633,7 @@ Example: add posterior predictive checks to
-20:41:53 - cmdstanpy - INFO - Chain [3] done processing
+16:30:00 - cmdstanpy - INFO - Chain [3] done processing
@@ -813,7 +641,7 @@ Example: add posterior predictive checks to
-20:41:53 - cmdstanpy - INFO - Chain [4] done processing
+16:30:00 - cmdstanpy - INFO - Chain [4] done processing
The generate_quantities
method returns a CmdStanGQ
object which contains the values for all variables in the generated quantities block of the program bernoulli_ppc.stan
. Unlike the output from the sample
method, it doesn’t contain any information on the joint log probability density, sampler state, or parameters or transformed parameter values.
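A compact sketch of the workflow this page walks through, with names taken from the bernoulli_ppc example; fit is the CmdStanMCMC object produced earlier, and previous_fit is assumed to be the current name of the argument:

```python
from cmdstanpy import CmdStanModel

model_ppc = CmdStanModel(stan_file='bernoulli_ppc.stan')
gq = model_ppc.generate_quantities(
    data='bernoulli.data.json',
    previous_fit=fit,                 # reuse the draws from the earlier sample
)
y_rep = gq.stan_variable('y_rep')     # generated-quantities draws only
df = gq.draws_pd(inc_sample=True)     # combine with the original sample draws
```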
@@ -833,7 +661,7 @@ Example: add posterior predictive checks to
-20:41:53 - cmdstanpy - WARNING - Sample doesn't contain draws from warmup iterations, rerun sampler with "save_warmup=True".
+16:30:00 - cmdstanpy - WARNING - Sample doesn't contain draws from warmup iterations, rerun sampler with "save_warmup=True".
@@ -841,7 +669,7 @@ Example: add posterior predictive checks to
-20:41:53 - cmdstanpy - WARNING - Sample doesn't contain draws from warmup iterations, rerun sampler with "save_warmup=True".
+16:30:00 - cmdstanpy - WARNING - Sample doesn't contain draws from warmup iterations, rerun sampler with "save_warmup=True".
@@ -849,7 +677,7 @@ Example: add posterior predictive checks to
-20:41:53 - cmdstanpy - WARNING - Sample doesn't contain draws from warmup iterations, rerun sampler with "save_warmup=True".
+16:30:00 - cmdstanpy - WARNING - Sample doesn't contain draws from warmup iterations, rerun sampler with "save_warmup=True".
@@ -857,7 +685,7 @@ Example: add posterior predictive checks to
-20:41:53 - cmdstanpy - WARNING - Sample doesn't contain draws from warmup iterations, rerun sampler with "save_warmup=True".
+16:30:00 - cmdstanpy - WARNING - Sample doesn't contain draws from warmup iterations, rerun sampler with "save_warmup=True".
@@ -866,18 +694,18 @@ Example: add posterior predictive checks to
(1000, 4, 10) ('y_rep[1]', 'y_rep[2]', 'y_rep[3]', 'y_rep[4]', 'y_rep[5]', 'y_rep[6]', 'y_rep[7]', 'y_rep[8]', 'y_rep[9]', 'y_rep[10]')
-[[0. 0. 0. 1. 1. 0. 0. 0. 0. 0.]
- [0. 0. 1. 1. 1. 0. 0. 0. 0. 1.]
- [0. 0. 1. 1. 1. 0. 0. 0. 0. 1.]
- [0. 0. 0. 1. 1. 0. 0. 0. 0. 0.]]
-[[0. 1. 0. 1. 0. 0. 1. 1. 0. 1.]
- [0. 1. 0. 1. 0. 0. 1. 0. 0. 0.]
- [0. 1. 0. 1. 0. 0. 1. 0. 0. 0.]
- [0. 1. 0. 0. 0. 0. 1. 0. 0. 0.]]
-[[1. 1. 0. 0. 0. 0. 0. 1. 0. 0.]
- [1. 1. 0. 0. 0. 0. 0. 1. 0. 0.]
- [1. 1. 0. 0. 0. 0. 0. 0. 0. 0.]
- [0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]
+[[0. 0. 0. 0. 1. 1. 0. 0. 0. 0.]
+ [0. 0. 0. 0. 1. 1. 0. 0. 0. 0.]
+ [0. 0. 0. 0. 1. 1. 0. 0. 0. 0.]
+ [0. 0. 0. 0. 1. 1. 0. 0. 0. 0.]]
+[[0. 0. 0. 1. 0. 0. 0. 0. 1. 1.]
+ [0. 0. 0. 1. 0. 0. 0. 0. 1. 1.]
+ [0. 0. 0. 1. 0. 0. 0. 0. 1. 1.]
+ [0. 0. 0. 1. 0. 0. 0. 0. 0. 1.]]
+[[0. 0. 0. 0. 0. 0. 0. 0. 1. 1.]
+ [0. 0. 0. 0. 0. 0. 0. 0. 1. 1.]
+ [0. 0. 0. 0. 0. 0. 0. 0. 1. 1.]
+ [0. 0. 0. 0. 0. 0. 0. 0. 1. 1.]]
We can also use draws_pd(inc_sample=True)
to get a pandas DataFrame which combines the input drawset with the generated quantities.
@@ -897,7 +725,7 @@ Example: add posterior predictive checks to
-20:41:53 - cmdstanpy - WARNING - Sample doesn't contain draws from warmup iterations, rerun sampler with "save_warmup=True".
+16:30:00 - cmdstanpy - WARNING - Sample doesn't contain draws from warmup iterations, rerun sampler with "save_warmup=True".
@@ -957,75 +785,75 @@ Example: add posterior predictive checks to
For models as simple as the bernoulli models here, it would be trivial to re-run the sampler and generate a new sample which contains both the estimate of the parameters theta
as well as y_rep
values. For models which are difficult to fit, i.e., when producing a sample is computationally expensive, the generate_quantities
method is preferred.
-
+
diff --git a/docs/users-guide/examples/Run Generated Quantities.ipynb b/docs/users-guide/examples/Run Generated Quantities.ipynb
index b20af744..80477c75 100644
--- a/docs/users-guide/examples/Run Generated Quantities.ipynb
+++ b/docs/users-guide/examples/Run Generated Quantities.ipynb
@@ -42,21 +42,13 @@
"execution_count": 1,
"metadata": {
"execution": {
- "iopub.execute_input": "2024-02-05T20:41:39.081240Z",
- "iopub.status.busy": "2024-02-05T20:41:39.081047Z",
- "iopub.status.idle": "2024-02-05T20:41:39.443810Z",
- "shell.execute_reply": "2024-02-05T20:41:39.443074Z"
+ "iopub.execute_input": "2024-03-26T16:29:46.036648Z",
+ "iopub.status.busy": "2024-03-26T16:29:46.036452Z",
+ "iopub.status.idle": "2024-03-26T16:29:46.441221Z",
+ "shell.execute_reply": "2024-03-26T16:29:46.440481Z"
}
},
"outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "/opt/hostedtoolcache/Python/3.9.18/x64/lib/python3.9/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
- " from .autonotebook import tqdm as notebook_tqdm\n"
- ]
- },
{
"name": "stdout",
"output_type": "stream",
@@ -102,10 +94,10 @@
"execution_count": 2,
"metadata": {
"execution": {
- "iopub.execute_input": "2024-02-05T20:41:39.447060Z",
- "iopub.status.busy": "2024-02-05T20:41:39.446455Z",
- "iopub.status.idle": "2024-02-05T20:41:39.486289Z",
- "shell.execute_reply": "2024-02-05T20:41:39.485729Z"
+ "iopub.execute_input": "2024-03-26T16:29:46.444455Z",
+ "iopub.status.busy": "2024-03-26T16:29:46.443867Z",
+ "iopub.status.idle": "2024-03-26T16:29:46.502957Z",
+ "shell.execute_reply": "2024-03-26T16:29:46.502392Z"
}
},
"outputs": [
@@ -140,10 +132,10 @@
"execution_count": 3,
"metadata": {
"execution": {
- "iopub.execute_input": "2024-02-05T20:41:39.488877Z",
- "iopub.status.busy": "2024-02-05T20:41:39.488433Z",
- "iopub.status.idle": "2024-02-05T20:41:39.624251Z",
- "shell.execute_reply": "2024-02-05T20:41:39.623588Z"
+ "iopub.execute_input": "2024-03-26T16:29:46.505763Z",
+ "iopub.status.busy": "2024-03-26T16:29:46.505069Z",
+ "iopub.status.idle": "2024-03-26T16:29:46.709551Z",
+ "shell.execute_reply": "2024-03-26T16:29:46.708948Z"
}
},
"outputs": [
@@ -151,120 +143,64 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "20:41:39 - cmdstanpy - INFO - CmdStan start processing\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\r",
- "chain 1 |\u001b[33m \u001b[0m| 00:00 Status"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\r",
- "chain 2 |\u001b[33m \u001b[0m| 00:00 Status"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\u001b[A"
+ "16:29:46 - cmdstanpy - INFO - CmdStan start processing\n"
]
},
{
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\r",
- "chain 3 |\u001b[33m \u001b[0m| 00:00 Status"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\u001b[A\u001b[A"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "\n",
- "\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\r",
- "chain 4 |\u001b[33m \u001b[0m| 00:00 Status"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\u001b[A\u001b[A\u001b[A"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\r",
- "chain 1 |\u001b[34m██████████\u001b[0m| 00:00 Sampling completed"
- ]
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "d8220583290146688fa0dcd1925ad1e5",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "chain 1 | | 00:00 Status"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
},
{
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "\r",
- "chain 2 |\u001b[34m██████████\u001b[0m| 00:00 Sampling completed"
- ]
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "46749833be2f497f927fdb925199d231",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "chain 2 | | 00:00 Status"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
},
{
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "\r",
- "chain 3 |\u001b[34m██████████\u001b[0m| 00:00 Sampling completed"
- ]
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "f52d3bec872b4454b3ab4912a4ccd42a",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "chain 3 | | 00:00 Status"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
},
{
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "\r",
- "chain 4 |\u001b[34m██████████\u001b[0m| 00:00 Sampling completed"
- ]
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "da5c161a59bc4cdaa557c3421c211f32",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "chain 4 | | 00:00 Status"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
},
{
"name": "stdout",
@@ -298,8 +234,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "\n",
- "20:41:39 - cmdstanpy - INFO - CmdStan done processing.\n"
+ "16:29:46 - cmdstanpy - INFO - CmdStan done processing.\n"
]
},
{
@@ -327,10 +262,10 @@
"execution_count": 4,
"metadata": {
"execution": {
- "iopub.execute_input": "2024-02-05T20:41:39.627092Z",
- "iopub.status.busy": "2024-02-05T20:41:39.626692Z",
- "iopub.status.idle": "2024-02-05T20:41:39.673685Z",
- "shell.execute_reply": "2024-02-05T20:41:39.672907Z"
+ "iopub.execute_input": "2024-03-26T16:29:46.712397Z",
+ "iopub.status.busy": "2024-03-26T16:29:46.711862Z",
+ "iopub.status.idle": "2024-03-26T16:29:46.756228Z",
+ "shell.execute_reply": "2024-03-26T16:29:46.755501Z"
}
},
"outputs": [
@@ -369,40 +304,40 @@
" \n",
" \n",
" lp__ \n",
- " -7.273190 \n",
- " 0.024164 \n",
- " 0.755960 \n",
- " -8.784640 \n",
- " -6.98039 \n",
- " -6.75028 \n",
- " 978.759 \n",
- " 22244.5 \n",
- " 1.00159 \n",
+ " -7.297320 \n",
+ " 0.017912 \n",
+ " 0.755274 \n",
+ " -8.851160 \n",
+ " -6.996700 \n",
+ " -6.750000 \n",
+ " 1778.01 \n",
+ " 36286.0 \n",
+ " 1.00036 \n",
" \n",
" \n",
" theta \n",
- " 0.252528 \n",
- " 0.003132 \n",
- " 0.120077 \n",
- " 0.081241 \n",
- " 0.23811 \n",
- " 0.46955 \n",
- " 1469.970 \n",
- " 33408.4 \n",
- " 1.00101 \n",
+ " 0.250792 \n",
+ " 0.003315 \n",
+ " 0.121648 \n",
+ " 0.076211 \n",
+ " 0.240263 \n",
+ " 0.470491 \n",
+ " 1346.53 \n",
+ " 27480.2 \n",
+ " 1.00109 \n",
" \n",
" \n",
"\n",
""
],
"text/plain": [
- " Mean MCSE StdDev 5% 50% 95% N_Eff \\\n",
- "lp__ -7.273190 0.024164 0.755960 -8.784640 -6.98039 -6.75028 978.759 \n",
- "theta 0.252528 0.003132 0.120077 0.081241 0.23811 0.46955 1469.970 \n",
+ " Mean MCSE StdDev 5% 50% 95% N_Eff \\\n",
+ "lp__ -7.297320 0.017912 0.755274 -8.851160 -6.996700 -6.750000 1778.01 \n",
+ "theta 0.250792 0.003315 0.121648 0.076211 0.240263 0.470491 1346.53 \n",
"\n",
" N_Eff/s R_hat \n",
- "lp__ 22244.5 1.00159 \n",
- "theta 33408.4 1.00101 "
+ "lp__ 36286.0 1.00036 \n",
+ "theta 27480.2 1.00109 "
]
},
"execution_count": 4,
@@ -426,10 +361,10 @@
"execution_count": 5,
"metadata": {
"execution": {
- "iopub.execute_input": "2024-02-05T20:41:39.676525Z",
- "iopub.status.busy": "2024-02-05T20:41:39.676108Z",
- "iopub.status.idle": "2024-02-05T20:41:53.232238Z",
- "shell.execute_reply": "2024-02-05T20:41:53.231630Z"
+ "iopub.execute_input": "2024-03-26T16:29:46.759240Z",
+ "iopub.status.busy": "2024-03-26T16:29:46.758703Z",
+ "iopub.status.idle": "2024-03-26T16:30:00.640995Z",
+ "shell.execute_reply": "2024-03-26T16:30:00.640392Z"
}
},
"outputs": [
@@ -437,14 +372,14 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "20:41:39 - cmdstanpy - INFO - compiling stan file /home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/bernoulli_ppc.stan to exe file /home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/bernoulli_ppc\n"
+ "16:29:46 - cmdstanpy - INFO - compiling stan file /home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/bernoulli_ppc.stan to exe file /home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/bernoulli_ppc\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
- "20:41:53 - cmdstanpy - INFO - compiled model executable: /home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/bernoulli_ppc\n"
+ "16:30:00 - cmdstanpy - INFO - compiled model executable: /home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/bernoulli_ppc\n"
]
},
{
@@ -495,10 +430,10 @@
"execution_count": 6,
"metadata": {
"execution": {
- "iopub.execute_input": "2024-02-05T20:41:53.235249Z",
- "iopub.status.busy": "2024-02-05T20:41:53.234750Z",
- "iopub.status.idle": "2024-02-05T20:41:53.287981Z",
- "shell.execute_reply": "2024-02-05T20:41:53.287436Z"
+ "iopub.execute_input": "2024-03-26T16:30:00.643833Z",
+ "iopub.status.busy": "2024-03-26T16:30:00.643524Z",
+ "iopub.status.idle": "2024-03-26T16:30:00.701815Z",
+ "shell.execute_reply": "2024-03-26T16:30:00.701030Z"
}
},
"outputs": [
@@ -506,56 +441,56 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "20:41:53 - cmdstanpy - INFO - Chain [1] start processing\n"
+ "16:30:00 - cmdstanpy - INFO - Chain [1] start processing\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
- "20:41:53 - cmdstanpy - INFO - Chain [2] start processing\n"
+ "16:30:00 - cmdstanpy - INFO - Chain [2] start processing\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
- "20:41:53 - cmdstanpy - INFO - Chain [1] done processing\n"
+ "16:30:00 - cmdstanpy - INFO - Chain [1] done processing\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
- "20:41:53 - cmdstanpy - INFO - Chain [3] start processing\n"
+ "16:30:00 - cmdstanpy - INFO - Chain [3] start processing\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
- "20:41:53 - cmdstanpy - INFO - Chain [2] done processing\n"
+ "16:30:00 - cmdstanpy - INFO - Chain [2] done processing\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
- "20:41:53 - cmdstanpy - INFO - Chain [4] start processing\n"
+ "16:30:00 - cmdstanpy - INFO - Chain [4] start processing\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
- "20:41:53 - cmdstanpy - INFO - Chain [3] done processing\n"
+ "16:30:00 - cmdstanpy - INFO - Chain [3] done processing\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
- "20:41:53 - cmdstanpy - INFO - Chain [4] done processing\n"
+ "16:30:00 - cmdstanpy - INFO - Chain [4] done processing\n"
]
}
],
@@ -577,10 +512,10 @@
"execution_count": 7,
"metadata": {
"execution": {
- "iopub.execute_input": "2024-02-05T20:41:53.290756Z",
- "iopub.status.busy": "2024-02-05T20:41:53.290376Z",
- "iopub.status.idle": "2024-02-05T20:41:53.313591Z",
- "shell.execute_reply": "2024-02-05T20:41:53.312968Z"
+ "iopub.execute_input": "2024-03-26T16:30:00.704656Z",
+ "iopub.status.busy": "2024-03-26T16:30:00.704243Z",
+ "iopub.status.idle": "2024-03-26T16:30:00.728113Z",
+ "shell.execute_reply": "2024-03-26T16:30:00.727557Z"
}
},
"outputs": [
@@ -588,28 +523,28 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "20:41:53 - cmdstanpy - WARNING - Sample doesn't contain draws from warmup iterations, rerun sampler with \"save_warmup=True\".\n"
+ "16:30:00 - cmdstanpy - WARNING - Sample doesn't contain draws from warmup iterations, rerun sampler with \"save_warmup=True\".\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
- "20:41:53 - cmdstanpy - WARNING - Sample doesn't contain draws from warmup iterations, rerun sampler with \"save_warmup=True\".\n"
+ "16:30:00 - cmdstanpy - WARNING - Sample doesn't contain draws from warmup iterations, rerun sampler with \"save_warmup=True\".\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
- "20:41:53 - cmdstanpy - WARNING - Sample doesn't contain draws from warmup iterations, rerun sampler with \"save_warmup=True\".\n"
+ "16:30:00 - cmdstanpy - WARNING - Sample doesn't contain draws from warmup iterations, rerun sampler with \"save_warmup=True\".\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
- "20:41:53 - cmdstanpy - WARNING - Sample doesn't contain draws from warmup iterations, rerun sampler with \"save_warmup=True\".\n"
+ "16:30:00 - cmdstanpy - WARNING - Sample doesn't contain draws from warmup iterations, rerun sampler with \"save_warmup=True\".\n"
]
},
{
@@ -617,18 +552,18 @@
"output_type": "stream",
"text": [
"(1000, 4, 10) ('y_rep[1]', 'y_rep[2]', 'y_rep[3]', 'y_rep[4]', 'y_rep[5]', 'y_rep[6]', 'y_rep[7]', 'y_rep[8]', 'y_rep[9]', 'y_rep[10]')\n",
- "[[0. 0. 0. 1. 1. 0. 0. 0. 0. 0.]\n",
- " [0. 0. 1. 1. 1. 0. 0. 0. 0. 1.]\n",
- " [0. 0. 1. 1. 1. 0. 0. 0. 0. 1.]\n",
- " [0. 0. 0. 1. 1. 0. 0. 0. 0. 0.]]\n",
- "[[0. 1. 0. 1. 0. 0. 1. 1. 0. 1.]\n",
- " [0. 1. 0. 1. 0. 0. 1. 0. 0. 0.]\n",
- " [0. 1. 0. 1. 0. 0. 1. 0. 0. 0.]\n",
- " [0. 1. 0. 0. 0. 0. 1. 0. 0. 0.]]\n",
- "[[1. 1. 0. 0. 0. 0. 0. 1. 0. 0.]\n",
- " [1. 1. 0. 0. 0. 0. 0. 1. 0. 0.]\n",
- " [1. 1. 0. 0. 0. 0. 0. 0. 0. 0.]\n",
- " [0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]\n"
+ "[[0. 0. 0. 0. 1. 1. 0. 0. 0. 0.]\n",
+ " [0. 0. 0. 0. 1. 1. 0. 0. 0. 0.]\n",
+ " [0. 0. 0. 0. 1. 1. 0. 0. 0. 0.]\n",
+ " [0. 0. 0. 0. 1. 1. 0. 0. 0. 0.]]\n",
+ "[[0. 0. 0. 1. 0. 0. 0. 0. 1. 1.]\n",
+ " [0. 0. 0. 1. 0. 0. 0. 0. 1. 1.]\n",
+ " [0. 0. 0. 1. 0. 0. 0. 0. 1. 1.]\n",
+ " [0. 0. 0. 1. 0. 0. 0. 0. 0. 1.]]\n",
+ "[[0. 0. 0. 0. 0. 0. 0. 0. 1. 1.]\n",
+ " [0. 0. 0. 0. 0. 0. 0. 0. 1. 1.]\n",
+ " [0. 0. 0. 0. 0. 0. 0. 0. 1. 1.]\n",
+ " [0. 0. 0. 0. 0. 0. 0. 0. 1. 1.]]\n"
]
}
],
@@ -650,10 +585,10 @@
"execution_count": 8,
"metadata": {
"execution": {
- "iopub.execute_input": "2024-02-05T20:41:53.316211Z",
- "iopub.status.busy": "2024-02-05T20:41:53.315860Z",
- "iopub.status.idle": "2024-02-05T20:41:53.337714Z",
- "shell.execute_reply": "2024-02-05T20:41:53.337056Z"
+ "iopub.execute_input": "2024-03-26T16:30:00.730757Z",
+ "iopub.status.busy": "2024-03-26T16:30:00.730373Z",
+ "iopub.status.idle": "2024-03-26T16:30:00.752951Z",
+ "shell.execute_reply": "2024-03-26T16:30:00.752316Z"
}
},
"outputs": [
@@ -661,7 +596,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "20:41:53 - cmdstanpy - WARNING - Sample doesn't contain draws from warmup iterations, rerun sampler with \"save_warmup=True\".\n"
+ "16:30:00 - cmdstanpy - WARNING - Sample doesn't contain draws from warmup iterations, rerun sampler with \"save_warmup=True\".\n"
]
},
{
@@ -718,75 +653,75 @@
" \n",
" \n",
" 0 \n",
- " -6.94949 \n",
- " 0.891339 \n",
- " 0.834473 \n",
- " 1.0 \n",
+ " -6.88937 \n",
+ " 0.797647 \n",
+ " 1.17203 \n",
+ " 2.0 \n",
" 3.0 \n",
" 0.0 \n",
- " 7.75373 \n",
- " 0.176755 \n",
+ " 7.55922 \n",
+ " 0.320028 \n",
" 1.0 \n",
" 1.0 \n",
" ... \n",
" 0.0 \n",
" 0.0 \n",
" 0.0 \n",
+ " 0.0 \n",
" 1.0 \n",
" 1.0 \n",
" 0.0 \n",
" 0.0 \n",
" 0.0 \n",
" 0.0 \n",
- " 0.0 \n",
" \n",
" \n",
" 1 \n",
- " -7.04638 \n",
- " 0.988320 \n",
- " 0.834473 \n",
- " 2.0 \n",
- " 7.0 \n",
+ " -6.92103 \n",
+ " 0.987890 \n",
+ " 1.17203 \n",
+ " 1.0 \n",
+ " 1.0 \n",
" 0.0 \n",
- " 7.14451 \n",
- " 0.353708 \n",
+ " 6.96935 \n",
+ " 0.327844 \n",
" 1.0 \n",
" 2.0 \n",
" ... \n",
" 0.0 \n",
- " 1.0 \n",
+ " 0.0 \n",
" 0.0 \n",
" 1.0 \n",
" 0.0 \n",
" 0.0 \n",
- " 1.0 \n",
- " 1.0 \n",
" 0.0 \n",
+ " 0.0 \n",
+ " 1.0 \n",
" 1.0 \n",
" \n",
" \n",
" 2 \n",
- " -7.43716 \n",
- " 0.915396 \n",
- " 0.834473 \n",
- " 1.0 \n",
- " 1.0 \n",
+ " -6.77679 \n",
+ " 0.998779 \n",
+ " 1.17203 \n",
+ " 2.0 \n",
+ " 3.0 \n",
" 0.0 \n",
- " 7.44403 \n",
- " 0.411715 \n",
+ " 6.94984 \n",
+ " 0.220845 \n",
" 1.0 \n",
" 3.0 \n",
" ... \n",
- " 1.0 \n",
- " 1.0 \n",
" 0.0 \n",
" 0.0 \n",
" 0.0 \n",
" 0.0 \n",
" 0.0 \n",
- " 1.0 \n",
" 0.0 \n",
" 0.0 \n",
+ " 0.0 \n",
+ " 1.0 \n",
+ " 1.0 \n",
" \n",
" \n",
"\n",
@@ -795,19 +730,19 @@
],
"text/plain": [
" lp__ accept_stat__ stepsize__ treedepth__ n_leapfrog__ divergent__ \\\n",
- "0 -6.94949 0.891339 0.834473 1.0 3.0 0.0 \n",
- "1 -7.04638 0.988320 0.834473 2.0 7.0 0.0 \n",
- "2 -7.43716 0.915396 0.834473 1.0 1.0 0.0 \n",
+ "0 -6.88937 0.797647 1.17203 2.0 3.0 0.0 \n",
+ "1 -6.92103 0.987890 1.17203 1.0 1.0 0.0 \n",
+ "2 -6.77679 0.998779 1.17203 2.0 3.0 0.0 \n",
"\n",
" energy__ theta chain__ iter__ ... y_rep[1] y_rep[2] y_rep[3] \\\n",
- "0 7.75373 0.176755 1.0 1.0 ... 0.0 0.0 0.0 \n",
- "1 7.14451 0.353708 1.0 2.0 ... 0.0 1.0 0.0 \n",
- "2 7.44403 0.411715 1.0 3.0 ... 1.0 1.0 0.0 \n",
+ "0 7.55922 0.320028 1.0 1.0 ... 0.0 0.0 0.0 \n",
+ "1 6.96935 0.327844 1.0 2.0 ... 0.0 0.0 0.0 \n",
+ "2 6.94984 0.220845 1.0 3.0 ... 0.0 0.0 0.0 \n",
"\n",
" y_rep[4] y_rep[5] y_rep[6] y_rep[7] y_rep[8] y_rep[9] y_rep[10] \n",
- "0 1.0 1.0 0.0 0.0 0.0 0.0 0.0 \n",
- "1 1.0 0.0 0.0 1.0 1.0 0.0 1.0 \n",
- "2 0.0 0.0 0.0 0.0 1.0 0.0 0.0 \n",
+ "0 0.0 1.0 1.0 0.0 0.0 0.0 0.0 \n",
+ "1 1.0 0.0 0.0 0.0 0.0 1.0 1.0 \n",
+ "2 0.0 0.0 0.0 0.0 0.0 1.0 1.0 \n",
"\n",
"[3 rows x 21 columns]"
]
@@ -854,6 +789,1454 @@
"interpreter": {
"hash": "8765ce46b013071999fc1966b52035a7309a0da7551e066cc0f0fa23e83d4f60"
}
+ },
+ "widgets": {
+ "application/vnd.jupyter.widget-state+json": {
+ "state": {
+ "00ab955b93cd4fc0b4d308eed5ed70ee": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "2.0.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_allow_html": false,
+ "layout": "IPY_MODEL_079e4aa3f76546339441ec3bf3e9f50b",
+ "placeholder": "",
+ "style": "IPY_MODEL_aa1da1cfce1e48d7b1893908912b743f",
+ "tabbable": null,
+ "tooltip": null,
+ "value": " 00:00 Sampling completed"
+ }
+ },
+ "04f0d3c32e1c4d3785ff233e8546a3b2": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "ProgressStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "StyleView",
+ "bar_color": "blue",
+ "description_width": ""
+ }
+ },
+ "079e4aa3f76546339441ec3bf3e9f50b": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "2.0.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "2.0.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border_bottom": null,
+ "border_left": null,
+ "border_right": null,
+ "border_top": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "0bd3a7ab41e5435a801932739cd5ee14": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "2.0.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_allow_html": false,
+ "layout": "IPY_MODEL_b86307f5840f4717b92ac230a92c1ea7",
+ "placeholder": "",
+ "style": "IPY_MODEL_64aaa916de0443259c62b9a950821463",
+ "tabbable": null,
+ "tooltip": null,
+ "value": "chain 3 "
+ }
+ },
+ "104a5f127d1c4e5e8652f068de1a1d0e": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "2.0.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "2.0.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border_bottom": null,
+ "border_left": null,
+ "border_right": null,
+ "border_top": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "10bfac88d68441b497aaef36aab0b23b": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "HTMLStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "HTMLStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "StyleView",
+ "background": null,
+ "description_width": "",
+ "font_size": null,
+ "text_color": null
+ }
+ },
+ "25bff242c37c47db90917cf6f6b63e88": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "2.0.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "2.0.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border_bottom": null,
+ "border_left": null,
+ "border_right": null,
+ "border_top": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "2759c543db6546689c6c66dd7793cd0e": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "ProgressStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "StyleView",
+ "bar_color": "blue",
+ "description_width": ""
+ }
+ },
+ "29178bb17df2405d810e3c2bc30d80dd": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "2.0.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_allow_html": false,
+ "layout": "IPY_MODEL_104a5f127d1c4e5e8652f068de1a1d0e",
+ "placeholder": "",
+ "style": "IPY_MODEL_b55ac73097bd40dc88c4841bc6da65f4",
+ "tabbable": null,
+ "tooltip": null,
+ "value": " 00:00 Sampling completed"
+ }
+ },
+ "2c4f551f9ca24aea91e4e41e1d3fca62": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "HTMLStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "HTMLStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "StyleView",
+ "background": null,
+ "description_width": "",
+ "font_size": null,
+ "text_color": null
+ }
+ },
+ "2d35e6bfb64f447bb5ca2bd8eadd7931": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "2.0.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "2.0.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border_bottom": null,
+ "border_left": null,
+ "border_right": null,
+ "border_top": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "333d5fa32c8f4afc8bab74bf2de628bb": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "FloatProgressModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "2.0.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_allow_html": false,
+ "layout": "IPY_MODEL_bd13fb45f2ee4cc7aaee9ec5ccdce3b7",
+ "max": 22.0,
+ "min": 0.0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_2759c543db6546689c6c66dd7793cd0e",
+ "tabbable": null,
+ "tooltip": null,
+ "value": 22.0
+ }
+ },
+ "3a57ecb37c674a3bb7a09f06380f34b0": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "ProgressStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "StyleView",
+ "bar_color": "blue",
+ "description_width": ""
+ }
+ },
+ "40fa2fa8776442dabff2e0d8efc97a10": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "2.0.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "2.0.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border_bottom": null,
+ "border_left": null,
+ "border_right": null,
+ "border_top": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "46749833be2f497f927fdb925199d231": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "HBoxModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "2.0.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_cc2103fbcb444c678e0787d5ac009d76",
+ "IPY_MODEL_333d5fa32c8f4afc8bab74bf2de628bb",
+ "IPY_MODEL_00ab955b93cd4fc0b4d308eed5ed70ee"
+ ],
+ "layout": "IPY_MODEL_25bff242c37c47db90917cf6f6b63e88",
+ "tabbable": null,
+ "tooltip": null
+ }
+ },
+ "4bebdb23c6a84385b8e3810b42f6aa90": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "2.0.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "2.0.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border_bottom": null,
+ "border_left": null,
+ "border_right": null,
+ "border_top": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "4c8ad1ce320d416d9f0546a70e011077": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "2.0.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "2.0.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border_bottom": null,
+ "border_left": null,
+ "border_right": null,
+ "border_top": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "51cc65b1dab74732a326e0813c790238": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "2.0.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "2.0.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border_bottom": null,
+ "border_left": null,
+ "border_right": null,
+ "border_top": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "6105bd9971de41869eacddfefee7ed55": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "2.0.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "2.0.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border_bottom": null,
+ "border_left": null,
+ "border_right": null,
+ "border_top": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "64aaa916de0443259c62b9a950821463": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "HTMLStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "HTMLStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "StyleView",
+ "background": null,
+ "description_width": "",
+ "font_size": null,
+ "text_color": null
+ }
+ },
+ "79fd2122d8c7480f90f92facc7b379cd": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "FloatProgressModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "2.0.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_allow_html": false,
+ "layout": "IPY_MODEL_4bebdb23c6a84385b8e3810b42f6aa90",
+ "max": 22.0,
+ "min": 0.0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_04f0d3c32e1c4d3785ff233e8546a3b2",
+ "tabbable": null,
+ "tooltip": null,
+ "value": 22.0
+ }
+ },
+ "7d37d88f7fd94f858a1311c4633b682b": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "HTMLStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "HTMLStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "StyleView",
+ "background": null,
+ "description_width": "",
+ "font_size": null,
+ "text_color": null
+ }
+ },
+ "822f36b9135d45708b47164ba5bac44f": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "2.0.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "2.0.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border_bottom": null,
+ "border_left": null,
+ "border_right": null,
+ "border_top": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "89379932a0ea4cd890e51824c29592fb": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "HTMLStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "HTMLStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "StyleView",
+ "background": null,
+ "description_width": "",
+ "font_size": null,
+ "text_color": null
+ }
+ },
+ "8f8c090234294827af7867a5a5180bf6": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "2.0.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_allow_html": false,
+ "layout": "IPY_MODEL_51cc65b1dab74732a326e0813c790238",
+ "placeholder": "",
+ "style": "IPY_MODEL_2c4f551f9ca24aea91e4e41e1d3fca62",
+ "tabbable": null,
+ "tooltip": null,
+ "value": " 00:00 Sampling completed"
+ }
+ },
+ "983dba80cf69498ab0871f2eb9d1069b": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "ProgressStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "StyleView",
+ "bar_color": "blue",
+ "description_width": ""
+ }
+ },
+ "9e1a91cb16e045dfa9e79702013edce7": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "2.0.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "2.0.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border_bottom": null,
+ "border_left": null,
+ "border_right": null,
+ "border_top": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "a5ba78f0d6b449258594b70792b5c87d": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "2.0.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "2.0.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border_bottom": null,
+ "border_left": null,
+ "border_right": null,
+ "border_top": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "aa1da1cfce1e48d7b1893908912b743f": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "HTMLStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "HTMLStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "StyleView",
+ "background": null,
+ "description_width": "",
+ "font_size": null,
+ "text_color": null
+ }
+ },
+ "b55ac73097bd40dc88c4841bc6da65f4": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "HTMLStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "HTMLStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "StyleView",
+ "background": null,
+ "description_width": "",
+ "font_size": null,
+ "text_color": null
+ }
+ },
+ "b86307f5840f4717b92ac230a92c1ea7": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "2.0.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "2.0.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border_bottom": null,
+ "border_left": null,
+ "border_right": null,
+ "border_top": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "b8919ce9a6be45919f23cbbc12a0d5a0": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "FloatProgressModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "2.0.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_allow_html": false,
+ "layout": "IPY_MODEL_4c8ad1ce320d416d9f0546a70e011077",
+ "max": 22.0,
+ "min": 0.0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_983dba80cf69498ab0871f2eb9d1069b",
+ "tabbable": null,
+ "tooltip": null,
+ "value": 22.0
+ }
+ },
+ "bd13fb45f2ee4cc7aaee9ec5ccdce3b7": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "2.0.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "2.0.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border_bottom": null,
+ "border_left": null,
+ "border_right": null,
+ "border_top": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "c039165d16c9452d916585c7d3e58b9f": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "2.0.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_allow_html": false,
+ "layout": "IPY_MODEL_2d35e6bfb64f447bb5ca2bd8eadd7931",
+ "placeholder": "",
+ "style": "IPY_MODEL_fead15008e784056b17cfe54bdf4aa80",
+ "tabbable": null,
+ "tooltip": null,
+ "value": "chain 1 "
+ }
+ },
+ "cc2103fbcb444c678e0787d5ac009d76": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "2.0.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_allow_html": false,
+ "layout": "IPY_MODEL_d1afd8b2e292433da66af031a060885f",
+ "placeholder": "",
+ "style": "IPY_MODEL_7d37d88f7fd94f858a1311c4633b682b",
+ "tabbable": null,
+ "tooltip": null,
+ "value": "chain 2 "
+ }
+ },
+ "d1afd8b2e292433da66af031a060885f": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "2.0.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "2.0.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border_bottom": null,
+ "border_left": null,
+ "border_right": null,
+ "border_top": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "d8220583290146688fa0dcd1925ad1e5": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "HBoxModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "2.0.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_c039165d16c9452d916585c7d3e58b9f",
+ "IPY_MODEL_b8919ce9a6be45919f23cbbc12a0d5a0",
+ "IPY_MODEL_29178bb17df2405d810e3c2bc30d80dd"
+ ],
+ "layout": "IPY_MODEL_6105bd9971de41869eacddfefee7ed55",
+ "tabbable": null,
+ "tooltip": null
+ }
+ },
+ "da5c161a59bc4cdaa557c3421c211f32": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "HBoxModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "2.0.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_fa74efb787764dae83df11a5414a8ef2",
+ "IPY_MODEL_79fd2122d8c7480f90f92facc7b379cd",
+ "IPY_MODEL_de7894bcba02475d95329637fa78f1e7"
+ ],
+ "layout": "IPY_MODEL_f4534d6e7dd74edfaf4f4393e84e040c",
+ "tabbable": null,
+ "tooltip": null
+ }
+ },
+ "de7894bcba02475d95329637fa78f1e7": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "2.0.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_allow_html": false,
+ "layout": "IPY_MODEL_40fa2fa8776442dabff2e0d8efc97a10",
+ "placeholder": "",
+ "style": "IPY_MODEL_10bfac88d68441b497aaef36aab0b23b",
+ "tabbable": null,
+ "tooltip": null,
+ "value": " 00:00 Sampling completed"
+ }
+ },
+ "e26e42d843e54aef9d35dc348adab5bb": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "FloatProgressModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "2.0.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_allow_html": false,
+ "layout": "IPY_MODEL_a5ba78f0d6b449258594b70792b5c87d",
+ "max": 22.0,
+ "min": 0.0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_3a57ecb37c674a3bb7a09f06380f34b0",
+ "tabbable": null,
+ "tooltip": null,
+ "value": 22.0
+ }
+ },
+ "f4534d6e7dd74edfaf4f4393e84e040c": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "2.0.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "2.0.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border_bottom": null,
+ "border_left": null,
+ "border_right": null,
+ "border_top": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "f52d3bec872b4454b3ab4912a4ccd42a": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "HBoxModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "2.0.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_0bd3a7ab41e5435a801932739cd5ee14",
+ "IPY_MODEL_e26e42d843e54aef9d35dc348adab5bb",
+ "IPY_MODEL_8f8c090234294827af7867a5a5180bf6"
+ ],
+ "layout": "IPY_MODEL_822f36b9135d45708b47164ba5bac44f",
+ "tabbable": null,
+ "tooltip": null
+ }
+ },
+ "fa74efb787764dae83df11a5414a8ef2": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "2.0.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_allow_html": false,
+ "layout": "IPY_MODEL_9e1a91cb16e045dfa9e79702013edce7",
+ "placeholder": "",
+ "style": "IPY_MODEL_89379932a0ea4cd890e51824c29592fb",
+ "tabbable": null,
+ "tooltip": null,
+ "value": "chain 4 "
+ }
+ },
+ "fead15008e784056b17cfe54bdf4aa80": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "2.0.0",
+ "model_name": "HTMLStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "2.0.0",
+ "_model_name": "HTMLStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "2.0.0",
+ "_view_name": "StyleView",
+ "background": null,
+ "description_width": "",
+ "font_size": null,
+ "text_color": null
+ }
+ }
+ },
+ "version_major": 2,
+ "version_minor": 0
+ }
}
},
"nbformat": 4,
diff --git a/docs/users-guide/examples/Using External C++.html b/docs/users-guide/examples/Using External C++.html
index fcbf2c41..6b17cd33 100644
--- a/docs/users-guide/examples/Using External C++.html
+++ b/docs/users-guide/examples/Using External C++.html
@@ -6,7 +6,7 @@
- Advanced Topic: Using External C++ Functions — CmdStanPy 1.2.1 documentation
+ Advanced Topic: Using External C++ Functions — CmdStanPy 1.2.2 documentation
@@ -39,6 +39,7 @@
+
@@ -64,7 +65,7 @@
diff --git a/docs/users-guide/examples/VI as Sampler Inits.html b/docs/users-guide/examples/VI as Sampler Inits.html
index 22ef9a13..72a53041 100644
--- a/docs/users-guide/examples/VI as Sampler Inits.html
+++ b/docs/users-guide/examples/VI as Sampler Inits.html
@@ -6,7 +6,7 @@
- Using Variational Estimates to Initialize the NUTS-HMC Sampler — CmdStanPy 1.2.1 documentation
+ Using Variational Estimates to Initialize the NUTS-HMC Sampler — CmdStanPy 1.2.2 documentation
@@ -39,6 +39,7 @@
+
@@ -64,7 +65,7 @@
diff --git a/docs/users-guide/examples/Variational Inference.html b/docs/users-guide/examples/Variational Inference.html
index b5afc877..de350720 100644
--- a/docs/users-guide/examples/Variational Inference.html
+++ b/docs/users-guide/examples/Variational Inference.html
@@ -6,7 +6,7 @@
- Variational Inference using ADVI — CmdStanPy 1.2.1 documentation
+ Variational Inference using ADVI — CmdStanPy 1.2.2 documentation
@@ -39,6 +39,7 @@
+
@@ -64,7 +65,7 @@
@@ -310,16 +311,7 @@ Example: variational inference for model
-/opt/hostedtoolcache/Python/3.9.18/x64/lib/python3.9/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html
- from .autonotebook import tqdm as notebook_tqdm
-
-
-
-
-
-
-
-20:41:57 - cmdstanpy - INFO - Chain [1] start processing
+16:30:04 - cmdstanpy - INFO - Chain [1] start processing
@@ -327,7 +319,7 @@ Example: variational inference for model
-20:41:57 - cmdstanpy - INFO - Chain [1] done processing
+16:30:04 - cmdstanpy - INFO - Chain [1] done processing
The class `CmdStanVB
<https://mc-stan.org/cmdstanpy/api.html#cmdstanvb>`__ provides the following properties to access information about the parameter names, estimated means, and the sample:
-0.234156
+0.228927
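The value changed above is the estimated mean of theta reported by the CmdStanVB object mentioned in the surrounding page. A minimal sketch of how those accessors are typically used (illustrative only, not part of this diff; it assumes the bernoulli example model and data bundled with CmdStan, and the seed is arbitrary, so the printed mean will not match the 0.228927 shown here):

    import os
    from cmdstanpy import CmdStanModel, cmdstan_path

    # bernoulli example shipped with CmdStan (assumed to be installed)
    bernoulli_dir = os.path.join(cmdstan_path(), 'examples', 'bernoulli')
    model = CmdStanModel(stan_file=os.path.join(bernoulli_dir, 'bernoulli.stan'))

    # fit the approximate posterior with ADVI
    vi = model.variational(
        data=os.path.join(bernoulli_dir, 'bernoulli.data.json'),
        seed=123,  # arbitrary seed; results will differ from the diff above
    )

    print(vi.column_names)                      # names of the output columns
    print(vi.variational_params_dict['theta'])  # estimated mean of theta
    print(vi.variational_sample.shape)          # draws from the approximate posterior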
@@ -404,7 +396,7 @@ Example: variational inference for model
-20:41:57 - cmdstanpy - INFO - compiling stan file /home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/eta_should_fail.stan to exe file /home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/eta_should_fail
+16:30:04 - cmdstanpy - INFO - compiling stan file /home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/eta_should_fail.stan to exe file /home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/eta_should_fail
@@ -412,7 +404,7 @@ Example: variational inference for model
-20:42:13 - cmdstanpy - INFO - compiled model executable: /home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/eta_should_fail
+16:30:21 - cmdstanpy - INFO - compiled model executable: /home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/eta_should_fail
@@ -420,7 +412,7 @@ Example: variational inference for model
-20:42:13 - cmdstanpy - INFO - Chain [1] start processing
+16:30:21 - cmdstanpy - INFO - Chain [1] start processing
@@ -428,7 +420,7 @@ Example: variational inference for model
-20:42:13 - cmdstanpy - INFO - Chain [1] done processing
+16:30:21 - cmdstanpy - INFO - Chain [1] done processing
@@ -442,20 +434,20 @@ Example: variational inference for model 1 model_fail = CmdStanModel(stan_file='eta_should_fail.stan')
----> 2 vi_fail = model_fail.variational()
-File ~/work/cmdstanpy/cmdstanpy/cmdstanpy/model.py:1575, in CmdStanModel.variational(self, data, seed, inits, output_dir, sig_figs, save_latent_dynamics, save_profile, algorithm, iter, grad_samples, elbo_samples, eta, adapt_engaged, adapt_iter, tol_rel_obj, eval_elbo, draws, require_converged, show_console, refresh, time_fmt, timeout, output_samples)
- 1573 if len(re.findall(pat, contents)) > 0:
- 1574 if require_converged:
--> 1575 raise RuntimeError(
- 1576 'The algorithm may not have converged.\n'
- 1577 'If you would like to inspect the output, '
- 1578 're-call with require_converged=False'
- 1579 )
- 1580 # else:
- 1581 get_logger().warning(
- 1582 '%s\n%s',
- 1583 'The algorithm may not have converged.',
- 1584 'Proceeding because require_converged is set to False',
- 1585 )
+File ~/work/cmdstanpy/cmdstanpy/cmdstanpy/model.py:1527, in CmdStanModel.variational(self, data, seed, inits, output_dir, sig_figs, save_latent_dynamics, save_profile, algorithm, iter, grad_samples, elbo_samples, eta, adapt_engaged, adapt_iter, tol_rel_obj, eval_elbo, draws, require_converged, show_console, refresh, time_fmt, timeout, output_samples)
+ 1525 if len(re.findall(pat, contents)) > 0:
+ 1526 if require_converged:
+-> 1527 raise RuntimeError(
+ 1528 'The algorithm may not have converged.\n'
+ 1529 'If you would like to inspect the output, '
+ 1530 're-call with require_converged=False'
+ 1531 )
+ 1532 # else:
+ 1533 get_logger().warning(
+ 1534 '%s\n%s',
+ 1535 'The algorithm may not have converged.',
+ 1536 'Proceeding because require_converged is set to False',
+ 1537 )
RuntimeError: The algorithm may not have converged.
If you would like to inspect the output, re-call with require_converged=False
@@ -475,7 +467,7 @@ Example: variational inference for model
-20:42:13 - cmdstanpy - INFO - Chain [1] start processing
+16:30:21 - cmdstanpy - INFO - Chain [1] start processing
@@ -483,7 +475,7 @@ Example: variational inference for model
-20:42:13 - cmdstanpy - INFO - Chain [1] done processing
+16:30:21 - cmdstanpy - INFO - Chain [1] done processing
@@ -491,7 +483,7 @@ Example: variational inference for model
-20:42:13 - cmdstanpy - WARNING - The algorithm may not have converged.
+16:30:21 - cmdstanpy - WARNING - The algorithm may not have converged.
Proceeding because require_converged is set to False
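As the log lines above indicate, when ADVI fails its convergence check the default behaviour is to raise a RuntimeError, and passing require_converged=False downgrades that to a warning so the (possibly unreliable) estimates can still be inspected. A hedged sketch of that call, assuming the eta_should_fail.stan example used in this notebook is available locally and using an arbitrary seed:

    from cmdstanpy import CmdStanModel

    model_fail = CmdStanModel(stan_file='eta_should_fail.stan')

    # non-convergence is logged as a warning instead of raising RuntimeError,
    # and the approximate estimates remain accessible for inspection
    vi_fail = model_fail.variational(require_converged=False, seed=123)
    print(vi_fail.variational_params_dict)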
@@ -513,8 +505,8 @@ Example: variational inference for model API documentation for a full description of all arguments.
diff --git a/docs/users-guide/examples/Variational Inference.ipynb b/docs/users-guide/examples/Variational Inference.ipynb
index ba94c842..809a35ff 100644
--- a/docs/users-guide/examples/Variational Inference.ipynb
+++ b/docs/users-guide/examples/Variational Inference.ipynb
@@ -38,10 +38,10 @@
"execution_count": 1,
"metadata": {
"execution": {
- "iopub.execute_input": "2024-02-05T20:41:57.025293Z",
- "iopub.status.busy": "2024-02-05T20:41:57.024750Z",
- "iopub.status.idle": "2024-02-05T20:41:57.412180Z",
- "shell.execute_reply": "2024-02-05T20:41:57.411608Z"
+ "iopub.execute_input": "2024-03-26T16:30:04.461512Z",
+ "iopub.status.busy": "2024-03-26T16:30:04.461313Z",
+ "iopub.status.idle": "2024-03-26T16:30:04.909486Z",
+ "shell.execute_reply": "2024-03-26T16:30:04.908727Z"
}
},
"outputs": [
@@ -49,22 +49,14 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "/opt/hostedtoolcache/Python/3.9.18/x64/lib/python3.9/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
- " from .autonotebook import tqdm as notebook_tqdm\n"
+ "16:30:04 - cmdstanpy - INFO - Chain [1] start processing\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
- "20:41:57 - cmdstanpy - INFO - Chain [1] start processing\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "20:41:57 - cmdstanpy - INFO - Chain [1] done processing\n"
+ "16:30:04 - cmdstanpy - INFO - Chain [1] done processing\n"
]
}
],
@@ -103,10 +95,10 @@
"execution_count": 2,
"metadata": {
"execution": {
- "iopub.execute_input": "2024-02-05T20:41:57.415226Z",
- "iopub.status.busy": "2024-02-05T20:41:57.414803Z",
- "iopub.status.idle": "2024-02-05T20:41:57.418492Z",
- "shell.execute_reply": "2024-02-05T20:41:57.417892Z"
+ "iopub.execute_input": "2024-03-26T16:30:04.912629Z",
+ "iopub.status.busy": "2024-03-26T16:30:04.912204Z",
+ "iopub.status.idle": "2024-03-26T16:30:04.916012Z",
+ "shell.execute_reply": "2024-03-26T16:30:04.915339Z"
},
"scrolled": true
},
@@ -128,10 +120,10 @@
"execution_count": 3,
"metadata": {
"execution": {
- "iopub.execute_input": "2024-02-05T20:41:57.420923Z",
- "iopub.status.busy": "2024-02-05T20:41:57.420528Z",
- "iopub.status.idle": "2024-02-05T20:41:57.424135Z",
- "shell.execute_reply": "2024-02-05T20:41:57.423500Z"
+ "iopub.execute_input": "2024-03-26T16:30:04.918680Z",
+ "iopub.status.busy": "2024-03-26T16:30:04.918296Z",
+ "iopub.status.idle": "2024-03-26T16:30:04.921670Z",
+ "shell.execute_reply": "2024-03-26T16:30:04.920959Z"
}
},
"outputs": [
@@ -139,7 +131,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "0.234156\n"
+ "0.228927\n"
]
}
],
@@ -152,10 +144,10 @@
"execution_count": 4,
"metadata": {
"execution": {
- "iopub.execute_input": "2024-02-05T20:41:57.426522Z",
- "iopub.status.busy": "2024-02-05T20:41:57.426142Z",
- "iopub.status.idle": "2024-02-05T20:41:57.429634Z",
- "shell.execute_reply": "2024-02-05T20:41:57.428999Z"
+ "iopub.execute_input": "2024-03-26T16:30:04.923992Z",
+ "iopub.status.busy": "2024-03-26T16:30:04.923642Z",
+ "iopub.status.idle": "2024-03-26T16:30:04.926981Z",
+ "shell.execute_reply": "2024-03-26T16:30:04.926356Z"
}
},
"outputs": [
@@ -185,10 +177,10 @@
"execution_count": 5,
"metadata": {
"execution": {
- "iopub.execute_input": "2024-02-05T20:41:57.432000Z",
- "iopub.status.busy": "2024-02-05T20:41:57.431700Z",
- "iopub.status.idle": "2024-02-05T20:42:13.701188Z",
- "shell.execute_reply": "2024-02-05T20:42:13.700551Z"
+ "iopub.execute_input": "2024-03-26T16:30:04.929323Z",
+ "iopub.status.busy": "2024-03-26T16:30:04.928957Z",
+ "iopub.status.idle": "2024-03-26T16:30:21.609075Z",
+ "shell.execute_reply": "2024-03-26T16:30:21.608322Z"
},
"tags": [
"raises-exception"
@@ -199,28 +191,28 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "20:41:57 - cmdstanpy - INFO - compiling stan file /home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/eta_should_fail.stan to exe file /home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/eta_should_fail\n"
+ "16:30:04 - cmdstanpy - INFO - compiling stan file /home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/eta_should_fail.stan to exe file /home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/eta_should_fail\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
- "20:42:13 - cmdstanpy - INFO - compiled model executable: /home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/eta_should_fail\n"
+ "16:30:21 - cmdstanpy - INFO - compiled model executable: /home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/eta_should_fail\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
- "20:42:13 - cmdstanpy - INFO - Chain [1] start processing\n"
+ "16:30:21 - cmdstanpy - INFO - Chain [1] start processing\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
- "20:42:13 - cmdstanpy - INFO - Chain [1] done processing\n"
+ "16:30:21 - cmdstanpy - INFO - Chain [1] done processing\n"
]
},
{
@@ -231,7 +223,7 @@
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mRuntimeError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[5], line 2\u001b[0m\n\u001b[1;32m 1\u001b[0m model_fail \u001b[38;5;241m=\u001b[39m CmdStanModel(stan_file\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124meta_should_fail.stan\u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[0;32m----> 2\u001b[0m vi_fail \u001b[38;5;241m=\u001b[39m \u001b[43mmodel_fail\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mvariational\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
- "File \u001b[0;32m~/work/cmdstanpy/cmdstanpy/cmdstanpy/model.py:1575\u001b[0m, in \u001b[0;36mCmdStanModel.variational\u001b[0;34m(self, data, seed, inits, output_dir, sig_figs, save_latent_dynamics, save_profile, algorithm, iter, grad_samples, elbo_samples, eta, adapt_engaged, adapt_iter, tol_rel_obj, eval_elbo, draws, require_converged, show_console, refresh, time_fmt, timeout, output_samples)\u001b[0m\n\u001b[1;32m 1573\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(re\u001b[38;5;241m.\u001b[39mfindall(pat, contents)) \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m0\u001b[39m:\n\u001b[1;32m 1574\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m require_converged:\n\u001b[0;32m-> 1575\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mRuntimeError\u001b[39;00m(\n\u001b[1;32m 1576\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mThe algorithm may not have converged.\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124m'\u001b[39m\n\u001b[1;32m 1577\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mIf you would like to inspect the output, \u001b[39m\u001b[38;5;124m'\u001b[39m\n\u001b[1;32m 1578\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mre-call with require_converged=False\u001b[39m\u001b[38;5;124m'\u001b[39m\n\u001b[1;32m 1579\u001b[0m )\n\u001b[1;32m 1580\u001b[0m \u001b[38;5;66;03m# else:\u001b[39;00m\n\u001b[1;32m 1581\u001b[0m get_logger()\u001b[38;5;241m.\u001b[39mwarning(\n\u001b[1;32m 1582\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;132;01m%s\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;132;01m%s\u001b[39;00m\u001b[38;5;124m'\u001b[39m,\n\u001b[1;32m 1583\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mThe algorithm may not have converged.\u001b[39m\u001b[38;5;124m'\u001b[39m,\n\u001b[1;32m 1584\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mProceeding because require_converged is set to False\u001b[39m\u001b[38;5;124m'\u001b[39m,\n\u001b[1;32m 1585\u001b[0m )\n",
+ "File \u001b[0;32m~/work/cmdstanpy/cmdstanpy/cmdstanpy/model.py:1527\u001b[0m, in \u001b[0;36mCmdStanModel.variational\u001b[0;34m(self, data, seed, inits, output_dir, sig_figs, save_latent_dynamics, save_profile, algorithm, iter, grad_samples, elbo_samples, eta, adapt_engaged, adapt_iter, tol_rel_obj, eval_elbo, draws, require_converged, show_console, refresh, time_fmt, timeout, output_samples)\u001b[0m\n\u001b[1;32m 1525\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(re\u001b[38;5;241m.\u001b[39mfindall(pat, contents)) \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m0\u001b[39m:\n\u001b[1;32m 1526\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m require_converged:\n\u001b[0;32m-> 1527\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mRuntimeError\u001b[39;00m(\n\u001b[1;32m 1528\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mThe algorithm may not have converged.\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124m'\u001b[39m\n\u001b[1;32m 1529\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mIf you would like to inspect the output, \u001b[39m\u001b[38;5;124m'\u001b[39m\n\u001b[1;32m 1530\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mre-call with require_converged=False\u001b[39m\u001b[38;5;124m'\u001b[39m\n\u001b[1;32m 1531\u001b[0m )\n\u001b[1;32m 1532\u001b[0m \u001b[38;5;66;03m# else:\u001b[39;00m\n\u001b[1;32m 1533\u001b[0m get_logger()\u001b[38;5;241m.\u001b[39mwarning(\n\u001b[1;32m 1534\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;132;01m%s\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;132;01m%s\u001b[39;00m\u001b[38;5;124m'\u001b[39m,\n\u001b[1;32m 1535\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mThe algorithm may not have converged.\u001b[39m\u001b[38;5;124m'\u001b[39m,\n\u001b[1;32m 1536\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mProceeding because require_converged is set to False\u001b[39m\u001b[38;5;124m'\u001b[39m,\n\u001b[1;32m 1537\u001b[0m )\n",
"\u001b[0;31mRuntimeError\u001b[0m: The algorithm may not have converged.\nIf you would like to inspect the output, re-call with require_converged=False"
]
}
@@ -253,10 +245,10 @@
"execution_count": 6,
"metadata": {
"execution": {
- "iopub.execute_input": "2024-02-05T20:42:13.703755Z",
- "iopub.status.busy": "2024-02-05T20:42:13.703551Z",
- "iopub.status.idle": "2024-02-05T20:42:13.746622Z",
- "shell.execute_reply": "2024-02-05T20:42:13.746094Z"
+ "iopub.execute_input": "2024-03-26T16:30:21.611786Z",
+ "iopub.status.busy": "2024-03-26T16:30:21.611575Z",
+ "iopub.status.idle": "2024-03-26T16:30:21.656310Z",
+ "shell.execute_reply": "2024-03-26T16:30:21.655745Z"
}
},
"outputs": [
@@ -264,21 +256,21 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "20:42:13 - cmdstanpy - INFO - Chain [1] start processing\n"
+ "16:30:21 - cmdstanpy - INFO - Chain [1] start processing\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
- "20:42:13 - cmdstanpy - INFO - Chain [1] done processing\n"
+ "16:30:21 - cmdstanpy - INFO - Chain [1] done processing\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
- "20:42:13 - cmdstanpy - WARNING - The algorithm may not have converged.\n",
+ "16:30:21 - cmdstanpy - WARNING - The algorithm may not have converged.\n",
"Proceeding because require_converged is set to False\n"
]
}
@@ -299,10 +291,10 @@
"execution_count": 7,
"metadata": {
"execution": {
- "iopub.execute_input": "2024-02-05T20:42:13.749332Z",
- "iopub.status.busy": "2024-02-05T20:42:13.748927Z",
- "iopub.status.idle": "2024-02-05T20:42:13.756070Z",
- "shell.execute_reply": "2024-02-05T20:42:13.755478Z"
+ "iopub.execute_input": "2024-03-26T16:30:21.659303Z",
+ "iopub.status.busy": "2024-03-26T16:30:21.658885Z",
+ "iopub.status.idle": "2024-03-26T16:30:21.665849Z",
+ "shell.execute_reply": "2024-03-26T16:30:21.665162Z"
}
},
"outputs": [
@@ -312,8 +304,8 @@
"OrderedDict([('lp__', 0.0),\n",
" ('log_p__', 0.0),\n",
" ('log_g__', 0.0),\n",
- " ('mu[1]', 0.00316325),\n",
- " ('mu[2]', 0.0245446)])"
+ " ('mu[1]', -0.0227764),\n",
+ " ('mu[2]', -0.057944)])"
]
},
"execution_count": 7,
diff --git a/docs/users-guide/hello_world.html b/docs/users-guide/hello_world.html
index e2ae5801..db22686e 100644
--- a/docs/users-guide/hello_world.html
+++ b/docs/users-guide/hello_world.html
@@ -6,7 +6,7 @@
- “Hello, World!” — CmdStanPy 1.2.1 documentation
+ “Hello, World!” — CmdStanPy 1.2.2 documentation
@@ -38,6 +38,7 @@
+
@@ -63,7 +64,7 @@
@@ -443,22 +444,22 @@ Accessing the results
# access model variable by name
In [9]: print(fit.stan_variable('theta'))
-[0.448643 0.176426 0.208806 ... 0.102241 0.145615 0.441178]
+[0.174379 0.142577 0.104175 ... 0.296568 0.275516 0.289409]
In [10]: print(fit.draws_pd('theta')[:3])
theta
-0 0.448643
-1 0.176426
-2 0.208806
+0 0.174379
+1 0.142577
+2 0.104175
In [11]: print(fit.draws_xr('theta'))
-<xarray.Dataset>
+<xarray.Dataset> Size: 40kB
Dimensions: (draw: 1000, chain: 4)
Coordinates:
- * chain (chain) int64 1 2 3 4
- * draw (draw) int64 0 1 2 3 4 5 6 7 8 ... 992 993 994 995 996 997 998 999
+ * chain (chain) int64 32B 1 2 3 4
+ * draw (draw) int64 8kB 0 1 2 3 4 5 6 7 ... 993 994 995 996 997 998 999
Data variables:
- theta (chain, draw) float64 0.4486 0.1764 0.2088 ... 0.1022 0.1456 0.4412
+ theta (chain, draw) float64 32kB 0.1744 0.1426 0.1042 ... 0.2755 0.2894
Attributes:
stan_version: 2.34.1
model: bernoulli_model
@@ -489,17 +490,17 @@ Accessing the resultsIn [15]: fit.draws_pd()
Out[15]:
chain__ iter__ draw__ ... divergent__ energy__ theta
-0 1.0 1.0 1.0 ... 0.0 7.87104 0.448643
-1 1.0 2.0 2.0 ... 0.0 8.25851 0.176426
-2 1.0 3.0 3.0 ... 0.0 6.91842 0.208806
-3 1.0 4.0 4.0 ... 0.0 6.79549 0.230222
-4 1.0 5.0 5.0 ... 0.0 6.89761 0.307075
+0 1.0 1.0 1.0 ... 0.0 6.96995 0.174379
+1 1.0 2.0 2.0 ... 0.0 7.23168 0.142577
+2 1.0 3.0 3.0 ... 0.0 8.29890 0.104175
+3 1.0 4.0 4.0 ... 0.0 8.29236 0.083123
+4 1.0 5.0 5.0 ... 0.0 8.59298 0.076468
... ... ... ... ... ... ... ...
-3995 4.0 996.0 3996.0 ... 0.0 8.17578 0.151596
-3996 4.0 997.0 3997.0 ... 0.0 7.24072 0.149352
-3997 4.0 998.0 3998.0 ... 0.0 7.81204 0.102241
-3998 4.0 999.0 3999.0 ... 0.0 7.71005 0.145615
-3999 4.0 1000.0 4000.0 ... 0.0 8.61979 0.441178
+3995 4.0 996.0 3996.0 ... 0.0 7.11186 0.362054
+3996 4.0 997.0 3997.0 ... 0.0 7.09438 0.333609
+3997 4.0 998.0 3998.0 ... 0.0 6.90865 0.296568
+3998 4.0 999.0 3999.0 ... 0.0 6.79980 0.275516
+3999 4.0 1000.0 4000.0 ... 0.0 6.79844 0.289409
[4000 rows x 11 columns]
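The outputs above come from the draw-extraction accessors on CmdStanMCMC; a short sketch of that usage, assuming the bernoulli example model and data used throughout the user guide:

    # Sketch of the accessors whose outputs changed above (new RNG seed, new draws).
    from cmdstanpy import CmdStanModel

    model = CmdStanModel(stan_file='bernoulli.stan')
    fit = model.sample(data='bernoulli.data.json')

    theta = fit.stan_variable('theta')   # numpy array, all chains concatenated
    print(theta[:3])

    print(fit.draws_pd('theta')[:3])     # same draws as a pandas DataFrame
    print(fit.draws_pd().shape)          # full sample: 4000 rows x 11 columns here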
@@ -511,13 +512,13 @@ Accessing the resultsdiag_e
In [17]: print(fit.metric)
-[[0.484622]
- [0.449072]
- [0.422025]
- [0.530255]]
+[[0.548827]
+ [0.618353]
+ [0.488154]
+ [0.506284]]
In [18]: print(fit.step_size)
-[0.996862 0.790725 1.14549 0.969396]
+[0.916475 0.945805 0.890823 1.05791 ]
The CmdStanMCMC object also provides access to metadata about the model and the sampler run.
@@ -525,7 +526,7 @@ Accessing the resultsbernoulli_model
In [20]: print(fit.metadata.cmdstan_config['seed'])
-38175
+5650
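A sketch of the run-level attributes whose values change in these hunks, continuing with the `fit` object from the previous sketch (`metric_type` is assumed to be the accessor behind the `diag_e` line):

    print(fit.metric_type)                        # e.g. 'diag_e'
    print(fit.metric)                             # per-chain metric, one row per chain
    print(fit.step_size)                          # per-chain step sizes
    print(fit.metadata.cmdstan_config['seed'])    # RNG seed recorded by CmdStan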
@@ -540,9 +541,9 @@ CmdStan utilities: In [21]: fit.summary()
Out[21]:
- Mean MCSE StdDev ... N_Eff N_Eff/s R_hat
-lp__ -7.282030 0.019091 0.747046 ... 1531.26 31250.2 1.002320
-theta 0.249207 0.003292 0.120195 ... 1333.06 27205.2 0.999556
+ Mean MCSE StdDev ... N_Eff N_Eff/s R_hat
+lp__ -7.244140 0.016066 0.686744 ... 1827.16 45679.0 1.00054
+theta 0.249347 0.003168 0.116642 ... 1356.00 33900.0 1.00428
[2 rows x 9 columns]
@@ -554,7 +555,7 @@ CmdStan utilities: diagnose()
method runs this utility and prints the output to the console.
In [22]: print(fit.diagnose())
-Processing csv files: /tmp/tmpld8wslo1/bernoulliozge2dfm/bernoulli-20240205204228_1.csv, /tmp/tmpld8wslo1/bernoulliozge2dfm/bernoulli-20240205204228_2.csv, /tmp/tmpld8wslo1/bernoulliozge2dfm/bernoulli-20240205204228_3.csv, /tmp/tmpld8wslo1/bernoulliozge2dfm/bernoulli-20240205204228_4.csv
+Processing csv files: /tmp/tmpgvv426mx/bernoullig33uprfe/bernoulli-20240326163037_1.csv, /tmp/tmpgvv426mx/bernoullig33uprfe/bernoulli-20240326163037_2.csv, /tmp/tmpgvv426mx/bernoullig33uprfe/bernoulli-20240326163037_3.csv, /tmp/tmpgvv426mx/bernoullig33uprfe/bernoulli-20240326163037_4.csv
Checking sampler transitions treedepth.
Treedepth satisfactory for all transitions.
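A sketch of the two wrapped CmdStan utilities shown above, again continuing with the same `fit` object:

    summary_df = fit.summary()              # stansummary: Mean, MCSE, StdDev, N_Eff, R_hat per variable
    print(summary_df.loc['theta', 'R_hat'])

    print(fit.diagnose())                   # diagnose: treedepth, divergence, E-BFMI, R_hat checks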
diff --git a/docs/users-guide/outputs.html b/docs/users-guide/outputs.html
index 62709097..7f925524 100644
--- a/docs/users-guide/outputs.html
+++ b/docs/users-guide/outputs.html
@@ -6,7 +6,7 @@
- Controlling Outputs — CmdStanPy 1.2.1 documentation
+ Controlling Outputs — CmdStanPy 1.2.2 documentation
@@ -38,6 +38,7 @@
+
@@ -63,7 +64,7 @@
@@ -306,15 +307,15 @@ CSV File OutputsIn [7]: print(fit)
CmdStanMCMC: model=bernoulli chains=4['method=sample', 'algorithm=hmc', 'adapt', 'engaged=1']
csv_files:
- /tmp/tmpld8wslo1/bernoulli_zi120vj/bernoulli-20240205204228_1.csv
- /tmp/tmpld8wslo1/bernoulli_zi120vj/bernoulli-20240205204228_2.csv
- /tmp/tmpld8wslo1/bernoulli_zi120vj/bernoulli-20240205204228_3.csv
- /tmp/tmpld8wslo1/bernoulli_zi120vj/bernoulli-20240205204228_4.csv
+ /tmp/tmpgvv426mx/bernoulliigzvjuln/bernoulli-20240326163037_1.csv
+ /tmp/tmpgvv426mx/bernoulliigzvjuln/bernoulli-20240326163037_2.csv
+ /tmp/tmpgvv426mx/bernoulliigzvjuln/bernoulli-20240326163037_3.csv
+ /tmp/tmpgvv426mx/bernoulliigzvjuln/bernoulli-20240326163037_4.csv
output_files:
- /tmp/tmpld8wslo1/bernoulli_zi120vj/bernoulli-20240205204228_0-stdout.txt
- /tmp/tmpld8wslo1/bernoulli_zi120vj/bernoulli-20240205204228_1-stdout.txt
- /tmp/tmpld8wslo1/bernoulli_zi120vj/bernoulli-20240205204228_2-stdout.txt
- /tmp/tmpld8wslo1/bernoulli_zi120vj/bernoulli-20240205204228_3-stdout.txt
+ /tmp/tmpgvv426mx/bernoulliigzvjuln/bernoulli-20240326163037_0-stdout.txt
+ /tmp/tmpgvv426mx/bernoulliigzvjuln/bernoulli-20240326163037_1-stdout.txt
+ /tmp/tmpgvv426mx/bernoulliigzvjuln/bernoulli-20240326163037_2-stdout.txt
+ /tmp/tmpgvv426mx/bernoulliigzvjuln/bernoulli-20240326163037_3-stdout.txt
The output_dir
argument is an optional argument which specifies
@@ -328,10 +329,10 @@
CSV File OutputsINFO:cmdstanpy:CmdStan done processing.
In [9]: !ls outputs/
-bernoulli-20240205204229_0-stdout.txt bernoulli-20240205204229_2.csv
-bernoulli-20240205204229_1-stdout.txt bernoulli-20240205204229_3-stdout.txt
-bernoulli-20240205204229_1.csv bernoulli-20240205204229_3.csv
-bernoulli-20240205204229_2-stdout.txt bernoulli-20240205204229_4.csv
+bernoulli-20240326163037_0-stdout.txt bernoulli-20240326163037_2.csv
+bernoulli-20240326163037_1-stdout.txt bernoulli-20240326163037_3-stdout.txt
+bernoulli-20240326163037_1.csv bernoulli-20240326163037_3.csv
+bernoulli-20240326163037_2-stdout.txt bernoulli-20240326163037_4.csv
Alternatively, the save_csvfiles()
function moves the CSV files
@@ -344,8 +345,8 @@
CSV File OutputsIn [11]: fit.save_csvfiles(dir='some/path')
In [12]: !ls some/path
-bernoulli-20240205204229_1.csv bernoulli-20240205204229_3.csv
-bernoulli-20240205204229_2.csv bernoulli-20240205204229_4.csv
+bernoulli-20240326163037_1.csv bernoulli-20240326163037_3.csv
+bernoulli-20240326163037_2.csv bernoulli-20240326163037_4.csv
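A sketch of the two ways of keeping the CSV outputs illustrated in these hunks, rather than leaving them in the session's temporary directory:

    from cmdstanpy import CmdStanModel

    model = CmdStanModel(stan_file='bernoulli.stan')

    # 1. Write the output files to a chosen directory up front.
    fit = model.sample(data='bernoulli.data.json', output_dir='outputs')

    # 2. Or sample first, then move the CSV files afterwards.
    fit2 = model.sample(data='bernoulli.data.json')
    fit2.save_csvfiles(dir='some/path')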
@@ -359,9 +360,9 @@ LoggingINFO:cmdstanpy:Chain [3] start processing
INFO:cmdstanpy:Chain [4] start processing
INFO:cmdstanpy:Chain [1] done processing
-INFO:cmdstanpy:Chain [2] done processing
-INFO:cmdstanpy:Chain [3] done processing
INFO:cmdstanpy:Chain [4] done processing
+INFO:cmdstanpy:Chain [3] done processing
+INFO:cmdstanpy:Chain [2] done processing
This output is managed through the built-in logging
module. For example, it can be disabled entirely:
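A sketch of what "disabled entirely" can look like with the standard logging module; the logger name `cmdstanpy` is taken from the log lines in this patch:

    import logging

    logger = logging.getLogger('cmdstanpy')
    logger.addHandler(logging.NullHandler())
    logger.propagate = False
    logger.setLevel(logging.WARNING)   # raise further (e.g. CRITICAL) to silence warnings too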
@@ -408,40 +409,40 @@ Logging ....: for line in logs.readlines():
....: print(line.strip())
....:
-20:42:29 - cmdstanpy - DEBUG - cmd: /home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/bernoulli info
+16:30:38 - cmdstanpy - DEBUG - cmd: /home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/bernoulli info
cwd: None
-20:42:29 - cmdstanpy - INFO - CmdStan start processing
-20:42:29 - cmdstanpy - DEBUG - idx 0
-20:42:29 - cmdstanpy - DEBUG - idx 1
-20:42:29 - cmdstanpy - DEBUG - running CmdStan, num_threads: 1
-20:42:29 - cmdstanpy - DEBUG - running CmdStan, num_threads: 1
-20:42:29 - cmdstanpy - DEBUG - idx 2
-20:42:29 - cmdstanpy - DEBUG - CmdStan args: ['/home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/bernoulli', 'id=2', 'random', 'seed=13203', 'data', 'file=users-guide/examples/bernoulli.data.json', 'output', 'file=/tmp/tmpld8wslo1/bernoulli8r5niwzl/bernoulli-20240205204229_2.csv', 'method=sample', 'algorithm=hmc', 'adapt', 'engaged=1']
-20:42:29 - cmdstanpy - DEBUG - idx 3
-20:42:29 - cmdstanpy - DEBUG - CmdStan args: ['/home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/bernoulli', 'id=1', 'random', 'seed=13203', 'data', 'file=users-guide/examples/bernoulli.data.json', 'output', 'file=/tmp/tmpld8wslo1/bernoulli8r5niwzl/bernoulli-20240205204229_1.csv', 'method=sample', 'algorithm=hmc', 'adapt', 'engaged=1']
-20:42:29 - cmdstanpy - DEBUG - running CmdStan, num_threads: 1
-20:42:29 - cmdstanpy - INFO - Chain [2] start processing
-20:42:29 - cmdstanpy - DEBUG - running CmdStan, num_threads: 1
-20:42:29 - cmdstanpy - INFO - Chain [1] start processing
-20:42:29 - cmdstanpy - DEBUG - CmdStan args: ['/home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/bernoulli', 'id=3', 'random', 'seed=13203', 'data', 'file=users-guide/examples/bernoulli.data.json', 'output', 'file=/tmp/tmpld8wslo1/bernoulli8r5niwzl/bernoulli-20240205204229_3.csv', 'method=sample', 'algorithm=hmc', 'adapt', 'engaged=1']
-20:42:29 - cmdstanpy - DEBUG - CmdStan args: ['/home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/bernoulli', 'id=4', 'random', 'seed=13203', 'data', 'file=users-guide/examples/bernoulli.data.json', 'output', 'file=/tmp/tmpld8wslo1/bernoulli8r5niwzl/bernoulli-20240205204229_4.csv', 'method=sample', 'algorithm=hmc', 'adapt', 'engaged=1']
-20:42:29 - cmdstanpy - INFO - Chain [3] start processing
-20:42:29 - cmdstanpy - INFO - Chain [4] start processing
-20:42:29 - cmdstanpy - INFO - Chain [2] done processing
-20:42:29 - cmdstanpy - INFO - Chain [3] done processing
-20:42:29 - cmdstanpy - INFO - Chain [1] done processing
-20:42:29 - cmdstanpy - INFO - Chain [4] done processing
-20:42:29 - cmdstanpy - DEBUG - runset
+16:30:38 - cmdstanpy - INFO - CmdStan start processing
+16:30:38 - cmdstanpy - DEBUG - idx 0
+16:30:38 - cmdstanpy - DEBUG - idx 1
+16:30:38 - cmdstanpy - DEBUG - running CmdStan, num_threads: 1
+16:30:38 - cmdstanpy - DEBUG - idx 2
+16:30:38 - cmdstanpy - DEBUG - running CmdStan, num_threads: 1
+16:30:38 - cmdstanpy - DEBUG - idx 3
+16:30:38 - cmdstanpy - DEBUG - CmdStan args: ['/home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/bernoulli', 'id=1', 'random', 'seed=23084', 'data', 'file=users-guide/examples/bernoulli.data.json', 'output', 'file=/tmp/tmpgvv426mx/bernoulli1wek20la/bernoulli-20240326163038_1.csv', 'method=sample', 'algorithm=hmc', 'adapt', 'engaged=1']
+16:30:38 - cmdstanpy - DEBUG - running CmdStan, num_threads: 1
+16:30:38 - cmdstanpy - DEBUG - CmdStan args: ['/home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/bernoulli', 'id=2', 'random', 'seed=23084', 'data', 'file=users-guide/examples/bernoulli.data.json', 'output', 'file=/tmp/tmpgvv426mx/bernoulli1wek20la/bernoulli-20240326163038_2.csv', 'method=sample', 'algorithm=hmc', 'adapt', 'engaged=1']
+16:30:38 - cmdstanpy - DEBUG - running CmdStan, num_threads: 1
+16:30:38 - cmdstanpy - INFO - Chain [1] start processing
+16:30:38 - cmdstanpy - DEBUG - CmdStan args: ['/home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/bernoulli', 'id=3', 'random', 'seed=23084', 'data', 'file=users-guide/examples/bernoulli.data.json', 'output', 'file=/tmp/tmpgvv426mx/bernoulli1wek20la/bernoulli-20240326163038_3.csv', 'method=sample', 'algorithm=hmc', 'adapt', 'engaged=1']
+16:30:38 - cmdstanpy - INFO - Chain [2] start processing
+16:30:38 - cmdstanpy - DEBUG - CmdStan args: ['/home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/bernoulli', 'id=4', 'random', 'seed=23084', 'data', 'file=users-guide/examples/bernoulli.data.json', 'output', 'file=/tmp/tmpgvv426mx/bernoulli1wek20la/bernoulli-20240326163038_4.csv', 'method=sample', 'algorithm=hmc', 'adapt', 'engaged=1']
+16:30:38 - cmdstanpy - INFO - Chain [3] start processing
+16:30:38 - cmdstanpy - INFO - Chain [4] start processing
+16:30:38 - cmdstanpy - INFO - Chain [1] done processing
+16:30:38 - cmdstanpy - INFO - Chain [2] done processing
+16:30:38 - cmdstanpy - INFO - Chain [3] done processing
+16:30:38 - cmdstanpy - INFO - Chain [4] done processing
+16:30:38 - cmdstanpy - DEBUG - runset
RunSet: chains=4, chain_ids=[1, 2, 3, 4], num_processes=4
cmd (chain 1):
-['/home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/bernoulli', 'id=1', 'random', 'seed=13203', 'data', 'file=users-guide/examples/bernoulli.data.json', 'output', 'file=/tmp/tmpld8wslo1/bernoulli8r5niwzl/bernoulli-20240205204229_1.csv', 'method=sample', 'algorithm=hmc', 'adapt', 'engaged=1']
+['/home/runner/work/cmdstanpy/cmdstanpy/docsrc/users-guide/examples/bernoulli', 'id=1', 'random', 'seed=23084', 'data', 'file=users-guide/examples/bernoulli.data.json', 'output', 'file=/tmp/tmpgvv426mx/bernoulli1wek20la/bernoulli-20240326163038_1.csv', 'method=sample', 'algorithm=hmc', 'adapt', 'engaged=1']
retcodes=[0, 0, 0, 0]
per-chain output files (showing chain 1 only):
csv_file:
-/tmp/tmpld8wslo1/bernoulli8r5niwzl/bernoulli-20240205204229_1.csv
+/tmp/tmpgvv426mx/bernoulli1wek20la/bernoulli-20240326163038_1.csv
console_msgs (if any):
-/tmp/tmpld8wslo1/bernoulli8r5niwzl/bernoulli-20240205204229_0-stdout.txt
-20:42:29 - cmdstanpy - DEBUG - Chain 1 console:
+/tmp/tmpgvv426mx/bernoulli1wek20la/bernoulli-20240326163038_0-stdout.txt
+16:30:38 - cmdstanpy - DEBUG - Chain 1 console:
method = sample (Default)
sample
num_samples = 1000 (Default)
@@ -473,9 +474,9 @@ Loggingfile = users-guide/examples/bernoulli.data.json
init = 2 (Default)
random
-seed = 13203
+seed = 23084
output
-file = /tmp/tmpld8wslo1/bernoulli8r5niwzl/bernoulli-20240205204229_1.csv
+file = /tmp/tmpgvv426mx/bernoulli1wek20la/bernoulli-20240326163038_1.csv
diagnostic_file = (Default)
refresh = 100 (Default)
sig_figs = -1 (Default)
@@ -484,8 +485,8 @@ Loggingnum_threads = 1 (Default)
-Gradient evaluation took 1e-06 seconds
-1000 transitions using 10 leapfrog steps per transition would take 0.01 seconds.
+Gradient evaluation took 3e-06 seconds
+1000 transitions using 10 leapfrog steps per transition would take 0.03 seconds.
Adjust your expectations accordingly!
@@ -512,9 +513,9 @@ LoggingIteration: 1900 / 2000 [ 95%] (Sampling)
Iteration: 2000 / 2000 [100%] (Sampling)
-Elapsed Time: 0.005 seconds (Warm-up)
-0.014 seconds (Sampling)
-0.019 seconds (Total)
+Elapsed Time: 0.004 seconds (Warm-up)
+0.013 seconds (Sampling)
+0.017 seconds (Total)
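The transcript above is read back from a log file; a rough sketch of capturing DEBUG output that way, where the file name `all.log` and the formatter string are illustrative assumptions:

    import logging

    logger = logging.getLogger('cmdstanpy')
    logger.setLevel(logging.DEBUG)
    handler = logging.FileHandler('all.log')
    handler.setFormatter(
        logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    )
    logger.addHandler(handler)

    # ... run model.sample(...) here, then read the captured log back:
    with open('all.log', 'r') as logs:
        for line in logs.readlines():
            print(line.strip())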
diff --git a/docs/users-guide/overview.html b/docs/users-guide/overview.html
index e122aad9..ccbf4db6 100644
--- a/docs/users-guide/overview.html
+++ b/docs/users-guide/overview.html
@@ -6,7 +6,7 @@
- Overview — CmdStanPy 1.2.1 documentation
+ Overview — CmdStanPy 1.2.2 documentation
@@ -38,6 +38,7 @@
+
@@ -63,7 +64,7 @@
diff --git a/docs/users-guide/workflow.html b/docs/users-guide/workflow.html
index 692e6d89..332cac28 100644
--- a/docs/users-guide/workflow.html
+++ b/docs/users-guide/workflow.html
@@ -6,7 +6,7 @@
- CmdStanPy Workflow — CmdStanPy 1.2.1 documentation
+ CmdStanPy Workflow — CmdStanPy 1.2.2 documentation
@@ -38,6 +38,7 @@
+
@@ -63,7 +64,7 @@
@@ -465,7 +466,7 @@ Output dataCmdStanMCMC
and CmdStanGQ
return the sample contents
in tabular form, see draws()
and draws_pd()
.
Similarly, the draws_xr()
method returns the sample
-contents as an xarray.Dataset
which is a mapping from variable names to their respective values.
+contents as an xarray.Dataset
which is a mapping from variable names to their respective values.
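A sketch of the three output containers this paragraph describes (`draws_xr` requires the optional xarray dependency):

    from cmdstanpy import CmdStanModel

    model = CmdStanModel(stan_file='bernoulli.stan')
    fit = model.sample(data='bernoulli.data.json')

    draws_np = fit.draws()          # numpy ndarray of the full sample
    draws_df = fit.draws_pd()       # pandas DataFrame, one row per draw
    draws_ds = fit.draws_xr()       # xarray.Dataset with chain and draw dimensions

    print(draws_np.shape)
    print(draws_df.columns.tolist())
    print(draws_ds['theta'])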