diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index cbfd10d7..046b592c 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -115,7 +115,7 @@ repos:
- "--allow-init-docstring=True"
- "--skip-checking-short-docstrings=False"
name: "flake8 lint docstrings"
- exclude: "^(docs/|tests?/)"
+ exclude: "^(docs/|tests?/|pyglotaran_extras/compat)"
additional_dependencies: [pydoclint==0.3.8]
- repo: https://github.com/codespell-project/codespell
diff --git a/.ruff.toml b/.ruff.toml
index aa23960d..0de4070d 100644
--- a/.ruff.toml
+++ b/.ruff.toml
@@ -66,6 +66,8 @@ dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
[lint.per-file-ignores]
"tests/*" = ["ARG001"]
+"pyglotaran_extras/compat/compat_result.py" = ["D", "N", "E", "ERA"]
+"pyglotaran_extras/compat/convert_result_dataset.py" = ["D", "N", "F", "C"]
[lint.isort]
required-imports = ["from __future__ import annotations"]
diff --git a/pyglotaran_extras/compat/__init__.py b/pyglotaran_extras/compat/__init__.py
index 2044f9df..c1b5798d 100644
--- a/pyglotaran_extras/compat/__init__.py
+++ b/pyglotaran_extras/compat/__init__.py
@@ -1,5 +1,7 @@
"""Provides compatibility utilities for pyglotaran."""
+from __future__ import annotations
+
from pyglotaran_extras.compat.convert_result_dataset import convert
__all__ = ["convert"]
diff --git a/pyglotaran_extras/compat/compat_result.py b/pyglotaran_extras/compat/compat_result.py
new file mode 100644
index 00000000..ec05ae33
--- /dev/null
+++ b/pyglotaran_extras/compat/compat_result.py
@@ -0,0 +1,193 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+from typing import Any
+
+import numpy as np
+
+if TYPE_CHECKING:
+ from glotaran.model.experiment_model import ExperimentModel
+ from glotaran.parameter.parameters import Parameters
+
+from glotaran.project.result import Result
+from glotaran.utils.ipython import MarkdownStr
+from tabulate import tabulate
+
+
+class CompatResult(Result):
+ """A compatibility class for Result.
+
+ Inherits:
+ data: dict[str, xr.Dataset]
+ experiments: dict[str, ExperimentModel]
+ optimization: OptimizationResult
+ parameters_intitial: Parameters
+ parameters_optimized: Parameters
+ """
+
+ @property
+ def number_of_function_evaluations(self) -> int:
+ return self.optimization.number_of_function_evaluations
+
+ @property
+ def number_of_residuals(self) -> int:
+ return self.optimization.number_of_data_points
+
+ @property
+ def number_of_free_parameters(self) -> int:
+ return self.optimization.number_of_parameters
+
+ @property
+ def number_of_clps(self) -> int:
+ return self.optimization.number_of_clps
+
+ @property
+ def degrees_of_freedom(self) -> int:
+ return self.optimization.degrees_of_freedom
+
+ @property
+ def chi_square(self) -> float:
+ return self.optimization.chi_square
+
+ @property
+ def reduced_chi_square(self) -> float:
+ return self.optimization.reduced_chi_square
+
+ @property
+ def reduced_chi_squared(self) -> float:
+ return self.optimization.reduced_chi_square
+
+ @property
+ def root_mean_square_error(self) -> float:
+ return self.optimization.root_mean_square_error
+
+ @property
+ def additional_penalty(self) -> list | None:
+ return None # TODO: implement
+
+ @property
+ def optimized_parameters(self) -> Parameters:
+ return self.parameters_optimized
+
+ @property
+ def initial_parameters(self) -> Parameters:
+ return self.parameters_intitial
+
+ @property
+ def model(self) -> ExperimentModel:
+ return self.experiments[next(iter(self.experiments.keys()))]
+
+ @classmethod
+ def from_result(cls, result: Result) -> CompatResult:
+ return cls(
+ data=result.data,
+ experiments=result.experiments,
+ optimization=result.optimization,
+ parameters_intitial=result.parameters_intitial,
+ parameters_optimized=result.parameters_optimized,
+ )
+
+ def markdown(
+ self,
+ with_model: bool = True,
+ *,
+ base_heading_level: int = 1,
+ wrap_model_in_details: bool = False,
+ ) -> MarkdownStr:
+ """Format the model as a markdown text.
+
+ Parameters
+ ----------
+ with_model : bool
+ If `True`, the model will be printed with initial and optimized parameters filled in.
+ base_heading_level : int
+ The level of the base heading.
+ wrap_model_in_details : bool
+ Wraps the model in a details tag. Defaults to ``False``.
+
+ Returns
+ -------
+ MarkdownStr : str
+ The result as markdown string.
+ """
+ general_table_rows: list[list[Any]] = [
+ ["Number of residual evaluation", self.number_of_function_evaluations],
+ ["Number of residuals", self.number_of_residuals],
+ ["Number of free parameters", self.number_of_free_parameters],
+ ["Number of conditionally linear parameters", self.number_of_clps],
+ ["Degrees of freedom", self.degrees_of_freedom],
+ ["Chi Square", f"{self.chi_square or np.nan:.2e}"],
+ ["Reduced Chi Square", f"{self.reduced_chi_square or np.nan:.2e}"],
+ ["Root Mean Square Error (RMSE)", f"{self.root_mean_square_error or np.nan:.2e}"],
+ ]
+ if self.additional_penalty is not None and any(
+ len(penalty) != 0 for penalty in self.additional_penalty
+ ):
+ general_table_rows.append(["RMSE additional penalty", self.additional_penalty])
+
+ result_table = tabulate(
+ general_table_rows,
+ headers=["Optimization Result", ""],
+ tablefmt="github",
+ disable_numparse=True,
+ )
+ if len(self.data) > 1:
+ RMSE_rows = [
+ [
+ f"{index}.{label}:",
+ dataset.weighted_root_mean_square_error,
+ dataset.root_mean_square_error,
+ ]
+ for index, (label, dataset) in enumerate(self.data.items(), start=1)
+ ]
+
+ RMSE_table = tabulate(
+ RMSE_rows,
+ headers=["RMSE (per dataset)", "weighted", "unweighted"],
+ floatfmt=".2e",
+ tablefmt="github",
+ )
+
+ result_table = f"{result_table}\n\n{RMSE_table}"
+
+ if with_model:
+ result_table += (
+ "\n\n> **Warning:** Printing model is not yet implemented for `CompatResult`."
+ )
+ result_table += f"\n\n> **unused:** {base_heading_level=}, {wrap_model_in_details=}."
+
+ # model_md = self.model.markdown(
+ # parameters=self.optimized_parameters,
+ # initial_parameters=self.initial_parameters,
+ # base_heading_level=base_heading_level,
+ # )
+ # if wrap_model_in_details is False:
+ # result_table = f"{result_table}\n\n{model_md}"
+ # else:
+ # # The section part is just a hack to generate properly rendering docs due to a bug
+ # # in sphinx which causes a wrong tag opening and closing order of html tags
+ # # Since model_md contains 2 heading levels we need to close 2 sections
+ # result_table = (
+ # f"{result_table}\n\n
\n\n{model_md}\n"
+ # f"{''*(2)}"
+ # " "
+ # f"{''*(2)}"
+ # )
+
+ return MarkdownStr(result_table)
+
+ def _repr_markdown_(self) -> str:
+ """Return a markdown representation str.
+
+ Special method used by ``ipython`` to render markdown.
+
+ Returns
+ -------
+ str
+ The result as markdown string.
+ """
+ return str(self.markdown(base_heading_level=3, wrap_model_in_details=True))
+
+ def __str__(self) -> str:
+ """Overwrite of ``__str__``."""
+ return str(self.markdown(with_model=False))
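
A minimal usage sketch for the new class (assuming `result` is a pyglotaran `Result` obtained from an optimization run; the variable name is illustrative and not part of this diff):

    from pyglotaran_extras.compat.compat_result import CompatResult

    compat = CompatResult.from_result(result)  # re-wrap the staging Result fields
    print(compat)                              # __str__ renders the summary table without the model
    compat.markdown(with_model=True)           # currently appends the "not yet implemented" warning

The old property names (`optimized_parameters`, `root_mean_square_error`, ...) then resolve against the new `optimization` and `parameters_*` fields, which is what keeps existing pyglotaran-extras code working against the new result layout.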
diff --git a/pyglotaran_extras/compat/convert_result_dataset.py b/pyglotaran_extras/compat/convert_result_dataset.py
index b52dec28..6291f665 100644
--- a/pyglotaran_extras/compat/convert_result_dataset.py
+++ b/pyglotaran_extras/compat/convert_result_dataset.py
@@ -1,9 +1,14 @@
"""Convert a new pyglotaran (result) dataset to a version compatible with pyglotaran-extras."""
+from __future__ import annotations
+
import copy
+
import xarray as xr
from glotaran.project.result import Result
+from pyglotaran_extras.compat.compat_result import CompatResult
+
def _adjust_estimations_to_spectra(ds: xr.Dataset, *, cleanup: bool = False) -> None:
"""Adjust the estimations to spectra names and flatten data."""
@@ -27,7 +32,9 @@ def _adjust_estimations_to_spectra(ds: xr.Dataset, *, cleanup: bool = False) ->
if cleanup:
ds = ds.drop_vars("species_associated_estimation")
if "damped_oscillation_associated_estimation" in ds:
- ds["damped_oscillation_associated_spectra"] = ds["damped_oscillation_associated_estimation"]
+ ds["damped_oscillation_associated_spectra"] = ds[
+ "damped_oscillation_associated_estimation"
+ ]
if cleanup:
ds = ds.drop_vars("damped_oscillation_associated_estimation")
@@ -77,7 +84,7 @@ def _adjust_activation_to_irf(ds: xr.Dataset, *, cleanup: bool = False) -> None:
pass
-def convert(input: xr.Dataset | Result, cleanup: bool = False) -> xr.Dataset | Result:
+def convert(input: xr.Dataset | Result, cleanup: bool = False) -> xr.Dataset | CompatResult:
"""Convert a glotaran Result or xarray Dataset to a different format.
Parameters
@@ -128,16 +135,14 @@ def convert_dataset(dataset: xr.Dataset, cleanup: bool = False) -> xr.Dataset:
return converted_ds
-def convert_result(result: Result, cleanup: bool = False) -> Result:
+def convert_result(result: Result, cleanup: bool = False) -> CompatResult:
"""Convert the result format used in staging (to be v0.8) to the format of main (v0.7)."""
- converted_result = copy.copy(result)
+ converted_result = CompatResult.from_result(result)
# convert the datasets
for key in converted_result.data:
- converted_result.data[key] = convert_dataset(
- converted_result.data[key], cleanup=cleanup
- )
+ converted_result.data[key] = convert_dataset(converted_result.data[key], cleanup=cleanup)
# convert the parameters
return converted_result
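
As a usage sketch for the converter (the `result` object and the dataset label are illustrative assumptions, not part of this diff), `convert` accepts either a whole `Result` or a single dataset:

    from pyglotaran_extras.compat import convert

    compat_result = convert(result, cleanup=True)       # Result -> CompatResult with converted datasets
    converted_ds = convert(result.data["dataset_1"])    # xr.Dataset -> converted xr.Dataset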
diff --git a/pyproject.toml b/pyproject.toml
index 0c3073df..0b5a0f7c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -144,6 +144,7 @@ disallow_untyped_defs = false
exclude = [
"docs",
"tests/*",
+ "*/compat/*",
]
ignore-init-module = true
fail-under = 100