Skip to content

Commit

Permalink
🩹🚇 Adapt to changed outputs in pyglotaran-examples GHA (#274)
Browse files Browse the repository at this point in the history
* 🚧 Add back pinned requirements file
* 🚇 Adjust CI workflows to use staging branches
* 🩹🧪 Fix tests as far as possible
* 🧹 Fix pre-commit issues (x2)
  • Loading branch information
s-weigand authored Jul 7, 2024
1 parent f49f2bb commit 69bf782
Show file tree
Hide file tree
Showing 8 changed files with 54 additions and 40 deletions.
8 changes: 6 additions & 2 deletions .github/workflows/integration-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -18,10 +18,11 @@ jobs:
steps:
- name: Set example list output
id: create-example-list
uses: glotaran/pyglotaran-examples@main
uses: glotaran/pyglotaran-examples@staging_rewrite
with:
example_name: set example list
set_example_list: true
examples_branch: staging_rewrite

run-examples:
name: "Run Example: "
Expand All @@ -41,14 +42,16 @@ jobs:
- name: Install pyglotaran-extras
run: |
pip install wheel
python -m pip install git+https://github.com/glotaran/pyglotaran@staging
pip install .
- name: ${{ matrix.example_name }}
id: example-run
uses: glotaran/pyglotaran-examples@main
uses: glotaran/pyglotaran-examples@staging_rewrite
with:
example_name: ${{ matrix.example_name }}
install_extras: false
examples_branch: staging_rewrite

- name: Upload Example Plots Artifact
uses: actions/upload-artifact@v4
Expand All @@ -57,6 +60,7 @@ jobs:
path: ${{ steps.example-run.outputs.notebook-path }}

collect-artifacts:
if: always()
name: "Collect artifacts and reupload as bundel"
runs-on: ubuntu-latest
needs: [run-examples]
Expand Down
48 changes: 26 additions & 22 deletions .github/workflows/test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,7 @@ jobs:
run: |
python -m pip install -U pip wheel
python -m pip install -r requirements_pinned.txt
python -m pip install git+https://github.com/glotaran/pyglotaran@staging
python -m pip install -U -e ".[test]"
- name: Run tests
Expand All @@ -122,32 +123,32 @@ jobs:
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

test-pyglotaran-dev:
name: Test pyglotaran dev
runs-on: ubuntu-latest
needs: [pre-commit, docs]
# test-pyglotaran-dev:
# name: Test pyglotaran dev
# runs-on: ubuntu-latest
# needs: [pre-commit, docs]

steps:
- name: Check out repo
uses: actions/checkout@v4
# steps:
# - name: Check out repo
# uses: actions/checkout@v4

- name: Set up Python 3.10
uses: actions/setup-python@v5
with:
python-version: "3.10"
# - name: Set up Python 3.10
# uses: actions/setup-python@v5
# with:
# python-version: "3.10"

- name: Install dependencies
run: |
python -m pip install -U pip wheel
python -m pip install -r requirements_pinned.txt
python -m pip install -U -e ".[test]"
python -m pip install git+https://github.com/glotaran/pyglotaran
# - name: Install dependencies
# run: |
# python -m pip install -U pip wheel
# python -m pip install -r requirements_pinned.txt
# python -m pip install -U -e ".[test]"
# python -m pip install git+https://github.com/glotaran/pyglotaran

- name: Show installed dependencies
run: pip freeze
# - name: Show installed dependencies
# run: pip freeze

- name: Run tests
run: python -m pytest --nbval --cov=./ --cov-report term --cov-report xml --cov-config pyproject.toml tests
# - name: Run tests
# run: python -m pytest --nbval --cov=./ --cov-report term --cov-report xml --cov-config pyproject.toml tests

- name: Codecov Upload
continue-on-error: true
Expand Down Expand Up @@ -197,7 +198,10 @@ jobs:
name: Deploy to PyPi
runs-on: ubuntu-latest
if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags')
needs: [test, test-pyglotaran-dev, test-min-versions]
needs:
- test
# - test-pyglotaran-dev
- test-min-versions
permissions:
id-token: write # IMPORTANT: this permission is mandatory for trusted publishing
steps:
Expand Down
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -115,7 +115,7 @@ repos:
- "--allow-init-docstring=True"
- "--skip-checking-short-docstrings=False"
name: "flake8 lint docstrings"
exclude: "^(docs/|tests?/|pyglotaran_extras/compat)"
exclude: "^(docs/|tests?/|pyglotaran_extras/compat/)"
additional_dependencies: [pydoclint==0.3.8]

- repo: https://github.com/codespell-project/codespell
Expand Down
1 change: 1 addition & 0 deletions pyglotaran_extras/compat/convert_result_dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@ def _adjust_estimations_to_spectra(ds: xr.Dataset, *, cleanup: bool = False) ->


def _adjust_activation_to_irf(ds: xr.Dataset, *, cleanup: bool = False) -> None:
"""Adjust the activation to the corresponding IRF."""
if "gaussian_activation_center" in ds:
values = ds.gaussian_activation_center.to_numpy().flatten()
ds["irf_center"] = values[0]
Expand Down
8 changes: 8 additions & 0 deletions requirements_pinned.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
# Runtime dependencies

cycler==0.12.1
numpy==1.26.4
matplotlib==3.9.0
# pyglotaran==0.8.0
tabulate==0.9.0
xarray==2024.5.0
10 changes: 3 additions & 7 deletions tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,12 +8,10 @@
import netCDF4 # noqa: F401
# isort: on

from dataclasses import replace

import pytest
from glotaran.optimization.optimize import optimize
from glotaran.testing.simulated_data.parallel_spectral_decay import SCHEME as SCHEME_PAR
from glotaran.testing.simulated_data.sequential_spectral_decay import SCHEME as SCHEME_SEQ
from glotaran.testing.simulated_data.shared_decay import PARAMETERS

from pyglotaran_extras.io.setup_case_study import get_script_dir

Expand All @@ -26,12 +24,10 @@ def wrapped_get_script_dir():
@pytest.fixture(scope="session")
def result_parallel_spectral_decay():
"""Test result from ``glotaran.testing.simulated_data.parallel_spectral_decay``."""
scheme = replace(SCHEME_PAR, maximum_number_function_evaluations=1)
return optimize(scheme)
return SCHEME_PAR.optimize(PARAMETERS, maximum_number_function_evaluations=1)


@pytest.fixture(scope="session")
def result_sequential_spectral_decay():
"""Test result from ``glotaran.testing.simulated_data.sequential_spectral_decay``."""
scheme = replace(SCHEME_SEQ, maximum_number_function_evaluations=1)
return optimize(scheme)
return SCHEME_SEQ.optimize(PARAMETERS, maximum_number_function_evaluations=1)
8 changes: 4 additions & 4 deletions tests/inspect/test_a_matrix.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ def test_a_matrix_to_html_table(
encoding="utf8"
)
assert a_matrix_to_html_table(
result_parallel_spectral_decay.data["dataset_1"].a_matrix_megacomplex_parallel_decay,
result_parallel_spectral_decay.data["parallel-decay"].a_matrix_megacomplex_parallel_decay,
"megacomplex_parallel_decay",
**kwargs,
) == expected.rstrip("\n")
Expand Down Expand Up @@ -60,10 +60,10 @@ def test_show_a_matrixes(
)

result = result_parallel_spectral_decay
result.data["dataset_2"] = result_sequential_spectral_decay.data["dataset_1"]
result.data["dataset_2"] = result_sequential_spectral_decay.data["sequential-decay"]
# dummy data for filtering based on a-matrix size
single_entry_data = result_sequential_spectral_decay.data[
"dataset_1"
"sequential-decay"
].a_matrix_megacomplex_sequential_decay[:1, :1]
single_entry_data = single_entry_data.rename(
{
Expand All @@ -85,7 +85,7 @@ def test_show_a_matrixes_multiple_a_matrixes_in_dataset(
).read_text(encoding="utf8")

single_entry_data = result_sequential_spectral_decay.data[
"dataset_1"
"sequential-decay"
].a_matrix_megacomplex_sequential_decay[:1, :1]

a_matrix_one = single_entry_data.rename(
Expand Down
9 changes: 5 additions & 4 deletions tests/io/test_load_data.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,8 @@ def test_load_data(
result_sequential_spectral_decay: Result, tmp_path: Path, recwarn: WarningsRecorder
):
"""All input_type permutations result in a ``xr.Dataset``."""
compare_dataset = result_sequential_spectral_decay.data["dataset_1"]
compare_dataset = result_sequential_spectral_decay.data["sequential-decay"]
save_path = tmp_path / "result"

from_result = load_data(result_sequential_spectral_decay)

Expand All @@ -53,9 +54,9 @@ def test_load_data(

run_load_data_test(from_dataset, compare_dataset)

result_sequential_spectral_decay.save(tmp_path / "result.yml")
result_sequential_spectral_decay.save(save_path)

from_file = load_data(tmp_path / "dataset_1.nc")
from_file = load_data(save_path / "data/sequential-decay.nc")

run_load_data_test(from_file, compare_dataset)

Expand All @@ -69,7 +70,7 @@ def test_load_data(
assert len(filter_warnings(recwarn)) == 0

# Ensure not to mutate original fixture
result_multi_dataset = load_result(tmp_path / "result.yml")
result_multi_dataset = load_result(save_path)
result_multi_dataset.data["dataset_2"] = xr.Dataset({"foo": [1]})

from_result_multi_dataset = load_data(result_multi_dataset)
Expand Down

0 comments on commit 69bf782

Please sign in to comment.