From c84bfa6bc62c6e5e017b2e6f3bd02f0b7c25e8fc Mon Sep 17 00:00:00 2001 From: Danilo Lessa Bernardineli Date: Mon, 18 Dec 2023 17:41:31 -0300 Subject: [PATCH 1/6] change to v0.5.0 --- CHANGELOG.md | 28 +++++++++++++++++++--------- README.md | 6 +++--- cadCAD/__init__.py | 2 +- setup.py | 2 +- 4 files changed, 24 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6b28e7f6..1f4469a9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,18 +1,28 @@ # Changelog: +### 0.5.0 - December 18 2023 -### February 15, 2023 -* **Fixes:** - - Package has been cleaned-up for working with Python 3.10 -### 0.4.29.1 +#### New User Features + +- Added toggle for enabling users to deactivate deepcopying. This is done by passing an additional object on the `ExecutionContext`, e.g. `ExecutionContext(mode, additional_objs={'deepcopy_off': True})` + +#### New Submodules + +- A collection of type annotations for encapsulating `cadCAD` projects is now implemented through the `cadCAD.types` submodule +- Added `cadCAD.tools` as a submodule, which originated from the `cadCAD_tools` Python package. This submodule contains several helper functions for making the simulation experience more straightforward as well as a collection of performance profiling tools. +- Added `cadCAD.diagram` as a submodule, which originated from the `cadCAD_diagram` Python package. This submodule contains functions for programmatically generating block diagrams from existing models. +- More informative error messages when policies and SUFs are wrongly implemented. (Issues #288 and #258) + +#### Backend Improvements + +- Merged repo with the `cadCAD_legacy_devel`, which includes performance improvements. In particular, simulations will start up faster due to code optimizations. +- `cadCAD` now uses `pytest` as the testing framework. This was made possible by isolating the existing tests and wrapping them into functions. 
-#### Changes -- Parallel executor uses the context manager handling the Process Pool lifetime +#### Fixes -### 0.4.29 +- cadCAD is now Python 3.10+ compatible (Issue #306 and #301) +- Proper support for `ExecutionMode.single_mode` (Issue #253 and #254) -- Merged repo with the `cadCAD_tweaked`, which includes performance improvements -- Python 3.10 compatible ### September 28, 2021 #### New Features: * **ver. ≥ `0.4.28`:** diff --git a/README.md b/README.md index e5151fa8..ea165f20 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ / ___/ __` / __ / / / /| | / / / / / /__/ /_/ / /_/ / /___/ ___ |/ /_/ / \___/\__,_/\__,_/\____/_/ |_/_____/ -by cadCAD ver. 0.4.28 +by cadCAD ver. 0.5.0 ====================================== Complex Adaptive Dynamics o i e @@ -20,7 +20,7 @@ through simulation, with support for Monte Carlo methods, A/B testing and parame # Getting Started -#### Change Log: [ver. 0.4.28](CHANGELOG.md) +#### Change Log: [ver. 0.5.0](CHANGELOG.md) [Previous Stable Release (No Longer Supported)](https://github.com/cadCAD-org/cadCAD/tree/b9cc6b2e4af15d6361d60d6ec059246ab8fbf6da) @@ -47,7 +47,7 @@ $ ## 1. 
Installation: Requires [>= Python 3.6.13](https://www.python.org/downloads/) -**Option A:** Install Using **[pip](https://pypi.org/project/cadCAD/0.4.28/)** +**Option A:** Install Using **[pip](https://pypi.org/project/cadCAD/)** ```bash pip3 install cadCAD ``` diff --git a/cadCAD/__init__.py b/cadCAD/__init__.py index fbfbaf52..8a276188 100644 --- a/cadCAD/__init__.py +++ b/cadCAD/__init__.py @@ -2,7 +2,7 @@ from cadCAD.configuration import Experiment name = "cadCAD" -version = "0.4.28" +version = "0.5.0" experiment = Experiment() configs = experiment.configs diff --git a/setup.py b/setup.py index eef999fa..785f3aee 100644 --- a/setup.py +++ b/setup.py @@ -21,7 +21,7 @@ """ name = "cadCAD" -version = "0.4.29" +version = "0.5.0" setup(name=name, version=version, From 2d08c90407d0513b509479b40a85beb24de16bec Mon Sep 17 00:00:00 2001 From: Danilo Lessa Bernardineli Date: Thu, 18 Jan 2024 20:14:47 -0300 Subject: [PATCH 2/6] add param count test --- testing/test_param_count.py | 91 +++++++++++++++++++++++++++++++++++++ 1 file changed, 91 insertions(+) create mode 100644 testing/test_param_count.py diff --git a/testing/test_param_count.py b/testing/test_param_count.py new file mode 100644 index 00000000..3fb3f571 --- /dev/null +++ b/testing/test_param_count.py @@ -0,0 +1,91 @@ +from cadCAD.configuration import Experiment +from cadCAD.configuration.utils import config_sim +from cadCAD.engine import Executor, ExecutionContext, ExecutionMode +import pytest + + + + +P_no_lst = {'pA': 1, 'pB': 2, 'pC': 3} +P_single_lst = {'pA': [1], 'pB': [1], 'pC': [3]} +P_single_swp = {'pA': [1, 2, 3], 'pB': [1], 'pC': [3]} +P_all_swp = {'pA': [1, 2, 3], 'pB': [1, 2, 3], 'pC': [1, 2, 3]} +Ps = [P_no_lst, P_single_lst, P_single_swp, P_all_swp] + +BASE_CONFIG_SIGNATURES_TO_TEST = [(3, 3, 3, 3, 3), (1, 3, 3, 3, 3), + (3, 1, 3, 3, 3), (1, 1, 3, 3, 3), + (3, 3, 1, 3, 3), (1, 3, 1, 3, 3), (1, 1, 1, 3, 3)] + +CONFIG_SIGNATURES_TO_TEST = [] +for p in Ps: + for config in 
BASE_CONFIG_SIGNATURES_TO_TEST: + CONFIG_SIGNATURES_TO_TEST.append(config + tuple([p])) + + +def run_experiment(exp: Experiment, mode: str): + exec_context = ExecutionContext(mode) + executor = Executor(exec_context=exec_context, configs=exp.configs) + (records, tensor_field, _) = executor.execute() + return records + + + + + + +def param_count_test_generator(provided_params): + def s_test_param_count(params, _2, _3, _4, _5): + assert params.keys() == provided_params.keys() + + +def create_experiments(N_simulations=3, N_sweeps=3, N_runs=3, N_timesteps=3, N_substeps=3, params={}) -> Experiment: + + INITIAL_STATE = {'varA': None} + PSUBs = [{'policies': {}, 'variables': {'varA': param_count_test_generator(params)}}] * N_substeps + + SIM_CONFIG = config_sim( + { + "N": N_runs, + "T": range(N_timesteps), + "M": params, # Optional + } + ) + + exp = Experiment() + for i_sim in range(N_simulations): + exp.append_model( + sim_configs=SIM_CONFIG, + initial_state=INITIAL_STATE, + partial_state_update_blocks=PSUBs + ) + return exp + + +def expected_rows(N_simulations, N_sweeps, N_runs, N_timesteps, N_substeps,P) -> int: + return N_simulations * N_sweeps * N_runs * (N_timesteps * N_substeps + 1) + + +@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", BASE_CONFIG_SIGNATURES_TO_TEST) +@pytest.mark.parametrize("P", Ps) +def test_row_count_single(N_sim, N_sw, N_r, N_t, N_s, P): + args = (N_sim, N_sw, N_r, N_t, N_s, P) + assert len(run_experiment(create_experiments(*args), 'single_proc')) == expected_rows(*args) + + +@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", BASE_CONFIG_SIGNATURES_TO_TEST) +@pytest.mark.parametrize("P", Ps) +def test_row_count_multi(N_sim, N_sw, N_r, N_t, N_s, P): + args = (N_sim, N_sw, N_r, N_t, N_s, P) + + if N_sim == 1 and N_sw == 1 and N_r == 1: + with pytest.raises(ValueError) as e_info: + assert len(run_experiment(create_experiments(*args), 'multi_proc')) == expected_rows(*args) + else: + assert len(run_experiment(create_experiments(*args), 
'multi_proc')) == expected_rows(*args) + + +@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", BASE_CONFIG_SIGNATURES_TO_TEST) +@pytest.mark.parametrize("P", Ps) +def test_row_count_local(N_sim, N_sw, N_r, N_t, N_s, P): + args = (N_sim, N_sw, N_r, N_t, N_s, P) + assert len(run_experiment(create_experiments(*args), 'local_proc')) == expected_rows(*args) From 9f6134d24e3acc346890463c0ff80ad927013253 Mon Sep 17 00:00:00 2001 From: Danilo Lessa Bernardineli Date: Thu, 18 Jan 2024 20:17:01 -0300 Subject: [PATCH 3/6] test improvements --- testing/test_param_count.py | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/testing/test_param_count.py b/testing/test_param_count.py index 3fb3f571..db192f03 100644 --- a/testing/test_param_count.py +++ b/testing/test_param_count.py @@ -12,15 +12,10 @@ P_all_swp = {'pA': [1, 2, 3], 'pB': [1, 2, 3], 'pC': [1, 2, 3]} Ps = [P_no_lst, P_single_lst, P_single_swp, P_all_swp] -BASE_CONFIG_SIGNATURES_TO_TEST = [(3, 3, 3, 3, 3), (1, 3, 3, 3, 3), +CONFIG_SIGNATURES_TO_TEST = [(3, 3, 3, 3, 3), (1, 3, 3, 3, 3), (3, 1, 3, 3, 3), (1, 1, 3, 3, 3), (3, 3, 1, 3, 3), (1, 3, 1, 3, 3), (1, 1, 1, 3, 3)] -CONFIG_SIGNATURES_TO_TEST = [] -for p in Ps: - for config in BASE_CONFIG_SIGNATURES_TO_TEST: - CONFIG_SIGNATURES_TO_TEST.append(config + tuple([p])) - def run_experiment(exp: Experiment, mode: str): exec_context = ExecutionContext(mode) @@ -65,14 +60,14 @@ def expected_rows(N_simulations, N_sweeps, N_runs, N_timesteps, N_substeps,P) -> return N_simulations * N_sweeps * N_runs * (N_timesteps * N_substeps + 1) -@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", BASE_CONFIG_SIGNATURES_TO_TEST) +@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST) @pytest.mark.parametrize("P", Ps) def test_row_count_single(N_sim, N_sw, N_r, N_t, N_s, P): args = (N_sim, N_sw, N_r, N_t, N_s, P) assert len(run_experiment(create_experiments(*args), 'single_proc')) == expected_rows(*args) 
-@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", BASE_CONFIG_SIGNATURES_TO_TEST) +@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST) @pytest.mark.parametrize("P", Ps) def test_row_count_multi(N_sim, N_sw, N_r, N_t, N_s, P): args = (N_sim, N_sw, N_r, N_t, N_s, P) @@ -84,7 +79,7 @@ def test_row_count_multi(N_sim, N_sw, N_r, N_t, N_s, P): assert len(run_experiment(create_experiments(*args), 'multi_proc')) == expected_rows(*args) -@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", BASE_CONFIG_SIGNATURES_TO_TEST) +@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST) @pytest.mark.parametrize("P", Ps) def test_row_count_local(N_sim, N_sw, N_r, N_t, N_s, P): args = (N_sim, N_sw, N_r, N_t, N_s, P) From 196040a3b5fc41db3acd1c7446f1c3192ddb6472 Mon Sep 17 00:00:00 2001 From: Danilo Lessa Bernardineli Date: Thu, 18 Jan 2024 20:31:12 -0300 Subject: [PATCH 4/6] add tests --- testing/test_param_count.py | 28 ++++++------------------ testing/tests/cadCAD_memory_address.json | 2 +- 2 files changed, 8 insertions(+), 22 deletions(-) diff --git a/testing/test_param_count.py b/testing/test_param_count.py index db192f03..64eedb83 100644 --- a/testing/test_param_count.py +++ b/testing/test_param_count.py @@ -8,9 +8,10 @@ P_no_lst = {'pA': 1, 'pB': 2, 'pC': 3} P_single_lst = {'pA': [1], 'pB': [1], 'pC': [3]} -P_single_swp = {'pA': [1, 2, 3], 'pB': [1], 'pC': [3]} -P_all_swp = {'pA': [1, 2, 3], 'pB': [1, 2, 3], 'pC': [1, 2, 3]} -Ps = [P_no_lst, P_single_lst, P_single_swp, P_all_swp] +P_single_swp = {'pA': [4, 5, 6], 'pB': [1], 'pC': [3]} +P_all_swp = {'pA': [7, 8, 9], 'pB': [1, 2, 3], 'pC': [1, 2, 3]} +P_all_but_one_swp = {'pA': [7, 8, 9], 'pB': [1, 2, 3], 'pC': [1]} +Ps = [P_no_lst, P_single_lst, P_single_swp, P_all_swp, P_all_but_one_swp] CONFIG_SIGNATURES_TO_TEST = [(3, 3, 3, 3, 3), (1, 3, 3, 3, 3), (3, 1, 3, 3, 3), (1, 1, 3, 3, 3), @@ -24,13 +25,9 @@ def run_experiment(exp: Experiment, mode: str): return records - - - - def 
param_count_test_generator(provided_params): def s_test_param_count(params, _2, _3, _4, _5): - assert params.keys() == provided_params.keys() + assert params.keys() == provided_params.keys(), 'Params are not matching' def create_experiments(N_simulations=3, N_sweeps=3, N_runs=3, N_timesteps=3, N_substeps=3, params={}) -> Experiment: @@ -64,23 +61,12 @@ def expected_rows(N_simulations, N_sweeps, N_runs, N_timesteps, N_substeps,P) -> @pytest.mark.parametrize("P", Ps) def test_row_count_single(N_sim, N_sw, N_r, N_t, N_s, P): args = (N_sim, N_sw, N_r, N_t, N_s, P) - assert len(run_experiment(create_experiments(*args), 'single_proc')) == expected_rows(*args) - - -@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST) -@pytest.mark.parametrize("P", Ps) -def test_row_count_multi(N_sim, N_sw, N_r, N_t, N_s, P): - args = (N_sim, N_sw, N_r, N_t, N_s, P) + len(run_experiment(create_experiments(*args), 'single_proc')) - if N_sim == 1 and N_sw == 1 and N_r == 1: - with pytest.raises(ValueError) as e_info: - assert len(run_experiment(create_experiments(*args), 'multi_proc')) == expected_rows(*args) - else: - assert len(run_experiment(create_experiments(*args), 'multi_proc')) == expected_rows(*args) @pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST) @pytest.mark.parametrize("P", Ps) def test_row_count_local(N_sim, N_sw, N_r, N_t, N_s, P): args = (N_sim, N_sw, N_r, N_t, N_s, P) - assert len(run_experiment(create_experiments(*args), 'local_proc')) == expected_rows(*args) + len(run_experiment(create_experiments(*args), 'local_proc')) diff --git a/testing/tests/cadCAD_memory_address.json b/testing/tests/cadCAD_memory_address.json index c60ead7b..d6405c8c 100644 --- a/testing/tests/cadCAD_memory_address.json +++ b/testing/tests/cadCAD_memory_address.json @@ -1 +1 @@ -{"memory_address": "0x111857380"} \ No newline at end of file +{"memory_address": "0x10d841d50"} \ No newline at end of file From 2599e2a6a43930e116ab613226fca7d8758c25b8 
Mon Sep 17 00:00:00 2001 From: Danilo Lessa Bernardineli Date: Thu, 18 Jan 2024 20:34:39 -0300 Subject: [PATCH 5/6] update tests --- testing/test_param_count.py | 12 ++++++++++-- testing/tests/cadCAD_memory_address.json | 2 +- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/testing/test_param_count.py b/testing/test_param_count.py index 64eedb83..317bcb04 100644 --- a/testing/test_param_count.py +++ b/testing/test_param_count.py @@ -25,15 +25,23 @@ def run_experiment(exp: Experiment, mode: str): return records -def param_count_test_generator(provided_params): +def param_count_test_suf_generator(provided_params): def s_test_param_count(params, _2, _3, _4, _5): assert params.keys() == provided_params.keys(), 'Params are not matching' + return ('varA', None) + return s_test_param_count +def param_count_test_policy_generator(provided_params): + def p_test_param_count(params, _2, _3, _4): + assert params.keys() == provided_params.keys(), 'Params are not matching' + return {'sigA': None} + return p_test_param_count + def create_experiments(N_simulations=3, N_sweeps=3, N_runs=3, N_timesteps=3, N_substeps=3, params={}) -> Experiment: INITIAL_STATE = {'varA': None} - PSUBs = [{'policies': {}, 'variables': {'varA': param_count_test_generator(params)}}] * N_substeps + PSUBs = [{'policies': {'sigA': param_count_test_policy_generator(params)}, 'variables': {'varA': param_count_test_suf_generator(params)}}] * N_substeps SIM_CONFIG = config_sim( { diff --git a/testing/tests/cadCAD_memory_address.json b/testing/tests/cadCAD_memory_address.json index d6405c8c..322a5c64 100644 --- a/testing/tests/cadCAD_memory_address.json +++ b/testing/tests/cadCAD_memory_address.json @@ -1 +1 @@ -{"memory_address": "0x10d841d50"} \ No newline at end of file +{"memory_address": "0x111444900"} \ No newline at end of file From a5fc6ade6ee122fa509a52c6ff80b42d7fe19090 Mon Sep 17 00:00:00 2001 From: Danilo Lessa Bernardineli Date: Mon, 18 Mar 2024 13:10:09 -0300 Subject: [PATCH 6/6] rm 
spaces --- testing/test_param_count.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/testing/test_param_count.py b/testing/test_param_count.py index 317bcb04..513c07d0 100644 --- a/testing/test_param_count.py +++ b/testing/test_param_count.py @@ -3,9 +3,6 @@ from cadCAD.engine import Executor, ExecutionContext, ExecutionMode import pytest - - - P_no_lst = {'pA': 1, 'pB': 2, 'pC': 3} P_single_lst = {'pA': [1], 'pB': [1], 'pC': [3]} P_single_swp = {'pA': [4, 5, 6], 'pB': [1], 'pC': [3]}