diff --git a/cadCAD/engine/__init__.py b/cadCAD/engine/__init__.py
index 4dc33409..8ce480ad 100644
--- a/cadCAD/engine/__init__.py
+++ b/cadCAD/engine/__init__.py
@@ -70,7 +70,7 @@ def distroduce_proc(

 class Executor:
     def __init__(self,
-                 exec_context: ExecutionContext, configs: List[Configuration], sc=None, empty_return=False
+                 exec_context: ExecutionContext, configs: List[Configuration], sc=None, empty_return=False, supress_print=False
                  ) -> None:
         self.sc = sc
         self.SimExecutor = SimExecutor
@@ -79,6 +79,7 @@ def __init__(self,
         self.additional_objs = exec_context.additional_objs
         self.configs = configs
         self.empty_return = empty_return
+        self.supress_print = supress_print

     def execute(self) -> Tuple[object, object, Dict[str, object]]:
         if self.empty_return is True:
@@ -97,12 +98,14 @@ def execute(self) -> Tuple[object, object, Dict[str, object]]:
         config_idx = 0

         # Execution Info
-        print_exec_info(self.exec_context, configs_as_objs(self.configs))
+        if self.supress_print is False:
+            print_exec_info(self.exec_context, configs_as_objs(self.configs))

         t1 = time()
         for x in tqdm(self.configs,
                       total=len(self.configs),
-                      desc="Initializing configurations"):
+                      desc="Initializing configurations",
+                      disable=self.supress_print):
             sessions.append(
                 {
                     'user_id': x.user_id, 'experiment_id': x.experiment_id, 'session_id': x.session_id,
@@ -180,7 +183,8 @@ def get_final_results(simulations: List[StateHistory],
             flat_timesteps, tensor_fields = [], []
             for sim_result, psu, ep in tqdm(list(zip(simulations, psus, eps)),
                                             total=len(simulations),
-                                            desc='Flattening results'):
+                                            desc='Flattening results',
+                                            disable=self.supress_print):
                 if do_flatten:
                     flat_timesteps.append(flatten(sim_result))
                 tensor_fields.append(create_tensor_field(psu, ep))
@@ -209,8 +213,9 @@ def get_final_results(simulations: List[StateHistory],
             else:
                 raise ValueError("Invalid execution mode specified")

-
-            print("Execution Method: " + self.exec_method.__name__)
+            if self.supress_print is False:
+                print("Execution Method: " + self.exec_method.__name__)
+
             simulations_results = self.exec_method(
                 sim_executors, var_dict_list, states_lists, configs_structs, env_processes_list,
                 Ts, SimIDs, RunIDs, ExpIDs, SubsetIDs, SubsetWindows, original_N, self.additional_objs
@@ -219,7 +224,8 @@ def get_final_results(simulations: List[StateHistory],
             final_result = get_final_results(
                 simulations_results, partial_state_updates, eps, sessions, remote_threshold)
         elif self.exec_context == ExecutionMode.distributed:
-            print("Execution Method: " + self.exec_method.__name__)
+            if self.supress_print is False:
+                print("Execution Method: " + self.exec_method.__name__)
             simulations_results = self.exec_method(
                 sim_executors, var_dict_list, states_lists, configs_structs, env_processes_list,
                 Ts, SimIDs, RunIDs, ExpIDs, SubsetIDs, SubsetWindows, original_N, self.sc
@@ -228,6 +234,7 @@ def get_final_results(simulations: List[StateHistory],
                 simulations_results, partial_state_updates, eps, sessions)

         t2 = time()
-        print(f"Total execution time: {t2 - t1 :.2f}s")
+        if self.supress_print is False:
+            print(f"Total execution time: {t2 - t1 :.2f}s")

         return final_result
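The `Executor` changes above gate every console side effect behind a single `supress_print` flag (spelling follows the PR's identifier): the `print_exec_info` banner, both `tqdm` progress bars via their `disable` keyword, the "Execution Method" lines, and the timing summary. A minimal standalone sketch of the same pattern; `QuietRunner` and `jobs` are hypothetical names for illustration, not part of this diff:

```python
from time import time
from tqdm import tqdm


class QuietRunner:
    """Sketch only: one flag gates every console side effect,
    mirroring how Executor threads supress_print through execute()."""

    def __init__(self, jobs, supress_print=False):
        self.jobs = jobs
        self.supress_print = supress_print

    def run(self):
        t1 = time()
        results = []
        # tqdm's `disable` kwarg silences the bar without changing the loop
        for job in tqdm(self.jobs, desc="Running jobs", disable=self.supress_print):
            results.append(job())
        if self.supress_print is False:
            print(f"Total execution time: {time() - t1 :.2f}s")
        return results
```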
diff --git a/cadCAD/engine/execution.py b/cadCAD/engine/execution.py
index 97a5fa87..cd1319ac 100644
--- a/cadCAD/engine/execution.py
+++ b/cadCAD/engine/execution.py
@@ -35,7 +35,6 @@ def single_proc_exec(
         Ts, SimIDs, Ns, SubsetIDs, SubsetWindows, var_dict_list)

     results: List = []
-    print(f'Execution Mode: single_threaded')
     for raw_param in zip(*raw_params):
         simulation_exec, states_list, config, env_processes, T, sim_id, N, subset_id, subset_window, var_dict = raw_param
         result = simulation_exec(
@@ -60,7 +59,6 @@ def parallelize_simulations(
     additional_objs=None
 ):

-    print(f'Execution Mode: parallelized')
     params = list(
         zip(
             simulation_execs, var_dict_list, states_lists, configs_structs, env_processes_list,
diff --git a/cadCAD/tools/execution/easy_run.py b/cadCAD/tools/execution/easy_run.py
index 825ba480..9909d96d 100644
--- a/cadCAD/tools/execution/easy_run.py
+++ b/cadCAD/tools/execution/easy_run.py
@@ -44,6 +44,7 @@ def easy_run(
     drop_substeps=True,
     exec_mode='local',
     deepcopy_off=False,
+    supress_print=False
 ) -> pd.DataFrame:
     """
     Run cadCAD simulations without headaches.
@@ -69,7 +70,7 @@ def easy_run(
     elif exec_mode == 'single':
         _exec_mode = ExecutionMode().single_mode
     exec_context = ExecutionContext(_exec_mode, additional_objs={'deepcopy_off': deepcopy_off})
-    executor = Executor(exec_context=exec_context, configs=configs)
+    executor = Executor(exec_context=exec_context, configs=configs, supress_print=supress_print)

     # Execute the cadCAD experiment
     (records, tensor_field, _) = executor.execute()
diff --git a/testing/test_param_count.py b/testing/test_param_count.py
index 513c07d0..efb97e7a 100644
--- a/testing/test_param_count.py
+++ b/testing/test_param_count.py
@@ -3,6 +3,7 @@
 from cadCAD.engine import Executor, ExecutionContext, ExecutionMode
 import pytest

+
 P_no_lst = {'pA': 1, 'pB': 2, 'pC': 3}
 P_single_lst = {'pA': [1], 'pB': [1], 'pC': [3]}
 P_single_swp = {'pA': [4, 5, 6], 'pB': [1], 'pC': [3]}
@@ -10,9 +11,13 @@
 P_all_but_one_swp = {'pA': [7, 8, 9], 'pB': [1, 2, 3], 'pC': [1]}
 Ps = [P_no_lst, P_single_lst, P_single_swp, P_all_swp, P_all_but_one_swp]

-CONFIG_SIGNATURES_TO_TEST = [(3, 3, 3, 3, 3), (1, 3, 3, 3, 3),
-                             (3, 1, 3, 3, 3), (1, 1, 3, 3, 3),
-                             (3, 3, 1, 3, 3), (1, 3, 1, 3, 3), (1, 1, 1, 3, 3)]
+CONFIG_SIGNATURES_TO_TEST = [(3, 3, 3, 3, 3),
+                             (1, 3, 3, 3, 3),
+                             (3, 1, 3, 3, 3),
+                             (1, 1, 3, 3, 3),
+                             (3, 3, 1, 3, 3),
+                             (1, 3, 1, 3, 3),
+                             (1, 1, 1, 3, 3)]


 def run_experiment(exp: Experiment, mode: str):
@@ -35,10 +40,12 @@ def p_test_param_count(params, _2, _3, _4):
         return {'sigA': None}
     return p_test_param_count

+
 def create_experiments(N_simulations=3, N_sweeps=3, N_runs=3, N_timesteps=3, N_substeps=3, params={}) -> Experiment:

     INITIAL_STATE = {'varA': None}
-    PSUBs = [{'policies': {'sigA': param_count_test_policy_generator(params)}, 'variables': {'varA': param_count_test_suf_generator(params)}}] * N_substeps
+    PSUBs = [{'policies': {'sigA': param_count_test_policy_generator(
+        params)}, 'variables': {'varA': param_count_test_suf_generator(params)}}] * N_substeps

     SIM_CONFIG = config_sim(
         {
@@ -58,8 +65,8 @@ def create_experiments(N_simulations=3, N_sweeps=3, N_runs=3, N_timesteps=3, N_s
     return exp


-def expected_rows(N_simulations, N_sweeps, N_runs, N_timesteps, N_substeps,P) -> int:
-    return N_simulations * N_sweeps * N_runs * (N_timesteps * N_substeps + 1)
+def expected_rows(N_simulations, N_sweeps, N_runs, N_timesteps, N_substeps, P) -> int:
+    return N_simulations * N_sweeps * N_runs * (N_timesteps * N_substeps + 1)


 @pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST)
@@ -69,7 +76,6 @@ def test_row_count_single(N_sim, N_sw, N_r, N_t, N_s, P):
     len(run_experiment(create_experiments(*args), 'single_proc'))


-
 @pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST)
 @pytest.mark.parametrize("P", Ps)
 def test_row_count_local(N_sim, N_sw, N_r, N_t, N_s, P):
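With the passthrough added to `easy_run` above, callers of the convenience API can silence cadCAD entirely. A hedged usage sketch: the one-variable model is a placeholder, and the positional argument order (initial state, params, PSUBs, timesteps, samples) is assumed from the `easy_run` signature, of which only the keyword tail appears in this hunk:

```python
from cadCAD.tools.execution.easy_run import easy_run

# Placeholder model: one inert state variable, one empty partial state update block.
initial_state = {'varA': 0}
psubs = [{'policies': {}, 'variables': {}}]
params = {'pA': [1]}

# Returns a pandas DataFrame; with supress_print=True there is no banner,
# no progress bars, and no timing output on the console.
df = easy_run(initial_state, params, psubs, 10, 1, supress_print=True)
```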
diff --git a/testing/test_print.py b/testing/test_print.py
new file mode 100644
index 00000000..07c0e62a
--- /dev/null
+++ b/testing/test_print.py
@@ -0,0 +1,77 @@
+from cadCAD.configuration import Experiment
+from cadCAD.configuration.utils import config_sim
+from cadCAD.engine import Executor, ExecutionContext, ExecutionMode
+import pytest
+
+P_no_lst = {'pA': 1, 'pB': 2, 'pC': 3}
+P_single_lst = {'pA': [1], 'pB': [1], 'pC': [3]}
+P_single_swp = {'pA': [4, 5, 6], 'pB': [1], 'pC': [3]}
+P_all_swp = {'pA': [7, 8, 9], 'pB': [1, 2, 3], 'pC': [1, 2, 3]}
+P_all_but_one_swp = {'pA': [7, 8, 9], 'pB': [1, 2, 3], 'pC': [1]}
+Ps = [P_no_lst, P_single_lst, P_single_swp, P_all_swp, P_all_but_one_swp]
+
+CONFIG_SIGNATURES_TO_TEST = [(3, 3, 3, 3, 3),
+                             (1, 3, 3, 3, 3),
+                             (3, 1, 3, 3, 3),
+                             (1, 1, 3, 3, 3),
+                             (3, 3, 1, 3, 3),
+                             (1, 3, 1, 3, 3),
+                             (1, 1, 1, 3, 3)]
+
+
+def run_experiment(exp: Experiment, mode: str, supress_print=False):
+    exec_context = ExecutionContext(mode)
+    executor = Executor(exec_context=exec_context, configs=exp.configs, supress_print=supress_print)
+    (records, tensor_field, _) = executor.execute()
+    return records
+
+
+def param_count_test_suf_generator(provided_params):
+    def s_test_param_count(params, _2, _3, _4, _5):
+        assert params.keys() == provided_params.keys(), 'Params are not matching'
+        return ('varA', None)
+    return s_test_param_count
+
+
+def param_count_test_policy_generator(provided_params):
+    def p_test_param_count(params, _2, _3, _4):
+        assert params.keys() == provided_params.keys(), 'Params are not matching'
+        return {'sigA': None}
+    return p_test_param_count
+
+
+def create_experiments(N_simulations=3, N_sweeps=3, N_runs=3, N_timesteps=3, N_substeps=3, params={}) -> Experiment:
+
+    INITIAL_STATE = {'varA': None}
+    PSUBs = [{'policies': {'sigA': param_count_test_policy_generator(
+        params)}, 'variables': {'varA': param_count_test_suf_generator(params)}}] * N_substeps
+
+    SIM_CONFIG = config_sim(
+        {
+            "N": N_runs,
+            "T": range(N_timesteps),
+            "M": params,  # Optional
+        }
+    )
+
+    exp = Experiment()
+    for i_sim in range(N_simulations):
+        exp.append_model(
+            sim_configs=SIM_CONFIG,
+            initial_state=INITIAL_STATE,
+            partial_state_update_blocks=PSUBs
+        )
+    return exp
+
+
+
+def test_print(capfd):
+    exp = run_experiment(create_experiments(N_simulations=3, N_sweeps=3, N_runs=3, N_timesteps=3, N_substeps=3, params={'a': 0}), 'single_proc', supress_print=False)
+    out, err = capfd.readouterr()
+    assert "                  ___________    ____\n  ________ __ ___/ / ____/   |  / __ \\\n / ___/ __` / __  / /   / /| | / / / /\n/ /__/ /_/ / /_/ / /___/ ___ |/ /_/ /\n\\___/\\__,_/\\__,_/\\____/_/  |_/_____/\nby cadCAD" in out
+    assert 'Initializing configurations' in err
+
+    exp = run_experiment(create_experiments(N_simulations=3, N_sweeps=3, N_runs=3, N_timesteps=3, N_substeps=3, params={'a': 0}), 'single_proc', supress_print=True)
+    out, err = capfd.readouterr()
+    assert out == ''
+    assert err == ''
\ No newline at end of file
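`test_print.py` relies on pytest's `capfd` fixture, which captures output at the file-descriptor level: the cadCAD banner is written with `print` (stdout) while `tqdm` draws its bars on stderr, which is why the banner is asserted against `out` and 'Initializing configurations' against `err`. A minimal sketch of that stdout/stderr split; `noisy` is a hypothetical helper, not from this PR:

```python
from tqdm import tqdm


def noisy(disable=False):
    if not disable:
        print("banner")  # plain print -> stdout
    for _ in tqdm(range(3), desc="working", disable=disable):
        pass  # tqdm renders its bar on stderr


def test_noisy_split(capfd):
    noisy()
    out, err = capfd.readouterr()
    assert "banner" in out    # stdout carries the print
    assert "working" in err   # stderr carries the tqdm bar

    noisy(disable=True)
    out, err = capfd.readouterr()
    assert out == '' and err == ''  # fully silent when disabled
```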
diff --git a/testing/test_results_signature.py b/testing/test_results_signature.py
new file mode 100644
index 00000000..af50d955
--- /dev/null
+++ b/testing/test_results_signature.py
@@ -0,0 +1,70 @@
+from cadCAD.configuration import Experiment
+from cadCAD.configuration.utils import config_sim
+from cadCAD.engine import Executor, ExecutionContext, ExecutionMode
+import pytest
+import pandas as pd  # type: ignore
+from typing import Dict, List
+
+# (N_simulations, N_sweeps, N_runs, N_timesteps, N_substeps)
+
+
+CONFIG_SIGNATURES_TO_TEST = [
+    (1, 20, 5, 10, 5), (3, 3, 3, 3, 3), (1, 3, 3, 3, 3),
+    (3, 1, 3, 3, 3), (1, 1, 3, 3, 3),
+    (3, 3, 1, 3, 3), (1, 3, 1, 3, 3), (1, 1, 1, 3, 3)]
+
+
+def run_experiment(exp: Experiment, mode: str) -> List[Dict]:
+    exec_context = ExecutionContext(mode)
+    executor = Executor(exec_context=exec_context, configs=exp.configs)
+    (records, tensor_field, _) = executor.execute()
+    return records
+
+
+def create_experiments(N_simulations=3, N_sweeps=3, N_runs=3, N_timesteps=3, N_substeps=3) -> Experiment:
+
+    INITIAL_STATE = {'varA': None}
+    PSUBs = [{'policies': {}, 'variables': {}}] * N_substeps
+    params = {'A': [None] * N_sweeps,
+              'B': [None]}
+
+    SIM_CONFIG = config_sim(
+        {
+            "N": N_runs,
+            "T": range(N_timesteps),
+            "M": params,  # Optional
+        }
+    )
+
+    exp = Experiment()
+    for i_sim in range(N_simulations):
+        exp.append_model(
+            sim_configs=SIM_CONFIG,
+            initial_state=INITIAL_STATE,
+            partial_state_update_blocks=PSUBs
+        )
+    return exp
+
+
+def expected_rows(N_simulations, N_sweeps, N_runs, N_timesteps, N_substeps) -> int:
+    return N_simulations * N_sweeps * N_runs * (N_timesteps * N_substeps + 1)
+
+
+@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST)
+def test_identifiers_value_counts_single(N_sim, N_sw, N_r, N_t, N_s):
+    args = (N_sim, N_sw, N_r, N_t, N_s)
+    results = run_experiment(create_experiments(*args), 'single_proc')
+    df = pd.DataFrame(results).query("timestep > 0")
+    assert len(set(df.timestep.value_counts().values)) == 1
+    assert len(set(df.subset.value_counts().values)) == 1
+    assert len(set(df.run.value_counts().values)) == 1
+
+
+@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST[:-1])
+def test_identifiers_value_counts_multi(N_sim, N_sw, N_r, N_t, N_s):
+    args = (N_sim, N_sw, N_r, N_t, N_s)
+    results = run_experiment(create_experiments(*args), 'multi_proc')
+    df = pd.DataFrame(results).query("timestep > 0")
+    assert len(set(df.timestep.value_counts().values)) == 1
+    assert len(set(df.subset.value_counts().values)) == 1
+    assert len(set(df.run.value_counts().values)) == 1
diff --git a/testing/test_row_count.py b/testing/test_row_count.py
index a1d78f14..834c844b 100644
--- a/testing/test_row_count.py
+++ b/testing/test_row_count.py
@@ -2,13 +2,14 @@
 from cadCAD.configuration.utils import config_sim
 from cadCAD.engine import Executor, ExecutionContext, ExecutionMode
 import pytest
-
+import pandas as pd  # type: ignore
+from typing import Dict, List

 CONFIG_SIGNATURES_TO_TEST = [(3, 3, 3, 3, 3), (1, 3, 3, 3, 3),
                              (3, 1, 3, 3, 3), (1, 1, 3, 3, 3),
                              (3, 3, 1, 3, 3), (1, 3, 1, 3, 3), (1, 1, 1, 3, 3)]


-def run_experiment(exp: Experiment, mode: str):
+def run_experiment(exp: Experiment, mode: str) -> List[Dict]:
     exec_context = ExecutionContext(mode)
     executor = Executor(exec_context=exec_context, configs=exp.configs)
     (records, tensor_field, _) = executor.execute()
@@ -44,11 +45,11 @@ def expected_rows(N_simulations, N_sweeps, N_runs, N_timesteps, N_substeps) -> i
     return N_simulations * N_sweeps * N_runs * (N_timesteps * N_substeps + 1)


-
 @pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST)
 def test_row_count_single(N_sim, N_sw, N_r, N_t, N_s):
     args = (N_sim, N_sw, N_r, N_t, N_s)
-    assert len(run_experiment(create_experiments(*args), 'single_proc')) == expected_rows(*args)
+    results = run_experiment(create_experiments(*args), 'single_proc')
+    assert len(results) == expected_rows(*args)


 @pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST)
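`test_results_signature.py` and the reworked `test_row_count.py` share the `expected_rows` invariant: each (simulation, sweep, run) combination yields one row per substep per timestep, plus a single initial-state row. Worked through for two of the signatures above:

```python
def expected_rows(N_simulations, N_sweeps, N_runs, N_timesteps, N_substeps) -> int:
    # One row per substep per timestep, plus the initial-state row,
    # for every (simulation, sweep, run) combination.
    return N_simulations * N_sweeps * N_runs * (N_timesteps * N_substeps + 1)

assert expected_rows(3, 3, 3, 3, 3) == 27 * (9 + 1)       # 270 rows
assert expected_rows(1, 20, 5, 10, 5) == 100 * (50 + 1)   # 5100 rows
```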