From 0e94301536d6640cea9ae91816d368d8efa90d75 Mon Sep 17 00:00:00 2001 From: Veljko Lipovac <68282540+vlipovac@users.noreply.github.com> Date: Mon, 13 May 2024 10:51:48 +0200 Subject: [PATCH] Maintenance in porepy.ad (#1166) * ENH: Introducing TimeDependentOperator and IterativeOperator, a generalized handling of arbitrary time step and iterate indices. * MOD: Maintenance on ad.Operator parsing, and operator function framework Co-authored-by: Ivar Stefansson * Update src/porepy/numerics/ad/equation_system.py Co-authored-by: Ivar Stefansson * Update src/porepy/numerics/ad/equation_system.py Co-authored-by: Ivar Stefansson * Update src/porepy/numerics/ad/operator_functions.py Co-authored-by: Ivar Stefansson * Update src/porepy/numerics/ad/operators.py Co-authored-by: Ivar Stefansson * Update src/porepy/numerics/ad/operator_functions.py Co-authored-by: Ivar Stefansson * Update src/porepy/numerics/ad/operators.py Co-authored-by: Ivar Stefansson * Update src/porepy/numerics/ad/operators.py Co-authored-by: Ivar Stefansson * Update src/porepy/numerics/ad/operators.py Co-authored-by: Ivar Stefansson * Update src/porepy/numerics/ad/operators.py Co-authored-by: Ivar Stefansson * Update src/porepy/numerics/ad/operators.py Co-authored-by: Ivar Stefansson * Update src/porepy/numerics/ad/operators.py Co-authored-by: Ivar Stefansson * MOD: Applying some changes from review. * Update src/porepy/numerics/ad/operators.py Co-authored-by: Eirik Keilegavlen * DOC: Added comments regarding Variable ids * STY: black & flake8 * REF: EquationSystem.dofs_of again returns indices corresponding to order of input arguments. * MOD: Changing time step indexation: Previous time starts with index 1. --------- Co-authored-by: Ivar Stefansson Co-authored-by: Eirik Keilegavlen --- src/porepy/applications/test_utils/models.py | 4 +- src/porepy/examples/mandel_biot.py | 4 +- src/porepy/examples/terzaghi_biot.py | 2 +- src/porepy/models/boundary_condition.py | 2 +- src/porepy/models/momentum_balance.py | 2 +- src/porepy/models/solution_strategy.py | 11 +- src/porepy/numerics/ad/_ad_utils.py | 165 ++- src/porepy/numerics/ad/equation_system.py | 325 ++--- src/porepy/numerics/ad/forward_mode.py | 50 + src/porepy/numerics/ad/operator_functions.py | 541 ++++---- src/porepy/numerics/ad/operators.py | 1233 +++++++++-------- .../conforming_propagation.py | 2 +- .../fracture_deformation/propagation_model.py | 4 +- .../numerics/nonlinear/nonlinear_solvers.py | 2 +- src/porepy/numerics/vem/dual_elliptic.py | 8 +- src/porepy/viz/data_saving_model_mixin.py | 2 +- src/porepy/viz/exporter.py | 10 +- src/porepy/viz/plot_grid.py | 2 +- .../setups/manu_flow_comp_2d_frac.py | 7 +- .../setups/manu_poromech_nofrac_2d.py | 10 +- .../setups/manu_thermoporomech_nofrac_2d.py | 15 +- .../setups/manu_thermoporomech_nofrac_3d.py | 6 +- tests/models/test_energy_balance.py | 2 +- tests/models/test_fluid_mass_balance.py | 12 +- tests/models/test_momentum_balance.py | 4 +- tests/models/test_poromechanics.py | 12 +- tests/models/test_thermoporomechanics.py | 4 +- tests/numerics/ad/test_equation_system.py | 39 +- tests/numerics/ad/test_forward_mode.py | 56 + tests/numerics/ad/test_operator_functions.py | 78 ++ tests/numerics/ad/test_operators.py | 169 ++- .../test_fracture_propagation.py | 16 +- tests/numerics/fv/test_tpfa.py | 2 +- tests/viz/test_exporter.py | 16 +- tests/viz/test_plot_grid.py | 6 +- tutorials/benchmark_simulation.ipynb | 2 +- tutorials/equations.ipynb | 2 +- tutorials/exporting_models.ipynb | 2 +- 38 files changed, 1555 insertions(+), 1274 
deletions(-) create mode 100644 tests/numerics/ad/test_operator_functions.py diff --git a/src/porepy/applications/test_utils/models.py b/src/porepy/applications/test_utils/models.py index add292f489..568043b653 100644 --- a/src/porepy/applications/test_utils/models.py +++ b/src/porepy/applications/test_utils/models.py @@ -222,10 +222,10 @@ def compare_scaled_primary_variables( for var_name, var_unit in zip(variable_names, variable_units): # Obtain scaled values. scaled_values_0 = setup_0.equation_system.get_variable_values( - variables=[var_name], time_step_index=0 + variables=[var_name], time_step_index=1 ) scaled_values_1 = setup_1.equation_system.get_variable_values( - variables=[var_name], time_step_index=0 + variables=[var_name], time_step_index=1 ) # Convert back to SI units. values_0 = setup_0.fluid.convert_units(scaled_values_0, var_unit, to_si=True) diff --git a/src/porepy/examples/mandel_biot.py b/src/porepy/examples/mandel_biot.py index 208d4b3d84..32762e017f 100644 --- a/src/porepy/examples/mandel_biot.py +++ b/src/porepy/examples/mandel_biot.py @@ -1435,7 +1435,7 @@ def initial_condition(self) -> None: values=self.exact_sol.pressure(sd, 0), data=data, iterate_index=0, - time_step_index=0, + time_step_index=1, ) # Set initial displacement @@ -1444,7 +1444,7 @@ def initial_condition(self) -> None: values=self.exact_sol.displacement(sd, 0), data=data, iterate_index=0, - time_step_index=0, + time_step_index=1, ) def after_simulation(self) -> None: diff --git a/src/porepy/examples/terzaghi_biot.py b/src/porepy/examples/terzaghi_biot.py index 9cb6bab8cb..6e77236aa6 100644 --- a/src/porepy/examples/terzaghi_biot.py +++ b/src/porepy/examples/terzaghi_biot.py @@ -699,7 +699,7 @@ def initial_condition(self) -> None: values=initial_p, data=data, iterate_index=0, - time_step_index=0, + time_step_index=1, ) def after_simulation(self) -> None: diff --git a/src/porepy/models/boundary_condition.py b/src/porepy/models/boundary_condition.py index de091e0af0..a997b5b70c 100644 --- a/src/porepy/models/boundary_condition.py +++ b/src/porepy/models/boundary_condition.py @@ -92,7 +92,7 @@ def update_boundary_condition( # No previous time step exists. The method was called during # the initialization. vals = function(bg) - pp.set_solution_values(name=name, values=vals, data=data, time_step_index=0) + pp.set_solution_values(name=name, values=vals, data=data, time_step_index=1) # Set the unknown time step values. vals = function(bg) diff --git a/src/porepy/models/momentum_balance.py b/src/porepy/models/momentum_balance.py index f04b23469c..097d807980 100644 --- a/src/porepy/models/momentum_balance.py +++ b/src/porepy/models/momentum_balance.py @@ -790,7 +790,7 @@ def initial_condition(self) -> None: self.equation_system.set_variable_values( traction_vals.ravel("F"), [self.contact_traction_variable], - time_step_index=0, + time_step_index=1, iterate_index=0, ) diff --git a/src/porepy/models/solution_strategy.py b/src/porepy/models/solution_strategy.py index 85c74da962..31e49ee720 100644 --- a/src/porepy/models/solution_strategy.py +++ b/src/porepy/models/solution_strategy.py @@ -257,11 +257,14 @@ def domain(self) -> pp.Domain: def time_step_indices(self) -> np.ndarray: """Indices for storing time step solutions. + Note: + (Previous) Time step indices should start with 1. + Returns: An array of the indices of which time step solutions will be stored. 
""" - return np.array([0]) + return np.array([1]) @property def iterate_indices(self) -> np.ndarray: @@ -299,9 +302,9 @@ def reset_state_from_file(self) -> None: time_index, times_file, ) - vals = self.equation_system.get_variable_values(time_step_index=0) + vals = self.equation_system.get_variable_values(time_step_index=1) self.equation_system.set_variable_values( - vals, iterate_index=0, time_step_index=0 + vals, iterate_index=0, time_step_index=1 ) # Update the boundary conditions to both the time step and iterate solution. self.update_time_dependent_ad_arrays() @@ -439,7 +442,7 @@ def after_nonlinear_convergence(self) -> None: solution = self.equation_system.get_variable_values(iterate_index=0) self.equation_system.shift_time_step_values() self.equation_system.set_variable_values( - values=solution, time_step_index=0, additive=False + values=solution, time_step_index=1, additive=False ) self.convergence_status = True self.save_data_time_step() diff --git a/src/porepy/numerics/ad/_ad_utils.py b/src/porepy/numerics/ad/_ad_utils.py index dfcce0b6cd..766475cc23 100644 --- a/src/porepy/numerics/ad/_ad_utils.py +++ b/src/porepy/numerics/ad/_ad_utils.py @@ -28,7 +28,7 @@ from __future__ import annotations from abc import ABCMeta -from typing import Optional +from typing import Any, Optional import numpy as np import scipy.sparse as sps @@ -294,6 +294,60 @@ def discretize_from_list( pass +def _validate_indices( + time_step_index: Optional[int] = None, + iterate_index: Optional[int] = None, +) -> list[tuple[Any, int]]: + """Helper method to validate the indexation of getter and setter methods for + values in a grid's data dictionary. + + See :func:`set_solution_values` and :func:`get_solution_values`. + + """ + if time_step_index is None and iterate_index is None: + raise ValueError( + "At least one of time_step_index and iterate_index needs to be different" + " from None." + ) + + out = [] + + if iterate_index is not None: + # Some iterate values of the current time + if iterate_index >= 0: + out.append((pp.ITERATE_SOLUTIONS, iterate_index)) + # Negative iterate indices are not supported + else: + raise ValueError( + "Use increasing, non-negative integers for (previous) iterate values." + ) + + if time_step_index is not None: + # Some previous time. + if time_step_index > 0: + out.append((pp.TIME_STEP_SOLUTIONS, time_step_index)) + # Current time. NOTE this is ambigous since the current time is an unknown and + # has multiple iterate values. + # Alternatively, we could associate time_step_index = 0 with iterate_index = 0 + # the below elif branch introduces the convention that + # time step = iterate step = 0 are equivalent. + elif time_step_index == 0: + # if (pp.ITERATE_SOLUTIONS, 0) not in out: + # out.append((pp.ITERATE_SOLUTIONS, 0)) + raise ValueError( + "Using time_step_index = 0 (current time) is ambiguous." + + " Specify iterate_index instead." + + " First previous time step value is time_step_index = 1." + ) + # Negative time step indices are not supported + else: + raise ValueError( + "Use increasing, non-negative integers for previous time step values." + ) + + return out + + def set_solution_values( name: str, values: np.ndarray, @@ -302,7 +356,8 @@ def set_solution_values( iterate_index: Optional[int] = None, additive: bool = False, ) -> None: - """Function for setting values in the data dictionary. + """Function for setting values in the data dictionary, for some time-dependent or + iterative term. Parameters: name: Name of the quantity that is to be assigned values. 
@@ -320,36 +375,29 @@ def set_solution_values( dictionary should be added to or overwritten. Raises: - ValueError: If neither of `time_step_index` or `iterate_index` have been - assigned a non-None value. + ValueError: In the case of inconsistent usage of indices + (both None, or negative values). + ValueError: If the user attempts to set values additively at an index where no + values were set before. """ - if time_step_index is None and iterate_index is None: - raise ValueError( - "At least one of time_step_index and iterate_index needs to be different" - " from None." - ) - - if not additive: - if time_step_index is not None: - if pp.TIME_STEP_SOLUTIONS not in data: - data[pp.TIME_STEP_SOLUTIONS] = {} - if name not in data[pp.TIME_STEP_SOLUTIONS]: - data[pp.TIME_STEP_SOLUTIONS][name] = {} - data[pp.TIME_STEP_SOLUTIONS][name][time_step_index] = values.copy() - - if iterate_index is not None: - if pp.ITERATE_SOLUTIONS not in data: - data[pp.ITERATE_SOLUTIONS] = {} - if name not in data[pp.ITERATE_SOLUTIONS]: - data[pp.ITERATE_SOLUTIONS][name] = {} - data[pp.ITERATE_SOLUTIONS][name][iterate_index] = values.copy() - else: - if time_step_index is not None: - data[pp.TIME_STEP_SOLUTIONS][name][time_step_index] += values - - if iterate_index is not None: - data[pp.ITERATE_SOLUTIONS][name][iterate_index] += values + loc_index = _validate_indices(time_step_index, iterate_index) + + for loc, index in loc_index: + if loc not in data: + data[loc] = {} + if name not in data[loc]: + data[loc][name] = {} + + if additive: + if index not in data[loc][name]: + raise ValueError( + f"Cannot set value additively for {name} at {(loc, index)}:" + + " No values stored to add to." + ) + data[loc][name][index] += values + else: + data[loc][name][index] = values.copy() def get_solution_values( @@ -358,11 +406,8 @@ def get_solution_values( time_step_index: Optional[int] = None, iterate_index: Optional[int] = None, ) -> np.ndarray: - """Function for fetching values stored in the data dictionary. - - This function should be used for obtaining solution values that are not related to a - variable. This is to avoid the cumbersome alternative of writing e.g.: - `data["solution_name"][pp.TIME_STEP_SOLUTION/pp.ITERATE_SOLUTION][0]`. + """Function for fetching values stored in the data dictionary, for some + time-dependent or iterative term. Parameters: name: Name of the parameter whose values we are interested in. @@ -375,45 +420,31 @@ def get_solution_values( from before. Raises: - ValueError: If both time_step_index and iterate_index are None. - - ValueErorr: If both time_step_index and iterate_index are assigned a value. - - KeyError: If there are no data values assigned to the provided name. - - KeyError: If there are no data values assigned to the time step/iterate index. + ValueError: In the case of inconsistent usage of indices + (both None or negative values). + AssertionError: If the user attempts to get iterate and time step values + simultanously. Only 1 index is permitted in getter + KeyError: If no values are stored for the passed index. Returns: - An array containing the solution values. + A copy of the values stored at the passed index. """ - if time_step_index is None and iterate_index is None: - raise ValueError("Both time_step_index and iterate_index cannot be None.") + loc_index = _validate_indices(time_step_index, iterate_index) + assert ( + len(loc_index) == 1 + ), "Cannot get value from both iterate and time step at once. Call separately." 
- if time_step_index is not None and iterate_index is not None: - raise ValueError( - "Both time_step_index and iterate_index cannot be assigned a value." - ) + loc, index = loc_index[0] - if time_step_index is not None: - if name not in data[pp.TIME_STEP_SOLUTIONS].keys(): - raise KeyError(f"There are no values related the parameter name {name}.") + try: + value = data[loc][name][index].copy() + except KeyError as err: + raise KeyError( + f"No values stored for {name} at {(loc, index)}: {str(err)}." + ) from err - if time_step_index not in data[pp.TIME_STEP_SOLUTIONS][name].keys(): - raise KeyError( - f"There are no values stored for time step index {time_step_index}." - ) - return data[pp.TIME_STEP_SOLUTIONS][name][time_step_index].copy() - - else: - if name not in data[pp.ITERATE_SOLUTIONS].keys(): - raise KeyError(f"There are no values related the parameter name {name}.") - - if iterate_index not in data[pp.ITERATE_SOLUTIONS][name].keys(): - raise KeyError( - f"There are no values stored for iterate index {iterate_index}." - ) - return data[pp.ITERATE_SOLUTIONS][name][iterate_index].copy() + return value class MergedOperator(operators.Operator): diff --git a/src/porepy/numerics/ad/equation_system.py b/src/porepy/numerics/ad/equation_system.py index 481012567e..84b09fb9b8 100644 --- a/src/porepy/numerics/ad/equation_system.py +++ b/src/porepy/numerics/ad/equation_system.py @@ -168,12 +168,19 @@ def __init__(self, mdg: pp.MixedDimensionalGrid) -> None: """ - self._variables: list[Variable] = list() - """Contains references to Variables. + self._variables: dict[int, Variable] = dict() + """Dictionary mapping variable IDs to the atomic variables created and managed + by this instance. + + Variables contained here are ordered chronologically in terms of + instantiation. It does not reflect the order of DOFs, which is to some degree + optimized. A Variable is uniquely identified by its name and domain, stored as attributes of the Variable object. + Implementation-wise it is uniquely identified by its ID. + """ self._Schur_complement: Optional[tuple] = None @@ -181,9 +188,11 @@ def __init__(self, mdg: pp.MixedDimensionalGrid) -> None: """ - self._variable_numbers: dict[Variable, int] = dict() - """Dictionary containing the index of the variable in the system vector of the - last assembled system. + self._variable_numbers: dict[int, int] = dict() + """A Map between a variable's ID and its index in the system vector. + + This is an optimized structure, meaning the order of entries is created in + :meth:`_cluster_dofs_gridwise`. """ @@ -195,8 +204,8 @@ def __init__(self, mdg: pp.MixedDimensionalGrid) -> None: """ - self._variable_dof_type: dict[Variable, dict[GridEntity, int]] = dict() - """Dictionary containing the type of DOFs per variable. + self._variable_dof_type: dict[int, dict[GridEntity, int]] = dict() + """Dictionary mapping from variable IDs to the type of DOFs per variable. The type is given as a dictionary with keys 'cells', 'faces' or 'nodes', and integer values denoting the number of DOFs per grid entity. @@ -239,7 +248,7 @@ def SubSystem( unknown_equations = set(equations).difference(known_equations) if len(unknown_equations) > 0: raise ValueError(f"Unknown variable(s) {unknown_equations}.") - unknown_variables = set(variables).difference(self._variables) + unknown_variables = set(variables).difference(self.variables) if len(unknown_variables) > 0: raise ValueError(f"Unknown variable(s) {unknown_variables}.") @@ -251,19 +260,20 @@ def SubSystem( # attributes. 
This should be acceptable since this is a factory method. # Loop over known variables to preserve DOF order. - for variable in self._variables: + for variable in self.variables: if variable in variables: # Update variables in subsystem. - new_equation_system._variables.append(variable) + new_equation_system._variables[variable.id] = variable # Update variable numbers in subsystem. - new_equation_system._variable_dof_type[variable] = ( - self._variable_dof_type[variable] + new_equation_system._variable_dof_type[variable.id] = ( + self._variable_dof_type[variable.id] ) # Create dofs in subsystem. new_equation_system._append_dofs(variable) + new_equation_system._cluster_dofs_gridwise() # Loop over known equations to preserve row order. for name in known_equations: if name in equations: @@ -293,13 +303,13 @@ def variables(self) -> list[Variable]: system. """ - return self._variables + return [var for var in self._variables.values()] @property def variable_domains(self) -> list[pp.GridLike]: """List containing all domains where at least one variable is defined.""" domains = set() - for var in self._variables: + for var in self.variables: domains.add(var.domain) return list(domains) @@ -324,7 +334,7 @@ def md_variable( """ if grids is None: - variables = [var for var in self._variables if var.name == name] + variables = [var for var in self.variables if var.name == name] # We don't allow combinations of variables with different domain types # in a md variable. heterogeneous_domain = False @@ -345,7 +355,7 @@ def md_variable( else: variables = [ var - for var in self._variables + for var in self.variables if var.name == name and var.domain in grids ] return MixedDimensionalVariable(variables) @@ -453,12 +463,14 @@ def create_variables( # Store it in the system variables.append(new_variable) - self._variables.append(new_variable) + self._variables[new_variable.id] = new_variable # Append the new DOFs to the global system. - self._variable_dof_type[new_variable] = dof_info + self._variable_dof_type[new_variable.id] = dof_info self._append_dofs(new_variable) + # New optimized order + self._cluster_dofs_gridwise() # Create an md variable that wraps all the individual variables created on # individual grids. merged_variable = MixedDimensionalVariable(variables) @@ -521,11 +533,11 @@ def get_variables( and tag_name is None and tag_value is None ): - return self._variables + return self.variables # If no variables or grids are given, use full sets. if variables is None: - variables = self._variables + variables = self.variables if grids is None: # Note: This gives all grids known to variables, not all grids in the # md grid. The result of the filtering will be the same, though. @@ -572,56 +584,28 @@ def get_variable_values( :meth:`num_dofs`. Raises: - ValueError: If neither of ``time_step_index`` or ``iterate_index`` have been - assigned a non-None value. - ValueError: If both ``time_step_index`` and ``iterate_index`` have been - assigned a value. - KeyError: If no values are stored for the VariableType input. ValueError: If unknown VariableType arguments are passed. """ - if time_step_index is None and iterate_index is None: - raise ValueError( - "Either time_step_index or iterate_index needs to be different from" - " None" - ) - - if time_step_index is not None and iterate_index is not None: - raise ValueError( - "Only one of time_step_index or iterate_index should be assigned a" - " value." 
- ) - variables = self._parse_variable_type(variables) + var_ids = [var.id for var in variables] # Storage for atomic blocks of the sub vector (identified by name-grid pairs). values = [] # Loop over all blocks and process those requested. # This ensures uniqueness and correct order. - for variable in self._variable_numbers: - if variable in variables: - name = variable.name - grid = variable.domain - if isinstance(grid, pp.Grid): - data = self.mdg.subdomain_data(grid) - elif isinstance(grid, pp.MortarGrid): - data = self.mdg.interface_data(grid) - # Extract a copy of requested values. - try: - if iterate_index is not None: - values.append( - data[pp.ITERATE_SOLUTIONS][name][iterate_index].copy() - ) - - elif time_step_index is not None: - values.append( - data[pp.TIME_STEP_SOLUTIONS][name][time_step_index].copy() - ) - - except KeyError: - raise KeyError( - f"No values stored for variable {name} on grid {grid}." - ) + for id_ in self._variable_numbers: + if id_ in var_ids: + variable = self._variables[id_] + + val = pp.get_solution_values( + variable.name, + self._get_data(variable.domain), + time_step_index=time_step_index, + iterate_index=iterate_index, + ) + # NOTE get_solution_values already returns a copy + values.append(val) # If there are matching blocks, concatenate and return. if values: @@ -648,77 +632,57 @@ def set_variable_values( Mismatches of is-size and should-be-size according to the subspace specified by ``variables`` will raise respective errors by numpy. + See also: + :meth:`~porepy.numerics.ad._ad_utils.set_solution_values`. + Parameters: values: Vector of size corresponding to number of DOFs of the specified variables. variables (optional): VariableType input for which the values are requested. If None (default), the global vector of unknowns will be set. - time_step_index: Several solutions might be stored in the data dictionary. - This parameter determines which one of these is to be overwritten/added - to (depends on ``additive``). If ``None``, the values will not be - stored to ``pp.TIME_STEP_SOLUTIONS``. - iterate_index: Several iterates might be stored in the data dictionary. This - parameter determines which one of these is to be overwritten/added to - (depends on ``additive``). If ``None``, the values will not be stored - to ``pp.ITERATE_SOLUTIONS``. + time_step_index: Index of previous time step for which the values are + intended. + iterate_index: Iterate index for current time step for which the values are + intended. additive (optional): Flag to write values additively. To be used in iterative procedures. Raises: - ValueError: If neither of ``time_step_index`` or ``iterate_index`` have been - assigned a value. ValueError: If unknown VariableType arguments are passed. """ - if time_step_index is None and iterate_index is None: - raise ValueError( - "At least one of time_step_index and iterate_index needs to be" - "different from None." - ) # Start of dissection. dof_start = 0 dof_end = 0 variables = self._parse_variable_type(variables) - for variable, variable_number in self._variable_numbers.items(): - if variable in variables: - name = variable.name - grid = variable.domain - - data = self._get_data(grid=grid) + var_ids = [var.id for var in variables] + for id_, variable_number in self._variable_numbers.items(): + if id_ in var_ids: + # 1. Slice the vector to local size + # This will raise errors if indexation is out of range. num_dofs = int(self._variable_num_dofs[variable_number]) + # Extract local vector. 
+ # This will raise errors if indexation is out of range. dof_end = dof_start + num_dofs # Extract local vector. # This will raise errors if indexation is out of range. local_vec = values[dof_start:dof_end] - # The data dictionary will have ``pp.TIME_STEP_SOLUTIONS`` and - # ``pp.ITERATE_SOLUTIONS`` entries already created during - # create_variables. If an error is returned here, a variable has been - # created in a non-standard way. Store new values as requested. - if additive: - if iterate_index is not None: - data[pp.ITERATE_SOLUTIONS][name][iterate_index] += local_vec - - if time_step_index is not None: - data[pp.TIME_STEP_SOLUTIONS][name][time_step_index] += local_vec + # 2. Use the AD utilities to set the values + variable = self._variables[id_] + pp.set_solution_values( + variable.name, + local_vec, + self._get_data(grid=variable.domain), + time_step_index=time_step_index, + iterate_index=iterate_index, + additive=additive, + ) - else: - if iterate_index is not None: - # The copy is critcial here. - data[pp.ITERATE_SOLUTIONS][name][ - iterate_index - ] = local_vec.copy() - - if time_step_index is not None: - # The copy is critcial here. - data[pp.TIME_STEP_SOLUTIONS][name][ - time_step_index - ] = local_vec.copy() - - # Move dissection forward. + # 3. Move dissection forward. dof_start = dof_end # Last sanity check if the vector was properly sized, or if it was too large. @@ -788,16 +752,25 @@ def _shift_variable_values( """ # Looping through the variables and shifting the values variables = self._parse_variable_type(variables) - for variable, _ in self._variable_numbers.items(): - if variable in variables: - name = variable.name - grid = variable.domain - data = self._get_data(grid=grid) - - # Shift old values as requested. - num_stored = len(data[location][name]) - for i in range(num_stored - 1, 0, -1): - data[location][name][i] = data[location][name][i - 1].copy() + for variable in variables: + name = variable.name + grid = variable.domain + data = self._get_data(grid=grid) + + # Shift old values as requested. + num_stored = len(data[location][name]) + if location == pp.ITERATE_SOLUTIONS: + range_ = range(num_stored - 1, 0, -1) + # previous time step values start with index 1. + # NOTE this functionality should be in _ad_utils, together with set and get + elif location == pp.TIME_STEP_SOLUTIONS: + range_ = range(num_stored, 1, -1) + else: + raise NotImplementedError( + f"Shift values not implemented for location {location}" + ) + for i in range_: + data[location][name][i] = data[location][name][i - 1].copy() def _get_data( self, @@ -821,7 +794,9 @@ def _get_data( ### DOF management ----------------------------------------------------------------- def _append_dofs(self, variable: pp.ad.Variable) -> None: - """Appends DOFs for a newly created variable. + """Appends DOFs for a newly created variable at the end of the current order. + + Optimization of variable order is done afterwards. Must only be called by :meth:`create_variables`. @@ -834,12 +809,12 @@ def _append_dofs(self, variable: pp.ad.Variable) -> None: # Sanity check that no previous data is overwritten. This should not happen, # if class not used in hacky way. - assert variable not in self._variable_numbers + assert variable.id not in self._variable_numbers # Count number of dofs for this variable on this grid and store it. # The number of dofs for each dof type defaults to zero. 
- local_dofs = self._variable_dof_type[variable] + local_dofs = self._variable_dof_type[variable.id] # Both subdomains and interfaces have cell variables. num_dofs = variable.domain.num_cells * local_dofs.get("cells", 0) @@ -851,14 +826,11 @@ def _append_dofs(self, variable: pp.ad.Variable) -> None: ) + variable.domain.num_nodes * local_dofs.get("nodes", 0) # Update the global dofs and block numbers - self._variable_numbers.update({variable: last_variable_number}) + self._variable_numbers.update({variable.id: last_variable_number}) self._variable_num_dofs = np.concatenate( [self._variable_num_dofs, np.array([num_dofs], dtype=int)] ) - # first optimization of Jacobian structure - self._cluster_dofs_gridwise() - def _cluster_dofs_gridwise(self) -> None: """Re-arranges the DOFs grid-wise s.t. we obtain grid-blocks in the column sense and reduce the matrix bandwidth. @@ -866,41 +838,37 @@ def _cluster_dofs_gridwise(self) -> None: The aim is to impose a more block-diagonal-like structure on the Jacobian where blocks in the column sense represent single grids in the following order: - Note: - Off-diagonal blocks will still be present if subdomain-interface variables - are defined. - 1. For each grid in ``mdg.subdomains`` 1. For each variable defined on that grid 2. For each grid in ``mdg.interfaces`` 1. For each variable defined on that mortar grid - The order of variables per grid is given by the order of variable creation - (stored as order of keys in ``self.variables``). - + The order of variables per grid is given by the order of variable creation. This method is called after each creation of variables and respective DOFs. - TODO: Revisit. I think I have broken it by looping over _variables instead of - subdomains and interfaces. """ # Data stracture for the new order of dofs. new_variable_counter: int = 0 - new_variable_numbers: dict[Variable, int] = dict() + new_variable_numbers: dict[int, int] = dict() new_block_dofs: list[int] = list() - for variable in self._variables: - # If this variable-grid combination is present, add it to the new - # order of dofs. - if variable in self._variable_numbers: - # Extract created number of dofs - local_dofs: int = self._variable_num_dofs[ - self._variable_numbers[variable] - ] + # 1. Per subdomain, order variables + for grid in self.mdg.subdomains(): + for id_, variable in self._variables.items(): + if variable.domain == grid: + local_dofs = self._variable_num_dofs[self._variable_numbers[id_]] + new_block_dofs.append(local_dofs) + new_variable_numbers.update({id_: new_variable_counter}) + new_variable_counter += 1 - # Store new block number and dofs in new order. - new_block_dofs.append(local_dofs) - new_variable_numbers.update({variable: new_variable_counter}) - new_variable_counter += 1 + # 2. 
Per interface, order variables + for intf in self.mdg.interfaces(): + for id_, variable in self._variables.items(): + if variable.domain == intf: + local_dofs = self._variable_num_dofs[self._variable_numbers[id_]] + new_block_dofs.append(local_dofs) + new_variable_numbers.update({id_: new_variable_counter}) + new_variable_counter += 1 # Replace old block order self._variable_num_dofs = np.array(new_block_dofs, dtype=int) @@ -933,17 +901,17 @@ def _parse_variable_type(self, variables: Optional[VariableList]) -> list[Variab """ if variables is None: - return self.get_variables() + return self.variables parsed_variables = [] assert isinstance(variables, list) for variable in variables: if isinstance(variable, MixedDimensionalVariable): - parsed_variables += variable.sub_vars + parsed_variables += [var for var in variable.sub_vars] elif isinstance(variable, Variable): parsed_variables.append(variable) elif isinstance(variable, str): # Use _variables to avoid recursion (get_variables() calls this method) - vars = [var for var in self._variables if var.name == variable] + vars = [var for var in self._variables.values() if var.name == variable] parsed_variables += vars else: raise ValueError( @@ -1029,37 +997,46 @@ def projection_to(self, variables: Optional[VariableList] = None) -> sps.csr_mat return sps.csr_matrix((0, num_dofs)) def dofs_of(self, variables: VariableList) -> np.ndarray: - """Get the indices in the global vector of unknowns belonging to the variable(s). + """Get the indices in the global vector of unknowns belonging to the variables. Parameters: variables: VariableType input for which the indices are requested. Returns: - an order-preserving array of indices of DOFs belonging to the VariableType input. + An array of indices/ DOFs corresponding to ``variables``. + Note that the order of indices corresponds to the order in ``variables``. Raises: - ValueError: if unknown VariableType arguments are passed. + ValueError: If an unknown variable is passed as argument. """ variables = self._parse_variable_type(variables) global_variable_dofs = np.hstack((0, np.cumsum(self._variable_num_dofs))) - # Storage of indices per requested variable. - indices = list() - for variable in variables: - var_number = self._variable_numbers[variable] - var_indices = np.arange( - global_variable_dofs[var_number], - global_variable_dofs[var_number + 1], - dtype=int, - ) - indices.append(var_indices) + indices: list[np.ndarray] = [] + + for var in variables: + if var.id in self._variable_numbers: + variable_number = self._variable_numbers[var.id] + var_indices = np.arange( + global_variable_dofs[variable_number], + global_variable_dofs[variable_number + 1], + dtype=int, + ) + indices.append(var_indices) + else: + raise ValueError( + f"Variable {var.name} with ID {var.id} not registered among DOFS" + + f" of equation system {self}." + ) # Concatenate indices, if any if len(indices) > 0: - return np.concatenate(indices, dtype=int) + all_indices = np.concatenate(indices, dtype=int) else: - return np.array([], dtype=int) + all_indices = np.array([], dtype=int) + + return all_indices def identify_dof(self, dof: int) -> Variable: """Identifies the variable to which a specific DOF index belongs. 
@@ -1085,10 +1062,16 @@ def identify_dof(self, dof: int) -> Variable: # Find the variable number belonging to this index variable_number = np.argmax(global_variable_dofs > dof) - 1 # Get the variable key from _variable_numbers - variable = [ - var for var, num in self._variable_numbers.items() if num == variable_number - ][0] - return variable + # find the ID belonging to the dof + id_ = [ + id_ for id_, num in self._variable_numbers.items() if num == variable_number + ] + # sanity check that only 1 ID was found + assert len(id_) == 1, "Failed to find unique ID corresponding to `dof`." + # find variable with the ID + variable = [var for _id, var in self._variables.items() if _id == id_[0]] + assert len(variable) == 1, "Failed to find Variable corresponding to `dof`." + return variable[0] ### Equation management ------------------------------------------------------------------- @@ -1261,11 +1244,11 @@ def update_variable_num_dofs(self) -> None: be used with care. """ - for var, ind in self._variable_numbers.items(): + for id_, var in self._variables.items(): # Grid quantity (grid or interface), and variable grid = var.domain - dof = self._variable_dof_type[var] + dof = self._variable_dof_type[id_] num_dofs: int = grid.num_cells * dof.get("cells", 0) # type: ignore if isinstance(grid, pp.Grid): @@ -1275,7 +1258,7 @@ def update_variable_num_dofs(self) -> None: ) + grid.num_nodes * dof.get("nodes", 0) # Update local counting - self._variable_num_dofs[ind] = num_dofs + self._variable_num_dofs[self._variable_numbers[id_]] = num_dofs ### System assembly and discretization ---------------------------------------------------- @@ -1602,7 +1585,7 @@ def assemble( """ if variables is None: - variables = self._variables + variables = self.variables # equ_blocks is a dictionary with equation names as keys and the corresponding # row indices of the equations. If the user has requested that equations are diff --git a/src/porepy/numerics/ad/forward_mode.py b/src/porepy/numerics/ad/forward_mode.py index 574ba65972..3c84ca47d4 100644 --- a/src/porepy/numerics/ad/forward_mode.py +++ b/src/porepy/numerics/ad/forward_mode.py @@ -100,6 +100,56 @@ def __repr__(self) -> str: s += " elements" return s + def __getitem__(self, key: slice | np._ArrayLikeInt) -> AdArray: + """Slice the Ad Array row-wise (value and Jacobian). + + Parameters: + key: A row-index (integer) or slice object to be applied to :attr:`val` and + :attr:`jac` + + Returns: + A new Ad array with values and Jacobian sliced row-wise. + + """ + # NOTE mypy complains even though numpy arrays can handle slices [x:y:z] + # Probably a missing type annotation on numpy's side + val = self.val[key] # type:ignore[index] + # in case of single index, broadcast to 1D array + if val.ndim == 0: + val = np.array([val]) + return AdArray(val, self.jac[key]) + + def __setitem__( + self, + key: slice | np._ArrayLikeInt, + new_value: pp.number | np.ndarray | AdArray, + ) -> None: + """Insert new values in :attr:`val` and :attr:`jac` row-wise. + + Note: + Broadcasting is outsourced to numpy and scipy. If ``new_value`` is not + compatible in terms of size and ``key``, respective errors are raised. + + Parameters: + key: A row-index (integer) or slice object to set the rows in value and + Jacobian + new_value: New values for :attr:`val` and rows of :attr:`jac`. + If ``new_value`` is an Ad array, its ``jac`` is inserted into the + defined rows. + + Raises: + NotImplementedError: If ``new_value`` is not a number, numpy array or + Ad array. 
+ + """ + if isinstance(new_value, np.ndarray | pp.number): + self.val[key] = new_value + elif isinstance(new_value, AdArray): + self.val[key] = new_value.val + self.jac[key] = new_value.jac + else: + raise NotImplementedError("Setting") + def __add__(self, other: AdType) -> AdArray: """Add the AdArray to another object. diff --git a/src/porepy/numerics/ad/operator_functions.py b/src/porepy/numerics/ad/operator_functions.py index b2509373b7..7a3c3c02b3 100644 --- a/src/porepy/numerics/ad/operator_functions.py +++ b/src/porepy/numerics/ad/operator_functions.py @@ -1,6 +1,13 @@ -"""This module contains callable operators representing functions to be called with other -operators as input arguments. -Contains also a decorator class for callables, which transforms them automatically in the +"""This module contains callable operators representing functions to be called with +other operators as input arguments. + +Operator functions represent a numerical function in the AD framework, with its +arguments represented by other Ad operators. +The actual numerical value is obtained during +:meth:`~porepy.numerics.ad.operators.Operator.value` or +:meth:`~porepy.numerics.ad.operators.Operator.value_and_jacobian`. + +Contains also a decorator class for callables, which transforms them automatically in a specified operator function type. """ @@ -9,7 +16,7 @@ import abc from functools import partial -from typing import Callable, Optional, Type +from typing import Callable, Optional, Sequence, Type import numpy as np import scipy.sparse as sps @@ -17,89 +24,86 @@ import porepy as pp from porepy.numerics.ad.forward_mode import AdArray +from .functions import FloatType from .operators import Operator __all__ = [ - "Function", - "ConstantFunction", + "AbstractFunction", "DiagonalJacobianFunction", + "Function", "InterpolatedFunction", "ADmethod", ] -### BASE CLASSES ------------------------------------------------------------------------------ + +def _raise_no_arithmetics_with_functions_error(): + raise TypeError("Operator functions must be called before applying any operation.") class AbstractFunction(Operator): - """Abstract class for all operator functions, i.e. functions evaluated on some other AD - operators. + """Abstract class for all operator functions, i.e. functions called with some other + AD operators. - Implements the callable-functionality and provides abstract methods for obtaining function - values and the Jacobian. - The abstraction intends to provide means for approximating operators, where values are - e.g. interpolated and the Jacobian is approximated using FD. + Implements the call with Ad operators, creating an operator with children + and its operation set to + :attr:`~porepy.numerics.ad.operators.Operator.Operations.evaluate`. - Note: - One can flag the operator as ``ad_compatible``. If flagged, the AD framework passes - AD arrays directly to the callable ``func`` and will **not** call the abstract methods - for values and the Jacobian during operator parsing. - If for some reason one wants to flag the function as AD compatible, but still have the - abstract methods called, this is as of now **not** supported. + Provides abstract methods to implement the computation of value and Jacobian of the + function independently. - For now only one child class, porepy.ad.Function, flags itself always as AD compatible. + The abstract function itself has no arithmetic overloads, since its meaning + is given only by calling it using other operators. 
Type errors are raised if the + user attempts to use any overload implemented in the base class. Parameters: - func: callable Python object representing a (numeric) function. - Expected to take numerical information in some form and return numerical - information in the same form. - name: name of this instance as an AD operator - array_compatible (optional): If true, the callable ``func`` will be called - using arrays (numpy.typing.ArrayLike). Flagging this true, the user ensures - that the callable can work with arrays and return respectively - formatted output. If false, the function will be evaluated element-wise - (scalar input). Defaults to False. - ad_compatible (Optional): If true, the callable ``func`` will be called using - the porepy.ad.AdArray. - - Note that as of now, this will effectively bypass the abstract methods - for generating values and the Jacobian, assuming both will be provided - correctly by the return value of ``func``. - - Defaults to False. + name: Name of this instance as an AD operator. """ def __init__( self, - func: Callable, - name: str, - array_compatible: bool = False, - ad_compatible: bool = False, - ): - ### PUBLIC - - self.func: Callable = func - """Callable passed at instantiation""" - - self.array_compatible: bool = array_compatible - """Indicator whether the callable can process arrays.""" - - super().__init__(name=name, operation=Operator.Operations.evaluate) + name: Optional[str] = None, + domains: Optional[pp.GridLikeSequence] = None, + operation: Optional[Operator.Operations] = None, + children: Optional[Sequence[Operator]] = None, + **kwargs, # Left for inheritance for more complex functions + ) -> None: + # NOTE Constructor is overwritten to have a consistent signature + # But the operation is always overwritten to point to evaluate. + # Done for reasons of multiple inheritance. + super().__init__( + name=name, + domains=domains, + operation=pp.ad.Operator.Operations.evaluate, + children=children, + ) def __call__(self, *args: pp.ad.Operator) -> pp.ad.Operator: """Renders this function operator callable, fulfilling its notion as 'function'. Parameters: - *args: AD operators passed as symbolic arguments for the callable passed at - instantiation. + *args: AD operators representing the arguments of the function represented + by this instance. Returns: - Operator with call-arguments as children in the operator tree. - The assigned operation is ``evaluate``. + Operator with assigned operation ``evaluate``. + + It's children are given by this instance, and ``*args``. This is required + to make the numerical function available during parsing (see :meth:`parse`). """ - children = [self, *args] - op = Operator(children=children, operation=self.operation) + assert ( + len(args) > 0 + ), "Operator functions must be called with at least 1 argument." + + op = Operator( + name=f"{self.name}{[a.name for a in args]}", + # domains=self.domains, + operation=pp.ad.Operator.Operations.evaluate, + children=args, + ) + # Assigning the functional representation by the implementation of this instance + op.func = self.func # type: ignore return op def __repr__(self) -> str: @@ -109,59 +113,101 @@ def __repr__(self) -> str: s = f"AD Operator function '{self._name}'" return s - def __mul__(self, other): - raise RuntimeError( - "AD Operator functions are meant to be called, not multiplied." 
- ) + def __neg__(self) -> Operator: + return _raise_no_arithmetics_with_functions_error() - def __add__(self, other): - raise RuntimeError("AD Operator functions are meant to be called, not added.") + def __add__(self, other: Operator) -> Operator: + return _raise_no_arithmetics_with_functions_error() - def __sub__(self, other): - raise RuntimeError( - "AD Operator functions are meant to be called, not subtracted." - ) + def __radd__(self, other: Operator) -> Operator: + return _raise_no_arithmetics_with_functions_error() - def __rsub__(self, other): - raise RuntimeError( - "AD Operator functions are meant to be called, not subtracted." - ) + def __sub__(self, other: Operator) -> Operator: + return _raise_no_arithmetics_with_functions_error() + + def __rsub__(self, other: Operator) -> Operator: + return _raise_no_arithmetics_with_functions_error() + + def __mul__(self, other: Operator) -> Operator: + return _raise_no_arithmetics_with_functions_error() + + def __rmul__(self, other: Operator) -> Operator: + return _raise_no_arithmetics_with_functions_error() + + def __truediv__(self, other: Operator) -> Operator: + return _raise_no_arithmetics_with_functions_error() + + def __rtruediv__(self, other: Operator) -> Operator: + return _raise_no_arithmetics_with_functions_error() - def __div__(self, other): - raise RuntimeError("AD Operator functions are meant to be called, not divided.") + def __pow__(self, other: Operator) -> Operator: + return _raise_no_arithmetics_with_functions_error() - def __truediv__(self, other): - raise RuntimeError("AD Operator functions are meant to be called, not divided.") + def __rpow__(self, other: Operator) -> Operator: + return _raise_no_arithmetics_with_functions_error() - def parse(self, md: pp.MixedDimensionalGrid): - """Parsing to a numerical value. + def __matmul__(self, other: Operator) -> Operator: + return _raise_no_arithmetics_with_functions_error() - The real work will be done by combining the function with arguments, during - parsing of an operator tree. + def __rmatmul__(self, other): + return _raise_no_arithmetics_with_functions_error() + + def parse(self, mdg: pp.MixedDimensionalGrid): + """Operator functions return themselves to give the recursion in + :class:`~porepy.numerics.ad.operators.Operator` access to the underlying + :meth:`func`.""" + return self + + def func(self, *args: FloatType) -> float | np.ndarray | AdArray: + """The underlying numerical function which is represented by this operator + function. + + Called during parsing with the numerical representation of operator arguments + and returning the numerical value and derivative of this operator instance. + + The numerical function calls in any case :meth:`get_values` with ``*args``. + If ``*args`` contains an Ad array, it calls also :meth:`get_jacobian`. Parameters: - md: Mixed-dimensional grid. + *args: Numerical representation of the operators with which this instance + was called. The arguments will be in the same order as the operators + passed to the call to this instance. Returns: - The instance itself. + If ``*args`` contains only numpy arrays, it returns the result of + :meth:`get_values`. + If it contains an Ad array, it combines the results of :meth:`get_values` + and :meth:`get_jacobian` in an Ad array and returns it. """ - return self + + values = self.get_values(*args) + + if any(isinstance(a, AdArray) for a in args): + jac = self.get_jacobian(*args) + if isinstance(values, float): + assert jac.shape[0] == 1, "Inconsistent Jacobian of scalar function." 
+ values = np.array([values]) + return AdArray(values, self.get_jacobian(*args)) + else: + return values @abc.abstractmethod - def get_values(self, *args: AdArray) -> np.ndarray: + def get_values(self, *args: float | np.ndarray | AdArray) -> float | np.ndarray: """Abstract method for evaluating the callable passed at instantiation. - This method will be called during the operator parsing. - The AD arrays passed as arguments will be in the same order as the operators passed to - the call to this instance. + The returned numpy array will be set as + :attr:`~porepy.numerics.ad.forward_mode.AdArray.val` in for cases when any + child is parsed as an Ad array. + Otherwise the value returned here will be returned directly as the numerical + representation of this instance. - The returned numpy array will be set as 'val' argument for the AD array representing - this instance. + This method is called in :meth:`func`. Parameters: - *args: AdArray representation of the operators passed during the call to this - instance + *args: Numerical representation of the operators with which this instance + was called. The arguments will be in the same order as the operators + passed to the call to this instance. Returns: Function values in numerical format. @@ -170,188 +216,117 @@ def get_values(self, *args: AdArray) -> np.ndarray: pass @abc.abstractmethod - def get_jacobian(self, *args: AdArray) -> sps.spmatrix: - """ - Abstract method for evaluating the Jacobian of the callable passed at instantiation. + def get_jacobian(self, *args: float | np.ndarray | AdArray) -> sps.spmatrix: + """Abstract method for evaluating the Jacobian of the function represented + by this instance. - This method will be called during the operator parsing. - The AD arrays passed as arguments will be in the same order as the operators passed to - the call to this instance. + The returned matrix will be set as + :attr:`~porepy.numerics.ad.forward_mode.AdArray.jac` in for cases when any + child is parsed as an Ad array. - The returned numpy array will be be set as 'jac' argument for the AD array representing - this instance. + This method is called in :meth:`func` if any argument is an Ad array. Note: - The necessary dimensions for the jacobian can be extracted from the dimensions - of the Jacobians of passed AdArray instances. + The necessary dimensions for the jacobian can be extracted from the + dimensions of the Jacobians of passed Ad arrays in ``*args``. Parameters: - *args: AdArray representation of the operators passed during the call to this - instance + *args: Numerical representation of the operators with which this instance + was called. The arguments will be in the same order as the operators + passed to the call to this instance. Returns: - Numeric representation of the Jacobian of this function. + Function derivatives in numerical format. """ pass -class AbstractJacobianFunction(AbstractFunction): - """Partially abstract base class, providing a call to the callable ``func`` in order to - obtain numeric function values. +class DiagonalJacobianFunction(AbstractFunction): + """Partially abstract operator function, which approximates the Jacobian of the + function using identities and scalar multipliers per dependency. - What remains abstract is the Jacobian. + Can be used to for functions with approximated derivatives. - """ - - def get_values(self, *args: AdArray) -> np.ndarray: - """ - Returns: - The direct evaluation of the callable using ``val`` of passed AD arrays. 
+ Parameters: + multipliers: Scalar multipliers for the identity blocks in the Jacobian, + per function argument. The order in ``multipliers`` is expected to match + the order of AD operators passed to the call of this instance. - """ - # get values of argument AdArrays. - vals = (arg.val for arg in args) + """ - # if the callable is flagged as conform for vector operations, feed vectors - if self.array_compatible: - return self.func(*vals) + def __init__( + self, + multipliers: float | list[float], + name: str, + ): + super().__init__(name=name) + # check and format input for further use + if isinstance(multipliers, list): + self._multipliers = [float(val) for val in multipliers] else: - # if not vector-conform, feed element-wise - - # TODO this displays some special behavior when val-arrays have different lengths: - # it returns None-like things for every iteration more then shortest length - # These Nones are ignored for some reason by the function call, as well as by the - # array constructor. - # If a mortar var and a subdomain var are given as args, - # then the lengths will be different for example. - return np.array([self.func(*vals_i) for vals_i in zip(*vals)]) - + self._multipliers = [float(multipliers)] -### CONCRETE IMPLEMENTATIONS ------------------------------------------------------------------ + def get_jacobian(self, *args: float | np.ndarray | AdArray) -> sps.spmatrix: + """The approximate Jacobian consists of identity blocks times scalar multiplier + per every function dependency.""" + jacs = [ + arg.jac * m + for arg, m in zip(args, self._multipliers) + if isinstance(arg, AdArray) + ] + return sum(jacs).tocsr() class Function(AbstractFunction): - """Ad representation of an analytically given function, - where it is expected that passing AdArrays directly to ``func`` will - return the proper result. + """Ad representation of an analytically given function, which can handle both + numpy arrays and Ad arrays. Here the values **and** the Jacobian are obtained exactly by the AD framework. - The intended use is as a wrapper for operations on pp.ad.AdArray objects, - in forms which are not directly or easily expressed by the rest of the Ad - framework. + The intended use is as a wrapper for callables, which can handle numpy and Ad + arrays. E.g., exponential or logarithmic functions, which cannot be expressed + with arithmetic overloads of Ad operators. Note: This is a special case where the abstract methods for getting values and the - Jacobian are formally implemented but never used by the AD framework. A separate - operation called ``evaluate`` is implemented instead, which simply feeds the AD - arrays to ``func``. - - """ - - def __init__(self, func: Callable, name: str, array_compatible: bool = True): - super().__init__(func, name, array_compatible) - self.ad_compatible = True - - def get_values(self, *args: AdArray) -> np.ndarray: - result = self.func(*args) - return result.val - - def get_jacobian(self, *args: AdArray) -> np.ndarray: - result = self.func(*args) - return result.jac + Jacobian are formally implemented but never used by the AD framework. + :meth:`func` is overwritten to use the ``func`` passed at instantiation. -class ConstantFunction(AbstractFunction): - """Function representing constant, scalar values with no dependencies and ergo a - zero Jacobian. - - It still has to be called though since it fulfills the notion of a 'function'. - - Parameters: - values: constant values per cell. 
+ Paramters: + func: A callable returning a numpy array for numpy array arguments, and an + Ad array for arguments containing Ad arrays. """ - def __init__(self, name: str, values: np.ndarray): - # dummy function, takes whatever and returns only the pre-set values - def func(*args): - return values + def __init__(self, func: Callable[..., FloatType], name: str) -> None: + super().__init__(name=name) - super().__init__(func, name) - self._values = values + self._func: Callable[..., float | np.ndarray | AdArray] = func + """Reference to the callable passed at instantiation.""" - def get_values(self, *args: AdArray) -> np.ndarray: - """ - Returns: - The values passed at instantiation. + def func(self, *args: FloatType) -> float | np.ndarray | AdArray: + """Overwrites the parent method to call the numerical function passed at + instantiation.""" + return self._func(*args) - """ - return self._values + def get_values(self, *args: float | np.ndarray | AdArray) -> float | np.ndarray: + result = self._func(*args) + return result.val if isinstance(result, AdArray) else result - def get_jacobian(self, *args: AdArray) -> sps.spmatrix: - """ - Note: - The return value is not a sparse matrix as imposed by the parent method signature, - but a zero. - Numerical operations with a zero always works with any numeric formats in - numpy, scipy and PorePy's AD framework. - Since the constant function (most likely) gets no arguments passed, we have - no way of knowing the necessary shape for a zero matrix. Hence scalar. - - Returns: the trivial derivative of a constant. - - """ - return 0.0 - - -class DiagonalJacobianFunction(AbstractJacobianFunction): - """Approximates the Jacobian of the function using identities and scalar multipliers - per dependency. - - Parameters: - multipliers: scalar multipliers for the identity blocks in the Jacobian, - per dependency of ``func``. The order in ``multipliers`` is expected to match - the order of AD operators passed to the call of this function. - - """ - - def __init__( - self, - func: Callable, - name: str, - multipliers: float | list[float], - array_compatible: bool = False, - ): - super().__init__(func, name, array_compatible) - # check and format input for further use - if isinstance(multipliers, list): - self._multipliers = [float(val) for val in multipliers] - else: - self._multipliers = [float(multipliers)] - - def get_jacobian(self, *args: AdArray) -> sps.spmatrix: - """The approximate Jacobian consists of identity blocks times scalar multiplier - per every function dependency. - - """ - # the Jacobian of a (Merged) Variable is already a properly sized block identity - jac = args[0].jac * self._multipliers[0] - - # summing identity blocks for each dependency - if len(args) > 1: - # TODO think about exception handling in case not enough - # L-values were provided initially - for arg, L in zip(args[1:], self._multipliers[1:]): - jac += arg.jac * L - - return jac + def get_jacobian(self, *args: float | np.ndarray | AdArray) -> sps.spmatrix: + assert any( + isinstance(a, AdArray) for a in args + ), "No Ad arrays passed as arguments." + result = self._func(*args) + assert isinstance(result, AdArray) + return result.jac class InterpolatedFunction(AbstractFunction): - """Represents the passed function as an interpolation of chosen order on a cartesian, - uniform grid. + """Represents the passed function as an interpolation of chosen order on a + Cartesian, uniform grid. 
The image of the function is expected to be of dimension 1, while the domain can be multidimensional. @@ -361,6 +336,13 @@ class InterpolatedFunction(AbstractFunction): Each row-entry represents a value for an argument of ``func`` in respective order. + Important: + The construction of the Jacobian assumes that the arguments/dependencies of the + interpolated function are independent variables (their jacobian has only a + single identity block). The correct behavior of the interpolation in other cases + is not guaranteed due to how derivative values are stored in the sparse matrix + of derivatives. + Parameters: min_val: lower bounds for the domain of ``func``. max_val: upper bound for the domain. @@ -383,9 +365,8 @@ def __init__( npt: np.ndarray, order: int = 1, preval: bool = False, - array_compatible: bool = False, ): - super().__init__(func, name, array_compatible) + super().__init__(name=name) ### PUBLIC self.order: int = order @@ -410,39 +391,47 @@ def __init__( f"Interpolation of order {self.order} not implemented." ) - def get_values(self, *args: AdArray) -> np.ndarray: + def get_values(self, *args: float | np.ndarray | AdArray) -> np.ndarray: # stacking argument values vertically for interpolation - X = np.vstack([x.val for x in args]) + args_: list[float | np.ndarray] = [] + for a in args: + if isinstance(a, AdArray): + args_.append(a.val) + else: + args_.append(a) + X: np.ndarray = np.vstack(args_) return self._table.interpolate(X) - def get_jacobian(self, *args: AdArray) -> sps.spmatrix: + def get_jacobian(self, *args: float | np.ndarray | AdArray) -> sps.spmatrix: # get points at which to evaluate the differentiation - X = np.vstack([x.val for x in args]) + X = np.vstack([x.val if isinstance(x, AdArray) else x for x in args]) # allocate zero matrix for Jacobian with correct dimensions and in CSR format - jac = sps.csr_matrix(args[0].jac.shape) + jacs = [] for axis, arg in enumerate(args): - # The trivial Jacobian of one argument gives us the correct position for the - # entries as ones - partial_jac = arg.jac - # replace the ones with actual values - # Since csr, we can simply replace the data array with the values of the derivative - partial_jac.data = self._table.gradient(X, axis)[0] - - # add blocks to complete Jacobian - jac += partial_jac + if isinstance(arg, AdArray): + # The trivial Jacobian of one argument gives us the correct position for + # the entries as ones + partial_jac = arg.jac + # replace the ones with actual values + # Since csr, we can simply replace the data array with the values of the + # derivative + partial_jac.data = self._table.gradient(X, axis)[0] + jacs.append(partial_jac) - return jac + return sum(jacs).tocsr() -### FUNCTION DECORATOR ------------------------------------------------------------------------ +### FUNCTION DECORATOR class ADmethod: - """(Decorator) Class for methods representing e.g., physical properties. - The decorated function is expected to take scalars/vectors and return a scalar/vector. + """(Decorator) Class for numerical functions, to wrap them into operator functions. + + The designated operator function must be able to take a keyword argument ``func``. - The return value will be an AD operator of a type passed to the decorator. + The decorated, numerical function is expected to be able to handle numerical + arguments including Ad arrays. Examples: .. 
code:: python @@ -452,24 +441,22 @@ class ADmethod: # decorating class methods class IdealGas: - @ADmethod(ad_operator=pp.ad.DiagonalJacobianFunction, - operators_args={"multipliers"=[1,1]}) + @ADmethod(ad_operator=pp.ad.Function, + operator_args={'name'='density'}) def density(self, p: float, T: float) -> float: return p/T - # decorating function - @ADmethod(ad_operator=pp.ad.Function) + # decorating function with default operator function (pp.ad.Function) + @ADmethod def dummy_rel_perm(s): return s**2 - With above code, the density of an instance of ``IdealGas`` can be called using - :class:`~porepy.numerics.ad.operators.MergedVariable` representing - pressure and temperature. - Analogously, ``dummy_rel_perm`` can be called with one representing the saturation. + With above code, the decorated functions can be called with AD operators + representing the function arguments. Note: If used as decorator WITHOUT explicit instantiation, the instantiation will be - done implicitly with above default arguments (that's how Python decorators work). + done implicitly with default arguments (that's how Python decorators work). Parameters: func: decorated function object @@ -484,8 +471,12 @@ def __init__( self, func: Optional[Callable] = None, ad_function_type: Type[AbstractFunction] = Function, - operator_kwargs: dict = {}, + operator_kwargs: Optional[dict] = None, ) -> None: + if operator_kwargs is None: + operator_kwargs = {"name": "unnamed_function"} + + assert "name" in operator_kwargs, "Operator functions must be named." # reference to decorated function object self._func = func # mark if decoration without explicit call to constructor @@ -510,12 +501,12 @@ def __call__(self, *args, **kwargs) -> ADmethod | pp.ad.Operator: Note: If the decorator was explicitly instantiated during decoration, - that instance will effectively be replaced by another decorator instance created - here in the call. - It is expected that the the call will follow the instantiation immediately when - used as a decorator, hence properly dereferencing the original instance. - If used differently or if another reference is saved between explicit instantiation - and call, this is a potential memory leak. + that instance will effectively be replaced by another decorator instance + created here in the call. + It is expected that the the call will follow the instantiation immediately + when used as a decorator, hence properly dereferencing the original + instance. If used differently or if another reference is saved between + explicit instantiation and call, this is a potential memory leak. """ # If decorated without explicit init, the function is passed during a call to @@ -537,8 +528,8 @@ def __call__(self, *args, **kwargs) -> ADmethod | pp.ad.Operator: # a call to the decorated function # when calling the decorator, distinguish between bound method call - # ('args' contains 'self' of the decorated instance) and an unbound function call - # (whatever 'args' and 'kwargs' contain, we pass it to the wrapper) + # ('args' contains 'self' of the decorated instance) and an unbound function + # call (whatever 'args' and 'kwargs' contain, we pass it to the wrapper) if self._bound_to is None: wrapped_function = self.ad_wrapper(*args, **kwargs) elif self._bound_to == args[0]: @@ -556,8 +547,7 @@ def __call__(self, *args, **kwargs) -> ADmethod | pp.ad.Operator: return wrapped_function def __get__(self, binding_instance: object, binding_type: type) -> Callable: - """ - Descriptor protocol. + """Implemenation of descriptor protocol. 
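As a complement to the docstring example above, a hedged usage sketch that matches the constructor signature shown in this hunk (``ad_function_type`` and ``operator_kwargs``, the latter requiring a ``name`` entry). The class and function names are illustrative only.

.. code:: python

    import porepy as pp
    from porepy.numerics.ad.operator_functions import ADmethod

    class IdealGas:
        # Explicit instantiation of the decorator: the keyword names follow
        # __init__ above; operator_kwargs must contain 'name'.
        @ADmethod(
            ad_function_type=pp.ad.Function,
            operator_kwargs={"name": "ideal_gas_density"},
        )
        def density(self, p, T):
            return p / T

    # Bare decoration: falls back to pp.ad.Function and the default name
    # "unnamed_function" set in __init__ above.
    @ADmethod
    def dummy_rel_perm(s):
        return s**2

Either decorated callable can then be called with AD operators as arguments and returns an AD operator wrapping the numerical function.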
If this ADmethod decorates a class method (and effectively replaces it), it will be bound to the class instance, similar to bound methods. @@ -588,6 +578,7 @@ def __get__(self, binding_instance: object, binding_type: type) -> Callable: def ad_wrapper(self, *args, **kwargs) -> Operator: """Actual wrapper function. + Constructs the necessary AD-Operator class wrapping the decorated callable and performs the evaluation/call. @@ -599,8 +590,8 @@ def ad_wrapper(self, *args, **kwargs) -> Operator: # Make sure proper assignment of callable was made assert self._func is not None - # extra safety measure to ensure a bound call is done to the right binding instance. - # We pass only the binding instance referenced in the descriptor protocol. + # extra safety measure to ensure a bound call is done to the right binding + # instance. We pass only the binding instance referenced in the descr. protocol. if self._bound_to is None: operator_func = self._func else: diff --git a/src/porepy/numerics/ad/operators.py b/src/porepy/numerics/ad/operators.py index 7e7aa1ebef..cd524d61e9 100644 --- a/src/porepy/numerics/ad/operators.py +++ b/src/porepy/numerics/ad/operators.py @@ -7,7 +7,7 @@ from enum import Enum from functools import reduce from itertools import count -from typing import Any, Literal, Optional, Sequence, Union, overload +from typing import Any, Callable, Literal, Optional, Sequence, TypeVar, Union, overload import numpy as np import scipy.sparse as sps @@ -20,6 +20,8 @@ __all__ = [ "Operator", + "TimeDependentOperator", + "IterativeOperator", "SparseArray", "DenseArray", "TimeDependentDenseArray", @@ -38,6 +40,62 @@ def _get_shape(mat): return mat.shape +def _get_previous_time_or_iterate( + op: Operator, prev_time: bool = True, steps: int = 1 +) -> Operator: + """Helper function which traverses an operator's tree recursively to get a + copy of it and it's children, representing ``op`` at a previous time or + iteration. + + Parameters: + op: Some operator whose tree should be traversed. + prev_time: ``default=True`` + + If True, it calls :meth:`Operator.previous_timestep`, otherwise it calls + :meth:`Operator.previous_iteration`. + + This is the only difference in the recursion and we can avoid duplicate + code. + steps: ``default=1`` + + Number of steps backwards in time or iterate sense. + + Returns: + A copy of the operator and its children, representing the previous time or + iteration. + + """ + + # The recursion reached an atomic operator, which has some time- or + # iterate-dependent behaviour + if isinstance(op, TimeDependentOperator) and prev_time: + return op.previous_timestep(steps=steps) + elif isinstance(op, IterativeOperator) and not prev_time: + return op.previous_iteration(steps=steps) + # NOTE The previous_iteration of a time-dependent operator will return the operator + # itself. Vice-versa, the previous_timestep of an Iterative operator will return + # itself. Holds only if the operator is original (no previous_* operation performed) + + # The recursion reached an operator without children and without time- or iterate- + # dependent behaviour + elif op.is_leaf(): + return op + # Else we are in the middle of the operator tree and need to go deeper, creating + # copies along. 
+ else: + + # Create new operator from the tree, with the only difference being the new + # children, for which the recursion is invoked + # NOTE copy takes care of references to original_operator and func + new_op = copy.copy(op) + new_op.children = [ + _get_previous_time_or_iterate(child, prev_time=prev_time, steps=steps) + for child in op.children + ] + + return new_op + + class Operator: """Parent class for all AD operators. @@ -108,6 +166,28 @@ def __init__( " interfaces, subdomains or boundary grids." ) + self.func: Callable[..., float | np.ndarray | AdArray] + """Functional representation of this operator. + + As of now, only instances of + :class:`~porepy.numerics.ad.operator_functions.AbstractFunction` have a + functional representation, whereas basic arithmetics are implemented by + arithmetic overloads in this class. + + Note: + This declaration avoids operator functions creating operators with + themselves as the first child to provide access to + :meth:`~porepy.numerics.ad.operator_functions.AbstractFunction.func`, + and hence artificially bloating the operator tree. + + Note: + For future development: + + Functional representation can be used for an optimized representation + (keyword numba compilation). + + """ + self.children: Sequence[Operator] """List of children, other AD operators. @@ -181,6 +261,23 @@ def is_leaf(self) -> bool: """ return len(self.children) == 0 + @property + def is_current_iterate(self) -> bool: + """Returns True if this AD-operator represents its designated term at the + current time and iterate index. + + Note: + This flag is used in time step and iterate notions of + :class:`TimeDependentOperator` and :class:`IterativeOperator`. + + """ + # NOTE we use the existence of the original operator (not the index) + # because this works for both previous time and iteration. + if hasattr(self, "original_operator"): + return False + else: + return True + def set_name(self, name: str) -> None: """Reset this object's name originally passed at instantiation. @@ -190,72 +287,25 @@ def set_name(self, name: str) -> None: """ self._name = name - def previous_timestep(self) -> pp.ad.Operator: - """Return an operator that represents the value of this operator at the previous - timestep. + def previous_timestep(self, steps: int = 1) -> pp.ad.Operator: + """Base method to trigger a recursion over the operator tree and create a + shallow copy of this operator, where child operators with time-dependent + behaviour are pushed backwards in time. - The operator tree at the previous time step is created as a shallow copy, and will - thus be identical to the original operator, except that all time dependent operators - are evaluated at the previous time step. - - Returns: - A copy of self, with all time dependent operators evaluated at the previous - time step. + For more information, see :class:`TimeDependentOperator`. """ - # Create a copy of the operator tree evaluated at a previous time step. This is done - # by traversing the underlying graph, and set all time-dependent objects to be - # evaluated at the previous time step. - - def _traverse_tree(op: Operator) -> Operator: - """Helper function which traverses an operator tree by recursion.""" - - children = op.children - - if len(children) == 0: - # We are on an atomic operator. If this is a time-dependent operator, - # set it to be evaluated at the previous time step. If not, leave the - # operator as it is. 
- if isinstance( - op, (Variable, MixedDimensionalVariable, TimeDependentDenseArray) - ): - # Use the previous_timestep() method of the operator to get the - # operator evaluated at the previous time step. This in effect - # creates a copy of the operator. - # If other time-dependent other operators are added, they will have - # to override this previous_timestep method. - return op.previous_timestep() + return _get_previous_time_or_iterate(self, prev_time=True, steps=steps) - else: - # No need to use a copy here. - # This also means that operators that are not time dependent need not - # override this previous_timestep method. - return op - else: - # Recursively iterate over the subtree, get the children, evaluated at the - # previous time when relevant, and add it to the new list. - new_children: list[Operator] = list() - for ci, child in enumerate(children): - # Recursive call to fix the subtree. - new_children.append(_traverse_tree(child)) - - # Use the same lists of domains as in the old operator. - domains = op.domains - - # Create new operator from the tree. - new_op = Operator( - name=op.name, - domains=domains, - operation=op.operation, - children=new_children, - ) - return new_op + def previous_iteration(self, steps: int = 1) -> pp.ad.Operator: + """Base method to trigger a recursion over the operator tree and create a + shallow copy of this operator, where child operators with iterative + behaviour are pushed backwards in the iterative sense. - # Get a copy of the operator with all time-dependent quantities evaluated at the - # previous time step. - prev_time = _traverse_tree(self) + For more information, see :class:`IterativeOperator`. - return prev_time + """ + return _get_previous_time_or_iterate(self, prev_time=False, steps=steps) def parse(self, mdg: pp.MixedDimensionalGrid) -> Any: """Translate the operator into a numerical expression. @@ -275,63 +325,90 @@ def parse(self, mdg: pp.MixedDimensionalGrid) -> Any: """ raise NotImplementedError("This type of operator cannot be parsed right away") - def _parse_operator(self, op: Operator, mdg: pp.MixedDimensionalGrid): - """TODO: Currently, there is no prioritization between the operations; for + def _parse_operator( + self, + op: Operator, + eqs: pp.ad.EquationSystem, + ad_base: AdArray | np.ndarray, + ): + """Recursive parsing of operator tree to return numerical representation. + + TODO: Currently, there is no prioritization between the operations; for some reason, things just work. We may need to make an ordering in which the operations should be carried out. It seems that the strategy of putting on hold until all children are processed works, but there likely are cases where this is not the case. + + Parameters: + op: The operator to be parsed (for recursion in tree of ``self``). + eqs: Equation system and its grid on which to perform the parsing. + ad_base: Starting point for forward mode, containing values + (and possibly derivatives as identities) of the global vector at current + time and iterate. + + Returns: + The numerical representation of this operator. + """ # The parsing strategy depends on the operator at hand: - # 1) If the operator is a Variable, it will be represented according to its - # stored state. - # 2) If the operator is a leaf in the tree-representation of the operator, - # parsing is left to the operator itself. 
- # 3) If the operator is formed by combining other operators lower in the tree, - # parsing is handled by first evaluating the children (leads to recursion) - # and then perform the operation on the result. - - # Check for case 1 or 2 - if isinstance(op, pp.ad.Variable) or isinstance(op, Variable): - # Case 1: Variable - - # How to access the array of (Ad representation of) states depends on - # whether this is a single or combined variable; see self.__init__, - # definition of self._variable_ids. - # TODO: no difference between merged or no mixed-dimensional variables!? - if isinstance(op, pp.ad.MixedDimensionalVariable) or isinstance( - op, MixedDimensionalVariable - ): - if op.prev_time: - return self._prev_vals[op.id] - elif op.prev_iter: - return self._prev_iter_vals[op.id] - else: - return self._ad[op.id] - else: - if op.prev_time: - return self._prev_vals[op.id] - elif op.prev_iter or not ( - op.id in self._ad - ): # TODO make it more explicit that op corresponds to a non_ad_variable? - # e.g. by op.id in non_ad_variable_ids. - return self._prev_iter_vals[op.id] - else: - return self._ad[op.id] - elif isinstance(op, pp.ad.AdArray): - # When using nested operator functions, op can be an already evaluated term. - # Just return it. + # 1) If it is numeric, then it is already some sort of leaf. Return it. + # 2) If it is a leaf: + # a) A md-variable with dofs per atomic variable. + # b) An atomic variable with its dofs. + # c) Some wrapper for discretizations or other data. + # 3) If it is an operator with children, invoke recursion and + # proceed to the non-void operation. + + # Case 1), Some numeric data, or already evaluated operator. + if isinstance(op, AdArray | np.ndarray | pp.number): return op - elif op.is_leaf(): - # Case 2 - return op.parse(mdg) # type:ignore + # to continue, we must assert it is an actual, unparsed operator + assert isinstance( + op, Operator + ), f"Failure in parsing: Unsupported type in operor tree {type(op)}." + + # Case 2) Leaf operators or variables + # NOTE Should MD variables really be leaves? + if op.is_leaf(): + # Case 2.a) Md-variable + if isinstance(op, MixedDimensionalVariable): + if op.is_previous_iterate or op.is_previous_time: + # Empty vector like the global vector of unknowns for prev time/iter + # insert the values at the right dofs and slice + vals = np.empty_like( + ad_base.val if isinstance(ad_base, AdArray) else ad_base + ) + # list of indices for sub variables + dofs = [] + for sub_var in op.sub_vars: + sub_dofs = eqs.dofs_of([sub_var]) + vals[sub_dofs] = sub_var.parse(eqs.mdg) + dofs.append(sub_dofs) + + return vals[np.hstack(dofs, dtype=int)] if dofs else np.array([]) + # Like for atomic variables, ad_base contains current time and iter + else: + return ad_base[eqs.dofs_of([op])] + # Case 2.b) atomic variables + elif isinstance(op, Variable): + # If a variable represents a previous iteration or time, parse values. + if op.is_previous_iterate or op.is_previous_time: + return op.parse(eqs.mdg) + # Otherwise use the current time and iteration values. + else: + return ad_base[eqs.dofs_of([op])] + # Case 2.c) All other leafs like discretizations or some wrapped data + else: + # Mypy complains because the return type of parse is Any. + return op.parse(eqs.mdg) # type:ignore - # This is not an atomic operator. 
First parse its children, then combine them - results = [self._parse_operator(child, mdg) for child in op.children] + # Case 3) This is a non-atomic operator with an assigned operation + # Invoke recursion + results = [self._parse_operator(child, eqs, ad_base) for child in op.children] - # Combine the results + # Finally, do the operation operation = op.operation if operation == Operator.Operations.add: # To add we need two objects @@ -447,24 +524,20 @@ def _parse_operator(self, op: Operator, mdg: pp.MixedDimensionalGrid): raise ValueError(msg) from exc elif operation == Operator.Operations.evaluate: - # This is a function, which should have at least one argument - assert len(results) > 1 - func_op = results[0] + # Operator functions should have at least 1 child (themselves) + assert len(results) >= 1, "Operator functions must have at least 1 child." + assert hasattr(op, "func"), ( + f"Operators with operation {operation} must have a functional" + + f" representation `func` implemented as a callable member." + ) - # if the callable can be fed with AdArrays, do it - if func_op.ad_compatible: - return func_op.func(*results[1:]) - else: - # This should be a Function with approximated Jacobian and value. - try: - val = func_op.get_values(*results[1:]) - jac = func_op.get_jacobian(*results[1:]) - except Exception as exc: - # TODO specify what can go wrong here (Exception type) - msg = "Ad parsing: Error evaluating operator function:\n" - msg += func_op._parse_readable() - raise ValueError(msg) from exc - return AdArray(val, jac) + try: + return op.func(*results) + except Exception as exc: + # TODO specify what can go wrong here (Exception type) + msg = "Error while parsing operator function:\n" + msg += op._parse_readable() + raise ValueError(msg) from exc else: raise ValueError(f"Encountered unknown operation {operation}") @@ -742,207 +815,31 @@ def _evaluate( depends on the operator. """ - # Get the mixed-dimensional grid used for the dof-manager. - mdg = system_manager.mdg - - # Identify all variables in the Operator tree. This will include real variables, - # and representation of previous time steps and iterations. - ( - variable_dofs, - variable_ids, - is_prev_time, - is_prev_iter, - ) = self._identify_variables(system_manager) - - # Split variable dof indices and ids into groups of current variables (those - # of the current iteration step), and those from the previous time steps and - # iterations. - current_indices = [] - current_ids = [] - prev_indices = [] - prev_ids = [] - prev_iter_indices = [] - prev_iter_ids = [] - for ind, var_id, is_prev, is_prev_it in zip( - variable_dofs, variable_ids, is_prev_time, is_prev_iter - ): - if is_prev: - prev_indices.append(ind) - prev_ids.append(var_id) - elif is_prev_it: - prev_iter_indices.append(ind) - prev_iter_ids.append(var_id) - else: - current_indices.append(ind) - current_ids.append(var_id) - - # Save information. - # IMPLEMENTATION NOTE: Storage in a separate data class could have - # been a more elegant option. - self._variable_dofs = current_indices - self._variable_ids = current_ids - self._prev_time_dofs = prev_indices - self._prev_time_ids = prev_ids - self._prev_iter_dofs = prev_iter_indices - self._prev_iter_ids = prev_iter_ids - - # Parsing in two stages: First make a forward Ad-representation of the variable - # state (this must be done jointly for all variables of the operator to get all - # derivatives represented). Then parse the operator by traversing its - # tree-representation, and parse and combine individual operators. 
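The global approach described in the comments above (one global forward-mode array with an identity Jacobian, restricted to the dofs of each variable during parsing) can be illustrated with a small, self-contained sketch. The state vector and dof indices below are made up; the slicing behaviour of ``AdArray`` is the one relied upon by ``_parse_operator`` above.

.. code:: python

    import numpy as np
    import porepy as pp

    # Hypothetical global vector of unknowns (all variables stacked).
    state = np.array([1.0, 2.0, 3.0, 4.0, 5.0])

    # Forward-mode base: value equals state, Jacobian is the identity.
    ad_base = pp.ad.initAdArrays([state])[0]

    # Dofs of one variable, as EquationSystem.dofs_of would return them.
    dofs = np.array([1, 3])

    sub = ad_base[dofs]
    # sub.val equals state[dofs]; sub.jac has shape (2, 5): the rows are restricted
    # to this variable, but all columns are kept, so cross-couplings between
    # variables survive in the derivatives.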
- - prev_vals = system_manager.get_variable_values(time_step_index=0) - prev_iter_vals = system_manager.get_variable_values(iterate_index=0) + # If state is not specified, use values at current time, current iterate if state is None: state = system_manager.get_variable_values(iterate_index=0) - # Initialize Ad variables with the current iterates - - # The size of the Jacobian matrix will always be set according to the - # variables found by the EquationSystem. - - # NOTE: This implies that to derive a subsystem from the Jacobian + # 1. Generate the basis for forward AD + # If with derivatives, we use Ad arrays, without we use the current state array + # NOTE as of now, we have a global approach: Construct a global identity + # as derivative, and then slice the DOFs present in this operator. + # This implies that to derive a subsystem from the Jacobian # matrix of this Operator will require restricting the columns of # this matrix. - - # First generate an Ad array (ready for forward Ad) for the full set. - # If the Jacobian is not requested, this step is skipped. - vars: AdArray | np.ndarray + ad_base: AdArray | np.ndarray if evaluate_jacobian: - vars = initAdArrays([state])[0] + ad_base = initAdArrays([state])[0] else: - vars = state - - # Next, the Ad array must be split into variables of the right size - # (splitting impacts values and number of rows in the Jacobian, but - # the Jacobian columns must stay the same to preserve all cross couplings - # in the derivatives). - - # Dictionary which maps from Ad variable ids to AdArray. - self._ad: dict[int, AdArray] = {} - - # Loop over all variables, restrict to an Ad array corresponding to - # this variable. - for var_id, dof in zip(self._variable_ids, self._variable_dofs): - ncol = state.size - nrow = np.unique(dof).size - # Restriction matrix from full state (in Forward Ad) to the specific - # variable. - R = sps.coo_matrix( - (np.ones(nrow), (np.arange(nrow), dof)), shape=(nrow, ncol) - ).tocsr() - self._ad[var_id] = R @ vars - - # Also make mappings from the previous iteration. - # This is simpler, since it is only a matter of getting the residual vector - # correctly (not Jacobian matrix). - - prev_iter_vals_list = [prev_iter_vals[ind] for ind in self._prev_iter_dofs] - self._prev_iter_vals = { - var_id: val - for (var_id, val) in zip(self._prev_iter_ids, prev_iter_vals_list) - } - - # Also make mappings from the previous time step. - prev_vals_list = [prev_vals[ind] for ind in self._prev_time_dofs] - self._prev_vals = { - var_id: val for (var_id, val) in zip(self._prev_time_ids, prev_vals_list) - } - - # Parse operators. This is left to a separate function to facilitate the + ad_base = state + + # 2. Parse operators. This is left to a separate function to facilitate the # necessary recursion for complex operators. - eq = self._parse_operator(self, mdg) + eq = self._parse_operator(self, system_manager, ad_base) return eq - def _identify_variables( - self, - system_manager: pp.ad.EquationSystem, - var: Optional[list] = None, - ): - """Identify all variables in this operator.""" - # 1. Get all variables present in this operator. - # The variable finder is implemented in a special function, aimed at recursion - # through the operator tree. - # Uniquify by making this a set, and then sort on variable id - variables = sorted( - list(set(self._find_subtree_variables())), - key=lambda var: var.id, - ) - - # 2. Get a mapping between variables (*not* only MixedDimensionalVariables) and - # their indices according to the EquationSystem. 
This is needed to access the - # state of a variable when parsing the operator to numerical values using - # forward Ad. - - # For each variable, get the global index - inds = [] - variable_ids = [] - prev_time = [] - prev_iter = [] - for variable in variables: - # Indices (in EquationSystem sense) of this variable. Will be built - # gradually for MixedDimensionalVariables, in one go for plain Variables. - ind_var = [] - prev_time.append(variable.prev_time) - prev_iter.append(variable.prev_iter) - - if isinstance(variable, MixedDimensionalVariable): - # Is this equivalent to the test in previous function? - # Loop over all subvariables for the mixed-dimensional variable - for i, sub_var in enumerate(variable.sub_vars): - if sub_var.prev_time or sub_var.prev_iter: - # If this is a variable representing a previous time step or - # iteration, we need to use the original variable to get hold of - # the correct dof indices, since this is the variable that was - # created by the EquationSystem. However, we will tie the - # indices to the id of this variable, since this is the one that - # will be used for lookup later on. - sub_var_known_to_eq_system: Variable = sub_var.original_variable - else: - sub_var_known_to_eq_system = sub_var - - # Get the index of this sub variable in the global numbering of the - # EquationSystem. If an error message is raised that the variable is - # not present in the EquationSystem, it is likely that this operator - # contains a variable that is not known to the EquationSystem (it - # has not passed through EquationSystem.create_variable()). - ind_var.append(system_manager.dofs_of([sub_var_known_to_eq_system])) - if i == 0: - # Store id of variable, but only for the first one; we will - # concatenate the arrays in ind_var into one array - variable_ids.append(variable.id) - - if len(variable.sub_vars) == 0: - # For empty lists of subvariables, we still need to assign an id - # to the variable. - variable_ids.append(variable.id) - else: - # This is a variable that lives on a single grid - if variable.prev_iter or variable.prev_time: - # If this is a variable representing a previous time step or - # iteration, we need to use the original variable to get hold of - # the correct dof indices, since this is the variable that was - # created by the EquationSystem. However, we will tie the - # indices to the id of this variable, since this is the one that - # will be used for lookup later on. - variable_known_to_eq_system = variable.original_variable - else: - variable_known_to_eq_system = variable - - ind_var.append(system_manager.dofs_of([variable_known_to_eq_system])) - variable_ids.append(variable.id) - - # Gather all indices for this variable - if len(ind_var) > 0: - inds.append(np.hstack([i for i in ind_var])) - else: - inds.append(np.array([], dtype=int)) - - return inds, variable_ids, prev_time, prev_iter - - def _find_subtree_variables(self) -> Sequence[Variable]: + def find_variables_in_tree(self) -> Sequence[Variable | MixedDimensionalVariable]: """Method to recursively look for Variables (or MixedDimensionalVariables) in an operator tree. """ @@ -961,7 +858,7 @@ def _find_subtree_variables(self) -> Sequence[Variable]: # children - they have none. 
for child in self.children: if isinstance(child, Operator): - sub_variables += child._find_subtree_variables() + sub_variables += child.find_variables_in_tree() # Some work is needed to parse the information var_list: list[Variable] = [] @@ -1234,15 +1131,226 @@ def _parse_other(self, other): return [self, DenseArray(other)] elif isinstance(other, sps.spmatrix): return [self, SparseArray(other)] - elif isinstance(other, Operator): - return [self, other] elif isinstance(other, AdArray): # This may happen when using nested pp.ad.Function. return [self, other] + elif isinstance(other, pp.ad.AbstractFunction): + # Need to put this here, because overload of AbstractFunction is not + # applied if AbstractFunction is right operand. + pp.ad.operator_functions._raise_no_arithmetics_with_functions_error() + elif isinstance(other, Operator): + # Put Operator at end, because Seconary and Abstract are also operators + return [self, other] else: raise ValueError(f"Cannot parse {other} as an AD operator") +class TimeDependentOperator(Operator): + """Intermediate parent class for operator classes, which can have a time-dependent + representation. + + Implements the notion of time step indices, as well as a method to create a + representation of an operator instance at a previous time. + + Operators created via constructor always start at the current time. + + """ + + def __init__( + self, + name: str | None = None, + domains: Optional[pp.GridLikeSequence] = None, + operation: Optional[Operator.Operations] = None, + children: Optional[Sequence[Operator]] = None, + ) -> None: + super().__init__( + name=name, domains=domains, operation=operation, children=children + ) + + self.original_operator: Operator + """Reference to the operator representing this operator at the current time amd + iterate. + + This attribute is only available in operators representing previous time steps. + + """ + + self._time_step_index: int = 0 + """Time step index, starting with 0 (current time) and increasing for previous + time steps.""" + + @property + def is_previous_time(self) -> bool: + """True, if the operator represents a previous time-step.""" + return True if self._time_step_index > 0 else False + + @property + def time_step_index(self) -> int: + """Returns the time step index this instance represents. + + - 0 indicates this is an operator at the current time step + - 1 represents the first previous time step + - 2 represents the next time step further back in time + - ... + + Note: + Time-dependent operators with time step index 0 will always return the + value at the most recent iterate. + + """ + return self._time_step_index + + def previous_timestep( + self: _TimeDependentOperator, steps: int = 1 + ) -> _TimeDependentOperator: + """Returns a copy of the time-dependent operator with an advanced time-step + index. + + Time-dependent operators do not invoke the recursion (like the base class), + but represent a leaf in the recursion tree. + + Note: + You cannot create operators at the previous time step from operators which + are at some previous iterate. Use the :attr:`original_operator` instead. + + Parameters: + steps: ``default=1`` + + Number of steps backwards in time. + + Raises: + ValueError: If this instance represents an operator at a previous iterate. + AssertionError: If ``steps`` is not strictly positive. 
+ + """ + if isinstance(self, IterativeOperator): + if self.is_previous_iterate: + raise ValueError( + "Cannot create an operator representing a previous time step," + + " if it already represents a previous iterate." + ) + + assert steps > 0, "Number of steps backwards must be strictly positive." + # TODO copy or deepcopy? Is this enough for every operator class? + op = copy.copy(self) + + # NOTE Use private time step index, because it is always an integer + # The public time step index is NONE for current time + # (which translates to -1 for the private index) + op._time_step_index = self._time_step_index + int(steps) + + # keeping track to the very first one + if self.is_current_iterate: + op.original_operator = self + else: + op.original_operator = self.original_operator + + return op + + +_TimeDependentOperator = TypeVar("_TimeDependentOperator", bound=TimeDependentOperator) + + +class IterativeOperator(Operator): + """Intermediate parent class for operator classes, which can have multiple + representations in the iterative sense. + + Implements the notion of iterate indices, as well as a method to create a + representation of an operator instance at a iterate time. + + Operators created via constructor always start at the current iterate. + + Note: + Operators which represents some previous iterate represent also + always the current time. + + """ + + def __init__( + self, + name: str | None = None, + domains: Optional[pp.GridLikeSequence] = None, + operation: Optional[Operator.Operations] = None, + children: Optional[Sequence[Operator]] = None, + ) -> None: + super().__init__( + name=name, domains=domains, operation=operation, children=children + ) + + self.original_operator: Operator + """Reference to the operator representing this operator at the current time amd + iterate. + + This attribute is only available in operators representing previous time steps. + + """ + + self._iterate_index: int = 0 + """iterate index, starting with 0 (current iterate at current time) and + increasing for previous iterates.""" + + @property + def is_previous_iterate(self) -> bool: + """True, if the operator represents a previous iterate.""" + return True if self._iterate_index > 0 else False + + @property + def iterate_index(self) -> int: + """Returns the iterate index this instance represents, at the current time. + + - 0 represents the current iterate + - 1 represents the first previous iterate + - 2 represents the iterate before that + - ... + + """ + return self._iterate_index + + def previous_iteration( + self: _IterativeOperator, steps: int = 1 + ) -> _IterativeOperator: + """Returns a copy of the iterative operator with an advanced iterate index. + + Iterative operators do not invoke the recursion (like the base class), + but represent a leaf in the recursion tree. + + Note: + You cannot create operators at the previous iterates from operators which + are at some previous time step. Use the :attr:`original_operator` instead. + + Parameters: + steps: ``default=1`` + + Number of steps backwards in the iterate sense. + + Raises: + ValueError: If this instance represents an operator at a previous time step. + AssertionError: If ``steps`` is not strictly positive. + + """ + if isinstance(self, TimeDependentOperator): + if self.is_previous_time: + raise ValueError( + "Cannot create an operator representing a previous iterate," + + " if it already represents a previous time step." + ) + assert steps > 0, "Number of steps backwards must be strictly positive." 
+ # See TODO in TimeDependentOperator.previous_timestep + op = copy.copy(self) + op._iterate_index = self._iterate_index + int(steps) + + # keeping track to the very first one + if self.is_current_iterate: + op.original_operator = self + else: + op.original_operator = self.original_operator + + return op + + +_IterativeOperator = TypeVar("_IterativeOperator", bound=IterativeOperator) + + class SparseArray(Operator): """Ad representation of a sparse matrix. @@ -1295,12 +1403,12 @@ def parse(self, mdg: pp.MixedDimensionalGrid) -> sps.spmatrix: """ return self._mat - def transpose(self) -> "SparseArray": + def transpose(self) -> SparseArray: """Returns an AD operator representing the transposed matrix.""" return SparseArray(self._mat.transpose()) @property - def T(self) -> "SparseArray": + def T(self) -> SparseArray: """Shorthand for transpose.""" return self.transpose() @@ -1371,7 +1479,7 @@ def parse(self, mdg: pp.MixedDimensionalGrid) -> np.ndarray: return self._values -class TimeDependentDenseArray(Operator): +class TimeDependentDenseArray(TimeDependentOperator): """An Ad-wrapper around a time-dependent numpy array. The array is tied to a MixedDimensionalGrid, and is distributed among the data @@ -1409,28 +1517,9 @@ def __init__( self, name: str, domains: GridLikeSequence, - previous_timestep: bool = False, ): - self.prev_time: bool = previous_timestep - """If True, the array will be evaluated using ``data[pp.TIME_STEP_SOLUTIONS]`` - (data being the data dictionaries for subdomains and interfaces). - - If False, ``data[pp.ITERATE_SOLUTIONS]`` is used. - - """ - super().__init__(name=name, domains=domains) - def previous_timestep(self) -> TimeDependentDenseArray: - """ - Returns: - This array represented at the previous time step. - - """ - return TimeDependentDenseArray( - name=self._name, domains=self._domains, previous_timestep=True - ) - def parse(self, mdg: pp.MixedDimensionalGrid) -> np.ndarray: """Convert this array into numerical values. @@ -1448,6 +1537,11 @@ def parse(self, mdg: pp.MixedDimensionalGrid) -> np.ndarray: """ vals = [] + if self.is_previous_time: + index_kwarg = {"time_step_index": self.time_step_index} + else: + index_kwarg = {"iterate_index": 0} + for g in self._domains: if self._domain_type == "subdomains": assert isinstance(g, pp.Grid) @@ -1460,16 +1554,10 @@ def parse(self, mdg: pp.MixedDimensionalGrid) -> np.ndarray: data = mdg.boundary_grid_data(g) else: raise ValueError(f"Unknown grid type: {self._domain_type}.") - if self.prev_time: - vals.append( - pp.get_solution_values( - name=self._name, data=data, time_step_index=0 - ) - ) - else: - vals.append( - pp.get_solution_values(name=self._name, data=data, iterate_index=0) - ) + + vals.append( + pp.get_solution_values(name=self._name, data=data, **index_kwarg) + ) if len(vals) > 0: # Normal case: concatenate the values from all grids @@ -1479,10 +1567,13 @@ def parse(self, mdg: pp.MixedDimensionalGrid) -> np.ndarray: return np.empty(0, dtype=float) def __repr__(self) -> str: - return ( + msg = ( f"Wrapped time-dependent array with name {self._name}.\n" f"Defined on {len(self._domains)} {self._domain_type}.\n" ) + if self.is_previous_time: + msg += f"Evaluated at the previous time step {self.time_step_index}.\n" + return msg class Scalar(Operator): @@ -1546,26 +1637,38 @@ def set_value(self, value: float) -> None: self._value = value -class Variable(Operator): +class Variable(TimeDependentOperator, IterativeOperator): """AD operator representing a variable defined on a single grid or mortar grid. 
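Since ``Variable`` below inherits from both mixins, the index bookkeeping introduced above can be summarized in a short sketch. Creating a bare ``Variable`` outside an ``EquationSystem`` is done here for illustration only.

.. code:: python

    import porepy as pp

    sd = pp.CartGrid([2, 2])
    var = pp.ad.Variable("pressure", {"cells": 1}, sd)

    assert var.time_step_index == 0 and var.iterate_index == 0
    assert var.is_current_iterate

    var_prev = var.previous_timestep()            # first previous time step
    assert var_prev.time_step_index == 1
    assert var_prev.original_operator is var

    var_prev2 = var.previous_timestep(steps=2)    # two steps back in time
    assert var_prev2.time_step_index == 2

    var_it = var.previous_iteration()             # previous iterate, current time
    assert var_it.iterate_index == 1

    # Mixing the two notions is disallowed: var_prev.previous_iteration() and
    # var_it.previous_timestep() both raise ValueError; step in the other
    # direction via original_operator instead.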
- For combinations of variables on different subdomains, see :class:`MergedVariable`. - - Conversion of the variable into numerical value should be done with respect to the - state of an array; see :meth:`Operator.evaluate`. Therefore, the variable does not - implement the method :meth:`Operator.parse`. + For combinations of variables on different subdomains, see + :class:`MixedDimensionalVariable`. A variable is associated with either a grid or an interface. Therefore it is assumed that either ``subdomains`` or ``interfaces`` is passed as an argument. + Also, a variable is associated with a specific time and iterate index. :meth:`parse` + will return the values at respective index on its :meth:`domain`. + + Important: + Each atomic variable (a variable on a single grid) has a :attr:`id`, unique + among created variables. This ID is used to map the DOFs in the global system + and hence critical. + + As of now, variable instances representing the same quantity at different + time and iterate steps have the same ID. + + This might with future development (e.g. adaptive mesh refinement). + Parameters: name: Variable name. ndof: Number of dofs per grid element. Valid keys are ``cells``, ``faces`` and ``nodes``. - subdomains (length=1): List containing a single grid. - interfaces (length=1): List containing a single mortar grid. - num_cells: Number of cells in the grid. - Only relevant if this is an interface variable. + domain: A subdomain or interface on which the variable is defined. + tags: A dictionary of tags. + + Raises: + NotImplementedError: If ``domain`` is not a grid or mortar grid. Variables are + not supported on boundaries. """ @@ -1580,9 +1683,14 @@ def __init__( ndof: dict[Literal["cells", "faces", "nodes"], int], domain: GridLike, tags: Optional[dict[str, Any]] = None, - previous_timestep: bool = False, - previous_iteration: bool = False, ) -> None: + + # Variables are not supported on the boundary. + if not isinstance(domain, (pp.Grid, pp.MortarGrid)): + raise NotImplementedError( + "Variables only supported on domains of type 'Grid' or 'MortarGrid'." + ) + # Block a mypy warning here: Domain is known to be GridLike (grid, mortar grid, # or boundary grid), thus the below wrapping in a list gives a list of GridLike, # but the super constructor expects a sequence of grids, sequence or mortar @@ -1590,36 +1698,10 @@ def __init__( # circumvent the warning is not worth it. super().__init__(name=name, domains=[domain]) # type: ignore [arg-type] - ### PUBLIC - - self.prev_time: bool = previous_timestep - """Flag indicating if the variable represents the state at the previous time - step. - - """ - - self.prev_iter: bool = previous_iteration - """Flag indicating if the variable represents the state at the previous - iteration. - - """ - - self.id: int = next(Variable._ids) - """ID counter. Used to identify variables during operator parsing.""" - - self.original_variable: Variable - """The original variable, if this variable is a copy of another variable. - - This attribute is used by the methods :meth:`Variable.previous_timestep` and - :meth:`Variable.previous_iteration` to keep a link to the original variable. 
- """ - - if self._domain_type == "boundary grids": - raise NotImplementedError("Variables on boundaries are not supported.") - - ### PRIVATE - # domain + self._id: int = next(Variable._ids) + """See :meth:`id`.""" self._g: GridLike = domain + """See :meth:`domain`""" # dofs per self._cells: int = ndof.get("cells", 0) self._faces: int = ndof.get("faces", 0) @@ -1628,9 +1710,38 @@ def __init__( # tag self._tags: dict[str, Any] = tags if tags is not None else {} + @property + def id(self) -> int: + """Returns an integer unique among variables used for identification. + Assigned during instantiation. + + The id of a variable is common for all instances of the variable, regardless of + whether it represents the present state, the previous iteration, or the previous + time step. + + While a specific variable can be identified in terms of its id, it is often + advisable to rather use its name and domain, preferrably using relevant + functionality in + :class:`~porepy.numerics.ad.equation_system.EquationSystem`. + + """ + return self._id + @property def domain(self) -> GridLike: - """The grid or mortar grid on which this variable is defined.""" + """The grid or mortar grid on which this variable is defined. + + Note: + Not to be confused with :meth:`domains`, which has the grid in a sequence + of length 1. + + This is for inheritance reasons, since :class:`Variable` inherits from + :class:`Operator`. + + TODO: Clean up. + + + """ return self._g @property @@ -1661,84 +1772,29 @@ def set_name(self, name: str) -> None: """ raise RuntimeError("Cannot rename operators representing a variable.") - def previous_timestep(self) -> Variable: - """Return a representation of this variable on the previous time step. - - Raises: - ValueError: - If the variable is a representation of the previous iteration, - previously set by :meth:`~previous_iteration`. - - NotImplementedError: - If the variable is already a representation of the previous time step. - Currently, we support creating only one previous time step. - - Returns: - A representation of this variable at the previous time step, - with its ``prev_time`` attribute set to ``True``. - - """ - if self.prev_time: - raise NotImplementedError( - "Currently, it is not supported to create a variable that represents " - "more than one time step behind." - ) - - if self.prev_iter: - raise ValueError( - "Cannot create a variable both on the previous time step and " - "previous iteration." - ) - - ndof: dict[Literal["cells", "faces", "nodes"], int] = { - "cells": self._cells, - "faces": self._faces, - "nodes": self._nodes, - } - new_var = Variable(self.name, ndof, self.domain, previous_timestep=True) - # Assign self as the original variable. - new_var.original_variable = self - return new_var - - def previous_iteration(self) -> Variable: - """Return a representation of this mixed-dimensional variable on the previous - iteration. - - Raises: - ValueError: - If the variable is a representation of the previous time step, - previously set by :meth:`~previous_timestep`. - - NotImplementedError: - If the variable is already a representation of the previous time - iteration. Currently, we support creating only one previous iteration. - - Returns: - A representation of this variable on the previous time iteration, - with its ``prev_iter`` attribute set to ``True``. - - """ - if self.prev_time: - raise ValueError( - "Cannot create a variable both on the previous time step and " - "previous iteration." 
+ def parse(self, mdg: pp.MixedDimensionalGrid) -> Any: + """Returns the values stored for this variable at its time step or iterate + index.""" + + # By logic in the constructor, it can only be a subdomain or interface + if isinstance(self._g, pp.Grid): + data = mdg.subdomain_data(self._g) + elif isinstance(self._g, pp.MortarGrid): + data = mdg.interface_data(self._g) + + if self.is_previous_time: + return pp.get_solution_values( + self.name, + data, + time_step_index=self.time_step_index, ) - if self.prev_iter: - raise NotImplementedError( - "Currently, it is not supported to create a variable that represents " - "more than one iteration behind." + else: + return pp.get_solution_values( + self.name, + data, + iterate_index=self.iterate_index, ) - ndof: dict[Literal["cells", "faces", "nodes"], int] = { - "cells": self._cells, - "faces": self._faces, - "nodes": self._nodes, - } - new_var = Variable(self.name, ndof, self.domain, previous_iteration=True) - # Assign self as the original variable. - new_var.original_variable = self - return new_var - def __repr__(self) -> str: s = f"Variable {self.name} with id {self.id}" if isinstance(self.domain, pp.MortarGrid): @@ -1749,89 +1805,112 @@ def __repr__(self) -> str: f"Degrees of freedom: cells ({self._cells}), faces ({self._faces}), " f"nodes ({self._nodes})\n" ) - if self.prev_iter: - s += "Evaluated at the previous iteration.\n" - elif self.prev_time: - s += "Evaluated at the previous time step.\n" + if self.is_previous_iterate: + s += f"Evaluated at the previous iteration {self.iterate_index}.\n" + elif self.is_previous_time: + s += f"Evaluated at the previous time step {self.time_step_index}.\n" return s class MixedDimensionalVariable(Variable): """Ad representation of a collection of variables that individually live on separate - subdomains or interfaces, but treated jointly in the mixed-dimensional sense. + subdomains or interfaces, but represent the same quantity and are treated jointly in + the mixed-dimensional sense. - Conversion of the variables into numerical value should be done with respect to the - state of an array; see :meth:`Operator.evaluate`. Therefore, the MergedVariable does - not implement the method :meth:`Operator.parse`. + Note: + As of now, the wrapped fixed-dimensional variables must fulfill the following + assumptions: + + 1. They have the same name + 2. They are at the same time step and iterate. + 3. They are defined in different grids (no overlaps). Parameters: - variables: List of variables to be merged. Should all have the same name. + variables: List of variables to be merged. + + Raises: + AssertionError: If one of the above assumptions is violated. """ def __init__(self, variables: list[Variable]) -> None: - ### PUBLIC - self.sub_vars = variables - """List of sub-variables passed at instantiation, each defined on a separate - domain. - - """ + time_indices = [] + iterate_indices = [] + names = [] + domains = [] + + for var in variables: + time_indices.append(var.time_step_index) + iterate_indices.append(var.iterate_index) + names.append(var.name) + domains.append(var.domain) + + # check assumptions + if len(variables) > 0: + assert ( + len(set(time_indices)) == 1 + ), "Cannot create md-variable from variables at different time steps." + assert ( + len(set(iterate_indices)) == 1 + ), "Cannot create md-variable from variables at different iterates." + assert ( + len(set(names)) == 1 + ), "Cannot create md-variable from variables with different names." 
+ assert len(set(domains)) == len( + domains + ), "Cannot create md-variable from variables with overlapping domains." + # Default values for empty md variable + else: + time_indices = [-1] + iterate_indices = [0] + names = ["empty_md_variable"] - self.id = next(Variable._ids) - """ID counter. Used to identify variables during operator parsing.""" + ### PRIVATE + self._id = next(Variable._ids) + # NOTE private time step index is -1 if public time step index of atomic + # variables is None (current time) + self._time_step_index = -1 if time_indices[0] is None else time_indices[0] + # NOTE private and public iterate indices are always integers + self._iterate_index = iterate_indices[0] + self._name = names[0] - self.prev_time: bool = False - """Flag indicating if the variable represents the state at the previous time - step. + # Mypy complains that we do not know that all variables have the same type of + # domain. While formally correct, this should be picked up in other places so we + # ignore the warning here. + self._domains = domains # type: ignore[assignment] - """ + ### PUBLIC - self.prev_iter: bool = False - """Flag indicating if the variable represents the state at the previous - iteration. + self.sub_vars = variables + """List of sub-variables passed at instantiation, each defined on a separate + domain. """ - self.original_variable: MixedDimensionalVariable - """The original variable, if this variable is a copy of another variable. - - This attribute is used by the methods :meth:`Variable.previous_timestep` and - :meth:`Variable.previous_iteration` to keep a link to the original variable. - - """ + self._initialize_children() + self.copy_common_sub_tags() - ### PRIVATE + def __repr__(self) -> str: + if len(self.sub_vars) == 0: + return ( + "Mixed-dimensional variable defined on an empty list of " + "subdomains or interfaces." + ) - # Flag to identify variables merged over no subdomains. This requires special - # treatment in various parts of the code. A use case is variables that are only - # defined on subdomains of codimension >= 1 (e.g., contact traction variable), - # assigned to a problem where the grid happened not to have any fractures. - self._no_variables = len(variables) == 0 + s = "Mixed-dimensional" + s += ( + f" variable with name {self.name}, id {self.id}\n" + f"Composed of {len(self.sub_vars)} variables\n" + f"Total size: {self.size}\n" + ) + if self.is_previous_iterate: + s += f"Evaluated at the previous iteration {self.iterate_index}.\n" + elif self.is_previous_time: + s += f"Evaluated at the previous time step {self.time_step_index}.\n" - # It should be defined in the parent class, but we do not call super().__init__ - # Mypy complains that we do not know that all variables have the same type of - # domain. While formally correct, this should be picked up in other places so we - # ignore the warning here. - self._domains = [ - var.domains[0] for var in variables # type: ignore[assignment] - ] - # Take the name from the first variable. - if self._no_variables: - self._name = "no_sub_variables" - else: - self._name = variables[0].name - # Check that all variables have the same name. - # We may release this in the future, but for now, we make it a requirement - all_names = set(var.name for var in variables) - assert len(all_names) <= 1 - - # must be done since super not called here in init - # Yura: Is it only the problem of type checking that makes us inherit from - # Variable? 
- self._initialize_children() - self.copy_common_sub_tags() + return s def copy_common_sub_tags(self) -> None: """Copy any shared tags from the sub-variables to this variable. @@ -1842,7 +1921,7 @@ def copy_common_sub_tags(self) -> None: """ self._tags = {} # If there are no sub variables, there is nothing to do. - if self._no_variables: + if len(self.sub_vars) == 0: return # Initialize with tags from the first sub-variable. common_tags = set(self.sub_vars[0].tags.keys()) @@ -1861,12 +1940,7 @@ def copy_common_sub_tags(self) -> None: @property def domain(self) -> list[GridLike]: # type: ignore[override] """A tuple of all domains on which the atomic sub-variables are defined.""" - domains = [var.domain for var in self.sub_vars] - # Verify that all domains of of the same type - assert all(isinstance(d, pp.Grid) for d in domains) or all( - isinstance(d, pp.MortarGrid) for d in domains - ) - return domains + return [var.domain for var in self.sub_vars] @property def size(self) -> int: @@ -1874,86 +1948,27 @@ def size(self) -> int: by summing the sizes of sub-variables.""" return sum([v.size for v in self.sub_vars]) - def previous_timestep(self) -> MixedDimensionalVariable: - """Return a representation of this mixed-dimensional variable on the previous - time step. - - Raises: - ValueError: - If the variable is a representation of the previous iteration, - previously set by :meth:`~previous_iteration`. - - NotImplementedError: - If the variable is already a representation of the previous time step. - Currently, we support creating only one previous time step. - - Returns: - A representation of this merged variable on the previous time - iteration, with its ``prev_iter`` attribute set to ``True``. - - """ - - new_subs = [var.previous_timestep() for var in self.sub_vars] - new_var = MixedDimensionalVariable(new_subs) - new_var.prev_time = True - # Assign self as the original variable. - new_var.original_variable = self - return new_var - - def previous_iteration(self) -> MixedDimensionalVariable: - """Return a representation of this mixed-dimensional variable on the previous - iteration. - - Raises: - ValueError: - If the variable is a representation of the previous time step, - previously set by :meth:`~previous_timestep`. - - NotImplementedError: - If the variable is already a representation of the previous time - iteration. Currently, we support creating only one previous iteration. - - Returns: - A representation of this merged variable on the previous - iteration, with its ``prev_iter`` attribute set to ``True`` - - """ - new_subs = [var.previous_iteration() for var in self.sub_vars] - new_var = MixedDimensionalVariable(new_subs) - new_var.prev_iter = True - # Assign self as the original variable. - new_var.original_variable = self - return new_var - - def copy(self) -> "MixedDimensionalVariable": - """Copy the mixed-dimensional variable. - - Returns: - A shallow copy should be sufficient here; the attributes are not expected to - change. - - """ - return copy.deepcopy(self) + def parse(self, mdg: pp.MixedDimensionalGrid) -> Any: + """Returns a sequence of values stored for each variable in :attr:`sub_vars`.""" + raise TypeError( + "Md-variables parsed on a md-grid without the equation system." + + " Use ``value(equation_system)`` instead." + ) - def __repr__(self) -> str: - if self._no_variables: - return ( - "Mixed-dimensional variable defined on an empty list of " - "subdomains or interfaces." 
- ) + def previous_timestep(self, steps: int = 1) -> MixedDimensionalVariable: + """Mixed-dimensional variables have sub-variables which also need to be + obtained at the previous time step.""" - s = "Mixed-dimensional" - s += ( - f" variable with name {self.name}, id {self.id}\n" - f"Composed of {len(self.sub_vars)} variables\n" - f"Total size: {self.size}\n" - ) - if self.prev_iter: - s += "Evaluated at the previous iteration.\n" - elif self.prev_time: - s += "Evaluated at the previous time step.\n" + op = super().previous_timestep(steps=steps) + op.sub_vars = [var.previous_timestep(steps=steps) for var in self.sub_vars] + return op - return s + def previous_iteration(self, steps: int = 1) -> MixedDimensionalVariable: + """Mixed-dimensional variables have sub-variables which also need to be + obtained at the previous iteration.""" + op = super().previous_iteration(steps=steps) + op.sub_vars = [var.previous_iteration(steps=steps) for var in self.sub_vars] + return op @overload diff --git a/src/porepy/numerics/fracture_deformation/conforming_propagation.py b/src/porepy/numerics/fracture_deformation/conforming_propagation.py index dacdf54a3d..11903c333b 100644 --- a/src/porepy/numerics/fracture_deformation/conforming_propagation.py +++ b/src/porepy/numerics/fracture_deformation/conforming_propagation.py @@ -574,7 +574,7 @@ def _pick_propagation_faces( cells = np.unique(sd_primary.cell_faces[faces_primary].nonzero()[1]) vals[cells] = 1 pp.set_solution_values( - name="neighbor_cells", values=vals, data=data_primary, time_step_index=0 + name="neighbor_cells", values=vals, data=data_primary, time_step_index=1 ) def _tip_bases( diff --git a/src/porepy/numerics/fracture_deformation/propagation_model.py b/src/porepy/numerics/fracture_deformation/propagation_model.py index 6a7d1d6a5b..b05a6865b0 100644 --- a/src/porepy/numerics/fracture_deformation/propagation_model.py +++ b/src/porepy/numerics/fracture_deformation/propagation_model.py @@ -170,7 +170,7 @@ def _map_variables(self, x: np.ndarray) -> np.ndarray: # Only cell-based dofs have been considered so far. # It should not be difficult to handle other types of variables, # but the need has not been there. - dofs = self.equation_system._variable_dof_type[var] + dofs = self.equation_system._variable_dof_type[var.id] face_dof: int = dofs.get("faces", 0) node_dof: int = dofs.get("nodes", 0) if face_dof != 0 or node_dof != 0: @@ -239,7 +239,7 @@ def _map_variables(self, x: np.ndarray) -> np.ndarray: # grid, second populate newly formed cells. # Mapping of old variables. - dofs = self.equation_system._variable_dof_type[var] + dofs = self.equation_system._variable_dof_type[var.id] cell_dof = dofs["cells"] mapping = sps.kron(cell_map, sps.eye(cell_dof)) x_new[self.equation_system.dofs_of([var])] = ( diff --git a/src/porepy/numerics/nonlinear/nonlinear_solvers.py b/src/porepy/numerics/nonlinear/nonlinear_solvers.py index e947f2b651..23ac3afe80 100644 --- a/src/porepy/numerics/nonlinear/nonlinear_solvers.py +++ b/src/porepy/numerics/nonlinear/nonlinear_solvers.py @@ -74,7 +74,7 @@ def solve(self, model) -> tuple[bool, int]: is_converged = False is_diverged = False nonlinear_increment = model.equation_system.get_variable_values( - time_step_index=0 + time_step_index=1 ) # Extract residual of initial guess. 
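An illustrative sketch of how the reworked previous_timestep/previous_iteration handling above is used, relying only on porepy calls exercised elsewhere in this patch; the grid, the variable name "foo" and the stored values are placeholders:

import numpy as np
import porepy as pp

# Small single-grid mixed-dimensional setup, mirroring the test fixtures further down.
g = pp.CartGrid(np.array([3, 2]))
mdg = pp.meshing.subdomains_to_mdg([g])
mdg.compute_geometry()

eqsys = pp.ad.EquationSystem(mdg)
eqsys.create_variables("foo", {"cells": 1}, mdg.subdomains())
var = eqsys.md_variable("foo", mdg.subdomains())

n = mdg.num_subdomain_cells()
# iterate_index=0 holds the current iterate; the previous time step now lives at
# time_step_index=1.
eqsys.set_variable_values(np.ones(n), [var], iterate_index=0)
eqsys.set_variable_values(2 * np.ones(n), [var], time_step_index=1)

# previous_timestep/previous_iteration accept a number of steps and propagate the
# request to the sub-variables of a mixed-dimensional variable.
var_prev = var.previous_timestep(steps=1)
assert np.allclose(var_prev.value(eqsys), 2.0)
# Representations at previous time steps or iterations carry a zero Jacobian.
assert np.all(var_prev.value_and_jacobian(eqsys).jac.A == 0.0)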
diff --git a/src/porepy/numerics/vem/dual_elliptic.py b/src/porepy/numerics/vem/dual_elliptic.py index 03b3e0b9ef..5b16e31a16 100644 --- a/src/porepy/numerics/vem/dual_elliptic.py +++ b/src/porepy/numerics/vem/dual_elliptic.py @@ -42,7 +42,7 @@ def project_flux( # we need to recover the flux from the mortar variable before # the projection, only lower dimensional edges need to be considered. edge_flux = np.zeros( - pp.get_solution_values(name=flux, data=data, time_step_index=0).size + pp.get_solution_values(name=flux, data=data, time_step_index=1).size ) faces = sd.tags["fracture_faces"] if np.any(faces): @@ -63,14 +63,14 @@ def project_flux( # edge_flux += sign * g_m.mortar_to_primary_int() * # d_e[pp.TIME_STEP_SOLUTIONS][mortar_key][0] mortar_values = pp.get_solution_values( - name=mortar_key, data=data_intf, time_step_index=0 + name=mortar_key, data=data_intf, time_step_index=1 ) edge_flux += sign * intf.primary_to_mortar_avg().T * mortar_values - flux_values = pp.get_solution_values(name=flux, data=data, time_step_index=0) + flux_values = pp.get_solution_values(name=flux, data=data, time_step_index=1) discr_projected_flux = discr.project_flux(sd, edge_flux + flux_values, data) pp.set_solution_values( - name=P0_flux, values=discr_projected_flux, data=data, time_step_index=0 + name=P0_flux, values=discr_projected_flux, data=data, time_step_index=1 ) diff --git a/src/porepy/viz/data_saving_model_mixin.py b/src/porepy/viz/data_saving_model_mixin.py index 28a7082763..73f2a5ff8a 100644 --- a/src/porepy/viz/data_saving_model_mixin.py +++ b/src/porepy/viz/data_saving_model_mixin.py @@ -99,7 +99,7 @@ def data_to_export(self) -> list[DataInput]: variables = self.equation_system.variables for var in variables: scaled_values = self.equation_system.get_variable_values( - variables=[var], time_step_index=0 + variables=[var], time_step_index=1 ) units = var.tags["si_units"] values = self.fluid.convert_units(scaled_values, units, to_si=True) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 1f4674f246..ec9ea45802 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -500,7 +500,7 @@ def _from_vector_format( value[offset : offset + sd.num_cells], sd ) pp.set_solution_values( - name=key, values=values, data=sd_data, time_step_index=0 + name=key, values=values, data=sd_data, time_step_index=1 ) offset += sd.num_cells @@ -516,7 +516,7 @@ def _from_vector_format( name=key, values=values, data=intf_data, - time_step_index=0, + time_step_index=1, ) offset += intf.num_cells @@ -966,7 +966,7 @@ def _add_data( ): # Fetch data and convert to vectorial format if needed data_to_convert = pp.get_solution_values( - name=key, data=grid_data, time_step_index=0 + name=key, data=grid_data, time_step_index=1 ) value: np.ndarray = _to_vector_format( data_to_convert, @@ -1058,7 +1058,7 @@ def add_data_from_tuple_subdomains_str( # Fetch data and convert to vectorial format if suitable data_to_convert = pp.get_solution_values( - name=key, data=sd_data, time_step_index=0 + name=key, data=sd_data, time_step_index=1 ) value = _to_vector_format(data_to_convert, sd) @@ -1130,7 +1130,7 @@ def add_data_from_tuple_interfaces_str( # Fetch data and convert to vectorial format if suitable data_to_convert = pp.get_solution_values( - name=key, data=intf_data, time_step_index=0 + name=key, data=intf_data, time_step_index=1 ) value = _to_vector_format(data_to_convert, intf) diff --git a/src/porepy/viz/plot_grid.py b/src/porepy/viz/plot_grid.py index 9bc089054c..3e4aeeb9eb 100644 --- 
a/src/porepy/viz/plot_grid.py +++ b/src/porepy/viz/plot_grid.py @@ -263,7 +263,7 @@ def plot_mdg( extr_value = np.array([np.inf, -np.inf]) for _, sd_data in mdg.subdomains(return_data=True): values = pp.get_solution_values( - name=cell_value, data=sd_data, time_step_index=0 + name=cell_value, data=sd_data, time_step_index=1 ) extr_value[0] = min( np.amin(values), diff --git a/tests/functional/setups/manu_flow_comp_2d_frac.py b/tests/functional/setups/manu_flow_comp_2d_frac.py index d01cc95660..c7ed71a754 100644 --- a/tests/functional/setups/manu_flow_comp_2d_frac.py +++ b/tests/functional/setups/manu_flow_comp_2d_frac.py @@ -661,8 +661,7 @@ def fluid_source(self, subdomains: list[pp.Grid]) -> pp.ad.Operator: external_sources = pp.ad.TimeDependentDenseArray( name="external_sources", domains=self.mdg.subdomains(), - previous_timestep=True, - ) + ).previous_timestep() # Add-up contribution fluid_source = internal_sources + external_sources @@ -748,7 +747,7 @@ def update_time_dependent_ad_arrays(self) -> None: name="external_sources", values=matrix_source, data=data_matrix, - time_step_index=0, + time_step_index=1, ) frac_source = self.exact_sol.fracture_source(sd_frac, t) @@ -757,7 +756,7 @@ def update_time_dependent_ad_arrays(self) -> None: name="external_sources", values=frac_source, data=data_frac, - time_step_index=0, + time_step_index=1, ) def after_simulation(self) -> None: diff --git a/tests/functional/setups/manu_poromech_nofrac_2d.py b/tests/functional/setups/manu_poromech_nofrac_2d.py index 72e6368d03..a230eaed23 100644 --- a/tests/functional/setups/manu_poromech_nofrac_2d.py +++ b/tests/functional/setups/manu_poromech_nofrac_2d.py @@ -642,8 +642,7 @@ def fluid_source(self, subdomains: list[pp.Grid]) -> pp.ad.Operator: external_sources = pp.ad.TimeDependentDenseArray( name="source_flow", domains=self.mdg.subdomains(), - previous_timestep=True, - ) + ).previous_timestep() # Add up contribution of internal and external sources of fluid fluid_sources = internal_sources + external_sources @@ -680,8 +679,7 @@ def body_force(self, subdomains: list[pp.Grid]) -> pp.ad.Operator: external_sources = pp.ad.TimeDependentDenseArray( name="source_mechanics", domains=self.mdg.subdomains(), - previous_timestep=True, - ) + ).previous_timestep() return external_sources @@ -755,13 +753,13 @@ def before_nonlinear_loop(self) -> None: # Mechanics source mech_source = self.exact_sol.mechanics_source(sd=sd, time=t) pp.set_solution_values( - name="source_mechanics", values=mech_source, data=data, time_step_index=0 + name="source_mechanics", values=mech_source, data=data, time_step_index=1 ) # Flow source flow_source = self.exact_sol.flow_source(sd=sd, time=t) pp.set_solution_values( - name="source_flow", values=flow_source, data=data, time_step_index=0 + name="source_flow", values=flow_source, data=data, time_step_index=1 ) def after_simulation(self) -> None: diff --git a/tests/functional/setups/manu_thermoporomech_nofrac_2d.py b/tests/functional/setups/manu_thermoporomech_nofrac_2d.py index 9d71d64122..5c6ee8455b 100644 --- a/tests/functional/setups/manu_thermoporomech_nofrac_2d.py +++ b/tests/functional/setups/manu_thermoporomech_nofrac_2d.py @@ -988,8 +988,7 @@ def fluid_source(self, subdomains: list[pp.Grid]) -> pp.ad.Operator: external_sources = pp.ad.TimeDependentDenseArray( name="source_flow", domains=self.mdg.subdomains(), - previous_timestep=True, - ) + ).previous_timestep() # Add up contribution of internal and external sources of fluid fluid_sources = internal_sources + external_sources @@ -1003,8 
+1002,7 @@ def body_force(self, subdomains: list[pp.Grid]) -> pp.ad.Operator: external_sources = pp.ad.TimeDependentDenseArray( name="source_mechanics", domains=self.mdg.subdomains(), - previous_timestep=True, - ) + ).previous_timestep() return external_sources @@ -1021,8 +1019,7 @@ def energy_source(self, subdomains: list[pp.Grid]) -> pp.ad.Operator: external_sources = pp.ad.TimeDependentDenseArray( name="source_energy", domains=self.mdg.subdomains(), - previous_timestep=True, - ) + ).previous_timestep() # Add up contribution of internal and external sources of energy. thermal_sources = internal_sources + external_sources @@ -1074,18 +1071,18 @@ def before_nonlinear_loop(self) -> None: # Mechanics source mech_source = self.exact_sol.mechanics_source(sd=sd, time=t) pp.set_solution_values( - name="source_mechanics", values=mech_source, data=data, time_step_index=0 + name="source_mechanics", values=mech_source, data=data, time_step_index=1 ) # Flow source flow_source = self.exact_sol.flow_source(sd=sd, time=t) pp.set_solution_values( - name="source_flow", values=flow_source, data=data, time_step_index=0 + name="source_flow", values=flow_source, data=data, time_step_index=1 ) # Energy source energy_source = self.exact_sol.energy_source(sd=sd, time=t) pp.set_solution_values( - name="source_energy", values=energy_source, data=data, time_step_index=0 + name="source_energy", values=energy_source, data=data, time_step_index=1 ) def _is_nonlinear_problem(self) -> bool: diff --git a/tests/functional/setups/manu_thermoporomech_nofrac_3d.py b/tests/functional/setups/manu_thermoporomech_nofrac_3d.py index ce39c19227..aefdb0c0c4 100644 --- a/tests/functional/setups/manu_thermoporomech_nofrac_3d.py +++ b/tests/functional/setups/manu_thermoporomech_nofrac_3d.py @@ -909,18 +909,18 @@ def before_nonlinear_loop(self) -> None: # Mechanics source mech_source = self.exact_sol.mechanics_source(sd=sd, time=t) pp.set_solution_values( - name="source_mechanics", values=mech_source, data=data, time_step_index=0 + name="source_mechanics", values=mech_source, data=data, time_step_index=1 ) # Flow source flow_source = self.exact_sol.flow_source(sd=sd, time=t) pp.set_solution_values( - name="source_flow", values=flow_source, data=data, time_step_index=0 + name="source_flow", values=flow_source, data=data, time_step_index=1 ) # Energy source energy_source = self.exact_sol.energy_source(sd=sd, time=t) pp.set_solution_values( - name="source_energy", values=energy_source, data=data, time_step_index=0 + name="source_energy", values=energy_source, data=data, time_step_index=1 ) def _is_nonlinear_problem(self) -> bool: diff --git a/tests/models/test_energy_balance.py b/tests/models/test_energy_balance.py index 0aadc4b80c..11913a1f12 100644 --- a/tests/models/test_energy_balance.py +++ b/tests/models/test_energy_balance.py @@ -154,7 +154,7 @@ def test_advection_or_diffusion_dominated(fluid_vals, solid_vals): for sd in setup.mdg.subdomains(): var = setup.equation_system.get_variables(["temperature"], [sd]) vals = setup.equation_system.get_variable_values( - variables=var, time_step_index=0 + variables=var, time_step_index=1 ) assert np.allclose( vals, diff --git a/tests/models/test_fluid_mass_balance.py b/tests/models/test_fluid_mass_balance.py index 6291a5b257..363a468e09 100644 --- a/tests/models/test_fluid_mass_balance.py +++ b/tests/models/test_fluid_mass_balance.py @@ -740,21 +740,21 @@ class TestMixedDimGravity: def solve(self): pp.run_time_dependent_model(self.model, self.model.params) pressure = 
self.model.equation_system.get_variable_values( - [self.model.pressure_variable], time_step_index=0 + [self.model.pressure_variable], time_step_index=1 ) return pressure def verify_pressure(self, p_known: float = 0): """Verify that the pressure of all subdomains equals p_known.""" pressure = self.model.equation_system.get_variable_values( - [self.model.pressure_variable], time_step_index=0 + [self.model.pressure_variable], time_step_index=1 ) assert np.allclose(pressure, p_known, rtol=1e-3, atol=1e-3) def verify_mortar_flux(self, u_known: float): """Verify that the mortar flux of all interfaces equals u_known.""" flux = self.model.equation_system.get_variable_values( - [self.model.interface_darcy_flux_variable], time_step_index=0 + [self.model.interface_darcy_flux_variable], time_step_index=1 ) assert np.allclose(np.abs(flux), u_known, rtol=1e-3, atol=1e-3) @@ -770,7 +770,7 @@ def verify_hydrostatic(self, angle=0, a=1e-1): sd_primary = mdg.subdomains(dim=mdg.dim_max())[0] data_primary = mdg.subdomain_data(sd_primary) p_primary = pp.get_solution_values( - name="pressure", data=data_primary, time_step_index=0 + name="pressure", data=data_primary, time_step_index=1 ) # The cells above the fracture @@ -782,7 +782,7 @@ def verify_hydrostatic(self, angle=0, a=1e-1): sd_secondary = mdg.subdomains(dim=mdg.dim_max() - 1)[0] data_secondary = mdg.subdomain_data(sd_secondary) p_secondary = pp.get_solution_values( - name="pressure", data=data_secondary, time_step_index=0 + name="pressure", data=data_secondary, time_step_index=1 ) # Half the additional jump is added to the fracture pressure @@ -791,7 +791,7 @@ def verify_hydrostatic(self, angle=0, a=1e-1): assert np.allclose(p_secondary, p_known, rtol=1e-3, atol=1e-3) flux = self.model.equation_system.get_variable_values( - [self.model.interface_darcy_flux_variable], time_step_index=0 + [self.model.interface_darcy_flux_variable], time_step_index=1 ) assert np.allclose(flux, 0, rtol=1e-3, atol=1e-3) diff --git a/tests/models/test_momentum_balance.py b/tests/models/test_momentum_balance.py index 411adac66b..52579050fc 100644 --- a/tests/models/test_momentum_balance.py +++ b/tests/models/test_momentum_balance.py @@ -58,7 +58,7 @@ def test_2d_single_fracture(solid_vals, north_displacement): # Check that the pressure is linear sd = setup.mdg.subdomains(dim=setup.nd)[0] var = setup.equation_system.get_variables([setup.displacement_variable], [sd]) - vals = setup.equation_system.get_variable_values(variables=var, time_step_index=0) + vals = setup.equation_system.get_variable_values(variables=var, time_step_index=1) if np.isclose(north_displacement, 0): assert np.allclose(vals, 0) else: @@ -257,7 +257,7 @@ def test_lithostatic(dim: int): # Fetch the displacement variable and convert it to an model.nd x model.num_cells # array. var = model.equation_system.get_variables([model.displacement_variable], [sd]) - vals = model.equation_system.get_variable_values(variables=var, time_step_index=0) + vals = model.equation_system.get_variable_values(variables=var, time_step_index=1) vals = vals.reshape((model.nd, -1), order="F") # Analytical displacement. 
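The test updates above and below all rely on the same storage convention: iterate_index=0 is the current iterate, while the most recently stored time step moves from index 0 to index 1. A minimal sketch of that convention at the data-dictionary level, using only the set/get helpers appearing in this patch; the grid and the variable name "pressure" are placeholders:

import numpy as np
import porepy as pp

g = pp.CartGrid(np.array([2, 2]))
mdg = pp.meshing.subdomains_to_mdg([g])
mdg.compute_geometry()

for sd, data in mdg.subdomains(return_data=True):
    vals = np.zeros(sd.num_cells)
    # Current iterate.
    pp.set_solution_values(name="pressure", values=vals, data=data, iterate_index=0)
    # Previous (most recently stored) time step, now at index 1 instead of 0.
    pp.set_solution_values(
        name="pressure", values=vals + 1.0, data=data, time_step_index=1
    )
    # Retrieval mirrors the setters.
    p_prev = pp.get_solution_values(name="pressure", data=data, time_step_index=1)
    assert np.allclose(p_prev, 1.0)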
diff --git a/tests/models/test_poromechanics.py b/tests/models/test_poromechanics.py index 7291adefda..d0f41c1774 100644 --- a/tests/models/test_poromechanics.py +++ b/tests/models/test_poromechanics.py @@ -49,7 +49,7 @@ def initial_condition(self): self.equation_system.set_variable_values( self.fluid.pressure() * np.ones(self.mdg.num_subdomain_cells()), [self.pressure_variable], - time_step_index=0, + time_step_index=1, iterate_index=0, ) sd, sd_data = self.mdg.subdomains(return_data=True)[0] @@ -61,7 +61,7 @@ def initial_condition(self): self.equation_system.set_variable_values( vals.ravel("F"), [self.displacement_variable], - time_step_index=0, + time_step_index=1, iterate_index=0, ) # Find mortar cells on the top boundary @@ -86,7 +86,7 @@ def initial_condition(self): self.equation_system.set_variable_values( vals.ravel("F"), [self.interface_displacement_variable], - time_step_index=0, + time_step_index=1, iterate_index=0, ) @@ -207,20 +207,20 @@ def get_variables( sd = setup.mdg.subdomains(dim=setup.nd)[0] u_var = setup.equation_system.get_variables([setup.displacement_variable], [sd]) u_vals = setup.equation_system.get_variable_values( - variables=u_var, time_step_index=0 + variables=u_var, time_step_index=1 ).reshape(setup.nd, -1, order="F") p_var = setup.equation_system.get_variables( [setup.pressure_variable], setup.mdg.subdomains() ) p_vals = setup.equation_system.get_variable_values( - variables=p_var, time_step_index=0 + variables=p_var, time_step_index=1 ) p_var = setup.equation_system.get_variables( [setup.pressure_variable], setup.mdg.subdomains(dim=setup.nd - 1) ) p_frac = setup.equation_system.get_variable_values( - variables=p_var, time_step_index=0 + variables=p_var, time_step_index=1 ) # Fracture sd_frac = setup.mdg.subdomains(dim=setup.nd - 1) diff --git a/tests/models/test_thermoporomechanics.py b/tests/models/test_thermoporomechanics.py index a3c42370ed..e80d70f66d 100644 --- a/tests/models/test_thermoporomechanics.py +++ b/tests/models/test_thermoporomechanics.py @@ -79,13 +79,13 @@ def get_variables(setup): [setup.temperature_variable], setup.mdg.subdomains() ) t_vals = setup.equation_system.get_variable_values( - variables=t_var, time_step_index=0 + variables=t_var, time_step_index=1 ) t_var = setup.equation_system.get_variables( [setup.temperature_variable], setup.mdg.subdomains(dim=setup.nd - 1) ) t_frac = setup.equation_system.get_variable_values( - variables=t_var, time_step_index=0 + variables=t_var, time_step_index=1 ) return u_vals, p_vals, p_frac, jump, traction, t_vals, t_frac diff --git a/tests/numerics/ad/test_equation_system.py b/tests/numerics/ad/test_equation_system.py index 844c494e2e..45e55f4860 100644 --- a/tests/numerics/ad/test_equation_system.py +++ b/tests/numerics/ad/test_equation_system.py @@ -57,9 +57,11 @@ def test_evaluate_variables(): vals_it = 2 * np.ones([sd.num_cells]) pp.set_solution_values( - name=var_name, values=vals_sol, data=d, time_step_index=0 + name=var_name, values=vals_sol, data=d, time_step_index=1 ) pp.set_solution_values(name=var_name, values=vals_it, data=d, iterate_index=0) + # Provide values for previous iterate as well + pp.set_solution_values(name=var_name, values=vals_it, data=d, iterate_index=1) # We only need to test a single variable, they should all be the same. 
single_variable = eq_system.variables[0] @@ -379,7 +381,7 @@ def __init__(self, square_system=False): self.name_intf_top_variable, ] sys_man.set_variable_values( - global_vals, variables=all_variables, iterate_index=0, time_step_index=0 + global_vals, variables=all_variables, iterate_index=0, time_step_index=1 ) self.initial_values = global_vals @@ -644,18 +646,18 @@ def test_set_get_methods( # the global ordering. assert np.allclose(setup.initial_values[np.sort(inds)], retrieved_vals) # The time step solution should not have been updated - retrieved_vals_state = sys_man.get_variable_values(variables, time_step_index=0) + retrieved_vals_state = sys_man.get_variable_values(variables, time_step_index=1) assert np.allclose(setup.initial_values[np.sort(inds)], retrieved_vals_state) # Set values again, this time also to the time step solutions. if iterate: - sys_man.set_variable_values(vals, variables, iterate_index=0, time_step_index=0) + sys_man.set_variable_values(vals, variables, iterate_index=0, time_step_index=1) else: - sys_man.set_variable_values(vals, variables, time_step_index=0) + sys_man.set_variable_values(vals, variables, time_step_index=1) # Retrieve only values from time step solutions; iterate should be the same as # before (and the additive mode is checked below). - retrieved_vals_state = sys_man.get_variable_values(variables, time_step_index=0) + retrieved_vals_state = sys_man.get_variable_values(variables, time_step_index=1) assert np.allclose(vals, retrieved_vals_state) @@ -666,7 +668,7 @@ def test_set_get_methods( sys_man.set_variable_values(new_vals, variables, iterate_index=0) retrieved_vals2 = sys_man.get_variable_values(variables, iterate_index=0) if not iterate: - retrieved_vals2 = sys_man.get_variable_values(variables, time_step_index=0) + retrieved_vals2 = sys_man.get_variable_values(variables, time_step_index=1) # Iterate has either been updated, or it still has the initial value if iterate: assert np.allclose(new_vals, retrieved_vals2) @@ -677,11 +679,11 @@ def test_set_get_methods( # Set values to time step solutions. This should overwrite the old values. if iterate: sys_man.set_variable_values( - new_vals, variables, iterate_index=0, time_step_index=0 + new_vals, variables, iterate_index=0, time_step_index=1 ) else: - sys_man.set_variable_values(new_vals, variables, time_step_index=0) - retrieved_vals_state_2 = sys_man.get_variable_values(variables, time_step_index=0) + sys_man.set_variable_values(new_vals, variables, time_step_index=1) + retrieved_vals_state_2 = sys_man.get_variable_values(variables, time_step_index=1) assert np.allclose(new_vals, retrieved_vals_state_2) # Set the values again, this time with additive=True. This should double the @@ -690,7 +692,7 @@ def test_set_get_methods( sys_man.set_variable_values(new_vals, variables, iterate_index=0, additive=True) retrieved_vals3 = sys_man.get_variable_values(variables, iterate_index=0) elif not iterate: - retrieved_vals3 = sys_man.get_variable_values(variables, time_step_index=0) + retrieved_vals3 = sys_man.get_variable_values(variables, time_step_index=1) if iterate: assert np.allclose(2 * new_vals, retrieved_vals3) @@ -701,13 +703,13 @@ def test_set_get_methods( # Set to time step solutions, with additive=True. 
This should double the retrieved if iterate: sys_man.set_variable_values( - new_vals, variables, iterate_index=0, time_step_index=0, additive=True + new_vals, variables, iterate_index=0, time_step_index=1, additive=True ) else: sys_man.set_variable_values( - new_vals, variables, time_step_index=0, additive=True + new_vals, variables, time_step_index=1, additive=True ) - retrieved_vals_state_3 = sys_man.get_variable_values(variables, time_step_index=0) + retrieved_vals_state_3 = sys_man.get_variable_values(variables, time_step_index=1) assert np.allclose(2 * new_vals, retrieved_vals_state_3) # Test storage of multiple values of time step and iterate solutions from here and @@ -720,7 +722,7 @@ def _retrieve_and_check_time_step(known_values): # are as expected. for ind, val in enumerate(known_values): assert np.allclose( - sys_man.get_variable_values(variables, time_step_index=ind), val + sys_man.get_variable_values(variables, time_step_index=ind + 1), val ) # Building a few solution vectors and defining the desired solution indices @@ -728,12 +730,11 @@ def _retrieve_and_check_time_step(known_values): vals1 = vals0 * 2 vals2 = vals0 * 3 - solution_indices = np.array([0, 1, 2]) + solution_indices = np.array([1, 2, 3]) vals_mat = np.array([vals0, vals1, vals2]) # Test setting values at several indices and then gathering them - for i in solution_indices: - val = vals_mat[i].copy() + for i, val in zip(solution_indices, vals_mat): sys_man.set_variable_values(values=val, variables=variables, time_step_index=i) _retrieve_and_check_time_step([vals0, vals1, vals2]) @@ -748,7 +749,7 @@ def _retrieve_and_check_time_step(known_values): # Test additive = True to make sure only the most recently stored values are added # to. sys_man.set_variable_values( - values=vals0, variables=variables, time_step_index=0, additive=True + values=vals0, variables=variables, time_step_index=1, additive=True ) _retrieve_and_check_time_step([2 * vals0, vals0, vals1]) diff --git a/tests/numerics/ad/test_forward_mode.py b/tests/numerics/ad/test_forward_mode.py index a5a58c7bd3..2710c696c2 100644 --- a/tests/numerics/ad/test_forward_mode.py +++ b/tests/numerics/ad/test_forward_mode.py @@ -7,6 +7,8 @@ """ from __future__ import annotations +import pytest + import numpy as np import scipy.sparse as sps @@ -198,3 +200,57 @@ def test_exp_scalar_times_ad_var(): jac_a = sps.hstack([J, zero, zero]) assert np.allclose(b.val, np.exp(c * val)) and np.allclose(b.jac.A, jac.A) assert np.all(a.val == [1, 2, 3]) and np.all(a.jac.A == jac_a.A) + + +@pytest.mark.parametrize( + 'index,index_c', [ # indices and their complement for tested array + (1, [0, 2, 3, 4, 5, 6, 7, 8, 9]), + (slice(0, 10, 2), slice(1, 10, 2)), + (np.array([0, 2, 4, 6, 8], dtype=int), np.array([1, 3, 5, 7, 9], dtype=int)), + ] +) +def test_get_set_slice_ad_var(index, index_c): + a = initAdArrays([np.arange(10)])[0] + + val = np.arange(10) + jac = sps.csr_matrix(np.eye(10)) + + assert np.all(val == a.val) + assert np.all(jac == a.jac.toarray()) + + if isinstance(index, int): + target_val = np.array([val[index]]) + else: + target_val = val[index] + target_jac = jac[index].toarray() + + # Testing slicing + a_slice = a[index] + + assert a_slice.val.shape == target_val.shape + assert a_slice.jac.shape == target_jac.shape + assert np.all(a_slice.val == target_val) + assert np.all(a_slice.jac == target_jac) + + # testing setting values with slicing + + b = a[index] * 10. + assert np.all(b.val == val[index] * 10.) + assert np.all(b.jac.toarray() == jac[index] * 10.) 
+ + # setting an AD array should set val and jacobian row-wise + a_copy = a.copy() + a[index] = b + assert np.all(a[index].val == b.val) + assert np.all(a[index].jac.A == b.jac.A) + # complement should not be affected + assert np.all(a[index_c].val == a_copy[index_c].val) + assert np.all(a[index_c].jac.A == a_copy[index_c].jac.A) + + # setting a numpy array should only modify the values of the ad array + b = target_val * 10. + a = a_copy.copy() + a[index] = b + assert np.all(a[index].val == b) + assert np.all(a[index_c].val == a_copy[index_c].val) + assert np.all(a.jac.A == a_copy.jac.A) diff --git a/tests/numerics/ad/test_operator_functions.py b/tests/numerics/ad/test_operator_functions.py new file mode 100644 index 0000000000..d8959192af --- /dev/null +++ b/tests/numerics/ad/test_operator_functions.py @@ -0,0 +1,78 @@ +"""Test collection testing Ad operator functions. + +Testing involves their creation, arithmetic overloads and parsing. + +""" +from __future__ import annotations + +import pytest + +import numpy as np +import porepy as pp + + +def test_ad_function(): + """Tests involving the Ad wrapper for analytically represented functions. + + Note: + Value and Jacobian of various analytically represented functions are covered + in test_functions.py. + + This test tests the AD wrapper ad.Function and its wrapping functionality using + the identity function. + + """ + + func = lambda x: x # identity + + F = pp.ad.Function(func, 'identity') + + g = pp.CartGrid(np.array([3, 2])) + mdg = pp.meshing.subdomains_to_mdg([g]) + mdg.compute_geometry() + + sds = mdg.subdomains() + eqsys = pp.ad.EquationSystem(mdg) + eqsys.create_variables('foo', {'cells': 1}, sds) + + var = eqsys.md_variable('foo', sds) + + # setting values at current time, previous time and previous iter + vals = np.ones(mdg.num_subdomain_cells()) + eqsys.set_variable_values(vals, [var], iterate_index=0) + eqsys.set_variable_values(vals * 2, [var], iterate_index=1) + eqsys.set_variable_values(vals * 10, [var], time_step_index=1) + + # test that the function without call with operator is inoperable + for op in ['*', '/', '+', '-', '**', '@']: + with pytest.raises(TypeError): + _ = eval(f"F {op} var") + with pytest.raises(TypeError): + _ = eval(f"var {op} F") + + F_var = F(var) + + val = F_var.value_and_jacobian(eqsys) + # test values at current time step + assert np.all(val.val == 1.) + assert np.all(val.jac.A == np.eye(mdg.num_subdomain_cells())) + + # vals at previous iter and zero Jacobian + F_var_pi = F_var.previous_iteration() + val = F_var_pi.value_and_jacobian(eqsys) + assert np.all(val.val == 2.) + assert np.all(val.jac.A == 0.) + + # Analogously for prev time + F_var_pt = F_var.previous_timestep() + val = F_var_pt.value_and_jacobian(eqsys) + assert np.all(val.val == 10.) + assert np.all(val.jac.A == 0.) 
+ + # when evaluating with values only, the result should be a numpy array + val = F_var.value(eqsys) + val_pi = F_var_pi.value(eqsys) + val_pt = F_var_pt.value(eqsys) + assert isinstance(val, np.ndarray) + assert isinstance(val_pi, np.ndarray) + assert isinstance(val_pt, np.ndarray) \ No newline at end of file diff --git a/tests/numerics/ad/test_operators.py b/tests/numerics/ad/test_operators.py index 4d37300729..f4aa950382 100644 --- a/tests/numerics/ad/test_operators.py +++ b/tests/numerics/ad/test_operators.py @@ -114,7 +114,7 @@ def test_copy_operator_tree(): eq_system = pp.ad.EquationSystem(mdg) eq_system.create_variables("foo", {"cells": 1}, mdg.subdomains()) eq_system.set_variable_values( - np.zeros(eq_system.num_dofs()), iterate_index=0, time_step_index=0 + np.zeros(eq_system.num_dofs()), iterate_index=0, time_step_index=1 ) # In their initial state, all operators should have the same values @@ -241,8 +241,9 @@ def test_ad_operator_unary_minus_parsing(): mat2 = sps.csr_matrix(np.random.rand(3)) sp_array1 = pp.ad.SparseArray(mat1) sp_array2 = pp.ad.SparseArray(mat2) + eqsys = pp.ad.EquationSystem(pp.MixedDimensionalGrid()) op = sp_array1 + sp_array2 - assert np.allclose(op._parse_operator(-op, None).data, -(mat1 + mat2).data) + assert np.allclose(op._parse_operator(-op, eqsys, None).data, -(mat1 + mat2).data) def test_time_dependent_array(): @@ -258,7 +259,7 @@ def test_time_dependent_array(): for sd, sd_data in mdg.subdomains(return_data=True): vals_sol = np.zeros(sd.num_cells) pp.set_solution_values( - name="foo", values=vals_sol, data=sd_data, time_step_index=0 + name="foo", values=vals_sol, data=sd_data, time_step_index=1 ) vals_it = sd.dim * np.ones(sd.num_cells) @@ -270,7 +271,7 @@ def test_time_dependent_array(): # Create an empty primary variable list vals_sol = np.arange(intf.num_cells) pp.set_solution_values( - name="bar", values=vals_sol, data=intf_data, time_step_index=0 + name="bar", values=vals_sol, data=intf_data, time_step_index=1 ) vals_it = np.ones(intf.num_cells) @@ -281,7 +282,7 @@ def test_time_dependent_array(): for bg, bg_data in mdg.boundaries(return_data=True): vals_sol = np.arange(bg.num_cells) pp.set_solution_values( - name="foobar", values=vals_sol, data=bg_data, time_step_index=0 + name="foobar", values=vals_sol, data=bg_data, time_step_index=1 ) vals_it = np.ones(bg.num_cells) * bg.parent.dim @@ -354,6 +355,11 @@ def test_time_dependent_array(): "foofoobar", domains=[*mdg.subdomains(), *mdg.interfaces()] ) + # Time dependent arrays at two time steps back are possible, but the evaluation + # should raise a key error because no values are stored + with pytest.raises(KeyError): + _ = sd_prev_timestep.previous_timestep().parse(mdg) + def test_ad_variable_creation(): """Test creation of Ad variables by way of the EquationSystem. @@ -412,34 +418,6 @@ def test_ad_variable_creation(): assert mvar_1_copy.id == mvar_1.id assert mvar_1_deepcopy.id == mvar_1.id - # Get versions of the variables at previous iteration and time step. - # This should return variables with different ids - - # First variables - var_1_prev_iter = var_1.previous_iteration() - var_1_prev_time = var_1.previous_timestep() - assert var_1_prev_iter.id != var_1.id - assert var_1_prev_time.id != var_1.id - - # Then mixed-dimensional variables. - mvar_1_prev_iter = mvar_1.previous_iteration() - mvar_1_prev_time = mvar_1.previous_timestep() - assert mvar_1_prev_iter.id != mvar_1.id - assert mvar_1_prev_time.id != mvar_1.id - - # We prohibit creating a variable both on previous time step and iter. 
- with pytest.raises(ValueError): - _ = mvar_1_prev_iter.previous_timestep() - with pytest.raises(ValueError): - _ = mvar_1_prev_time.previous_iteration() - - # We prohibit creating a variable on more than one iter or time step behind. - # NOTE: This should be removed when this feature is implemented. - with pytest.raises(NotImplementedError): - _ = mvar_1_prev_iter.previous_iteration() - with pytest.raises(NotImplementedError): - _ = mvar_1_prev_time.previous_timestep() - def test_ad_variable_evaluation(): """Test that the values of Ad variables are as expected under evalutation @@ -519,7 +497,7 @@ def _compare_ad_objects(a, b): val_state = np.random.rand(sd.num_cells * num_dofs) val_iterate = np.random.rand(sd.num_cells * num_dofs) - pp.set_solution_values(name=var, values=val_state, data=data, time_step_index=0) + pp.set_solution_values(name=var, values=val_state, data=data, time_step_index=1) pp.set_solution_values(name=var, values=val_iterate, data=data, iterate_index=0) state_map[sd] = val_state @@ -532,7 +510,7 @@ def _compare_ad_objects(a, b): val_iterate = np.random.rand(sd.num_cells) pp.set_solution_values( - name=var2, values=val_state, data=data, time_step_index=0 + name=var2, values=val_state, data=data, time_step_index=1 ) pp.set_solution_values( name=var2, values=val_iterate, data=data, iterate_index=0 @@ -553,7 +531,7 @@ def _compare_ad_objects(a, b): val_iterate = np.random.rand(intf.num_cells * num_dofs) pp.set_solution_values( - name=mortar_var, values=val_state, data=data, time_step_index=0 + name=mortar_var, values=val_state, data=data, time_step_index=1 ) pp.set_solution_values( name=mortar_var, values=val_iterate, data=data, iterate_index=0 @@ -651,6 +629,107 @@ def _compare_ad_objects(a, b): assert np.allclose(true_state[ind1], v1_prev.value(eq_system, true_iterate)) + + +@pytest.mark.parametrize('prev_time', [True, False]) +def test_ad_variable_prev_time_and_iter(prev_time): + # Test only 1 variable, the rest should be covered by other tests + mdg, _ = pp.mdg_library.square_with_orthogonal_fractures( + "cartesian", + {"cell_size": 0.5}, + fracture_indices=[1], + ) + eqsys = pp.ad.EquationSystem(mdg) + + # Integer to test the depth of previous_* calls; could be a test parameter, but no need + depth = 3 + var_name = 'foo' + vec = np.ones(mdg.num_subdomain_cells()) + + eqsys.create_variables( + var_name, dof_info={"cells": 1}, subdomains=mdg.subdomains() + ) + var = eqsys.md_variable(var_name) + + # Starting point: time step index is 0 and iterate index is 0 + # (current time and iterate) + assert var.time_step_index == 0 + assert var.iterate_index == 0 + + # For AD to work, we need at least values at iterate_index = 0 + eqsys.set_variable_values(vec * 0., [var], iterate_index = 0) + + # Test configuration depends on whether prev iter or prev time is tested. + # The code is analogous. + if prev_time: + index_key = 'time_step_index' + other_index_key = 'iterate_index' + get_prev_key = 'previous_timestep' + + # prohibit prev time step variable to also be prev iter + with pytest.raises(ValueError): + var_pt = var.previous_timestep() + _ = var_pt.previous_iteration() + else: + index_key = 'iterate_index' + other_index_key = 'time_step_index' + get_prev_key = 'previous_iteration' + + # prohibit prev iter variable to also be prev time + with pytest.raises(ValueError): + var_pi = var.previous_iteration() + _ = var_pi.previous_timestep() + + # Set values except for the last step.
The current value is set above + for i in range(1, depth - 1): + eqsys.set_variable_values(vec * i, [var], **{index_key: i}) + + # Evaluating the last step should raise a key error because no values were set + with pytest.raises(KeyError): + # both time and iterate indices start at 0, hence the shift + var_prev = getattr(var, get_prev_key)(steps = depth - 1) + _ = var_prev.value(eqsys) + + # Evaluate prev var and check that the values are what they're supposed to be. + for i in range(1, depth - 1): + var_i = getattr(var, get_prev_key)(steps = 1) + val_i = var_i.value(eqsys) + assert np.allclose(val_i, i) + + # prev var has no Jacobian + ad_i = var_i.value_and_jacobian(eqsys) + assert np.all(ad_i.jac.A == 0.) + + # Test creating with explicit stepping and recursive stepping + vars_exp = [getattr(var, get_prev_key)(steps=i) for i in range(1, depth - 1)] + + vars_rec = [] + for i in range(1, depth - 1): + var_i = copy.copy(var) + for _ in range(i): + var_i = getattr(var_i, get_prev_key)() + vars_rec.append(var_i) + + assert len(vars_exp) == len(vars_rec) + vals_exp = [v.value(eqsys) for v in vars_exp] + vals_rec = [v.value(eqsys) for v in vars_rec] + + for v_e, v_r in zip(vals_exp, vals_rec): + assert np.allclose(v_e, v_r) + + # Testing IDs. NOTE: as of now, variables at prev iter have the same ID until + # full support is implemented + all_ids = set([var.id] + [v.id for v in vars_exp] + [v.id for v in vars_rec]) + assert len(all_ids) == 1 + + # Testing index values. + # For prev time, time step index increases starting from 0, while iterate is 0 + # For prev iter, iterate index increases starting from 0, while time is always 0 + for i in range(1, depth - 1): + assert getattr(vars_exp[i - 1], index_key) == i + assert getattr(vars_exp[i - 1], other_index_key) == 0 + assert getattr(vars_rec[i - 1], index_key) == i + assert getattr(vars_rec[i - 1], other_index_key) == 0 + + +@pytest.mark.parametrize( "grids", [ @@ -675,7 +754,7 @@ def test_variable_combinations(grids, variables): data[pp.PRIMARY_VARIABLES].update({var: {"cells": 1}}) vals = np.random.rand(sd.num_cells) - pp.set_solution_values(name=var, values=vals, data=data, time_step_index=0) + pp.set_solution_values(name=var, values=vals, data=data, time_step_index=1) # Ad boilerplate eq_system = pp.ad.EquationSystem(mdg) @@ -684,7 +763,7 @@ eq_system.set_variable_values( np.random.rand(mdg.num_subdomain_cells()), [var], - time_step_index=0, + time_step_index=1, iterate_index=0, ) # Standard Ad variables @@ -700,7 +779,7 @@ expr = var.value_and_jacobian(eq_system) # Check that the size of the variable is correct values = pp.get_solution_values( - name=var.name, data=data, time_step_index=0 + name=var.name, data=data, time_step_index=1 ) assert np.allclose(expr.val, values) # Check that the Jacobian matrix has the right number of columns @@ -713,7 +792,7 @@ for sub_var in var.sub_vars: data = mdg.subdomain_data(sub_var.domain) values = pp.get_solution_values( - name=sub_var.name, data=data, time_step_index=0 + name=sub_var.name, data=data, time_step_index=1 ) vals.append(values) @@ -772,10 +851,10 @@ def test_time_differentiation(): vals_sol_bar = 2 * np.ones(sd.num_cells) pp.set_solution_values( - name="foo", values=vals_sol_foo, data=sd_data, time_step_index=0 + name="foo", values=vals_sol_foo, data=sd_data, time_step_index=1 ) pp.set_solution_values( - name="bar", values=vals_sol_bar, data=sd_data,
time_step_index=0 + name="bar", values=vals_sol_bar, data=sd_data, time_step_index=1 ) vals_it_foo = 3 * np.ones(sd.num_cells) @@ -793,7 +872,7 @@ def test_time_differentiation(): vals_it_foo = np.ones(sd.num_cells) pp.set_solution_values( - name="foo", values=vals_sol_foo, data=sd_data, time_step_index=0 + name="foo", values=vals_sol_foo, data=sd_data, time_step_index=1 ) pp.set_solution_values( name="foo", values=vals_it_foo, data=sd_data, iterate_index=0 @@ -807,7 +886,7 @@ def test_time_differentiation(): vals_it = 2 * np.ones(intf.num_cells) pp.set_solution_values( - name="foobar", values=vals_sol, data=intf_data, time_step_index=0 + name="foobar", values=vals_sol, data=intf_data, time_step_index=1 ) pp.set_solution_values( name="foobar", values=vals_it, data=intf_data, iterate_index=0 @@ -1415,7 +1494,7 @@ def _get_ad_array( d = mdg.subdomain_data(g) pp.set_solution_values( - name="foo", values=variable_val, data=d, time_step_index=0 + name="foo", values=variable_val, data=d, time_step_index=1 ) pp.set_solution_values(name="foo", values=variable_val, data=d, iterate_index=0) mat = pp.ad.SparseArray(jac) diff --git a/tests/numerics/fracture_deformation/test_fracture_propagation.py b/tests/numerics/fracture_deformation/test_fracture_propagation.py index cdfe5d56f0..8edeaf9494 100644 --- a/tests/numerics/fracture_deformation/test_fracture_propagation.py +++ b/tests/numerics/fracture_deformation/test_fracture_propagation.py @@ -855,7 +855,7 @@ def _verify(self, mdg, split_faces): val_sol = cell_val_2d val_it = 2 * cell_val_2d - pp.set_solution_values(name=self.cv2, values=val_sol, data=d, time_step_index=0) + pp.set_solution_values(name=self.cv2, values=val_sol, data=d, time_step_index=1) pp.set_solution_values(name=self.cv2, values=val_it, data=d, iterate_index=0) for g in g_1d: @@ -866,7 +866,7 @@ def _verify(self, mdg, split_faces): val_it = 2 * cell_val_1d[g] pp.set_solution_values( - name=self.cv1, values=val_sol, data=d, time_step_index=0 + name=self.cv1, values=val_sol, data=d, time_step_index=1 ) pp.set_solution_values( name=self.cv1, values=val_it, data=d, iterate_index=0 @@ -883,7 +883,7 @@ def _verify(self, mdg, split_faces): val_it = 2 * cell_val_mortar[g] pp.set_solution_values( - name=self.mv, values=val_sol, data=d, time_step_index=0 + name=self.mv, values=val_sol, data=d, time_step_index=1 ) pp.set_solution_values(name=self.mv, values=val_it, data=d, iterate_index=0) @@ -921,7 +921,7 @@ def _verify(self, mdg, split_faces): # updated d = mdg.subdomain_data(g_2d) time_step_values_cv2 = pp.get_solution_values( - name=self.cv2, data=d, time_step_index=0 + name=self.cv2, data=d, time_step_index=1 ) assert np.all(time_step_values_cv2 == cell_val_2d) @@ -952,7 +952,7 @@ def _verify(self, mdg, split_faces): d = mdg.subdomain_data(g) time_step_values_cv1 = pp.get_solution_values( - name=self.cv1, data=d, time_step_index=0 + name=self.cv1, data=d, time_step_index=1 ) assert np.all(time_step_values_cv1 == truth_1d) @@ -974,7 +974,7 @@ def _verify(self, mdg, split_faces): val_1d_iterate_prev[g] = np.r_[val_1d_iterate_prev[g], extended_1d + 1] pp.set_solution_values( - name=self.cv1, values=val_1d_prev[g], data=d, time_step_index=0 + name=self.cv1, values=val_1d_prev[g], data=d, time_step_index=1 ) pp.set_solution_values( name=self.cv1, @@ -1008,7 +1008,7 @@ def _verify(self, mdg, split_faces): assert np.all(x_mortar == truth_mortar) assert np.all( - pp.get_solution_values(name=self.mv, data=d, time_step_index=0) + pp.get_solution_values(name=self.mv, data=d, time_step_index=1) == 
truth_mortar ) assert np.all( @@ -1030,7 +1030,7 @@ def _verify(self, mdg, split_faces): ] pp.set_solution_values( - name=self.mv, values=val_mortar_prev[g], data=d, time_step_index=0 + name=self.mv, values=val_mortar_prev[g], data=d, time_step_index=1 ) pp.set_solution_values( name=self.mv, diff --git a/tests/numerics/fv/test_tpfa.py b/tests/numerics/fv/test_tpfa.py index c7a2b635c9..031484b0d4 100644 --- a/tests/numerics/fv/test_tpfa.py +++ b/tests/numerics/fv/test_tpfa.py @@ -126,7 +126,7 @@ def initial_condition(self): values=np.array([2, 3], dtype=float), data=data, iterate_index=0, - time_step_index=0, + time_step_index=1, ) def set_geometry(self): diff --git a/tests/viz/test_exporter.py b/tests/viz/test_exporter.py index e4701ec9b2..ff6a946121 100644 --- a/tests/viz/test_exporter.py +++ b/tests/viz/test_exporter.py @@ -225,14 +225,14 @@ def test_mdg(setup: ExporterTestSetup): name="dummy_scalar", values=np.ones(sd.num_cells) * sd.dim, data=sd_data, - time_step_index=0, + time_step_index=1, ) pp.set_solution_values( name="dummy_vector", values=np.ones((3, sd.num_cells)) * sd.dim, data=sd_data, - time_step_index=0, + time_step_index=1, ) for intf, intf_data in mdg.interfaces(return_data=True): @@ -240,14 +240,14 @@ def test_mdg(setup: ExporterTestSetup): name="dummy_scalar", values=np.zeros(intf.num_cells), data=intf_data, - time_step_index=0, + time_step_index=1, ) pp.set_solution_values( name="unique_dummy_scalar", values=np.zeros(intf.num_cells), data=intf_data, - time_step_index=0, + time_step_index=1, ) # Export data @@ -432,14 +432,14 @@ def test_mdg_data_selection(setup: ExporterTestSetup): name="dummy_scalar", values=np.ones(sd.num_cells) * sd.dim, data=sd_data, - time_step_index=0, + time_step_index=1, ) pp.set_solution_values( name="dummy_vector", values=np.ones((3, sd.num_cells)) * sd.dim, data=sd_data, - time_step_index=0, + time_step_index=1, ) for intf, intf_data in mdg.interfaces(return_data=True): @@ -447,14 +447,14 @@ def test_mdg_data_selection(setup: ExporterTestSetup): name="dummy_scalar", values=np.zeros(intf.num_cells), data=intf_data, - time_step_index=0, + time_step_index=1, ) pp.set_solution_values( name="unique_dummy_scalar", values=np.zeros(intf.num_cells), data=intf_data, - time_step_index=0, + time_step_index=1, ) # Fetch separate subdomains diff --git a/tests/viz/test_plot_grid.py b/tests/viz/test_plot_grid.py index b383ad912b..d7e8c8cf00 100644 --- a/tests/viz/test_plot_grid.py +++ b/tests/viz/test_plot_grid.py @@ -60,7 +60,7 @@ def mdg(request: pytest.FixtureRequest) -> pp.MixedDimensionalGrid: for i in range(len(variables)): pp.set_solution_values( - name=variables[i], values=values[i], data=data, time_step_index=0 + name=variables[i], values=values[i], data=data, time_step_index=1 ) else: @@ -93,10 +93,10 @@ def test_plot_grid_simple_grid(mdg: MixedDimensionalGrid, vector_variable: str): The redundant dimensions are filled with zeros.""" grid, data = mdg.subdomains(return_data=True)[0] scalar_data = pp.get_solution_values( - name=SCALAR_VARIABLE, data=data, time_step_index=0 + name=SCALAR_VARIABLE, data=data, time_step_index=1 ) vector_data = pp.get_solution_values( - name=vector_variable, data=data, time_step_index=0 + name=vector_variable, data=data, time_step_index=1 ) vector_data = vector_data.reshape((mdg.dim_max(), -1), order="F") vector_data = np.vstack( diff --git a/tutorials/benchmark_simulation.ipynb b/tutorials/benchmark_simulation.ipynb index fb41562ca0..e979dfe16a 100644 --- a/tutorials/benchmark_simulation.ipynb +++ 
b/tutorials/benchmark_simulation.ipynb @@ -279,7 +279,7 @@ " if np.isclose(vals[0], -3.69897001):\n", " vals += 1.5\n", " assert vals.size == 1, \"Normal permeability is not constant.\"\n", - " pp.set_solution_values(\"permeability\", vals, data, time_step_index=0)\n", + " pp.set_solution_values(\"permeability\", vals, data, time_step_index=1)\n", "\n", "pp.plot_grid(\n", " perm_model.mdg,\n", diff --git a/tutorials/equations.ipynb b/tutorials/equations.ipynb index 0007e29637..31fdff22cf 100644 --- a/tutorials/equations.ipynb +++ b/tutorials/equations.ipynb @@ -528,7 +528,7 @@ " values=p_new,\n", " variables=[p],\n", " iterate_index=0, # | For a more advanced reader:\n", - " time_step_index=0, # | We reference method documentation to see what these keyword\n", + " time_step_index=1, # | We reference method documentation to see what these keyword\n", " additive=False, # | arguments do.\n", ")\n", "\n", diff --git a/tutorials/exporting_models.ipynb b/tutorials/exporting_models.ipynb index baca49eb7c..6787b7f658 100644 --- a/tutorials/exporting_models.ipynb +++ b/tutorials/exporting_models.ipynb @@ -178,7 +178,7 @@ " variables = self.equation_system.variables\n", " for var in variables:\n", " # Note that we use iterate_index=0 to get the current solution, whereas\n", - " # the regular exporter uses time_step_index=0.\n", + " # the regular exporter uses time_step_index=1.\n", " scaled_values = self.equation_system.get_variable_values(\n", " variables=[var], iterate_index=0\n", " )\n",
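A closing sketch: the manufactured-solution setups earlier in this patch replace the previous_timestep=True constructor flag of TimeDependentDenseArray with a previous_timestep() call. The snippet below illustrates the updated pattern under the same indexing convention, using only calls shown in those hunks; the array name "source_flow" and the grid are placeholders:

import numpy as np
import porepy as pp

g = pp.CartGrid(np.array([2, 2]))
mdg = pp.meshing.subdomains_to_mdg([g])
mdg.compute_geometry()

# Store source values for the previous time step (index 1 under the new convention).
for sd, data in mdg.subdomains(return_data=True):
    pp.set_solution_values(
        name="source_flow", values=np.ones(sd.num_cells), data=data, time_step_index=1
    )

# The array is created at the current time and shifted to the previous step afterwards.
source = pp.ad.TimeDependentDenseArray(
    name="source_flow", domains=mdg.subdomains()
).previous_timestep()
assert np.allclose(source.parse(mdg), 1.0)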