From c0958fc58409f62b01e83ceff3ef7fada1f82301 Mon Sep 17 00:00:00 2001 From: Peter Verveer Date: Thu, 6 Feb 2025 14:21:48 +0000 Subject: [PATCH] Use a scaler object to implement output constraint scaling --- src/ert/run_models/everest_run_model.py | 21 ++++- src/everest/optimizer/everest2ropt.py | 25 +----- src/everest/optimizer/opt_model_transforms.py | 78 ++++++++++++++++++- .../config_advanced.yml/ropt_config.json | 4 +- tests/everest/test_math_func.py | 39 ++++++++++ tests/everest/test_ropt_initialization.py | 4 + 6 files changed, 142 insertions(+), 29 deletions(-) diff --git a/src/ert/run_models/everest_run_model.py b/src/ert/run_models/everest_run_model.py index 20c07916507..87abdc2e336 100644 --- a/src/ert/run_models/everest_run_model.py +++ b/src/ert/run_models/everest_run_model.py @@ -34,6 +34,7 @@ from everest.everest_storage import EverestStorage, OptimalResult from everest.optimizer.everest2ropt import everest2ropt from everest.optimizer.opt_model_transforms import ( + ConstraintScaler, ObjectiveScaler, get_opt_model_transforms, ) @@ -223,16 +224,30 @@ def _init_transforms(self, variables: NDArray[np.float64]) -> OptModelTransforms transforms = get_opt_model_transforms( self._everest_config.controls, self._everest_config.objective_functions, + self._everest_config.output_constraints, realization_weights, ) # If required, initialize auto-scaling: assert isinstance(transforms.objectives, ObjectiveScaler) - if transforms.objectives.has_auto_scale: - objectives, _, _ = self._run_forward_model( + assert transforms.nonlinear_constraints is None or isinstance( + transforms.nonlinear_constraints, ConstraintScaler + ) + if transforms.objectives.has_auto_scale or ( + transforms.nonlinear_constraints + and transforms.nonlinear_constraints.has_auto_scale + ): + objectives, constraints, _ = self._run_forward_model( np.repeat(np.expand_dims(variables, axis=0), nreal, axis=0), realizations, ) - transforms.objectives.calculate_auto_scales(objectives) + if transforms.objectives.has_auto_scale: + transforms.objectives.calculate_auto_scales(objectives) + if ( + transforms.nonlinear_constraints + and transforms.nonlinear_constraints.has_auto_scale + ): + assert constraints is not None + transforms.nonlinear_constraints.calculate_auto_scales(constraints) return transforms def _create_optimizer(self) -> BasicOptimizer: diff --git a/src/everest/optimizer/everest2ropt.py b/src/everest/optimizer/everest2ropt.py index ccd3b0238b7..4c7fdf1d5aa 100644 --- a/src/everest/optimizer/everest2ropt.py +++ b/src/everest/optimizer/everest2ropt.py @@ -99,7 +99,6 @@ def _parse_objectives(objective_functions: list[ObjectiveFunctionConfig], ropt_c def _parse_input_constraints( - controls: FlattenedControls, input_constraints: list[InputConstraintConfig] | None, formatted_control_names: list[str], formatted_control_names_dotdash: list[str], @@ -160,8 +159,6 @@ def _parse_output_constraints( return rhs_values: list[float] = [] - scales: list[float] = [] - auto_scale: list[bool] = [] types: list[ConstraintType] = [] def _add_output_constraint( @@ -169,8 +166,6 @@ def _add_output_constraint( ): if rhs_value is not None: rhs_values.append(rhs_value) - scales.append(constr.scale if constr.scale is not None else 1.0) - auto_scale.append(constr.auto_scale or False) types.append(constraint_type) for constr in output_constraints: @@ -181,25 +176,16 @@ def _add_output_constraint( raise RuntimeError( "output constraint error: target cannot be combined with bounds" ) + _add_output_constraint(target, ConstraintType.EQ) 
_add_output_constraint( - target, - ConstraintType.EQ, + upper_bound, ConstraintType.LE, None if lower_bound is None else "upper" ) _add_output_constraint( - upper_bound, - ConstraintType.LE, - None if lower_bound is None else "upper", - ) - _add_output_constraint( - lower_bound, - ConstraintType.GE, - None if upper_bound is None else "lower", + lower_bound, ConstraintType.GE, None if upper_bound is None else "lower" ) ropt_config["nonlinear_constraints"] = { "rhs_values": rhs_values, - "scales": scales, - "auto_scale": auto_scale, "types": types, } @@ -350,12 +336,9 @@ def everest2ropt( """ ropt_config: dict[str, Any] = {} - flattened_controls = FlattenedControls(ever_config.controls) - - _parse_controls(flattened_controls, ropt_config) + _parse_controls(FlattenedControls(ever_config.controls), ropt_config) _parse_objectives(ever_config.objective_functions, ropt_config) _parse_input_constraints( - flattened_controls, ever_config.input_constraints, ever_config.formatted_control_names, ever_config.formatted_control_names_dotdash, diff --git a/src/everest/optimizer/opt_model_transforms.py b/src/everest/optimizer/opt_model_transforms.py index 7be5a689cc0..02f95f74608 100644 --- a/src/everest/optimizer/opt_model_transforms.py +++ b/src/everest/optimizer/opt_model_transforms.py @@ -2,10 +2,19 @@ import numpy as np from numpy.typing import NDArray +from ropt.enums import ConstraintType from ropt.transforms import OptModelTransforms -from ropt.transforms.base import ObjectiveTransform, VariableTransform - -from everest.config import ControlConfig, ObjectiveFunctionConfig +from ropt.transforms.base import ( + NonLinearConstraintTransform, + ObjectiveTransform, + VariableTransform, +) + +from everest.config import ( + ControlConfig, + ObjectiveFunctionConfig, + OutputConstraintConfig, +) from everest.config.utils import FlattenedControls @@ -79,9 +88,57 @@ def has_auto_scale(self) -> bool: return bool(np.any(self._auto_scales)) +class ConstraintScaler(NonLinearConstraintTransform): + def __init__( + self, scales: list[float], auto_scales: list[bool], weights: list[float] + ) -> None: + self._scales = np.asarray(scales, dtype=np.float64) + self._auto_scales = np.asarray(auto_scales, dtype=np.bool_) + self._weights = np.asarray(weights, dtype=np.float64) + + def transform_rhs_values( + self, rhs_values: NDArray[np.float64], types: NDArray[np.ubyte] + ) -> tuple[NDArray[np.float64], NDArray[np.ubyte]]: + def flip_type(constraint_type: ConstraintType) -> ConstraintType: + match constraint_type: + case ConstraintType.GE: + return ConstraintType.LE + case ConstraintType.LE: + return ConstraintType.GE + case _: + return constraint_type + + rhs_values = rhs_values / self._scales # noqa: PLR6104 + types = np.fromiter( + ( + flip_type(type_) if scale < 0 else type_ + for type_, scale in zip(types, self._scales, strict=False) + ), + np.ubyte, + ) + return rhs_values, types + + def forward(self, constraints: NDArray[np.float64]) -> NDArray[np.float64]: + return constraints / self._scales + + def backward(self, constraints: NDArray[np.float64]) -> NDArray[np.float64]: + return constraints * self._scales + + def calculate_auto_scales(self, constraints: NDArray[np.float64]) -> None: + auto_scales = np.abs( + np.nansum(constraints * self._weights[:, np.newaxis], axis=0) + ) + self._scales[self._auto_scales] *= auto_scales[self._auto_scales] + + @property + def has_auto_scale(self) -> bool: + return bool(np.any(self._auto_scales)) + + def get_opt_model_transforms( controls: list[ControlConfig], objectives: 
list[ObjectiveFunctionConfig], + constraints: list[OutputConstraintConfig] | None, weights: list[float], ) -> OptModelTransforms: flattened_controls = FlattenedControls(controls) @@ -107,4 +164,19 @@ def get_opt_model_transforms( ], weights, ), + nonlinear_constraints=( + ConstraintScaler( + [ + 1.0 if constraint.scale is None else constraint.scale + for constraint in constraints + ], + [ + False if constraint.auto_scale is None else constraint.auto_scale + for constraint in constraints + ], + weights, + ) + if constraints + else None + ), ) diff --git a/tests/everest/snapshots/test_ropt_initialization/test_everest2ropt_snapshot/config_advanced.yml/ropt_config.json b/tests/everest/snapshots/test_ropt_initialization/test_everest2ropt_snapshot/config_advanced.yml/ropt_config.json index 3f17ca490cb..3481519b6d3 100644 --- a/tests/everest/snapshots/test_ropt_initialization/test_everest2ropt_snapshot/config_advanced.yml/ropt_config.json +++ b/tests/everest/snapshots/test_ropt_initialization/test_everest2ropt_snapshot/config_advanced.yml/ropt_config.json @@ -51,10 +51,10 @@ "function_estimators": null, "realization_filters": null, "rhs_values": [ - 0.1 + 1.0 ], "scales": [ - 0.1 + 1.0 ], "types": [ 2 diff --git a/tests/everest/test_math_func.py b/tests/everest/test_math_func.py index 5b9bf6af95e..a44ac42f8d6 100644 --- a/tests/everest/test_math_func.py +++ b/tests/everest/test_math_func.py @@ -1,6 +1,7 @@ import os from pathlib import Path +import numpy as np import pytest import yaml @@ -181,3 +182,41 @@ def test_math_func_auto_scaled_objectives(copy_math_func_test_data_to_tmp): total = -(expected_p * 0.5 + expected_q * 0.25) / (0.5 + 0.25) assert total == optim + + +@pytest.mark.integration_test +def test_math_func_auto_scaled_constraints(copy_math_func_test_data_to_tmp): + config = EverestConfig.load_file("config_advanced.yml") + config_dict = config.model_dump(exclude_none=True) + + # control number of batches, no need for full convergence: + config_dict["optimization"]["convergence_tolerance"] = 1e-10 + config_dict["optimization"]["max_batch_num"] = 3 + + # Run with auto_scaling: + config_dict["environment"]["output_folder"] = "output1" + config_dict["output_constraints"][0]["auto_scale"] = True + config_dict["output_constraints"][0]["scale"] = 1.0 + config = EverestConfig.model_validate(config_dict) + run_model = EverestRunModel.create(config) + evaluator_server_config = EvaluatorServerConfig() + run_model.run_experiment(evaluator_server_config) + result1 = run_model.result + + # Run the equivalent without auto-scaling: + config_dict["environment"]["output_folder"] = "output2" + config_dict["output_constraints"][0]["auto_scale"] = False + config_dict["output_constraints"][0]["scale"] = 0.25 # x(0) + # We need one batch less, no auto-scaling: + config_dict["optimization"]["max_batch_num"] -= 1 + config = EverestConfig.model_validate(config_dict) + run_model = EverestRunModel.create(config) + evaluator_server_config = EvaluatorServerConfig() + run_model.run_experiment(evaluator_server_config) + result2 = run_model.result + + assert result1.total_objective == pytest.approx(result2.total_objective) + assert np.allclose( + np.fromiter(result1.controls.values(), dtype=np.float64), + np.fromiter(result2.controls.values(), dtype=np.float64), + ) diff --git a/tests/everest/test_ropt_initialization.py b/tests/everest/test_ropt_initialization.py index b545bc7f346..44361c06924 100644 --- a/tests/everest/test_ropt_initialization.py +++ b/tests/everest/test_ropt_initialization.py @@ -48,6 +48,7 @@ def 
test_everest2ropt_controls_auto_scale(): transforms=get_opt_model_transforms( config.controls, config.objective_functions, + config.output_constraints, config.model.realizations_weights, ), ) @@ -65,6 +66,7 @@ def test_everest2ropt_variables_auto_scale(): transforms=get_opt_model_transforms( config.controls, config.objective_functions, + config.output_constraints, config.model.realizations_weights, ), ) @@ -136,6 +138,7 @@ def test_everest2ropt_controls_input_constraint_auto_scale(): transforms=get_opt_model_transforms( config.controls, config.objective_functions, + config.output_constraints, config.model.realizations_weights, ), ) @@ -280,6 +283,7 @@ def test_everest2ropt_snapshot(case, snapshot): transforms=get_opt_model_transforms( config.controls, config.objective_functions, + config.output_constraints, config.model.realizations_weights, ), ).model_dump()
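
Note for reviewers: below is a minimal, NumPy-only sketch of the scaling arithmetic that the new ConstraintScaler applies, kept separate from the patched modules so it runs standalone. The input arrays are illustrative, hypothetical values, not taken from the patched tests. Per the patch, the auto-scale of each constraint is the absolute realization-weighted sum of the initial constraint evaluations; constraint values and right-hand-side values are then divided by the resulting scales, and a negative scale would flip the LE/GE constraint type in transform_rhs_values.

    import numpy as np

    # Illustrative inputs (hypothetical values): rows are realizations,
    # columns are output constraints.
    constraints = np.array([[0.2, -1.0],
                            [0.3, -1.2]])
    weights = np.array([0.5, 0.5])          # realization weights
    scales = np.array([1.0, 2.0])           # user-supplied scales
    auto_scale = np.array([True, False])    # per-constraint auto_scale flags

    # calculate_auto_scales: absolute realization-weighted sum of the initial
    # constraint evaluations, multiplied into the user-supplied scales where
    # auto_scale is enabled (mirrors the patched implementation).
    auto = np.abs(np.nansum(constraints * weights[:, np.newaxis], axis=0))
    scales[auto_scale] *= auto[auto_scale]

    # forward/backward: constraint values are divided by (respectively
    # multiplied with) the scales.
    scaled = constraints / scales
    assert np.allclose(scaled * scales, constraints)

    # transform_rhs_values: right-hand sides are divided by the scales as
    # well; a negative scale would additionally flip LE <-> GE.
    rhs_values = np.array([0.4, -1.0]) / scales
    print(scales, scaled, rhs_values)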