From 5073260a0faf2e4f4b3141139b6828fcdb74f653 Mon Sep 17 00:00:00 2001 From: birgits Date: Sun, 22 Oct 2023 13:23:09 +0200 Subject: [PATCH 01/41] Delete ToDo --- edisgo/flex_opt/reinforce_grid.py | 1 - 1 file changed, 1 deletion(-) diff --git a/edisgo/flex_opt/reinforce_grid.py b/edisgo/flex_opt/reinforce_grid.py index eaf387107..89a0a6515 100644 --- a/edisgo/flex_opt/reinforce_grid.py +++ b/edisgo/flex_opt/reinforce_grid.py @@ -905,7 +905,6 @@ def enhanced_reinforce_grid( logger.info(f"Initial mode 'lv' reinforcement for {lv_grid} successful.") except (ValueError, RuntimeError, exceptions.MaximumIterationError): logger.warning(f"Initial mode 'lv' reinforcement for {lv_grid} failed.") - # ToDo catch convergence reinforcement versuchen try: logger.info("Try initial enhanced reinforcement.") From 4f6fe13712fdb323ef57d121f3497f1246972408 Mon Sep 17 00:00:00 2001 From: birgits Date: Sun, 22 Oct 2023 14:22:38 +0200 Subject: [PATCH 02/41] Fix use same time index --- edisgo/flex_opt/check_tech_constraints.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/edisgo/flex_opt/check_tech_constraints.py b/edisgo/flex_opt/check_tech_constraints.py index 76e1414f7..ccf7b52c6 100644 --- a/edisgo/flex_opt/check_tech_constraints.py +++ b/edisgo/flex_opt/check_tech_constraints.py @@ -709,7 +709,7 @@ def stations_relative_load(edisgo_obj, grids=None): except Exception: pass - return loading / allowed_loading.loc[:, loading.columns] + return loading / allowed_loading.loc[loading.index, loading.columns] def components_relative_load(edisgo_obj, n_minus_one=False): From 8c6e57a082649ccc902e0c2a09beb18859f083e4 Mon Sep 17 00:00:00 2001 From: birgits Date: Sun, 22 Oct 2023 14:25:08 +0200 Subject: [PATCH 03/41] Allow selecting most critical time steps from subset of time steps --- edisgo/tools/temporal_complexity_reduction.py | 26 ++++++++++++++++--- .../test_temporal_complexity_reduction.py | 14 +++++----- 2 files changed, 30 insertions(+), 10 deletions(-) diff --git 
a/edisgo/tools/temporal_complexity_reduction.py b/edisgo/tools/temporal_complexity_reduction.py index d4c5a34e6..048b93017 100644 --- a/edisgo/tools/temporal_complexity_reduction.py +++ b/edisgo/tools/temporal_complexity_reduction.py @@ -396,7 +396,7 @@ def _scored_most_critical_voltage_issues_time_interval( return time_intervals_df -def _troubleshooting_mode(edisgo_obj): +def _troubleshooting_mode(edisgo_obj, timesteps=None): """ Handles non-convergence issues in power flow by iteratively reducing load and feed-in until the power flow converges. @@ -404,10 +404,21 @@ def _troubleshooting_mode(edisgo_obj): Load and feed-in is reduced in steps of 10% down to 20% of the original load and feed-in. The most critical time intervals / time steps can then be determined based on the power flow results with the reduced load and feed-in. + + Parameters + ----------- + edisgo_obj : :class:`~.EDisGo` + The eDisGo API object + timesteps : :pandas:`pandas.DatetimeIndex` or \ + :pandas:`pandas.Timestamp` + Timesteps specifies from which time steps to select most critical ones. It + defaults to None in which case all time steps in + :attr:`~.network.timeseries.TimeSeries.timeindex` are used. 
+ """ try: logger.debug("Running initial power flow for temporal complexity reduction.") - edisgo_obj.analyze() + edisgo_obj.analyze(timesteps=timesteps) # Exception is used, as non-convergence can also lead to RuntimeError, not only # ValueError except Exception: @@ -421,6 +432,7 @@ def _troubleshooting_mode(edisgo_obj): for fraction in np.arange(0.8, 0.0, step=-0.1): try: edisgo_obj.analyze( + timesteps=timesteps, troubleshooting_mode="iteration", range_start=fraction, range_num=1, @@ -615,6 +627,7 @@ def get_most_critical_time_intervals( def get_most_critical_time_steps( edisgo_obj: EDisGo, + timesteps=None, num_steps_loading=None, num_steps_voltage=None, percentage: float = 1.0, @@ -627,6 +640,11 @@ def get_most_critical_time_steps( ----------- edisgo_obj : :class:`~.EDisGo` The eDisGo API object + timesteps : :pandas:`pandas.DatetimeIndex` or \ + :pandas:`pandas.Timestamp` + Timesteps specifies from which time steps to select most critical ones. It + defaults to None in which case all time steps in + :attr:`~.network.timeseries.TimeSeries.timeindex` are used. num_steps_loading : int The number of most critical overloading events to select. If None, `percentage` is used. Default: None. 
@@ -654,10 +672,10 @@ def get_most_critical_time_steps( """ # Run power flow if use_troubleshooting_mode: - edisgo_obj = _troubleshooting_mode(edisgo_obj) + edisgo_obj = _troubleshooting_mode(edisgo_obj, timesteps=timesteps) else: logger.debug("Running initial power flow for temporal complexity reduction.") - edisgo_obj.analyze() + edisgo_obj.analyze(timesteps=timesteps) # Select most critical steps based on current violations loading_scores = _scored_most_critical_loading(edisgo_obj) diff --git a/tests/tools/test_temporal_complexity_reduction.py b/tests/tools/test_temporal_complexity_reduction.py index 05db1238d..4664f17fd 100644 --- a/tests/tools/test_temporal_complexity_reduction.py +++ b/tests/tools/test_temporal_complexity_reduction.py @@ -32,7 +32,6 @@ def setup_class(self): self.edisgo.analyze() def test__scored_most_critical_loading(self): - ts_crit = temp_red._scored_most_critical_loading(self.edisgo) assert len(ts_crit) == 180 @@ -40,7 +39,6 @@ def test__scored_most_critical_loading(self): assert np.isclose(ts_crit.iloc[-1], 1.14647) def test__scored_most_critical_voltage_issues(self): - ts_crit = temp_red._scored_most_critical_voltage_issues(self.edisgo) assert len(ts_crit) == 120 @@ -48,14 +46,20 @@ def test__scored_most_critical_voltage_issues(self): assert np.isclose(ts_crit.iloc[-1], 0.01062258) def test_get_most_critical_time_steps(self): - ts_crit = temp_red.get_most_critical_time_steps( self.edisgo, num_steps_loading=2, num_steps_voltage=2 ) assert len(ts_crit) == 3 - def test__scored_most_critical_loading_time_interval(self): + ts_crit = temp_red.get_most_critical_time_steps( + self.edisgo, + num_steps_loading=2, + num_steps_voltage=2, + timesteps=self.edisgo.timeseries.timeindex[:24], + ) + assert len(ts_crit) == 2 + def test__scored_most_critical_loading_time_interval(self): # test with default values ts_crit = temp_red._scored_most_critical_loading_time_interval(self.edisgo, 24) assert len(ts_crit) == 9 @@ -82,7 +86,6 @@ def 
test__scored_most_critical_loading_time_interval(self): assert ts_crit.loc[0, "percentage_max_overloaded_components"] == 1 def test__scored_most_critical_voltage_issues_time_interval(self): - # test with default values ts_crit = temp_red._scored_most_critical_voltage_issues_time_interval( self.edisgo, 24 @@ -107,7 +110,6 @@ def test__scored_most_critical_voltage_issues_time_interval(self): assert np.isclose(ts_crit.loc[0, "percentage_buses_max_voltage_deviation"], 1.0) def test_get_most_critical_time_intervals(self): - self.edisgo.timeseries.timeindex = self.edisgo.timeseries.timeindex[:25] self.edisgo.timeseries.scale_timeseries(p_scaling_factor=5, q_scaling_factor=5) steps = temp_red.get_most_critical_time_intervals( From 475a2f3cd5b29295121aa3bee6df99b8effc2b6f Mon Sep 17 00:00:00 2001 From: birgits Date: Sun, 22 Oct 2023 14:32:10 +0200 Subject: [PATCH 04/41] Change how reduced_analysis is set --- edisgo/edisgo.py | 12 +++++++----- edisgo/flex_opt/reinforce_grid.py | 22 ++++++++++++++-------- 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/edisgo/edisgo.py b/edisgo/edisgo.py index f0edd32b6..28b3d4baf 100755 --- a/edisgo/edisgo.py +++ b/edisgo/edisgo.py @@ -1196,6 +1196,7 @@ def _scale_timeseries(pypsa_network_copy, fraction): def reinforce( self, timesteps_pfa: str | pd.DatetimeIndex | pd.Timestamp | None = None, + reduced_analysis: bool = False, copy_grid: bool = False, max_while_iterations: int = 20, split_voltage_band: bool = True, @@ -1237,14 +1238,15 @@ def reinforce( time steps. If your time series already represents the worst-case, keep the default value of None because finding the worst-case snapshots takes some time. - * 'reduced_analysis' - Reinforcement is conducted for all time steps at which at least one - branch shows its highest overloading or one bus shows its highest voltage - violation. * :pandas:`pandas.DatetimeIndex` or \ :pandas:`pandas.Timestamp` Use this option to explicitly choose which time steps to consider. 
- + reduced_analysis : bool + If True, reinforcement is conducted for all time steps at which at least + one branch shows its highest overloading or one bus shows its highest + voltage violation. Time steps to consider are specified through parameter + `timesteps_pfa`. If False, all time steps in parameter `timesteps_pfa` + are used. Default: False. copy_grid : bool If True, reinforcement is conducted on a copied grid and discarded. Default: False. diff --git a/edisgo/flex_opt/reinforce_grid.py b/edisgo/flex_opt/reinforce_grid.py index 89a0a6515..27fe6ff20 100644 --- a/edisgo/flex_opt/reinforce_grid.py +++ b/edisgo/flex_opt/reinforce_grid.py @@ -25,6 +25,7 @@ def reinforce_grid( edisgo: EDisGo, timesteps_pfa: str | pd.DatetimeIndex | pd.Timestamp | None = None, + reduced_analysis: bool = False, max_while_iterations: int = 20, split_voltage_band: bool = True, mode: str | None = None, @@ -47,6 +48,10 @@ def reinforce_grid( timesteps_pfa specifies for which time steps power flow analysis is conducted. See parameter `timesteps_pfa` in function :attr:`~.EDisGo.reinforce` for more information. + reduced_analysis : bool + Specifies, whether to run reinforcement on a subset of time steps that are most + critical. See parameter `reduced_analysis` in function + :attr:`~.EDisGo.reinforce` for more information. max_while_iterations : int Maximum number of times each while loop is conducted. Default: 20. 
split_voltage_band : bool @@ -139,14 +144,6 @@ def reinforce_grid( snapshots["min_residual_load"], ] ).dropna() - elif isinstance(timesteps_pfa, str) and timesteps_pfa == "reduced_analysis": - timesteps_pfa = get_most_critical_time_steps( - edisgo, - num_steps_loading=kwargs.get("num_steps_loading", None), - num_steps_voltage=kwargs.get("num_steps_voltage", None), - percentage=kwargs.get("percentage", 1.0), - use_troubleshooting_mode=kwargs.get("use_troubleshooting_mode", True), - ) # if timesteps_pfa is not of type datetime or does not contain # datetimes throw an error elif not isinstance(timesteps_pfa, datetime.datetime): @@ -160,6 +157,15 @@ def reinforce_grid( f"Input {timesteps_pfa} for timesteps_pfa is not valid." ) + if reduced_analysis: + timesteps_pfa = get_most_critical_time_steps( + edisgo, + timesteps=timesteps_pfa, + num_steps_loading=kwargs.get("num_steps_loading", None), + num_steps_voltage=kwargs.get("num_steps_voltage", None), + percentage=kwargs.get("percentage", 1.0), + use_troubleshooting_mode=kwargs.get("use_troubleshooting_mode", True), + ) iteration_step = 1 lv_grid_id = kwargs.get("lv_grid_id", None) scale_timeseries = kwargs.get("scale_timeseries", None) From b7886fc6bbc2eb020796c393d77d4d85bcf04204 Mon Sep 17 00:00:00 2001 From: birgits Date: Sun, 22 Oct 2023 14:38:22 +0200 Subject: [PATCH 05/41] More changes to change how reduced analysis is set --- edisgo/edisgo.py | 9 +++++---- edisgo/flex_opt/reinforce_grid.py | 8 ++++---- tests/flex_opt/test_reinforce_grid.py | 2 +- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/edisgo/edisgo.py b/edisgo/edisgo.py index 28b3d4baf..360f8118f 100755 --- a/edisgo/edisgo.py +++ b/edisgo/edisgo.py @@ -1303,20 +1303,20 @@ def reinforce( reinforce MV/LV stations for LV worst-cases. Default: False. 
num_steps_loading : int - In case `timesteps_pfa` is set to 'reduced_analysis', this parameter can be + In case `reduced_analysis` is set to True, this parameter can be used to specify the number of most critical overloading events to consider. If None, `percentage` is used. Default: None. num_steps_voltage : int - In case `timesteps_pfa` is set to 'reduced_analysis', this parameter can be + In case `reduced_analysis` is set to True, this parameter can be used to specify the number of most critical voltage issues to select. If None, `percentage` is used. Default: None. percentage : float - In case `timesteps_pfa` is set to 'reduced_analysis', this parameter can be + In case `reduced_analysis` is set to True, this parameter can be used to specify the percentage of most critical time steps to select. The default is 1.0, in which case all most critical time steps are selected. Default: 1.0. use_troubleshooting_mode : bool - In case `timesteps_pfa` is set to 'reduced_analysis', this parameter can be + In case `reduced_analysis` is set to True, this parameter can be used to specify how to handle non-convergence issues in the power flow analysis. If set to True, non-convergence issues are tried to be circumvented by reducing load and feed-in until the power flow converges. @@ -1409,6 +1409,7 @@ def reinforce( func( edisgo_obj, + reduced_analysis=reduced_analysis, max_while_iterations=max_while_iterations, split_voltage_band=split_voltage_band, without_generator_import=without_generator_import, diff --git a/edisgo/flex_opt/reinforce_grid.py b/edisgo/flex_opt/reinforce_grid.py index 27fe6ff20..f7ff65133 100644 --- a/edisgo/flex_opt/reinforce_grid.py +++ b/edisgo/flex_opt/reinforce_grid.py @@ -89,20 +89,20 @@ def reinforce_grid( reinforce MV/LV stations for LV worst-cases. Default: False. 
num_steps_loading : int - In case `timesteps_pfa` is set to 'reduced_analysis', this parameter can be used + In case `reduced_analysis` is set to True, this parameter can be used to specify the number of most critical overloading events to consider. If None, `percentage` is used. Default: None. num_steps_voltage : int - In case `timesteps_pfa` is set to 'reduced_analysis', this parameter can be used + In case `reduced_analysis` is set to True, this parameter can be used to specify the number of most critical voltage issues to select. If None, `percentage` is used. Default: None. percentage : float - In case `timesteps_pfa` is set to 'reduced_analysis', this parameter can be used + In case `reduced_analysis` is set to True, this parameter can be used to specify the percentage of most critical time steps to select. The default is 1.0, in which case all most critical time steps are selected. Default: 1.0. use_troubleshooting_mode : bool - In case `timesteps_pfa` is set to 'reduced_analysis', this parameter can be used + In case `reduced_analysis` is set to True, this parameter can be used to specify how to handle non-convergence issues in the power flow analysis. See parameter `use_troubleshooting_mode` in function :attr:`~.EDisGo.reinforce` for more information. Default: True. 
diff --git a/tests/flex_opt/test_reinforce_grid.py b/tests/flex_opt/test_reinforce_grid.py index dd4ca4cb4..ad1b296a7 100644 --- a/tests/flex_opt/test_reinforce_grid.py +++ b/tests/flex_opt/test_reinforce_grid.py @@ -58,7 +58,7 @@ def test_reinforce_grid(self): # test reduced analysis res_reduced = reinforce_grid( edisgo=copy.deepcopy(self.edisgo), - timesteps_pfa="reduced_analysis", + reduced_analysis=True, num_steps_loading=4, ) assert_frame_equal( From 43552cfd726e3f714fa26784f08dc09b72328093 Mon Sep 17 00:00:00 2001 From: birgits Date: Sun, 22 Oct 2023 14:54:54 +0200 Subject: [PATCH 06/41] Change LV reniforcement --- edisgo/flex_opt/reinforce_grid.py | 67 ++++++++++++++++++++++++------- 1 file changed, 53 insertions(+), 14 deletions(-) diff --git a/edisgo/flex_opt/reinforce_grid.py b/edisgo/flex_opt/reinforce_grid.py index f7ff65133..7f4fe80d8 100644 --- a/edisgo/flex_opt/reinforce_grid.py +++ b/edisgo/flex_opt/reinforce_grid.py @@ -883,7 +883,7 @@ def enhanced_reinforce_grid( logger.info("Run initial grid reinforcement for single LV grids.") for lv_grid in list(edisgo_object.topology.mv_grid.lv_grids): logger.info(f"Check initial convergence for {lv_grid=}.") - _, ts_not_converged = edisgo_object.analyze( + ts_converged, ts_not_converged = edisgo_object.analyze( mode="lv", raise_not_converged=False, lv_grid_id=lv_grid.id ) if len(ts_not_converged) > 0: @@ -902,12 +902,31 @@ def enhanced_reinforce_grid( ) try: logger.info(f"Try initial mode 'lv' reinforcement for {lv_grid=}.") - edisgo_object.reinforce( - mode="lv", - lv_grid_id=lv_grid.id, - catch_convergence_problems=True, - **kwargs, - ) + if len(ts_not_converged) > 0: + # if there are time steps that did not converge, run reinforcement + # first on converged time steps + edisgo_object.reinforce( + mode="lv", + lv_grid_id=lv_grid.id, + catch_convergence_problems=False, + timesteps_pfa=ts_converged, + **kwargs, + ) + # run reinforcement again in catch-convergence mode with all time steps + 
edisgo_object.reinforce( + mode="lv", + lv_grid_id=lv_grid.id, + catch_convergence_problems=True, + **kwargs, + ) + else: + # if all time steps converged, run normal reinforcement + edisgo_object.reinforce( + mode="lv", + lv_grid_id=lv_grid.id, + catch_convergence_problems=False, + **kwargs, + ) logger.info(f"Initial mode 'lv' reinforcement for {lv_grid} successful.") except (ValueError, RuntimeError, exceptions.MaximumIterationError): logger.warning(f"Initial mode 'lv' reinforcement for {lv_grid} failed.") @@ -940,7 +959,7 @@ def enhanced_reinforce_grid( for lv_grid in list(edisgo_object.topology.mv_grid.lv_grids): logger.info(f"Check convergence for {lv_grid=}.") - _, ts_not_converged = edisgo_object.analyze( + ts_converged, ts_not_converged = edisgo_object.analyze( mode="lv", raise_not_converged=False, lv_grid_id=lv_grid.id ) if len(ts_not_converged) > 0: @@ -961,12 +980,32 @@ def enhanced_reinforce_grid( ) try: logger.info(f"Try mode 'lv' reinforcement for {lv_grid=}.") - edisgo_object.reinforce( - mode="lv", - lv_grid_id=lv_grid.id, - catch_convergence_problems=True, - **kwargs, - ) + if len(ts_not_converged) > 0: + # if there are time steps that did not converge, run reinforcement + # first on converged time steps + edisgo_object.reinforce( + mode="lv", + lv_grid_id=lv_grid.id, + catch_convergence_problems=False, + timesteps_pfa=ts_converged, + **kwargs, + ) + # run reinforcement again in catch-convergence mode with all time + # steps + edisgo_object.reinforce( + mode="lv", + lv_grid_id=lv_grid.id, + catch_convergence_problems=True, + **kwargs, + ) + else: + # if all time steps converged, run normal reinforcement + edisgo_object.reinforce( + mode="lv", + lv_grid_id=lv_grid.id, + catch_convergence_problems=False, + **kwargs, + ) logger.info(f"Mode 'lv' reinforcement for {lv_grid} successful.") except (ValueError, RuntimeError, exceptions.MaximumIterationError): logger.info(f"Mode 'lv' reinforcement for {lv_grid} failed.") From 
0f15fab11f4b9ede77b75f64b736d45420bfe0e0 Mon Sep 17 00:00:00 2001 From: birgits Date: Sun, 22 Oct 2023 18:00:00 +0200 Subject: [PATCH 07/41] Fix type hinting --- edisgo/edisgo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/edisgo/edisgo.py b/edisgo/edisgo.py index 360f8118f..4c5e92979 100755 --- a/edisgo/edisgo.py +++ b/edisgo/edisgo.py @@ -998,7 +998,7 @@ def analyze( range_num: int = 10, scale_timeseries: float | None = None, **kwargs, - ) -> tuple[pd.DataFrame, pd.DataFrame]: + ) -> tuple[pd.DatetimeIndex, pd.DatetimeIndex]: """ Conducts a static, non-linear power flow analysis. From b17d7141636bfe1770c0bfa176d9b3a3e1da770e Mon Sep 17 00:00:00 2001 From: birgits Date: Sun, 22 Oct 2023 18:01:21 +0200 Subject: [PATCH 08/41] Bug fix hand analyze parameters to get_most_critical_time_steps --- edisgo/flex_opt/reinforce_grid.py | 22 ++++--- edisgo/tools/temporal_complexity_reduction.py | 57 ++++++++++++++++--- 2 files changed, 63 insertions(+), 16 deletions(-) diff --git a/edisgo/flex_opt/reinforce_grid.py b/edisgo/flex_opt/reinforce_grid.py index 7f4fe80d8..c6f88b947 100644 --- a/edisgo/flex_opt/reinforce_grid.py +++ b/edisgo/flex_opt/reinforce_grid.py @@ -157,15 +157,6 @@ def reinforce_grid( f"Input {timesteps_pfa} for timesteps_pfa is not valid." 
) - if reduced_analysis: - timesteps_pfa = get_most_critical_time_steps( - edisgo, - timesteps=timesteps_pfa, - num_steps_loading=kwargs.get("num_steps_loading", None), - num_steps_voltage=kwargs.get("num_steps_voltage", None), - percentage=kwargs.get("percentage", 1.0), - use_troubleshooting_mode=kwargs.get("use_troubleshooting_mode", True), - ) iteration_step = 1 lv_grid_id = kwargs.get("lv_grid_id", None) scale_timeseries = kwargs.get("scale_timeseries", None) @@ -176,6 +167,19 @@ def reinforce_grid( else: analyze_mode = mode + if reduced_analysis: + timesteps_pfa = get_most_critical_time_steps( + edisgo, + mode=analyze_mode, + timesteps=timesteps_pfa, + lv_grid_id=lv_grid_id, + scale_timeseries=scale_timeseries, + num_steps_loading=kwargs.get("num_steps_loading", None), + num_steps_voltage=kwargs.get("num_steps_voltage", None), + percentage=kwargs.get("percentage", 1.0), + use_troubleshooting_mode=kwargs.get("use_troubleshooting_mode", True), + ) + edisgo.analyze( mode=analyze_mode, timesteps=timesteps_pfa, diff --git a/edisgo/tools/temporal_complexity_reduction.py b/edisgo/tools/temporal_complexity_reduction.py index 048b93017..69bf70afc 100644 --- a/edisgo/tools/temporal_complexity_reduction.py +++ b/edisgo/tools/temporal_complexity_reduction.py @@ -396,7 +396,13 @@ def _scored_most_critical_voltage_issues_time_interval( return time_intervals_df -def _troubleshooting_mode(edisgo_obj, timesteps=None): +def _troubleshooting_mode( + edisgo_obj, + mode=None, + timesteps=None, + lv_grid_id=None, + scale_timeseries=None, +): """ Handles non-convergence issues in power flow by iteratively reducing load and feed-in until the power flow converges. @@ -409,16 +415,31 @@ def _troubleshooting_mode(edisgo_obj, timesteps=None): ----------- edisgo_obj : :class:`~.EDisGo` The eDisGo API object + mode : str or None + Allows to toggle between power flow analysis for the whole network or just + the MV or one LV grid. 
See parameter `mode` in function + :attr:`~.EDisGo.analyze` for more information. timesteps : :pandas:`pandas.DatetimeIndex` or \ :pandas:`pandas.Timestamp` Timesteps specifies from which time steps to select most critical ones. It defaults to None in which case all time steps in :attr:`~.network.timeseries.TimeSeries.timeindex` are used. + lv_grid_id : int or str + ID (e.g. 1) or name (string representation, e.g. "LVGrid_1") of LV grid + to analyze in case mode is 'lv'. Default: None. + scale_timeseries : float or None + See parameter `scale_timeseries` in function :attr:`~.EDisGo.analyze` for more + information. """ try: logger.debug("Running initial power flow for temporal complexity reduction.") - edisgo_obj.analyze(timesteps=timesteps) + edisgo_obj.analyze( + mode=mode, + timesteps=timesteps, + lv_grid_id=lv_grid_id, + scale_timeseries=scale_timeseries, + ) # Exception is used, as non-convergence can also lead to RuntimeError, not only # ValueError except Exception: @@ -429,13 +450,17 @@ def _troubleshooting_mode(edisgo_obj, timesteps=None): "not all time steps converged. Power flow is run again with reduced " "network load." 
) - for fraction in np.arange(0.8, 0.0, step=-0.1): + if isinstance(scale_timeseries, float): + iter_start = scale_timeseries - 0.1 + else: + iter_start = 0.8 + for fraction in np.arange(iter_start, 0.0, step=-0.1): try: edisgo_obj.analyze( + mode=mode, timesteps=timesteps, - troubleshooting_mode="iteration", - range_start=fraction, - range_num=1, + lv_grid_id=lv_grid_id, + scale_timeseries=fraction, ) logger.info( f"Power flow fully converged for a reduction factor " @@ -627,7 +652,10 @@ def get_most_critical_time_intervals( def get_most_critical_time_steps( edisgo_obj: EDisGo, + mode=None, timesteps=None, + lv_grid_id=None, + scale_timeseries=None, num_steps_loading=None, num_steps_voltage=None, percentage: float = 1.0, @@ -640,11 +668,21 @@ def get_most_critical_time_steps( ----------- edisgo_obj : :class:`~.EDisGo` The eDisGo API object + mode : str or None + Allows to toggle between power flow analysis for the whole network or just + the MV or one LV grid. See parameter `mode` in function + :attr:`~.EDisGo.analyze` for more information. timesteps : :pandas:`pandas.DatetimeIndex` or \ :pandas:`pandas.Timestamp` Timesteps specifies from which time steps to select most critical ones. It defaults to None in which case all time steps in :attr:`~.network.timeseries.TimeSeries.timeindex` are used. + lv_grid_id : int or str + ID (e.g. 1) or name (string representation, e.g. "LVGrid_1") of LV grid + to analyze in case mode is 'lv'. Default: None. + scale_timeseries : float or None + See parameter `scale_timeseries` in function :attr:`~.EDisGo.analyze` for more + information. num_steps_loading : int The number of most critical overloading events to select. If None, `percentage` is used. Default: None. 
@@ -675,7 +713,12 @@ def get_most_critical_time_steps( edisgo_obj = _troubleshooting_mode(edisgo_obj, timesteps=timesteps) else: logger.debug("Running initial power flow for temporal complexity reduction.") - edisgo_obj.analyze(timesteps=timesteps) + edisgo_obj.analyze( + mode=mode, + timesteps=timesteps, + lv_grid_id=lv_grid_id, + scale_timeseries=scale_timeseries, + ) # Select most critical steps based on current violations loading_scores = _scored_most_critical_loading(edisgo_obj) From b54ebc986fbb649ee158468b399e51e9076c1123 Mon Sep 17 00:00:00 2001 From: birgits Date: Sun, 22 Oct 2023 18:11:13 +0200 Subject: [PATCH 09/41] Allow not running an inital analyze when using reduced analysis --- edisgo/edisgo.py | 5 ++++ edisgo/flex_opt/reinforce_grid.py | 8 ++++++ edisgo/tools/temporal_complexity_reduction.py | 28 ++++++++++++------- 3 files changed, 31 insertions(+), 10 deletions(-) diff --git a/edisgo/edisgo.py b/edisgo/edisgo.py index 4c5e92979..e41ba99c8 100755 --- a/edisgo/edisgo.py +++ b/edisgo/edisgo.py @@ -1323,6 +1323,11 @@ def reinforce( The most critical time steps are then determined based on the power flow results with the reduced load and feed-in. If False, an error will be raised in case time steps do not converge. Default: True. + run_initial_analyze : bool + In case `reduced_analysis` is set to True, this parameter can be + used to specify whether to run an initial analyze to determine most + critical time steps or to use existing results. If set to False, + `use_troubleshooting_mode` is ignored. Default: True. Returns -------- diff --git a/edisgo/flex_opt/reinforce_grid.py b/edisgo/flex_opt/reinforce_grid.py index c6f88b947..d33bbf902 100644 --- a/edisgo/flex_opt/reinforce_grid.py +++ b/edisgo/flex_opt/reinforce_grid.py @@ -106,6 +106,11 @@ def reinforce_grid( to specify how to handle non-convergence issues in the power flow analysis. See parameter `use_troubleshooting_mode` in function :attr:`~.EDisGo.reinforce` for more information. 
Default: True. + run_initial_analyze : bool + In case `reduced_analysis` is set to True, this parameter can be + used to specify whether to run an initial analyze to determine most + critical time steps or to use existing results. If set to False, + `use_troubleshooting_mode` is ignored. Default: True. Returns ------- @@ -178,6 +183,7 @@ def reinforce_grid( num_steps_voltage=kwargs.get("num_steps_voltage", None), percentage=kwargs.get("percentage", 1.0), use_troubleshooting_mode=kwargs.get("use_troubleshooting_mode", True), + run_initial_analyze=kwargs.get("run_initial_analyze", True), ) edisgo.analyze( @@ -929,6 +935,7 @@ def enhanced_reinforce_grid( mode="lv", lv_grid_id=lv_grid.id, catch_convergence_problems=False, + run_initial_analyze=False, **kwargs, ) logger.info(f"Initial mode 'lv' reinforcement for {lv_grid} successful.") @@ -1008,6 +1015,7 @@ def enhanced_reinforce_grid( mode="lv", lv_grid_id=lv_grid.id, catch_convergence_problems=False, + run_initial_analyze=False, **kwargs, ) logger.info(f"Mode 'lv' reinforcement for {lv_grid} successful.") diff --git a/edisgo/tools/temporal_complexity_reduction.py b/edisgo/tools/temporal_complexity_reduction.py index 69bf70afc..9703a157e 100644 --- a/edisgo/tools/temporal_complexity_reduction.py +++ b/edisgo/tools/temporal_complexity_reduction.py @@ -660,6 +660,7 @@ def get_most_critical_time_steps( num_steps_voltage=None, percentage: float = 1.0, use_troubleshooting_mode=True, + run_initial_analyze=True, ) -> pd.DatetimeIndex: """ Get the time steps with the most critical overloading and voltage issues. @@ -700,6 +701,10 @@ def get_most_critical_time_steps( are then determined based on the power flow results with the reduced load and feed-in. If False, an error will be raised in case time steps do not converge. Default: True. + run_initial_analyze : bool + This parameter can be used to specify whether to run an initial analyze to + determine most critical time steps or to use existing results. 
If set to False, + `use_troubleshooting_mode` is ignored. Default: True. Returns -------- @@ -709,16 +714,19 @@ def get_most_critical_time_steps( """ # Run power flow - if use_troubleshooting_mode: - edisgo_obj = _troubleshooting_mode(edisgo_obj, timesteps=timesteps) - else: - logger.debug("Running initial power flow for temporal complexity reduction.") - edisgo_obj.analyze( - mode=mode, - timesteps=timesteps, - lv_grid_id=lv_grid_id, - scale_timeseries=scale_timeseries, - ) + if run_initial_analyze: + if use_troubleshooting_mode: + edisgo_obj = _troubleshooting_mode(edisgo_obj, timesteps=timesteps) + else: + logger.debug( + "Running initial power flow for temporal complexity reduction." + ) + edisgo_obj.analyze( + mode=mode, + timesteps=timesteps, + lv_grid_id=lv_grid_id, + scale_timeseries=scale_timeseries, + ) # Select most critical steps based on current violations loading_scores = _scored_most_critical_loading(edisgo_obj) From 70748ce57d9c418dd8b3a08beea2e082170d8e8c Mon Sep 17 00:00:00 2001 From: birgits Date: Mon, 23 Oct 2023 16:25:14 +0200 Subject: [PATCH 10/41] Quick bug fix --- edisgo/flex_opt/reinforce_grid.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/edisgo/flex_opt/reinforce_grid.py b/edisgo/flex_opt/reinforce_grid.py index d33bbf902..a5eaa8976 100644 --- a/edisgo/flex_opt/reinforce_grid.py +++ b/edisgo/flex_opt/reinforce_grid.py @@ -935,7 +935,7 @@ def enhanced_reinforce_grid( mode="lv", lv_grid_id=lv_grid.id, catch_convergence_problems=False, - run_initial_analyze=False, + run_initial_analyze=True, **kwargs, ) logger.info(f"Initial mode 'lv' reinforcement for {lv_grid} successful.") @@ -1015,7 +1015,7 @@ def enhanced_reinforce_grid( mode="lv", lv_grid_id=lv_grid.id, catch_convergence_problems=False, - run_initial_analyze=False, + run_initial_analyze=True, **kwargs, ) logger.info(f"Mode 'lv' reinforcement for {lv_grid} successful.") From 708bdb3635c2797512cafaaff6aa61d815a156a1 Mon Sep 17 00:00:00 2001 From: birgits Date: 
Mon, 23 Oct 2023 16:58:31 +0200 Subject: [PATCH 11/41] Bug fix when analyze mode is lv, station node voltage cannot be checked --- edisgo/flex_opt/check_tech_constraints.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/edisgo/flex_opt/check_tech_constraints.py b/edisgo/flex_opt/check_tech_constraints.py index ccf7b52c6..70e9bbc42 100644 --- a/edisgo/flex_opt/check_tech_constraints.py +++ b/edisgo/flex_opt/check_tech_constraints.py @@ -1190,6 +1190,10 @@ def voltage_deviation_from_allowed_voltage_limits( v_dev_allowed_upper, v_dev_allowed_lower = allowed_voltage_limits( edisgo_obj, buses=buses, split_voltage_band=split_voltage_band ) + # the following is needed in case the power flow was only conducted for one LV + # grid - voltage at station node cannot be checked, warning is already raised + # in allowed_voltage_limits() + buses = v_dev_allowed_upper.columns # get voltages from power flow analysis v_mag_pu_pfa = edisgo_obj.results.v_res.loc[:, buses] From bd5c8888d29effb8848bfe32d9c130b91fada1f0 Mon Sep 17 00:00:00 2001 From: birgits Date: Mon, 23 Oct 2023 16:58:48 +0200 Subject: [PATCH 12/41] Change back as this wasn't the problem --- edisgo/flex_opt/reinforce_grid.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/edisgo/flex_opt/reinforce_grid.py b/edisgo/flex_opt/reinforce_grid.py index a5eaa8976..d33bbf902 100644 --- a/edisgo/flex_opt/reinforce_grid.py +++ b/edisgo/flex_opt/reinforce_grid.py @@ -935,7 +935,7 @@ def enhanced_reinforce_grid( mode="lv", lv_grid_id=lv_grid.id, catch_convergence_problems=False, - run_initial_analyze=True, + run_initial_analyze=False, **kwargs, ) logger.info(f"Initial mode 'lv' reinforcement for {lv_grid} successful.") @@ -1015,7 +1015,7 @@ def enhanced_reinforce_grid( mode="lv", lv_grid_id=lv_grid.id, catch_convergence_problems=False, - run_initial_analyze=True, + run_initial_analyze=False, **kwargs, ) logger.info(f"Mode 'lv' reinforcement for {lv_grid} successful.") From 
c832690c47415244d20f54be64f9a9fa9452038b Mon Sep 17 00:00:00 2001 From: birgits Date: Mon, 23 Oct 2023 16:59:24 +0200 Subject: [PATCH 13/41] Add return in case no timesteps for reinforcement exist --- edisgo/flex_opt/reinforce_grid.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/edisgo/flex_opt/reinforce_grid.py b/edisgo/flex_opt/reinforce_grid.py index d33bbf902..15c77d086 100644 --- a/edisgo/flex_opt/reinforce_grid.py +++ b/edisgo/flex_opt/reinforce_grid.py @@ -185,6 +185,9 @@ def reinforce_grid( use_troubleshooting_mode=kwargs.get("use_troubleshooting_mode", True), run_initial_analyze=kwargs.get("run_initial_analyze", True), ) + if len(timesteps_pfa) == 0: + logger.debug("Zero time steps for grid reinforcement.") + return edisgo.results edisgo.analyze( mode=analyze_mode, From 44a220333b2c35fde6a4c7c38f5de45fe2655e37 Mon Sep 17 00:00:00 2001 From: birgits Date: Mon, 23 Oct 2023 16:59:45 +0200 Subject: [PATCH 14/41] Bug fix add parameters when function is called --- edisgo/tools/temporal_complexity_reduction.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/edisgo/tools/temporal_complexity_reduction.py b/edisgo/tools/temporal_complexity_reduction.py index 9703a157e..8c29aa586 100644 --- a/edisgo/tools/temporal_complexity_reduction.py +++ b/edisgo/tools/temporal_complexity_reduction.py @@ -716,7 +716,13 @@ def get_most_critical_time_steps( # Run power flow if run_initial_analyze: if use_troubleshooting_mode: - edisgo_obj = _troubleshooting_mode(edisgo_obj, timesteps=timesteps) + edisgo_obj = _troubleshooting_mode( + edisgo_obj, + mode=mode, + timesteps=timesteps, + lv_grid_id=lv_grid_id, + scale_timeseries=scale_timeseries, + ) else: logger.debug( "Running initial power flow for temporal complexity reduction." 
From 51b53de5aa36b1d7f52919cac84d1a4591e5bd39 Mon Sep 17 00:00:00 2001 From: birgits Date: Mon, 23 Oct 2023 18:33:22 +0200 Subject: [PATCH 15/41] Fix only use reduced analysis when power flow converges --- edisgo/flex_opt/reinforce_grid.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/edisgo/flex_opt/reinforce_grid.py b/edisgo/flex_opt/reinforce_grid.py index 15c77d086..57106235a 100644 --- a/edisgo/flex_opt/reinforce_grid.py +++ b/edisgo/flex_opt/reinforce_grid.py @@ -705,6 +705,7 @@ def reinforce(): edisgo, timesteps_pfa=selected_timesteps, scale_timeseries=set_scaling_factor, + use_troubleshooting_mode=troubleshooting_mode, **kwargs, ) converged = True @@ -720,11 +721,13 @@ def reinforce(): # Get the timesteps from kwargs and then remove it to set it later manually timesteps_pfa = kwargs.pop("timesteps_pfa", None) selected_timesteps = timesteps_pfa + troubleshooting_mode_set = kwargs.pop("troubleshooting_mode", True) # Initial try logger.info("Run initial reinforcement.") set_scaling_factor = 1.0 iteration = 0 + troubleshooting_mode = False converged = reinforce() if converged is False: logger.info("Initial reinforcement did not succeed.") @@ -756,6 +759,7 @@ def reinforce(): "reinforcement." ) selected_timesteps = converging_timesteps + troubleshooting_mode = troubleshooting_mode_set reinforce() # Run reinforcement for time steps that did not converge after initial reinforcement @@ -765,6 +769,7 @@ def reinforce(): "reinforcement." 
) selected_timesteps = non_converging_timesteps + troubleshooting_mode = False converged = reinforce() if converged: @@ -798,6 +803,7 @@ def reinforce(): ) + highest_converged_scaling_factor logger.info(f"Try reinforcement with {set_scaling_factor=} at {iteration=}") + troubleshooting_mode = False converged = reinforce() if converged: logger.info( @@ -818,6 +824,7 @@ def reinforce(): if set_scaling_factor != 1: logger.info("Run final reinforcement.") selected_timesteps = timesteps_pfa + troubleshooting_mode = False reinforce() return edisgo.results From 1bd354b321c9624449e3cde2adaa6daa8ba27146 Mon Sep 17 00:00:00 2001 From: birgits Date: Tue, 24 Oct 2023 14:23:40 +0200 Subject: [PATCH 16/41] Add helper function temporarily --- edisgo/network/grids.py | 91 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 91 insertions(+) diff --git a/edisgo/network/grids.py b/edisgo/network/grids.py index e3cc3619b..978f5241b 100644 --- a/edisgo/network/grids.py +++ b/edisgo/network/grids.py @@ -350,6 +350,97 @@ def p_set_per_sector(self): """ return self.loads_df.groupby(["sector"]).sum()["p_set"] + def assign_length_to_grid_station(self): + """ + Assign length in km from each bus in the grid to the grid's station. + The length is written to column 'length_to_grid_station' in + :attr:`~.network.topology.Topology.buses_df`. + """ + buses_df = self._edisgo_obj.topology.buses_df + graph = self.graph + station = self.station.index[0] + + for bus in self.buses_df.index: + buses_df.at[bus, "length_to_grid_station"] = nx.shortest_path_length( + graph, source=station, target=bus, weight="length" + ) + + def assign_grid_feeder(self, mode: str = "grid_feeder"): + """ + Assigns MV or LV feeder to each bus and line, depending on the `mode`. + The feeder name is written to a new column `mv_feeder` or `grid_feeder`, + depending on the `mode`, in :class:`~.network.topology.Topology`'s + :attr:`~.network.topology.Topology.buses_df` and + :attr:`~.network.topology.Topology.lines_df`. 
+ The MV feeder name corresponds to the name of the neighboring node of the + HV/MV station. The grid feeder name corresponds to the name of the neighboring + node of the respective grid's station. The feeder name of the source node, i.e. + the station, is set to "station_node". + Parameters + ---------- + mode : str + Specifies whether to assign MV or grid feeder. + If mode is "mv_feeder" the MV feeder the busses and lines are in are + determined. If mode is "grid_feeder" LV busses and lines are assigned the + LV feeder they are in and MV busses and lines are assigned the MV feeder + they are in. Default: "grid_feeder". + """ + buses_df = self._edisgo_obj.topology.buses_df + lines_df = self._edisgo_obj.topology.lines_df + + if mode == "grid_feeder": + graph = self.graph + station = self.station.index[0] + column_name = "grid_feeder" + elif mode == "mv_feeder": + graph = self._edisgo_obj.topology.to_graph() + station = self._edisgo_obj.topology.transformers_hvmv_df["bus1"][0] + column_name = "mv_feeder" + else: + raise ValueError("Choose an existing mode.") + + # get all buses in network and remove station to get separate subgraphs + graph_nodes = list(graph.nodes()) + graph_nodes.remove(station) + subgraph = graph.subgraph(graph_nodes) + + buses_df.at[station, column_name] = "station_node" + for neighbor in graph.neighbors(station): + # get all nodes in that feeder by doing a DFS in the disconnected + # subgraph starting from the node adjacent to the station `neighbor` + feeder_graph = nx.dfs_tree(subgraph, source=neighbor) + feeder_lines = set() + for node in feeder_graph.nodes(): + buses_df.at[node, column_name] = neighbor + feeder_lines.update( + {edge[2]["branch_name"] for edge in graph.edges(node, data=True)} + ) + lines_df.loc[lines_df.index.isin(feeder_lines), column_name] = neighbor + + def get_feeder_stats(self) -> pd.DataFrame: + """ + Generate statistics of the grid's feeders. + So far, only the feeder length is determined. 
+ Returns + ------- + :pandas:`pandas.DataFrame` + Dataframe with feeder name in index and column 'length' containing the + respective feeder length in km. + """ + self.assign_grid_feeder() + self.assign_length_to_grid_station() + buses_df = self.buses_df + feeders = ( + buses_df.loc[ + buses_df["grid_feeder"] != "station_node", + ["grid_feeder", "length_to_grid_station"], + ] + .groupby("grid_feeder") + .max() + .rename(columns={"length_to_grid_station": "length"}) + ) + return feeders + def __repr__(self): return "_".join([self.__class__.__name__, str(self.id)]) From 9cc3e67211c04409b32b08630a4bd60211770b8f Mon Sep 17 00:00:00 2001 From: birgits Date: Tue, 24 Oct 2023 20:01:15 +0200 Subject: [PATCH 17/41] Bug fix --- edisgo/flex_opt/reinforce_grid.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/edisgo/flex_opt/reinforce_grid.py b/edisgo/flex_opt/reinforce_grid.py index 57106235a..0f7f171a5 100644 --- a/edisgo/flex_opt/reinforce_grid.py +++ b/edisgo/flex_opt/reinforce_grid.py @@ -185,7 +185,7 @@ def reinforce_grid( use_troubleshooting_mode=kwargs.get("use_troubleshooting_mode", True), run_initial_analyze=kwargs.get("run_initial_analyze", True), ) - if len(timesteps_pfa) == 0: + if timesteps_pfa is not None and len(timesteps_pfa) == 0: logger.debug("Zero time steps for grid reinforcement.") return edisgo.results From 74381c34edfd925d4ebc4cebe6d6a0c1c15d99be Mon Sep 17 00:00:00 2001 From: birgits Date: Thu, 18 Jan 2024 14:30:18 -0800 Subject: [PATCH 18/41] Revert "Change LV reniforcement" This reverts commit 43552cfd726e3f714fa26784f08dc09b72328093. 
--- edisgo/flex_opt/reinforce_grid.py | 69 +++++++------------------------ 1 file changed, 14 insertions(+), 55 deletions(-) diff --git a/edisgo/flex_opt/reinforce_grid.py b/edisgo/flex_opt/reinforce_grid.py index 0f7f171a5..a916f9aa7 100644 --- a/edisgo/flex_opt/reinforce_grid.py +++ b/edisgo/flex_opt/reinforce_grid.py @@ -903,7 +903,7 @@ def enhanced_reinforce_grid( logger.info("Run initial grid reinforcement for single LV grids.") for lv_grid in list(edisgo_object.topology.mv_grid.lv_grids): logger.info(f"Check initial convergence for {lv_grid=}.") - ts_converged, ts_not_converged = edisgo_object.analyze( + _, ts_not_converged = edisgo_object.analyze( mode="lv", raise_not_converged=False, lv_grid_id=lv_grid.id ) if len(ts_not_converged) > 0: @@ -922,32 +922,12 @@ def enhanced_reinforce_grid( ) try: logger.info(f"Try initial mode 'lv' reinforcement for {lv_grid=}.") - if len(ts_not_converged) > 0: - # if there are time steps that did not converge, run reinforcement - # first on converged time steps - edisgo_object.reinforce( - mode="lv", - lv_grid_id=lv_grid.id, - catch_convergence_problems=False, - timesteps_pfa=ts_converged, - **kwargs, - ) - # run reinforcement again in catch-convergence mode with all time steps - edisgo_object.reinforce( - mode="lv", - lv_grid_id=lv_grid.id, - catch_convergence_problems=True, - **kwargs, - ) - else: - # if all time steps converged, run normal reinforcement - edisgo_object.reinforce( - mode="lv", - lv_grid_id=lv_grid.id, - catch_convergence_problems=False, - run_initial_analyze=False, - **kwargs, - ) + edisgo_object.reinforce( + mode="lv", + lv_grid_id=lv_grid.id, + catch_convergence_problems=True, + **kwargs, + ) logger.info(f"Initial mode 'lv' reinforcement for {lv_grid} successful.") except (ValueError, RuntimeError, exceptions.MaximumIterationError): logger.warning(f"Initial mode 'lv' reinforcement for {lv_grid} failed.") @@ -980,7 +960,7 @@ def enhanced_reinforce_grid( for lv_grid in 
list(edisgo_object.topology.mv_grid.lv_grids): logger.info(f"Check convergence for {lv_grid=}.") - ts_converged, ts_not_converged = edisgo_object.analyze( + _, ts_not_converged = edisgo_object.analyze( mode="lv", raise_not_converged=False, lv_grid_id=lv_grid.id ) if len(ts_not_converged) > 0: @@ -1001,33 +981,12 @@ def enhanced_reinforce_grid( ) try: logger.info(f"Try mode 'lv' reinforcement for {lv_grid=}.") - if len(ts_not_converged) > 0: - # if there are time steps that did not converge, run reinforcement - # first on converged time steps - edisgo_object.reinforce( - mode="lv", - lv_grid_id=lv_grid.id, - catch_convergence_problems=False, - timesteps_pfa=ts_converged, - **kwargs, - ) - # run reinforcement again in catch-convergence mode with all time - # steps - edisgo_object.reinforce( - mode="lv", - lv_grid_id=lv_grid.id, - catch_convergence_problems=True, - **kwargs, - ) - else: - # if all time steps converged, run normal reinforcement - edisgo_object.reinforce( - mode="lv", - lv_grid_id=lv_grid.id, - catch_convergence_problems=False, - run_initial_analyze=False, - **kwargs, - ) + edisgo_object.reinforce( + mode="lv", + lv_grid_id=lv_grid.id, + catch_convergence_problems=True, + **kwargs, + ) logger.info(f"Mode 'lv' reinforcement for {lv_grid} successful.") except (ValueError, RuntimeError, exceptions.MaximumIterationError): logger.info(f"Mode 'lv' reinforcement for {lv_grid} failed.") From 4aff553ff5a77db84d845ef91aecf532f905c35e Mon Sep 17 00:00:00 2001 From: birgits Date: Thu, 18 Jan 2024 20:55:31 -0800 Subject: [PATCH 19/41] Fix test - setup class needs to be rerun --- tests/tools/test_temporal_complexity_reduction.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/tools/test_temporal_complexity_reduction.py b/tests/tools/test_temporal_complexity_reduction.py index 4664f17fd..9b760da7d 100644 --- a/tests/tools/test_temporal_complexity_reduction.py +++ b/tests/tools/test_temporal_complexity_reduction.py @@ -7,7 +7,7 @@ class 
TestTemporalComplexityReduction: - @classmethod + @pytest.fixture(autouse=True) def setup_class(self): self.edisgo = EDisGo(ding0_grid=pytest.ding0_test_network_path) self.edisgo.set_time_series_worst_case_analysis() From db325bca2986f236d54ceebb9366de6e6cfb9e6a Mon Sep 17 00:00:00 2001 From: birgits Date: Fri, 19 Jan 2024 15:35:42 -0800 Subject: [PATCH 20/41] Get rid of redundant code and parameter documentation --- edisgo/tools/temporal_complexity_reduction.py | 211 +++++++++++------- 1 file changed, 128 insertions(+), 83 deletions(-) diff --git a/edisgo/tools/temporal_complexity_reduction.py b/edisgo/tools/temporal_complexity_reduction.py index 267362529..7f7dd8da9 100644 --- a/edisgo/tools/temporal_complexity_reduction.py +++ b/edisgo/tools/temporal_complexity_reduction.py @@ -42,7 +42,7 @@ def _scored_most_critical_loading(edisgo_obj: EDisGo) -> pd.Series: # Get lines that have violations crit_lines_score = relative_i_res[relative_i_res > 1] - # Get most critical timesteps per component + # Get most critical time steps per component crit_lines_score = ( (crit_lines_score[crit_lines_score == crit_lines_score.max()]) .dropna(how="all") @@ -115,23 +115,23 @@ def _scored_most_critical_loading_time_interval( The eDisGo API object time_steps_per_time_interval : int Amount of continuous time steps in an interval that violation is determined for. - Currently, these can only be multiples of 24. + See parameter `time_steps_per_time_interval` in + :func:`~get_most_critical_time_intervals` for more information. Default: 168. time_steps_per_day : int - Number of time steps in one day. In case of an hourly resolution this is 24. - As currently only an hourly resolution is possible, this value should always be - 24. + Number of time steps in one day. See parameter `time_steps_per_day` in + :func:`~get_most_critical_time_intervals` for more information. Default: 24. time_step_day_start : int - Time step of the day at which each interval should start. 
If you want it to - start at midnight, this should be set to 0. Default: 0. + Time step of the day at which each interval should start. See parameter + `time_step_day_start` in :func:`~get_most_critical_time_intervals` for more + information. + Default: 0. overloading_factor : float Factor at which an overloading of a component is considered to be close enough - to the highest overloading of that component. This is used to determine the - number of components that reach their highest overloading in each time interval. - Per default, it is set to 0.95, which means that if the highest overloading of - a component is 2, it will be considered maximally overloaded at an overloading - of higher or equal to 2*0.95. + to the highest overloading of that component. See parameter + `overloading_factor` in :func:`~get_most_critical_time_intervals` for more + information. Default: 0.95. Returns @@ -171,52 +171,15 @@ def _scored_most_critical_loading_time_interval( costs = pd.concat([costs_lines, costs_trafos_lv, costs_trafos_mv]) crit_lines_cost = crit_lines_score * costs - # Get highest overloading in each window for each component and sum it up - crit_timesteps = ( - crit_lines_cost.rolling( - window=int(time_steps_per_time_interval), closed="right" - ) - .max() - .sum(axis=1) - ) - # select each nth time window to only consider windows starting at a certain time - # of day and sort time intervals in descending order - # ToDo: To make function work for frequencies other than hourly, the following - # needs to be adapted to index based on time index instead of iloc - crit_timesteps = ( - crit_timesteps.iloc[int(time_steps_per_time_interval) - 1 :] - .iloc[time_step_day_start + 1 :: time_steps_per_day] - .sort_values(ascending=False) - ) - # move time index as rolling gives the end of the time interval, but we want the - # beginning - timesteps = crit_timesteps.index - pd.DateOffset( - hours=int(time_steps_per_time_interval) - ) - time_intervals = [ - pd.date_range( - 
start=timestep, periods=int(time_steps_per_time_interval), freq="h" - ) - for timestep in timesteps - ] - - # make dataframe with time steps in each time interval and the percentage of - # components that reach their maximum overloading - time_intervals_df = pd.DataFrame( - index=range(len(time_intervals)), - columns=["time_steps", "percentage_max_overloaded_components"], + time_intervals_df = _most_critical_time_interval( + costs_per_time_step=crit_lines_cost, + grid_issues_magnitude_df=crit_lines_score, + which="overloading", + deviation_factor=overloading_factor, + time_steps_per_time_interval=time_steps_per_time_interval, + time_steps_per_day=time_steps_per_day, + time_step_day_start=time_step_day_start, ) - time_intervals_df["time_steps"] = time_intervals - lines_no_max = crit_lines_score.columns.values - total_lines = len(lines_no_max) - max_per_line = crit_lines_score.max() - for i in range(len(time_intervals)): - # check if worst overloading of every line is included in time interval - max_per_line_ti = crit_lines_score.loc[time_intervals[i]].max() - time_intervals_df["percentage_max_overloaded_components"][i] = ( - len(max_per_line_ti[max_per_line_ti >= max_per_line * overloading_factor]) - / total_lines - ) return time_intervals_df @@ -245,24 +208,23 @@ def _scored_most_critical_voltage_issues_time_interval( The eDisGo API object time_steps_per_time_interval : int Amount of continuous time steps in an interval that violation is determined for. - Currently, these can only be multiples of 24. + See parameter `time_steps_per_time_interval` in + :func:`~get_most_critical_time_intervals` for more information. Default: 168. time_steps_per_day : int - Number of time steps in one day. In case of an hourly resolution this is 24. - As currently only an hourly resolution is possible, this value should always be - 24. + Number of time steps in one day. See parameter `time_steps_per_day` in + :func:`~get_most_critical_time_intervals` for more information. Default: 24. 
time_step_day_start : int - Time step of the day at which each interval should start. If you want it to - start at midnight, this should be set to 0. Default: 0. + Time step of the day at which each interval should start. See parameter + `time_step_day_start` in :func:`~get_most_critical_time_intervals` for more + information. + Default: 0. voltage_deviation_factor : float Factor at which a voltage deviation at a bus is considered to be close enough - to the highest voltage deviation at that bus. This is used to determine the - number of buses that reach their highest voltage deviation in each time - interval. Per default, it is set to 0.95. This means that if the highest voltage - deviation at a bus is 0.2, it will be included in the determination of number - of buses that reach their maximum voltage deviation in a certain time interval - at a voltage deviation of higher or equal to 0.2*0.95. + to the highest voltage deviation at that bus. See parameter + `voltage_deviation_factor` in :func:`~get_most_critical_time_intervals` for more + information. Default: 0.95. 
Returns @@ -284,7 +246,7 @@ def _scored_most_critical_voltage_issues_time_interval( voltage_diff = check_tech_constraints.voltage_deviation_from_allowed_voltage_limits( edisgo_obj ) - voltage_diff = voltage_diff.abs()[voltage_diff.abs() > 0] + voltage_diff = voltage_diff[voltage_diff != 0.0].abs() # determine costs per feeder lv_station_buses = [ @@ -331,9 +293,92 @@ def _scored_most_critical_voltage_issues_time_interval( # weigh feeder voltage violation with costs per feeder voltage_diff_feeder = voltage_diff_feeder * costs_per_feeder.squeeze() - # Get the highest voltage issues in each window for each feeder and sum it up + time_intervals_df = _most_critical_time_interval( + costs_per_time_step=voltage_diff_feeder, + grid_issues_magnitude_df=voltage_diff_copy, + which="voltage", + deviation_factor=voltage_deviation_factor, + time_steps_per_time_interval=time_steps_per_time_interval, + time_steps_per_day=time_steps_per_day, + time_step_day_start=time_step_day_start, + ) + + return time_intervals_df + + +def _most_critical_time_interval( + costs_per_time_step, + grid_issues_magnitude_df, + which, + deviation_factor=0.95, + time_steps_per_time_interval=168, + time_steps_per_day=24, + time_step_day_start=0, +): + """ + Helper function used in functions + :func:`~_scored_most_critical_loading_time_interval` and + :func:`~_scored_most_critical_voltage_issues_time_interval` + to get time intervals sorted by severity of grid issue. + + This function currently only works for an hourly resolution! + + Parameters + ----------- + costs_per_time_step : :pandas:`pandas.DataFrame` + Dataframe containing the estimated grid expansion costs per line or feeder. + Columns contain line or feeder names. + Index of the dataframe are all time steps power flow analysis + was conducted for of type :pandas:`pandas.Timestamp`. 
+ grid_issues_magnitude_df : :pandas:`pandas.DataFrame` + Dataframe containing the relative overloading or voltage deviation per time + step in case of an overloading or voltage issue in that time step. + Columns contain line or bus names. + Index of the dataframe are all time steps power flow analysis + was conducted for of type :pandas:`pandas.Timestamp`. + which : str + Defines whether function is used to determine most critical time intervals for + voltage or overloading problems. Can either be "voltage" or "overloading". + deviation_factor : float + Factor at which a grid issue is considered to be close enough to the highest + grid issue. In case parameter `which` is "voltage", see parameter + `voltage_deviation_factor` in :func:`~get_most_critical_time_intervals` for more + information. In case parameter `which` is "overloading", see parameter + `overloading_factor` in :func:`~get_most_critical_time_intervals` for more + information. + Default: 0.95. + time_steps_per_time_interval : int + Amount of continuous time steps in an interval that violation is determined for. + See parameter `time_steps_per_time_interval` in + :func:`~get_most_critical_time_intervals` for more information. + Default: 168. + time_steps_per_day : int + Number of time steps in one day. See parameter `time_steps_per_day` in + :func:`~get_most_critical_time_intervals` for more information. + Default: 24. + time_step_day_start : int + Time step of the day at which each interval should start. See parameter + `time_step_day_start` in :func:`~get_most_critical_time_intervals` for more + information. + Default: 0. + + Returns + -------- + :pandas:`pandas.DataFrame` + Contains time intervals in which grid expansion needs due to voltage issues + are detected. The time intervals are sorted descending + by the expected cumulated grid expansion costs, so that the time interval with + the highest expected costs corresponds to index 0. 
The time steps in the + respective time interval are given in column "time_steps" and the share + of buses for which the maximum voltage deviation is reached during the time + interval is given in column "percentage_buses_max_voltage_deviation". Each bus + is only considered once. That means if its maximum voltage deviation was + already considered in an earlier time interval, it is not considered again. + + """ + # get the highest issues in each window for each feeder and sum it up crit_timesteps = ( - voltage_diff_feeder.rolling( + costs_per_time_step.rolling( window=int(time_steps_per_time_interval), closed="right" ) .max() @@ -359,26 +404,26 @@ def _scored_most_critical_voltage_issues_time_interval( ] # make dataframe with time steps in each time interval and the percentage of - # buses that reach their maximum voltage deviation + # buses/branches that reach their maximum voltage deviation / overloading + if which == "voltage": + percentage = "percentage_buses_max_voltage_deviation" + else: + percentage = "percentage_max_overloaded_components" time_intervals_df = pd.DataFrame( index=range(len(time_intervals)), - columns=["time_steps", "percentage_buses_max_voltage_deviation"], + columns=["time_steps", percentage], ) time_intervals_df["time_steps"] = time_intervals - max_per_bus = voltage_diff_copy.max().fillna(0) - buses_no_max = max_per_bus.index.values - total_buses = len(buses_no_max) + max_per_bus = grid_issues_magnitude_df.max().fillna(0) + total_buses = len(grid_issues_magnitude_df.columns) for i in range(len(time_intervals)): # check if worst voltage deviation of every bus is included in time interval - max_per_bus_ti = voltage_diff_copy.loc[time_intervals[i]].max() - time_intervals_df["percentage_buses_max_voltage_deviation"][i] = ( - len( - max_per_bus_ti[max_per_bus_ti >= max_per_bus * voltage_deviation_factor] - ) + max_per_bus_ti = grid_issues_magnitude_df.loc[time_intervals[i]].max() + time_intervals_df[percentage][i] = ( + 
len(max_per_bus_ti[max_per_bus_ti >= max_per_bus * deviation_factor]) / total_buses ) - return time_intervals_df From 716c636fe461e607660866b7be2f25ca76825dc7 Mon Sep 17 00:00:00 2001 From: birgits Date: Fri, 19 Jan 2024 16:42:16 -0800 Subject: [PATCH 21/41] Allow weighting by costs or without weighting factor --- edisgo/tools/temporal_complexity_reduction.py | 159 +++++++++++------- 1 file changed, 102 insertions(+), 57 deletions(-) diff --git a/edisgo/tools/temporal_complexity_reduction.py b/edisgo/tools/temporal_complexity_reduction.py index 7f7dd8da9..abf743ba1 100644 --- a/edisgo/tools/temporal_complexity_reduction.py +++ b/edisgo/tools/temporal_complexity_reduction.py @@ -3,7 +3,6 @@ import logging import os -from copy import deepcopy from typing import TYPE_CHECKING import numpy as np @@ -97,12 +96,13 @@ def _scored_most_critical_loading_time_interval( time_steps_per_day=24, time_step_day_start=0, overloading_factor=0.95, + weigh_by_costs=True, ): """ Get time intervals sorted by severity of overloadings. - The overloading is weighed by the estimated expansion costs of each respective line - and transformer. + The overloading can weighed by the estimated expansion costs of each respective line + and transformer. See parameter `weigh_by_costs` for more information. The length of the time intervals and hour of day at which the time intervals should begin can be set through the parameters `time_steps_per_time_interval` and `time_step_day_start`. @@ -133,6 +133,11 @@ def _scored_most_critical_loading_time_interval( `overloading_factor` in :func:`~get_most_critical_time_intervals` for more information. Default: 0.95. + weigh_by_costs : bool + Defines whether overloading issues should be weighed by estimated grid expansion + costs or not. See parameter `weigh_by_costs` in + :func:`~get_most_critical_time_intervals` for more information. + Default: True. 
Returns -------- @@ -153,26 +158,29 @@ def _scored_most_critical_loading_time_interval( # Get lines that have violations and replace nan values with 0 crit_lines_score = relative_i_res[relative_i_res > 1].fillna(0) - # weight line violations with expansion costs - costs_lines = ( - line_expansion_costs(edisgo_obj).drop(columns="voltage_level").sum(axis=1) - ) - costs_trafos_lv = pd.Series( - index=[ - str(lv_grid) + "_station" - for lv_grid in list(edisgo_obj.topology.mv_grid.lv_grids) - ], - data=edisgo_obj.config["costs_transformers"]["lv"], - ) - costs_trafos_mv = pd.Series( - index=["MVGrid_" + str(edisgo_obj.topology.id) + "_station"], - data=edisgo_obj.config["costs_transformers"]["mv"], - ) - costs = pd.concat([costs_lines, costs_trafos_lv, costs_trafos_mv]) - crit_lines_cost = crit_lines_score * costs + if weigh_by_costs: + # weigh line violations with expansion costs + costs_lines = ( + line_expansion_costs(edisgo_obj).drop(columns="voltage_level").sum(axis=1) + ) + costs_trafos_lv = pd.Series( + index=[ + str(lv_grid) + "_station" + for lv_grid in list(edisgo_obj.topology.mv_grid.lv_grids) + ], + data=edisgo_obj.config["costs_transformers"]["lv"], + ) + costs_trafos_mv = pd.Series( + index=["MVGrid_" + str(edisgo_obj.topology.id) + "_station"], + data=edisgo_obj.config["costs_transformers"]["mv"], + ) + costs = pd.concat([costs_lines, costs_trafos_lv, costs_trafos_mv]) + crit_lines_weighed = crit_lines_score * costs + else: + crit_lines_weighed = crit_lines_score.copy() time_intervals_df = _most_critical_time_interval( - costs_per_time_step=crit_lines_cost, + costs_per_time_step=crit_lines_weighed, grid_issues_magnitude_df=crit_lines_score, which="overloading", deviation_factor=overloading_factor, @@ -190,12 +198,13 @@ def _scored_most_critical_voltage_issues_time_interval( time_steps_per_day=24, time_step_day_start=0, voltage_deviation_factor=0.95, + weigh_by_costs=True, ): """ Get time intervals sorted by severity of voltage issues. 
- The voltage issues are weighed by the estimated expansion costs in each respective - feeder. + The voltage issues can be weighed by the estimated expansion costs in each + respective feeder. See parameter `weigh_by_costs` for more information. The length of the time intervals and hour of day at which the time intervals should begin can be set through the parameters `time_steps_per_time_interval` and `time_step_day_start`. @@ -226,6 +235,11 @@ def _scored_most_critical_voltage_issues_time_interval( `voltage_deviation_factor` in :func:`~get_most_critical_time_intervals` for more information. Default: 0.95. + weigh_by_costs : bool + Defines whether voltage issues should be weighed by estimated grid expansion + costs or not. See parameter `weigh_by_costs` in + :func:`~get_most_critical_time_intervals` for more information. + Default: True. Returns -------- @@ -242,60 +256,61 @@ def _scored_most_critical_voltage_issues_time_interval( """ - # Get voltage deviation from allowed voltage limits + # get voltage deviation from allowed voltage limits voltage_diff = check_tech_constraints.voltage_deviation_from_allowed_voltage_limits( edisgo_obj ) - voltage_diff = voltage_diff[voltage_diff != 0.0].abs() - - # determine costs per feeder - lv_station_buses = [ - lv_grid.station.index[0] for lv_grid in edisgo_obj.topology.mv_grid.lv_grids - ] - costs_lines = ( - line_expansion_costs(edisgo_obj).drop(columns="voltage_level").sum(axis=1) - ) - costs_trafos_lv = pd.Series( - index=lv_station_buses, - data=edisgo_obj.config._data["costs_transformers"]["lv"], - ) - costs = pd.concat([costs_lines, costs_trafos_lv]) + voltage_diff = voltage_diff[voltage_diff != 0.0].abs().fillna(0) # set feeder using MV feeder for MV components and LV feeder for LV components edisgo_obj.topology.assign_feeders(mode="grid_feeder") # feeders of buses at MV/LV station's secondary sides are set to the name of the # station bus to have them as separate feeders + lv_station_buses = [ + 
lv_grid.station.index[0] for lv_grid in edisgo_obj.topology.mv_grid.lv_grids + ] edisgo_obj.topology.buses_df.loc[lv_station_buses, "grid_feeder"] = lv_station_buses - feeder_lines = edisgo_obj.topology.lines_df.grid_feeder - feeder_trafos_lv = pd.Series( - index=lv_station_buses, - data=lv_station_buses, - ) - feeder = pd.concat([feeder_lines, feeder_trafos_lv]) - costs_per_feeder = ( - pd.concat([costs.rename("costs"), feeder.rename("feeder")], axis=1) - .groupby(by="feeder")[["costs"]] - .sum() - ) - # check for every feeder if any of the buses within violate the allowed voltage # deviation, by grouping voltage_diff per feeder feeder_buses = edisgo_obj.topology.buses_df.grid_feeder columns = [feeder_buses.loc[col] for col in voltage_diff.columns] - voltage_diff_copy = deepcopy(voltage_diff).fillna(0) - voltage_diff.columns = columns + voltage_diff_feeder = voltage_diff.copy() + voltage_diff_feeder.columns = columns voltage_diff_feeder = ( - voltage_diff.transpose().reset_index().groupby(by="index").sum().transpose() + voltage_diff.transpose().reset_index().groupby(by="Bus").sum().transpose() ) voltage_diff_feeder[voltage_diff_feeder != 0] = 1 - # weigh feeder voltage violation with costs per feeder - voltage_diff_feeder = voltage_diff_feeder * costs_per_feeder.squeeze() + if weigh_by_costs: + # determine costs per feeder + costs_lines = ( + line_expansion_costs(edisgo_obj).drop(columns="voltage_level").sum(axis=1) + ) + costs_trafos_lv = pd.Series( + index=lv_station_buses, + data=edisgo_obj.config._data["costs_transformers"]["lv"], + ) + costs = pd.concat([costs_lines, costs_trafos_lv]) + + feeder_lines = edisgo_obj.topology.lines_df.grid_feeder + feeder_trafos_lv = pd.Series( + index=lv_station_buses, + data=lv_station_buses, + ) + feeder = pd.concat([feeder_lines, feeder_trafos_lv]) + costs_per_feeder = ( + pd.concat([costs.rename("costs"), feeder.rename("feeder")], axis=1) + .groupby(by="feeder")[["costs"]] + .sum() + ) + + # weigh feeder voltage violation 
with costs per feeder + voltage_diff_feeder = voltage_diff_feeder * costs_per_feeder.squeeze() time_intervals_df = _most_critical_time_interval( costs_per_time_step=voltage_diff_feeder, - grid_issues_magnitude_df=voltage_diff_copy, + grid_issues_magnitude_df=voltage_diff, which="voltage", deviation_factor=voltage_deviation_factor, time_steps_per_time_interval=time_steps_per_time_interval, @@ -524,11 +539,12 @@ def get_most_critical_time_intervals( use_troubleshooting_mode=True, overloading_factor=0.95, voltage_deviation_factor=0.95, + weigh_by_costs=True, ): """ Get time intervals sorted by severity of overloadings as well as voltage issues. - The overloading and voltage issues are weighed by the estimated expansion costs + The overloading and voltage issues can be weighed by the estimated expansion costs solving the issue would require. The length of the time intervals and hour of day at which the time intervals should begin can be set through the parameters `time_steps_per_time_interval` and @@ -585,6 +601,33 @@ def get_most_critical_time_intervals( of buses that reach their maximum voltage deviation in a certain time interval at a voltage deviation of higher or equal to 0.2*0.95. Default: 0.95. + weigh_by_costs : bool + Defines whether overloading and voltage issues should be weighed by estimated + grid expansion costs or not. This can be done in order to take into account that + some grid issues are more relevant, as reinforcing a certain line or feeder will + be more expensive than another one. + + In case of voltage issues: + If True, the costs for each MV and LV feeder, as well as MV/LV station are + determined using the costs for earth work and new lines over the full length of + the feeder respectively for a new MV/LV station. In each time interval, the + estimated costs are only taken into account, in case there is a voltage issue + somewhere in the feeder. 
+ The costs don't convey the actual costs but are an estimation, as + the real number of parallel lines needed is not determined and the whole feeder + length is used instead of the length over two-thirds of the feeder. + If False, the severity of each feeder's voltage issue is set to be the same. + + In case of overloading issues: + If True, the overloading of each line is multiplied by + the respective grid expansion costs of that line including costs for earth work + and one new line. + The costs don't convey the actual costs but are an estimation, as + the discrete needed number of parallel lines is not considered. + If False, only the relative overloading is used to determine the most relevant + time intervals. + + Default: True. Returns -------- @@ -626,6 +669,7 @@ def get_most_critical_time_intervals( time_steps_per_time_interval, time_step_day_start=time_step_day_start, overloading_factor=overloading_factor, + weigh_by_costs=weigh_by_costs, ) if num_time_intervals is None: num_time_intervals = int(np.ceil(len(loading_scores) * percentage)) @@ -646,6 +690,7 @@ def get_most_critical_time_intervals( time_steps_per_time_interval, time_step_day_start=time_step_day_start, voltage_deviation_factor=voltage_deviation_factor, + weigh_by_costs=weigh_by_costs, ) if num_time_intervals is None: num_time_intervals = int(np.ceil(len(voltage_scores) * percentage)) From 8d046b30e17d26aa72b3ea9d7053a258792ca512 Mon Sep 17 00:00:00 2001 From: birgits Date: Tue, 23 Jan 2024 19:36:17 -0800 Subject: [PATCH 22/41] Fix spelling --- edisgo/tools/temporal_complexity_reduction.py | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/edisgo/tools/temporal_complexity_reduction.py b/edisgo/tools/temporal_complexity_reduction.py index abf743ba1..242731170 100644 --- a/edisgo/tools/temporal_complexity_reduction.py +++ b/edisgo/tools/temporal_complexity_reduction.py @@ -96,13 +96,13 @@ def _scored_most_critical_loading_time_interval( time_steps_per_day=24, 
time_step_day_start=0, overloading_factor=0.95, - weigh_by_costs=True, + weight_by_costs=True, ): """ Get time intervals sorted by severity of overloadings. - The overloading can weighed by the estimated expansion costs of each respective line - and transformer. See parameter `weigh_by_costs` for more information. + The overloading can weighted by the estimated expansion costs of each respective + line and transformer. See parameter `weight_by_costs` for more information. The length of the time intervals and hour of day at which the time intervals should begin can be set through the parameters `time_steps_per_time_interval` and `time_step_day_start`. @@ -133,9 +133,9 @@ def _scored_most_critical_loading_time_interval( `overloading_factor` in :func:`~get_most_critical_time_intervals` for more information. Default: 0.95. - weigh_by_costs : bool - Defines whether overloading issues should be weighed by estimated grid expansion - costs or not. See parameter `weigh_by_costs` in + weight_by_costs : bool + Defines whether overloading issues should be weighted by estimated grid + expansion costs or not. See parameter `weight_by_costs` in :func:`~get_most_critical_time_intervals` for more information. Default: True. 
@@ -158,8 +158,8 @@ def _scored_most_critical_loading_time_interval( # Get lines that have violations and replace nan values with 0 crit_lines_score = relative_i_res[relative_i_res > 1].fillna(0) - if weigh_by_costs: - # weigh line violations with expansion costs + if weight_by_costs: + # weight line violations with expansion costs costs_lines = ( line_expansion_costs(edisgo_obj).drop(columns="voltage_level").sum(axis=1) ) @@ -175,12 +175,12 @@ def _scored_most_critical_loading_time_interval( data=edisgo_obj.config["costs_transformers"]["mv"], ) costs = pd.concat([costs_lines, costs_trafos_lv, costs_trafos_mv]) - crit_lines_weighed = crit_lines_score * costs + crit_lines_weighted = crit_lines_score * costs else: - crit_lines_weighed = crit_lines_score.copy() + crit_lines_weighted = crit_lines_score.copy() time_intervals_df = _most_critical_time_interval( - costs_per_time_step=crit_lines_weighed, + costs_per_time_step=crit_lines_weighted, grid_issues_magnitude_df=crit_lines_score, which="overloading", deviation_factor=overloading_factor, @@ -198,13 +198,13 @@ def _scored_most_critical_voltage_issues_time_interval( time_steps_per_day=24, time_step_day_start=0, voltage_deviation_factor=0.95, - weigh_by_costs=True, + weight_by_costs=True, ): """ Get time intervals sorted by severity of voltage issues. - The voltage issues can be weighed by the estimated expansion costs in each - respective feeder. See parameter `weigh_by_costs` for more information. + The voltage issues can be weighted by the estimated expansion costs in each + respective feeder. See parameter `weight_by_costs` for more information. The length of the time intervals and hour of day at which the time intervals should begin can be set through the parameters `time_steps_per_time_interval` and `time_step_day_start`. @@ -235,9 +235,9 @@ def _scored_most_critical_voltage_issues_time_interval( `voltage_deviation_factor` in :func:`~get_most_critical_time_intervals` for more information. Default: 0.95. 
- weigh_by_costs : bool - Defines whether voltage issues should be weighed by estimated grid expansion - costs or not. See parameter `weigh_by_costs` in + weight_by_costs : bool + Defines whether voltage issues should be weighted by estimated grid expansion + costs or not. See parameter `weight_by_costs` in :func:`~get_most_critical_time_intervals` for more information. Default: True. @@ -282,7 +282,7 @@ def _scored_most_critical_voltage_issues_time_interval( ) voltage_diff_feeder[voltage_diff_feeder != 0] = 1 - if weigh_by_costs: + if weight_by_costs: # determine costs per feeder costs_lines = ( line_expansion_costs(edisgo_obj).drop(columns="voltage_level").sum(axis=1) @@ -305,7 +305,7 @@ def _scored_most_critical_voltage_issues_time_interval( .sum() ) - # weigh feeder voltage violation with costs per feeder + # weight feeder voltage violation with costs per feeder voltage_diff_feeder = voltage_diff_feeder * costs_per_feeder.squeeze() time_intervals_df = _most_critical_time_interval( @@ -539,12 +539,12 @@ def get_most_critical_time_intervals( use_troubleshooting_mode=True, overloading_factor=0.95, voltage_deviation_factor=0.95, - weigh_by_costs=True, + weight_by_costs=True, ): """ Get time intervals sorted by severity of overloadings as well as voltage issues. - The overloading and voltage issues can be weighed by the estimated expansion costs + The overloading and voltage issues can be weighted by the estimated expansion costs solving the issue would require. The length of the time intervals and hour of day at which the time intervals should begin can be set through the parameters `time_steps_per_time_interval` and @@ -601,8 +601,8 @@ def get_most_critical_time_intervals( of buses that reach their maximum voltage deviation in a certain time interval at a voltage deviation of higher or equal to 0.2*0.95. Default: 0.95. 
- weigh_by_costs : bool - Defines whether overloading and voltage issues should be weighed by estimated + weight_by_costs : bool + Defines whether overloading and voltage issues should be weighted by estimated grid expansion costs or not. This can be done in order to take into account that some grid issues are more relevant, as reinforcing a certain line or feeder will be more expensive than another one. @@ -669,7 +669,7 @@ def get_most_critical_time_intervals( time_steps_per_time_interval, time_step_day_start=time_step_day_start, overloading_factor=overloading_factor, - weigh_by_costs=weigh_by_costs, + weight_by_costs=weight_by_costs, ) if num_time_intervals is None: num_time_intervals = int(np.ceil(len(loading_scores) * percentage)) @@ -690,7 +690,7 @@ def get_most_critical_time_intervals( time_steps_per_time_interval, time_step_day_start=time_step_day_start, voltage_deviation_factor=voltage_deviation_factor, - weigh_by_costs=weigh_by_costs, + weight_by_costs=weight_by_costs, ) if num_time_intervals is None: num_time_intervals = int(np.ceil(len(voltage_scores) * percentage)) From c2e45ad9982c6de278798093e649dce3c7d64936 Mon Sep 17 00:00:00 2001 From: birgits Date: Tue, 23 Jan 2024 19:36:50 -0800 Subject: [PATCH 23/41] Bugfix and remove time intervals without grid issues --- edisgo/tools/temporal_complexity_reduction.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/edisgo/tools/temporal_complexity_reduction.py b/edisgo/tools/temporal_complexity_reduction.py index 242731170..3453bf692 100644 --- a/edisgo/tools/temporal_complexity_reduction.py +++ b/edisgo/tools/temporal_complexity_reduction.py @@ -405,17 +405,18 @@ def _most_critical_time_interval( # needs to be adapted to index based on time index instead of iloc crit_timesteps = ( crit_timesteps.iloc[int(time_steps_per_time_interval) - 1 :] - .iloc[time_step_day_start + 1 :: time_steps_per_day] + .iloc[time_step_day_start::time_steps_per_day] .sort_values(ascending=False) ) - 
timesteps = crit_timesteps.index - pd.DateOffset( - hours=int(time_steps_per_time_interval) - ) + # get time steps in each time interval - these are set up setting the given time + # step to be the end of the respective time interval, as rolling() function gives + # the time step at the end of the considered time interval; further, only time + # intervals with a sum greater than zero are considered, as zero values mean, that + # there is no grid issue in the respective time interval time_intervals = [ - pd.date_range( - start=timestep, periods=int(time_steps_per_time_interval), freq="h" - ) - for timestep in timesteps + pd.date_range(end=timestep, periods=int(time_steps_per_time_interval), freq="h") + for timestep in crit_timesteps.index + if crit_timesteps[timestep] != 0.0 ] # make dataframe with time steps in each time interval and the percentage of From edd4ba3db93c10d7c74dd012f4ee01d3a0a3fe7a Mon Sep 17 00:00:00 2001 From: birgits Date: Tue, 23 Jan 2024 19:37:40 -0800 Subject: [PATCH 24/41] Adapt tests to bugfix and new parameter --- .../test_temporal_complexity_reduction.py | 30 +++++++++++++------ 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/tests/tools/test_temporal_complexity_reduction.py b/tests/tools/test_temporal_complexity_reduction.py index 9b760da7d..91fb18feb 100644 --- a/tests/tools/test_temporal_complexity_reduction.py +++ b/tests/tools/test_temporal_complexity_reduction.py @@ -62,10 +62,10 @@ def test_get_most_critical_time_steps(self): def test__scored_most_critical_loading_time_interval(self): # test with default values ts_crit = temp_red._scored_most_critical_loading_time_interval(self.edisgo, 24) - assert len(ts_crit) == 9 + assert len(ts_crit) == 10 assert ( ts_crit.loc[0, "time_steps"] - == pd.date_range("1/5/2018", periods=24, freq="H") + == pd.date_range("1/8/2018", periods=24, freq="H") ).all() assert np.isclose( ts_crit.loc[0, "percentage_max_overloaded_components"], 0.96479 @@ -85,29 +85,41 @@ def 
test__scored_most_critical_loading_time_interval(self): ).all() assert ts_crit.loc[0, "percentage_max_overloaded_components"] == 1 + # test without weighting by costs + ts_crit = temp_red._scored_most_critical_loading_time_interval( + self.edisgo, + 48, + weight_by_costs=False, + ) + assert len(ts_crit) == 9 + assert ( + ts_crit.loc[0, "time_steps"] + == pd.date_range("1/5/2018 0:00", periods=48, freq="H") + ).all() + def test__scored_most_critical_voltage_issues_time_interval(self): # test with default values ts_crit = temp_red._scored_most_critical_voltage_issues_time_interval( self.edisgo, 24 ) - assert len(ts_crit) == 9 + assert len(ts_crit) == 5 assert ( ts_crit.loc[0, "time_steps"] == pd.date_range("1/1/2018", periods=24, freq="H") ).all() - assert np.isclose(ts_crit.loc[0, "percentage_buses_max_voltage_deviation"], 1.0) - assert np.isclose(ts_crit.loc[1, "percentage_buses_max_voltage_deviation"], 1.0) + assert ( + ts_crit.loc[:, "percentage_buses_max_voltage_deviation"].values == 1.0 + ).all() # test with non-default values ts_crit = temp_red._scored_most_critical_voltage_issues_time_interval( - self.edisgo, 24, time_step_day_start=4, voltage_deviation_factor=0.5 + self.edisgo, 72, time_step_day_start=4, weight_by_costs=False ) - assert len(ts_crit) == 9 + assert len(ts_crit) == 5 assert ( ts_crit.loc[0, "time_steps"] - == pd.date_range("1/1/2018 4:00", periods=24, freq="H") + == pd.date_range("1/1/2018 4:00", periods=72, freq="H") ).all() - assert np.isclose(ts_crit.loc[0, "percentage_buses_max_voltage_deviation"], 1.0) def test_get_most_critical_time_intervals(self): self.edisgo.timeseries.timeindex = self.edisgo.timeseries.timeindex[:25] From 0595d573156d9a0861d9bee796d369e4451a5d9e Mon Sep 17 00:00:00 2001 From: birgits Date: Tue, 23 Jan 2024 19:51:56 -0800 Subject: [PATCH 25/41] Adapt test --- .../tools/test_temporal_complexity_reduction.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git 
a/tests/tools/test_temporal_complexity_reduction.py b/tests/tools/test_temporal_complexity_reduction.py index 91fb18feb..03a41b005 100644 --- a/tests/tools/test_temporal_complexity_reduction.py +++ b/tests/tools/test_temporal_complexity_reduction.py @@ -122,11 +122,17 @@ def test__scored_most_critical_voltage_issues_time_interval(self): ).all() def test_get_most_critical_time_intervals(self): - self.edisgo.timeseries.timeindex = self.edisgo.timeseries.timeindex[:25] - self.edisgo.timeseries.scale_timeseries(p_scaling_factor=5, q_scaling_factor=5) + self.edisgo.timeseries.scale_timeseries(p_scaling_factor=2, q_scaling_factor=2) steps = temp_red.get_most_critical_time_intervals( - self.edisgo, time_steps_per_time_interval=24 + self.edisgo, time_steps_per_time_interval=24, percentage=0.5 ) - assert len(steps) == 1 - assert len(steps.columns) == 4 + assert len(steps) == 5 + assert ( + steps.loc[0, "time_steps_overloading"] + == pd.date_range("1/8/2018", periods=24, freq="H") + ).all() + assert ( + steps.loc[0, "time_steps_voltage_issues"] + == pd.date_range("1/1/2018", periods=24, freq="H") + ).all() From 4979e3bb8370dce0d8716f155b87b4f4526d9258 Mon Sep 17 00:00:00 2001 From: birgits Date: Mon, 29 Jan 2024 14:19:28 -0800 Subject: [PATCH 26/41] Move approximation of costs to separate functions and add tests --- edisgo/tools/temporal_complexity_reduction.py | 129 ++++++++++++------ .../test_temporal_complexity_reduction.py | 12 ++ 2 files changed, 103 insertions(+), 38 deletions(-) diff --git a/edisgo/tools/temporal_complexity_reduction.py b/edisgo/tools/temporal_complexity_reduction.py index 3453bf692..e445bdcaa 100644 --- a/edisgo/tools/temporal_complexity_reduction.py +++ b/edisgo/tools/temporal_complexity_reduction.py @@ -160,21 +160,7 @@ def _scored_most_critical_loading_time_interval( if weight_by_costs: # weight line violations with expansion costs - costs_lines = ( - line_expansion_costs(edisgo_obj).drop(columns="voltage_level").sum(axis=1) - ) - costs_trafos_lv 
= pd.Series( - index=[ - str(lv_grid) + "_station" - for lv_grid in list(edisgo_obj.topology.mv_grid.lv_grids) - ], - data=edisgo_obj.config["costs_transformers"]["lv"], - ) - costs_trafos_mv = pd.Series( - index=["MVGrid_" + str(edisgo_obj.topology.id) + "_station"], - data=edisgo_obj.config["costs_transformers"]["mv"], - ) - costs = pd.concat([costs_lines, costs_trafos_lv, costs_trafos_mv]) + costs = _costs_per_line_and_transformer(edisgo_obj) crit_lines_weighted = crit_lines_score * costs else: crit_lines_weighted = crit_lines_score.copy() @@ -283,30 +269,10 @@ def _scored_most_critical_voltage_issues_time_interval( voltage_diff_feeder[voltage_diff_feeder != 0] = 1 if weight_by_costs: - # determine costs per feeder - costs_lines = ( - line_expansion_costs(edisgo_obj).drop(columns="voltage_level").sum(axis=1) - ) - costs_trafos_lv = pd.Series( - index=lv_station_buses, - data=edisgo_obj.config._data["costs_transformers"]["lv"], - ) - costs = pd.concat([costs_lines, costs_trafos_lv]) - - feeder_lines = edisgo_obj.topology.lines_df.grid_feeder - feeder_trafos_lv = pd.Series( - index=lv_station_buses, - data=lv_station_buses, - ) - feeder = pd.concat([feeder_lines, feeder_trafos_lv]) - costs_per_feeder = ( - pd.concat([costs.rename("costs"), feeder.rename("feeder")], axis=1) - .groupby(by="feeder")[["costs"]] - .sum() - ) - + # get costs per feeder + costs_per_feeder = _costs_per_feeder(edisgo_obj, lv_station_buses) # weight feeder voltage violation with costs per feeder - voltage_diff_feeder = voltage_diff_feeder * costs_per_feeder.squeeze() + voltage_diff_feeder = voltage_diff_feeder * costs_per_feeder time_intervals_df = _most_critical_time_interval( costs_per_time_step=voltage_diff_feeder, @@ -321,6 +287,93 @@ def _scored_most_critical_voltage_issues_time_interval( return time_intervals_df +def _costs_per_line_and_transformer(edisgo_obj): + """ + Helper function to get costs per line (including earthwork and costs for one new + line) and per transformer. 
+ + Transformers are named after the grid at the lower voltage level and with the + expansion "_station", e.g. "LVGrid_0_station". + + Returns + ------- + :pandas:`pandas.Series` + Series with component name in index and costs in kEUR as values. + + """ + costs_lines = ( + line_expansion_costs(edisgo_obj).drop(columns="voltage_level").sum(axis=1) + ) + costs_trafos_lv = pd.Series( + index=[ + str(lv_grid) + "_station" + for lv_grid in list(edisgo_obj.topology.mv_grid.lv_grids) + ], + data=edisgo_obj.config["costs_transformers"]["lv"], + ) + costs_trafos_mv = pd.Series( + index=["MVGrid_" + str(edisgo_obj.topology.id) + "_station"], + data=edisgo_obj.config["costs_transformers"]["mv"], + ) + return pd.concat([costs_lines, costs_trafos_lv, costs_trafos_mv]) + + +def _costs_per_feeder(edisgo_obj, lv_station_buses=None): + """ + Helper function to get costs per MV and LV feeder (including earthwork and costs for + one new line) and per MV/LV transformer (as they are considered as feeders). + + Transformers are named after the bus at the MV/LV station's secondary side. + + Parameters + ----------- + edisgo_obj : :class:`~.EDisGo` + lv_station_buses : list(str) or None + List of bus names of buses at the secondary side of the MV/LV transformers. + If None, list is generated. + + Returns + ------- + :pandas:`pandas.Series` + Series with feeder names in index and costs in kEUR as values. 
+ + """ + if lv_station_buses is None: + lv_station_buses = [ + lv_grid.station.index[0] for lv_grid in edisgo_obj.topology.mv_grid.lv_grids + ] + if "grid_feeder" not in edisgo_obj.topology.buses_df.columns: + # set feeder using MV feeder for MV components and LV feeder for LV components + edisgo_obj.topology.assign_feeders(mode="grid_feeder") + + # feeders of buses at MV/LV station's secondary sides are set to the name of the + # station bus to have them as separate feeders + edisgo_obj.topology.buses_df.loc[lv_station_buses, "grid_feeder"] = lv_station_buses + + costs_lines = ( + line_expansion_costs(edisgo_obj).drop(columns="voltage_level").sum(axis=1) + ) + costs_trafos_lv = pd.Series( + index=lv_station_buses, + data=edisgo_obj.config._data["costs_transformers"]["lv"], + ) + costs = pd.concat([costs_lines, costs_trafos_lv]) + + feeder_lines = edisgo_obj.topology.lines_df.grid_feeder + feeder_trafos_lv = pd.Series( + index=lv_station_buses, + data=lv_station_buses, + ) + feeder = pd.concat([feeder_lines, feeder_trafos_lv]) + costs_per_feeder = ( + pd.concat([costs.rename("costs"), feeder.rename("feeder")], axis=1) + .groupby(by="feeder")[["costs"]] + .sum() + ) + + return costs_per_feeder.squeeze() + + def _most_critical_time_interval( costs_per_time_step, grid_issues_magnitude_df, diff --git a/tests/tools/test_temporal_complexity_reduction.py b/tests/tools/test_temporal_complexity_reduction.py index 03a41b005..32d46770e 100644 --- a/tests/tools/test_temporal_complexity_reduction.py +++ b/tests/tools/test_temporal_complexity_reduction.py @@ -121,6 +121,18 @@ def test__scored_most_critical_voltage_issues_time_interval(self): == pd.date_range("1/1/2018 4:00", periods=72, freq="H") ).all() + def test__costs_per_line_and_transformer(self): + costs = temp_red._costs_per_line_and_transformer(self.edisgo) + assert len(costs) == 131 + 11 + assert np.isclose(costs["Line_10007"], 0.722445826838636 * 80) + assert np.isclose(costs["LVGrid_1_station"], 10) + + def 
test__costs_per_feeder(self): + costs = temp_red._costs_per_feeder(self.edisgo) + assert len(costs) == 37 + assert np.isclose(costs["Bus_BranchTee_MVGrid_1_1"], 295.34795) + assert np.isclose(costs["BusBar_MVGrid_1_LVGrid_1_LV"], 10) + def test_get_most_critical_time_intervals(self): self.edisgo.timeseries.scale_timeseries(p_scaling_factor=2, q_scaling_factor=2) steps = temp_red.get_most_critical_time_intervals( From 70b8da5b553202d7dec87e3001edadc7521689b2 Mon Sep 17 00:00:00 2001 From: birgits Date: Mon, 29 Jan 2024 18:12:23 -0800 Subject: [PATCH 27/41] Add weighting by costs in time steps selection --- edisgo/edisgo.py | 6 + edisgo/flex_opt/reinforce_grid.py | 7 + edisgo/tools/temporal_complexity_reduction.py | 131 ++++++++++++++---- .../test_temporal_complexity_reduction.py | 21 ++- 4 files changed, 136 insertions(+), 29 deletions(-) diff --git a/edisgo/edisgo.py b/edisgo/edisgo.py index e41ba99c8..577f3ec1e 100755 --- a/edisgo/edisgo.py +++ b/edisgo/edisgo.py @@ -1328,6 +1328,12 @@ def reinforce( used to specify whether to run an initial analyze to determine most critical time steps or to use existing results. If set to False, `use_troubleshooting_mode` is ignored. Default: True. + weight_by_costs : bool + In case `reduced_analysis` is set to True, this parameter can be used + to specify whether to weight time steps by estimated grid expansion costs. + See parameter `weight_by_costs` in + :func:`~.tools.temporal_complexity_reduction.get_most_critical_time_steps` + for more information. Default: False. Returns -------- diff --git a/edisgo/flex_opt/reinforce_grid.py b/edisgo/flex_opt/reinforce_grid.py index a916f9aa7..a8be78421 100644 --- a/edisgo/flex_opt/reinforce_grid.py +++ b/edisgo/flex_opt/reinforce_grid.py @@ -111,6 +111,12 @@ def reinforce_grid( used to specify whether to run an initial analyze to determine most critical time steps or to use existing results. If set to False, `use_troubleshooting_mode` is ignored. Default: True. 
+ weight_by_costs : bool + In case `reduced_analysis` is set to True, this parameter can be + used to specify whether to weight time steps by estimated grid expansion costs. + See parameter `weight_by_costs` in + :func:`~.tools.temporal_complexity_reduction.get_most_critical_time_steps` + for more information. Default: False. Returns ------- @@ -184,6 +190,7 @@ def reinforce_grid( percentage=kwargs.get("percentage", 1.0), use_troubleshooting_mode=kwargs.get("use_troubleshooting_mode", True), run_initial_analyze=kwargs.get("run_initial_analyze", True), + weight_by_costs=kwargs.get("weight_by_costs", False), ) if timesteps_pfa is not None and len(timesteps_pfa) == 0: logger.debug("Zero time steps for grid reinforcement.") diff --git a/edisgo/tools/temporal_complexity_reduction.py b/edisgo/tools/temporal_complexity_reduction.py index e445bdcaa..2544fdc68 100644 --- a/edisgo/tools/temporal_complexity_reduction.py +++ b/edisgo/tools/temporal_complexity_reduction.py @@ -17,21 +17,31 @@ logger = logging.getLogger(__name__) -def _scored_most_critical_loading(edisgo_obj: EDisGo) -> pd.Series: +def _scored_most_critical_loading( + edisgo_obj: EDisGo, weight_by_costs: bool = True +) -> pd.Series: """ - Method to get time steps where at least one component shows its highest overloading. + Get time steps sorted by severity of overloadings. + + The overloading can be weighted by the estimated expansion costs of each respective + line and transformer. See parameter `weight_by_costs` for more information. Parameters ----------- edisgo_obj : :class:`~.EDisGo` + weight_by_costs : bool + Defines whether overloading issues should be weighted by estimated grid + expansion costs or not. See parameter `weight_by_costs` in + :func:`~get_most_critical_time_steps` for more information. + Default: True. Returns -------- :pandas:`pandas.Series` Series with time index and corresponding sum of maximum relative overloadings - of lines and transformers. 
The series only contains time steps, where at least - one component is maximally overloaded, and is sorted descending by the - sum of maximum relative overloadings. + of lines and transformers (weighted by estimated reinforcement costs, in case + `weight_by_costs` is True). The series only contains time steps, where at least + one component is maximally overloaded, and is sorted descending order. """ @@ -42,30 +52,47 @@ def _scored_most_critical_loading(edisgo_obj: EDisGo) -> pd.Series: crit_lines_score = relative_i_res[relative_i_res > 1] # Get most critical time steps per component + crit_lines_score = crit_lines_score[crit_lines_score == crit_lines_score.max()] + + if weight_by_costs: + # weight line violations with expansion costs + costs = _costs_per_line_and_transformer(edisgo_obj) + crit_lines_score = crit_lines_score * costs + else: + crit_lines_score = crit_lines_score - 1 + + # drop components and time steps without violations crit_lines_score = ( - (crit_lines_score[crit_lines_score == crit_lines_score.max()]) - .dropna(how="all") - .dropna(how="all", axis=1) + crit_lines_score.dropna(how="all").dropna(how="all", axis=1).fillna(0) ) - - # Sort according to highest cumulated relative overloading - crit_lines_score = (crit_lines_score - 1).sum(axis=1) - return crit_lines_score.sort_values(ascending=False) + # sort sum in descending order + return crit_lines_score.sum(axis=1).sort_values(ascending=False) -def _scored_most_critical_voltage_issues(edisgo_obj: EDisGo) -> pd.Series: +def _scored_most_critical_voltage_issues( + edisgo_obj: EDisGo, weight_by_costs: bool = True +) -> pd.Series: """ Method to get time steps where at least one bus shows its highest deviation from allowed voltage boundaries. + The voltage issues can be weighted by the estimated expansion costs in each + respective feeder. See parameter `weight_by_costs` for more information. 
+ Parameters ----------- edisgo_obj : :class:`~.EDisGo` + weight_by_costs : bool + Defines whether voltage issues should be weighted by estimated grid expansion + costs or not. See parameter `weight_by_costs` in + :func:`~get_most_critical_time_steps` for more information. + Default: True. Returns -------- :pandas:`pandas.Series` - Series with time index and corresponding sum of maximum voltage deviations. + Series with time index and corresponding sum of maximum voltage deviations + (weighted by estimated reinforcement costs, in case `weight_by_costs` is True). The series only contains time steps, where at least one bus has its highest deviation from the allowed voltage limits, and is sorted descending by the sum of maximum voltage deviations. @@ -76,18 +103,42 @@ def _scored_most_critical_voltage_issues(edisgo_obj: EDisGo) -> pd.Series: ) # Get score for nodes that are over or under the allowed deviations - voltage_diff = voltage_diff.abs()[voltage_diff.abs() > 0] + voltage_diff = voltage_diff[voltage_diff != 0.0].abs() # get only most critical events for component # Todo: should there be different ones for over and undervoltage? 
- voltage_diff = ( - (voltage_diff[voltage_diff.abs() == voltage_diff.abs().max()]) - .dropna(how="all") - .dropna(how="all", axis=1) - ) + voltage_diff = voltage_diff[voltage_diff == voltage_diff.max()] - voltage_diff = voltage_diff.sum(axis=1) + if weight_by_costs: + # set feeder using MV feeder for MV components and LV feeder for LV components + edisgo_obj.topology.assign_feeders(mode="grid_feeder") + # feeders of buses at MV/LV station's secondary sides are set to the name of the + # station bus to have them as separate feeders + lv_station_buses = [ + lv_grid.station.index[0] for lv_grid in edisgo_obj.topology.mv_grid.lv_grids + ] + edisgo_obj.topology.buses_df.loc[ + lv_station_buses, "grid_feeder" + ] = lv_station_buses + # weight voltage violations with expansion costs + costs = _costs_per_feeder(edisgo_obj, lv_station_buses=lv_station_buses) + # map feeder costs to buses + feeder_buses = edisgo_obj.topology.buses_df.grid_feeder + costs_buses = pd.Series( + { + bus_name: ( + costs[feeder_buses[bus_name]] + if feeder_buses[bus_name] != "station_node" + else 0 + ) + for bus_name in feeder_buses.index + } + ) + voltage_diff = voltage_diff * costs_buses - return voltage_diff.sort_values(ascending=False) + # drop components and time steps without violations + voltage_diff = voltage_diff.dropna(how="all").dropna(how="all", axis=1).fillna(0) + # sort sum in descending order + return voltage_diff.sum(axis=1).sort_values(ascending=False) def _scored_most_critical_loading_time_interval( @@ -101,7 +152,7 @@ def _scored_most_critical_loading_time_interval( """ Get time intervals sorted by severity of overloadings. - The overloading can weighted by the estimated expansion costs of each respective + The overloading can be weighted by the estimated expansion costs of each respective line and transformer. See parameter `weight_by_costs` for more information. 
The length of the time intervals and hour of day at which the time intervals should begin can be set through the parameters `time_steps_per_time_interval` and @@ -791,6 +842,7 @@ def get_most_critical_time_steps( percentage: float = 1.0, use_troubleshooting_mode=True, run_initial_analyze=True, + weight_by_costs=True, ) -> pd.DatetimeIndex: """ Get the time steps with the most critical overloading and voltage issues. @@ -835,6 +887,31 @@ def get_most_critical_time_steps( This parameter can be used to specify whether to run an initial analyze to determine most critical time steps or to use existing results. If set to False, `use_troubleshooting_mode` is ignored. Default: True. + weight_by_costs : bool + Defines whether overloading and voltage issues should be weighted by estimated + grid expansion costs or not. This can be done in order to take into account that + some grid issues are more relevant, as reinforcing a certain line or feeder will + be more expensive than another one. + + In case of voltage issues: + If True, the voltage issues at each bus are weighted by the estimated grid + expansion costs for the MV or LV feeder the bus is in or in case of MV/LV + stations by the costs for a new transformer. Feeder costs are determined using + the costs for earth work and new lines over the full length of the feeder. + The costs don't convey the actual costs but are an estimation, as + the real number of parallel lines needed is not determined and the whole feeder + length is used instead of the length over two-thirds of the feeder. + If False, the severity of each feeder's voltage issue is set to be the same. + + In case of overloading issues: + If True, the overloading of each line is multiplied by + the respective grid expansion costs of that line including costs for earth work + and one new line. + The costs don't convey the actual costs but are an estimation, as + the discrete needed number of parallel lines is not considered. 
+ If False, only the relative overloading is used. + + Default: True. Returns -------- @@ -865,7 +942,9 @@ def get_most_critical_time_steps( ) # Select most critical steps based on current violations - loading_scores = _scored_most_critical_loading(edisgo_obj) + loading_scores = _scored_most_critical_loading( + edisgo_obj, weight_by_costs=weight_by_costs + ) if num_steps_loading is None: num_steps_loading = int(len(loading_scores) * percentage) else: @@ -880,7 +959,9 @@ def get_most_critical_time_steps( steps = loading_scores[:num_steps_loading].index # Select most critical steps based on voltage violations - voltage_scores = _scored_most_critical_voltage_issues(edisgo_obj) + voltage_scores = _scored_most_critical_voltage_issues( + edisgo_obj, weight_by_costs=weight_by_costs + ) if num_steps_voltage is None: num_steps_voltage = int(len(voltage_scores) * percentage) else: diff --git a/tests/tools/test_temporal_complexity_reduction.py b/tests/tools/test_temporal_complexity_reduction.py index 32d46770e..c80caf231 100644 --- a/tests/tools/test_temporal_complexity_reduction.py +++ b/tests/tools/test_temporal_complexity_reduction.py @@ -32,19 +32,32 @@ def setup_class(self): self.edisgo.analyze() def test__scored_most_critical_loading(self): - ts_crit = temp_red._scored_most_critical_loading(self.edisgo) - + ts_crit = temp_red._scored_most_critical_loading( + self.edisgo, weight_by_costs=False + ) assert len(ts_crit) == 180 assert np.isclose(ts_crit.iloc[0], 1.45613) assert np.isclose(ts_crit.iloc[-1], 1.14647) - def test__scored_most_critical_voltage_issues(self): - ts_crit = temp_red._scored_most_critical_voltage_issues(self.edisgo) + ts_crit = temp_red._scored_most_critical_loading(self.edisgo) + assert len(ts_crit) == 180 + assert np.isclose(ts_crit.iloc[0], 190.63611) + assert np.isclose(ts_crit.iloc[-1], 48.13501) + + def test__scored_most_critical_voltage_issues(self): + ts_crit = temp_red._scored_most_critical_voltage_issues( + self.edisgo, weight_by_costs=False 
+ ) assert len(ts_crit) == 120 assert np.isclose(ts_crit.iloc[0], 0.01062258) assert np.isclose(ts_crit.iloc[-1], 0.01062258) + ts_crit = temp_red._scored_most_critical_voltage_issues(self.edisgo) + assert len(ts_crit) == 120 + assert np.isclose(ts_crit.iloc[0], 0.1062258) + assert np.isclose(ts_crit.iloc[-1], 0.1062258) + def test_get_most_critical_time_steps(self): ts_crit = temp_red.get_most_critical_time_steps( self.edisgo, num_steps_loading=2, num_steps_voltage=2 From cf0c41220666c18074f4d37ce5fa535ad97bba69 Mon Sep 17 00:00:00 2001 From: birgits Date: Mon, 29 Jan 2024 18:35:58 -0800 Subject: [PATCH 28/41] Add tests --- edisgo/tools/temporal_complexity_reduction.py | 4 ++-- .../test_temporal_complexity_reduction.py | 24 ++++++++++++++++++- 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/edisgo/tools/temporal_complexity_reduction.py b/edisgo/tools/temporal_complexity_reduction.py index 2544fdc68..7938dbaf0 100644 --- a/edisgo/tools/temporal_complexity_reduction.py +++ b/edisgo/tools/temporal_complexity_reduction.py @@ -57,7 +57,7 @@ def _scored_most_critical_loading( if weight_by_costs: # weight line violations with expansion costs costs = _costs_per_line_and_transformer(edisgo_obj) - crit_lines_score = crit_lines_score * costs + crit_lines_score = crit_lines_score * costs.loc[crit_lines_score.columns] else: crit_lines_score = crit_lines_score - 1 @@ -133,7 +133,7 @@ def _scored_most_critical_voltage_issues( for bus_name in feeder_buses.index } ) - voltage_diff = voltage_diff * costs_buses + voltage_diff = voltage_diff * costs_buses.loc[voltage_diff.columns] # drop components and time steps without violations voltage_diff = voltage_diff.dropna(how="all").dropna(how="all", axis=1).fillna(0) diff --git a/tests/tools/test_temporal_complexity_reduction.py b/tests/tools/test_temporal_complexity_reduction.py index c80caf231..0fcbc7356 100644 --- a/tests/tools/test_temporal_complexity_reduction.py +++ b/tests/tools/test_temporal_complexity_reduction.py 
@@ -60,7 +60,11 @@ def test__scored_most_critical_voltage_issues(self): def test_get_most_critical_time_steps(self): ts_crit = temp_red.get_most_critical_time_steps( - self.edisgo, num_steps_loading=2, num_steps_voltage=2 + self.edisgo, + num_steps_loading=2, + num_steps_voltage=2, + weight_by_costs=False, + run_initial_analyze=False, ) assert len(ts_crit) == 3 @@ -72,6 +76,24 @@ def test_get_most_critical_time_steps(self): ) assert len(ts_crit) == 2 + ts_crit = temp_red.get_most_critical_time_steps( + self.edisgo, + mode="lv", + lv_grid_id=2, + percentage=0.5, + num_steps_voltage=2, + ) + assert len(ts_crit) == 0 + + ts_crit = temp_red.get_most_critical_time_steps( + self.edisgo, + mode="lv", + lv_grid_id=6, + percentage=0.5, + num_steps_voltage=2, + ) + assert len(ts_crit) == 60 + def test__scored_most_critical_loading_time_interval(self): # test with default values ts_crit = temp_red._scored_most_critical_loading_time_interval(self.edisgo, 24) From 139fd7f9d203dd3dd880f53fa4f707092ebf5e49 Mon Sep 17 00:00:00 2001 From: birgits Date: Tue, 30 Jan 2024 16:25:02 -0800 Subject: [PATCH 29/41] Add test --- tests/flex_opt/test_reinforce_grid.py | 6 ++---- tests/test_edisgo.py | 11 +++++++++++ 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/tests/flex_opt/test_reinforce_grid.py b/tests/flex_opt/test_reinforce_grid.py index ad1b296a7..cb6074310 100644 --- a/tests/flex_opt/test_reinforce_grid.py +++ b/tests/flex_opt/test_reinforce_grid.py @@ -59,11 +59,9 @@ def test_reinforce_grid(self): res_reduced = reinforce_grid( edisgo=copy.deepcopy(self.edisgo), reduced_analysis=True, - num_steps_loading=4, - ) - assert_frame_equal( - res_reduced.equipment_changes, results_dict[None].equipment_changes + num_steps_loading=2, ) + assert len(res_reduced.i_res) == 2 def test_run_separate_lv_grids(self): edisgo = copy.deepcopy(self.edisgo) diff --git a/tests/test_edisgo.py b/tests/test_edisgo.py index e2642bf9a..bac0789cb 100755 --- a/tests/test_edisgo.py +++ 
b/tests/test_edisgo.py @@ -546,6 +546,17 @@ def test_enhanced_reinforce_grid(self): assert len(results.equipment_changes) == 892 assert results.v_res.shape == (4, 148) + edisgo_obj = copy.deepcopy(self.edisgo) + edisgo_obj = enhanced_reinforce_grid( + edisgo_obj, + reduced_analysis=True, + is_worst_case=False, + separate_lv_grids=True, + num_steps_loading=1, + num_steps_voltage=1, + ) + assert edisgo_obj.results.v_res.shape == (2, 162) + def test_add_component(self, caplog): self.setup_worst_case_time_series() index = self.edisgo.timeseries.timeindex From b6d2f9edde954e64e317e88548934112d78bf620 Mon Sep 17 00:00:00 2001 From: birgits Date: Wed, 31 Jan 2024 17:13:44 -0800 Subject: [PATCH 30/41] Limit pandas version --- setup.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index e28c14695..71e1618b6 100644 --- a/setup.py +++ b/setup.py @@ -44,7 +44,9 @@ def read(fname): "matplotlib >= 3.3.0", "multiprocess", "networkx >= 2.5.0", - "pandas >= 1.4.0", + # newer pandas versions don't work with specified sqlalchemy versions, but upgrading + # sqlalchemy leads to new errors.. 
should be fixed at some point + "pandas < 2.2.0", "plotly", "pydot", "pygeos", From 70c3264caa39bbe875231c9b88f8415b4c182d01 Mon Sep 17 00:00:00 2001 From: birgits Date: Wed, 31 Jan 2024 17:39:50 -0800 Subject: [PATCH 31/41] Add changes to whatsnew --- doc/whatsnew/v0-3-0.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/whatsnew/v0-3-0.rst b/doc/whatsnew/v0-3-0.rst index 3eeccb3c2..7cb375542 100644 --- a/doc/whatsnew/v0-3-0.rst +++ b/doc/whatsnew/v0-3-0.rst @@ -22,6 +22,7 @@ Changes * Added method to aggregate LV grid buses to station bus secondary side `#353 `_ * Adapted codebase to work with pandas 2.0 `#373 `_ * Added option to run reinforcement with reduced number of time steps `#379 `_ + (adapted in `#395 `_) * Added optimization method to determine dispatch of flexibilities that lead to minimal network expansion costs `#376 `_ * Added a new reinforcement method that separate lv grids when the overloading is very high `#380 `_ * Move function to assign feeder to Topology class and add methods to the Grid class to get information on the feeders `#360 `_ From 85b49f4b8d78fcf5840988885d76afba4a00fa5e Mon Sep 17 00:00:00 2001 From: birgits Date: Fri, 16 Feb 2024 10:19:20 -0800 Subject: [PATCH 32/41] Add logging information --- edisgo/tools/temporal_complexity_reduction.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/edisgo/tools/temporal_complexity_reduction.py b/edisgo/tools/temporal_complexity_reduction.py index 7938dbaf0..7d72d26b3 100644 --- a/edisgo/tools/temporal_complexity_reduction.py +++ b/edisgo/tools/temporal_complexity_reduction.py @@ -956,6 +956,12 @@ def get_most_critical_time_steps( f"{len(loading_scores)} time steps are exported." ) num_steps_loading = len(loading_scores) + elif num_steps_loading < len(loading_scores): + logger.info( + f"{num_steps_loading} of a total of {len(loading_scores)} relevant " + f"time steps for overloading issues are chosen for the selection " + f"of most critical time steps." 
+ ) steps = loading_scores[:num_steps_loading].index # Select most critical steps based on voltage violations @@ -973,6 +979,12 @@ def get_most_critical_time_steps( f"{len(voltage_scores)} time steps are exported." ) num_steps_voltage = len(voltage_scores) + elif num_steps_voltage < len(voltage_scores): + logger.info( + f"{num_steps_voltage} of a total of {len(voltage_scores)} relevant " + f"time steps for voltage issues are chosen for the selection " + f"of most critical time steps." + ) steps = steps.append(voltage_scores[:num_steps_voltage].index) if len(steps) == 0: From 9e0d85d068d9d69d482de069b029d817ec214868 Mon Sep 17 00:00:00 2001 From: birgits Date: Fri, 16 Feb 2024 11:03:35 -0800 Subject: [PATCH 33/41] Add ToDo for how function could be improved --- edisgo/tools/temporal_complexity_reduction.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/edisgo/tools/temporal_complexity_reduction.py b/edisgo/tools/temporal_complexity_reduction.py index 7d72d26b3..977f49073 100644 --- a/edisgo/tools/temporal_complexity_reduction.py +++ b/edisgo/tools/temporal_complexity_reduction.py @@ -44,7 +44,12 @@ def _scored_most_critical_loading( one component is maximally overloaded, and is sorted descending order. """ - + # ToDo The relative loading is used in this function to determine most critical + # time steps. While this makes sense to determine which lines are overloaded, it + # is not the best indicator for the weighting as it does not convey the number + # of additional lines needed to solve a problem. For that the number of parallel + # standard lines and transformers needed would be better. However, for now + # using the relative overloading as an estimation is okay. 
# Get current relative to allowed current relative_i_res = check_tech_constraints.components_relative_load(edisgo_obj) From c272fdae55b0fabe310364cff2d24f7d12383233 Mon Sep 17 00:00:00 2001 From: birgits Date: Fri, 16 Feb 2024 13:51:55 -0800 Subject: [PATCH 34/41] Restrict pandas version --- eDisGo_env.yml | 2 +- eDisGo_env_dev.yml | 2 +- setup.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/eDisGo_env.yml b/eDisGo_env.yml index f35301247..45b797e16 100644 --- a/eDisGo_env.yml +++ b/eDisGo_env.yml @@ -5,7 +5,7 @@ channels: dependencies: - python >= 3.8, < 3.10 - pip - - pandas >= 1.4 + - pandas >= 1.4, < 2.2.0 - conda-forge::fiona - conda-forge::geopy - conda-forge::geopandas diff --git a/eDisGo_env_dev.yml b/eDisGo_env_dev.yml index a59866094..ae86632ac 100644 --- a/eDisGo_env_dev.yml +++ b/eDisGo_env_dev.yml @@ -5,7 +5,7 @@ channels: dependencies: - python >= 3.8, < 3.10 - pip - - pandas >= 1.4 + - pandas >= 1.4, < 2.2.0 - conda-forge::fiona - conda-forge::geopy - conda-forge::geopandas diff --git a/setup.py b/setup.py index 71e1618b6..4f570f745 100644 --- a/setup.py +++ b/setup.py @@ -46,7 +46,7 @@ def read(fname): "networkx >= 2.5.0", # newer pandas versions don't work with specified sqlalchemy versions, but upgrading # sqlalchemy leads to new errors.. 
should be fixed at some point - "pandas < 2.2.0", + "pandas >= 1.4.0, < 2.2.0", "plotly", "pydot", "pygeos", From dfb71aec4ccf6da6f0ab87a8bdc196e135558515 Mon Sep 17 00:00:00 2001 From: birgits Date: Fri, 16 Feb 2024 14:30:45 -0800 Subject: [PATCH 35/41] Change links that are now redirected --- doc/quickstart.rst | 4 ++-- edisgo/tools/tools.py | 2 +- examples/edisgo_simple_example.ipynb | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/quickstart.rst b/doc/quickstart.rst index 21d38303b..4ce437c05 100644 --- a/doc/quickstart.rst +++ b/doc/quickstart.rst @@ -91,7 +91,7 @@ The steps required to set up HSL are also described in the Here is a short version for reference: First, you need to obtain an academic license for HSL Solvers. -Under https://www.hsl.rl.ac.uk/ipopt/ download the sources for Coin-HSL Full (Stable). +Under https://licences.stfc.ac.uk/product/coin-hsl download the sources for Coin-HSL Full (Stable). You will need to provide an institutional e-mail to gain access. Unpack the tar.gz: @@ -163,7 +163,7 @@ Beyond a running and up-to-date installation of eDisGo you need **grid topology data**. Currently synthetic grid data generated with the python project `Ding0 `_ is the only supported data source. You can retrieve data from -`Zenodo `_ +`Zenodo `_ (make sure you choose latest data) or check out the `Ding0 documentation `_ on how to generate grids yourself. diff --git a/edisgo/tools/tools.py b/edisgo/tools/tools.py index ead8e08d2..d05fe1b86 100644 --- a/edisgo/tools/tools.py +++ b/edisgo/tools/tools.py @@ -1119,7 +1119,7 @@ def reduce_memory_usage(df: pd.DataFrame, show_reduction: bool = False) -> pd.Da be reduced to a smaller data type. 
Source: - https://www.mikulskibartosz.name/how-to-reduce-memory-usage-in-pandas/ + https://mikulskibartosz.name/how-to-reduce-memory-usage-in-pandas Parameters ---------- diff --git a/examples/edisgo_simple_example.ipynb b/examples/edisgo_simple_example.ipynb index b0a68fc63..c7ee79ce1 100644 --- a/examples/edisgo_simple_example.ipynb +++ b/examples/edisgo_simple_example.ipynb @@ -112,7 +112,7 @@ "Currently, synthetic grid data generated with the python project\n", "[ding0](https://github.com/openego/ding0)\n", "is the only supported data source for distribution grid data. ding0 provides the grid topology data in the form of csv files, with separate files for buses, lines, loads, generators, etc. You can retrieve ding0 data from\n", - "[Zenodo](https://zenodo.org/record/890479)\n", + "[Zenodo](https://zenodo.org/records/890479)\n", "(make sure you choose latest data) or check out the\n", "[Ding0 documentation](https://dingo.readthedocs.io/en/dev/usage_details.html#ding0-examples)\n", "on how to generate grids yourself. A ding0 example grid can be viewed [here](https://github.com/openego/eDisGo/tree/dev/tests/data/ding0_test_network_2). It is possible to provide your own grid data if it is in the same format as the ding0 grid data. \n", From 7ec2ec2e54d2409f0430001d5985aeb84ef7cedc Mon Sep 17 00:00:00 2001 From: birgits Date: Fri, 16 Feb 2024 14:36:17 -0800 Subject: [PATCH 36/41] Only conduct reinforcement for previously non-converging time steps if grid has been reinforced in between --- edisgo/flex_opt/reinforce_grid.py | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/edisgo/flex_opt/reinforce_grid.py b/edisgo/flex_opt/reinforce_grid.py index a8be78421..fd2896d31 100644 --- a/edisgo/flex_opt/reinforce_grid.py +++ b/edisgo/flex_opt/reinforce_grid.py @@ -685,7 +685,7 @@ def catch_convergence_reinforce_grid( Reinforcement strategy to reinforce grids with non-converging time steps. 
First, conducts a grid reinforcement with only converging time steps. - Afterwards, tries to run reinforcement with all time steps that did not converge + Afterward, tries to run reinforcement with all time steps that did not converge in the beginning. At last, if there are still time steps that do not converge, the feed-in and load time series are iteratively scaled and the grid reinforced, starting with a low grid load and scaling-up the time series until the original @@ -769,15 +769,17 @@ def reinforce(): troubleshooting_mode = troubleshooting_mode_set reinforce() - # Run reinforcement for time steps that did not converge after initial reinforcement - if not non_converging_timesteps.empty: - logger.info( - "Run reinforcement for time steps that did not converge after initial " - "reinforcement." - ) - selected_timesteps = non_converging_timesteps - troubleshooting_mode = False - converged = reinforce() + # Run reinforcement for time steps that did not converge after initial + # reinforcement (only needs to be done when grid was previously reinforced using + # converged time steps, which is why it is within that if-statement) + if not non_converging_timesteps.empty: + logger.info( + "Run reinforcement for time steps that did not converge after initial " + "reinforcement." 
+ ) + selected_timesteps = non_converging_timesteps + troubleshooting_mode = False + converged = reinforce() if converged: return edisgo.results From a1ac8c8e74c6a6a89e75a57b3a7e113be3de816a Mon Sep 17 00:00:00 2001 From: birgits Date: Mon, 19 Feb 2024 13:08:25 -0800 Subject: [PATCH 37/41] Use maximum deviation in feeder rather than weighting all deviations equally --- edisgo/tools/temporal_complexity_reduction.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/edisgo/tools/temporal_complexity_reduction.py b/edisgo/tools/temporal_complexity_reduction.py index 977f49073..973d23b36 100644 --- a/edisgo/tools/temporal_complexity_reduction.py +++ b/edisgo/tools/temporal_complexity_reduction.py @@ -320,9 +320,8 @@ def _scored_most_critical_voltage_issues_time_interval( voltage_diff_feeder = voltage_diff.copy() voltage_diff_feeder.columns = columns voltage_diff_feeder = ( - voltage_diff.transpose().reset_index().groupby(by="Bus").sum().transpose() + voltage_diff.transpose().reset_index().groupby(by="Bus").max().transpose() ) - voltage_diff_feeder[voltage_diff_feeder != 0] = 1 if weight_by_costs: # get costs per feeder @@ -726,14 +725,14 @@ def get_most_critical_time_intervals( The costs don't convey the actual costs but are an estimation, as the real number of parallel lines needed is not determined and the whole feeder length is used instead of the length over two-thirds of the feeder. - If False, the severity of each feeder's voltage issue is set to be the same. + If False, only the maximum voltage deviation in the feeder is used to determine + the most relevant time intervals. In case of overloading issues: - If True, the overloading of each line is multiplied by - the respective grid expansion costs of that line including costs for earth work - and one new line. + If True, the overloading of each line is multiplied by the respective grid + expansion costs of that line including costs for earth work and one new line. 
The costs don't convey the actual costs but are an estimation, as - the discrete needed number of parallel lines is not considered. + the discrete number of needed parallel lines is not considered. If False, only the relative overloading is used to determine the most relevant time intervals. From b99abba45eb1f514d7fba62877e873720c04be3b Mon Sep 17 00:00:00 2001 From: birgits Date: Mon, 19 Feb 2024 15:25:25 -0800 Subject: [PATCH 38/41] Add ToDo --- tests/test_edisgo.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_edisgo.py b/tests/test_edisgo.py index bac0789cb..1f09e62d9 100755 --- a/tests/test_edisgo.py +++ b/tests/test_edisgo.py @@ -430,6 +430,7 @@ def test_analyze(self, caplog): assert "Current fraction in iterative process: 1.0." in caplog.text def test_reinforce(self): + # ToDo add tests to check content of equipment_changes # ###################### test with default settings ########################## self.setup_worst_case_time_series() results = self.edisgo.reinforce() From f04bc2fa18f153b46d2a6a6a8e78ddb56dfe8494 Mon Sep 17 00:00:00 2001 From: birgits Date: Mon, 19 Feb 2024 15:38:23 -0800 Subject: [PATCH 39/41] Change default for use_troubleshooting_mode as True is not useful in grid reinforcement --- edisgo/edisgo.py | 14 ++++++++++---- edisgo/flex_opt/reinforce_grid.py | 11 ++--------- 2 files changed, 12 insertions(+), 13 deletions(-) diff --git a/edisgo/edisgo.py b/edisgo/edisgo.py index 577f3ec1e..dca1660b2 100755 --- a/edisgo/edisgo.py +++ b/edisgo/edisgo.py @@ -1316,13 +1316,19 @@ def reinforce( default is 1.0, in which case all most critical time steps are selected. Default: 1.0. use_troubleshooting_mode : bool - In case `reduced_analysis` is set to True, this parameter can be - used to specify how to handle non-convergence issues in the power flow - analysis. 
If set to True, non-convergence issues are tried to be + In case `reduced_analysis` is set to True, this parameter can be used to + specify how to handle non-convergence issues when determining the most + critical time steps. If set to True, non-convergence issues are tried to be circumvented by reducing load and feed-in until the power flow converges. The most critical time steps are then determined based on the power flow results with the reduced load and feed-in. If False, an error will be - raised in case time steps do not converge. Default: True. + raised in case time steps do not converge. + Setting this to True doesn't make sense for the grid reinforcement as the + troubleshooting mode is only used when determining the most critical time + steps not when running a power flow analysis to determine grid reinforcement + needs. To handle non-convergence in the grid reinforcement set parameter + `catch_convergence_problems` to True. + Default: False. run_initial_analyze : bool In case `reduced_analysis` is set to True, this parameter can be used to specify whether to run an initial analyze to determine most diff --git a/edisgo/flex_opt/reinforce_grid.py b/edisgo/flex_opt/reinforce_grid.py index fd2896d31..e56eb58b4 100644 --- a/edisgo/flex_opt/reinforce_grid.py +++ b/edisgo/flex_opt/reinforce_grid.py @@ -105,7 +105,7 @@ def reinforce_grid( In case `reduced_analysis` is set to True, this parameter can be used to specify how to handle non-convergence issues in the power flow analysis. See parameter `use_troubleshooting_mode` in function :attr:`~.EDisGo.reinforce` - for more information. Default: True. + for more information. Default: False. 
run_initial_analyze : bool In case `reduced_analysis` is set to True, this parameter can be used to specify whether to run an initial analyze to determine most @@ -188,7 +188,7 @@ def reinforce_grid( num_steps_loading=kwargs.get("num_steps_loading", None), num_steps_voltage=kwargs.get("num_steps_voltage", None), percentage=kwargs.get("percentage", 1.0), - use_troubleshooting_mode=kwargs.get("use_troubleshooting_mode", True), + use_troubleshooting_mode=kwargs.get("use_troubleshooting_mode", False), run_initial_analyze=kwargs.get("run_initial_analyze", True), weight_by_costs=kwargs.get("weight_by_costs", False), ) @@ -712,7 +712,6 @@ def reinforce(): edisgo, timesteps_pfa=selected_timesteps, scale_timeseries=set_scaling_factor, - use_troubleshooting_mode=troubleshooting_mode, **kwargs, ) converged = True @@ -728,13 +727,11 @@ def reinforce(): # Get the timesteps from kwargs and then remove it to set it later manually timesteps_pfa = kwargs.pop("timesteps_pfa", None) selected_timesteps = timesteps_pfa - troubleshooting_mode_set = kwargs.pop("troubleshooting_mode", True) # Initial try logger.info("Run initial reinforcement.") set_scaling_factor = 1.0 iteration = 0 - troubleshooting_mode = False converged = reinforce() if converged is False: logger.info("Initial reinforcement did not succeed.") @@ -766,7 +763,6 @@ def reinforce(): "reinforcement." ) selected_timesteps = converging_timesteps - troubleshooting_mode = troubleshooting_mode_set reinforce() # Run reinforcement for time steps that did not converge after initial @@ -778,7 +774,6 @@ def reinforce(): "reinforcement." 
) selected_timesteps = non_converging_timesteps - troubleshooting_mode = False converged = reinforce() if converged: @@ -812,7 +807,6 @@ def reinforce(): ) + highest_converged_scaling_factor logger.info(f"Try reinforcement with {set_scaling_factor=} at {iteration=}") - troubleshooting_mode = False converged = reinforce() if converged: logger.info( @@ -833,7 +827,6 @@ def reinforce(): if set_scaling_factor != 1: logger.info("Run final reinforcement.") selected_timesteps = timesteps_pfa - troubleshooting_mode = False reinforce() return edisgo.results From 0657cd10a7c0c4e2d2ca105d396f39ab54077079 Mon Sep 17 00:00:00 2001 From: birgits Date: Mon, 19 Feb 2024 15:54:55 -0800 Subject: [PATCH 40/41] Try fixing failing link check github action --- doc/conf.py | 2 +- edisgo/io/db.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index 32f385cbc..17724e9dc 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -116,7 +116,7 @@ def setup(sphinx): "networkx.%s", ), "sqlalchemy": ( - "http://docs.sqlalchemy.org/en/latest/core/connections.html#%s", + "https://docs.sqlalchemy.org/en/20/core/connections.html#%s", "sqlalchemy.%s", ), "numpy": ( diff --git a/edisgo/io/db.py b/edisgo/io/db.py index 02d1320e8..19dfabcde 100644 --- a/edisgo/io/db.py +++ b/edisgo/io/db.py @@ -163,7 +163,7 @@ def engine(path: Path | str, ssh: bool = False) -> Engine: Returns ------- - sqlalchemy.engine.base.Engine + :sqlalchemy:`sqlalchemy.Engine` Database engine """ From 79b5a312eeffb5f16596d474e14ba1b5a987c5c8 Mon Sep 17 00:00:00 2001 From: birgits Date: Mon, 19 Feb 2024 16:21:28 -0800 Subject: [PATCH 41/41] Ignore stackoverflow links when checking links Stackoverflow links for some reason now fail when checked with github actions, even though the link is correct. They are therefore for now ignored. 
--- doc/conf.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/doc/conf.py b/doc/conf.py index 17724e9dc..851a3fb6d 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -116,7 +116,7 @@ def setup(sphinx): "networkx.%s", ), "sqlalchemy": ( - "https://docs.sqlalchemy.org/en/20/core/connections.html#%s", + "https://docs.sqlalchemy.org/en/latest/core/connections.html#%s", "sqlalchemy.%s", ), "numpy": ( @@ -134,6 +134,11 @@ def setup(sphinx): "plotly.%s", ), } +# ignore the following external links when checking the links +# stackoverflow is listed here because for some reason the link check fails for these +# in the github action, even though the link is correct +linkcheck_ignore = [r"https://stackoverflow.com*"] + # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"]