Merge pull request #75 from DHI/feature/merge_res1d
Merging of regular and LTS extreme/periodic res1d files
gedaskir authored Jan 25, 2024
2 parents d41a0f4 + fcecbd7 commit 10532a7
Showing 20 changed files with 969 additions and 16 deletions.
3 changes: 3 additions & 0 deletions .gitignore
@@ -157,3 +157,6 @@ tests/testdata/discharge_in_structure.extract.txt
tests/testdata/w_right_discharge_in_structure.extract.csv
tests/testdata/w_right_discharge_in_structure.extract.dfs0
tests/testdata/w_right_discharge_in_structure.extract.txt
tests/testdata/lts_event_statistics.merged.res1d
tests/testdata/lts_monthly_statistics.merged.res1d
tests/testdata/catchment_merge_c.res1d
4 changes: 2 additions & 2 deletions CHANGELOG.md
@@ -7,11 +7,11 @@
- Introduced TimeSeriesId to uniquely identify results.
- Read methods now include a 'column_mode' parameter that enables multiindex reading (e.g. column_mode='compact').
- Added more type hints to improve IDE auto-completion and docstring peeking.

- Merging of regular and LTS extreme/periodic res1d files.

### Fixed

-
-

### Changed

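A hedged sketch of the 'column_mode' reading mentioned in the changelog above (the file name is hypothetical):

    from mikeio1d.res1d import Res1D

    res1d = Res1D("network.res1d")          # hypothetical result file
    df = res1d.read(column_mode="compact")  # columns become a pandas MultiIndex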
38 changes: 38 additions & 0 deletions mikeio1d/res1d.py
@@ -28,6 +28,7 @@
from .result_extractor import ExtractorCreator
from .result_extractor import ExtractorOutputFileType
from .result_network import ResultNetwork
from .result_reader_writer import ResultMerger
from .result_reader_writer import ResultReaderCreator
from .result_reader_writer import ResultReaderType
from .result_reader_writer import ResultWriter
@@ -464,3 +465,40 @@ def to_txt(
    ):
        """Extract to txt file."""
        self.extract(file_path, queries, time_step_skipping_number, ExtractorOutputFileType.TXT)

    @staticmethod
    def merge(file_names: List[str] | List[Res1D], merged_file_name: str):
        """
        Merge res1d files.

        It is possible to merge three kinds of result files:

        * Regular res1d (HD, RR, etc.)
        * LTS extreme statistics
        * LTS chronological statistics

        For regular res1d files, the requirement is that the simulation start time
        of the second file matches the simulation end time of the first file
        (the same principle applies to subsequent files).

        For LTS result files, a meaningful merged result file is obtained when
        the simulation periods of the files do not overlap.

        Parameters
        ----------
        file_names : list of str or Res1D objects
            List of res1d file names to merge.
        merged_file_name : str
            File name of the res1d file to store the merged data.
        """
        file_names = Res1D._convert_res1d_to_str_for_file_names(file_names)
        result_merger = ResultMerger(file_names)
        result_merger.merge(merged_file_name)

    @staticmethod
    def _convert_res1d_to_str_for_file_names(file_names: List[str] | List[Res1D]) -> List[str]:
        return [
            entry.file_path if isinstance(entry, Res1D) else entry for entry in file_names
        ]
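A minimal usage sketch of the new merge API (file names hypothetical; for regular res1d files the inputs must follow each other in time as described in the docstring):

    from mikeio1d.res1d import Res1D

    # Merge two consecutive simulation results into one file.
    Res1D.merge(["sim_january.res1d", "sim_february.res1d"], "sim_merged.res1d")

    # Res1D objects can be passed as well; their file_path attribute is used.
    res_a = Res1D("sim_january.res1d")
    res_b = Res1D("sim_february.res1d")
    Res1D.merge([res_a, res_b], "sim_merged.res1d")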
2 changes: 2 additions & 0 deletions mikeio1d/result_reader_writer/__init__.py
@@ -5,3 +5,5 @@
from .result_reader_copier import ResultReaderCopier
from .result_reader_query import ResultReaderQuery
from .result_writer import ResultWriter

from .result_merger import ResultMerger
44 changes: 44 additions & 0 deletions mikeio1d/result_reader_writer/result_merger.py
@@ -0,0 +1,44 @@
from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from typing import List

from System.Collections.Generic import List as DotNetList
from System import String

from DHI.Mike1D.MikeIO import ResultMerger as Res1DResultMerger


class ResultMerger:
    """
    Wrapper class for merging res1d result files.

    Parameters
    ----------
    file_names : list of str
        List of res1d file names to merge.
    """

    def __init__(self, file_names: List[str]):
        self.file_names = file_names

    def merge(self, merged_file_name: str):
        """
        Merges the data from the files in file_names into a single file
        specified by merged_file_name.

        Parameters
        ----------
        merged_file_name : str
            File name of the res1d file to store the merged data.
        """
        file_names_dotnet = self._get_file_names_dotnet()
        Res1DResultMerger.Merge(file_names_dotnet, merged_file_name)

    def _get_file_names_dotnet(self):
        """Converts the Python list of file names to a .NET List[String]."""
        file_names_dotnet = DotNetList[String]()
        for file_name in self.file_names:
            file_names_dotnet.Add(file_name)
        return file_names_dotnet
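The wrapper can also be driven directly; a hedged sketch (file names hypothetical):

    from mikeio1d.result_reader_writer import ResultMerger

    merger = ResultMerger(["sim_january.res1d", "sim_february.res1d"])
    merger.merge("sim_merged.res1d")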
25 changes: 25 additions & 0 deletions tests/test_res1d_catchments.py
@@ -1,6 +1,7 @@
import os
import pytest
import numpy as np
import pandas as pd

from mikeio1d.custom_exceptions import NoDataForQuery, InvalidQuantity
from mikeio1d.res1d import Res1D
@@ -17,6 +18,13 @@ def test_file_path():
    return os.path.join(test_folder_path, "testdata", "catchments.res1d")


@pytest.fixture
def test_file_path_for_merging():
    test_folder_path = os.path.dirname(os.path.abspath(__file__))
    # File taken from TestSuite: RainfallRunoff\SWQ\DemoSWQ1BaseMixedRRAD.res1d
    return os.path.join(test_folder_path, "testdata", "catchment_merge.res1d")


@pytest.fixture(params=[True, False])
def test_file(test_file_path, request):
    return Res1D(test_file_path, lazy_load=request.param)
@@ -220,3 +228,20 @@ def test_catchment_static_attributes(res1d_catchments):
    catchment.id
    catchment.area
    catchment.type


def test_res1d_merging(test_file_path_for_merging):
    file_a = test_file_path_for_merging.replace(".res1d", "_a.res1d")
    file_b = test_file_path_for_merging.replace(".res1d", "_b.res1d")
    file_c = test_file_path_for_merging.replace(".res1d", "_c.res1d")

    res1d_a = Res1D(file_a)
    res1d_b = Res1D(file_b)
    Res1D.merge([res1d_a, res1d_b], file_c)

    df_a = res1d_a.read()
    # Drop the first time step of file B, which coincides with the last time step of file A.
    df_b = res1d_b.read().tail(-1)
    df_c = Res1D(file_c).read()

    # The merged file must contain the concatenated data of both inputs.
    df_merged = pd.concat([df_a, df_b])
    pd.testing.assert_frame_equal(df_merged, df_c)
25 changes: 25 additions & 0 deletions tests/test_res1d_lts_chronological.py
@@ -253,3 +253,28 @@ def test_global_data_attributes(test_file):

    actual_max = round(df["DischargeIntegratedMonthlyTotalOutflow"].max(), 3)
    assert pytest.approx(actual_max) == 5971.352


def test_res1d_merging_same_file(test_file_path):
    # Use the same file twice to create a merged LTS statistics file
    file_names = [test_file_path, test_file_path]
    merged_file_name = test_file_path.replace(".res1d", ".merged.res1d")
    Res1D.merge(file_names, merged_file_name)

    # Read the merged file
    res1d = Res1D(merged_file_name)

    # Test one reach location for particular values
    df_reach = res1d.reaches.B4_1320l1.m_101_251.DischargeIntegratedMonthly.read()
    assert pytest.approx(np.round(df_reach.max(), 3)) == 2 * 1215.915

    df_reach_count = res1d.reaches.B4_1320l1.m_101_251.DischargeIntegratedMonthlyCount.read()
    assert pytest.approx(np.round(df_reach_count.max(), 3)) == 2 * 3

    df_reach_duration = res1d.reaches.B4_1320l1.m_101_251.DischargeIntegratedMonthlyDuration.read()
    assert pytest.approx(np.round(df_reach_duration.max(), 3)) == 2 * 10.703

    res1d_ori = Res1D(test_file_path)
    df_ori = res1d_ori.read()
    df_merged = res1d.read()
    # Merging a chronological statistics file with itself doubles every accumulated value.
    pd.testing.assert_frame_equal(2 * df_ori, df_merged)
46 changes: 45 additions & 1 deletion tests/test_res1d_lts_events.py
@@ -140,7 +140,10 @@ def test_read_reach(test_file, quantity, reach_id, chainage, expected_max):

@pytest.mark.parametrize(
    "quantity,node_id,expected_max",
    [("WaterLevelMaximum", "B4.1320", 17.511), ("WaterLevelMaximum", "B4.1480", 16.957)],
    [
        ("WaterLevelMaximum", "B4.1320", 17.511),
        ("WaterLevelMaximum", "B4.1480", 16.957),
    ],
)
def test_read_node(test_file, quantity, node_id, expected_max):
    data = test_file.query.GetNodeValues(node_id, quantity)
@@ -215,3 +218,44 @@ def test_res1d_filter_readall(test_file_path, helpers):

    # Release the .NET object
    res1d = None


def test_res1d_merging_same_file(test_file_path):
    # Use the same file twice to create a merged LTS statistics file
    file_names = [test_file_path, test_file_path]
    merged_file_name = test_file_path.replace(".res1d", ".merged.res1d")
    Res1D.merge(file_names, merged_file_name)

    # Read the merged file
    res1d = Res1D(merged_file_name)

    # Test one node location for particular values
    df_node = res1d.nodes.B4_1320.WaterLevelMaximum.read()
    b4_1320_event1 = df_node.iloc[0].iloc[0]
    b4_1320_event2 = df_node.iloc[1].iloc[0]
    assert b4_1320_event1 == b4_1320_event2
    assert pytest.approx(np.round(b4_1320_event1, 3)) == 17.511

    df_node_time = res1d.nodes.B4_1320.WaterLevelMaximumTime.read()
    b4_1320_time1 = df_node_time.iloc[0]
    b4_1320_time2 = df_node_time.iloc[1]
    assert (b4_1320_time1 == b4_1320_time2).values[0]

    # Test one reach location for particular values
    df_reach = res1d.reaches.B4_1491l1.m_216.DischargeMaximum.read()
    b4_1491l1_event1 = df_reach.iloc[0].iloc[0]
    b4_1491l1_event2 = df_reach.iloc[1].iloc[0]
    assert b4_1491l1_event1 == b4_1491l1_event2
    assert pytest.approx(np.round(b4_1491l1_event1, 3)) == 0.151

    df_reach_time = res1d.reaches.B4_1491l1.m_216.DischargeMaximumTime.read()
    b4_1491l1_time1 = df_reach_time.iloc[0]
    b4_1491l1_time2 = df_reach_time.iloc[1]
    assert (b4_1491l1_time1 == b4_1491l1_time2).values[0]

    # Validate all merged events. Every event now needs to appear twice.
    df = res1d.read_all()
    # TODO: Maybe it is possible to vectorize this check.
    for col in df:
        for i in range(0, len(df[col]), 2):
            assert df[col].iloc[i] == df[col].iloc[i + 1]
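Regarding the TODO above, a possible vectorized form of the pairwise check (an untested sketch; it assumes the merged frame has an even number of rows):

    # Group consecutive event rows into pairs and compare the two rows of each pair.
    values = df.to_numpy()
    paired = values.reshape(len(df) // 2, 2, -1)
    assert (paired[:, 0, :] == paired[:, 1, :]).all()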
2 changes: 1 addition & 1 deletion tests/test_res1d_network_river.py
@@ -364,7 +364,7 @@ def test_result_quantity_methods(test_file):
    discharge_in_structure = res1d.structures.W_right.DischargeInStructure

    df = discharge_in_structure.to_dataframe()
    max_value = round(df.max()[0], 3)
    max_value = round(df.max().iloc[0], 3)
    assert pytest.approx(max_value) == 11.018

    # Test the calling of methods
Binary file added tests/testdata/catchment_merge_a.res1d
Binary file added tests/testdata/catchment_merge_b.res1d
12 changes: 12 additions & 0 deletions util/DHI.Mike1D.MikeIO/DHI.Mike1D.MikeIO.csproj
@@ -5,6 +5,18 @@
  </PropertyGroup>

  <ItemGroup>
    <Reference Include="DHI.Generic.MikeZero.EUM">
      <HintPath>..\..\mikeio1d\bin\DHI.Generic.MikeZero.EUM.dll</HintPath>
    </Reference>
    <Reference Include="DHI.Generic.MikeZero.DFS">
      <HintPath>..\..\mikeio1d\bin\DHI.Generic.MikeZero.DFS.dll</HintPath>
    </Reference>
    <Reference Include="DHI.corlib">
      <HintPath>..\..\mikeio1d\bin\DHI.corlib.dll</HintPath>
    </Reference>
    <Reference Include="DHI.Mike1D.Generic">
      <HintPath>..\..\mikeio1d\bin\DHI.Mike1D.Generic.dll</HintPath>
    </Reference>
    <Reference Include="DHI.Mike1D.ResultDataAccess">
      <HintPath>..\..\mikeio1d\bin\DHI.Mike1D.ResultDataAccess.dll</HintPath>
    </Reference>
62 changes: 61 additions & 1 deletion util/DHI.Mike1D.MikeIO/DataEntry.cs
@@ -1,3 +1,5 @@
using System;
using DHI.Mike1D.Generic;
using DHI.Mike1D.ResultDataAccess;

namespace DHI.Mike1D.MikeIO
@@ -7,6 +9,8 @@ namespace DHI.Mike1D.MikeIO
  /// </summary>
  public class DataEntry
  {
    /// <summary> ID of the data entry. </summary>
    public DataEntryId EntryId { get; set; }

    /// <inheritdoc cref="IDataItem"/>
    public IDataItem DataItem { get; set; }
@@ -15,11 +19,67 @@ public class DataEntry
    /// </summary>
    public int ElementIndex { get; set; }

    /// <inheritdoc />
    /// <inheritdoc cref="DataEntry" />
    public DataEntry(IDataItem dataItem, int elementIndex)
    {
      DataItem = dataItem;
      ElementIndex = elementIndex;
      EntryId = new DataEntryId(
        dataItem.Quantity.Id,
        dataItem.ItemTypeGroup,
        dataItem.NumberWithinGroup,
        ElementIndex);
    }

    /// <summary>
    /// Sets the value for the data entry at a given time step index.
    /// <para>
    /// The data item time data is expanded if the index is larger than
    /// the number of time steps.
    /// </para>
    /// </summary>
    public void SetValue(int timeStepIndex, double value)
    {
      int numberOfTimeSteps = DataItem.TimeData.NumberOfTimeSteps;
      if (numberOfTimeSteps <= timeStepIndex)
        ExpandTimeData(timeStepIndex - numberOfTimeSteps + 1);

      DataItem.TimeData.SetValue(timeStepIndex, ElementIndex, (float) value);
    }

    /// <summary>
    /// Expands time data by a given expansion size.
    /// </summary>
    public void ExpandTimeData(int expansionSize = 1)
    {
      var elementDeleteValues = new float[DataItem.NumberOfElements];
      for (int i = 0; i < DataItem.NumberOfElements; i++)
        elementDeleteValues[i] = (float) Constants.DOUBLE_DELETE_VALUE;

      for (int i = 0; i < expansionSize; i++)
        DataItem.TimeData.Add(elementDeleteValues);
    }
  }

  /// <summary>
  /// Tuple ID for a DataEntry
  /// </summary>
  public class DataEntryId : Tuple<string, ItemTypeGroup, int, int>
  {
    /// <inheritdoc cref="DataEntryId" />
    public DataEntryId(
      string quantityId,
      ItemTypeGroup itemTypeGroup,
      int numberWithinGroup,
      int elementIndex) : base(quantityId, itemTypeGroup, numberWithinGroup, elementIndex)
    {
    }

    /// <inheritdoc cref="DataEntryId" />
    public DataEntryId(
      string quantityId,
      DataEntryId entryId) : base(quantityId, entryId.Item2, entryId.Item3, entryId.Item4)
    {
    }
  }
}