Skip to content

Commit

Permalink
Merge pull request #70 from DHI/bug/result_data_filters
Browse files Browse the repository at this point in the history
Fix for res1d location filtering.
  • Loading branch information
ryan-kipawa authored Dec 14, 2023
2 parents 1afb7b2 + ed6f266 commit 38d133f
Show file tree
Hide file tree
Showing 27 changed files with 289 additions and 59 deletions.
16 changes: 16 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,24 @@

### Added

### Fixed

### Changed

## 0.4.1 - 2023-12-14

### Added

- mikenet module for easier work with DHI .NET libraries.

### Fixed

- Res1D filtering for reaches inside MIKE 1D itself.

### Changed

- Use MIKE 1D NuGet packages v22.0.3 and v22.0.4 for DHI.Mike1D.ResultDataAccess

## [0.4]

### Added
Expand Down
2 changes: 1 addition & 1 deletion mikeio1d/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = "0.4.0"
__version__ = "0.4.1"

if "64" not in architecture()[0]:
raise Exception("This library has not been tested for a 32 bit system.")
Expand Down
10 changes: 8 additions & 2 deletions scripts/nuget_retriever.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ class NuGetRetriever:
bin_dir_name = r"mikeio1d\bin"

# Default version of DHI NuGet packages to retrieve
version_default = "21.0.0"
version_default = "22.0.3"

# DHI NuGet packages to install
package_names = [
Expand All @@ -49,6 +49,7 @@ class NuGetRetriever:
"DHI.EUM",
"DHI.PFS",
"DHI.Projections",
"DHI.corlib",
"DHI.Mike1D.CrossSectionModule",
"DHI.Mike1D.HDParameterDataAccess",
"DHI.Mike1D.Generic",
Expand All @@ -57,7 +58,12 @@ class NuGetRetriever:
"NetTopologySuite",
]

version_map = {"GeoAPI": "1.7.4", "NetTopologySuite": "2.0.0"}
version_map = {
"DHI.corlib": "1.0.0",
"DHI.Mike1D.ResultDataAccess": "22.0.4",
"GeoAPI": "1.7.4",
"NetTopologySuite": "2.0.0",
}

# Builds to include
include_builds = ["netstandard2.0", "net45", "net47", "win-x64"]
Expand Down
4 changes: 2 additions & 2 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@

setuptools.setup(
name="mikeio1d",
version="0.4.0",
version="0.4.1",
install_requires=[
'pythonnet<=2.5.2; python_version < "3.7.0"',
'pythonnet>=3.0.0a2; python_version >= "3.7.0"',
Expand All @@ -41,7 +41,7 @@
"matplotlib",
"jupyterlab",
],
"test": ["pytest", "matplotlib"],
"test": ["pytest", "matplotlib", "pyarrow"],
},
author="Gediminas Kirsanskas",
author_email="[email protected]",
Expand Down
40 changes: 40 additions & 0 deletions tests/conftest.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,8 @@
import pytest

from pandas.testing import assert_index_equal
from pandas.testing import assert_series_equal

from mikeio1d.res1d import Res1D
from mikeio1d.result_network import ResultNode
from mikeio1d.result_network import ResultCatchment
Expand All @@ -8,6 +11,43 @@
from .testdata import testdata


class Helpers:
    """
    Class containing helper methods for performing tests.
    """

    @staticmethod
    def assert_shared_columns_equal(df_ref, df):
        """
        Compares columns in df to the ones in df_ref.

        Note that df_ref typically has more columns than df.
        Comparison is performed only on the columns present in df;
        columns whose difference cannot be reduced to a float
        (non-numeric dtypes, duplicated labels) are skipped.

        Raises AssertionError when the indexes differ or a shared
        numeric column differs beyond pytest.approx tolerance.
        """
        assert_index_equal(df_ref.index, df.index)
        for col in df:
            # TODO: Replace with assert_series_equal(df[col], df_ref[col]) - this fails now since columns are not guaranteed unique
            diff = (df[col] - df_ref[col]).abs().sum()

            # TODO: Handle cases of different types than float
            # Only TypeError/ValueError can come out of float(); a bare
            # except here would also swallow KeyboardInterrupt/SystemExit.
            try:
                diff = float(diff)
            except (TypeError, ValueError):
                continue

            assert pytest.approx(diff) == 0.0


@pytest.fixture
def helpers():
    """Fixture exposing the Helpers utility class to tests."""
    return Helpers


@pytest.fixture()
def flow_split_file_path():
    """Fixture returning the path to the FlowSplit .res1d test data file."""
    return testdata.FlowSplit_res1d


@pytest.fixture()
def res1d_network():
    """Fixture returning a Res1D object opened on the Network .res1d test data file."""
    return Res1D(testdata.Network_res1d)
Expand Down
26 changes: 26 additions & 0 deletions tests/test_core.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
import pytest
from .testdata import testdata

from pandas.testing import assert_frame_equal

from mikeio1d import Res1D


def testdata_name():
    """Return the names of all fields defined on the testdata dataclass instance."""
    import dataclasses

    # dataclasses.fields() yields the names directly and in declaration order;
    # asdict() would recursively deep-copy every value just to read the keys.
    return [field.name for field in dataclasses.fields(testdata)]


@pytest.mark.parametrize("extension", [".res1d", ".res", ".resx", ".out"])
def test_mikeio1d_generates_expected_dataframe_for_filetype(extension):
    """For every test data file with the given extension, Res1D.read()
    must reproduce the stored expected DataFrame."""
    matching_names = (
        name for name in testdata_name() if getattr(testdata, name).endswith(extension)
    )
    for name in matching_names:
        file_path = getattr(testdata, name)
        df = Res1D(file_path).read()
        # TODO: Remove this when column names are guaranteed unique
        df = df.loc[:, ~df.columns.duplicated()]
        df_expected = testdata.get_expected_dataframe(name)
        assert_frame_equal(df, df_expected)
26 changes: 19 additions & 7 deletions tests/test_epanet_res_network.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ def test_file(test_file_path, request):
def test_read(test_file):
df = test_file.read()
assert len(df) == 25
# TODO: assert not df.columns.duplicated().any() - add this, but it fails since columns are not guaranteed unique


def test_quantities(test_file):
Expand All @@ -36,7 +37,7 @@ def test_repr(test_file):
"<mikeio1d.Res1D>\n"
+ "Start time: 2022-10-13 00:00:00\n"
+ "End time: 2022-10-14 00:00:00\n"
"# Timesteps: 25\n"
+ "# Timesteps: 25\n"
+ "# Catchments: 0\n"
+ "# Nodes: 11\n"
+ "# Reaches: 13\n"
Expand Down Expand Up @@ -177,14 +178,21 @@ def test_dotnet_methods(test_file):
assert pytest.approx(77.8665) == epanet_res.query.GetReachSumValues("11", "Flow")[0]


def test_epanet_res_filter(test_file_path):
def test_epanet_res_filter(test_file_path, helpers):
nodes = ["10", "11"]
reaches = ["11"]
epanet_res = Res1D(test_file_path, nodes=nodes, reaches=reaches)

epanet_res.read(QueryDataReach("Flow", "10"))
epanet_res.read(QueryDataNode("Pressure", "10"))
epanet_res.read(QueryDataNode("Pressure", "11"))
df_flow_10 = epanet_res.read(QueryDataReach("Flow", "10"))
df_pressures_10 = epanet_res.read(QueryDataNode("Pressure", "10"))
df_pressure_11 = epanet_res.read(QueryDataNode("Pressure", "11"))

epanet_res_full = Res1D(test_file_path)
df_full = epanet_res_full.read()

helpers.assert_shared_columns_equal(df_full, df_flow_10)
helpers.assert_shared_columns_equal(df_full, df_pressures_10)
helpers.assert_shared_columns_equal(df_full, df_pressure_11)

# Currently Mike1D raises System.ArgumentOutOfRangeException when requesting location not included by filter
# This should be fixed in Mike1D to raise more meaningful Mike1DException
Expand All @@ -195,10 +203,14 @@ def test_epanet_res_filter(test_file_path):
assert epanet_res.read(QueryDataNode("Pressure", "10xyz"))


def test_epanet_res_filter_readall(test_file_path):
def test_epanet_res_filter_readall(test_file_path, helpers):
# Make sure read all can be used with filters
nodes = ["10", "11"]
reaches = ["11"]
epanet_res = Res1D(test_file_path, nodes=nodes, reaches=reaches)
df = epanet_res.read()

epanet_res_full = Res1D(test_file_path)
df_full = epanet_res_full.read()

epanet_res.read()
helpers.assert_shared_columns_equal(df_full, df)
26 changes: 19 additions & 7 deletions tests/test_epanet_resx_network.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ def test_file(test_file_path, request):
def test_read(test_file):
df = test_file.read()
assert len(df) == 25
# TODO: assert not df.columns.duplicated().any() - add this, but it fails since columns are not guaranteed unique


def test_quantities(test_file):
Expand All @@ -36,7 +37,7 @@ def test_repr(test_file):
"<mikeio1d.Res1D>\n"
+ "Start time: 2022-10-13 00:00:00\n"
+ "End time: 2022-10-14 00:00:00\n"
"# Timesteps: 25\n"
+ "# Timesteps: 25\n"
+ "# Catchments: 0\n"
+ "# Nodes: 2\n"
+ "# Reaches: 1\n"
Expand Down Expand Up @@ -164,14 +165,21 @@ def test_dotnet_methods(test_file):
assert pytest.approx(95.8442) == epanet_resx.query.GetReachSumValues("9", "Pump energy")[0]


def test_epanet_resx_filter(test_file_path):
def test_epanet_resx_filter(test_file_path, helpers):
nodes = ["2", "9"]
reaches = ["9"]
epanet_resx = Res1D(test_file_path, nodes=nodes, reaches=reaches)

epanet_resx.read(QueryDataReach("Pump energy", "9"))
epanet_resx.read(QueryDataNode("Volume", "2"))
epanet_resx.read(QueryDataNode("Volume", "9"))
df_energy_9 = epanet_resx.read(QueryDataReach("Pump energy", "9"))
df_volume_2 = epanet_resx.read(QueryDataNode("Volume", "2"))
df_volume_9 = epanet_resx.read(QueryDataNode("Volume", "9"))

epanet_resx_full = Res1D(test_file_path)
df_full = epanet_resx_full.read()

helpers.assert_shared_columns_equal(df_full, df_energy_9)
helpers.assert_shared_columns_equal(df_full, df_volume_2)
helpers.assert_shared_columns_equal(df_full, df_volume_9)

# Currently Mike1D raises System.ArgumentOutOfRangeException when requesting location not included by filter
# This should be fixed in Mike1D to raise more meaningful Mike1DException
Expand All @@ -182,10 +190,14 @@ def test_epanet_resx_filter(test_file_path):
assert epanet_resx.read(QueryDataNode("Volume", "10xyz"))


def test_epanet_resx_filter_readall(test_file_path):
def test_epanet_resx_filter_readall(test_file_path, helpers):
# Make sure read all can be used with filters
nodes = ["2", "9"]
reaches = ["9"]
epanet_resx = Res1D(test_file_path, nodes=nodes, reaches=reaches)
df = epanet_resx.read()

epanet_resx_full = Res1D(test_file_path)
df_full = epanet_resx_full.read()

epanet_resx.read()
helpers.assert_shared_columns_equal(df_full, df)
23 changes: 17 additions & 6 deletions tests/test_res1d_catchments.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ def test_file(test_file_path, request):
def test_read(test_file):
df = test_file.read()
assert len(df) == 108
# TODO: assert not df.columns.duplicated().any() - add this, but it fails since columns are not guaranteed unique


def test_quantities(test_file):
Expand All @@ -37,7 +38,7 @@ def test_repr(test_file):
"<mikeio1d.Res1D>\n"
+ "Start time: 1994-08-07 16:35:00\n"
+ "End time: 1994-08-07 18:35:00\n"
"# Timesteps: 108\n"
+ "# Timesteps: 108\n"
+ "# Catchments: 31\n"
+ "# Nodes: 0\n"
+ "# Reaches: 0\n"
Expand Down Expand Up @@ -127,25 +128,35 @@ def test_dotnet_methods(test_file):
res1d.query.GetCatchmentValues("20_2_2", "TotalRunOff")


def test_res1d_filter(test_file_path):
def test_res1d_filter(test_file_path, helpers):
catchments = ["20_2_2", "22_8_8"]
res1d = Res1D(test_file_path, catchments=catchments)

res1d.read(QueryDataCatchment("TotalRunOff", "20_2_2"))
res1d.read(QueryDataCatchment("TotalRunOff", "22_8_8"))
df_20_2_2 = res1d.read(QueryDataCatchment("TotalRunOff", "20_2_2"))
df_22_8_8 = res1d.read(QueryDataCatchment("TotalRunOff", "22_8_8"))

res1d_full = Res1D(test_file_path)
df_full = res1d_full.read()

helpers.assert_shared_columns_equal(df_full, df_20_2_2)
helpers.assert_shared_columns_equal(df_full, df_22_8_8)

# Currently Mike1D raises NullReferenceException when requesting location not included by filter
# This should be fixed in Mike1D to raise more meaningful Mike1DException
# with pytest.raises(Exception):
# assert res1d.read(QueryDataCatchment("TotalRunOff", "100_16_16"))


def test_res1d_filter_readall(test_file_path):
def test_res1d_filter_readall(test_file_path, helpers):
# Make sure readall works with filters
catchments = ["20_2_2", "22_8_8"]
res1d = Res1D(test_file_path, catchments=catchments)
df = res1d.read()

res1d_full = Res1D(test_file_path)
df_full = res1d_full.read()

res1d.read()
helpers.assert_shared_columns_equal(df_full, df)


def test_catchment_attributes(test_file):
Expand Down
Loading

0 comments on commit 38d133f

Please sign in to comment.