diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 49e3f0a2..a2dd31f4 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -17,13 +17,13 @@ jobs:
 runs-on: ubuntu-latest
 strategy:
 matrix:
- python-version: [3.9]
+ python-version: [3.11]
 steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
 - name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v4
+ uses: actions/setup-python@v5
 with:
 python-version: ${{ matrix.python-version }}
@@ -32,25 +32,15 @@ jobs:
 python -m pip install --upgrade pip
 pip install -e .[ci]
- - name: Lint with flake8
- run: |
- # stop the build if there are Python syntax errors or undefined names
- flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
- # exit-zero treats all errors as warnings.
- flake8 . --count --exit-zero --max-complexity=10 --max-line-length=80 --statistics
-
 - name: Download executables needed for tests
 shell: bash -l {0}
 run: |
 python -c "import nlmod; nlmod.util.download_mfbinaries()"
- - name: Run notebooks
- if: ${{ github.event_name == 'push' }}
- run: |
- py.test ./tests -m "not notebooks"
-
 - name: Run tests only
- if: ${{ github.event_name == 'pull_request' }}
+ env:
+ NHI_GWO_USERNAME: ${{ secrets.NHI_GWO_USERNAME}}
+ NHI_GWO_PASSWORD: ${{ secrets.NHI_GWO_PASSWORD}}
 run: |
 py.test ./tests -m "not notebooks"
diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml
index 72f83ba7..1965aa1f 100644
--- a/.github/workflows/python-publish.yml
+++ b/.github/workflows/python-publish.yml
@@ -9,22 +9,24 @@ on:
 jobs:
 deploy:
-
 runs-on: ubuntu-latest
-
 steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
+
 - name: Set up Python
- uses: actions/setup-python@v4
+ uses: actions/setup-python@v5
 with:
- python-version: '3.9'
+ python-version: '3.11'
+
 - name: Install dependencies
 run: |
 python -m pip install --upgrade pip
 pip install build setuptools wheel
+
 - name: build binary wheel and a source tarball
 run: |
 python -m build --sdist --wheel --outdir dist/
+
 - name: Publish a Python distribution to PyPI
 uses: pypa/gh-action-pypi-publish@release/v1
 with:
diff --git a/.prospector.yaml b/.prospector.yaml
index 57c8ec7c..4656c947 100644
--- a/.prospector.yaml
+++ b/.prospector.yaml
@@ -29,6 +29,8 @@ pylint:
 - too-many-branches
 - too-many-statements
 - logging-fstring-interpolation
+ - import-outside-toplevel
+ - implicit-str-concat
 mccabe:
 disable:
diff --git a/README.md b/README.md
index 483db2a5..5b3f23f5 100644
--- a/README.md
+++ b/README.md
@@ -18,9 +18,13 @@ groundwater models, makes models more reproducible and transparent.
 The functions in `nlmod` have four main objectives:
-1. Create and adapt the temporal and spatial discretization of a MODFLOW model using an xarray Dataset (`nlmod.dims`).
-2. Download and read data from external sources, project this data on the modelgrid and add this data to an xarray Dataset (`nlmod.read`).
-3. Use data in an xarray Dataset to build modflow packages for both groundwater flow and transport models using FloPy (`nlmod.sim`, `nlmod.gwf` and `nlmod.gwt` for Modflow 6 and `nlmod.modpath` for Modpath).
+1. Create and adapt the temporal and spatial discretization of a MODFLOW model using an
+   xarray Dataset (`nlmod.dims`).
+2. Download and read data from external sources, project this data on the modelgrid and
+   add this data to an xarray Dataset (`nlmod.read`).
+3. Use data in an xarray Dataset to build modflow packages for both groundwater flow
+   and transport models using FloPy (`nlmod.sim`, `nlmod.gwf` and `nlmod.gwt` for
+   Modflow 6 and `nlmod.modpath` for Modpath).
 4. Visualise modeldata in Python (`nlmod.plot`) or GIS software (`nlmod.gis`).
 More information can be found on the documentation-website:
@@ -50,9 +54,10 @@ Install the module with pip:
 * `dask`
 * `colorama`
 * `joblib`
+* `bottleneck`
 There are some optional dependecies, only needed (and imported) in a single method.
-Examples of this are `bottleneck` (used in calculate_gxg), `geocube` (used in
+Examples of this are `geocube` (used in
 add_min_ahn_to_gdf), `h5netcdf` (used for hdf5 files backend in xarray),
 `scikit-image` (used in calculate_sea_coverage). To install `nlmod` with the optional
 dependencies use:
@@ -65,11 +70,4 @@ notoriously hard to install on certain platforms. Please see the
 ## Getting started
-If you are using `nlmod` for the first time you need to download the MODFLOW
-executables. You can easily download these executables by running this Python code:
-
-    import nlmod
-    nlmod.download_mfbinaries()
-
-After you've downloaded the executables you can run the Jupyter Notebooks in the
-examples folder. These notebooks illustrate how to use the `nlmod` package.
+Start with the Jupyter Notebooks in the examples folder. These notebooks illustrate how to use the `nlmod` package.
diff --git a/docs/conf.py b/docs/conf.py
index b2798667..27df1c88 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -10,10 +10,11 @@
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
 #
-from nlmod import __version__
 import os
 import sys
+from nlmod import __version__
+
 sys.path.insert(0, os.path.abspath("."))
diff --git a/docs/examples/00_model_from_scratch.ipynb b/docs/examples/00_model_from_scratch.ipynb
index 40579e4f..81842350 100644
--- a/docs/examples/00_model_from_scratch.ipynb
+++ b/docs/examples/00_model_from_scratch.ipynb
@@ -20,28 +20,9 @@
 "outputs": [],
 "source": [
 "import flopy as fp\n",
- "import matplotlib.pyplot as plt\n",
- "import nlmod\n",
- "import numpy as np\n",
- "import pandas as pd"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "nlmod.util.get_color_logger(\"INFO\");"
- ]
- },
- {
- "attachments": {},
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Download MODFLOW-binaries\n",
- "To run MODFLOW, we need to download the MODFLOW-excecutables. We do this with the following code:"
+ "import pandas as pd\n",
+ "\n",
+ "import nlmod"
 ]
 },
 {
@@ -50,8 +31,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "if not nlmod.util.check_presence_mfbinaries():\n",
- "    nlmod.download_mfbinaries()"
+ "nlmod.util.get_color_logger(\"INFO\")\n",
+ "nlmod.show_versions()"
 ]
 },
 {
diff --git a/docs/examples/01_basic_model.ipynb b/docs/examples/01_basic_model.ipynb
index ca18a257..63e5e5eb 100644
--- a/docs/examples/01_basic_model.ipynb
+++ b/docs/examples/01_basic_model.ipynb
@@ -18,12 +18,6 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "import logging\n",
- "import os\n",
- "\n",
- "import flopy\n",
- "import geopandas as gpd\n",
- "import matplotlib.pyplot as plt\n",
 "import nlmod"
 ]
 },
@@ -33,9 +27,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "print(f\"nlmod version: {nlmod.__version__}\")\n",
- "\n",
- "nlmod.util.get_color_logger(\"INFO\")"
+ "nlmod.util.get_color_logger(\"INFO\")\n",
+ "nlmod.show_versions()"
 ]
 },
 {
@@ -44,19 +37,20 @@
 "source": [
 "## Create model\n",
 "\n",
- "With the code below we create a modflow model with the name 'IJmuiden'. This model has the following properties :\n",
+ "With the code below we create a modflow model with the name 'IJmuiden'. This model has the following properties:\n",
+ "\n",
 "- an extent that covers part of the Northsea, Noordzeekanaal and the small port city IJmuiden.\n",
- "- a structured grid based on the subsurface models [Regis](https://www.dinoloket.nl/regis-ii-het-hydrogeologische-model) and [Geotop](https://www.dinoloket.nl/detaillering-van-de-bovenste-lagen-met-geotop). The Regis layers that are not present within the extent are removed. In this case we use 'MSz1' as the bottom layer of the model. Use `nlmod.read.regis.get_layer_names()` to get all the layer names of Regis. All Regis layers below this layer are not used in the model. Geotop is used to replace the holoceen layer in Regis because there is no kh or kv defined for the holoceen in Regis. Part of the model is in the North sea. Regis and Geotop have no data there. Therefore the Regis and Geotop layers are extrapolated from the shore and the seabed is added using bathymetry data from [Jarkus](https://www.openearth.nl/rws-bathymetry/2018.html).\n",
+ "- a structured grid based on the subsurface models [Regis](https://www.dinoloket.nl/regis-ii-het-hydrogeologische-model) and [Geotop](https://www.dinoloket.nl/detaillering-van-de-bovenste-lagen-met-geotop). The Regis layers that are not present within the extent are removed. In this case we use 'MSz1' as the bottom layer of the model. Use `nlmod.read.regis.get_layer_names()` to get all the layer names of Regis. All Regis layers below this layer are not used in the model. Geotop is used to replace the Holocene layer in Regis because there is no kh or kv defined for the Holocene in Regis. Part of the model is in the North sea. Regis and Geotop have no data there. Therefore the Regis and Geotop layers are extrapolated from the shore and the seabed is added using bathymetry data from [Jarkus](https://www.openearth.nl/rws-bathymetry/2018.html).\n",
 "- starting heads of 1 in every cell.\n",
- "- the model is a steady state model of a single time step.\n",
+ "- the model is a steady state model with a single time step.\n",
 "- big surface water bodies (Northsea, IJsselmeer, Markermeer, Noordzeekanaal) within the extent are added as a general head boundary. The surface water bodies are obtained from a [shapefile](..\data\shapes\opp_water.shp).\n",
- "- surface drainage is added using [ahn](https://www.ahn.nl) data and a default conductance of $1000 m^2/d$\n",
- "- recharge is added using data from the [knmi](https://www.knmi.nl/nederland-nu/klimatologie/daggegevens) using the following steps:~~\n",
- "  1. Check for each cell which KNMI weather and/or rainfall station is closest.\n",
- "  2. Download the data for the stations found in 1. for the model period. For a steady state stress period the average precipitation and evaporation of 8 years before the stress period time is used.\n",
- "  3. Combine precipitation and evaporation data from step 2 to create a recharge time series for each cell\n",
- "  4. Add the timeseries to the model dataset and create the recharge package.\n",
- "- constant head boundaries are added to the model edges in every layer. The starting head is used as constant head."
+ "- surface drainage is added using the Dutch DEM ([ahn](https://www.ahn.nl)) and a default conductance of $1000 m^2/d$\n",
+ "- recharge is added using data from [knmi](https://www.knmi.nl/nederland-nu/klimatologie/daggegevens) using the following steps:\n",
+ "  1. Check for each cell which KNMI weather and/or rainfall station is closest.\n",
+ "  2. Download the data for the stations found in 1. for the model period. For a steady state stress period the average precipitation and evaporation of 8 years before the stress period time is used.\n",
+ "  3. Combine precipitation and evaporation data from step 2 to create a recharge time series for each cell,\n",
+ "  4. Add the timeseries to the model dataset and create the recharge package.\n",
+ "- constant head boundaries are added to the model edges in every layer. The starting head is used as the specified head."
 ]
 },
 {
@@ -215,6 +209,7 @@
 "source": [
 "## Write and Run\n",
 "Now that we've created all the modflow packages we need to write them to modflow files. You always have to write the modflow data to the model workspace before you can run the model. You can write the model files and run the model using the function `nlmod.sim.write_and_run)` as shown below. This function has two additional options:\n",
+ "\n",
 "1. Write the model dataset to the disk if `write_ds` is `True`. This makes it easier and faster to load model data if you ever need it. \n",
 "2. Write a copy of this Jupyter Notebook to the same directory as the modflow files if `nb_path` is the name of this Jupyter Notebook. It can be useful to have a copy of the script that created the modflow files, together with the files. "
 ]
 },
@@ -250,7 +245,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
- "Data from a model with a structured grid can be easily visualised using the model dataset. Below some examples"
+ "Data from a model with a structured grid can be easily visualised using the model dataset. Below are some examples:"
 ]
 },
 {
diff --git a/docs/examples/02_surface_water.ipynb b/docs/examples/02_surface_water.ipynb
index cf62249f..15ed00ff 100644
--- a/docs/examples/02_surface_water.ipynb
+++ b/docs/examples/02_surface_water.ipynb
@@ -12,7 +12,7 @@
 "\n",
 "This example notebook shows some how to add surface water defined in a GeoDataFrame to a MODFLOW model using the `nlmod` package.\n",
 "\n",
- "There are three water boards in the model area, of which we download seasonal data about the stage of the surface water. In this notebook we perform a steady-state run, in which the stage of the surface water is the mean of the summer and winter stage. For locations without a stage from the water board, we delineate information from a Digital Terrain Model, to set a stage. We assign a stage of 0.0 m NAP to the river Lek. to The surface water bodies in each cell are aggregated using an area-weighted method and added to the model as a river-package."
+ "There are three water boards in the model area, and we download seasonal data about the stage of the surface water for each. In this notebook we perform a steady-state run, in which the stage of the surface water is the mean of the summer and winter stage. For locations without a stage from the water board, we obtain information from a Digital Terrain Model near the surface water features, to estimate a stage. We assign a stage of 0.0 m NAP to the river Lek. The surface water bodies in each cell are aggregated using an area-weighted method and added to the model with the river-package."
 ]
 },
 {
@@ -25,12 +25,10 @@
 "import os\n",
 "\n",
 "import flopy\n",
- "import rioxarray\n",
 "import matplotlib.pyplot as plt\n",
- "import nlmod\n",
- "from geocube.api.core import make_geocube\n",
- "from functools import partial\n",
- "from geocube.rasterize import rasterize_image"
+ "import rioxarray\n",
+ "\n",
+ "import nlmod"
 ]
 },
 {
@@ -40,9 +38,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "print(f\"nlmod version: {nlmod.__version__}\")\n",
- "\n",
- "nlmod.util.get_color_logger(\"INFO\")"
+ "nlmod.util.get_color_logger(\"INFO\")\n",
+ "nlmod.show_versions()"
 ]
 },
 {
@@ -94,7 +91,7 @@
 "if not os.path.isfile(fname_ahn):\n",
 "    ahn = nlmod.read.ahn.get_ahn4(extent, identifier=\"AHN4_DTM_5m\")\n",
 "    ahn.rio.to_raster(fname_ahn)\n",
- "ahn = rioxarray.open_rasterio(fname_ahn, mask_and_scale=True)"
+ "ahn = rioxarray.open_rasterio(fname_ahn, mask_and_scale=True)[0]"
 ]
 },
 {
@@ -103,7 +100,7 @@
 "metadata": {},
 "source": [
 "### Layer 'waterdeel' from bgt\n",
- "As the source of the location of the surface water bodies we use the 'waterdeel' layer of the Basisregistratie Grootschalige Topografie (BGT). This data consists of detailed polygons, maintained by dutch government agencies (water boards, municipalities and Rijkswatrstaat)."
+ "As the source of the location of the surface water bodies we use the 'waterdeel' layer of the Basisregistratie Grootschalige Topografie (BGT). This data consists of detailed polygons, maintained by dutch government agencies (water boards, municipalities and Rijkswaterstaat)."
 ]
 },
 {
@@ -223,7 +220,7 @@
 "metadata": {},
 "source": [
 "#### Save the data to use in other notebooks as well\n",
- "We save the bgt-data to a GeoPackage file, so we can use the data in other notebooks with surface water as well"
+ "We save the bgt-data to a GeoPackage file, so we can use the data in other notebooks with surface water as well."
 ]
 },
 {
@@ -275,7 +272,13 @@
 "\n",
 "The `stage` and the `botm` columns are present in our dataset. The bottom resistance `c0` is rarely known, and is usually estimated when building the model. We will add our estimate later on.\n",
 "\n",
- "*__Note__: the NaN's in the dataset indicate that not all parameters are known for each feature. This is not necessarily a problem but this will mean some features will not be converted to model input.*"
")[-1].strip()
+ ) # obtain apikey from codeblock on webpage
+ if len(api_key) != 120:
+ msg = f"Could not obtain API Key from {url}, trying API Key from memory. Found API Key = {api_key}"
+ logger.error(msg)
+ raise ValueError(msg)
+ logger.info(f"Retrieved anonymous API Key from {url}")
+ return api_key
except Exception as exc:
- if Timestamp.today() < Timestamp("2024-07-01"):
- logger.info("Retrieved anonymous API Key from memory")
+ api_key_memory_date = "2025-07-01"
+ if Timestamp.today() < Timestamp(api_key_memory_date):
+ logger.info(
+ f"Retrieved anonymous API Key (available till {api_key_memory_date}) from memory"
+ )
api_key = (
- "eyJvcmciOiI1ZTU1NGUxOTI3NGE5NjAwMDEyYTNlYjEiLCJpZCI6ImE1OGI5"
- "NGZmMDY5NDRhZDNhZjFkMDBmNDBmNTQyNjBkIiwiaCI6Im11cm11cjEyOCJ9"
+ "eyJvcmciOiI1ZTU1NGUxOTI3NGE5NjAwMDEyYTNlYjEiLCJpZCI6ImE1OGI5N"
+ "GZmMDY5NDRhZDNhZjFkMDBmNDBmNTQyNjBkIiwiaCI6Im11cm11cjEyOCJ9"
)
return api_key
else:
@@ -58,7 +59,7 @@ def get_list_of_files(
start_after_filename: Optional[str] = None,
timeout: int = 120,
) -> List[str]:
- """Download list of files from KNMI data platform"""
+ """Download list of files from KNMI data platform."""
if api_key is None:
api_key = get_anonymous_api_key()
files = []
@@ -69,6 +70,7 @@ def get_list_of_files(
params = {"maxKeys": f"{max_keys}"}
if start_after_filename is not None:
params["startAfterFilename"] = start_after_filename
+ logger.debug(f"Request to {url=} with {params=}")
r = requests.get(
url, params=params, headers={"Authorization": api_key}, timeout=timeout
)
@@ -88,7 +90,7 @@ def download_file(
api_key: Optional[str] = None,
timeout: int = 120,
) -> None:
- """Download file from KNMI data platform"""
+ """Download file from KNMI data platform."""
if api_key is None:
api_key = get_anonymous_api_key()
url = (
@@ -118,7 +120,7 @@ def download_files(
api_key: Optional[str] = None,
timeout: int = 120,
) -> None:
- """Download multiple files from KNMI data platform"""
+ """Download multiple files from KNMI data platform."""
for fname in tqdm(fnames):
download_file(
dataset_name=dataset_name,
@@ -131,7 +133,7 @@ def download_files(
def read_nc(fo: Union[str, FileIO], **kwargs: dict) -> xr.Dataset:
- """Read netcdf (.nc) file to xarray Dataset"""
+ """Read netcdf (.nc) file to xarray Dataset."""
# could help to provide argument: engine="h5netcdf"
return xr.open_dataset(fo, **kwargs)
@@ -160,7 +162,7 @@ def get_timestamp_from_fname(fname: str) -> Union[Timestamp, None]:
def add_h5_meta(meta: Dict[str, Any], h5obj: Any, orig_ky: str = "") -> Dict[str, Any]:
- """Read metadata from hdf5 (.h5) file and add to existing metadata dictionary"""
+ """Read metadata from hdf5 (.h5) file and add to existing metadata dictionary."""
def cleanup(val: Any) -> Any:
if isinstance(val, (ndarray, list)):
@@ -173,7 +175,7 @@ def cleanup(val: Any) -> Any:
return val
if hasattr(h5obj, "attrs"):
- attrs = getattr(h5obj, "attrs")
+ attrs = h5obj.attrs
submeta = {f"{orig_ky}/{ky}": cleanup(val) for ky, val in attrs.items()}
meta.update(submeta)
@@ -185,7 +187,7 @@ class MultipleDatasetsFound(Exception):
def read_h5_contents(h5fo: FileIO) -> Tuple[ndarray, Dict[str, Any]]:
- """Read contents from a hdf5 (.h5) file"""
+ """Read contents from a hdf5 (.h5) file."""
from h5py import Dataset as h5Dataset
data = None
@@ -205,7 +207,7 @@ def read_h5_contents(h5fo: FileIO) -> Tuple[ndarray, Dict[str, Any]]:
def read_h5(fo: Union[str, FileIO]) -> xr.Dataset:
- """Read hdf5 (.h5) file to xarray Dataset"""
+ """Read hdf5 (.h5) file to xarray Dataset."""
from h5py import File as h5File
with h5File(fo) as h5fo:
@@ -230,7 +232,7 @@ def read_h5(fo: Union[str, FileIO]) -> xr.Dataset:
def read_grib(
fo: Union[str, FileIO], filter_by_keys=None, **kwargs: dict
) -> xr.Dataset:
- """Read GRIB file to xarray Dataset"""
+ """Read GRIB file to xarray Dataset."""
if kwargs is None:
kwargs = {}
@@ -247,7 +249,7 @@ def read_grib(
def read_dataset_from_zip(
fname: str, hour: Optional[int] = None, **kwargs: dict
) -> xr.Dataset:
- """Read KNMI data platfrom .zip file to xarray Dataset"""
+ """Read KNMI data platfrom .zip file to xarray Dataset."""
if fname.endswith(".zip"):
with ZipFile(fname) as zipfo:
fnames = sorted([x for x in zipfo.namelist() if not x.endswith("/")])
@@ -275,7 +277,7 @@ def read_dataset(
hour: Optional[int] = None,
**kwargs: dict,
) -> xr.Dataset:
- """Read xarray dataset from different file types; .nc, .h5 or grib file"""
+ """Read xarray dataset from different file types; .nc, .h5 or grib file."""
if hour is not None:
if hour == 24:
hour = 0
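
The helpers above are meant to be chained: list the files in a dataset, download them, and read a downloaded file into xarray. A rough sketch of that flow is given below; the dataset name and version are hypothetical placeholders, and the keyword names are inferred from the partial signatures shown in this diff, so check them against the module before use.

    from nlmod.read import knmi_data_platform as kdp

    # hypothetical dataset identifiers; pick a real dataset from the KNMI Data Platform
    dataset_name = "radar_forecast"
    dataset_version = "1.0"

    # an anonymous API key is retrieved automatically when api_key is None
    fnames = kdp.get_list_of_files(dataset_name, dataset_version, max_keys=10)
    kdp.download_files(dataset_name, dataset_version, fnames[:1])

    # a downloaded .zip (or .nc/.h5/GRIB) file can be opened as an xarray Dataset
    ds = kdp.read_dataset_from_zip(fnames[0], hour=12)
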
diff --git a/nlmod/read/meteobase.py b/nlmod/read/meteobase.py
index a5df07db..5b3a9e11 100644
--- a/nlmod/read/meteobase.py
+++ b/nlmod/read/meteobase.py
@@ -11,8 +11,7 @@
class MeteobaseType(Enum):
- """Enum class to couple folder names to observation type (from in
- LEESMIJ.txt)"""
+ """Enum class to couple folder names to observation type (from in LEESMIJ.txt)"""
NEERSLAG = "Neerslagradargegevens in Arc/Info-formaat."
MAKKINK = "Verdampingsgegevens volgens Makkink."
@@ -56,8 +55,7 @@ def read_leesmij(fo: FileIO) -> Dict[str, Dict[str, str]]:
def get_timestamp_from_fname(fname: str) -> Timestamp:
- """Get the Timestamp from a filename (with some assumptions about the
- formatting)"""
+ """Get the Timestamp from a filename (with some assumptions about the formatting)"""
datestr = re.search("([0-9]{8})", fname) # assumes YYYYMMDD
if datestr is not None:
match = datestr.group(0)
@@ -130,7 +128,7 @@ def read_ascii(fo: FileIO) -> Union[np.ndarray, dict]:
def get_xy_from_ascii_meta(
- meta: Dict[str, Union[int, float]]
+ meta: Dict[str, Union[int, float]],
) -> Tuple[np.ndarray, np.ndarray]:
"""Get the xy coordinates Esri ASCII raster format header.
@@ -268,7 +266,6 @@ def read_meteobase(
-------
List[DataArray]
"""
-
with ZipFile(Path(path)) as zfile:
with zfile.open("LEESMIJ.TXT") as fo:
meta = read_leesmij(fo)
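
The date parsing above relies solely on the `([0-9]{8})` pattern, so the first eight-digit run in a filename is interpreted as YYYYMMDD. A small illustration with a made-up filename:

    from nlmod.read.meteobase import get_timestamp_from_fname

    # the eight digits are read as YYYYMMDD; the rest of the name is ignored by this regex
    ts = get_timestamp_from_fname("NEERSLAG_20230115_0800.asc")
    print(ts)  # a pandas Timestamp on 15 January 2023
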
diff --git a/nlmod/read/nhi.py b/nlmod/read/nhi.py
index 858e3a16..4e721d60 100644
--- a/nlmod/read/nhi.py
+++ b/nlmod/read/nhi.py
@@ -1,7 +1,10 @@
+import io
import logging
import os
+import geopandas as gpd
import numpy as np
+import pandas as pd
import requests
import rioxarray
@@ -11,8 +14,7 @@
def download_file(url, pathname, filename=None, overwrite=False, timeout=120.0):
- """
- Download a file from the NHI website.
+ """Download a file from the NHI website.
Parameters
----------
@@ -33,7 +35,6 @@ def download_file(url, pathname, filename=None, overwrite=False, timeout=120.0):
-------
fname : str
The full path of the downloaded file.
-
"""
if filename is None:
filename = url.split("/")[-1]
@@ -47,8 +48,7 @@ def download_file(url, pathname, filename=None, overwrite=False, timeout=120.0):
def download_buisdrainage(pathname, overwrite=False):
- """
- Download resistance and depth of buisdrainage from the NHI website
+ """Download resistance and depth of buisdrainage from the NHI website.
Parameters
----------
@@ -63,7 +63,6 @@ def download_buisdrainage(pathname, overwrite=False):
The full path of the downloaded file containing the resistance of buisdrainage.
fname_d : str
The full path of the downloaded file containing the depth of buisdrainage.
-
"""
url_bas = "https://thredds.data.nhi.nu/thredds/fileServer/opendap/models/nhi3_2/25m"
@@ -86,8 +85,7 @@ def add_buisdrainage(
cond_method="average",
depth_method="mode",
):
- """
- Add data about the buisdrainage to the model Dataset.
+ """Add data about the buisdrainage to the model Dataset.
This data consists of the conductance of buisdrainage (m2/d) and the depth of
buisdrainage (m to surface level). With the default settings for `cond_method` and
@@ -125,7 +123,6 @@ def add_buisdrainage(
ds : xr.Dataset
The model dataset with added variables with the names `cond_var` and
`depth_var`.
-
"""
if pathname is None:
pathname = ds.cachedir
@@ -173,3 +170,208 @@ def add_buisdrainage(
ds[depth_var] = ds[depth_var] / 100.0
return ds
+
+
+def get_gwo_wells(
+ username,
+ password,
+ n_well_filters=1_000,
+ well_site=None,
+ organisation=None,
+ status=None,
+ well_index="Name",
+ timeout=120,
+ **kwargs,
+):
+ """Get metadata of extraction wells from the NHI GWO database.
+
+ Parameters
+ ----------
+ username : str
+ The username of the NHI GWO database. To retrieve a username and password visit
+ https://gwo.nhi.nu/register/.
+ password : str
+ The password of the NHI GWO database. To retrieve a username and password visit
+ https://gwo.nhi.nu/register/.
+ n_well_filters : int, optional
+ The number of wells that are requested per page. This number determines in how
+ many pieces the request is split. The default is 1000.
+ organisation : str, optional
+ The organisation that manages the wells. If not None, the organisation will be
+ used to filter the wells. The default is None.
+ well_site : str, optional
+ The name of well site the wells belong to. If not None, the well site will be
+ used to filter the wells. The default is None.
+ status : str, optional
+ The status of the wells. If not None, the status will be used to filter the
+ wells. Possible values are "Active", "Inactive" or "Abandoned". The default is
+ None.
+ well_index : str, tuple or list, optional
+ The column(s) in the resulting GeoDataFrame that is/are used as the index of
+ this GeoDataFrame. The default is "Name".
+ timeout : int, optional
+ The timeout time (in seconds) for requests to the database. The default is
+ 120 seconds.
+ **kwargs : dict
+ Kwargs are passed as additional parameters in the request to the database. For
+ available parameters see https://gwo.nhi.nu/api/v1/download/.
+
+ Returns
+ -------
+ gdf : geopandas.GeoDataFrame
+ A GeoDataFrame containing the properties of the wells and their filters.
+ """
+ # zie https://gwo.nhi.nu/api/v1/download/
+ url = "https://gwo.nhi.nu/api/v1/well_filters/"
+
+ page = 1
+ properties = []
+ while page is not None:
+ params = {"format": "csv", "n_well_filters": n_well_filters, "page": page}
+ if status is not None:
+ params["well__status"] = status
+ if organisation is not None:
+ params["well__organization"] = organisation
+ if well_site is not None:
+ params["well__site"] = well_site
+ params.update(kwargs)
+
+ r = requests.get(url, auth=(username, password), params=params, timeout=timeout)
+ content = r.content.decode("utf-8")
+ if len(content) == 0:
+ if page == 1:
+ msg = "No extraction wells found for the requested parameters"
+ raise ValueError(msg)
+ else:
+ # the number of wells is exactly a multiple of n_well_filters
+ page = None
+ continue
+ lines = content.split("\n")
+ empty_lines = np.where([set(line) == set(";") for line in lines])[0]
+ assert len(empty_lines) == 1, "Returned extraction wells cannot be interpreted"
+ skiprows = list(range(empty_lines[0] + 1)) + [empty_lines[0] + 2]
+ df = pd.read_csv(io.StringIO(content), skiprows=skiprows, sep=";")
+ properties.append(df)
+
+ if len(df) == n_well_filters:
+ page += 1
+ else:
+ page = None
+ df = pd.concat(properties)
+ geometry = gpd.points_from_xy(df.XCoordinate, df.YCoordinate)
+ gdf = gpd.GeoDataFrame(df, geometry=geometry, crs=28992)
+ if well_index is not None:
+ gdf = gdf.set_index(well_index)
+ return gdf
+
+
+def get_gwo_measurements(
+ username,
+ password,
+ n_measurements=10_000,
+ well_site=None,
+ well_index="Name",
+ measurement_index=("Name", "DateTime"),
+ timeout=120,
+ **kwargs,
+):
+ """Get extraction rates and metadata of wells from the NHI GWO database.
+
+ Parameters
+ ----------
+ username : str
+ The username of the NHI GWO database. To retrieve a username and password visit
+ https://gwo.nhi.nu/register/.
+ password : str
+ The password of the NHI GWO database. To retrieve a username and password visit
+ https://gwo.nhi.nu/register/.
+ n_measurements : int, optional
+ The number of measurements that are requested per page, with a maximum of
+ 200,000. This number determines in how many pieces the request is split. The
+ default is 10,000.
+ well_site : str, optional
+ The name of well site the wells belong to. If not None, the well site will be
+ used to filter the wells. The default is None.
+ well_index : str, tuple or list, optional
+ The column(s) in the resulting GeoDataFrame that is/are used as the index of
+ this GeoDataFrame. The default is "Name".
+ measurement_index : str, tuple or list, optional
+ The column(s) in the resulting measurement-DataFrame that is/are used as the
+ index of this DataFrame. The default is ("Name", "DateTime").
+ timeout : int, optional
+ The timeout time (in seconds) of requests to the database. The default is
+ 120 seconds.
+ **kwargs : dict
+ Kwargs are passed as additional parameters in the request to the database. For
+ available parameters see https://gwo.nhi.nu/api/v1/download/.
+
+ Returns
+ -------
+ measurements : pandas.DataFrame
+ A DataFrame containing the extraction rates of the wells in the database.
+ gdf : geopandas.GeoDataFrame
+ A GeoDataFrame containing the properties of the wells and their filters.
+ """
+ url = "http://gwo.nhi.nu/api/v1/measurements/"
+ properties = []
+ measurements = []
+ page = 1
+ while page is not None:
+ params = {
+ "format": "csv",
+ "n_measurements": n_measurements,
+ "page": page,
+ }
+ if well_site is not None:
+ params["filter__well__site"] = well_site
+ params.update(kwargs)
+ r = requests.get(url, auth=(username, password), params=params, timeout=timeout)
+
+ content = r.content.decode("utf-8")
+ if len(content) == 0:
+ if page == 1:
+ msg = "No extraction rates found for the requested parameters"
+ raise ValueError(msg)
+ else:
+ # the number of measurements is exactly a multiple of n_measurements
+ page = None
+ continue
+ lines = content.split("\n")
+ empty_lines = np.where([set(line) == set(";") for line in lines])[0]
+ assert len(empty_lines) == 2, "Returned extraction rates cannot be interpreted"
+
+ # read properties
+ skiprows = list(range(empty_lines[0] + 1)) + [empty_lines[0] + 2]
+ nrows = empty_lines[1] - empty_lines[0] - 3
+ df = pd.read_csv(io.StringIO(content), sep=";", skiprows=skiprows, nrows=nrows)
+ properties.append(df)
+
+ # read measurements
+ skiprows = list(range(empty_lines[1] + 1)) + [empty_lines[1] + 2]
+ df = pd.read_csv(
+ io.StringIO(content),
+ skiprows=skiprows,
+ sep=";",
+ parse_dates=["DateTime"],
+ dayfirst=True,
+ )
+ measurements.append(df)
+ if len(df) == n_measurements:
+ page += 1
+ else:
+ page = None
+ measurements = pd.concat(measurements)
+ # drop columns without measurements
+ measurements = measurements.loc[:, ~measurements.isna().all()]
+ if measurement_index is not None:
+ if isinstance(measurement_index, tuple):
+ measurement_index = list(measurement_index)
+ measurements = measurements.set_index(["Name", "DateTime"])
+ df = pd.concat(properties)
+ geometry = gpd.points_from_xy(df.XCoordinate, df.YCoordinate)
+ gdf = gpd.GeoDataFrame(df, geometry=geometry)
+ if well_index is not None:
+ gdf = gdf.set_index(well_index)
+ # drop duplicate properties from multiple pages
+ gdf = gdf[~gdf.index.duplicated()]
+ return measurements, gdf
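
A usage sketch for the two new NHI GWO helpers, reading credentials from the same environment variables the CI workflow now passes (`NHI_GWO_USERNAME` and `NHI_GWO_PASSWORD`); the organisation and well-site names are hypothetical examples.

    import os

    import nlmod

    username = os.environ["NHI_GWO_USERNAME"]
    password = os.environ["NHI_GWO_PASSWORD"]

    # metadata of extraction wells, returned as a GeoDataFrame indexed by well name
    wells = nlmod.read.nhi.get_gwo_wells(username, password, organisation="Dunea")

    # extraction rates plus well properties for a single (hypothetical) well site
    measurements, gdf = nlmod.read.nhi.get_gwo_measurements(
        username, password, well_site="scheveningen"
    )
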
diff --git a/nlmod/read/regis.py b/nlmod/read/regis.py
index 729d7b44..f1ba8683 100644
--- a/nlmod/read/regis.py
+++ b/nlmod/read/regis.py
@@ -12,11 +12,10 @@
logger = logging.getLogger(__name__)
-REGIS_URL = "http://www.dinodata.nl:80/opendap/REGIS/REGIS.nc"
-# REGIS_URL = 'https://www.dinodata.nl/opendap/hyrax/REGIS/REGIS.nc'
+REGIS_URL = "https://dinodata.nl/opendap/REGIS/REGIS.nc"
-@cache.cache_netcdf
+@cache.cache_netcdf()
def get_combined_layer_models(
extent,
regis_botm_layer="AKc",
@@ -26,7 +25,7 @@ def get_combined_layer_models(
geotop_layers="HLc",
geotop_k=None,
):
- """combine layer models into a single layer model.
+ """Combine layer models into a single layer model.
Possibilities so far include:
- use_regis -> full model based on regis
@@ -65,7 +64,6 @@ def get_combined_layer_models(
ValueError
if an invalid combination of layers is used.
"""
-
if use_regis:
regis_ds = get_regis(
extent, regis_botm_layer, remove_nan_layers=remove_nan_layers
@@ -93,7 +91,7 @@ def get_combined_layer_models(
return combined_ds
-@cache.cache_netcdf
+@cache.cache_netcdf()
def get_regis(
extent,
botm_layer="AKc",
@@ -102,7 +100,7 @@ def get_regis(
drop_layer_dim_from_top=True,
probabilities=False,
):
- """get a regis dataset projected on the modelgrid.
+ """Get a regis dataset projected on the modelgrid.
Parameters
----------
@@ -133,7 +131,6 @@ def get_regis(
regis_ds : xarray dataset
dataset with regis data projected on the modelgrid.
"""
-
ds = xr.open_dataset(REGIS_URL, decode_times=False)
# set x and y dimensions to cell center
@@ -196,10 +193,16 @@ def get_regis(
def add_geotop_to_regis_layers(
- rg, gt, layers="HLc", geotop_k=None, remove_nan_layers=True, anisotropy=1.0
+ rg,
+ gt,
+ layers="HLc",
+ geotop_k=None,
+ remove_nan_layers=True,
+ anisotropy=1.0,
+ gt_layered=None,
):
- """Combine geotop and regis in such a way that the one or more layers in
- Regis are replaced by the geo_eenheden of geotop.
+ """Combine geotop and regis in such a way that the one or more layers in Regis are
+ replaced by the geo_eenheden of geotop.
Parameters
----------
@@ -217,6 +220,10 @@ def add_geotop_to_regis_layers(
anisotropy : float, optional
The anisotropy value (kh/kv) used when there are no kv values in df. The
default is 1.0.
+ gt_layered : xarray.Dataset, optional
+ A layered representation of the geotop-dataset. By supplying this parameter, the
+ user can change the GeoTOP-layering, which is usually defined by
+ nlmod.read.geotop.to_model_layers(gt). The default is None.
Returns
-------
@@ -254,8 +261,11 @@ def add_geotop_to_regis_layers(
rg["top"] = rg["botm"] + calculate_thickness(rg)
for layer in layers:
- # transform geotop data into layers
- gtl = geotop.to_model_layers(gt)
+ if gt_layered is not None:
+ gtl = gt_layered.copy(deep=True)
+ else:
+ # transform geotop data into layers
+ gtl = geotop.to_model_layers(gt)
# temporarily add layer dimension to top in gtl
gtl["top"] = gtl["botm"] + calculate_thickness(gtl)
@@ -291,14 +301,13 @@ def add_geotop_to_regis_layers(
def get_layer_names():
- """get all the available regis layer names.
+ """Get all the available regis layer names.
Returns
-------
layer_names : np.array
array with names of all the regis layers.
"""
-
layer_names = xr.open_dataset(REGIS_URL).layer.astype(str).values
return layer_names
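
The new `gt_layered` argument lets you build the GeoTOP layering once, adjust it, and reuse it for every Regis layer that is replaced. A sketch under the assumption that `nlmod.read.geotop.get_geotop()` is the usual way to obtain the GeoTOP data for an extent:

    import nlmod

    extent = [100000, 105000, 495000, 500000]  # xmin, xmax, ymin, ymax (RD coordinates)
    rg = nlmod.read.regis.get_regis(extent)
    gt = nlmod.read.geotop.get_geotop(extent)  # assumed getter for the GeoTOP data

    # compute the GeoTOP layering once, optionally modify it, and pass it in
    gt_layered = nlmod.read.geotop.to_model_layers(gt)
    layer_model = nlmod.read.regis.add_geotop_to_regis_layers(
        rg, gt, layers="HLc", gt_layered=gt_layered
    )
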
diff --git a/nlmod/read/rws.py b/nlmod/read/rws.py
index 7af2a991..28b572b7 100644
--- a/nlmod/read/rws.py
+++ b/nlmod/read/rws.py
@@ -15,8 +15,8 @@
def get_gdf_surface_water(ds):
- """read a shapefile with surface water as a geodataframe, cut by the extent
- of the model.
+ """Read a shapefile with surface water as a geodataframe, cut by the extent of the
+ model.
Parameters
----------
@@ -37,9 +37,9 @@ def get_gdf_surface_water(ds):
return gdf_swater
-@cache.cache_netcdf
+@cache.cache_netcdf(coords_3d=True)
def get_surface_water(ds, da_basename):
- """create 3 data-arrays from the shapefile with surface water:
+ """Create 3 data-arrays from the shapefile with surface water:
- area: area of the shape in the cell
- cond: conductance based on the area and "bweerstand" column in shapefile
@@ -58,7 +58,6 @@ def get_surface_water(ds, da_basename):
ds : xarray.Dataset
dataset with modelgrid data.
"""
-
modelgrid = dims.modelgrid_from_ds(ds)
gdf = get_gdf_surface_water(ds)
@@ -91,10 +90,10 @@ def get_surface_water(ds, da_basename):
return ds_out
-@cache.cache_netcdf
+@cache.cache_netcdf(coords_2d=True)
def get_northsea(ds, da_name="northsea"):
- """Get Dataset which is 1 at the northsea and 0 everywhere else. Sea is
- defined by rws surface water shapefile.
+ """Get Dataset which is 1 at the northsea and 0 everywhere else. Sea is defined by
+ rws surface water shapefile.
Parameters
----------
@@ -109,7 +108,6 @@ def get_northsea(ds, da_name="northsea"):
Dataset with a single DataArray, this DataArray is 1 at sea and 0
everywhere else. Grid dimensions according to ds.
"""
-
gdf_surf_water = get_gdf_surface_water(ds)
# find grid cells with sea
@@ -140,7 +138,6 @@ def add_northsea(ds, cachedir=None):
b) fill top, bot, kh and kv add northsea cell by extrapolation
c) get bathymetry (northsea depth) from jarkus.
"""
-
logger.info(
"Filling NaN values in top/botm and kh/kv in "
"North Sea using bathymetry data from jarkus"
@@ -181,8 +178,7 @@ def calculate_sea_coverage(
nodata=-1,
return_filled_dtm=False,
):
- """
- Determine where the sea is by interpreting the digital terrain model.
+ """Determine where the sea is by interpreting the digital terrain model.
This method assumes the pixel defined in xy_sea (by default top-left) of the
DTM-DataArray is sea. It then determines the height of the sea that is required for
@@ -190,7 +186,6 @@ def calculate_sea_coverage(
Parameters
----------
-
dtm : xr.DataArray
The digital terrain data, which can be of higher resolution than ds, Nans are
filled by the minial value of dtm.
@@ -223,7 +218,6 @@ def calculate_sea_coverage(
sea : xr.DataArray
A DataArray with value of 1 where the sea is and 0 where it is not.
"""
-
from skimage.morphology import reconstruction
if not (dtm < zmax).any():
diff --git a/nlmod/read/waterboard.py b/nlmod/read/waterboard.py
index b44d44c2..64eaa5a8 100644
--- a/nlmod/read/waterboard.py
+++ b/nlmod/read/waterboard.py
@@ -49,15 +49,16 @@ def get_configuration():
config["Amstel, Gooi en Vecht"] = {
"bgt_code": "W0155",
"watercourses": {
- "url": "https://maps.waternet.nl/arcgis/rest/services/AGV_Legger/AGV_Onderh_Secundaire_Watergangen/MapServer",
- "layer": 40,
- "bottom_width": "BODEMBREEDTE",
- "bottom_height": "BODEMHOOGTE",
- "water_depth": "WATERDIEPTE",
+ "url": "https://maps.waternet.nl/arcgis/rest/services/Publiek/WNET_GEO_LEGGER_WL_2021/MapServer",
+ "layer": 0, # Primaire Waterloop Legger
+ "bottom_width": "AVVBODDR",
+ "bottom_height": "AVVBODH",
+ "water_depth": "AVVDIEPT",
+ "index": "OVKIDENT",
},
"level_areas": {
- "url": "https://maps.waternet.nl/arcgis/rest/services/AGV_Legger/Vastgestelde_Waterpeilen/MapServer",
- "layer": 0,
+ "url": "https://maps.waternet.nl/arcgis/rest/services/Publiek/GW_GPG/MapServer",
+ "layer": 5, # Vigerende peilgebieden
"index": "GPGIDENT",
"summer_stage": [
"GPGZMRPL",
@@ -201,18 +202,6 @@ def get_configuration():
},
"level_areas": {
"url": "https://kaarten.hhnk.nl/arcgis/rest/services/ws/ws_peilgebieden_vigerend/MapServer",
- "layer": 4,
- "table": {
- "id": 6,
- "SOORTSTREEFPEIL": {
- 901: "STREEFPEIL_JAARROND", # vast peilbeheer
- 902: "STREEFPEIL_WINTER",
- 903: "STREEFPEIL_ZOMER",
- 904: "STREEFPEIL_JAARROND", # dynamisch peilbeheer
- 905: "ONDERGRENS_JAARROND",
- 906: "BOVENGRENS_JAARROND",
- },
- },
"summer_stage": [
"ZOMER",
"STREEFPEIL_ZOMER",
@@ -522,7 +511,6 @@ def get_data(wb, data_kind, extent=None, max_record_count=None, config=None, **k
Raises
------
-
DESCRIPTION.
Returns
@@ -605,11 +593,12 @@ def get_data(wb, data_kind, extent=None, max_record_count=None, config=None, **k
def _set_column_from_columns(gdf, set_column, from_columns, nan_values=None):
- """Retrieve values from one or more Geo)DataFrame-columns and set these
- values as another column."""
+ """Retrieve values from one or more Geo)DataFrame-columns and set these values as
+ another column.
+ """
if set_column in gdf.columns:
raise (Exception(f"Column {set_column} allready exists"))
- gdf[set_column] = np.NaN
+ gdf[set_column] = np.nan
if from_columns is None:
return gdf
if isinstance(from_columns, str):
@@ -645,5 +634,5 @@ def _set_column_from_columns(gdf, set_column, from_columns, nan_values=None):
if nan_values is not None:
if isinstance(nan_values, (float, int)):
nan_values = [nan_values]
- gdf.loc[gdf[set_column].isin(nan_values), set_column] = np.NaN
+ gdf.loc[gdf[set_column].isin(nan_values), set_column] = np.nan
return gdf
diff --git a/nlmod/read/webservices.py b/nlmod/read/webservices.py
index 64c72742..97cc90bf 100644
--- a/nlmod/read/webservices.py
+++ b/nlmod/read/webservices.py
@@ -150,31 +150,40 @@ def arcrest(
else:
gdf = gpd.GeoDataFrame.from_features(features, crs=sr)
if table is not None:
- url_query = f"{url}/{table.pop('id')}/query"
- pgbids = ",".join([str(v) for v in gdf["OBJECTID"].values])
- params["where"] = f"PEILGEBIEDVIGERENDID IN ({pgbids})"
params["f"] = "json"
- data = _get_data(url_query, params, timeout=timeout)
+ url_query = f"{url}/{table.pop('id')}/query"
+
+ # loop over chunks of 100 pgbids. Long where clauses can cause
+ # the request to fail. 1300 pgbids fails but 130 works
+ chunk_size = 100
+ ids_chunks = [
+ gdf["OBJECTID"].values[i : i + chunk_size]
+ for i in range(0, len(gdf), chunk_size)
+ ]
+ data = {}
+ features = []
+
+ for ids_chunk in ids_chunks:
+ pgbids = ",".join([str(v) for v in ids_chunk])
+ where = f"PEILGEBIEDVIGERENDID IN ({pgbids})"
+ params["where"] = where
+ _data = _get_data(url_query, params, timeout=timeout, **kwargs)
+
+ data.update(_data)
+ features.extend(_data["features"])
+
+ assert "exceededTransferLimit" not in data, "exceededTransferLimit"
+ data["features"] = features
+
df = pd.DataFrame(
[feature["attributes"] for feature in data["features"]]
)
- # add peilen to gdf
- for col, convert_dic in table.items():
- df[col].replace(convert_dic, inplace=True)
- df.set_index(col, inplace=True)
- for oid in gdf["OBJECTID"]:
- insert_s = df.loc[
- df["PEILGEBIEDVIGERENDID"] == oid, "WATERHOOGTE"
- ]
- gdf.loc[
- gdf["OBJECTID"] == oid, insert_s.index
- ] = insert_s.values
return gdf
def _get_data(url, params, timeout=120, **kwargs):
- """get data using a request
+ """Get data using a request.
Parameters
----------
@@ -188,7 +197,6 @@ def _get_data(url, params, timeout=120, **kwargs):
Returns
-------
data
-
"""
r = requests.get(url, params=params, timeout=timeout, **kwargs)
if not r.ok:
@@ -423,9 +431,8 @@ def _split_wcs_extent(
fmt,
crs,
):
- """There is a max height and width limit for the wcs server. This function
- splits your extent in chunks smaller than the limit. It returns a list of
- Memory files.
+ """There is a max height and width limit for the wcs server. This function splits
+ your extent in chunks smaller than the limit. It returns a list of Memory files.
Parameters
----------
@@ -454,12 +461,12 @@ def _split_wcs_extent(
-------
MemoryFile
Rasterio MemoryFile of the merged data
+
Notes
-----
1. The resolution is used to obtain the data from the wcs server. Not sure
what kind of interpolation is used to resample the original grid.
"""
-
# write tiles
datasets = []
start_x = extent[0]
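
The chunked query above is a generic guard against over-long `where` clauses. Stripped of the ArcGIS specifics, the pattern looks like the sketch below (`fetch` stands in for the actual request helper):

    def fetch_in_chunks(object_ids, fetch, chunk_size=100):
        """Call fetch() on successive slices of object_ids and merge the features."""
        features = []
        for i in range(0, len(object_ids), chunk_size):
            chunk = object_ids[i : i + chunk_size]
            where = f"PEILGEBIEDVIGERENDID IN ({','.join(str(v) for v in chunk)})"
            features.extend(fetch(where)["features"])
        return features
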
diff --git a/nlmod/sim/__init__.py b/nlmod/sim/__init__.py
index 1ca20b53..57b71d59 100644
--- a/nlmod/sim/__init__.py
+++ b/nlmod/sim/__init__.py
@@ -1 +1,2 @@
+# ruff: noqa: F403
from .sim import *
diff --git a/nlmod/sim/sim.py b/nlmod/sim/sim.py
index 119a97bf..4d5e2438 100644
--- a/nlmod/sim/sim.py
+++ b/nlmod/sim/sim.py
@@ -1,6 +1,7 @@
import datetime as dt
import logging
import os
+import pathlib
from shutil import copyfile
import flopy
@@ -13,9 +14,9 @@
def write_and_run(sim, ds, write_ds=True, script_path=None, silent=False):
- """write modflow files and run the model. Extra options include writing the
- model dataset to a netcdf file in the model workspace and copying the
- modelscript to the model workspace.
+ """Write modflow files and run the model. Extra options include writing the model
+ dataset to a netcdf file in the model workspace and copying the modelscript to the
+ model workspace.
Parameters
----------
@@ -51,7 +52,10 @@ def write_and_run(sim, ds, write_ds=True, script_path=None, silent=False):
ds.attrs["model_dataset_written_to_disk_on"] = dt.datetime.now().strftime(
"%Y%m%d_%H:%M:%S"
)
- ds.to_netcdf(os.path.join(ds.attrs["model_ws"], f"{ds.model_name}.nc"))
+ if isinstance(ds.attrs["model_ws"], pathlib.PurePath):
+ ds.to_netcdf(ds.attrs["model_ws"] / f"{ds.model_name}.nc")
+ else:
+ ds.to_netcdf(os.path.join(ds.attrs["model_ws"], f"{ds.model_name}.nc"))
logger.info("write modflow files to model workspace")
sim.write_simulation(silent=silent)
@@ -107,8 +111,8 @@ def get_tdis_perioddata(ds, nstp="nstp", tsmult="tsmult"):
return tdis_perioddata
-def sim(ds, exe_name=None):
- """create sim from the model dataset.
+def sim(ds, exe_name=None, version_tag=None):
+ """Create sim from the model dataset.
Parameters
----------
@@ -117,21 +121,36 @@ def sim(ds, exe_name=None):
attributes: model_name, mfversion, model_ws, time_units, start,
perlen, nstp, tsmult
exe_name: str, optional
- path to modflow executable, default is None, which assumes binaries
- are available in nlmod/bin directory. Binaries can be downloaded
- using `nlmod.util.download_mfbinaries()`.
+ path to modflow executable, default is None. If None, the path is
+ obtained from the flopy metadata that respects `version_tag`. If not
+ found, the executables are downloaded. Not compatible with version_tag.
+ version_tag : str, default None
+ GitHub release ID: for example "18.0" or "latest". If version_tag is provided,
+ the most recent installation location of MODFLOW is found in flopy metadata
+ that respects `version_tag`. If not found, the executables are downloaded.
+ Not compatible with exe_name.
Returns
-------
sim : flopy MFSimulation
simulation object.
"""
-
# start creating model
logger.info("creating mf6 SIM")
- if exe_name is None:
- exe_name = util.get_exe_path(ds.mfversion)
+ # Most likely exe_name was previously set with to_model_ds()
+ if exe_name is not None:
+ exe_name = util.get_exe_path(exe_name=exe_name, version_tag=version_tag)
+ elif "exe_name" in ds.attrs:
+ exe_name = util.get_exe_path(
+ exe_name=ds.attrs["exe_name"], version_tag=version_tag
+ )
+ elif "mfversion" in ds.attrs:
+ exe_name = util.get_exe_path(
+ exe_name=ds.attrs["mfversion"], version_tag=version_tag
+ )
+ else:
+ raise ValueError("No exe_name provided and no exe_name found in ds.attrs")
# Create the Flopy simulation object
sim = flopy.mf6.MFSimulation(
@@ -145,7 +164,7 @@ def sim(ds, exe_name=None):
def tdis(ds, sim, pname="tdis", nstp="nstp", tsmult="tsmult", **kwargs):
- """create tdis package from the model dataset.
+ """Create tdis package from the model dataset.
Parameters
----------
@@ -164,7 +183,6 @@ def tdis(ds, sim, pname="tdis", nstp="nstp", tsmult="tsmult", **kwargs):
dis : flopy TDis
tdis object.
"""
-
# start creating model
logger.info("creating mf6 TDIS")
@@ -185,7 +203,7 @@ def tdis(ds, sim, pname="tdis", nstp="nstp", tsmult="tsmult", **kwargs):
def ims(sim, complexity="MODERATE", pname="ims", **kwargs):
- """create IMS package.
+ """Create IMS package.
Parameters
----------
@@ -201,7 +219,6 @@ def ims(sim, complexity="MODERATE", pname="ims", **kwargs):
ims : flopy ModflowIms
ims object.
"""
-
logger.info("creating mf6 IMS")
print_option = kwargs.pop("print_option", "summary")
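
With these changes the executable is resolved through `exe_name`/`version_tag` instead of a hard-coded path. A condensed sketch of a typical call sequence, assuming `ds` is an existing model dataset with the attributes listed in the docstrings above:

    import nlmod

    # ds: model dataset with model_name, model_ws, time settings, etc.
    sim = nlmod.sim.sim(ds, version_tag="latest")  # resolve or download MODFLOW 6
    tdis = nlmod.sim.tdis(ds, sim)
    ims = nlmod.sim.ims(sim, complexity="MODERATE")

    # ... create the groundwater flow model and its packages here ...

    nlmod.sim.write_and_run(sim, ds, write_ds=True)
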
diff --git a/nlmod/util.py b/nlmod/util.py
index a2c1c72b..cfa273e7 100644
--- a/nlmod/util.py
+++ b/nlmod/util.py
@@ -1,19 +1,24 @@
+import json
import logging
import os
import re
import sys
import warnings
+from pathlib import Path
from typing import Dict, Optional
-import flopy
import geopandas as gpd
import requests
import xarray as xr
from colorama import Back, Fore, Style
-from shapely.geometry import box
+from flopy.utils import get_modflow
+from flopy.utils.get_modflow import flopy_appdata_path, get_release
+from shapely.geometry import Polygon, box
logger = logging.getLogger(__name__)
+nlmod_bindir = Path(__file__).parent / "bin"
+
class LayerError(Exception):
"""Generic error when modifying layers."""
@@ -89,29 +94,349 @@ def get_model_dirs(model_ws):
return figdir, cachedir
-def get_exe_path(exe_name="mf6"):
- """Get the full path of the executable. Uses the bin directory in the nlmod package.
+def get_exe_path(
+ exe_name="mf6",
+ bindir=None,
+ download_if_not_found=True,
+ version_tag=None,
+ repo="executables",
+):
+ """Get the full path of the executable.
+
+ Searching for the executables is done in the following order:
+ 0. If exe_name is a full path, return the full path of the executable.
+ 1. The directory specified with `bindir`. Raises error if exe_name is provided
+ and not found.
+ 2. The directory used by nlmod installed in this environment.
+ 3. If the executables were downloaded with flopy/nlmod from another env,
+ the most recent installation location of MODFLOW is found in the flopy metadata.
+
+ Else:
+ 4. Download the executables using `version_tag` and `repo`.
+
+ The returned directory is checked to contain exe_name if it is provided.
Parameters
----------
exe_name : str, optional
- name of the executable. The default is 'mf6'.
+ The name of the executable, by default "mf6".
+ bindir : Path, optional
+ The directory where the executables are stored, by default None
+ download_if_not_found : bool, optional
+ Download the executables if they are not found, by default True.
+ repo : str, default "executables"
+ Name of GitHub repository. Choose one of "executables" (default), "modflow6",
+ or "modflow6-nightly-build". If repo and version_tag are provided the most
+ recent installation location of MODFLOW is found in flopy metadata that
+ respects `version_tag` and `repo`. If not found, the executables are downloaded
+ using repo and version_tag.
+ version_tag : str, default None
+ GitHub release ID: for example "18.0" or "latest". If repo and version_tag are
+ provided the most recent installation location of MODFLOW is found in flopy
+ metadata that respects `version_tag` and `repo`. If not found, the executables
+ are downloaded using repo and version_tag.
Returns
-------
- exe_path : str
+ exe_full_path : str
full path of the executable.
"""
- exe_path = os.path.join(os.path.dirname(__file__), "bin", exe_name)
- if sys.platform.startswith("win"):
- exe_path += ".exe"
+ if sys.platform.startswith("win") and not exe_name.endswith(".exe"):
+ exe_name += ".exe"
- if not os.path.exists(exe_path):
- logger.warning(
- f"executable {exe_path} not found, download the binaries using nlmod.util.download_mfbinaries"
+ # If exe_name is a full path
+ if Path(exe_name).exists():
+ enable_version_check = version_tag is not None and repo is not None
+
+ if enable_version_check:
+ msg = (
+ "Incompatible arguments. If exe_name is provided, unable to check "
+ "the version."
+ )
+ raise ValueError(msg)
+ exe_full_path = exe_name
+
+ else:
+ exe_full_path = str(
+ get_bin_directory(
+ exe_name=exe_name,
+ bindir=bindir,
+ download_if_not_found=download_if_not_found,
+ version_tag=version_tag,
+ repo=repo,
+ )
+ / exe_name
)
- return exe_path
+ msg = f"Executable path: {exe_full_path}"
+ logger.debug(msg)
+
+ return exe_full_path
+
+
+def get_bin_directory(
+ exe_name="mf6",
+ bindir=None,
+ download_if_not_found=True,
+ version_tag=None,
+ repo="executables",
+) -> Path:
+ """Get the directory where the executables are stored.
+
+ Searching for the executables is done in the following order:
+ 0. If exe_name is a full path, return the full path of the executable.
+ 1. The directory specified with `bindir`. Raises error if exe_name is provided
+ and not found. Requires `version_tag` to be None.
+ 2. The directory used by nlmod installed in this environment.
+ 3. If the executables were downloaded with flopy/nlmod from another env,
+ the most recent installation location of MODFLOW is found in the flopy metadata.
+
+ Else:
+ 4. Download the executables using `version_tag` and `repo`.
+
+ The returned directory is checked to contain exe_name if exe_name is provided. If
+ exe_name is set to None only the existence of the directory is checked.
+
+ Parameters
+ ----------
+ exe_name : str, optional
+ The name of the executable, by default mf6.
+ bindir : Path, optional
+ The directory where the executables are stored, by default "mf6".
+ download_if_not_found : bool, optional
+ Download the executables if they are not found, by default True.
+ repo : str, default "executables"
+ Name of GitHub repository. Choose one of "executables" (default), "modflow6",
+ or "modflow6-nightly-build". If repo and version_tag are provided the most
+ recent installation location of MODFLOW is found in flopy metadata that
+ respects `version_tag` and `repo`. If not found, the executables are downloaded
+ using repo and version_tag.
+ version_tag : str, default None
+ GitHub release ID: for example "18.0" or "latest". If repo and version_tag are
+ provided the most recent installation location of MODFLOW is found in flopy
+ metadata that respects `version_tag` and `repo`. If not found, the executables
+ are downloaded using repo and version_tag.
+
+ Returns
+ -------
+ Path
+ The directory where the executables are stored.
+
+ Raises
+ ------
+ FileNotFoundError
+ If the executables are not found in the specified directories.
+ """
+ bindir = Path(bindir) if bindir is not None else None
+
+ if sys.platform.startswith("win") and not exe_name.endswith(".exe"):
+ exe_name += ".exe"
+
+ enable_version_check = version_tag is not None
+
+ # If exe_name is a full path
+ if Path(exe_name).exists():
+ if enable_version_check:
+ msg = (
+ "Incompatible arguments. If exe_name is provided, unable to check "
+ "the version."
+ )
+ raise ValueError(msg)
+ return Path(exe_name).parent
+
+ # If bindir is provided
+ if bindir is not None and enable_version_check:
+ msg = (
+ "Incompatible arguments. If bindir is provided, "
+ "unable to check the version."
+ )
+ raise ValueError(msg)
+
+ use_bindir = (
+ bindir is not None and exe_name is not None and (bindir / exe_name).exists()
+ )
+ use_bindir |= bindir is not None and exe_name is None and bindir.exists()
+
+ if use_bindir:
+ return bindir
+
+ # If the executables are in the flopy directory
+ flopy_bindirs = get_flopy_bin_directories(version_tag=version_tag, repo=repo)
+
+ if exe_name is not None:
+ flopy_bindirs = [
+ flopy_bindir
+ for flopy_bindir in flopy_bindirs
+ if Path(flopy_bindir / exe_name).exists()
+ ]
+ else:
+ flopy_bindirs = [
+ flopy_bindir
+ for flopy_bindir in flopy_bindirs
+ if Path(flopy_bindir).exists()
+ ]
+
+ if nlmod_bindir in flopy_bindirs:
+ return nlmod_bindir
+
+ if flopy_bindirs:
+ # Get most recent directory
+ return flopy_bindirs[-1]
+
+ # Else download the executables
+ if download_if_not_found:
+ download_mfbinaries(
+ bindir=bindir,
+ version_tag=version_tag if version_tag is not None else "latest",
+ repo=repo,
+ )
+
+ # Rerun this function
+ return get_bin_directory(
+ exe_name=exe_name,
+ bindir=bindir,
+ download_if_not_found=False,
+ version_tag=version_tag,
+ repo=repo,
+ )
+
+ else:
+ msg = (
+ f"Could not find {exe_name} in {bindir}, "
+ f"{nlmod_bindir} and {flopy_bindirs}."
+ )
+ raise FileNotFoundError(msg)
+
+
+def get_flopy_bin_directories(version_tag=None, repo="executables"):
+ """Get the directories where the executables are stored.
+
+ Obtain the bin directory installed with flopy. If `version_tag` is provided,
+ all installation locations of MODFLOW are found in flopy metadata that respect
+ `version_tag` and `repo`.
+
+ Parameters
+ ----------
+ repo : str, default "executables"
+ Name of GitHub repository. Choose one of "executables" (default),
+ "modflow6", or "modflow6-nightly-build". If repo and version_tag are provided
+ the most recent installation location of MODFLOW is found in flopy metadata
+ that respects `version_tag` and `repo`. If not found, the executables are
+ downloaded using repo and version_tag.
+ version_tag : str, default None
+ GitHub release ID: for example "18.0" or "latest". If repo and version_tag are
+ provided the most recent installation location of MODFLOW is found in flopy
+ metadata that respects `version_tag` and `repo`. If not found, the executables
+ are downloaded using repo and version_tag.
+
+ Returns
+ -------
+ list
+ list of directories where the executables are stored.
+ """
+ flopy_metadata_fp = flopy_appdata_path / "get_modflow.json"
+
+ if not flopy_metadata_fp.exists():
+ return []
+
+ meta_raw = flopy_metadata_fp.read_text()
+
+ # Remove trailing characters that are not part of the JSON.
+ while meta_raw[-3:] != "}\n]":
+ meta_raw = meta_raw[:-1]
+
+ # Get metadata of all flopy installations
+ meta_list = json.loads(meta_raw)
+
+ enable_version_check = version_tag is not None and repo is not None
+
+ if enable_version_check:
+ msg = (
+ "The version of the executables will be checked, because the "
+ f"`version_tag={version_tag}` is passed to `get_flopy_bin_directories()`."
+ )
+
+ # To convert latest into an explicit tag
+ if version_tag == "latest":
+ version_tag_pin = get_release(tag=version_tag, repo=repo, quiet=True)[
+ "tag_name"
+ ]
+ else:
+ version_tag_pin = version_tag
+
+ # get path to the most recent installation. Appended to end of get_modflow.json
+ meta_list_validversion = [
+ meta
+ for meta in meta_list
+ if (meta["release_id"] == version_tag_pin) and (meta["repo"] == repo)
+ ]
+
+ else:
+ msg = (
+ "The version of the executables will not be checked, because "
+ "`version_tag` is not passed to `get_flopy_bin_directories()`."
+ )
+ meta_list_validversion = meta_list
+ logger.debug(msg)
+
+ path_list = [
+ Path(meta["bindir"])
+ for meta in meta_list_validversion
+ if Path(meta["bindir"]).exists()
+ ]
+ return path_list
+
+
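# For reference, get_flopy_bin_directories() only relies on the three keys
# shown below being present in flopy's get_modflow.json; this is an
# illustrative entry with a made-up bindir path.
example_metadata = [
    {
        "release_id": "18.0",
        "repo": "executables",
        "bindir": "/home/user/.local/share/flopy/bin",
    }
]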
+def download_mfbinaries(bindir=None, version_tag="latest", repo="executables"):
+ """Download and unpack platform-specific modflow binaries.
+
+ Source: USGS
+
+ Parameters
+ ----------
+ bindir : str, optional
+ path to the directory to download the binaries to; if it doesn't exist, it
+ is created. Default is None, which sets the directory to nlmod/bin.
+ repo : str, default "executables"
+ Name of GitHub repository. Choose one of "executables" (default),
+ "modflow6", or "modflow6-nightly-build".
+ version_tag : str, default "latest"
+ GitHub release ID.
+ """
+ if bindir is None:
+ # Path objects are immutable so a copy is implied
+ bindir = nlmod_bindir
+
+ if not os.path.isdir(bindir):
+ os.makedirs(bindir)
+
+ get_modflow(bindir=str(bindir), release_id=version_tag, repo=repo)
+
+ # Ensure metadata is saved.
+ # https://github.com/modflowpy/flopy/blob/
+ # 0748dcb9e4641b5ad9616af115dd3be906f98f50/flopy/utils/get_modflow.py#L623
+ flopy_metadata_fp = flopy_appdata_path / "get_modflow.json"
+
+ if not flopy_metadata_fp.exists():
+ if "pytest" not in str(bindir) and "pytest" not in sys.modules:
+ logger.warning(
+ f"flopy metadata file not found at {flopy_metadata_fp} "
+ "after downloading and installing the executables. "
+ "Creating a new metadata file."
+ )
+
+ release_metadata = get_release(tag=version_tag, repo=repo, quiet=True)
+ install_metadata = {
+ "release_id": release_metadata["tag_name"],
+ "repo": repo,
+ "bindir": str(bindir),
+ }
+
+ with open(flopy_metadata_fp, "w", encoding="UTF-8") as f:
+ json.dump([install_metadata], f, indent=4)
+
+ # download the provisional version of modpath from Github
+ download_modpath_provisional_exe(bindir=bindir, timeout=120)
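# Typical calls of the rewritten downloader, as a sketch: the default call
# downloads the latest release to nlmod's bin directory and registers it in
# flopy's metadata; the pinned tag "18.0" is only an example.
import nlmod

nlmod.util.download_mfbinaries()
nlmod.util.download_mfbinaries(version_tag="18.0", repo="executables")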
def get_ds_empty(ds, keep_coords=None):
@@ -175,8 +500,10 @@ def get_da_from_da_ds(da_ds, dims=("y", "x"), data=None):
def find_most_recent_file(folder, name, extension=".pklz"):
- """Find the most recent file in a folder. File must startwith name and end width
- extension. If you want to look for the most recent folder use extension = ''.
+ """Find the most recent file in a folder.
+
+ File must startwith name and end width extension. If you want to look for the most
+ recent folder use extension = ''.
Parameters
----------
@@ -192,7 +519,6 @@ def find_most_recent_file(folder, name, extension=".pklz"):
newest_file : str
name of the most recent file
"""
-
i = 0
for file in os.listdir(folder):
if file.startswith(name) and file.endswith(extension):
@@ -229,7 +555,6 @@ def compare_model_extents(extent1, extent2):
1: extent1 is completely within extent2
2: extent2 is completely within extent1
"""
-
# option1 extent1 is completely within extent2
check_xmin = extent1[0] >= extent2[0]
check_xmax = extent1[1] <= extent2[1]
@@ -267,6 +592,49 @@ def compare_model_extents(extent1, extent2):
raise NotImplementedError("other options are not yet implemented")
+def extent_to_polygon(extent):
+ """Generate a shapely Polygon from an extent ([xmin, xmax, ymin, ymax])
+
+ Parameters
+ ----------
+ extent : tuple, list or array
+ extent (xmin, xmax, ymin, ymax).
+
+ Returns
+ -------
+ shapely.geometry.Polygon
+ polygon of the extent.
+
+ """
+ nw = (extent[0], extent[2])
+ no = (extent[1], extent[2])
+ zo = (extent[1], extent[3])
+ zw = (extent[0], extent[3])
+ return Polygon([nw, no, zo, zw])
+
+
+def extent_to_gdf(extent, crs="EPSG:28992"):
+ """Create a geodataframe with a single polygon with the extent given.
+
+ Parameters
+ ----------
+ extent : tuple, list or array
+ extent.
+ crs : str, optional
+ coordinate reference system of the extent, default is EPSG:28992
+ (RD new)
+
+ Returns
+ -------
+ gdf_extent : geopandas.GeoDataFrame
+ geodataframe with extent.
+ """
+ geom_extent = extent_to_polygon(extent)
+ gdf_extent = gpd.GeoDataFrame(geometry=[geom_extent], crs=crs)
+
+ return gdf_extent
+
+
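# Short usage sketch of the two helpers added above, using nlmod's
# (xmin, xmax, ymin, ymax) extent convention; the extent values are arbitrary.
from nlmod.util import extent_to_gdf, extent_to_polygon

extent = [98000.0, 99000.0, 489000.0, 490000.0]
polygon = extent_to_polygon(extent)
gdf = extent_to_gdf(extent, crs="EPSG:28992")
print(polygon.bounds)  # (98000.0, 489000.0, 99000.0, 490000.0)
print(gdf.crs)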
def polygon_from_extent(extent):
"""Create a shapely polygon from a given extent.
@@ -280,7 +648,10 @@ def polygon_from_extent(extent):
polygon_ext : shapely.geometry.polygon.Polygon
polygon of the extent.
"""
-
+ logger.warning(
+ "nlmod.util.polygon_from_extent is deprecated. "
+ "Use nlmod.util.extent_to_polygon instead"
+ )
bbox = (extent[0], extent[2], extent[1], extent[3])
polygon_ext = box(*tuple(bbox))
@@ -303,7 +674,10 @@ def gdf_from_extent(extent, crs="EPSG:28992"):
gdf_extent : GeoDataFrame
geodataframe with extent.
"""
-
+ logger.warning(
+ "nlmod.util.gdf_from_extent is deprecated. "
+ "Use nlmod.util.extent_to_gdf instead"
+ )
geom_extent = polygon_from_extent(extent)
gdf_extent = gpd.GeoDataFrame(geometry=[geom_extent], crs=crs)
@@ -311,8 +685,9 @@ def gdf_from_extent(extent, crs="EPSG:28992"):
def gdf_within_extent(gdf, extent):
- """Select only parts of the geodataframe within the extent. Only accepts Polygon and
- Linestring geometry types.
+ """Select only parts of the geodataframe within the extent.
+
+ Only accepts Polygon and Linestring geometry types.
Parameters
----------
@@ -327,7 +702,7 @@ def gdf_within_extent(gdf, extent):
dataframe with only polygon features within the extent.
"""
# create geodataframe from the extent
- gdf_extent = gdf_from_extent(extent, crs=gdf.crs)
+ gdf_extent = extent_to_gdf(extent, crs=gdf.crs)
# check type
geom_types = gdf.geom_type.unique()
@@ -366,6 +741,7 @@ def get_google_drive_filename(fid, timeout=120):
warnings.warn(
"this function is no longer supported use the gdown package instead",
DeprecationWarning,
+ stacklevel=1,
)
if isinstance(id, requests.Response):
@@ -392,6 +768,7 @@ def download_file_from_google_drive(fid, destination=None):
warnings.warn(
"this function is no longer supported use the gdown package instead",
DeprecationWarning,
+ stacklevel=1,
)
def get_confirm_token(response):
@@ -430,30 +807,6 @@ def save_response_content(response, destination):
save_response_content(response, destination)
-def download_mfbinaries(bindir=None):
- """Download and unpack platform-specific modflow binaries.
-
- Source: USGS
-
- Parameters
- ----------
- binpath : str, optional
- path to directory to download binaries to, if it doesnt exist it
- is created. Default is None which sets dir to nlmod/bin.
- version : str, optional
- version string, by default 8.0
- """
-
- if bindir is None:
- bindir = os.path.join(os.path.dirname(__file__), "bin")
- if not os.path.isdir(bindir):
- os.makedirs(bindir)
- flopy.utils.get_modflow(bindir)
- if sys.platform.startswith("win"):
- # download the provisional version of modpath from Github
- download_modpath_provisional_exe(bindir)
-
-
def download_modpath_provisional_exe(bindir=None, timeout=120):
"""Download the provisional version of modpath to the folder with binaries."""
if bindir is None:
@@ -507,14 +860,12 @@ def __init__(
self, *args, colors: Optional[Dict[str, str]] = None, **kwargs
) -> None:
"""Initialize the formatter with specified format strings."""
-
super().__init__(*args, **kwargs)
self.colors = colors if colors else {}
def format(self, record) -> str:
"""Format the specified record as text."""
-
record.color = self.colors.get(record.levelname, "")
record.reset = Style.RESET_ALL
@@ -522,6 +873,18 @@ def format(self, record) -> str:
def get_color_logger(level="INFO"):
+ """Get a logger with colored output.
+
+ Parameters
+ ----------
+ level : str, optional
+ The logging level to set for the logger. Default is "INFO".
+
+ Returns
+ -------
+ logger : logging.Logger
+ The configured logger object.
+ """
if level == "DEBUG":
FORMAT = "{color}{levelname}:{name}.{funcName}:{lineno}:{message}{reset}"
else:
diff --git a/nlmod/version.py b/nlmod/version.py
index b6f9c6c1..213fe8f0 100644
--- a/nlmod/version.py
+++ b/nlmod/version.py
@@ -1,20 +1,17 @@
from importlib import metadata
from platform import python_version
-__version__ = "0.7.2"
+__version__ = "0.8.0"
def show_versions() -> None:
"""Method to print the version of dependencies."""
-
msg = (
- f"Python version: {python_version()}\n"
- f"NumPy version: {metadata.version('numpy')}\n"
- f"Xarray version: {metadata.version('xarray')}\n"
- f"Matplotlib version: {metadata.version('matplotlib')}\n"
- f"Flopy version: {metadata.version('flopy')}\n"
+ f"Python version : {python_version()}\n"
+ f"NumPy version : {metadata.version('numpy')}\n"
+ f"Xarray version : {metadata.version('xarray')}\n"
+ f"Matplotlib version : {metadata.version('matplotlib')}\n"
+ f"Flopy version : {metadata.version('flopy')}\n\n"
+ f"nlmod version : {__version__}"
)
-
- msg += f"\nnlmod version: {__version__}"
-
- return print(msg)
+ print(msg)
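# Usage sketch of the reworked version overview, assuming the function is
# imported directly from nlmod.version.
from nlmod.version import show_versions

show_versions()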
diff --git a/pyproject.toml b/pyproject.toml
index 6729fa58..4867067e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -35,7 +35,8 @@ dependencies = [
"matplotlib",
"dask",
"colorama",
- "joblib"
+ "joblib",
+ "bottleneck",
]
keywords = ["hydrology", "groundwater", "modeling", "Modflow 6", "flopy"]
classifiers = [
@@ -56,20 +57,13 @@ repository = "https://github.com/gwmod/nlmod"
documentation = "https://nlmod.readthedocs.io/en/latest/"
[project.optional-dependencies]
-full = [
- "nlmod[knmi]",
- "gdown",
- "geocube",
- "bottleneck",
- "contextily",
- "scikit-image",
-]
+full = ["nlmod[knmi]", "gdown", "geocube", "contextily", "scikit-image"]
knmi = ["h5netcdf", "nlmod[grib]"]
grib = ["cfgrib", "ecmwflibs"]
test = ["pytest>=7", "pytest-cov", "pytest-dependency"]
nbtest = ["nbformat", "nbconvert>6.4.5"]
lint = ["flake8", "isort", "black[jupyter]"]
-ci = ["nlmod[full,lint,test,nbtest]", "netCDF4>=1.6.3", "pandas<2.1.0"]
+ci = ["nlmod[full,lint,test,nbtest]", "netCDF4<1.7.0", "pandas<2.1.0"]
rtd = [
"nlmod[full]",
"ipython",
@@ -78,7 +72,7 @@ rtd = [
"nbsphinx",
"sphinx_rtd_theme==1.0.0",
"nbconvert==7.13.0",
- "netCDF4>=1.6.3",
+ "netCDF4<1.7.0",
]
[tool.setuptools.dynamic]
@@ -92,7 +86,7 @@ include-package-data = true
[tool.setuptools.package-data]
"nlmod.data" = ["*.gleg"]
-"nlmod.data.geotop" = ["*.csv"]
+"nlmod.data.geotop" = ["*"]
"nlmod.data.shapes" = ["*"]
"nlmod.bin" = ["mp7_2_002_provisional"]
@@ -102,6 +96,33 @@ line-length = 88
[tool.isort]
profile = "black"
+[tool.ruff]
+line-length = 88
+extend-include = ["*.ipynb"]
+
+[tool.ruff.lint]
+# See: https://docs.astral.sh/ruff/rules/
+select = [
+ "C4", # flake8-comprehensions
+ "E", # pycodestyle
+ "F", # pyflakes
+ "I", # isort
+ "PT", # pytest-style
+ "D", # pydocstyle
+ "B", # flake8-bugbear
+ "NPY", # numpy
+]
+ignore = [
+ "D401", # Imperative mood for docstring. Be glad we have docstrings at all :P!
+ "D100", # Missing docstring in module.
+ "D104", # Missing docstring in public package.
+]
+
+[tool.ruff.format]
+
+[tool.ruff.lint.pydocstyle]
+convention = "numpy"
+
[tool.pytest.ini_options]
addopts = "--strict-markers --durations=0 --cov-report xml:coverage.xml --cov nlmod -v"
markers = ["notebooks: run notebooks", "slow: slow tests", "skip: skip tests"]
diff --git a/tests/test_001_model.py b/tests/test_001_model.py
index 13e2a66e..3f0e4c1d 100644
--- a/tests/test_001_model.py
+++ b/tests/test_001_model.py
@@ -75,7 +75,7 @@ def test_get_ds_variable_delrc():
)
-@pytest.mark.slow
+@pytest.mark.slow()
def test_create_small_model_grid_only(tmpdir, model_name="test"):
extent = [98700.0, 99000.0, 489500.0, 489700.0]
# extent, nrow, ncol = nlmod.read.regis.fit_extent_to_regis(extent, 100, 100)
@@ -117,7 +117,7 @@ def test_create_small_model_grid_only(tmpdir, model_name="test"):
ds.to_netcdf(os.path.join(tst_model_dir, "small_model.nc"))
-@pytest.mark.slow
+@pytest.mark.slow()
def test_create_sea_model_grid_only(tmpdir, model_name="test"):
extent = [95000.0, 105000.0, 494000.0, 500000.0]
# extent, nrow, ncol = nlmod.read.regis.fit_extent_to_regis(extent, 100, 100)
@@ -143,7 +143,7 @@ def test_create_sea_model_grid_only(tmpdir, model_name="test"):
ds.to_netcdf(os.path.join(tst_model_dir, "basic_sea_model.nc"))
-@pytest.mark.slow
+@pytest.mark.slow()
def test_create_sea_model_grid_only_delr_delc_50(tmpdir, model_name="test"):
ds = get_ds_time_transient(tmpdir)
extent = [95000.0, 105000.0, 494000.0, 500000.0]
@@ -160,7 +160,7 @@ def test_create_sea_model_grid_only_delr_delc_50(tmpdir, model_name="test"):
ds.to_netcdf(os.path.join(tst_model_dir, "sea_model_grid_50.nc"))
-@pytest.mark.slow
+@pytest.mark.slow()
def test_create_sea_model(tmpdir):
ds = xr.open_dataset(
os.path.join(tst_model_dir, "basic_sea_model.nc"), mask_and_scale=False
@@ -210,7 +210,7 @@ def test_create_sea_model(tmpdir):
_ = nlmod.sim.write_and_run(sim, ds)
-@pytest.mark.slow
+@pytest.mark.slow()
def test_create_sea_model_perlen_list(tmpdir):
ds = xr.open_dataset(os.path.join(tst_model_dir, "basic_sea_model.nc"))
@@ -280,7 +280,7 @@ def test_create_sea_model_perlen_list(tmpdir):
nlmod.sim.write_and_run(sim, ds)
-@pytest.mark.slow
+@pytest.mark.slow()
def test_create_sea_model_perlen_14(tmpdir):
ds = xr.open_dataset(os.path.join(tst_model_dir, "basic_sea_model.nc"))
diff --git a/tests/test_002_regis_geotop.py b/tests/test_002_regis_geotop.py
index 8b46145c..dced58ba 100644
--- a/tests/test_002_regis_geotop.py
+++ b/tests/test_002_regis_geotop.py
@@ -1,4 +1,5 @@
import matplotlib.pyplot as plt
+
import nlmod
diff --git a/tests/test_003_mfpackages.py b/tests/test_003_mfpackages.py
index 86b08b1b..6857d9d2 100644
--- a/tests/test_003_mfpackages.py
+++ b/tests/test_003_mfpackages.py
@@ -94,7 +94,7 @@ def get_value_from_ds_datavar():
},
)
shape = list(ds.sizes.values())
- ds["test_var"] = ("layer", "y", "x"), np.arange(np.product(shape)).reshape(shape)
+ ds["test_var"] = ("layer", "y", "x"), np.arange(np.prod(shape)).reshape(shape)
# get value from ds
v0 = nlmod.util._get_value_from_ds_datavar(
diff --git a/tests/test_004_northsea.py b/tests/test_004_northsea.py
index 2e49171d..d71ccea4 100644
--- a/tests/test_004_northsea.py
+++ b/tests/test_004_northsea.py
@@ -60,7 +60,7 @@ def test_get_bathymetry_seamodel():
assert (~ds_bathymetry.bathymetry.isnull()).sum() > 0
-def test_get_bathymetrie_nosea():
+def test_get_bathymetry_nosea():
# model without sea
ds = test_001_model.get_ds_from_cache("small_model")
ds.update(nlmod.read.rws.get_northsea(ds))
@@ -69,7 +69,7 @@ def test_get_bathymetrie_nosea():
assert (~ds_bathymetry.bathymetry.isnull()).sum() == 0
-def test_add_bathymetrie_to_top_bot_kh_kv_seamodel():
+def test_add_bathymetry_to_top_bot_kh_kv_seamodel():
# model with sea
ds = test_001_model.get_ds_from_cache("basic_sea_model")
ds.update(nlmod.read.rws.get_northsea(ds))
diff --git a/tests/test_005_external_data.py b/tests/test_005_external_data.py
index f558b796..1d40d0d1 100644
--- a/tests/test_005_external_data.py
+++ b/tests/test_005_external_data.py
@@ -1,6 +1,8 @@
import pandas as pd
import pytest
import test_001_model
+import xarray as xr
+from shapely.geometry import LineString
import nlmod
@@ -65,9 +67,13 @@ def test_get_ahn3():
def test_get_ahn4():
extent = [98000.0, 100000.0, 494000.0, 496000.0]
- da = nlmod.read.ahn.get_ahn4(extent)
+ ahn = nlmod.read.ahn.get_ahn4(extent)
+ assert isinstance(ahn, xr.DataArray)
+ assert not ahn.isnull().all(), "AHN only has nan values"
- assert not da.isnull().all(), "AHN only has nan values"
+ line = LineString([(99000, 495000), (100000, 496000)])
+ ahn_line = nlmod.read.ahn.get_ahn_along_line(line, ahn=ahn)
+ assert isinstance(ahn_line, xr.DataArray)
def test_get_ahn():
@@ -80,6 +86,10 @@ def test_get_ahn():
assert not ahn_ds["ahn"].isnull().all(), "AHN only has nan values"
+def test_get_ahn_at_point():
+ nlmod.read.ahn.get_ahn_at_point(100010, 400010)
+
+
def test_get_surface_water_ghb():
# model with sea
ds = test_001_model.get_ds_from_cache("basic_sea_model")
@@ -88,13 +98,13 @@ def test_get_surface_water_ghb():
sim = nlmod.sim.sim(ds)
# create time discretisation
- tdis = nlmod.sim.tdis(ds, sim)
+ nlmod.sim.tdis(ds, sim)
# create groundwater flow model
gwf = nlmod.gwf.gwf(ds, sim)
# create ims
- ims = nlmod.sim.ims(sim)
+ nlmod.sim.ims(sim)
nlmod.gwf.dis(ds, gwf)
diff --git a/tests/test_006_caching.py b/tests/test_006_caching.py
index 741c1ffd..5bdfb3e0 100644
--- a/tests/test_006_caching.py
+++ b/tests/test_006_caching.py
@@ -1,96 +1,86 @@
+import os
import tempfile
-import pytest
-import test_001_model
-
import nlmod
-tmpdir = tempfile.gettempdir()
-
-
-def test_ds_check_true():
- # two models with the same grid and time dicretisation
- ds = test_001_model.get_ds_from_cache("small_model")
- ds2 = ds.copy()
-
- check = nlmod.cache._check_ds(ds, ds2)
-
- assert check
-
-
-def test_ds_check_time_false():
- # two models with a different time discretisation
- ds = test_001_model.get_ds_from_cache("small_model")
- ds2 = test_001_model.get_ds_time_steady(tmpdir)
-
- check = nlmod.cache._check_ds(ds, ds2)
-
- assert not check
-
-
-def test_ds_check_time_attributes_false():
- # two models with a different time discretisation
- ds = test_001_model.get_ds_from_cache("small_model")
- ds2 = ds.copy()
-
- ds2.time.attrs["time_units"] = "MONTHS"
-
- check = nlmod.cache._check_ds(ds, ds2)
-
- assert not check
-
-def test_cache_data_array():
+def test_cache_ahn_data_array():
+ """Test caching of AHN data array. Does not have dataset as argument."""
extent = [119_900, 120_000, 441_900, 442_000]
- ahn_no_cache = nlmod.read.ahn.get_ahn4(extent)
- ahn_cached = nlmod.read.ahn.get_ahn4(extent, cachedir=tmpdir, cachename="ahn4.nc")
- ahn_cache = nlmod.read.ahn.get_ahn4(extent, cachedir=tmpdir, cachename="ahn4.nc")
- assert ahn_cached.equals(ahn_no_cache)
- assert ahn_cache.equals(ahn_no_cache)
-
-
-@pytest.mark.slow
-def test_ds_check_grid_false(tmpdir):
- # two models with a different grid and same time dicretisation
- ds = test_001_model.get_ds_from_cache("small_model")
- ds2 = test_001_model.get_ds_time_transient(tmpdir)
- extent = [99100.0, 99400.0, 489100.0, 489400.0]
- regis_ds = nlmod.read.regis.get_combined_layer_models(
- extent,
- use_regis=True,
- use_geotop=False,
- cachedir=tmpdir,
- cachename="comb.nc",
+ cache_name = "ahn4.nc"
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ assert not os.path.exists(os.path.join(tmpdir, cache_name)), "Cache should not exist yet (1)"
+ ahn_no_cache = nlmod.read.ahn.get_ahn4(extent)
+ assert not os.path.exists(os.path.join(tmpdir, cache_name)), "Cache should not exist yet (2)"
+
+ ahn_cached = nlmod.read.ahn.get_ahn4(extent, cachedir=tmpdir, cachename=cache_name)
+ assert os.path.exists(os.path.join(tmpdir, cache_name)), "Cache should have existed by now"
+ assert ahn_cached.equals(ahn_no_cache)
+ modification_time1 = os.path.getmtime(os.path.join(tmpdir, cache_name))
+
+ # Check if the cache is used. If not, cache is rewritten and modification time changes
+ ahn_cache = nlmod.read.ahn.get_ahn4(extent, cachedir=tmpdir, cachename=cache_name)
+ assert ahn_cache.equals(ahn_no_cache)
+ modification_time2 = os.path.getmtime(os.path.join(tmpdir, cache_name))
+ assert modification_time1 == modification_time2, "Cache should not be rewritten"
+
+ # Different extent should not lead to using the cache
+ extent = [119_800, 120_000, 441_900, 442_000]
+ ahn_cache = nlmod.read.ahn.get_ahn4(extent, cachedir=tmpdir, cachename=cache_name)
+ modification_time3 = os.path.getmtime(os.path.join(tmpdir, cache_name))
+ assert modification_time1 != modification_time3, "Cache should have been rewritten"
+
+
+def test_cache_northsea_data_array():
+ """Test caching of AHN data array. Does have dataset as argument."""
+ from nlmod.read.rws import get_northsea
+ ds1 = nlmod.get_ds(
+ [119_700, 120_000, 441_900, 442_000],
+ delr=100.,
+ delc=100.,
+ top=0.,
+ botm=[-1., -2.],
+ kh=10.,
+ kv=1.,
)
- ds2 = nlmod.base.to_model_ds(regis_ds, delr=50.0, delc=50.0)
-
- check = nlmod.cache._check_ds(ds, ds2)
-
- assert not check
-
-
-@pytest.mark.skip("too slow")
-def test_use_cached_regis(tmpdir):
- extent = [98700.0, 99000.0, 489500.0, 489700.0]
- regis_ds1 = nlmod.read.regis.get_regis(extent, cachedir=tmpdir, cachename="reg.nc")
-
- regis_ds2 = nlmod.read.regis.get_regis(extent, cachedir=tmpdir, cachename="reg.nc")
-
- assert regis_ds1.equals(regis_ds2)
-
-
-@pytest.mark.skip("too slow")
-def test_do_not_use_cached_regis(tmpdir):
- # cache regis
- extent = [98700.0, 99000.0, 489500.0, 489700.0]
- regis_ds1 = nlmod.read.regis.get_regis(
- extent, cachedir=tmpdir, cachename="regis.nc"
- )
-
- # do not use cache because extent is different
- extent = [99100.0, 99400.0, 489100.0, 489400.0]
- regis_ds2 = nlmod.read.regis.get_regis(
- extent, cachedir=tmpdir, cachename="regis.nc"
+ ds2 = nlmod.get_ds(
+ [119_800, 120_000, 441_900, 444_000],
+ delr=100.,
+ delc=100.,
+ top=0.,
+ botm=[-1., -3.],
+ kh=10.,
+ kv=1.,
)
- assert not regis_ds1.equals(regis_ds2)
+ cache_name = "northsea.nc"
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ assert not os.path.exists(os.path.join(tmpdir, cache_name)), "Cache should not exist yet (1)"
+ out1_no_cache = get_northsea(ds1)
+ assert not os.path.exists(os.path.join(tmpdir, cache_name)), "Cache should not exist yet (2)"
+
+ out1_cached = get_northsea(ds1, cachedir=tmpdir, cachename=cache_name)
+ assert os.path.exists(os.path.join(tmpdir, cache_name)), "Cache should exist by now"
+ assert out1_cached.equals(out1_no_cache)
+ modification_time1 = os.path.getmtime(os.path.join(tmpdir, cache_name))
+
+ # Check if the cache is used. If not, cache is rewritten and modification time changes
+ out1_cache = get_northsea(ds1, cachedir=tmpdir, cachename=cache_name)
+ assert out1_cache.equals(out1_no_cache)
+ modification_time2 = os.path.getmtime(os.path.join(tmpdir, cache_name))
+ assert modification_time1 == modification_time2, "Cache should not be rewritten"
+
+ # Only properties of `coords_2d` determine if the cache is used. Cache should still be used.
+ ds1["toppertje"] = ds1.top + 1
+ out1_cache = get_northsea(ds1, cachedir=tmpdir, cachename=cache_name)
+ assert out1_cache.equals(out1_no_cache)
+ modification_time2 = os.path.getmtime(os.path.join(tmpdir, cache_name))
+ assert modification_time1 == modification_time2, "Cache should not be rewritten"
+
+ # Different extent should not lead to using the cache
+ out2_cache = get_northsea(ds2, cachedir=tmpdir, cachename=cache_name)
+ modification_time3 = os.path.getmtime(os.path.join(tmpdir, cache_name))
+ assert modification_time1 != modification_time3, "Cache should have been rewritten"
+ assert not out2_cache.equals(out1_no_cache)
diff --git a/tests/test_007_run_notebooks.py b/tests/test_007_run_notebooks.py
index 1ed78c4d..8b27e8b8 100644
--- a/tests/test_007_run_notebooks.py
+++ b/tests/test_007_run_notebooks.py
@@ -1,4 +1,5 @@
"""run notebooks in the examples directory."""
+# ruff: noqa: D103
import os
import nbformat
@@ -19,91 +20,91 @@ def _run_notebook(nbdir, fname):
return out
-@pytest.mark.notebooks
+@pytest.mark.notebooks()
def test_run_notebook_00_model_from_scratch():
_run_notebook(nbdir, "00_model_from_scratch.ipynb")
-@pytest.mark.notebooks
+@pytest.mark.notebooks()
def test_run_notebook_01_basic_model():
_run_notebook(nbdir, "01_basic_model.ipynb")
-@pytest.mark.notebooks
+@pytest.mark.notebooks()
def test_run_notebook_02_surface_water():
_run_notebook(nbdir, "02_surface_water.ipynb")
-@pytest.mark.notebooks
+@pytest.mark.notebooks()
def test_run_notebook_03_local_grid_refinement():
_run_notebook(nbdir, "03_local_grid_refinement.ipynb")
-@pytest.mark.notebooks
+@pytest.mark.notebooks()
def test_run_notebook_04_modifying_layermodels():
_run_notebook(nbdir, "04_modifying_layermodels.ipynb")
-@pytest.mark.notebooks
+@pytest.mark.notebooks()
def test_run_notebook_05_caching():
_run_notebook(nbdir, "05_caching.ipynb")
-@pytest.mark.notebooks
+@pytest.mark.notebooks()
def test_run_notebook_06_gridding_vector_data():
_run_notebook(nbdir, "06_gridding_vector_data.ipynb")
-@pytest.mark.notebooks
+@pytest.mark.notebooks()
def test_run_notebook_07_resampling():
_run_notebook(nbdir, "07_resampling.ipynb")
-@pytest.mark.notebooks
+@pytest.mark.notebooks()
def test_run_notebook_08_gis():
_run_notebook(nbdir, "08_gis.ipynb")
-@pytest.mark.notebooks
+@pytest.mark.notebooks()
def test_run_notebook_09_schoonhoven():
_run_notebook(nbdir, "09_schoonhoven.ipynb")
-@pytest.mark.notebooks
+@pytest.mark.notebooks()
def test_run_notebook_10_modpath():
_run_notebook(nbdir, "10_modpath.ipynb")
-@pytest.mark.notebooks
+@pytest.mark.notebooks()
def test_run_notebook_11_grid_rotation():
_run_notebook(nbdir, "11_grid_rotation.ipynb")
-@pytest.mark.notebooks
+@pytest.mark.notebooks()
def test_run_notebook_12_layer_generation():
_run_notebook(nbdir, "12_layer_generation.ipynb")
-@pytest.mark.notebooks
+@pytest.mark.notebooks()
def test_run_notebook_13_plot_methods():
_run_notebook(nbdir, "13_plot_methods.ipynb")
-@pytest.mark.notebooks
+@pytest.mark.notebooks()
def test_run_notebook_14_stromingen_example():
_run_notebook(nbdir, "14_stromingen_example.ipynb")
-@pytest.mark.notebooks
+@pytest.mark.notebooks()
def test_run_notebook_15_geotop():
_run_notebook(nbdir, "15_geotop.ipynb")
-@pytest.mark.notebooks
+@pytest.mark.notebooks()
def test_run_notebook_16_groundwater_transport():
_run_notebook(nbdir, "16_groundwater_transport.ipynb")
-@pytest.mark.notebooks
+@pytest.mark.notebooks()
def test_run_notebook_17_unsaturated_zone_flow():
_run_notebook(nbdir, "17_unsaturated_zone_flow.ipynb")
diff --git a/tests/test_008_waterschappen.py b/tests/test_008_waterschappen.py
index 6b81bdc2..e0e952f3 100644
--- a/tests/test_008_waterschappen.py
+++ b/tests/test_008_waterschappen.py
@@ -3,7 +3,6 @@
import nlmod
-
# def test_download_polygons(): # is tested in test_024_administrative.test_get_waterboards
# nlmod.read.waterboard.get_polygons()
diff --git a/tests/test_009_layers.py b/tests/test_009_layers.py
index ce19af49..3b304872 100644
--- a/tests/test_009_layers.py
+++ b/tests/test_009_layers.py
@@ -1,7 +1,7 @@
import os
-import numpy as np
import matplotlib.pyplot as plt
+import numpy as np
from shapely.geometry import LineString
import nlmod
diff --git a/tests/test_014_gis.py b/tests/test_014_gis.py
index b944a7b5..0ae4231f 100644
--- a/tests/test_014_gis.py
+++ b/tests/test_014_gis.py
@@ -17,4 +17,8 @@ def test_vertex_da_to_gdf():
def test_ds_to_ugrid_nc_file():
ds = util.get_ds_vertex()
- nlmod.gis.ds_to_ugrid_nc_file(ds, os.path.join("data", "ugrid_test.nc"))
+ fname = os.path.join("data", "ugrid_test.nc")
+ nlmod.gis.ds_to_ugrid_nc_file(ds, fname)
+
+ fname = os.path.join("data", "ugrid_test_qgis.nc")
+ nlmod.gis.ds_to_ugrid_nc_file(ds, fname, for_imod_qgis_plugin=True)
diff --git a/tests/test_015_gwf_output.py b/tests/test_015_gwf_output.py
index 074e920b..fc3608ee 100644
--- a/tests/test_015_gwf_output.py
+++ b/tests/test_015_gwf_output.py
@@ -67,17 +67,17 @@ def test_create_small_model_grid_only(tmpdir, model_name="test"):
assert np.array_equal(da.values, heads_correct, equal_nan=True)
fname_hds = os.path.join(ds.model_ws, ds.model_name + ".hds")
- grbfile = os.path.join(ds.model_ws, ds.model_name + ".dis.grb")
- da = get_heads_da(ds=None, gwf=None, fname=fname_hds, grbfile=grbfile) # fname
+ grb_file = os.path.join(ds.model_ws, ds.model_name + ".dis.grb")
+ da = get_heads_da(ds=None, gwf=None, fname=fname_hds, grb_file=grb_file) # fname
assert np.array_equal(da.values, heads_correct, equal_nan=True)
# budget
da = get_budget_da("CHD", ds=ds, gwf=None, fname=None) # ds
da = get_budget_da("CHD", ds=None, gwf=gwf, fname=None) # gwf
fname_cbc = os.path.join(ds.model_ws, ds.model_name + ".cbc")
- get_budget_da("CHD", ds=None, gwf=None, fname=fname_cbc, grbfile=grbfile) # fname
+ get_budget_da("CHD", ds=None, gwf=None, fname=fname_cbc, grb_file=grb_file) # fname
get_budget_da(
- "DATA-SPDIS", column="qz", ds=None, gwf=None, fname=fname_cbc, grbfile=grbfile
+ "DATA-SPDIS", column="qz", ds=None, gwf=None, fname=fname_cbc, grb_file=grb_file
) # fname
# unstructured
@@ -127,18 +127,18 @@ def test_create_small_model_grid_only(tmpdir, model_name="test"):
assert np.array_equal(da.values, heads_correct, equal_nan=True)
fname_hds = os.path.join(ds.model_ws, ds.model_name + ".hds")
- grbfile = os.path.join(ds.model_ws, ds.model_name + ".disv.grb")
- da = get_heads_da(ds=None, gwf=None, fname=fname_hds, grbfile=grbfile) # fname
+ grb_file = os.path.join(ds.model_ws, ds.model_name + ".disv.grb")
+ da = get_heads_da(ds=None, gwf=None, fname=fname_hds, grb_file=grb_file) # fname
assert np.array_equal(da.values, heads_correct, equal_nan=True)
# budget
da = get_budget_da("CHD", ds=ds_unstr, gwf=None, fname=None) # ds
da = get_budget_da("CHD", ds=None, gwf=gwf_unstr, fname=None) # gwf
da = get_budget_da(
- "CHD", ds=None, gwf=None, fname=fname_cbc, grbfile=grbfile
+ "CHD", ds=None, gwf=None, fname=fname_cbc, grb_file=grb_file
) # fname
_ = get_budget_da(
- "DATA-SPDIS", column="qz", ds=None, gwf=None, fname=fname_cbc, grbfile=grbfile
+ "DATA-SPDIS", column="qz", ds=None, gwf=None, fname=fname_cbc, grb_file=grb_file
) # fname
@@ -150,8 +150,8 @@ def test_get_heads_da_from_file_structured_no_grb():
def test_get_heads_da_from_file_structured_with_grb():
fname_hds = "./tests/data/mf6output/structured/test.hds"
- grbfile = "./tests/data/mf6output/structured/test.dis.grb"
- nlmod.gwf.output.get_heads_da(fname=fname_hds, grbfile=grbfile)
+ grb_file = "./tests/data/mf6output/structured/test.dis.grb"
+ nlmod.gwf.output.get_heads_da(fname=fname_hds, grb_file=grb_file)
def test_get_budget_da_from_file_structured_no_grb():
@@ -162,8 +162,8 @@ def test_get_budget_da_from_file_structured_no_grb():
def test_get_budget_da_from_file_structured_with_grb():
fname_cbc = "./tests/data/mf6output/structured/test.cbc"
- grbfile = "./tests/data/mf6output/structured/test.dis.grb"
- nlmod.gwf.output.get_budget_da("CHD", fname=fname_cbc, grbfile=grbfile)
+ grb_file = "./tests/data/mf6output/structured/test.dis.grb"
+ nlmod.gwf.output.get_budget_da("CHD", fname=fname_cbc, grb_file=grb_file)
def test_get_heads_da_from_file_vertex_no_grb():
@@ -174,8 +174,8 @@ def test_get_heads_da_from_file_vertex_no_grb():
def test_get_heads_da_from_file_vertex_with_grb():
fname_hds = "./tests/data/mf6output/vertex/test.hds"
- grbfile = "./tests/data/mf6output/vertex/test.disv.grb"
- nlmod.gwf.output.get_heads_da(fname=fname_hds, grbfile=grbfile)
+ grb_file = "./tests/data/mf6output/vertex/test.disv.grb"
+ nlmod.gwf.output.get_heads_da(fname=fname_hds, grb_file=grb_file)
def test_get_budget_da_from_file_vertex_no_grb():
@@ -186,8 +186,8 @@ def test_get_budget_da_from_file_vertex_no_grb():
def test_get_budget_da_from_file_vertex_with_grb():
fname_cbc = "./tests/data/mf6output/vertex/test.cbc"
- grbfile = "./tests/data/mf6output/vertex/test.disv.grb"
- nlmod.gwf.output.get_budget_da("CHD", fname=fname_cbc, grbfile=grbfile)
+ grb_file = "./tests/data/mf6output/vertex/test.disv.grb"
+ nlmod.gwf.output.get_budget_da("CHD", fname=fname_cbc, grb_file=grb_file)
def test_postprocess_head():
@@ -199,3 +199,19 @@ def test_postprocess_head():
nlmod.gwf.get_gwl_from_wet_cells(head, botm=ds["botm"])
nlmod.gwf.get_head_at_point(head, float(ds.x.mean()), float(ds.y.mean()), ds=ds)
+
+
+def test_get_flow_residuals():
+ ds = test_001_model.get_ds_from_cache("basic_sea_model")
+ da = nlmod.gwf.output.get_flow_residuals(ds)
+ assert "time" in da.dims
+ da = nlmod.gwf.output.get_flow_residuals(ds, kstpkper=(0, 0))
+ assert "time" not in da.dims
+
+
+def test_get_flow_lower_face():
+ ds = test_001_model.get_ds_from_cache("basic_sea_model")
+ da = nlmod.gwf.output.get_flow_lower_face(ds)
+ assert "time" in da.dims
+ da = nlmod.gwf.output.get_flow_lower_face(ds, kstpkper=(0, 0))
+ assert "time" not in da.dims
diff --git a/tests/test_016_time.py b/tests/test_016_time.py
index 1e3339c5..056f7a75 100644
--- a/tests/test_016_time.py
+++ b/tests/test_016_time.py
@@ -28,3 +28,7 @@ def test_ds_time_from_tdis_settings():
elapsed = (tidx.to_numpy() - np.datetime64("2000")) / np.timedelta64(1, "D")
assert np.allclose(elapsed, [100, 150, 200, 233.33333333, 300.0])
+
+
+def test_get_time_step_length():
+ assert (nlmod.time.get_time_step_length(100, 2, 1.5) == np.array([40, 60])).all()
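# Worked check of the expected values (plain Python, independent of nlmod):
# with 2 time steps and a geometric multiplier of 1.5, the step lengths x and
# 1.5 * x must sum to the period length of 100, so x = 100 / 2.5 = 40 and the
# steps are [40, 60].
perlen, nstp, tsmult = 100, 2, 1.5
first = perlen / sum(tsmult**i for i in range(nstp))
steps = [first * tsmult**i for i in range(nstp)]
assert steps == [40.0, 60.0]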
diff --git a/tests/test_018_knmi_data_platform.py b/tests/test_018_knmi_data_platform.py
index 9c53bbb0..05da19c9 100644
--- a/tests/test_018_knmi_data_platform.py
+++ b/tests/test_018_knmi_data_platform.py
@@ -1,3 +1,4 @@
+# ruff: noqa: D103
import os
from pathlib import Path
@@ -19,10 +20,10 @@ def test_download_multiple_nc_files() -> None:
)
- # download the last 10 files
- fnames = files[-10:]
+ # download the first two files
+ fnames = files[:2]
dirname = "download"
knmi_data_platform.download_files(
- dataset_name, dataset_version, files[-10:], dirname=dirname
+ dataset_name, dataset_version, fnames, dirname=dirname
)
ds = knmi_data_platform.read_nc(os.path.join(dirname, fnames[0]))
@@ -40,7 +41,7 @@ def test_download_read_zip_file() -> None:
- # download the last file
+ # download the second file
 dirname = "download"
- fname = files[-1]
+ fname = files[1]
knmi_data_platform.download_file(
dataset_name, dataset_version, fname=fname, dirname=dirname
)
diff --git a/tests/test_019_attributes_encodings.py b/tests/test_019_attributes_encodings.py
index 8e18ce38..03333f9d 100644
--- a/tests/test_019_attributes_encodings.py
+++ b/tests/test_019_attributes_encodings.py
@@ -1,5 +1,4 @@
import os
-import time
from tempfile import TemporaryDirectory
import numpy as np
diff --git a/tests/test_021_nhi.py b/tests/test_021_nhi.py
index af768339..8be1c5be 100644
--- a/tests/test_021_nhi.py
+++ b/tests/test_021_nhi.py
@@ -1,14 +1,19 @@
+# ruff: noqa: D103
import os
-import numpy as np
import tempfile
-import nlmod
+
+import geopandas as gpd
+import matplotlib.pyplot as plt
+import numpy as np
import pytest
+import nlmod
+
tmpdir = tempfile.gettempdir()
-@pytest.mark.slow
-def test_buidrainage():
+@pytest.mark.slow()
+def test_buisdrainage():
model_ws = os.path.join(tmpdir, "buidrain")
ds = nlmod.get_ds([110_000, 130_000, 435_000, 445_000], model_ws=model_ws)
ds = nlmod.read.nhi.add_buisdrainage(ds)
@@ -20,3 +25,44 @@ def test_buidrainage():
# assert that all locations with a positive conductance also have a specified depth
mask = ds["buisdrain_cond"] > 0
assert np.all(~np.isnan(ds["buisdrain_depth"].data[mask]))
+
+
+def test_gwo():
+ username = os.environ["NHI_GWO_USERNAME"]
+ password = os.environ["NHI_GWO_PASSWORD"]
+
+ # download all wells from Brabant Water
+ wells = nlmod.read.nhi.get_gwo_wells(
+ username=username, password=password, organisation="Brabant Water"
+ )
+ assert isinstance(wells, gpd.GeoDataFrame)
+
+ # download extractions from well "13-PP016" of pumping station Veghel
+ measurements, gdf = nlmod.read.nhi.get_gwo_measurements(
+ username, password, well_site="veghel", filter__well__name="13-PP016"
+ )
+ assert measurements.reset_index()["Name"].isin(gdf.index).all()
+
+
+@pytest.mark.skip("too slow")
+def test_gwo_entire_pumping_station():
+ username = os.environ["NHI_GWO_USERNAME"]
+ password = os.environ["NHI_GWO_PASSWORD"]
+ measurements, gdf = nlmod.read.nhi.get_gwo_measurements(
+ username,
+ password,
+ well_site="veghel",
+ )
+ assert measurements.reset_index()["Name"].isin(gdf.index).all()
+
+ ncols = 3
+ nrows = int(np.ceil(len(gdf.index) / ncols))
+ f, axes = plt.subplots(
+ nrows=nrows, ncols=ncols, figsize=(10, 10), sharex=True, sharey=True
+ )
+ axes = axes.ravel()
+ for name, ax in zip(gdf.index, axes):
+ measurements.loc[name, "Volume"].plot(ax=ax)
+ ax.set_xlabel("")
+ ax.set_title(name)
+ f.tight_layout(pad=0.0)
diff --git a/tests/test_022_gwt.py b/tests/test_022_gwt.py
index 3f864c4f..5d9fce1e 100644
--- a/tests/test_022_gwt.py
+++ b/tests/test_022_gwt.py
@@ -1,7 +1,9 @@
-import tempfile
import os
+import tempfile
+
import pandas as pd
import xarray as xr
+
import nlmod
diff --git a/tests/test_023_hfb.py b/tests/test_023_hfb.py
index f7e2a73f..1902f23d 100644
--- a/tests/test_023_hfb.py
+++ b/tests/test_023_hfb.py
@@ -1,8 +1,10 @@
-from shapely.geometry import LineString, Polygon
-import geopandas as gpd
+# ruff: noqa: D103
import flopy
-import nlmod
+import geopandas as gpd
import util
+from shapely.geometry import LineString, Polygon
+
+import nlmod
def test_get_hfb_spd():
diff --git a/tests/test_025_modpath.py b/tests/test_025_modpath.py
index 6e22c1a6..e18fb5f7 100644
--- a/tests/test_025_modpath.py
+++ b/tests/test_025_modpath.py
@@ -1,6 +1,8 @@
import os
-import xarray as xr
+
import flopy
+import xarray as xr
+
import nlmod
diff --git a/tests/test_026_grid.py b/tests/test_026_grid.py
new file mode 100644
index 00000000..80b001e7
--- /dev/null
+++ b/tests/test_026_grid.py
@@ -0,0 +1,217 @@
+import os
+import tempfile
+
+import geopandas as gpd
+import matplotlib.pyplot as plt
+import numpy as np
+import xarray as xr
+
+import nlmod
+
+model_ws = os.path.join(tempfile.gettempdir(), "test_grid")
+extent = [98000.0, 99000.0, 489000.0, 490000.0]
+
+
+def get_bgt():
+ fname = os.path.join(model_ws, "bgt.gpkg")
+ if not os.path.isfile(fname):
+ if not os.path.isdir(model_ws):
+ os.makedirs(model_ws)
+ bgt = nlmod.read.bgt.get_bgt(extent)
+ bgt.to_file(fname)
+ return gpd.read_file(fname)
+
+
+def get_regis():
+ fname = os.path.join(model_ws, "regis.nc")
+ if not os.path.isfile(fname):
+ if not os.path.isdir(model_ws):
+ os.makedirs(model_ws)
+ regis = nlmod.read.regis.get_regis(extent)
+ regis.to_netcdf(fname)
+ return xr.open_dataset(fname)
+
+
+def get_structured_model_ds():
+ model_ws = os.path.join(tempfile.gettempdir(), "test_grid_structured")
+ fname = os.path.join(model_ws, "ds.nc")
+ if not os.path.isfile(fname):
+ if not os.path.isdir(model_ws):
+ os.makedirs(model_ws)
+ ds = nlmod.get_ds(extent, model_name="test_grid", model_ws=model_ws)
+ ds.to_netcdf(fname)
+ return xr.open_dataset(fname)
+
+
+def get_structured_model_ds_rotated():
+ model_ws = os.path.join(tempfile.gettempdir(), "test_grid_structured_rotated")
+ fname = os.path.join(model_ws, "ds.nc")
+ if not os.path.isfile(fname):
+ if not os.path.isdir(model_ws):
+ os.makedirs(model_ws)
+ ds = nlmod.get_ds(extent, model_name="test_grid", model_ws=model_ws, angrot=15)
+ ds.to_netcdf(fname)
+ return xr.open_dataset(fname)
+
+
+def get_vertex_model_ds(bgt=None):
+ model_ws = os.path.join(tempfile.gettempdir(), "test_grid_vertex")
+ fname = os.path.join(model_ws, "ds.nc")
+ if not os.path.isfile(fname):
+ if not os.path.isdir(model_ws):
+ os.makedirs(model_ws)
+ ds = get_structured_model_ds()
+ if bgt is None:
+ bgt = get_bgt()
+ ds = nlmod.grid.refine(ds, model_ws=model_ws, refinement_features=[(bgt, 1)])
+ ds.to_netcdf(fname)
+ return xr.open_dataset(fname)
+
+
+def get_vertex_model_ds_rotated(bgt=None):
+ model_ws = os.path.join(tempfile.gettempdir(), "test_grid_vertex_rotated")
+ fname = os.path.join(model_ws, "ds.nc")
+ if not os.path.isfile(fname):
+ if not os.path.isdir(model_ws):
+ os.makedirs(model_ws)
+ ds = get_structured_model_ds_rotated()
+ if bgt is None:
+ bgt = get_bgt()
+ ds = nlmod.grid.refine(ds, model_ws=model_ws, refinement_features=[(bgt, 1)])
+ ds.to_netcdf(fname)
+ return xr.open_dataset(fname)
+
+
+def test_get_ds_rotated():
+ ds0 = get_structured_model_ds_rotated()
+ assert ds0.extent[0] == 0 and ds0.extent[2] == 0
+ assert ds0.xorigin == extent[0] and ds0.yorigin == extent[2]
+
+ # test refine method, by refining in all cells that contain surface water polygons
+ ds = get_vertex_model_ds_rotated()
+ assert len(ds.area) > np.prod(ds0.area.shape)
+ assert ds.extent[0] == 0 and ds.extent[2] == 0
+ assert ds.xorigin == extent[0] and ds.yorigin == extent[2]
+
+ f0, ax0 = plt.subplots()
+ nlmod.plot.modelgrid(ds0, ax=ax0)
+ f, ax = plt.subplots()
+ nlmod.plot.modelgrid(ds, ax=ax)
+ assert (np.array(ax.axis()) == np.array(ax0.axis())).all()
+
+
+def test_vertex_da_to_ds():
+ # for a normal grid
+ ds0 = get_structured_model_ds()
+ ds = get_vertex_model_ds()
+ da = nlmod.resample.vertex_da_to_ds(ds["top"], ds0, method="linear")
+ assert not da.isnull().all()
+ da = nlmod.resample.vertex_da_to_ds(ds["botm"], ds0, method="linear")
+ assert not da.isnull().all()
+
+ # for a rotated grid
+ ds0 = get_structured_model_ds_rotated()
+ ds = get_vertex_model_ds_rotated()
+ da = nlmod.resample.vertex_da_to_ds(ds["top"], ds0, method="linear")
+ assert not da.isnull().all()
+ da = nlmod.resample.vertex_da_to_ds(ds["botm"], ds0, method="linear")
+ assert not da.isnull().all()
+
+
+def test_fillnan_da():
+ # for a structured grid
+ ds = get_structured_model_ds()
+ ds["top"][5, 5] = np.nan
+ top = nlmod.resample.fillnan_da(ds["top"], ds=ds)
+ assert not np.isnan(top[5, 5])
+
+ # also for a vertex grid
+ ds = get_vertex_model_ds()
+ ds["top"][100] = np.nan
+ mask = ds["top"].isnull()
+ assert mask.any()
+ top = nlmod.resample.fillnan_da(ds["top"], ds=ds)
+ assert not top[mask].isnull().any()
+
+
+def test_gdf_to_bool_da():
+ bgt = get_bgt()
+
+ # test for a structured grid
+ ds = get_structured_model_ds()
+ da = nlmod.grid.gdf_to_bool_da(bgt, ds)
+ assert da.any()
+
+ # test for a vertex grid
+ ds = get_vertex_model_ds()
+ da = nlmod.grid.gdf_to_bool_da(bgt, ds)
+ assert da.any()
+
+ # test for a slightly rotated structured grid
+ ds = get_structured_model_ds_rotated()
+ da = nlmod.grid.gdf_to_bool_da(bgt, ds)
+ assert da.any()
+
+ # test for a rotated vertex grid
+ ds = get_vertex_model_ds_rotated()
+ da = nlmod.grid.gdf_to_bool_da(bgt, ds)
+ assert da.any()
+
+
+def test_gdf_to_da():
+ bgt = get_bgt()
+
+ # test for a structured grid
+ ds = get_structured_model_ds()
+ da = nlmod.grid.gdf_to_da(bgt, ds, "relatieveHoogteligging", agg_method="max_area")
+ assert not da.isnull().all()
+
+ # test for a vertex grid
+ ds = get_vertex_model_ds()
+ da = nlmod.grid.gdf_to_da(bgt, ds, "relatieveHoogteligging", agg_method="max_area")
+ assert not da.isnull().all()
+
+ # test for a slightly rotated structured grid
+ ds = get_structured_model_ds_rotated()
+ da = nlmod.grid.gdf_to_da(bgt, ds, "relatieveHoogteligging", agg_method="max_area")
+ assert not da.isnull().all()
+
+ # test for a rotated vertex grid
+ ds = get_vertex_model_ds_rotated()
+ da = nlmod.grid.gdf_to_da(bgt, ds, "relatieveHoogteligging", agg_method="max_area")
+ assert not da.isnull().all()
+
+
+def test_update_ds_from_layer_ds():
+ bgt = get_bgt()
+ regis = get_regis()
+
+ # test for a structured grid
+ ds = nlmod.get_ds(extent, delr=200)
+ ds = nlmod.grid.update_ds_from_layer_ds(ds, regis, method="nearest")
+ assert len(np.unique(ds["top"])) > 1
+ ds = nlmod.grid.update_ds_from_layer_ds(ds, regis, method="average")
+ assert len(np.unique(ds["top"])) > 1
+
+ # test for a vertex grid
+ model_ws = os.path.join(tempfile.gettempdir(), "test_grid_vertex_200")
+ ds = nlmod.grid.refine(ds, model_ws=model_ws, refinement_features=[(bgt, 1)])
+ ds = nlmod.grid.update_ds_from_layer_ds(ds, regis, method="nearest")
+ assert len(np.unique(ds["top"])) > 1
+ ds = nlmod.grid.update_ds_from_layer_ds(ds, regis, method="average")
+ assert len(np.unique(ds["top"])) > 1
+
+ # test for a slightly rotated structured grid
+ ds = nlmod.get_ds(extent, delr=200, angrot=15)
+ ds = nlmod.grid.update_ds_from_layer_ds(ds, regis, method="nearest")
+ assert len(np.unique(ds["top"])) > 1
+ ds = nlmod.grid.update_ds_from_layer_ds(ds, regis, method="average")
+ assert len(np.unique(ds["top"])) > 1
+
+ # test for a rotated vertex grid
+ model_ws = os.path.join(tempfile.gettempdir(), "test_grid_vertex_200_rotated")
+ ds = nlmod.grid.refine(ds, model_ws=model_ws, refinement_features=[(bgt, 2)])
+ ds = nlmod.grid.update_ds_from_layer_ds(ds, regis, method="nearest")
+ assert len(np.unique(ds["top"])) > 1
+ ds = nlmod.grid.update_ds_from_layer_ds(ds, regis, method="average")
+ assert len(np.unique(ds["top"])) > 1