
Avoid realizing a potentially very large RangeIndex into memory #429


Merged: 2 commits, Apr 3, 2025
10 changes: 5 additions & 5 deletions flox/core.py
@@ -1602,7 +1602,7 @@ def dask_groupby_agg(
engine: T_Engine = "numpy",
sort: bool = True,
chunks_cohorts=None,
-) -> tuple[DaskArray, tuple[np.ndarray | DaskArray]]:
+) -> tuple[DaskArray, tuple[pd.Index | np.ndarray | DaskArray]]:
import dask.array
from dask.array.core import slices_from_chunks
from dask.highlevelgraph import HighLevelGraph
@@ -1730,7 +1730,7 @@ def dask_groupby_agg(
group_chunks = ((np.nan,),)
else:
assert expected_groups is not None
-groups = (expected_groups.to_numpy(),)
+groups = (expected_groups,)
group_chunks = ((len(expected_groups),),)

elif method == "cohorts":
@@ -1846,7 +1846,7 @@ def cubed_groupby_agg(
engine: T_Engine = "numpy",
sort: bool = True,
chunks_cohorts=None,
-) -> tuple[CubedArray, tuple[np.ndarray | CubedArray]]:
+) -> tuple[CubedArray, tuple[pd.Index | np.ndarray | CubedArray]]:
import cubed
import cubed.core.groupby

@@ -1882,7 +1882,7 @@ def _reduction_func(a, by, axis, start_group, num_groups):
result = cubed.core.groupby.groupby_blockwise(
array, by, axis=axis, func=_reduction_func, num_groups=num_groups
)
-groups = (expected_groups.to_numpy(),)
+groups = (expected_groups,)
return (result, groups)

else:
@@ -1964,7 +1964,7 @@ def _groupby_aggregate(a, **kwargs):
num_groups=num_groups,
)

-groups = (expected_groups.to_numpy(),)
+groups = (expected_groups,)

return (result, groups)

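The `flox/core.py` hunks above all make the same change: return the `expected_groups` index object itself instead of `expected_groups.to_numpy()`. When `expected_groups` is a `pd.RangeIndex`, the conversion realizes every label in memory. A minimal sketch of the difference (mine, not from the PR):

```python
import pandas as pd

# A RangeIndex stores only start, stop, and step, so it is O(1) in memory.
expected_groups = pd.RangeIndex(1_000_000_000)

# Old behaviour: allocate one int64 per label (~8 GB for this index).
# groups = (expected_groups.to_numpy(),)

# New behaviour: pass the lazy index through unchanged.
groups = (expected_groups,)
```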
8 changes: 8 additions & 0 deletions flox/dask_array_ops.py
@@ -4,14 +4,22 @@
from itertools import product
from numbers import Integral

+import pandas as pd
from dask import config
+from dask.base import normalize_token
from dask.blockwise import lol_tuples
from toolz import partition_all

from .lib import ArrayLayer
from .types import Graph


+# workaround for https://github.com/dask/dask/issues/11862
+@normalize_token.register(pd.RangeIndex)
+def normalize_range_index(x):
+    return normalize_token(type(x)), x.start, x.stop, x.step, x.dtype, x.name


# _tree_reduce and partial_reduce are copied from dask.array.reductions
# They have been modified to work purely with graphs, and without creating new Array layers
# in the graph. The `block_index` kwarg is new and avoids a concatenation by simply setting the right
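The `normalize_token` registration above lets dask hash a `RangeIndex` from its metadata alone; without it, dask's fallback tokenization for pandas indexes can realize the whole index (see the linked dask issue). A rough sketch of the effect, assuming `flox.dask_array_ops` has been imported so the registration has run (this example is not part of the PR):

```python
import pandas as pd
from dask.base import tokenize

import flox.dask_array_ops  # noqa: F401 (importing registers the normalizer above)

# tokenize() now hashes (type, start, stop, step, dtype, name) instead of
# materializing ~8 TB of int64 labels for this index.
idx = pd.RangeIndex(10**12)
print(tokenize(idx))
```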
12 changes: 0 additions & 12 deletions tests/conftest.py
@@ -30,15 +30,3 @@
)
def engine(request):
return request.param


-@pytest.fixture(
-    scope="module",
-    params=[
-        "flox",
-        "numpy",
-        pytest.param("numbagg", marks=requires_numbagg),
-    ],
-)
-def engine_no_numba(request):
-    return request.param
27 changes: 9 additions & 18 deletions tests/test_xarray.py
@@ -34,8 +34,7 @@
@pytest.mark.parametrize("min_count", [None, 1, 3])
@pytest.mark.parametrize("add_nan", [True, False])
@pytest.mark.parametrize("skipna", [True, False])
-def test_xarray_reduce(skipna, add_nan, min_count, engine_no_numba, reindex):
-    engine = engine_no_numba
+def test_xarray_reduce(skipna, add_nan, min_count, engine, reindex):
if skipna is False and min_count is not None:
pytest.skip()

@@ -91,11 +90,9 @@ def test_xarray_reduce(skipna, add_nan, min_count, engine_no_numba, reindex):
# TODO: sort
@pytest.mark.parametrize("pass_expected_groups", [True, False])
@pytest.mark.parametrize("chunk", (pytest.param(True, marks=requires_dask), False))
-def test_xarray_reduce_multiple_groupers(pass_expected_groups, chunk, engine_no_numba):
+def test_xarray_reduce_multiple_groupers(pass_expected_groups, chunk, engine):
if chunk and pass_expected_groups is False:
pytest.skip()
-    engine = engine_no_numba

arr = np.ones((4, 12))
labels = np.array(["a", "a", "c", "c", "c", "b", "b", "c", "c", "b", "b", "f"])
labels2 = np.array([1, 2, 2, 1])
@@ -140,10 +137,9 @@ def test_xarray_reduce_multiple_groupers(pass_expected_groups, chunk, engine_no_numba):

@pytest.mark.parametrize("pass_expected_groups", [True, False])
@pytest.mark.parametrize("chunk", (pytest.param(True, marks=requires_dask), False))
-def test_xarray_reduce_multiple_groupers_2(pass_expected_groups, chunk, engine_no_numba):
+def test_xarray_reduce_multiple_groupers_2(pass_expected_groups, chunk, engine):
if chunk and pass_expected_groups is False:
pytest.skip()
-    engine = engine_no_numba

arr = np.ones((2, 12))
labels = np.array(["a", "a", "c", "c", "c", "b", "b", "c", "c", "b", "b", "f"])
@@ -218,8 +214,7 @@ def test_xarray_reduce_cftime_var(engine, indexer, expected_groups, func):

@requires_cftime
@requires_dask
-def test_xarray_reduce_single_grouper(engine_no_numba):
-    engine = engine_no_numba
+def test_xarray_reduce_single_grouper(engine):
# DataArray
ds = xr.Dataset(
{
@@ -326,17 +321,15 @@ def test_rechunk_for_blockwise(inchunks, expected):
# TODO: dim=None, dim=Ellipsis, groupby unindexed dim


-def test_groupby_duplicate_coordinate_labels(engine_no_numba):
-    engine = engine_no_numba
+def test_groupby_duplicate_coordinate_labels(engine):
# fix for http://stackoverflow.com/questions/38065129
array = xr.DataArray([1, 2, 3], [("x", [1, 1, 2])])
expected = xr.DataArray([3, 3], [("x", [1, 2])])
actual = xarray_reduce(array, array.x, func="sum", engine=engine)
assert_equal(expected, actual)


-def test_multi_index_groupby_sum(engine_no_numba):
-    engine = engine_no_numba
+def test_multi_index_groupby_sum(engine):
# regression test for xarray GH873
ds = xr.Dataset(
{"foo": (("x", "y", "z"), np.ones((3, 4, 2)))},
@@ -362,8 +355,7 @@ def test_multi_index_groupby_sum(engine_no_numba):


@pytest.mark.parametrize("chunks", (None, pytest.param(2, marks=requires_dask)))
-def test_xarray_groupby_bins(chunks, engine_no_numba):
-    engine = engine_no_numba
+def test_xarray_groupby_bins(chunks, engine):
array = xr.DataArray([1, 1, 1, 1, 1], dims="x")
labels = xr.DataArray([1, 1.5, 1.9, 2, 3], dims="x", name="labels")

@@ -532,11 +524,10 @@ def test_alignment_error():
@pytest.mark.parametrize("dtype_out", [np.float64, "float64", np.dtype("float64")])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("chunk", (pytest.param(True, marks=requires_dask), False))
-def test_dtype(add_nan, chunk, dtype, dtype_out, engine_no_numba):
-    if engine_no_numba == "numbagg":
+def test_dtype(add_nan, chunk, dtype, dtype_out, engine):
+    if engine == "numbagg":
# https://github.com/numbagg/numbagg/issues/121
pytest.skip()
-    engine = engine_no_numba
xp = dask.array if chunk else np
data = xp.linspace(0, 1, 48, dtype=dtype).reshape((4, 12))
