Commit 95f6cb6

Format with preview flag
1 parent df1f1ce commit 95f6cb6


68 files changed: +1571 -1949 lines changed
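
The diffs below all come from re-running the ruff formatter with its preview rules enabled (see the pyproject.toml hunk further down). The dominant change is the preview "hug brackets" style: when a dict, list, or comprehension literal is the sole argument to a call, its brackets stay on the same lines as the call's parentheses, and overload stubs keep `...` on the signature line. A minimal before/after sketch, using a made-up example rather than code from this commit:

# stable style (before)
ds = xr.Dataset(
    {
        "a": xr.DataArray(data, dims="x"),
    }
)

# preview style (after): the braces hug the call's parentheses
ds = xr.Dataset({
    "a": xr.DataArray(data, dims="x"),
})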

asv_bench/benchmarks/combine.py

Lines changed: 15 additions & 15 deletions
@@ -16,9 +16,9 @@ def setup(self) -> None:
         data = np.random.randn(t_size)

         self.dsA0 = xr.Dataset({"A": xr.DataArray(data, coords={"T": t}, dims=("T"))})
-        self.dsA1 = xr.Dataset(
-            {"A": xr.DataArray(data, coords={"T": t + t_size}, dims=("T"))}
-        )
+        self.dsA1 = xr.Dataset({
+            "A": xr.DataArray(data, coords={"T": t + t_size}, dims=("T"))
+        })

     def time_combine_by_coords(self) -> None:
         """Also has to load and arrange t coordinate"""

@@ -54,18 +54,18 @@ def setup(self):
         t = np.arange(t_size)
         data = np.random.randn(t_size, x_size, y_size)

-        self.dsA0 = xr.Dataset(
-            {"A": xr.DataArray(data, coords={"T": t}, dims=("T", "X", "Y"))}
-        )
-        self.dsA1 = xr.Dataset(
-            {"A": xr.DataArray(data, coords={"T": t + t_size}, dims=("T", "X", "Y"))}
-        )
-        self.dsB0 = xr.Dataset(
-            {"B": xr.DataArray(data, coords={"T": t}, dims=("T", "X", "Y"))}
-        )
-        self.dsB1 = xr.Dataset(
-            {"B": xr.DataArray(data, coords={"T": t + t_size}, dims=("T", "X", "Y"))}
-        )
+        self.dsA0 = xr.Dataset({
+            "A": xr.DataArray(data, coords={"T": t}, dims=("T", "X", "Y"))
+        })
+        self.dsA1 = xr.Dataset({
+            "A": xr.DataArray(data, coords={"T": t + t_size}, dims=("T", "X", "Y"))
+        })
+        self.dsB0 = xr.Dataset({
+            "B": xr.DataArray(data, coords={"T": t}, dims=("T", "X", "Y"))
+        })
+        self.dsB1 = xr.Dataset({
+            "B": xr.DataArray(data, coords={"T": t + t_size}, dims=("T", "X", "Y"))
+        })

     def time_combine_nested(self):
         datasets = [[self.dsA0, self.dsA1], [self.dsB0, self.dsB1]]

asv_bench/benchmarks/dataset.py

Lines changed: 4 additions & 6 deletions
@@ -7,12 +7,10 @@

 class DatasetBinaryOp:
     def setup(self):
-        self.ds = Dataset(
-            {
-                "a": (("x", "y"), np.ones((300, 400))),
-                "b": (("x", "y"), np.ones((300, 400))),
-            }
-        )
+        self.ds = Dataset({
+            "a": (("x", "y"), np.ones((300, 400))),
+            "b": (("x", "y"), np.ones((300, 400))),
+        })
         self.mean = self.ds.mean()
         self.std = self.ds.std()

asv_bench/benchmarks/groupby.py

Lines changed: 5 additions & 7 deletions
@@ -11,13 +11,11 @@
 class GroupBy:
     def setup(self, *args, **kwargs):
         self.n = 100
-        self.ds1d = xr.Dataset(
-            {
-                "a": xr.DataArray(np.r_[np.repeat(1, self.n), np.repeat(2, self.n)]),
-                "b": xr.DataArray(np.arange(2 * self.n)),
-                "c": xr.DataArray(np.arange(2 * self.n)),
-            }
-        )
+        self.ds1d = xr.Dataset({
+            "a": xr.DataArray(np.r_[np.repeat(1, self.n), np.repeat(2, self.n)]),
+            "b": xr.DataArray(np.arange(2 * self.n)),
+            "c": xr.DataArray(np.arange(2 * self.n)),
+        })
         self.ds2d = self.ds1d.expand_dims(z=10).copy()
         self.ds1d_mean = self.ds1d.groupby("b").mean()
         self.ds2d_mean = self.ds2d.groupby("b").mean()

asv_bench/benchmarks/pandas.py

Lines changed: 5 additions & 7 deletions
@@ -9,13 +9,11 @@
 class MultiIndexSeries:
     def setup(self, dtype, subset):
         data = np.random.rand(100000).astype(dtype)
-        index = pd.MultiIndex.from_product(
-            [
-                list("abcdefhijk"),
-                list("abcdefhijk"),
-                pd.date_range(start="2000-01-01", periods=1000, freq="D"),
-            ]
-        )
+        index = pd.MultiIndex.from_product([
+            list("abcdefhijk"),
+            list("abcdefhijk"),
+            pd.date_range(start="2000-01-01", periods=1000, freq="D"),
+        ])
         series = pd.Series(data, index)
         if subset:
             series = series[::3]

ci/min_deps_check.py

Lines changed: 11 additions & 12 deletions
@@ -3,6 +3,7 @@
 publication date. Compare it against requirements/min-all-deps.yml to verify the
 policy on obsolete dependencies is being followed. Print a pretty report :)
 """
+
 from __future__ import annotations

 import itertools

@@ -104,18 +105,16 @@ def metadata(entry):

     # Hardcoded fix to work around incorrect dates in conda
     if pkg == "python":
-        out.update(
-            {
-                (2, 7): datetime(2010, 6, 3),
-                (3, 5): datetime(2015, 9, 13),
-                (3, 6): datetime(2016, 12, 23),
-                (3, 7): datetime(2018, 6, 27),
-                (3, 8): datetime(2019, 10, 14),
-                (3, 9): datetime(2020, 10, 5),
-                (3, 10): datetime(2021, 10, 4),
-                (3, 11): datetime(2022, 10, 24),
-            }
-        )
+        out.update({
+            (2, 7): datetime(2010, 6, 3),
+            (3, 5): datetime(2015, 9, 13),
+            (3, 6): datetime(2016, 12, 23),
+            (3, 7): datetime(2018, 6, 27),
+            (3, 8): datetime(2019, 10, 14),
+            (3, 9): datetime(2020, 10, 5),
+            (3, 10): datetime(2021, 10, 4),
+            (3, 11): datetime(2022, 10, 24),
+        })

     return out

doc/conf.py

Lines changed: 3 additions & 5 deletions
@@ -52,11 +52,9 @@
 try:
     import cartopy  # noqa: F401
 except ImportError:
-    allowed_failures.update(
-        [
-            "gallery/plot_cartopy_facetgrid.py",
-        ]
-    )
+    allowed_failures.update([
+        "gallery/plot_cartopy_facetgrid.py",
+    ])

 nbsphinx_allow_errors = False

doc/examples/apply_ufunc_vectorize_1d.ipynb

Lines changed: 8 additions & 6 deletions
@@ -520,9 +520,10 @@
     "\n",
     "interped = xr.apply_ufunc(\n",
     "    interp1d_np,  # first the function\n",
-    "    air.chunk(\n",
-    "        {\"time\": 2, \"lon\": 2}\n",
-    "    ),  # now arguments in the order expected by 'interp1_np'\n",
+    "    air.chunk({\n",
+    "        \"time\": 2,\n",
+    "        \"lon\": 2,\n",
+    "    }),  # now arguments in the order expected by 'interp1_np'\n",
     "    air.lat,  # as above\n",
     "    newlat,  # as above\n",
     "    input_core_dims=[[\"lat\"], [\"lat\"], [\"new_lat\"]],  # list with one entry per arg\n",

@@ -617,9 +618,10 @@
   "source": [
     "interped = xr.apply_ufunc(\n",
     "    interp1d_np_gufunc,  # first the function\n",
-    "    air.chunk(\n",
-    "        {\"time\": 2, \"lon\": 2}\n",
-    "    ),  # now arguments in the order expected by 'interp1_np'\n",
+    "    air.chunk({\n",
+    "        \"time\": 2,\n",
+    "        \"lon\": 2,\n",
+    "    }),  # now arguments in the order expected by 'interp1_np'\n",
     "    air.lat,  # as above\n",
     "    newlat,  # as above\n",
     "    input_core_dims=[[\"lat\"], [\"lat\"], [\"new_lat\"]],  # list with one entry per arg\n",

pyproject.toml

Lines changed: 1 addition & 0 deletions
@@ -283,6 +283,7 @@ convention = "numpy"

 [tool.ruff.format]
 docstring-code-format = true
+preview = true

 [tool.pytest.ini_options]
 addopts = ["--strict-config", "--strict-markers"]
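
This single pyproject.toml line is what enables the behaviour shown throughout the commit: ruff's formatter only applies its preview-style rules when preview = true is set under [tool.ruff.format]. As a rough sketch (assuming a local checkout with a compatible ruff version installed; these commands are not part of the commit itself), the same reformatting could be reproduced with:

# show the differences without modifying any files
ruff format --diff .

# rewrite files in place using the settings from pyproject.toml
ruff format .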

xarray/backends/api.py

Lines changed: 26 additions & 37 deletions
@@ -1103,8 +1103,7 @@ def to_netcdf(
     *,
     multifile: Literal[True],
     invalid_netcdf: bool = False,
-) -> tuple[ArrayWriter, AbstractDataStore]:
-    ...
+) -> tuple[ArrayWriter, AbstractDataStore]: ...


 # path=None writes to bytes

@@ -1121,8 +1120,7 @@ def to_netcdf(
     compute: bool = True,
     multifile: Literal[False] = False,
     invalid_netcdf: bool = False,
-) -> bytes:
-    ...
+) -> bytes: ...


 # compute=False returns dask.Delayed

@@ -1140,8 +1138,7 @@ def to_netcdf(
     compute: Literal[False],
     multifile: Literal[False] = False,
     invalid_netcdf: bool = False,
-) -> Delayed:
-    ...
+) -> Delayed: ...


 # default return None

@@ -1158,8 +1155,7 @@ def to_netcdf(
     compute: Literal[True] = True,
     multifile: Literal[False] = False,
     invalid_netcdf: bool = False,
-) -> None:
-    ...
+) -> None: ...


 # if compute cannot be evaluated at type check time

@@ -1177,8 +1173,7 @@ def to_netcdf(
     compute: bool = False,
     multifile: Literal[False] = False,
     invalid_netcdf: bool = False,
-) -> Delayed | None:
-    ...
+) -> Delayed | None: ...


 # if multifile cannot be evaluated at type check time

@@ -1196,8 +1191,7 @@ def to_netcdf(
     compute: bool = False,
     multifile: bool = False,
     invalid_netcdf: bool = False,
-) -> tuple[ArrayWriter, AbstractDataStore] | Delayed | None:
-    ...
+) -> tuple[ArrayWriter, AbstractDataStore] | Delayed | None: ...


 # Any

@@ -1214,8 +1208,7 @@ def to_netcdf(
     compute: bool = False,
     multifile: bool = False,
     invalid_netcdf: bool = False,
-) -> tuple[ArrayWriter, AbstractDataStore] | bytes | Delayed | None:
-    ...
+) -> tuple[ArrayWriter, AbstractDataStore] | bytes | Delayed | None: ...


 def to_netcdf(

@@ -1470,22 +1463,20 @@ def save_mfdataset(
             "save_mfdataset"
         )

-    writers, stores = zip(
-        *[
-            to_netcdf(
-                ds,
-                path,
-                mode,
-                format,
-                group,
-                engine,
-                compute=compute,
-                multifile=True,
-                **kwargs,
-            )
-            for ds, path, group in zip(datasets, paths, groups)
-        ]
-    )
+    writers, stores = zip(*[
+        to_netcdf(
+            ds,
+            path,
+            mode,
+            format,
+            group,
+            engine,
+            compute=compute,
+            multifile=True,
+            **kwargs,
+        )
+        for ds, path, group in zip(datasets, paths, groups)
+    ])

     try:
         writes = [w.sync(compute=compute) for w in writers]

@@ -1497,9 +1488,9 @@ def save_mfdataset(
     if not compute:
         import dask

-        return dask.delayed(
-            [dask.delayed(_finalize_store)(w, s) for w, s in zip(writes, stores)]
-        )
+        return dask.delayed([
+            dask.delayed(_finalize_store)(w, s) for w, s in zip(writes, stores)
+        ])


 def _auto_detect_region(ds_new, ds_orig, dim):

@@ -1649,8 +1640,7 @@ def to_zarr(
     zarr_version: int | None = None,
     write_empty_chunks: bool | None = None,
     chunkmanager_store_kwargs: dict[str, Any] | None = None,
-) -> backends.ZarrStore:
-    ...
+) -> backends.ZarrStore: ...


 # compute=False returns dask.Delayed

@@ -1673,8 +1663,7 @@ def to_zarr(
     zarr_version: int | None = None,
     write_empty_chunks: bool | None = None,
     chunkmanager_store_kwargs: dict[str, Any] | None = None,
-) -> Delayed:
-    ...
+) -> Delayed: ...


 def to_zarr(

xarray/backends/locks.py

Lines changed: 3 additions & 3 deletions
@@ -40,9 +40,9 @@ class SerializableLock:
     The creation of locks is itself not threadsafe.
     """

-    _locks: ClassVar[
-        WeakValueDictionary[Hashable, threading.Lock]
-    ] = WeakValueDictionary()
+    _locks: ClassVar[WeakValueDictionary[Hashable, threading.Lock]] = (
+        WeakValueDictionary()
+    )
     token: Hashable
     lock: threading.Lock

xarray/backends/plugins.py

Lines changed: 3 additions & 3 deletions
@@ -97,9 +97,9 @@ def sort_backends(
     for be_name in STANDARD_BACKENDS_ORDER:
         if backend_entrypoints.get(be_name, None) is not None:
             ordered_backends_entrypoints[be_name] = backend_entrypoints.pop(be_name)
-    ordered_backends_entrypoints.update(
-        {name: backend_entrypoints[name] for name in sorted(backend_entrypoints)}
-    )
+    ordered_backends_entrypoints.update({
+        name: backend_entrypoints[name] for name in sorted(backend_entrypoints)
+    })
     return ordered_backends_entrypoints

xarray/backends/pydap_.py

Lines changed: 4 additions & 6 deletions
@@ -72,12 +72,10 @@ def _fix_attributes(attributes):
         elif is_dict_like(attributes[k]):
             # Make Hierarchical attributes to a single level with a
             # dot-separated key
-            attributes.update(
-                {
-                    f"{k}.{k_child}": v_child
-                    for k_child, v_child in attributes.pop(k).items()
-                }
-            )
+            attributes.update({
+                f"{k}.{k_child}": v_child
+                for k_child, v_child in attributes.pop(k).items()
+            })
     return attributes

xarray/backends/zarr.py

Lines changed: 3 additions & 3 deletions
@@ -71,9 +71,9 @@ def __init__(self, zarr_array):
         self.shape = self._array.shape

         # preserve vlen string object dtype (GH 7328)
-        if self._array.filters is not None and any(
-            [filt.codec_id == "vlen-utf8" for filt in self._array.filters]
-        ):
+        if self._array.filters is not None and any([
+            filt.codec_id == "vlen-utf8" for filt in self._array.filters
+        ]):
             dtype = coding.strings.create_vlen_dtype(str)
         else:
             dtype = self._array.dtype

xarray/coding/cftimeindex.py

Lines changed: 3 additions & 3 deletions
@@ -807,9 +807,9 @@ def _parse_array_of_cftime_strings(strings, date_type):
     -------
     np.array
     """
-    return np.array(
-        [_parse_iso8601_without_reso(date_type, s) for s in strings.ravel()]
-    ).reshape(strings.shape)
+    return np.array([
+        _parse_iso8601_without_reso(date_type, s) for s in strings.ravel()
+    ]).reshape(strings.shape)


 def _contains_datetime_timedeltas(array):
