@@ -901,20 +901,20 @@ def _copy(
         if data is None:
             data_old = self._data

-            if isinstance(data_old, indexing.MemoryCachedArray):
+            if not isinstance(data_old, indexing.MemoryCachedArray):
+                ndata = data_old
+            else:
                 # don't share caching between copies
                 ndata = indexing.MemoryCachedArray(data_old.array)
-            else:
-                ndata = data_old

             if deep:
                 ndata = copy.deepcopy(ndata, memo)

         else:
             ndata = as_compatible_data(data)
-            if self.shape != ndata.shape:  # type: ignore[attr-defined]
+            if self.shape != ndata.shape:
                 raise ValueError(
-                    f"Data shape {ndata.shape} must match shape of object {self.shape}"  # type: ignore[attr-defined]
+                    f"Data shape {ndata.shape} must match shape of object {self.shape}"
                 )

         attrs = copy.deepcopy(self._attrs, memo) if deep else copy.copy(self._attrs)
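
A minimal stand-in sketch of why each copy gets its own caching wrapper. The CachedArray class below is hypothetical, not xarray's indexing.MemoryCachedArray; it only illustrates the idea that the wrappers share the underlying array while keeping separate caches:

class CachedArray:
    def __init__(self, array):
        self.array = array   # underlying data, shared between wrappers
        self._cache = None   # per-wrapper cache

    def load(self):
        if self._cache is None:
            self._cache = list(self.array)  # stand-in for an expensive read
        return self._cache

original = CachedArray(range(3))
copied = CachedArray(original.array)  # new wrapper around the same data
original.load()
assert copied._cache is None  # loading the original did not fill the copy's cache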
@@ -1043,7 +1043,9 @@ def chunk(
         if chunkmanager.is_chunked_array(data_old):
             data_chunked = chunkmanager.rechunk(data_old, chunks)
         else:
-            if isinstance(data_old, indexing.ExplicitlyIndexed):
+            if not isinstance(data_old, indexing.ExplicitlyIndexed):
+                ndata = data_old
+            else:
                 # Unambiguously handle array storage backends (like NetCDF4 and h5py)
                 # that can't handle general array indexing. For example, in netCDF4 you
                 # can do "outer" indexing along two dimensions independent, which works
@@ -1055,8 +1057,6 @@ def chunk(
                 ndata = indexing.ImplicitToExplicitIndexingAdapter(
                     data_old, indexing.OuterIndexer
                 )
-            else:
-                ndata = data_old

             if utils.is_dict_like(chunks):
                 chunks = tuple(chunks.get(n, s) for n, s in enumerate(ndata.shape))
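
A worked micro-example of the dict-to-tuple chunk resolution in the last context line above: axes missing from the dict fall back to their full length (the shape and chunk values here are made up for illustration):

shape = (10, 20, 30)
chunks = {0: 5, 2: 10}
resolved = tuple(chunks.get(n, s) for n, s in enumerate(shape))
assert resolved == (5, 20, 10)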
@@ -1504,7 +1504,9 @@ def _stack_once(self, dims: list[Hashable], new_dim: Hashable):
         new_data = duck_array_ops.reshape(reordered.data, new_shape)
         new_dims = reordered.dims[: len(other_dims)] + (new_dim,)

-        return Variable(new_dims, new_data, self._attrs, self._encoding, fastpath=True)
+        return type(self)(
+            new_dims, new_data, self._attrs, self._encoding, fastpath=True
+        )

     def stack(self, dimensions=None, **dimensions_kwargs):
         """
@@ -2760,7 +2762,7 @@ def concat(

         return cls(first_var.dims, data, attrs)

-    def copy(self, deep: bool = True, data: ArrayLike | None = None):
+    def copy(self, deep: bool = True, data: T_DuckArray | ArrayLike | None = None):
         """Returns a copy of this object.

         `deep` is ignored since data is stored in the form of
@@ -2785,7 +2787,17 @@ def copy(self, deep: bool = True, data: ArrayLike | None = None):
         data copied from original.
         """
         if data is None:
-            ndata = self._data.copy(deep=deep)
+            data_old = self._data
+
+            if not isinstance(data_old, indexing.MemoryCachedArray):
+                ndata = data_old
+            else:
+                # don't share caching between copies
+                ndata = indexing.MemoryCachedArray(data_old.array)
+
+            if deep:
+                ndata = copy.deepcopy(ndata, None)
+
         else:
             ndata = as_compatible_data(data)
             if self.shape != ndata.shape:
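
A hypothetical usage sketch of the shape check enforced above (the array values are made up; IndexVariable and copy(data=...) are part of xarray's public API):

import numpy as np
import xarray as xr

idx = xr.IndexVariable("x", np.arange(4))
replaced = idx.copy(data=np.arange(4) * 10)  # same shape: accepted
# idx.copy(data=np.arange(5))                # shape mismatch: raises ValueError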