h"]
+_IntCCodes = Literal["intc", "i", "=i", "<i", ">i"]
+_IntPCodes = Literal["intp", "int0", "p", "=p", "<p", ">p"]
+_IntCodes = Literal["long", "int", "int_", "l", "=l", "<l", ">l"]
+_LongLongCodes = Literal["longlong", "q", "=q", "<q", ">q"]
+
+_UByteCodes = Literal["ubyte", "B", "=B", "<B", ">B"]
+_UShortCodes = Literal["ushort", "H", "=H", "<H", ">H"]
+_UIntCCodes = Literal["uintc", "I", "=I", "<I", ">I"]
+_UIntPCodes = Literal["uintp", "uint0", "P", "=P", "<P", ">P"]
+_UIntCodes = Literal["ulong", "uint", "L", "=L", "<L", ">L"]
+_ULongLongCodes = Literal["ulonglong", "Q", "=Q", "<Q", ">Q"]
+
+_HalfCodes = Literal["half", "e", "=e", "<e", ">e"]
+_SingleCodes = Literal["single", "f", "=f", "<f", ">f"]
+_DoubleCodes = Literal["double", "float", "float_", "d", "=d", "<d", ">d"]
+_LongDoubleCodes = Literal["longdouble", "longfloat", "g", "=g", "<g", ">g"]
+
+_CSingleCodes = Literal["csingle", "singlecomplex", "F", "=F", "<F", ">F"]
+_CDoubleCodes = Literal["cdouble", "complex", "complex_", "cfloat", "D", "=D", "<D", ">D"]
+_CLongDoubleCodes = Literal["clongdouble", "clongfloat", "longcomplex", "G", "=G", "<G", ">G"]
+
+_StrCodes = Literal["str", "str_", "str0", "unicode", "unicode_", "U", "=U", "<U", ">U"]
+_BytesCodes = Literal["bytes", "bytes_", "bytes0", "S", "=S", "<S", ">S"]
+_VoidCodes = Literal["void", "void0", "V", "=V", "<V", ">V"]
+_ObjectCodes = Literal["object", "object_", "O", "=O", "<O", ">O"]
+
+_DT64Codes = Literal[
+    "datetime64", "=datetime64", "<datetime64", ">datetime64",
+    "datetime64[Y]", "=datetime64[Y]", "<datetime64[Y]", ">datetime64[Y]",
+    "datetime64[M]", "=datetime64[M]", "<datetime64[M]", ">datetime64[M]",
+    "datetime64[W]", "=datetime64[W]", "<datetime64[W]", ">datetime64[W]",
+    "datetime64[D]", "=datetime64[D]", "<datetime64[D]", ">datetime64[D]",
+    "datetime64[h]", "=datetime64[h]", "<datetime64[h]", ">datetime64[h]",
+    "datetime64[m]", "=datetime64[m]", "<datetime64[m]", ">datetime64[m]",
+    "datetime64[s]", "=datetime64[s]", "<datetime64[s]", ">datetime64[s]",
+    "datetime64[ms]", "=datetime64[ms]", "<datetime64[ms]", ">datetime64[ms]",
+    "datetime64[us]", "=datetime64[us]", "<datetime64[us]", ">datetime64[us]",
+    "datetime64[ns]", "=datetime64[ns]", "<datetime64[ns]", ">datetime64[ns]",
+    "datetime64[ps]", "=datetime64[ps]", "<datetime64[ps]", ">datetime64[ps]",
+    "datetime64[fs]", "=datetime64[fs]", "<datetime64[fs]", ">datetime64[fs]",
+    "datetime64[as]", "=datetime64[as]", "<datetime64[as]", ">datetime64[as]",
+    "M", "=M", "<M", ">M",
+    "M8", "=M8", "<M8", ">M8",
+    "M8[Y]", "=M8[Y]", "<M8[Y]", ">M8[Y]",
+    "M8[M]", "=M8[M]", "<M8[M]", ">M8[M]",
+    "M8[W]", "=M8[W]", "<M8[W]", ">M8[W]",
+    "M8[D]", "=M8[D]", "<M8[D]", ">M8[D]",
+    "M8[h]", "=M8[h]", "<M8[h]", ">M8[h]",
+    "M8[m]", "=M8[m]", "<M8[m]", ">M8[m]",
+    "M8[s]", "=M8[s]", "<M8[s]", ">M8[s]",
+    "M8[ms]", "=M8[ms]", "<M8[ms]", ">M8[ms]",
+    "M8[us]", "=M8[us]", "<M8[us]", ">M8[us]",
+    "M8[ns]", "=M8[ns]", "<M8[ns]", ">M8[ns]",
+    "M8[ps]", "=M8[ps]", "<M8[ps]", ">M8[ps]",
+    "M8[fs]", "=M8[fs]", "<M8[fs]", ">M8[fs]",
+    "M8[as]", "=M8[as]", "<M8[as]", ">M8[as]",
+]
+_TD64Codes = Literal[
+    "timedelta64", "=timedelta64", "<timedelta64", ">timedelta64",
+    "timedelta64[Y]", "=timedelta64[Y]", "<timedelta64[Y]", ">timedelta64[Y]",
+    "timedelta64[M]", "=timedelta64[M]", "<timedelta64[M]", ">timedelta64[M]",
+    "timedelta64[W]", "=timedelta64[W]", "<timedelta64[W]", ">timedelta64[W]",
+    "timedelta64[D]", "=timedelta64[D]", "<timedelta64[D]", ">timedelta64[D]",
+    "timedelta64[h]", "=timedelta64[h]", "<timedelta64[h]", ">timedelta64[h]",
+    "timedelta64[m]", "=timedelta64[m]", "<timedelta64[m]", ">timedelta64[m]",
+    "timedelta64[s]", "=timedelta64[s]", "<timedelta64[s]", ">timedelta64[s]",
+    "timedelta64[ms]", "=timedelta64[ms]", "<timedelta64[ms]", ">timedelta64[ms]",
+    "timedelta64[us]", "=timedelta64[us]", "<timedelta64[us]", ">timedelta64[us]",
+    "timedelta64[ns]", "=timedelta64[ns]", "<timedelta64[ns]", ">timedelta64[ns]",
+    "timedelta64[ps]", "=timedelta64[ps]", "<timedelta64[ps]", ">timedelta64[ps]",
+    "timedelta64[fs]", "=timedelta64[fs]", "<timedelta64[fs]", ">timedelta64[fs]",
+    "timedelta64[as]", "=timedelta64[as]", "<timedelta64[as]", ">timedelta64[as]",
+    "m", "=m", "<m", ">m",
+    "m8", "=m8", "<m8", ">m8",
+    "m8[Y]", "=m8[Y]", "<m8[Y]", ">m8[Y]",
+    "m8[M]", "=m8[M]", "<m8[M]", ">m8[M]",
+    "m8[W]", "=m8[W]", "<m8[W]", ">m8[W]",
+    "m8[D]", "=m8[D]", "<m8[D]", ">m8[D]",
+    "m8[h]", "=m8[h]", "<m8[h]", ">m8[h]",
+    "m8[m]", "=m8[m]", "<m8[m]", ">m8[m]",
+    "m8[s]", "=m8[s]", "<m8[s]", ">m8[s]",
+    "m8[ms]", "=m8[ms]", "<m8[ms]", ">m8[ms]",
+    "m8[us]", "=m8[us]", "<m8[us]", ">m8[us]",
+    "m8[ns]", "=m8[ns]", "<m8[ns]", ">m8[ns]",
+    "m8[ps]", "=m8[ps]", "<m8[ps]", ">m8[ps]",
+    "m8[fs]", "=m8[fs]", "<m8[fs]", ">m8[fs]",
+    "m8[as]", "=m8[as]", "<m8[as]", ">m8[as]",
+]
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_dtype_like.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_dtype_like.py
new file mode 100644
index 00000000..207a99c5
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_dtype_like.py
@@ -0,0 +1,246 @@
+from collections.abc import Sequence
+from typing import (
+ Any,
+ Sequence,
+ Union,
+ TypeVar,
+ Protocol,
+ TypedDict,
+ runtime_checkable,
+)
+
+import numpy as np
+
+from ._shape import _ShapeLike
+
+from ._char_codes import (
+ _BoolCodes,
+ _UInt8Codes,
+ _UInt16Codes,
+ _UInt32Codes,
+ _UInt64Codes,
+ _Int8Codes,
+ _Int16Codes,
+ _Int32Codes,
+ _Int64Codes,
+ _Float16Codes,
+ _Float32Codes,
+ _Float64Codes,
+ _Complex64Codes,
+ _Complex128Codes,
+ _ByteCodes,
+ _ShortCodes,
+ _IntCCodes,
+ _IntPCodes,
+ _IntCodes,
+ _LongLongCodes,
+ _UByteCodes,
+ _UShortCodes,
+ _UIntCCodes,
+ _UIntPCodes,
+ _UIntCodes,
+ _ULongLongCodes,
+ _HalfCodes,
+ _SingleCodes,
+ _DoubleCodes,
+ _LongDoubleCodes,
+ _CSingleCodes,
+ _CDoubleCodes,
+ _CLongDoubleCodes,
+ _DT64Codes,
+ _TD64Codes,
+ _StrCodes,
+ _BytesCodes,
+ _VoidCodes,
+ _ObjectCodes,
+)
+
+_SCT = TypeVar("_SCT", bound=np.generic)
+_DType_co = TypeVar("_DType_co", covariant=True, bound=np.dtype[Any])
+
+_DTypeLikeNested = Any # TODO: wait for support for recursive types
+
+
+# Mandatory keys
+class _DTypeDictBase(TypedDict):
+ names: Sequence[str]
+ formats: Sequence[_DTypeLikeNested]
+
+
+# Mandatory + optional keys
+class _DTypeDict(_DTypeDictBase, total=False):
+ # Only `str` elements are usable as indexing aliases,
+ # but `titles` can in principle accept any object
+ offsets: Sequence[int]
+ titles: Sequence[Any]
+ itemsize: int
+ aligned: bool
+
+
+# A protocol for anything with the dtype attribute
+@runtime_checkable
+class _SupportsDType(Protocol[_DType_co]):
+ @property
+ def dtype(self) -> _DType_co: ...
+
+
+# A subset of `npt.DTypeLike` that can be parametrized w.r.t. `np.generic`
+_DTypeLike = Union[
+ np.dtype[_SCT],
+ type[_SCT],
+ _SupportsDType[np.dtype[_SCT]],
+]
+
+
+# Would create a dtype[np.void]
+_VoidDTypeLike = Union[
+ # (flexible_dtype, itemsize)
+ tuple[_DTypeLikeNested, int],
+ # (fixed_dtype, shape)
+ tuple[_DTypeLikeNested, _ShapeLike],
+ # [(field_name, field_dtype, field_shape), ...]
+ #
+ # The type here is quite broad because NumPy accepts quite a wide
+ # range of inputs inside the list; see the tests for some
+ # examples.
+ list[Any],
+ # {'names': ..., 'formats': ..., 'offsets': ..., 'titles': ...,
+ # 'itemsize': ...}
+ _DTypeDict,
+ # (base_dtype, new_dtype)
+ tuple[_DTypeLikeNested, _DTypeLikeNested],
+]
+
+# Anything that can be coerced into numpy.dtype.
+# Reference: https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html
+DTypeLike = Union[
+ np.dtype[Any],
+ # default data type (float64)
+ None,
+ # array-scalar types and generic types
+ type[Any], # NOTE: We're stuck with `type[Any]` due to object dtypes
+ # anything with a dtype attribute
+ _SupportsDType[np.dtype[Any]],
+ # character codes, type strings or comma-separated fields, e.g., 'float64'
+ str,
+ _VoidDTypeLike,
+]
+
+# NOTE: while it is possible to provide the dtype as a dict of
+# dtype-like objects (e.g. `{'field1': ..., 'field2': ..., ...}`),
+# this syntax is officially discourged and
+# therefore not included in the Union defining `DTypeLike`.
+#
+# See https://github.com/numpy/numpy/issues/16891 for more details.
+
+# Aliases for commonly used dtype-like objects.
+# Note that the precision of `np.number` subclasses is ignored herein.
+_DTypeLikeBool = Union[
+ type[bool],
+ type[np.bool_],
+ np.dtype[np.bool_],
+ _SupportsDType[np.dtype[np.bool_]],
+ _BoolCodes,
+]
+_DTypeLikeUInt = Union[
+ type[np.unsignedinteger],
+ np.dtype[np.unsignedinteger],
+ _SupportsDType[np.dtype[np.unsignedinteger]],
+ _UInt8Codes,
+ _UInt16Codes,
+ _UInt32Codes,
+ _UInt64Codes,
+ _UByteCodes,
+ _UShortCodes,
+ _UIntCCodes,
+ _UIntPCodes,
+ _UIntCodes,
+ _ULongLongCodes,
+]
+_DTypeLikeInt = Union[
+ type[int],
+ type[np.signedinteger],
+ np.dtype[np.signedinteger],
+ _SupportsDType[np.dtype[np.signedinteger]],
+ _Int8Codes,
+ _Int16Codes,
+ _Int32Codes,
+ _Int64Codes,
+ _ByteCodes,
+ _ShortCodes,
+ _IntCCodes,
+ _IntPCodes,
+ _IntCodes,
+ _LongLongCodes,
+]
+_DTypeLikeFloat = Union[
+ type[float],
+ type[np.floating],
+ np.dtype[np.floating],
+ _SupportsDType[np.dtype[np.floating]],
+ _Float16Codes,
+ _Float32Codes,
+ _Float64Codes,
+ _HalfCodes,
+ _SingleCodes,
+ _DoubleCodes,
+ _LongDoubleCodes,
+]
+_DTypeLikeComplex = Union[
+ type[complex],
+ type[np.complexfloating],
+ np.dtype[np.complexfloating],
+ _SupportsDType[np.dtype[np.complexfloating]],
+ _Complex64Codes,
+ _Complex128Codes,
+ _CSingleCodes,
+ _CDoubleCodes,
+ _CLongDoubleCodes,
+]
+_DTypeLikeDT64 = Union[
+ type[np.timedelta64],
+ np.dtype[np.timedelta64],
+ _SupportsDType[np.dtype[np.timedelta64]],
+ _TD64Codes,
+]
+_DTypeLikeTD64 = Union[
+ type[np.datetime64],
+ np.dtype[np.datetime64],
+ _SupportsDType[np.dtype[np.datetime64]],
+ _DT64Codes,
+]
+_DTypeLikeStr = Union[
+ type[str],
+ type[np.str_],
+ np.dtype[np.str_],
+ _SupportsDType[np.dtype[np.str_]],
+ _StrCodes,
+]
+_DTypeLikeBytes = Union[
+ type[bytes],
+ type[np.bytes_],
+ np.dtype[np.bytes_],
+ _SupportsDType[np.dtype[np.bytes_]],
+ _BytesCodes,
+]
+_DTypeLikeVoid = Union[
+ type[np.void],
+ np.dtype[np.void],
+ _SupportsDType[np.dtype[np.void]],
+ _VoidCodes,
+ _VoidDTypeLike,
+]
+_DTypeLikeObject = Union[
+ type,
+ np.dtype[np.object_],
+ _SupportsDType[np.dtype[np.object_]],
+ _ObjectCodes,
+]
+
+_DTypeLikeComplex_co = Union[
+ _DTypeLikeBool,
+ _DTypeLikeUInt,
+ _DTypeLikeInt,
+ _DTypeLikeFloat,
+ _DTypeLikeComplex,
+]
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_extended_precision.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_extended_precision.py
new file mode 100644
index 00000000..7246b47d
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_extended_precision.py
@@ -0,0 +1,27 @@
+"""A module with platform-specific extended precision
+`numpy.number` subclasses.
+
+The subclasses are defined here (instead of ``__init__.pyi``) such
+that they can be imported conditionally via numpy's mypy plugin.
+"""
+
+import numpy as np
+from . import (
+ _80Bit,
+ _96Bit,
+ _128Bit,
+ _256Bit,
+)
+
+uint128 = np.unsignedinteger[_128Bit]
+uint256 = np.unsignedinteger[_256Bit]
+int128 = np.signedinteger[_128Bit]
+int256 = np.signedinteger[_256Bit]
+float80 = np.floating[_80Bit]
+float96 = np.floating[_96Bit]
+float128 = np.floating[_128Bit]
+float256 = np.floating[_256Bit]
+complex160 = np.complexfloating[_80Bit, _80Bit]
+complex192 = np.complexfloating[_96Bit, _96Bit]
+complex256 = np.complexfloating[_128Bit, _128Bit]
+complex512 = np.complexfloating[_256Bit, _256Bit]
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_nbit.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_nbit.py
new file mode 100644
index 00000000..b8d35db4
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_nbit.py
@@ -0,0 +1,16 @@
+"""A module with the precisions of platform-specific `~numpy.number`s."""
+
+from typing import Any
+
+# To-be replaced with a `npt.NBitBase` subclass by numpy's mypy plugin
+_NBitByte = Any
+_NBitShort = Any
+_NBitIntC = Any
+_NBitIntP = Any
+_NBitInt = Any
+_NBitLongLong = Any
+
+_NBitHalf = Any
+_NBitSingle = Any
+_NBitDouble = Any
+_NBitLongDouble = Any
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_nested_sequence.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_nested_sequence.py
new file mode 100644
index 00000000..3d0d25ae
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_nested_sequence.py
@@ -0,0 +1,86 @@
+"""A module containing the `_NestedSequence` protocol."""
+
+from __future__ import annotations
+
+from collections.abc import Iterator
+from typing import (
+ Any,
+ TypeVar,
+ Protocol,
+ runtime_checkable,
+)
+
+__all__ = ["_NestedSequence"]
+
+_T_co = TypeVar("_T_co", covariant=True)
+
+
+@runtime_checkable
+class _NestedSequence(Protocol[_T_co]):
+ """A protocol for representing nested sequences.
+
+ Warning
+ -------
+ `_NestedSequence` currently does not work in combination with typevars,
+    *e.g.* ``def func(a: _NestedSequence[T]) -> T: ...``.
+
+ See Also
+ --------
+ collections.abc.Sequence
+ ABCs for read-only and mutable :term:`sequences`.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ >>> from __future__ import annotations
+
+ >>> from typing import TYPE_CHECKING
+ >>> import numpy as np
+ >>> from numpy._typing import _NestedSequence
+
+ >>> def get_dtype(seq: _NestedSequence[float]) -> np.dtype[np.float64]:
+ ... return np.asarray(seq).dtype
+
+ >>> a = get_dtype([1.0])
+ >>> b = get_dtype([[1.0]])
+ >>> c = get_dtype([[[1.0]]])
+ >>> d = get_dtype([[[[1.0]]]])
+
+ >>> if TYPE_CHECKING:
+ ... reveal_locals()
+ ... # note: Revealed local types are:
+ ... # note: a: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
+ ... # note: b: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
+ ... # note: c: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
+ ... # note: d: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
+
+ """
+
+ def __len__(self, /) -> int:
+ """Implement ``len(self)``."""
+ raise NotImplementedError
+
+ def __getitem__(self, index: int, /) -> _T_co | _NestedSequence[_T_co]:
+ """Implement ``self[x]``."""
+ raise NotImplementedError
+
+ def __contains__(self, x: object, /) -> bool:
+ """Implement ``x in self``."""
+ raise NotImplementedError
+
+ def __iter__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]:
+ """Implement ``iter(self)``."""
+ raise NotImplementedError
+
+ def __reversed__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]:
+ """Implement ``reversed(self)``."""
+ raise NotImplementedError
+
+ def count(self, value: Any, /) -> int:
+ """Return the number of occurrences of `value`."""
+ raise NotImplementedError
+
+ def index(self, value: Any, /) -> int:
+ """Return the first index of `value`."""
+ raise NotImplementedError
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_scalars.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_scalars.py
new file mode 100644
index 00000000..e46ff04a
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_scalars.py
@@ -0,0 +1,30 @@
+from typing import Union, Any
+
+import numpy as np
+
+# NOTE: `_StrLike_co` and `_BytesLike_co` are pointless, as `np.str_` and
+# `np.bytes_` are already subclasses of their builtin counterpart
+
+_CharLike_co = Union[str, bytes]
+
+# The 6 `<X>Like_co` type-aliases below represent all scalars that can be
+# coerced into `<X>` (with the casting rule `same_kind`)
+_BoolLike_co = Union[bool, np.bool_]
+_UIntLike_co = Union[_BoolLike_co, np.unsignedinteger[Any]]
+_IntLike_co = Union[_BoolLike_co, int, np.integer[Any]]
+_FloatLike_co = Union[_IntLike_co, float, np.floating[Any]]
+_ComplexLike_co = Union[_FloatLike_co, complex, np.complexfloating[Any, Any]]
+_TD64Like_co = Union[_IntLike_co, np.timedelta64]
+
+_NumberLike_co = Union[int, float, complex, np.number[Any], np.bool_]
+_ScalarLike_co = Union[
+ int,
+ float,
+ complex,
+ str,
+ bytes,
+ np.generic,
+]
+
+# `_VoidLike_co` is technically not a scalar, but it's close enough
+_VoidLike_co = Union[tuple[Any, ...], np.void]
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_shape.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_shape.py
new file mode 100644
index 00000000..4f1204e4
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_shape.py
@@ -0,0 +1,7 @@
+from collections.abc import Sequence
+from typing import Union, SupportsIndex
+
+_Shape = tuple[int, ...]
+
+# Anything that can be coerced to a shape tuple
+_ShapeLike = Union[SupportsIndex, Sequence[SupportsIndex]]
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_ufunc.pyi b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_ufunc.pyi
new file mode 100644
index 00000000..9f8e0d4e
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/_ufunc.pyi
@@ -0,0 +1,445 @@
+"""A module with private type-check-only `numpy.ufunc` subclasses.
+
+The signatures of the ufuncs are too varied to reasonably type
+with a single class. So instead, `ufunc` has been expanded into
+four private subclasses, one for each combination of
+`~ufunc.nin` and `~ufunc.nout`.
+
+"""
+
+from typing import (
+ Any,
+ Generic,
+ overload,
+ TypeVar,
+ Literal,
+ SupportsIndex,
+ Protocol,
+)
+
+from numpy import ufunc, _CastingKind, _OrderKACF
+from numpy.typing import NDArray
+
+from ._shape import _ShapeLike
+from ._scalars import _ScalarLike_co
+from ._array_like import ArrayLike, _ArrayLikeBool_co, _ArrayLikeInt_co
+from ._dtype_like import DTypeLike
+
+_T = TypeVar("_T")
+_2Tuple = tuple[_T, _T]
+_3Tuple = tuple[_T, _T, _T]
+_4Tuple = tuple[_T, _T, _T, _T]
+
+_NTypes = TypeVar("_NTypes", bound=int)
+_IDType = TypeVar("_IDType", bound=Any)
+_NameType = TypeVar("_NameType", bound=str)
+
+
+class _SupportsArrayUFunc(Protocol):
+ def __array_ufunc__(
+ self,
+ ufunc: ufunc,
+ method: Literal["__call__", "reduce", "reduceat", "accumulate", "outer", "inner"],
+ *inputs: Any,
+ **kwargs: Any,
+ ) -> Any: ...
+
+
+# NOTE: In reality `extobj` should be a list of length 3 containing an
+# int, an int, and a callable, but there's no way to properly express
+# non-homogeneous lists.
+# Use `Any` over `Union` to avoid issues related to lists invariance.
+
+# NOTE: `reduce`, `accumulate`, `reduceat` and `outer` raise a ValueError for
+# ufuncs that don't accept two input arguments and return one output argument.
+# In such cases the respective methods are simply typed as `None`.
+
+# NOTE: Similarly, `at` won't be defined for ufuncs that return
+# multiple outputs; in such cases `at` is typed as `None`
+
+# NOTE: If 2 output types are returned then `out` must be a
+# 2-tuple of arrays. Otherwise `None` or a plain array are also acceptable
+
+class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
+ @property
+ def __name__(self) -> _NameType: ...
+ @property
+ def ntypes(self) -> _NTypes: ...
+ @property
+ def identity(self) -> _IDType: ...
+ @property
+ def nin(self) -> Literal[1]: ...
+ @property
+ def nout(self) -> Literal[1]: ...
+ @property
+ def nargs(self) -> Literal[2]: ...
+ @property
+ def signature(self) -> None: ...
+ @property
+ def reduce(self) -> None: ...
+ @property
+ def accumulate(self) -> None: ...
+ @property
+ def reduceat(self) -> None: ...
+ @property
+ def outer(self) -> None: ...
+
+ @overload
+ def __call__(
+ self,
+ __x1: _ScalarLike_co,
+ out: None = ...,
+ *,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _2Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> Any: ...
+ @overload
+ def __call__(
+ self,
+ __x1: ArrayLike,
+ out: None | NDArray[Any] | tuple[NDArray[Any]] = ...,
+ *,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _2Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> NDArray[Any]: ...
+ @overload
+ def __call__(
+ self,
+ __x1: _SupportsArrayUFunc,
+ out: None | NDArray[Any] | tuple[NDArray[Any]] = ...,
+ *,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _2Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> Any: ...
+
+ def at(
+ self,
+ a: _SupportsArrayUFunc,
+ indices: _ArrayLikeInt_co,
+ /,
+ ) -> None: ...
+
+class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
+ @property
+ def __name__(self) -> _NameType: ...
+ @property
+ def ntypes(self) -> _NTypes: ...
+ @property
+ def identity(self) -> _IDType: ...
+ @property
+ def nin(self) -> Literal[2]: ...
+ @property
+ def nout(self) -> Literal[1]: ...
+ @property
+ def nargs(self) -> Literal[3]: ...
+ @property
+ def signature(self) -> None: ...
+
+ @overload
+ def __call__(
+ self,
+ __x1: _ScalarLike_co,
+ __x2: _ScalarLike_co,
+ out: None = ...,
+ *,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _3Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> Any: ...
+ @overload
+ def __call__(
+ self,
+ __x1: ArrayLike,
+ __x2: ArrayLike,
+ out: None | NDArray[Any] | tuple[NDArray[Any]] = ...,
+ *,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _3Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> NDArray[Any]: ...
+
+ def at(
+ self,
+ a: NDArray[Any],
+ indices: _ArrayLikeInt_co,
+ b: ArrayLike,
+ /,
+ ) -> None: ...
+
+ def reduce(
+ self,
+ array: ArrayLike,
+ axis: None | _ShapeLike = ...,
+ dtype: DTypeLike = ...,
+ out: None | NDArray[Any] = ...,
+ keepdims: bool = ...,
+ initial: Any = ...,
+ where: _ArrayLikeBool_co = ...,
+ ) -> Any: ...
+
+ def accumulate(
+ self,
+ array: ArrayLike,
+ axis: SupportsIndex = ...,
+ dtype: DTypeLike = ...,
+ out: None | NDArray[Any] = ...,
+ ) -> NDArray[Any]: ...
+
+ def reduceat(
+ self,
+ array: ArrayLike,
+ indices: _ArrayLikeInt_co,
+ axis: SupportsIndex = ...,
+ dtype: DTypeLike = ...,
+ out: None | NDArray[Any] = ...,
+ ) -> NDArray[Any]: ...
+
+ # Expand `**kwargs` into explicit keyword-only arguments
+ @overload
+ def outer(
+ self,
+ A: _ScalarLike_co,
+ B: _ScalarLike_co,
+ /, *,
+ out: None = ...,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _3Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> Any: ...
+ @overload
+ def outer( # type: ignore[misc]
+ self,
+ A: ArrayLike,
+ B: ArrayLike,
+ /, *,
+ out: None | NDArray[Any] | tuple[NDArray[Any]] = ...,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _3Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> NDArray[Any]: ...
+
+class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
+ @property
+ def __name__(self) -> _NameType: ...
+ @property
+ def ntypes(self) -> _NTypes: ...
+ @property
+ def identity(self) -> _IDType: ...
+ @property
+ def nin(self) -> Literal[1]: ...
+ @property
+ def nout(self) -> Literal[2]: ...
+ @property
+ def nargs(self) -> Literal[3]: ...
+ @property
+ def signature(self) -> None: ...
+ @property
+ def at(self) -> None: ...
+ @property
+ def reduce(self) -> None: ...
+ @property
+ def accumulate(self) -> None: ...
+ @property
+ def reduceat(self) -> None: ...
+ @property
+ def outer(self) -> None: ...
+
+ @overload
+ def __call__(
+ self,
+ __x1: _ScalarLike_co,
+ __out1: None = ...,
+ __out2: None = ...,
+ *,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _3Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> _2Tuple[Any]: ...
+ @overload
+ def __call__(
+ self,
+ __x1: ArrayLike,
+ __out1: None | NDArray[Any] = ...,
+ __out2: None | NDArray[Any] = ...,
+ *,
+ out: _2Tuple[NDArray[Any]] = ...,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _3Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> _2Tuple[NDArray[Any]]: ...
+ @overload
+ def __call__(
+ self,
+ __x1: _SupportsArrayUFunc,
+ __out1: None | NDArray[Any] = ...,
+ __out2: None | NDArray[Any] = ...,
+ *,
+ out: _2Tuple[NDArray[Any]] = ...,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _3Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> _2Tuple[Any]: ...
+
+class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
+ @property
+ def __name__(self) -> _NameType: ...
+ @property
+ def ntypes(self) -> _NTypes: ...
+ @property
+ def identity(self) -> _IDType: ...
+ @property
+ def nin(self) -> Literal[2]: ...
+ @property
+ def nout(self) -> Literal[2]: ...
+ @property
+ def nargs(self) -> Literal[4]: ...
+ @property
+ def signature(self) -> None: ...
+ @property
+ def at(self) -> None: ...
+ @property
+ def reduce(self) -> None: ...
+ @property
+ def accumulate(self) -> None: ...
+ @property
+ def reduceat(self) -> None: ...
+ @property
+ def outer(self) -> None: ...
+
+ @overload
+ def __call__(
+ self,
+ __x1: _ScalarLike_co,
+ __x2: _ScalarLike_co,
+ __out1: None = ...,
+ __out2: None = ...,
+ *,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _4Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> _2Tuple[Any]: ...
+ @overload
+ def __call__(
+ self,
+ __x1: ArrayLike,
+ __x2: ArrayLike,
+ __out1: None | NDArray[Any] = ...,
+ __out2: None | NDArray[Any] = ...,
+ *,
+ out: _2Tuple[NDArray[Any]] = ...,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _4Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> _2Tuple[NDArray[Any]]: ...
+
+class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
+ @property
+ def __name__(self) -> _NameType: ...
+ @property
+ def ntypes(self) -> _NTypes: ...
+ @property
+ def identity(self) -> _IDType: ...
+ @property
+ def nin(self) -> Literal[2]: ...
+ @property
+ def nout(self) -> Literal[1]: ...
+ @property
+ def nargs(self) -> Literal[3]: ...
+
+ # NOTE: In practice the only gufunc in the main namespace is `matmul`,
+ # so we can use its signature here
+ @property
+ def signature(self) -> Literal["(n?,k),(k,m?)->(n?,m?)"]: ...
+ @property
+ def reduce(self) -> None: ...
+ @property
+ def accumulate(self) -> None: ...
+ @property
+ def reduceat(self) -> None: ...
+ @property
+ def outer(self) -> None: ...
+ @property
+ def at(self) -> None: ...
+
+ # Scalar for 1D array-likes; ndarray otherwise
+ @overload
+ def __call__(
+ self,
+ __x1: ArrayLike,
+ __x2: ArrayLike,
+ out: None = ...,
+ *,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _3Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ axes: list[_2Tuple[SupportsIndex]] = ...,
+ ) -> Any: ...
+ @overload
+ def __call__(
+ self,
+ __x1: ArrayLike,
+ __x2: ArrayLike,
+ out: NDArray[Any] | tuple[NDArray[Any]],
+ *,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _3Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ axes: list[_2Tuple[SupportsIndex]] = ...,
+ ) -> NDArray[Any]: ...
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/setup.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/setup.py
new file mode 100644
index 00000000..24022fda
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_typing/setup.py
@@ -0,0 +1,10 @@
+def configuration(parent_package='', top_path=None):
+ from numpy.distutils.misc_util import Configuration
+ config = Configuration('_typing', parent_package, top_path)
+ config.add_data_files('*.pyi')
+ return config
+
+
+if __name__ == '__main__':
+ from numpy.distutils.core import setup
+ setup(configuration=configuration)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_utils/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_utils/__init__.py
new file mode 100644
index 00000000..388dd917
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_utils/__init__.py
@@ -0,0 +1,29 @@
+"""
+This is a module for defining private helpers which do not depend on the
+rest of NumPy.
+
+Everything in here must be self-contained so that it can be
+imported anywhere else without creating circular imports.
+If a utility requires the import of NumPy, it probably belongs
+in ``numpy.core``.
+"""
+
+from ._convertions import asunicode, asbytes
+
+
+def set_module(module):
+ """Private decorator for overriding __module__ on a function or class.
+
+ Example usage::
+
+ @set_module('numpy')
+ def example():
+ pass
+
+ assert example.__module__ == 'numpy'
+ """
+ def decorator(func):
+ if module is not None:
+ func.__module__ = module
+ return func
+ return decorator
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_utils/_convertions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_utils/_convertions.py
new file mode 100644
index 00000000..ab15a8ba
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_utils/_convertions.py
@@ -0,0 +1,18 @@
+"""
+A set of methods retained from np.compat module that
+are still used across the codebase.
+"""
+
+__all__ = ["asunicode", "asbytes"]
+
+
+def asunicode(s):
+ if isinstance(s, bytes):
+ return s.decode('latin1')
+ return str(s)
+
+
+def asbytes(s):
+ if isinstance(s, bytes):
+ return s
+ return str(s).encode('latin1')
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_utils/_inspect.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_utils/_inspect.py
new file mode 100644
index 00000000..9a874a71
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_utils/_inspect.py
@@ -0,0 +1,191 @@
+"""Subset of inspect module from upstream python
+
+We use this instead of upstream because upstream inspect is slow to import, and
+significantly contributes to numpy import times. Importing this copy has almost
+no overhead.
+
+"""
+import types
+
+__all__ = ['getargspec', 'formatargspec']
+
+# ----------------------------------------------------------- type-checking
+def ismethod(object):
+ """Return true if the object is an instance method.
+
+ Instance method objects provide these attributes:
+ __doc__ documentation string
+ __name__ name with which this method was defined
+ im_class class object in which this method belongs
+ im_func function object containing implementation of method
+ im_self instance to which this method is bound, or None
+
+ """
+ return isinstance(object, types.MethodType)
+
+def isfunction(object):
+ """Return true if the object is a user-defined function.
+
+ Function objects provide these attributes:
+ __doc__ documentation string
+ __name__ name with which this function was defined
+ func_code code object containing compiled function bytecode
+ func_defaults tuple of any default values for arguments
+ func_doc (same as __doc__)
+ func_globals global namespace in which this function was defined
+ func_name (same as __name__)
+
+ """
+ return isinstance(object, types.FunctionType)
+
+def iscode(object):
+ """Return true if the object is a code object.
+
+ Code objects provide these attributes:
+ co_argcount number of arguments (not including * or ** args)
+ co_code string of raw compiled bytecode
+ co_consts tuple of constants used in the bytecode
+ co_filename name of file in which this code object was created
+ co_firstlineno number of first line in Python source code
+ co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
+ co_lnotab encoded mapping of line numbers to bytecode indices
+ co_name name with which this code object was defined
+ co_names tuple of names of local variables
+ co_nlocals number of local variables
+ co_stacksize virtual machine stack space required
+ co_varnames tuple of names of arguments and local variables
+
+ """
+ return isinstance(object, types.CodeType)
+
+# ------------------------------------------------ argument list extraction
+# These constants are from Python's compile.h.
+CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8
+
+def getargs(co):
+ """Get information about the arguments accepted by a code object.
+
+ Three things are returned: (args, varargs, varkw), where 'args' is
+ a list of argument names (possibly containing nested lists), and
+ 'varargs' and 'varkw' are the names of the * and ** arguments or None.
+
+ """
+
+ if not iscode(co):
+ raise TypeError('arg is not a code object')
+
+ nargs = co.co_argcount
+ names = co.co_varnames
+ args = list(names[:nargs])
+
+ # The following acrobatics are for anonymous (tuple) arguments.
+ # Which we do not need to support, so remove to avoid importing
+ # the dis module.
+ for i in range(nargs):
+ if args[i][:1] in ['', '.']:
+ raise TypeError("tuple function arguments are not supported")
+ varargs = None
+ if co.co_flags & CO_VARARGS:
+ varargs = co.co_varnames[nargs]
+ nargs = nargs + 1
+ varkw = None
+ if co.co_flags & CO_VARKEYWORDS:
+ varkw = co.co_varnames[nargs]
+ return args, varargs, varkw
+
+def getargspec(func):
+ """Get the names and default values of a function's arguments.
+
+ A tuple of four things is returned: (args, varargs, varkw, defaults).
+ 'args' is a list of the argument names (it may contain nested lists).
+ 'varargs' and 'varkw' are the names of the * and ** arguments or None.
+ 'defaults' is an n-tuple of the default values of the last n arguments.
+
+ """
+
+ if ismethod(func):
+ func = func.__func__
+ if not isfunction(func):
+ raise TypeError('arg is not a Python function')
+ args, varargs, varkw = getargs(func.__code__)
+ return args, varargs, varkw, func.__defaults__
+
+def getargvalues(frame):
+ """Get information about arguments passed into a particular frame.
+
+ A tuple of four things is returned: (args, varargs, varkw, locals).
+ 'args' is a list of the argument names (it may contain nested lists).
+ 'varargs' and 'varkw' are the names of the * and ** arguments or None.
+ 'locals' is the locals dictionary of the given frame.
+
+ """
+ args, varargs, varkw = getargs(frame.f_code)
+ return args, varargs, varkw, frame.f_locals
+
+def joinseq(seq):
+ if len(seq) == 1:
+ return '(' + seq[0] + ',)'
+ else:
+ return '(' + ', '.join(seq) + ')'
+
+def strseq(object, convert, join=joinseq):
+ """Recursively walk a sequence, stringifying each element.
+
+ """
+ if type(object) in [list, tuple]:
+ return join([strseq(_o, convert, join) for _o in object])
+ else:
+ return convert(object)
+
+def formatargspec(args, varargs=None, varkw=None, defaults=None,
+ formatarg=str,
+ formatvarargs=lambda name: '*' + name,
+ formatvarkw=lambda name: '**' + name,
+ formatvalue=lambda value: '=' + repr(value),
+ join=joinseq):
+ """Format an argument spec from the 4 values returned by getargspec.
+
+ The first four arguments are (args, varargs, varkw, defaults). The
+ other four arguments are the corresponding optional formatting functions
+ that are called to turn names and values into strings. The ninth
+ argument is an optional function to format the sequence of arguments.
+
+ """
+ specs = []
+ if defaults:
+ firstdefault = len(args) - len(defaults)
+ for i in range(len(args)):
+ spec = strseq(args[i], formatarg, join)
+ if defaults and i >= firstdefault:
+ spec = spec + formatvalue(defaults[i - firstdefault])
+ specs.append(spec)
+ if varargs is not None:
+ specs.append(formatvarargs(varargs))
+ if varkw is not None:
+ specs.append(formatvarkw(varkw))
+ return '(' + ', '.join(specs) + ')'
+
+def formatargvalues(args, varargs, varkw, locals,
+ formatarg=str,
+ formatvarargs=lambda name: '*' + name,
+ formatvarkw=lambda name: '**' + name,
+ formatvalue=lambda value: '=' + repr(value),
+ join=joinseq):
+ """Format an argument spec from the 4 values returned by getargvalues.
+
+ The first four arguments are (args, varargs, varkw, locals). The
+ next four arguments are the corresponding optional formatting functions
+ that are called to turn names and values into strings. The ninth
+ argument is an optional function to format the sequence of arguments.
+
+ """
+ def convert(name, locals=locals,
+ formatarg=formatarg, formatvalue=formatvalue):
+ return formatarg(name) + formatvalue(locals[name])
+ specs = [strseq(arg, convert, join) for arg in args]
+
+ if varargs:
+ specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
+ if varkw:
+ specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
+ return '(' + ', '.join(specs) + ')'
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/_utils/_pep440.py b/dbdpy-env/lib/python3.9/site-packages/numpy/_utils/_pep440.py
new file mode 100644
index 00000000..73d0afb5
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/_utils/_pep440.py
@@ -0,0 +1,487 @@
+"""Utility to compare pep440 compatible version strings.
+
+The LooseVersion and StrictVersion classes that distutils provides don't
+work; they don't recognize anything like alpha/beta/rc/dev versions.
+"""
+
+# Copyright (c) Donald Stufft and individual contributors.
+# All rights reserved.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+import collections
+import itertools
+import re
+
+
+__all__ = [
+ "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN",
+]
+
+
+# BEGIN packaging/_structures.py
+
+
+class Infinity:
+ def __repr__(self):
+ return "Infinity"
+
+ def __hash__(self):
+ return hash(repr(self))
+
+ def __lt__(self, other):
+ return False
+
+ def __le__(self, other):
+ return False
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__)
+
+ def __ne__(self, other):
+ return not isinstance(other, self.__class__)
+
+ def __gt__(self, other):
+ return True
+
+ def __ge__(self, other):
+ return True
+
+ def __neg__(self):
+ return NegativeInfinity
+
+
+Infinity = Infinity()
+
+
+class NegativeInfinity:
+ def __repr__(self):
+ return "-Infinity"
+
+ def __hash__(self):
+ return hash(repr(self))
+
+ def __lt__(self, other):
+ return True
+
+ def __le__(self, other):
+ return True
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__)
+
+ def __ne__(self, other):
+ return not isinstance(other, self.__class__)
+
+ def __gt__(self, other):
+ return False
+
+ def __ge__(self, other):
+ return False
+
+ def __neg__(self):
+ return Infinity
+
+
+# BEGIN packaging/version.py
+
+
+NegativeInfinity = NegativeInfinity()
+
+_Version = collections.namedtuple(
+ "_Version",
+ ["epoch", "release", "dev", "pre", "post", "local"],
+)
+
+
+def parse(version):
+ """
+ Parse the given version string and return either a :class:`Version` object
+ or a :class:`LegacyVersion` object depending on if the given version is
+ a valid PEP 440 version or a legacy version.
+ """
+ try:
+ return Version(version)
+ except InvalidVersion:
+ return LegacyVersion(version)
+
+
+class InvalidVersion(ValueError):
+ """
+ An invalid version was found, users should refer to PEP 440.
+ """
+
+
+class _BaseVersion:
+
+ def __hash__(self):
+ return hash(self._key)
+
+ def __lt__(self, other):
+ return self._compare(other, lambda s, o: s < o)
+
+ def __le__(self, other):
+ return self._compare(other, lambda s, o: s <= o)
+
+ def __eq__(self, other):
+ return self._compare(other, lambda s, o: s == o)
+
+ def __ge__(self, other):
+ return self._compare(other, lambda s, o: s >= o)
+
+ def __gt__(self, other):
+ return self._compare(other, lambda s, o: s > o)
+
+ def __ne__(self, other):
+ return self._compare(other, lambda s, o: s != o)
+
+ def _compare(self, other, method):
+ if not isinstance(other, _BaseVersion):
+ return NotImplemented
+
+ return method(self._key, other._key)
+
+
+class LegacyVersion(_BaseVersion):
+
+ def __init__(self, version):
+ self._version = str(version)
+ self._key = _legacy_cmpkey(self._version)
+
+ def __str__(self):
+ return self._version
+
+ def __repr__(self):
+ return "<LegacyVersion({0})>".format(repr(str(self)))
+
+ @property
+ def public(self):
+ return self._version
+
+ @property
+ def base_version(self):
+ return self._version
+
+ @property
+ def local(self):
+ return None
+
+ @property
+ def is_prerelease(self):
+ return False
+
+ @property
+ def is_postrelease(self):
+ return False
+
+
+_legacy_version_component_re = re.compile(
+ r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
+)
+
+_legacy_version_replacement_map = {
+ "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
+}
+
+
+def _parse_version_parts(s):
+ for part in _legacy_version_component_re.split(s):
+ part = _legacy_version_replacement_map.get(part, part)
+
+ if not part or part == ".":
+ continue
+
+ if part[:1] in "0123456789":
+ # pad for numeric comparison
+ yield part.zfill(8)
+ else:
+ yield "*" + part
+
+ # ensure that alpha/beta/candidate are before final
+ yield "*final"
+
+
+def _legacy_cmpkey(version):
+ # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
+ # greater than or equal to 0. This will effectively put the LegacyVersion,
+ # which uses the defacto standard originally implemented by setuptools,
+ # as before all PEP 440 versions.
+ epoch = -1
+
+ # This scheme is taken from pkg_resources.parse_version setuptools prior to
+ # its adoption of the packaging library.
+ parts = []
+ for part in _parse_version_parts(version.lower()):
+ if part.startswith("*"):
+ # remove "-" before a prerelease tag
+ if part < "*final":
+ while parts and parts[-1] == "*final-":
+ parts.pop()
+
+ # remove trailing zeros from each series of numeric parts
+ while parts and parts[-1] == "00000000":
+ parts.pop()
+
+ parts.append(part)
+ parts = tuple(parts)
+
+ return epoch, parts
+
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+VERSION_PATTERN = r"""
+ v?
+ (?:
+ (?:(?P<epoch>[0-9]+)!)? # epoch
+ (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
+ (?P<pre> # pre-release
+ [-_\.]?
+ (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+ [-_\.]?
+ (?P<pre_n>[0-9]+)?
+ )?
+ (?P<post> # post release
+ (?:-(?P<post_n1>[0-9]+))
+ |
+ (?:
+ [-_\.]?
+ (?P<post_l>post|rev|r)
+ [-_\.]?
+ (?P<post_n2>[0-9]+)?
+ )
+ )?
+ (?P<dev> # dev release
+ [-_\.]?
+ (?P<dev_l>dev)
+ [-_\.]?
+ (?P<dev_n>[0-9]+)?
+ )?
+ )
+ (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
+"""
+
+
+class Version(_BaseVersion):
+
+ _regex = re.compile(
+ r"^\s*" + VERSION_PATTERN + r"\s*$",
+ re.VERBOSE | re.IGNORECASE,
+ )
+
+ def __init__(self, version):
+ # Validate the version and parse it into pieces
+ match = self._regex.search(version)
+ if not match:
+ raise InvalidVersion("Invalid version: '{0}'".format(version))
+
+ # Store the parsed out pieces of the version
+ self._version = _Version(
+ epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+ release=tuple(int(i) for i in match.group("release").split(".")),
+ pre=_parse_letter_version(
+ match.group("pre_l"),
+ match.group("pre_n"),
+ ),
+ post=_parse_letter_version(
+ match.group("post_l"),
+ match.group("post_n1") or match.group("post_n2"),
+ ),
+ dev=_parse_letter_version(
+ match.group("dev_l"),
+ match.group("dev_n"),
+ ),
+ local=_parse_local_version(match.group("local")),
+ )
+
+ # Generate a key which will be used for sorting
+ self._key = _cmpkey(
+ self._version.epoch,
+ self._version.release,
+ self._version.pre,
+ self._version.post,
+ self._version.dev,
+ self._version.local,
+ )
+
+ def __repr__(self):
+ return "<Version({0})>".format(repr(str(self)))
+
+ def __str__(self):
+ parts = []
+
+ # Epoch
+ if self._version.epoch != 0:
+ parts.append("{0}!".format(self._version.epoch))
+
+ # Release segment
+ parts.append(".".join(str(x) for x in self._version.release))
+
+ # Pre-release
+ if self._version.pre is not None:
+ parts.append("".join(str(x) for x in self._version.pre))
+
+ # Post-release
+ if self._version.post is not None:
+ parts.append(".post{0}".format(self._version.post[1]))
+
+ # Development release
+ if self._version.dev is not None:
+ parts.append(".dev{0}".format(self._version.dev[1]))
+
+ # Local version segment
+ if self._version.local is not None:
+ parts.append(
+ "+{0}".format(".".join(str(x) for x in self._version.local))
+ )
+
+ return "".join(parts)
+
+ @property
+ def public(self):
+ return str(self).split("+", 1)[0]
+
+ @property
+ def base_version(self):
+ parts = []
+
+ # Epoch
+ if self._version.epoch != 0:
+ parts.append("{0}!".format(self._version.epoch))
+
+ # Release segment
+ parts.append(".".join(str(x) for x in self._version.release))
+
+ return "".join(parts)
+
+ @property
+ def local(self):
+ version_string = str(self)
+ if "+" in version_string:
+ return version_string.split("+", 1)[1]
+
+ @property
+ def is_prerelease(self):
+ return bool(self._version.dev or self._version.pre)
+
+ @property
+ def is_postrelease(self):
+ return bool(self._version.post)
+
+
+def _parse_letter_version(letter, number):
+ if letter:
+ # We assume there is an implicit 0 in a pre-release if there is
+ # no numeral associated with it.
+ if number is None:
+ number = 0
+
+ # We normalize any letters to their lower-case form
+ letter = letter.lower()
+
+ # We consider some words to be alternate spellings of other words and
+ # in those cases we want to normalize the spellings to our preferred
+ # spelling.
+ if letter == "alpha":
+ letter = "a"
+ elif letter == "beta":
+ letter = "b"
+ elif letter in ["c", "pre", "preview"]:
+ letter = "rc"
+ elif letter in ["rev", "r"]:
+ letter = "post"
+
+ return letter, int(number)
+ if not letter and number:
+ # We assume that if we are given a number but not given a letter,
+ # then this is using the implicit post release syntax (e.g., 1.0-1)
+ letter = "post"
+
+ return letter, int(number)
+
+
+_local_version_seperators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local):
+ """
+ Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+ """
+ if local is not None:
+ return tuple(
+ part.lower() if not part.isdigit() else int(part)
+ for part in _local_version_seperators.split(local)
+ )
+
+
+def _cmpkey(epoch, release, pre, post, dev, local):
+ # When we compare a release version, we want to compare it with all of the
+ # trailing zeros removed. So we'll use a reverse the list, drop all the now
+ # leading zeros until we come to something non-zero, then take the rest,
+ # re-reverse it back into the correct order, and make it a tuple and use
+ # that for our sorting key.
+ release = tuple(
+ reversed(list(
+ itertools.dropwhile(
+ lambda x: x == 0,
+ reversed(release),
+ )
+ ))
+ )
+
+ # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+ # We'll do this by abusing the pre-segment, but we _only_ want to do this
+ # if there is no pre- or a post-segment. If we have one of those, then
+ # the normal sorting rules will handle this case correctly.
+ if pre is None and post is None and dev is not None:
+ pre = -Infinity
+ # Versions without a pre-release (except as noted above) should sort after
+ # those with one.
+ elif pre is None:
+ pre = Infinity
+
+ # Versions without a post-segment should sort before those with one.
+ if post is None:
+ post = -Infinity
+
+ # Versions without a development segment should sort after those with one.
+ if dev is None:
+ dev = Infinity
+
+ if local is None:
+ # Versions without a local segment should sort before those with one.
+ local = -Infinity
+ else:
+ # Versions with a local segment need that segment parsed to implement
+ # the sorting rules in PEP440.
+ # - Alphanumeric segments sort before numeric segments
+ # - Alphanumeric segments sort lexicographically
+ # - Numeric segments sort numerically
+ # - Shorter versions sort before longer versions when the prefixes
+ # match exactly
+ local = tuple(
+ (i, "") if isinstance(i, int) else (-Infinity, i)
+ for i in local
+ )
+
+ return epoch, release, pre, post, dev, local
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/__init__.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/__init__.py
new file mode 100644
index 00000000..77f22788
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/__init__.py
@@ -0,0 +1,387 @@
+"""
+A NumPy sub-namespace that conforms to the Python array API standard.
+
+This submodule accompanies NEP 47, which proposes its inclusion in NumPy. It
+is still considered experimental, and will issue a warning when imported.
+
+This is a proof-of-concept namespace that wraps the corresponding NumPy
+functions to give a conforming implementation of the Python array API standard
+(https://data-apis.github.io/array-api/latest/). The standard is currently in
+an RFC phase and comments on it are both welcome and encouraged. Comments
+should be made either at https://github.com/data-apis/array-api or at
+https://github.com/data-apis/consortium-feedback/discussions.
+
+NumPy already follows the proposed spec for the most part, so this module
+serves mostly as a thin wrapper around it. However, NumPy also implements a
+lot of behavior that is not included in the spec, so this serves as a
+restricted subset of the API. Only those functions that are part of the spec
+are included in this namespace, and all functions are given with the exact
+signature given in the spec, including the use of position-only arguments, and
+omitting any extra keyword arguments implemented by NumPy but not part of the
+spec. The behavior of some functions is also modified from the NumPy behavior
+to conform to the standard. Note that the underlying array object itself is
+wrapped in a wrapper Array() class, but is otherwise unchanged. This submodule
+is implemented in pure Python with no C extensions.
+
+The array API spec is designed as a "minimal API subset" and explicitly allows
+libraries to include behaviors not specified by it. But users of this module
+that intend to write portable code should be aware that only those behaviors
+that are listed in the spec are guaranteed to be implemented across libraries.
+Consequently, the NumPy implementation was chosen to be both conforming and
+minimal, so that users can use this implementation of the array API namespace
+and be sure that behaviors that it defines will be available in conforming
+namespaces from other libraries.
+
+A few notes about the current state of this submodule:
+
+- There is a test suite that tests modules against the array API standard at
+ https://github.com/data-apis/array-api-tests. The test suite is still a work
+ in progress, but the existing tests pass on this module, with a few
+ exceptions:
+
+ - DLPack support (see https://github.com/data-apis/array-api/pull/106) is
+ not included here, as it requires a full implementation in NumPy proper
+ first.
+
+ The test suite is not yet complete, and even the tests that exist are not
+ guaranteed to give a comprehensive coverage of the spec. Therefore, when
+ reviewing and using this submodule, you should refer to the standard
+ documents themselves. There are some tests in numpy.array_api.tests, but
+ they primarily focus on things that are not tested by the official array API
+ test suite.
+
+- There is a custom array object, numpy.array_api.Array, which is returned by
+ all functions in this module. All functions in the array API namespace
+ implicitly assume that they will only receive this object as input. The only
+ way to create instances of this object is to use one of the array creation
+ functions. It does not have a public constructor on the object itself. The
+ object is a small wrapper class around numpy.ndarray. The main purpose of it
+ is to restrict the namespace of the array object to only those dtypes and
+ only those methods that are required by the spec, as well as to limit/change
+ certain behavior that differs in the spec. In particular:
+
+ - The array API namespace does not have scalar objects, only 0-D arrays.
+ Operations on Array that would create a scalar in NumPy create a 0-D
+ array.
+
+ - Indexing: Only a subset of indices supported by NumPy are required by the
+ spec. The Array object restricts indexing to only allow those types of
+ indices that are required by the spec. See the docstring of the
+ numpy.array_api.Array._validate_indices helper function for more
+ information.
+
+ - Type promotion: Some type promotion rules are different in the spec. In
+ particular, the spec does not have any value-based casting. The spec also
+ does not require cross-kind casting, like integer -> floating-point. Only
+ those promotions that are explicitly required by the array API
+ specification are allowed in this module. See NEP 47 for more info.
+
+ - Functions do not automatically call asarray() on their input, and will not
+ work if the input type is not Array. The exception is array creation
+ functions, and Python operators on the Array object, which accept Python
+ scalars of the same type as the array dtype.
+
+- All functions include type annotations, corresponding to those given in the
+ spec (see _typing.py for definitions of some custom types). These do not
+ currently fully pass mypy due to some limitations in mypy.
+
+- Dtype objects are just the NumPy dtype objects, e.g., float64 =
+ np.dtype('float64'). The spec does not require any behavior on these dtype
+ objects other than that they be accessible by name and be comparable by
+ equality, but it was considered too much extra complexity to create custom
+ objects to represent dtypes.
+
+- All places where the implementations in this submodule are known to deviate
+ from their corresponding functions in NumPy are marked with "# Note:"
+ comments.
+
+Still TODO in this module are:
+
+- DLPack support for numpy.ndarray is still in progress. See
+ https://github.com/numpy/numpy/pull/19083.
+
+- The copy=False keyword argument to asarray() is not yet implemented. This
+ requires support in numpy.asarray() first.
+
+- Some functions are not yet fully tested in the array API test suite, and may
+ require updates that are not yet known until the tests are written.
+
+- The spec is still in an RFC phase and may still have minor updates, which
+ will need to be reflected here.
+
+- Complex number support in array API spec is planned but not yet finalized,
+ as are the fft extension and certain linear algebra functions such as eig
+ that require complex dtypes.
+
+"""
+
+import warnings
+
+warnings.warn(
+ "The numpy.array_api submodule is still experimental. See NEP 47.", stacklevel=2
+)
+
+__array_api_version__ = "2022.12"
+
+__all__ = ["__array_api_version__"]
+
+from ._constants import e, inf, nan, pi, newaxis
+
+__all__ += ["e", "inf", "nan", "pi"]
+
+from ._creation_functions import (
+ asarray,
+ arange,
+ empty,
+ empty_like,
+ eye,
+ from_dlpack,
+ full,
+ full_like,
+ linspace,
+ meshgrid,
+ ones,
+ ones_like,
+ tril,
+ triu,
+ zeros,
+ zeros_like,
+)
+
+__all__ += [
+ "asarray",
+ "arange",
+ "empty",
+ "empty_like",
+ "eye",
+ "from_dlpack",
+ "full",
+ "full_like",
+ "linspace",
+ "meshgrid",
+ "ones",
+ "ones_like",
+ "tril",
+ "triu",
+ "zeros",
+ "zeros_like",
+]
+
+from ._data_type_functions import (
+ astype,
+ broadcast_arrays,
+ broadcast_to,
+ can_cast,
+ finfo,
+ isdtype,
+ iinfo,
+ result_type,
+)
+
+__all__ += [
+ "astype",
+ "broadcast_arrays",
+ "broadcast_to",
+ "can_cast",
+ "finfo",
+ "iinfo",
+ "result_type",
+]
+
+from ._dtypes import (
+ int8,
+ int16,
+ int32,
+ int64,
+ uint8,
+ uint16,
+ uint32,
+ uint64,
+ float32,
+ float64,
+ complex64,
+ complex128,
+ bool,
+)
+
+__all__ += [
+ "int8",
+ "int16",
+ "int32",
+ "int64",
+ "uint8",
+ "uint16",
+ "uint32",
+ "uint64",
+ "float32",
+ "float64",
+ "bool",
+]
+
+from ._elementwise_functions import (
+ abs,
+ acos,
+ acosh,
+ add,
+ asin,
+ asinh,
+ atan,
+ atan2,
+ atanh,
+ bitwise_and,
+ bitwise_left_shift,
+ bitwise_invert,
+ bitwise_or,
+ bitwise_right_shift,
+ bitwise_xor,
+ ceil,
+ conj,
+ cos,
+ cosh,
+ divide,
+ equal,
+ exp,
+ expm1,
+ floor,
+ floor_divide,
+ greater,
+ greater_equal,
+ imag,
+ isfinite,
+ isinf,
+ isnan,
+ less,
+ less_equal,
+ log,
+ log1p,
+ log2,
+ log10,
+ logaddexp,
+ logical_and,
+ logical_not,
+ logical_or,
+ logical_xor,
+ multiply,
+ negative,
+ not_equal,
+ positive,
+ pow,
+ real,
+ remainder,
+ round,
+ sign,
+ sin,
+ sinh,
+ square,
+ sqrt,
+ subtract,
+ tan,
+ tanh,
+ trunc,
+)
+
+__all__ += [
+ "abs",
+ "acos",
+ "acosh",
+ "add",
+ "asin",
+ "asinh",
+ "atan",
+ "atan2",
+ "atanh",
+ "bitwise_and",
+ "bitwise_left_shift",
+ "bitwise_invert",
+ "bitwise_or",
+ "bitwise_right_shift",
+ "bitwise_xor",
+ "ceil",
+ "cos",
+ "cosh",
+ "divide",
+ "equal",
+ "exp",
+ "expm1",
+ "floor",
+ "floor_divide",
+ "greater",
+ "greater_equal",
+ "isfinite",
+ "isinf",
+ "isnan",
+ "less",
+ "less_equal",
+ "log",
+ "log1p",
+ "log2",
+ "log10",
+ "logaddexp",
+ "logical_and",
+ "logical_not",
+ "logical_or",
+ "logical_xor",
+ "multiply",
+ "negative",
+ "not_equal",
+ "positive",
+ "pow",
+ "remainder",
+ "round",
+ "sign",
+ "sin",
+ "sinh",
+ "square",
+ "sqrt",
+ "subtract",
+ "tan",
+ "tanh",
+ "trunc",
+]
+
+from ._indexing_functions import take
+
+__all__ += ["take"]
+
+# linalg is an extension in the array API spec, which is a sub-namespace. Only
+# a subset of functions in it are imported into the top-level namespace.
+from . import linalg
+
+__all__ += ["linalg"]
+
+from .linalg import matmul, tensordot, matrix_transpose, vecdot
+
+__all__ += ["matmul", "tensordot", "matrix_transpose", "vecdot"]
+
+from ._manipulation_functions import (
+ concat,
+ expand_dims,
+ flip,
+ permute_dims,
+ reshape,
+ roll,
+ squeeze,
+ stack,
+)
+
+__all__ += ["concat", "expand_dims", "flip", "permute_dims", "reshape", "roll", "squeeze", "stack"]
+
+from ._searching_functions import argmax, argmin, nonzero, where
+
+__all__ += ["argmax", "argmin", "nonzero", "where"]
+
+from ._set_functions import unique_all, unique_counts, unique_inverse, unique_values
+
+__all__ += ["unique_all", "unique_counts", "unique_inverse", "unique_values"]
+
+from ._sorting_functions import argsort, sort
+
+__all__ += ["argsort", "sort"]
+
+from ._statistical_functions import max, mean, min, prod, std, sum, var
+
+__all__ += ["max", "mean", "min", "prod", "std", "sum", "var"]
+
+from ._utility_functions import all, any
+
+__all__ += ["all", "any"]
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_array_object.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_array_object.py
new file mode 100644
index 00000000..5aff9863
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_array_object.py
@@ -0,0 +1,1133 @@
+"""
+Wrapper class around the ndarray object for the array API standard.
+
+The array API standard defines some behaviors differently than ndarray, in
+particular, type promotion rules are different (the standard has no
+value-based casting). The standard also specifies a more limited subset of
+array methods and functionalities than are implemented on ndarray. Since the
+goal of the array_api namespace is to be a minimal implementation of the array
+API standard, we need to define a separate wrapper class for the array_api
+namespace.
+
+The standard compliant class is only a wrapper class. It is *not* a subclass
+of ndarray.
+"""
+
+from __future__ import annotations
+
+import operator
+from enum import IntEnum
+from ._creation_functions import asarray
+from ._dtypes import (
+ _all_dtypes,
+ _boolean_dtypes,
+ _integer_dtypes,
+ _integer_or_boolean_dtypes,
+ _floating_dtypes,
+ _complex_floating_dtypes,
+ _numeric_dtypes,
+ _result_type,
+ _dtype_categories,
+)
+
+from typing import TYPE_CHECKING, Optional, Tuple, Union, Any, SupportsIndex
+import types
+
+if TYPE_CHECKING:
+ from ._typing import Any, PyCapsule, Device, Dtype
+ import numpy.typing as npt
+
+import numpy as np
+
+from numpy import array_api
+
+
+class Array:
+ """
+ n-d array object for the array API namespace.
+
+ See the docstring of :py:obj:`np.ndarray <numpy.ndarray>` for more
+ information.
+
+ This is a wrapper around numpy.ndarray that restricts the usage to only
+ those things that are required by the array API namespace. Note,
+ attributes on this object that start with a single underscore are not part
+ of the API specification and should only be used internally. This object
+ should not be constructed directly. Rather, use one of the creation
+ functions, such as asarray().
+
+ """
+ _array: np.ndarray[Any, Any]
+
+ # Use a custom constructor instead of __init__, as manually initializing
+ # this class is not supported API.
+ @classmethod
+ def _new(cls, x, /):
+ """
+ This is a private method for initializing the array API Array
+ object.
+
+ Functions outside of the array_api submodule should not use this
+ method. Use one of the creation functions instead, such as
+ ``asarray``.
+
+ """
+ obj = super().__new__(cls)
+ # Note: The spec does not have array scalars, only 0-D arrays.
+ if isinstance(x, np.generic):
+ # Convert the array scalar to a 0-D array
+ x = np.asarray(x)
+ if x.dtype not in _all_dtypes:
+ raise TypeError(
+ f"The array_api namespace does not support the dtype '{x.dtype}'"
+ )
+ obj._array = x
+ return obj
+
+ # Prevent Array() from working
+ def __new__(cls, *args, **kwargs):
+ raise TypeError(
+ "The array_api Array object should not be instantiated directly. Use an array creation function, such as asarray(), instead."
+ )
+
+ # These functions are not required by the spec, but are implemented for
+ # the sake of usability.
+
+ def __str__(self: Array, /) -> str:
+ """
+ Performs the operation __str__.
+ """
+ return self._array.__str__().replace("array", "Array")
+
+ def __repr__(self: Array, /) -> str:
+ """
+ Performs the operation __repr__.
+ """
+ suffix = f", dtype={self.dtype.name})"
+ if 0 in self.shape:
+ prefix = "empty("
+ mid = str(self.shape)
+ else:
+ prefix = "Array("
+ mid = np.array2string(self._array, separator=', ', prefix=prefix, suffix=suffix)
+ return prefix + mid + suffix
+
+ # This function is not required by the spec, but we implement it here for
+ # convenience so that np.asarray(np.array_api.Array) will work.
+ def __array__(self, dtype: None | np.dtype[Any] = None) -> npt.NDArray[Any]:
+ """
+ Warning: this method is NOT part of the array API spec. Implementers
+ of other libraries need not include it, and users should not assume it
+ will be present in other implementations.
+
+ """
+ return np.asarray(self._array, dtype=dtype)
+
+ # These are various helper functions to make the array behavior match the
+ # spec in places where it either deviates from or is more strict than
+ # NumPy behavior
+
+ def _check_allowed_dtypes(self, other: bool | int | float | Array, dtype_category: str, op: str) -> Array:
+ """
+ Helper function for operators to only allow specific input dtypes
+
+ Use like
+
+ other = self._check_allowed_dtypes(other, 'numeric', '__add__')
+ if other is NotImplemented:
+ return other
+ """
+
+ if self.dtype not in _dtype_categories[dtype_category]:
+ raise TypeError(f"Only {dtype_category} dtypes are allowed in {op}")
+ if isinstance(other, (int, complex, float, bool)):
+ other = self._promote_scalar(other)
+ elif isinstance(other, Array):
+ if other.dtype not in _dtype_categories[dtype_category]:
+ raise TypeError(f"Only {dtype_category} dtypes are allowed in {op}")
+ else:
+ return NotImplemented
+
+ # This will raise TypeError for type combinations that are not allowed
+ # to promote in the spec (even if the NumPy array operator would
+ # promote them).
+ res_dtype = _result_type(self.dtype, other.dtype)
+ if op.startswith("__i"):
+ # Note: NumPy will allow in-place operators in some cases where
+ # the type promoted operator does not match the left-hand side
+ # operand. For example,
+
+ # >>> a = np.array(1, dtype=np.int8)
+ # >>> a += np.array(1, dtype=np.int16)
+
+ # The spec explicitly disallows this.
+ if res_dtype != self.dtype:
+ raise TypeError(
+ f"Cannot perform {op} with dtypes {self.dtype} and {other.dtype}"
+ )
+
+ return other
+
+ # Helper function to match the type promotion rules in the spec
+ def _promote_scalar(self, scalar):
+ """
+ Returns a promoted version of a Python scalar appropriate for use with
+ operations on self.
+
+ This may raise an OverflowError in cases where the scalar is an
+ integer that is too large to fit in a NumPy integer dtype, or
+ TypeError when the scalar type is incompatible with the dtype of self.
+ """
+ # Note: Only Python scalar types that match the array dtype are
+ # allowed.
+ if isinstance(scalar, bool):
+ if self.dtype not in _boolean_dtypes:
+ raise TypeError(
+ "Python bool scalars can only be promoted with bool arrays"
+ )
+ elif isinstance(scalar, int):
+ if self.dtype in _boolean_dtypes:
+ raise TypeError(
+ "Python int scalars cannot be promoted with bool arrays"
+ )
+ if self.dtype in _integer_dtypes:
+ info = np.iinfo(self.dtype)
+ if not (info.min <= scalar <= info.max):
+ raise OverflowError(
+ "Python int scalars must be within the bounds of the dtype for integer arrays"
+ )
+ # int + array(floating) is allowed
+ elif isinstance(scalar, float):
+ if self.dtype not in _floating_dtypes:
+ raise TypeError(
+ "Python float scalars can only be promoted with floating-point arrays."
+ )
+ elif isinstance(scalar, complex):
+ if self.dtype not in _complex_floating_dtypes:
+ raise TypeError(
+ "Python complex scalars can only be promoted with complex floating-point arrays."
+ )
+ else:
+ raise TypeError("'scalar' must be a Python scalar")
+
+ # Note: scalars are unconditionally cast to the same dtype as the
+ # array.
+
+ # Note: the spec only specifies integer-dtype/int promotion
+ # behavior for integers within the bounds of the integer dtype.
+ # Outside of those bounds we use the default NumPy behavior (either
+ # cast or raise OverflowError).
+ return Array._new(np.array(scalar, self.dtype))
+
+ @staticmethod
+ def _normalize_two_args(x1, x2) -> Tuple[Array, Array]:
+ """
+ Normalize inputs to two arg functions to fix type promotion rules
+
+ NumPy deviates from the spec type promotion rules in cases where one
+ argument is 0-dimensional and the other is not. For example:
+
+ >>> import numpy as np
+ >>> a = np.array([1.0], dtype=np.float32)
+ >>> b = np.array(1.0, dtype=np.float64)
+ >>> np.add(a, b) # The spec says this should be float64
+ array([2.], dtype=float32)
+
+ To fix this, we add a dimension to the 0-dimension array before passing it
+ through. This works because a dimension would be added anyway from
+ broadcasting, so the resulting shape is the same, but this prevents NumPy
+ from not promoting the dtype.
+ """
+ # Another option would be to use signature=(x1.dtype, x2.dtype, None),
+ # but that only works for ufuncs, so we would have to call the ufuncs
+ # directly in the operator methods. One should also note that this
+ # sort of trick wouldn't work for functions like searchsorted, which
+ # don't do normal broadcasting, but there aren't any functions like
+ # that in the array API namespace.
+ if x1.ndim == 0 and x2.ndim != 0:
+ # The _array[None] workaround was chosen because it is relatively
+ # performant. broadcast_to(x1._array, x2.shape) is much slower. We
+ # could also manually type promote x2, but that is more complicated
+ # and about the same performance as this.
+ x1 = Array._new(x1._array[None])
+ elif x2.ndim == 0 and x1.ndim != 0:
+ x2 = Array._new(x2._array[None])
+ return (x1, x2)
+
+ # Note: A large fraction of allowed indices are disallowed here (see the
+ # docstring below)
+ def _validate_index(self, key):
+ """
+ Validate an index according to the array API.
+
+ The array API specification only requires a subset of indices that are
+ supported by NumPy. This function will reject any index that is
+ allowed by NumPy but not required by the array API specification. We
+ always raise ``IndexError`` on such indices (the spec does not require
+ any specific behavior on them, but this makes the NumPy array API
+ namespace a minimal implementation of the spec). See
+ https://data-apis.org/array-api/latest/API_specification/indexing.html
+ for the full list of required indexing behavior
+
+ This function raises IndexError if the index ``key`` is invalid. It
+ only raises ``IndexError`` on indices that are not already rejected by
+ NumPy, as NumPy will already raise the appropriate error on such
+ indices. Checks that depend on the array shape are performed against
+ ``self.shape`` directly.
+
+ The following cases are allowed by NumPy, but not specified by the array
+ API specification:
+
+ - Indices must not include an implicit ellipsis at the end. That is,
+ every axis of an array must be explicitly indexed or an ellipsis
+ included. This behaviour is sometimes referred to as flat indexing.
+
+ - The start and stop of a slice may not be out of bounds. In
+ particular, for a slice ``i:j:k`` on an axis of size ``n``, only the
+ following are allowed:
+
+ - ``i`` or ``j`` omitted (``None``).
+ - ``-n <= i <= max(0, n - 1)``.
+ - For ``k > 0`` or ``k`` omitted (``None``), ``-n <= j <= n``.
+ - For ``k < 0``, ``-n - 1 <= j <= max(0, n - 1)``.
+
+ - Boolean array indices are not allowed as part of a larger tuple
+ index.
+
+ - Integer array indices are not allowed (with the exception of 0-D
+ arrays, which are treated the same as scalars).
+
+ Additionally, it should be noted that indices that would return a
+ scalar in NumPy will return a 0-D array. Array scalars are not allowed
+ in the specification, only 0-D arrays. This is done in the
+ ``Array._new`` constructor, not this function.
+
+ """
+ _key = key if isinstance(key, tuple) else (key,)
+ for i in _key:
+ if isinstance(i, bool) or not (
+ isinstance(i, SupportsIndex) # i.e. ints
+ or isinstance(i, slice)
+ or i == Ellipsis
+ or i is None
+ or isinstance(i, Array)
+ or isinstance(i, np.ndarray)
+ ):
+ raise IndexError(
+ f"Single-axes index {i} has {type(i)=}, but only "
+ "integers, slices (:), ellipsis (...), newaxis (None), "
+ "zero-dimensional integer arrays and boolean arrays "
+ "are specified in the Array API."
+ )
+
+ nonexpanding_key = []
+ single_axes = []
+ n_ellipsis = 0
+ key_has_mask = False
+ for i in _key:
+ if i is not None:
+ nonexpanding_key.append(i)
+ if isinstance(i, Array) or isinstance(i, np.ndarray):
+ if i.dtype in _boolean_dtypes:
+ key_has_mask = True
+ single_axes.append(i)
+ else:
+ # i must not be an array here, to avoid elementwise equals
+ if i == Ellipsis:
+ n_ellipsis += 1
+ else:
+ single_axes.append(i)
+
+ n_single_axes = len(single_axes)
+ if n_ellipsis > 1:
+ return # handled by ndarray
+ elif n_ellipsis == 0:
+ # Note boolean masks must be the sole index, which we check for
+ # later on.
+ if not key_has_mask and n_single_axes < self.ndim:
+ raise IndexError(
+ f"{self.ndim=}, but the multi-axes index only specifies "
+ f"{n_single_axes} dimensions. If this was intentional, "
+ "add a trailing ellipsis (...) which expands into as many "
+ "slices (:) as necessary - this is what np.ndarray arrays "
+ "implicitly do, but such flat indexing behaviour is not "
+ "specified in the Array API."
+ )
+
+ if n_ellipsis == 0:
+ indexed_shape = self.shape
+ else:
+ ellipsis_start = None
+ for pos, i in enumerate(nonexpanding_key):
+ if not (isinstance(i, Array) or isinstance(i, np.ndarray)):
+ if i == Ellipsis:
+ ellipsis_start = pos
+ break
+ assert ellipsis_start is not None # sanity check
+ ellipsis_end = self.ndim - (n_single_axes - ellipsis_start)
+ indexed_shape = (
+ self.shape[:ellipsis_start] + self.shape[ellipsis_end:]
+ )
+ for i, side in zip(single_axes, indexed_shape):
+ if isinstance(i, slice):
+ if side == 0:
+ f_range = "0 (or None)"
+ else:
+ f_range = f"between -{side} and {side - 1} (or None)"
+ if i.start is not None:
+ try:
+ start = operator.index(i.start)
+ except TypeError:
+ pass # handled by ndarray
+ else:
+ if not (-side <= start <= side):
+ raise IndexError(
+ f"Slice {i} contains {start=}, but should be "
+ f"{f_range} for an axis of size {side} "
+ "(out-of-bounds starts are not specified in "
+ "the Array API)"
+ )
+ if i.stop is not None:
+ try:
+ stop = operator.index(i.stop)
+ except TypeError:
+ pass # handled by ndarray
+ else:
+ if not (-side <= stop <= side):
+ raise IndexError(
+ f"Slice {i} contains {stop=}, but should be "
+ f"{f_range} for an axis of size {side} "
+ "(out-of-bounds stops are not specified in "
+ "the Array API)"
+ )
+ elif isinstance(i, Array):
+ if i.dtype in _boolean_dtypes and len(_key) != 1:
+ assert isinstance(key, tuple) # sanity check
+ raise IndexError(
+ f"Single-axes index {i} is a boolean array and "
+ f"{len(key)=}, but masking is only specified in the "
+ "Array API when the array is the sole index."
+ )
+ elif i.dtype in _integer_dtypes and i.ndim != 0:
+ raise IndexError(
+ f"Single-axes index {i} is a non-zero-dimensional "
+ "integer array, but advanced integer indexing is not "
+ "specified in the Array API."
+ )
+ elif isinstance(i, tuple):
+ raise IndexError(
+ f"Single-axes index {i} is a tuple, but nested tuple "
+ "indices are not specified in the Array API."
+ )
+
+ # Everything below this line is required by the spec.
+
+ def __abs__(self: Array, /) -> Array:
+ """
+ Performs the operation __abs__.
+ """
+ if self.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in __abs__")
+ res = self._array.__abs__()
+ return self.__class__._new(res)
+
+ def __add__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __add__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__add__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__add__(other._array)
+ return self.__class__._new(res)
+
+ def __and__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __and__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__and__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__and__(other._array)
+ return self.__class__._new(res)
+
+ def __array_namespace__(
+ self: Array, /, *, api_version: Optional[str] = None
+ ) -> types.ModuleType:
+ if api_version is not None and not api_version.startswith("2021."):
+ raise ValueError(f"Unrecognized array API version: {api_version!r}")
+ return array_api
+
+ def __bool__(self: Array, /) -> bool:
+ """
+ Performs the operation __bool__.
+ """
+ # Note: This is an error here.
+ if self._array.ndim != 0:
+ raise TypeError("bool is only allowed on arrays with 0 dimensions")
+ res = self._array.__bool__()
+ return res
+
+ def __complex__(self: Array, /) -> complex:
+ """
+ Performs the operation __complex__.
+ """
+ # Note: This is an error here.
+ if self._array.ndim != 0:
+ raise TypeError("complex is only allowed on arrays with 0 dimensions")
+ res = self._array.__complex__()
+ return res
+
+ def __dlpack__(self: Array, /, *, stream: None = None) -> PyCapsule:
+ """
+ Performs the operation __dlpack__.
+ """
+ return self._array.__dlpack__(stream=stream)
+
+ def __dlpack_device__(self: Array, /) -> Tuple[IntEnum, int]:
+ """
+ Performs the operation __dlpack_device__.
+ """
+ # Note: device support is required for this
+ return self._array.__dlpack_device__()
+
+ def __eq__(self: Array, other: Union[int, float, bool, Array], /) -> Array:
+ """
+ Performs the operation __eq__.
+ """
+ # Even though "all" dtypes are allowed, we still require them to be
+ # promotable with each other.
+ other = self._check_allowed_dtypes(other, "all", "__eq__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__eq__(other._array)
+ return self.__class__._new(res)
+
+ def __float__(self: Array, /) -> float:
+ """
+ Performs the operation __float__.
+ """
+ # Note: This is an error here.
+ if self._array.ndim != 0:
+ raise TypeError("float is only allowed on arrays with 0 dimensions")
+ if self.dtype in _complex_floating_dtypes:
+ raise TypeError("float is not allowed on complex floating-point arrays")
+ res = self._array.__float__()
+ return res
+
+ def __floordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __floordiv__.
+ """
+ other = self._check_allowed_dtypes(other, "real numeric", "__floordiv__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__floordiv__(other._array)
+ return self.__class__._new(res)
+
+ def __ge__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __ge__.
+ """
+ other = self._check_allowed_dtypes(other, "real numeric", "__ge__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__ge__(other._array)
+ return self.__class__._new(res)
+
+ def __getitem__(
+ self: Array,
+ key: Union[
+ int,
+ slice,
+ ellipsis,
+ Tuple[Union[int, slice, ellipsis, None], ...],
+ Array,
+ ],
+ /,
+ ) -> Array:
+ """
+ Performs the operation __getitem__.
+ """
+ # Note: Only indices required by the spec are allowed. See the
+ # docstring of _validate_index
+ self._validate_index(key)
+ if isinstance(key, Array):
+ # Indexing self._array with array_api arrays can be erroneous
+ key = key._array
+ res = self._array.__getitem__(key)
+ return self._new(res)
+
+ def __gt__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __gt__.
+ """
+ other = self._check_allowed_dtypes(other, "real numeric", "__gt__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__gt__(other._array)
+ return self.__class__._new(res)
+
+ def __int__(self: Array, /) -> int:
+ """
+ Performs the operation __int__.
+ """
+ # Note: This is an error here.
+ if self._array.ndim != 0:
+ raise TypeError("int is only allowed on arrays with 0 dimensions")
+ if self.dtype in _complex_floating_dtypes:
+ raise TypeError("int is not allowed on complex floating-point arrays")
+ res = self._array.__int__()
+ return res
+
+ def __index__(self: Array, /) -> int:
+ """
+ Performs the operation __index__.
+ """
+ res = self._array.__index__()
+ return res
+
+ def __invert__(self: Array, /) -> Array:
+ """
+ Performs the operation __invert__.
+ """
+ if self.dtype not in _integer_or_boolean_dtypes:
+ raise TypeError("Only integer or boolean dtypes are allowed in __invert__")
+ res = self._array.__invert__()
+ return self.__class__._new(res)
+
+ def __le__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __le__.
+ """
+ other = self._check_allowed_dtypes(other, "real numeric", "__le__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__le__(other._array)
+ return self.__class__._new(res)
+
+ def __lshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __lshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__lshift__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__lshift__(other._array)
+ return self.__class__._new(res)
+
+ def __lt__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __lt__.
+ """
+ other = self._check_allowed_dtypes(other, "real numeric", "__lt__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__lt__(other._array)
+ return self.__class__._new(res)
+
+ def __matmul__(self: Array, other: Array, /) -> Array:
+ """
+ Performs the operation __matmul__.
+ """
+ # matmul is not defined for scalars, but without this, we may get
+ # the wrong error message from asarray.
+ other = self._check_allowed_dtypes(other, "numeric", "__matmul__")
+ if other is NotImplemented:
+ return other
+ res = self._array.__matmul__(other._array)
+ return self.__class__._new(res)
+
+ def __mod__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __mod__.
+ """
+ other = self._check_allowed_dtypes(other, "real numeric", "__mod__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__mod__(other._array)
+ return self.__class__._new(res)
+
+ def __mul__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __mul__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__mul__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__mul__(other._array)
+ return self.__class__._new(res)
+
+ def __ne__(self: Array, other: Union[int, float, bool, Array], /) -> Array:
+ """
+ Performs the operation __ne__.
+ """
+ other = self._check_allowed_dtypes(other, "all", "__ne__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__ne__(other._array)
+ return self.__class__._new(res)
+
+ def __neg__(self: Array, /) -> Array:
+ """
+ Performs the operation __neg__.
+ """
+ if self.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in __neg__")
+ res = self._array.__neg__()
+ return self.__class__._new(res)
+
+ def __or__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __or__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__or__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__or__(other._array)
+ return self.__class__._new(res)
+
+ def __pos__(self: Array, /) -> Array:
+ """
+ Performs the operation __pos__.
+ """
+ if self.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in __pos__")
+ res = self._array.__pos__()
+ return self.__class__._new(res)
+
+ def __pow__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __pow__.
+ """
+ from ._elementwise_functions import pow
+
+ other = self._check_allowed_dtypes(other, "numeric", "__pow__")
+ if other is NotImplemented:
+ return other
+ # Note: NumPy's __pow__ does not follow type promotion rules for 0-d
+ # arrays, so we use pow() here instead.
+ return pow(self, other)
+
+ def __rshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __rshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__rshift__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rshift__(other._array)
+ return self.__class__._new(res)
+
+ def __setitem__(
+ self,
+ key: Union[
+ int, slice, ellipsis, Tuple[Union[int, slice, ellipsis], ...], Array
+ ],
+ value: Union[int, float, bool, Array],
+ /,
+ ) -> None:
+ """
+ Performs the operation __setitem__.
+ """
+ # Note: Only indices required by the spec are allowed. See the
+ # docstring of _validate_index
+ self._validate_index(key)
+ if isinstance(key, Array):
+ # Indexing self._array with array_api arrays can be erroneous
+ key = key._array
+ self._array.__setitem__(key, asarray(value)._array)
+
+ def __sub__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __sub__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__sub__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__sub__(other._array)
+ return self.__class__._new(res)
+
+ # PEP 484 requires int to be a subtype of float, but __truediv__ should
+ # not accept int.
+ def __truediv__(self: Array, other: Union[float, Array], /) -> Array:
+ """
+ Performs the operation __truediv__.
+ """
+ other = self._check_allowed_dtypes(other, "floating-point", "__truediv__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__truediv__(other._array)
+ return self.__class__._new(res)
+
+ def __xor__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __xor__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__xor__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__xor__(other._array)
+ return self.__class__._new(res)
+
+ def __iadd__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __iadd__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__iadd__")
+ if other is NotImplemented:
+ return other
+ self._array.__iadd__(other._array)
+ return self
+
+ def __radd__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __radd__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__radd__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__radd__(other._array)
+ return self.__class__._new(res)
+
+ def __iand__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __iand__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__iand__")
+ if other is NotImplemented:
+ return other
+ self._array.__iand__(other._array)
+ return self
+
+ def __rand__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __rand__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__rand__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rand__(other._array)
+ return self.__class__._new(res)
+
+ def __ifloordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __ifloordiv__.
+ """
+ other = self._check_allowed_dtypes(other, "real numeric", "__ifloordiv__")
+ if other is NotImplemented:
+ return other
+ self._array.__ifloordiv__(other._array)
+ return self
+
+ def __rfloordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __rfloordiv__.
+ """
+ other = self._check_allowed_dtypes(other, "real numeric", "__rfloordiv__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rfloordiv__(other._array)
+ return self.__class__._new(res)
+
+ def __ilshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __ilshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__ilshift__")
+ if other is NotImplemented:
+ return other
+ self._array.__ilshift__(other._array)
+ return self
+
+ def __rlshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __rlshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__rlshift__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rlshift__(other._array)
+ return self.__class__._new(res)
+
+ def __imatmul__(self: Array, other: Array, /) -> Array:
+ """
+ Performs the operation __imatmul__.
+ """
+ # matmul is not defined for scalars, but without this, we may get
+ # the wrong error message from asarray.
+ other = self._check_allowed_dtypes(other, "numeric", "__imatmul__")
+ if other is NotImplemented:
+ return other
+ res = self._array.__imatmul__(other._array)
+ return self.__class__._new(res)
+
+ def __rmatmul__(self: Array, other: Array, /) -> Array:
+ """
+ Performs the operation __rmatmul__.
+ """
+ # matmul is not defined for scalars, but without this, we may get
+ # the wrong error message from asarray.
+ other = self._check_allowed_dtypes(other, "numeric", "__rmatmul__")
+ if other is NotImplemented:
+ return other
+ res = self._array.__rmatmul__(other._array)
+ return self.__class__._new(res)
+
+ def __imod__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __imod__.
+ """
+ other = self._check_allowed_dtypes(other, "real numeric", "__imod__")
+ if other is NotImplemented:
+ return other
+ self._array.__imod__(other._array)
+ return self
+
+ def __rmod__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __rmod__.
+ """
+ other = self._check_allowed_dtypes(other, "real numeric", "__rmod__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rmod__(other._array)
+ return self.__class__._new(res)
+
+ def __imul__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __imul__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__imul__")
+ if other is NotImplemented:
+ return other
+ self._array.__imul__(other._array)
+ return self
+
+ def __rmul__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __rmul__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__rmul__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rmul__(other._array)
+ return self.__class__._new(res)
+
+ def __ior__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __ior__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__ior__")
+ if other is NotImplemented:
+ return other
+ self._array.__ior__(other._array)
+ return self
+
+ def __ror__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __ror__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__ror__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__ror__(other._array)
+ return self.__class__._new(res)
+
+ def __ipow__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __ipow__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__ipow__")
+ if other is NotImplemented:
+ return other
+ self._array.__ipow__(other._array)
+ return self
+
+ def __rpow__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __rpow__.
+ """
+ from ._elementwise_functions import pow
+
+ other = self._check_allowed_dtypes(other, "numeric", "__rpow__")
+ if other is NotImplemented:
+ return other
+ # Note: NumPy's __pow__ does not follow the spec type promotion rules
+ # for 0-d arrays, so we use pow() here instead.
+ return pow(other, self)
+
+ def __irshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __irshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__irshift__")
+ if other is NotImplemented:
+ return other
+ self._array.__irshift__(other._array)
+ return self
+
+ def __rrshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __rrshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__rrshift__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rrshift__(other._array)
+ return self.__class__._new(res)
+
+ def __isub__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __isub__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__isub__")
+ if other is NotImplemented:
+ return other
+ self._array.__isub__(other._array)
+ return self
+
+ def __rsub__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __rsub__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__rsub__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rsub__(other._array)
+ return self.__class__._new(res)
+
+ def __itruediv__(self: Array, other: Union[float, Array], /) -> Array:
+ """
+ Performs the operation __itruediv__.
+ """
+ other = self._check_allowed_dtypes(other, "floating-point", "__itruediv__")
+ if other is NotImplemented:
+ return other
+ self._array.__itruediv__(other._array)
+ return self
+
+ def __rtruediv__(self: Array, other: Union[float, Array], /) -> Array:
+ """
+ Performs the operation __rtruediv__.
+ """
+ other = self._check_allowed_dtypes(other, "floating-point", "__rtruediv__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rtruediv__(other._array)
+ return self.__class__._new(res)
+
+ def __ixor__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __ixor__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__ixor__")
+ if other is NotImplemented:
+ return other
+ self._array.__ixor__(other._array)
+ return self
+
+ def __rxor__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __rxor__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__rxor__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rxor__(other._array)
+ return self.__class__._new(res)
+
+ def to_device(self: Array, device: Device, /, stream: None = None) -> Array:
+ if stream is not None:
+ raise ValueError("The stream argument to to_device() is not supported")
+ if device == 'cpu':
+ return self
+ raise ValueError(f"Unsupported device {device!r}")
+
+ @property
+ def dtype(self) -> Dtype:
+ """
+ Array API compatible wrapper for :py:meth:`np.ndarray.dtype `.
+
+ See its docstring for more information.
+ """
+ return self._array.dtype
+
    @property
    def device(self) -> Device:
        # This implementation is CPU-only, so the device is always "cpu".
        return "cpu"
+
    # Note: mT is new in array API spec (see matrix_transpose)
    @property
    def mT(self) -> Array:
        # Matrix transpose: transposes the last two axes, so it works on
        # stacks of matrices (unlike .T, which requires exactly 2 dimensions).
        from .linalg import matrix_transpose
        return matrix_transpose(self)
+
+ @property
+ def ndim(self) -> int:
+ """
+ Array API compatible wrapper for :py:meth:`np.ndarray.ndim `.
+
+ See its docstring for more information.
+ """
+ return self._array.ndim
+
+ @property
+ def shape(self) -> Tuple[int, ...]:
+ """
+ Array API compatible wrapper for :py:meth:`np.ndarray.shape `.
+
+ See its docstring for more information.
+ """
+ return self._array.shape
+
+ @property
+ def size(self) -> int:
+ """
+ Array API compatible wrapper for :py:meth:`np.ndarray.size `.
+
+ See its docstring for more information.
+ """
+ return self._array.size
+
+ @property
+ def T(self) -> Array:
+ """
+ Array API compatible wrapper for :py:meth:`np.ndarray.T `.
+
+ See its docstring for more information.
+ """
+ # Note: T only works on 2-dimensional arrays. See the corresponding
+ # note in the specification:
+ # https://data-apis.org/array-api/latest/API_specification/array_object.html#t
+ if self.ndim != 2:
+ raise ValueError("x.T requires x to have 2 dimensions. Use x.mT to transpose stacks of matrices and permute_dims() to permute dimensions.")
+ return self.__class__._new(self._array.T)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_constants.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_constants.py
new file mode 100644
index 00000000..15ab81d1
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_constants.py
@@ -0,0 +1,7 @@
# Mathematical and indexing constants required by the array API spec,
# re-exported from NumPy as plain Python objects.
import numpy as np

e = np.e  # Euler's number
inf = np.inf  # IEEE 754 positive infinity
nan = np.nan  # IEEE 754 Not-a-Number
pi = np.pi  # ratio of a circle's circumference to its diameter
newaxis = np.newaxis  # alias of None, used in indexing to insert a new axis
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_creation_functions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_creation_functions.py
new file mode 100644
index 00000000..3b014d37
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_creation_functions.py
@@ -0,0 +1,351 @@
+from __future__ import annotations
+
+
+from typing import TYPE_CHECKING, List, Optional, Tuple, Union
+
+if TYPE_CHECKING:
+ from ._typing import (
+ Array,
+ Device,
+ Dtype,
+ NestedSequence,
+ SupportsBufferProtocol,
+ )
+ from collections.abc import Sequence
+from ._dtypes import _all_dtypes
+
+import numpy as np
+
+
def _check_valid_dtype(dtype):
    """Raise ValueError unless ``dtype`` is None or one of the spec dtypes."""
    # Note: Only spelling dtypes as the dtype objects is supported.
    #
    # We compare with `is` instead of "dtype in _all_dtypes" because the dtype
    # objects define equality with the sorts of things we want to disallow.
    if not any(dtype is d for d in (None,) + _all_dtypes):
        raise ValueError("dtype must be one of the supported dtypes")
+
+
def asarray(
    obj: Union[
        Array,
        bool,
        int,
        float,
        NestedSequence[bool | int | float],
        SupportsBufferProtocol,
    ],
    /,
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
    copy: Optional[Union[bool, np._CopyMode]] = None,
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.asarray `.

    See its docstring for more information.
    """
    # _array_object imports in this file are inside the functions to avoid
    # circular imports
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device not in ("cpu", None):
        raise ValueError(f"Unsupported device {device!r}")
    if copy in (False, np._CopyMode.IF_NEEDED):
        # Note: copy=False is not yet implemented in np.asarray
        raise NotImplementedError("copy=False is not yet implemented")
    if isinstance(obj, Array):
        # Requesting a different dtype forces a copy.
        if dtype is not None and obj.dtype != dtype:
            copy = True
        if copy in (True, np._CopyMode.ALWAYS):
            return Array._new(np.array(obj._array, copy=True, dtype=dtype))
        return obj
    # Bug fix: the bound check was `obj > 2 ** 64`, but uint64's maximum is
    # 2**64 - 1, so 2**64 itself is also out of range for every supported
    # dtype and must be rejected (NumPy would otherwise silently produce an
    # object-dtype array).
    if dtype is None and isinstance(obj, int) and (obj >= 2 ** 64 or obj < -(2 ** 63)):
        # Give a better error message in this case. NumPy would convert this
        # to an object array. TODO: This won't handle large integers in lists.
        raise OverflowError("Integer out of bounds for array dtypes")
    res = np.asarray(obj, dtype=dtype)
    return Array._new(res)
+
+
def arange(
    start: Union[int, float],
    /,
    stop: Optional[Union[int, float]] = None,
    step: Union[int, float] = 1,
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.arange `.

    See its docstring for more information.
    """
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device not in ("cpu", None):
        raise ValueError(f"Unsupported device {device!r}")
    out = np.arange(start, stop=stop, step=step, dtype=dtype)
    return Array._new(out)
+
+
def empty(
    shape: Union[int, Tuple[int, ...]],
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.empty `.

    See its docstring for more information.
    """
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device not in ("cpu", None):
        raise ValueError(f"Unsupported device {device!r}")
    out = np.empty(shape, dtype=dtype)
    return Array._new(out)
+
+
def empty_like(
    x: Array, /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.empty_like `.

    See its docstring for more information.
    """
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device not in ("cpu", None):
        raise ValueError(f"Unsupported device {device!r}")
    out = np.empty_like(x._array, dtype=dtype)
    return Array._new(out)
+
+
def eye(
    n_rows: int,
    n_cols: Optional[int] = None,
    /,
    *,
    k: int = 0,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.eye `.

    See its docstring for more information.
    """
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device not in ("cpu", None):
        raise ValueError(f"Unsupported device {device!r}")
    out = np.eye(n_rows, M=n_cols, k=k, dtype=dtype)
    return Array._new(out)
+
+
def from_dlpack(x: object, /) -> Array:
    """Construct an Array from any object implementing the DLPack protocol."""
    from ._array_object import Array

    out = np.from_dlpack(x)
    return Array._new(out)
+
+
def full(
    shape: Union[int, Tuple[int, ...]],
    fill_value: Union[int, float],
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.full `.

    See its docstring for more information.
    """
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device not in ("cpu", None):
        raise ValueError(f"Unsupported device {device!r}")
    if isinstance(fill_value, Array) and fill_value.ndim == 0:
        # Unwrap a 0-D Array fill value so np.full sees the NumPy scalar.
        fill_value = fill_value._array
    result = np.full(shape, fill_value, dtype=dtype)
    if result.dtype not in _all_dtypes:
        # This will happen if the fill value is not something that NumPy
        # coerces to one of the acceptable dtypes.
        raise TypeError("Invalid input to full")
    return Array._new(result)
+
+
def full_like(
    x: Array,
    /,
    fill_value: Union[int, float],
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.full_like `.

    See its docstring for more information.
    """
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device not in ("cpu", None):
        raise ValueError(f"Unsupported device {device!r}")
    result = np.full_like(x._array, fill_value, dtype=dtype)
    if result.dtype not in _all_dtypes:
        # This will happen if the fill value is not something that NumPy
        # coerces to one of the acceptable dtypes.
        raise TypeError("Invalid input to full_like")
    return Array._new(result)
+
+
def linspace(
    start: Union[int, float],
    stop: Union[int, float],
    /,
    num: int,
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
    endpoint: bool = True,
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.linspace `.

    See its docstring for more information.
    """
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device not in ("cpu", None):
        raise ValueError(f"Unsupported device {device!r}")
    out = np.linspace(start, stop, num, dtype=dtype, endpoint=endpoint)
    return Array._new(out)
+
+
def meshgrid(*arrays: Array, indexing: str = "xy") -> List[Array]:
    """
    Array API compatible wrapper for :py:func:`np.meshgrid `.

    See its docstring for more information.
    """
    from ._array_object import Array

    # Note: unlike np.meshgrid, only inputs with all the same dtype are
    # allowed
    dtypes = {a.dtype for a in arrays}
    if len(dtypes) > 1:
        raise ValueError("meshgrid inputs must all have the same dtype")

    grids = np.meshgrid(*[a._array for a in arrays], indexing=indexing)
    return [Array._new(grid) for grid in grids]
+
+
def ones(
    shape: Union[int, Tuple[int, ...]],
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.ones `.

    See its docstring for more information.
    """
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device not in ("cpu", None):
        raise ValueError(f"Unsupported device {device!r}")
    out = np.ones(shape, dtype=dtype)
    return Array._new(out)
+
+
def ones_like(
    x: Array, /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.ones_like `.

    See its docstring for more information.
    """
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device not in ("cpu", None):
        raise ValueError(f"Unsupported device {device!r}")
    out = np.ones_like(x._array, dtype=dtype)
    return Array._new(out)
+
+
def tril(x: Array, /, *, k: int = 0) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.tril `.

    See its docstring for more information.
    """
    from ._array_object import Array

    # Note: Unlike np.tril, x must be at least 2-D
    if x.ndim <= 1:
        raise ValueError("x must be at least 2-dimensional for tril")
    return Array._new(np.tril(x._array, k=k))
+
+
def triu(x: Array, /, *, k: int = 0) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.triu `.

    See its docstring for more information.
    """
    from ._array_object import Array

    # Note: Unlike np.triu, x must be at least 2-D
    if x.ndim <= 1:
        raise ValueError("x must be at least 2-dimensional for triu")
    return Array._new(np.triu(x._array, k=k))
+
+
def zeros(
    shape: Union[int, Tuple[int, ...]],
    *,
    dtype: Optional[Dtype] = None,
    device: Optional[Device] = None,
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.zeros `.

    See its docstring for more information.
    """
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device not in ("cpu", None):
        raise ValueError(f"Unsupported device {device!r}")
    out = np.zeros(shape, dtype=dtype)
    return Array._new(out)
+
+
def zeros_like(
    x: Array, /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None
) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.zeros_like `.

    See its docstring for more information.
    """
    from ._array_object import Array

    _check_valid_dtype(dtype)
    if device not in ("cpu", None):
        raise ValueError(f"Unsupported device {device!r}")
    out = np.zeros_like(x._array, dtype=dtype)
    return Array._new(out)
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_data_type_functions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_data_type_functions.py
new file mode 100644
index 00000000..6f972c3b
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_data_type_functions.py
@@ -0,0 +1,197 @@
+from __future__ import annotations
+
+from ._array_object import Array
+from ._dtypes import (
+ _all_dtypes,
+ _boolean_dtypes,
+ _signed_integer_dtypes,
+ _unsigned_integer_dtypes,
+ _integer_dtypes,
+ _real_floating_dtypes,
+ _complex_floating_dtypes,
+ _numeric_dtypes,
+ _result_type,
+)
+
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, List, Tuple, Union
+
+if TYPE_CHECKING:
+ from ._typing import Dtype
+ from collections.abc import Sequence
+
+import numpy as np
+
+
# Note: astype is a function, not an array method as in NumPy.
def astype(x: Array, dtype: Dtype, /, *, copy: bool = True) -> Array:
    """Cast ``x`` to ``dtype``; when ``copy`` is False and the dtype already
    matches, ``x`` itself is returned."""
    if dtype == x.dtype and not copy:
        return x
    return Array._new(x._array.astype(dtype=dtype, copy=copy))
+
+
def broadcast_arrays(*arrays: Array) -> List[Array]:
    """
    Array API compatible wrapper for :py:func:`np.broadcast_arrays `.

    See its docstring for more information.
    """
    from ._array_object import Array

    broadcasted = np.broadcast_arrays(*[a._array for a in arrays])
    return [Array._new(b) for b in broadcasted]
+
+
def broadcast_to(x: Array, /, shape: Tuple[int, ...]) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.broadcast_to `.

    See its docstring for more information.
    """
    from ._array_object import Array

    out = np.broadcast_to(x._array, shape)
    return Array._new(out)
+
+
def can_cast(from_: Union[Dtype, Array], to: Dtype, /) -> bool:
    """
    Array API compatible wrapper for :py:func:`np.can_cast `.

    See its docstring for more information.
    """
    if isinstance(from_, Array):
        from_ = from_.dtype
    elif from_ not in _all_dtypes:
        raise TypeError(f"{from_=}, but should be an array_api array or dtype")
    if to not in _all_dtypes:
        raise TypeError(f"{to=}, but should be a dtype")
    # Note: We avoid np.can_cast() as it has discrepancies with the array API,
    # since NumPy allows cross-kind casting (e.g., NumPy allows bool -> int8).
    # See https://github.com/numpy/numpy/issues/20870
    try:
        promoted = _result_type(from_, to)
    except TypeError:
        # _result_type() raises if the dtypes don't promote together
        return False
    # `from_` can (up)cast to `to` exactly when promoting the pair yields `to`.
    return promoted == to
+
+
# These are internal objects for the return types of finfo and iinfo, since
# the NumPy versions contain extra data that isn't part of the spec.
@dataclass
class finfo_object:
    # Number of bits occupied by the dtype.
    bits: int
    # Note: The types of the float data here are float, whereas in NumPy they
    # are scalars of the corresponding float dtype.
    eps: float
    max: float
    min: float
    smallest_normal: float
    # The dtype these limits describe.
    dtype: Dtype
+
+
@dataclass
class iinfo_object:
    # Number of bits occupied by the dtype.
    bits: int
    # Largest and smallest representable integers for the dtype.
    max: int
    min: int
    # The dtype these limits describe.
    dtype: Dtype
+
+
def finfo(type: Union[Dtype, Array], /) -> finfo_object:
    """
    Array API compatible wrapper for :py:func:`np.finfo `.

    See its docstring for more information.
    """
    info = np.finfo(type)
    # Note: The types of the float data here are float, whereas in NumPy they
    # are scalars of the corresponding float dtype.
    return finfo_object(
        bits=info.bits,
        eps=float(info.eps),
        max=float(info.max),
        min=float(info.min),
        smallest_normal=float(info.smallest_normal),
        dtype=info.dtype,
    )
+
+
def iinfo(type: Union[Dtype, Array], /) -> iinfo_object:
    """
    Array API compatible wrapper for :py:func:`np.iinfo `.

    See its docstring for more information.
    """
    info = np.iinfo(type)
    return iinfo_object(bits=info.bits, max=info.max, min=info.min, dtype=info.dtype)
+
+
# Note: isdtype is a new function from the 2022.12 array API specification.
def isdtype(
    dtype: Dtype, kind: Union[Dtype, str, Tuple[Union[Dtype, str], ...]]
) -> bool:
    """
    Returns a boolean indicating whether a provided dtype is of a specified data type ``kind``.

    See
    https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html
    for more details
    """
    if isinstance(kind, tuple):
        # Disallow nested tuples
        if any(isinstance(k, tuple) for k in kind):
            raise TypeError("'kind' must be a dtype, str, or tuple of dtypes and strs")
        return any(isdtype(dtype, k) for k in kind)
    if isinstance(kind, str):
        # Dispatch the spec-defined kind names through a lookup table.
        categories = {
            'bool': _boolean_dtypes,
            'signed integer': _signed_integer_dtypes,
            'unsigned integer': _unsigned_integer_dtypes,
            'integral': _integer_dtypes,
            'real floating': _real_floating_dtypes,
            'complex floating': _complex_floating_dtypes,
            'numeric': _numeric_dtypes,
        }
        if kind not in categories:
            raise ValueError(f"Unrecognized data type kind: {kind!r}")
        return dtype in categories[kind]
    if kind in _all_dtypes:
        return dtype == kind
    raise TypeError(f"'kind' must be a dtype, str, or tuple of dtypes and strs, not {type(kind).__name__}")
+
def result_type(*arrays_and_dtypes: Union[Array, Dtype]) -> Dtype:
    """
    Array API compatible wrapper for :py:func:`np.result_type `.

    See its docstring for more information.
    """
    # Note: we use a custom implementation that gives only the type promotions
    # required by the spec rather than using np.result_type. NumPy implements
    # too many extra type promotions like int64 + uint64 -> float64, and does
    # value-based casting on scalar arrays.
    dtypes = []
    for item in arrays_and_dtypes:
        if isinstance(item, Array):
            item = item.dtype
        elif isinstance(item, np.ndarray) or item not in _all_dtypes:
            raise TypeError("result_type() inputs must be array_api arrays or dtypes")
        dtypes.append(item)

    if not dtypes:
        raise ValueError("at least one array or dtype is required")
    # Fold the pairwise promotion rule over the inputs, left to right.
    promoted = dtypes[0]
    for d in dtypes[1:]:
        promoted = _result_type(promoted, d)
    return promoted
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_dtypes.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_dtypes.py
new file mode 100644
index 00000000..0e8f666e
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_dtypes.py
@@ -0,0 +1,180 @@
+import numpy as np
+
# Note: we use dtype objects instead of dtype classes. The spec does not
# require any behavior on dtypes other than equality.
int8 = np.dtype("int8")
int16 = np.dtype("int16")
int32 = np.dtype("int32")
int64 = np.dtype("int64")
uint8 = np.dtype("uint8")
uint16 = np.dtype("uint16")
uint32 = np.dtype("uint32")
uint64 = np.dtype("uint64")
float32 = np.dtype("float32")
float64 = np.dtype("float64")
complex64 = np.dtype("complex64")
complex128 = np.dtype("complex128")
# Note: This name is changed
# (the spec names the boolean dtype "bool"; this deliberately shadows the
# Python builtin within this module)
bool = np.dtype("bool")
+
# Groupings of the dtype objects above, used by the category table below and
# for direct membership tests throughout the package.
_all_dtypes = (
    int8,
    int16,
    int32,
    int64,
    uint8,
    uint16,
    uint32,
    uint64,
    float32,
    float64,
    complex64,
    complex128,
    bool,
)
_boolean_dtypes = (bool,)
# "real" groupings exclude the complex dtypes.
_real_floating_dtypes = (float32, float64)
_floating_dtypes = (float32, float64, complex64, complex128)
_complex_floating_dtypes = (complex64, complex128)
_integer_dtypes = (int8, int16, int32, int64, uint8, uint16, uint32, uint64)
_signed_integer_dtypes = (int8, int16, int32, int64)
_unsigned_integer_dtypes = (uint8, uint16, uint32, uint64)
_integer_or_boolean_dtypes = (
    bool,
    int8,
    int16,
    int32,
    int64,
    uint8,
    uint16,
    uint32,
    uint64,
)
_real_numeric_dtypes = (
    float32,
    float64,
    int8,
    int16,
    int32,
    int64,
    uint8,
    uint16,
    uint32,
    uint64,
)
_numeric_dtypes = (
    float32,
    float64,
    complex64,
    complex128,
    int8,
    int16,
    int32,
    int64,
    uint8,
    uint16,
    uint32,
    uint64,
)
+
# Map category names (as used by Array._check_allowed_dtypes and the
# elementwise functions) to the tuple of dtypes in that category.
_dtype_categories = {
    "all": _all_dtypes,
    "real numeric": _real_numeric_dtypes,
    "numeric": _numeric_dtypes,
    "integer": _integer_dtypes,
    "integer or boolean": _integer_or_boolean_dtypes,
    "boolean": _boolean_dtypes,
    # Bug fix: this previously pointed at _floating_dtypes, which includes
    # complex64/complex128; the spec's "real floating-point" category must
    # exclude the complex dtypes.
    "real floating-point": _real_floating_dtypes,
    "complex floating-point": _complex_floating_dtypes,
    "floating-point": _floating_dtypes,
}
+
+
# Note: the spec defines a restricted type promotion table compared to NumPy.
# In particular, cross-kind promotions like integer + float or boolean +
# integer are not allowed, even for functions that accept both kinds.
# Additionally, NumPy promotes signed integer + uint64 to float64, but this
# promotion is not allowed here. To be clear, Python scalar int objects are
# allowed to promote to floating-point dtypes, but only in array operators
# (see Array._promote_scalar) method in _array_object.py.
_promotion_table = {
    # signed integer + signed integer -> the wider of the two
    (int8, int8): int8,
    (int8, int16): int16,
    (int8, int32): int32,
    (int8, int64): int64,
    (int16, int8): int16,
    (int16, int16): int16,
    (int16, int32): int32,
    (int16, int64): int64,
    (int32, int8): int32,
    (int32, int16): int32,
    (int32, int32): int32,
    (int32, int64): int64,
    (int64, int8): int64,
    (int64, int16): int64,
    (int64, int32): int64,
    (int64, int64): int64,
    # unsigned integer + unsigned integer -> the wider of the two
    (uint8, uint8): uint8,
    (uint8, uint16): uint16,
    (uint8, uint32): uint32,
    (uint8, uint64): uint64,
    (uint16, uint8): uint16,
    (uint16, uint16): uint16,
    (uint16, uint32): uint32,
    (uint16, uint64): uint64,
    (uint32, uint8): uint32,
    (uint32, uint16): uint32,
    (uint32, uint32): uint32,
    (uint32, uint64): uint64,
    (uint64, uint8): uint64,
    (uint64, uint16): uint64,
    (uint64, uint32): uint64,
    (uint64, uint64): uint64,
    # mixed signed/unsigned -> the smallest signed dtype that can represent
    # both operands; note there is deliberately no entry pairing uint64 with
    # any signed dtype (no such signed dtype exists).
    (int8, uint8): int16,
    (int8, uint16): int32,
    (int8, uint32): int64,
    (int16, uint8): int16,
    (int16, uint16): int32,
    (int16, uint32): int64,
    (int32, uint8): int32,
    (int32, uint16): int32,
    (int32, uint32): int64,
    (int64, uint8): int64,
    (int64, uint16): int64,
    (int64, uint32): int64,
    (uint8, int8): int16,
    (uint16, int8): int32,
    (uint32, int8): int64,
    (uint8, int16): int16,
    (uint16, int16): int32,
    (uint32, int16): int64,
    (uint8, int32): int32,
    (uint16, int32): int32,
    (uint32, int32): int64,
    (uint8, int64): int64,
    (uint16, int64): int64,
    (uint32, int64): int64,
    # floating-point: widen, and complex wins over real of equal width
    (float32, float32): float32,
    (float32, float64): float64,
    (float64, float32): float64,
    (float64, float64): float64,
    (complex64, complex64): complex64,
    (complex64, complex128): complex128,
    (complex128, complex64): complex128,
    (complex128, complex128): complex128,
    (float32, complex64): complex64,
    (float32, complex128): complex128,
    (float64, complex64): complex128,
    (float64, complex128): complex128,
    (complex64, float32): complex64,
    (complex64, float64): complex128,
    (complex128, float32): complex128,
    (complex128, float64): complex128,
    # booleans only promote with booleans
    (bool, bool): bool,
}
+
+
def _result_type(type1, type2):
    """Return the promoted dtype for the pair, per the spec's promotion table."""
    try:
        return _promotion_table[type1, type2]
    except KeyError:
        raise TypeError(f"{type1} and {type2} cannot be type promoted together")
diff --git a/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_elementwise_functions.py b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_elementwise_functions.py
new file mode 100644
index 00000000..8b696772
--- /dev/null
+++ b/dbdpy-env/lib/python3.9/site-packages/numpy/array_api/_elementwise_functions.py
@@ -0,0 +1,765 @@
+from __future__ import annotations
+
+from ._dtypes import (
+ _boolean_dtypes,
+ _floating_dtypes,
+ _real_floating_dtypes,
+ _complex_floating_dtypes,
+ _integer_dtypes,
+ _integer_or_boolean_dtypes,
+ _real_numeric_dtypes,
+ _numeric_dtypes,
+ _result_type,
+)
+from ._array_object import Array
+
+import numpy as np
+
+
+def abs(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.abs `.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in abs")
+ return Array._new(np.abs(x._array))
+
+
+# Note: the function name is different here
+def acos(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.arccos `.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in acos")
+ return Array._new(np.arccos(x._array))
+
+
+# Note: the function name is different here
+def acosh(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.arccosh `.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in acosh")
+ return Array._new(np.arccosh(x._array))
+
+
+def add(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.add `.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in add")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.add(x1._array, x2._array))
+
+
+# Note: the function name is different here
+def asin(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.arcsin `.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in asin")
+ return Array._new(np.arcsin(x._array))
+
+
+# Note: the function name is different here
+def asinh(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.arcsinh `.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in asinh")
+ return Array._new(np.arcsinh(x._array))
+
+
+# Note: the function name is different here
+def atan(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.arctan `.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in atan")
+ return Array._new(np.arctan(x._array))
+
+
+# Note: the function name is different here
+def atan2(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.arctan2 `.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _real_floating_dtypes or x2.dtype not in _real_floating_dtypes:
+ raise TypeError("Only real floating-point dtypes are allowed in atan2")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.arctan2(x1._array, x2._array))
+
+
+# Note: the function name is different here
+def atanh(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.arctanh `.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in atanh")
+ return Array._new(np.arctanh(x._array))
+
+
+def bitwise_and(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.bitwise_and `.
+
+ See its docstring for more information.
+ """
+ if (
+ x1.dtype not in _integer_or_boolean_dtypes
+ or x2.dtype not in _integer_or_boolean_dtypes
+ ):
+ raise TypeError("Only integer or boolean dtypes are allowed in bitwise_and")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.bitwise_and(x1._array, x2._array))
+
+
+# Note: the function name is different here
+def bitwise_left_shift(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.left_shift `.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _integer_dtypes or x2.dtype not in _integer_dtypes:
+ raise TypeError("Only integer dtypes are allowed in bitwise_left_shift")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ # Note: bitwise_left_shift is only defined for x2 nonnegative.
+ if np.any(x2._array < 0):
+ raise ValueError("bitwise_left_shift(x1, x2) is only defined for x2 >= 0")
+ return Array._new(np.left_shift(x1._array, x2._array))
+
+
+# Note: the function name is different here
+def bitwise_invert(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.invert `.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _integer_or_boolean_dtypes:
+ raise TypeError("Only integer or boolean dtypes are allowed in bitwise_invert")
+ return Array._new(np.invert(x._array))
+
+
+def bitwise_or(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.bitwise_or `.
+
+ See its docstring for more information.
+ """
+ if (
+ x1.dtype not in _integer_or_boolean_dtypes
+ or x2.dtype not in _integer_or_boolean_dtypes
+ ):
+ raise TypeError("Only integer or boolean dtypes are allowed in bitwise_or")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.bitwise_or(x1._array, x2._array))
+
+
+# Note: the function name is different here
+def bitwise_right_shift(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.right_shift `.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _integer_dtypes or x2.dtype not in _integer_dtypes:
+ raise TypeError("Only integer dtypes are allowed in bitwise_right_shift")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ # Note: bitwise_right_shift is only defined for x2 nonnegative.
+ if np.any(x2._array < 0):
+ raise ValueError("bitwise_right_shift(x1, x2) is only defined for x2 >= 0")
+ return Array._new(np.right_shift(x1._array, x2._array))
+
+
+def bitwise_xor(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.bitwise_xor `.
+
+ See its docstring for more information.
+ """
+ if (
+ x1.dtype not in _integer_or_boolean_dtypes
+ or x2.dtype not in _integer_or_boolean_dtypes
+ ):
+ raise TypeError("Only integer or boolean dtypes are allowed in bitwise_xor")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.bitwise_xor(x1._array, x2._array))
+
+
+def ceil(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.ceil `.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _real_numeric_dtypes:
+ raise TypeError("Only real numeric dtypes are allowed in ceil")
+ if x.dtype in _integer_dtypes:
+ # Note: The return dtype of ceil is the same as the input
+ return x
+ return Array._new(np.ceil(x._array))
+
+
+def conj(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.conj `.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _complex_floating_dtypes:
+        raise TypeError("Only complex floating-point dtypes are allowed in conj")
+    return Array._new(np.conj(x._array))
+
+
+def cos(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.cos `.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in cos")
+ return Array._new(np.cos(x._array))
+
+
+def cosh(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.cosh `.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in cosh")
+ return Array._new(np.cosh(x._array))
+
+
+def divide(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.divide `.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _floating_dtypes or x2.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in divide")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.divide(x1._array, x2._array))
+
+
+def equal(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.equal `.
+
+ See its docstring for more information.
+ """
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.equal(x1._array, x2._array))
+
+
+def exp(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.exp `.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in exp")
+ return Array._new(np.exp(x._array))
+
+
+def expm1(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.expm1 `.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in expm1")
+ return Array._new(np.expm1(x._array))
+
+
+def floor(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.floor `.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _real_numeric_dtypes:
+ raise TypeError("Only real numeric dtypes are allowed in floor")
+ if x.dtype in _integer_dtypes:
+ # Note: The return dtype of floor is the same as the input
+ return x
+ return Array._new(np.floor(x._array))
+
+
+def floor_divide(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.floor_divide `.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _real_numeric_dtypes or x2.dtype not in _real_numeric_dtypes:
+ raise TypeError("Only real numeric dtypes are allowed in floor_divide")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.floor_divide(x1._array, x2._array))
+
+
+def greater(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.greater `.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _real_numeric_dtypes or x2.dtype not in _real_numeric_dtypes:
+ raise TypeError("Only real numeric dtypes are allowed in greater")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.greater(x1._array, x2._array))
+
+
+def greater_equal(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.greater_equal `.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _real_numeric_dtypes or x2.dtype not in _real_numeric_dtypes:
+ raise TypeError("Only real numeric dtypes are allowed in greater_equal")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.greater_equal(x1._array, x2._array))
+
+
+def imag(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.imag `.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _complex_floating_dtypes:
+        raise TypeError("Only complex floating-point dtypes are allowed in imag")
+    return Array._new(np.imag(x._array))
+
+
+def isfinite(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.isfinite `.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in isfinite")
+ return Array._new(np.isfinite(x._array))
+
+
+def isinf(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.isinf `.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in isinf")
+ return Array._new(np.isinf(x._array))
+
+
+def isnan(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.isnan `.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in isnan")
+ return Array._new(np.isnan(x._array))
+
+
+def less(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.less `.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _real_numeric_dtypes or x2.dtype not in _real_numeric_dtypes:
+ raise TypeError("Only real numeric dtypes are allowed in less")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.less(x1._array, x2._array))
+
+
+def less_equal(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.less_equal `.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _real_numeric_dtypes or x2.dtype not in _real_numeric_dtypes:
+ raise TypeError("Only real numeric dtypes are allowed in less_equal")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.less_equal(x1._array, x2._array))
+
+
+def log(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.log `.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in log")
+ return Array._new(np.log(x._array))
+
+
+def log1p(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.log1p `.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in log1p")
+ return Array._new(np.log1p(x._array))
+
+
+def log2(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.log2 `.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in log2")
+ return Array._new(np.log2(x._array))
+
+
+def log10(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.log10 `.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _floating_dtypes:
+ raise TypeError("Only floating-point dtypes are allowed in log10")
+ return Array._new(np.log10(x._array))
+
+
+def logaddexp(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.logaddexp `.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _real_floating_dtypes or x2.dtype not in _real_floating_dtypes:
+        raise TypeError("Only real floating-point dtypes are allowed in logaddexp")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.logaddexp(x1._array, x2._array))
+
+
+def logical_and(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.logical_and `.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _boolean_dtypes or x2.dtype not in _boolean_dtypes:
+ raise TypeError("Only boolean dtypes are allowed in logical_and")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.logical_and(x1._array, x2._array))
+
+
+def logical_not(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.logical_not `.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _boolean_dtypes:
+ raise TypeError("Only boolean dtypes are allowed in logical_not")
+ return Array._new(np.logical_not(x._array))
+
+
+def logical_or(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.logical_or `.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _boolean_dtypes or x2.dtype not in _boolean_dtypes:
+ raise TypeError("Only boolean dtypes are allowed in logical_or")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.logical_or(x1._array, x2._array))
+
+
+def logical_xor(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.logical_xor `.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _boolean_dtypes or x2.dtype not in _boolean_dtypes:
+ raise TypeError("Only boolean dtypes are allowed in logical_xor")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.logical_xor(x1._array, x2._array))
+
+
+def multiply(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.multiply `.
+
+ See its docstring for more information.
+ """
+ if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in multiply")
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.multiply(x1._array, x2._array))
+
+
+def negative(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.negative `.
+
+ See its docstring for more information.
+ """
+ if x.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in negative")
+ return Array._new(np.negative(x._array))
+
+
+def not_equal(x1: Array, x2: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.not_equal `.
+
+ See its docstring for more information.
+ """
+ # Call result type here just to raise on disallowed type combinations
+ _result_type(x1.dtype, x2.dtype)
+ x1, x2 = Array._normalize_two_args(x1, x2)
+ return Array._new(np.not_equal(x1._array, x2._array))
+
+
+def positive(x: Array, /) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.positive