diff --git a/pandas/_testing/_io.py b/pandas/_testing/_io.py index edbba9452b50a..95977edb600ad 100644 --- a/pandas/_testing/_io.py +++ b/pandas/_testing/_io.py @@ -118,7 +118,7 @@ def round_trip_localpath(writer, reader, path: str | None = None): return obj -def write_to_compressed(compression, path, data, dest: str = "test"): +def write_to_compressed(compression, path, data, dest: str = "test") -> None: """ Write data to a compressed file. diff --git a/pandas/arrays/__init__.py b/pandas/arrays/__init__.py index 32e2afc0eef52..a11755275d00e 100644 --- a/pandas/arrays/__init__.py +++ b/pandas/arrays/__init__.py @@ -36,7 +36,7 @@ ] -def __getattr__(name: str): +def __getattr__(name: str) -> type[NumpyExtensionArray]: if name == "PandasArray": # GH#53694 import warnings diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py index 8282ec25c1d58..4f067d958b799 100644 --- a/pandas/compat/pickle_compat.py +++ b/pandas/compat/pickle_compat.py @@ -26,7 +26,7 @@ from collections.abc import Generator -def load_reduce(self): +def load_reduce(self) -> None: stack = self.stack args = stack.pop() func = stack[-1] diff --git a/pandas/conftest.py b/pandas/conftest.py index 10826f50d1fe1..ea59bbfd088a9 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -491,7 +491,7 @@ def box_with_array(request): @pytest.fixture -def dict_subclass(): +def dict_subclass() -> type[dict]: """ Fixture for a dictionary subclass. """ @@ -504,7 +504,7 @@ def __init__(self, *args, **kwargs) -> None: @pytest.fixture -def non_dict_mapping_subclass(): +def non_dict_mapping_subclass() -> type[abc.Mapping]: """ Fixture for a non-mapping dictionary subclass. """ diff --git a/pandas/core/apply.py b/pandas/core/apply.py index 6ab2f958b8730..4d6dd8f4fd577 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -54,9 +54,9 @@ if TYPE_CHECKING: from collections.abc import ( + Generator, Hashable, Iterable, - Iterator, Sequence, ) @@ -253,7 +253,7 @@ def transform(self) -> DataFrame | Series: return result - def transform_dict_like(self, func): + def transform_dict_like(self, func) -> DataFrame: """ Compute transform in the case of a dict-like func """ @@ -315,7 +315,7 @@ def compute_list_like( op_name: Literal["agg", "apply"], selected_obj: Series | DataFrame, kwargs: dict[str, Any], - ) -> tuple[list[Hashable], list[Any]]: + ) -> tuple[list[Hashable] | Index, list[Any]]: """ Compute agg/apply results for like-like input. @@ -330,7 +330,7 @@ def compute_list_like( Returns ------- - keys : list[hashable] + keys : list[Hashable] or Index Index labels for result. results : list Data for result. 
When aggregating with a Series, this can contain any @@ -370,12 +370,14 @@ def compute_list_like( new_res = getattr(colg, op_name)(func, *args, **kwargs) results.append(new_res) indices.append(index) - keys = selected_obj.columns.take(indices) + # error: Incompatible types in assignment (expression has type "Any | + # Index", variable has type "list[Any | Callable[..., Any] | str]") + keys = selected_obj.columns.take(indices) # type: ignore[assignment] return keys, results def wrap_results_list_like( - self, keys: list[Hashable], results: list[Series | DataFrame] + self, keys: Iterable[Hashable], results: list[Series | DataFrame] ): from pandas.core.reshape.concat import concat @@ -772,7 +774,7 @@ def result_columns(self) -> Index: @property @abc.abstractmethod - def series_generator(self) -> Iterator[Series]: + def series_generator(self) -> Generator[Series, None, None]: pass @abc.abstractmethod @@ -1014,7 +1016,7 @@ class FrameRowApply(FrameApply): axis: AxisInt = 0 @property - def series_generator(self): + def series_generator(self) -> Generator[Series, None, None]: return (self.obj._ixs(i, axis=1) for i in range(len(self.columns))) @property @@ -1075,7 +1077,7 @@ def apply_broadcast(self, target: DataFrame) -> DataFrame: return result.T @property - def series_generator(self): + def series_generator(self) -> Generator[Series, None, None]: values = self.values values = ensure_wrapped_if_datetimelike(values) assert len(values) > 0 diff --git a/pandas/core/arrays/arrow/extension_types.py b/pandas/core/arrays/arrow/extension_types.py index 3249c1c829546..7814a77a1cdc5 100644 --- a/pandas/core/arrays/arrow/extension_types.py +++ b/pandas/core/arrays/arrow/extension_types.py @@ -48,7 +48,7 @@ def __ne__(self, other) -> bool: def __hash__(self) -> int: return hash((str(self), self.freq)) - def to_pandas_dtype(self): + def to_pandas_dtype(self) -> PeriodDtype: return PeriodDtype(freq=self.freq) @@ -105,7 +105,7 @@ def __ne__(self, other) -> bool: def __hash__(self) -> int: return hash((str(self), str(self.subtype), self.closed)) - def to_pandas_dtype(self): + def to_pandas_dtype(self) -> IntervalDtype: return IntervalDtype(self.subtype.to_pandas_dtype(), self.closed) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index dae0fb7782791..9f63d1f97c54f 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -2410,7 +2410,7 @@ def _mode(self, dropna: bool = True) -> Categorical: # ------------------------------------------------------------------ # ExtensionArray Interface - def unique(self): + def unique(self) -> Self: """ Return the ``Categorical`` which ``categories`` and ``codes`` are unique. 
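A note on the apply.py hunks above: they replace Iterator with Generator on series_generator. A minimal runnable sketch of the distinction, with plain strings standing in for Series (FrameLike is a hypothetical stand-in, not a pandas class):

    from collections.abc import Generator

    class FrameLike:
        def __init__(self, columns: list[str]) -> None:
            self.columns = columns

        @property
        def series_generator(self) -> Generator[str, None, None]:
            # a generator expression is a Generator, the most precise of the
            # iterator ABCs, so the abstract property and its concrete
            # overrides can share this annotation
            return (col for col in self.columns)

    for col in FrameLike(["a", "b"]).series_generator:
        print(col)
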
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 73d4f6f38f102..58ade1ee935ec 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -110,7 +110,7 @@ ) -IntervalSideT = Union[TimeArrayLike, np.ndarray] +IntervalSide = Union[TimeArrayLike, np.ndarray] IntervalOrNA = Union[Interval, float] _interval_shared_docs: dict[str, str] = {} @@ -219,8 +219,8 @@ def ndim(self) -> Literal[1]: return 1 # To make mypy recognize the fields - _left: IntervalSideT - _right: IntervalSideT + _left: IntervalSide + _right: IntervalSide _dtype: IntervalDtype # --------------------------------------------------------------------- @@ -237,8 +237,8 @@ def __new__( data = extract_array(data, extract_numpy=True) if isinstance(data, cls): - left: IntervalSideT = data._left - right: IntervalSideT = data._right + left: IntervalSide = data._left + right: IntervalSide = data._right closed = closed or data.closed dtype = IntervalDtype(left.dtype, closed=closed) else: @@ -280,8 +280,8 @@ def __new__( @classmethod def _simple_new( cls, - left: IntervalSideT, - right: IntervalSideT, + left: IntervalSide, + right: IntervalSide, dtype: IntervalDtype, ) -> Self: result = IntervalMixin.__new__(cls) @@ -299,7 +299,7 @@ def _ensure_simple_new_inputs( closed: IntervalClosedType | None = None, copy: bool = False, dtype: Dtype | None = None, - ) -> tuple[IntervalSideT, IntervalSideT, IntervalDtype]: + ) -> tuple[IntervalSide, IntervalSide, IntervalDtype]: """Ensure correctness of input parameters for cls._simple_new.""" from pandas.core.indexes.base import ensure_index @@ -1031,8 +1031,8 @@ def _concat_same_type(cls, to_concat: Sequence[IntervalArray]) -> Self: raise ValueError("Intervals must all be closed on the same side.") closed = closed_set.pop() - left = np.concatenate([interval.left for interval in to_concat]) - right = np.concatenate([interval.right for interval in to_concat]) + left: IntervalSide = np.concatenate([interval.left for interval in to_concat]) + right: IntervalSide = np.concatenate([interval.right for interval in to_concat]) left, right, dtype = cls._ensure_simple_new_inputs(left, right, closed=closed) @@ -1283,7 +1283,7 @@ def _format_space(self) -> str: # Vectorized Interval Properties/Attributes @property - def left(self): + def left(self) -> Index: """ Return the left endpoints of each Interval in the IntervalArray as an Index. @@ -1303,7 +1303,7 @@ def left(self): return Index(self._left, copy=False) @property - def right(self): + def right(self) -> Index: """ Return the right endpoints of each Interval in the IntervalArray as an Index. 
@@ -1855,11 +1855,17 @@ def isin(self, values) -> npt.NDArray[np.bool_]: return isin(self.astype(object), values.astype(object)) @property - def _combined(self) -> IntervalSideT: - left = self.left._values.reshape(-1, 1) - right = self.right._values.reshape(-1, 1) + def _combined(self) -> IntervalSide: + # error: Item "ExtensionArray" of "ExtensionArray | ndarray[Any, Any]" + # has no attribute "reshape" [union-attr] + left = self.left._values.reshape(-1, 1) # type: ignore[union-attr] + right = self.right._values.reshape(-1, 1) # type: ignore[union-attr] if needs_i8_conversion(left.dtype): - comb = left._concat_same_type([left, right], axis=1) + # error: Item "ndarray[Any, Any]" of "Any | ndarray[Any, Any]" has + # no attribute "_concat_same_type" + comb = left._concat_same_type( # type: ignore[union-attr] + [left, right], axis=1 + ) else: comb = np.concatenate([left, right], axis=1) return comb diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index a2e4b595c42aa..eeede0ad9e6d2 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -948,7 +948,7 @@ def _check_timedeltalike_freq_compat(self, other): return lib.item_from_zerodim(delta) -def raise_on_incompatible(left, right): +def raise_on_incompatible(left, right) -> IncompatibleFrequency: """ Helper function to render a consistent error message when raising IncompatibleFrequency. @@ -1089,7 +1089,7 @@ def validate_dtype_freq(dtype, freq: timedelta | str | None) -> BaseOffset: def validate_dtype_freq( - dtype, freq: BaseOffsetT | timedelta | str | None + dtype, freq: BaseOffsetT | BaseOffset | timedelta | str | None ) -> BaseOffsetT: """ If both a dtype and a freq are available, ensure they match. If only @@ -1110,10 +1110,7 @@ def validate_dtype_freq( IncompatibleFrequency : mismatch between dtype and freq """ if freq is not None: - # error: Incompatible types in assignment (expression has type - # "BaseOffset", variable has type "Union[BaseOffsetT, timedelta, - # str, None]") - freq = to_offset(freq) # type: ignore[assignment] + freq = to_offset(freq) if dtype is not None: dtype = pandas_dtype(dtype) diff --git a/pandas/core/arrays/sparse/array.py b/pandas/core/arrays/sparse/array.py index e38fa0a3bdae5..00cbe1286c195 100644 --- a/pandas/core/arrays/sparse/array.py +++ b/pandas/core/arrays/sparse/array.py @@ -702,7 +702,9 @@ def npoints(self) -> int: """ return self.sp_index.npoints - def isna(self): + # error: Return type "SparseArray" of "isna" incompatible with return type + # "ndarray[Any, Any] | ExtensionArraySupportsAnyAll" in supertype "ExtensionArray" + def isna(self) -> Self: # type: ignore[override] # If null fill value, we want SparseDtype[bool, true] # to preserve the same memory usage. dtype = SparseDtype(bool, self._null_fill_value) @@ -1421,7 +1423,7 @@ def all(self, axis=None, *args, **kwargs): return values.all() - def any(self, axis: AxisInt = 0, *args, **kwargs): + def any(self, axis: AxisInt = 0, *args, **kwargs) -> bool: """ Tests whether at least one of elements evaluate True diff --git a/pandas/core/arrays/string_.py b/pandas/core/arrays/string_.py index 72ba95e5fa258..c90127c0e9812 100644 --- a/pandas/core/arrays/string_.py +++ b/pandas/core/arrays/string_.py @@ -59,6 +59,7 @@ NumpySorter, NumpyValueArrayLike, Scalar, + Self, npt, type_t, ) @@ -135,7 +136,7 @@ def type(self) -> type[str]: return str @classmethod - def construct_from_string(cls, string): + def construct_from_string(cls, string) -> Self: """ Construct a StringDtype from a string. 
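Also in this chunk: raise_on_incompatible in period.py is annotated to return IncompatibleFrequency, even though control never flows back through its callers normally; the helper builds the exception and the call site raises it. A self-contained sketch of the pattern (the signature and message text below are simplified, not the pandas originals):

    class IncompatibleFrequency(ValueError):
        pass

    def raise_on_incompatible(left: str, right: str) -> IncompatibleFrequency:
        # build the error; the caller decides to raise it, which is why the
        # return annotation is the exception type rather than NoReturn
        msg = f"Input has different freq={right} from target freq={left}"
        return IncompatibleFrequency(msg)

    # typical call site:
    # raise raise_on_incompatible("D", "h")
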
diff --git a/pandas/core/arrays/string_arrow.py b/pandas/core/arrays/string_arrow.py index cc3bc5900c4c2..f438f75707265 100644 --- a/pandas/core/arrays/string_arrow.py +++ b/pandas/core/arrays/string_arrow.py @@ -53,6 +53,8 @@ npt, ) + from pandas import Series + ArrowStringScalarOrNAT = Union[str, libmissing.NAType] @@ -547,7 +549,7 @@ def _cmp_method(self, other, op): result = super()._cmp_method(other, op) return result.to_numpy(np.bool_, na_value=False) - def value_counts(self, dropna: bool = True): + def value_counts(self, dropna: bool = True) -> Series: from pandas import Series result = super().value_counts(dropna) diff --git a/pandas/core/computation/expr.py b/pandas/core/computation/expr.py index 2f94856702465..4770f403b1bdb 100644 --- a/pandas/core/computation/expr.py +++ b/pandas/core/computation/expr.py @@ -12,6 +12,7 @@ import tokenize from typing import ( Callable, + ClassVar, TypeVar, ) @@ -349,8 +350,8 @@ class BaseExprVisitor(ast.NodeVisitor): preparser : callable """ - const_type: type[Term] = Constant - term_type = Term + const_type: ClassVar[type[Term]] = Constant + term_type: ClassVar[type[Term]] = Term binary_ops = CMP_OPS_SYMS + BOOL_OPS_SYMS + ARITH_OPS_SYMS binary_op_nodes = ( @@ -540,7 +541,7 @@ def visit_UnaryOp(self, node, **kwargs): operand = self.visit(node.operand) return op(operand) - def visit_Name(self, node, **kwargs): + def visit_Name(self, node, **kwargs) -> Term: return self.term_type(node.id, self.env, **kwargs) # TODO(py314): deprecated since Python 3.8. Remove after Python 3.14 is min @@ -555,11 +556,11 @@ def visit_Constant(self, node, **kwargs) -> Term: return self.const_type(node.value, self.env) # TODO(py314): deprecated since Python 3.8. Remove after Python 3.14 is min - def visit_Str(self, node, **kwargs): + def visit_Str(self, node, **kwargs) -> Term: name = self.env.add_tmp(node.s) return self.term_type(name, self.env) - def visit_List(self, node, **kwargs): + def visit_List(self, node, **kwargs) -> Term: name = self.env.add_tmp([self.visit(e)(self.env) for e in node.elts]) return self.term_type(name, self.env) @@ -569,7 +570,7 @@ def visit_Index(self, node, **kwargs): """df.index[4]""" return self.visit(node.value) - def visit_Subscript(self, node, **kwargs): + def visit_Subscript(self, node, **kwargs) -> Term: from pandas import eval as pd_eval value = self.visit(node.value) @@ -589,7 +590,7 @@ def visit_Subscript(self, node, **kwargs): name = self.env.add_tmp(v) return self.term_type(name, env=self.env) - def visit_Slice(self, node, **kwargs): + def visit_Slice(self, node, **kwargs) -> slice: """df.index[slice(4,6)]""" lower = node.lower if lower is not None: diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py index 852bfae1cc79a..95ac20ba39edc 100644 --- a/pandas/core/computation/ops.py +++ b/pandas/core/computation/ops.py @@ -617,5 +617,5 @@ def __init__(self, name: str) -> None: self.name = name self.func = getattr(np, name) - def __call__(self, *args): + def __call__(self, *args) -> MathCall: return MathCall(self, args) diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index 77d8d79506258..138a3ee42f686 100644 --- a/pandas/core/computation/pytables.py +++ b/pandas/core/computation/pytables.py @@ -10,6 +10,7 @@ from typing import ( TYPE_CHECKING, Any, + ClassVar, ) import numpy as np @@ -40,7 +41,10 @@ ) if TYPE_CHECKING: - from pandas._typing import npt + from pandas._typing import ( + Self, + npt, + ) class PyTablesScope(_scope.Scope): @@ -283,7 +287,7 @@ def 
__repr__(self) -> str: return "Filter: Not Initialized" return pprint_thing(f"[Filter : [{self.filter[0]}] -> [{self.filter[1]}]") - def invert(self): + def invert(self) -> Self: """invert the filter""" if self.filter is not None: self.filter = ( @@ -297,7 +301,8 @@ def format(self): """return the actual filter format""" return [self.filter] - def evaluate(self): + # error: Signature of "evaluate" incompatible with supertype "BinOp" + def evaluate(self) -> Self | None: # type: ignore[override] if not self.is_valid: raise ValueError(f"query term is not valid [{self}]") @@ -336,7 +341,8 @@ class JointFilterBinOp(FilterBinOp): def format(self): raise NotImplementedError("unable to collapse Joint Filters") - def evaluate(self): + # error: Signature of "evaluate" incompatible with supertype "BinOp" + def evaluate(self) -> Self: # type: ignore[override] return self @@ -357,7 +363,8 @@ def format(self): """return the actual ne format""" return self.condition - def evaluate(self): + # error: Signature of "evaluate" incompatible with supertype "BinOp" + def evaluate(self) -> Self | None: # type: ignore[override] if not self.is_valid: raise ValueError(f"query term is not valid [{self}]") @@ -385,7 +392,8 @@ def evaluate(self): class JointConditionBinOp(ConditionBinOp): - def evaluate(self): + # error: Signature of "evaluate" incompatible with supertype "BinOp" + def evaluate(self) -> Self: # type: ignore[override] self.condition = f"({self.lhs.condition} {self.op} {self.rhs.condition})" return self @@ -410,8 +418,8 @@ def prune(self, klass): class PyTablesExprVisitor(BaseExprVisitor): - const_type = Constant - term_type = Term + const_type: ClassVar[type[ops.Term]] = Constant + term_type: ClassVar[type[Term]] = Term def __init__(self, env, engine, parser, **kwargs) -> None: super().__init__(env, engine, parser) @@ -423,13 +431,15 @@ def __init__(self, env, engine, parser, **kwargs) -> None: lambda node, bin_op=bin_op: partial(BinOp, bin_op, **kwargs), ) - def visit_UnaryOp(self, node, **kwargs): + def visit_UnaryOp(self, node, **kwargs) -> ops.Term | UnaryOp | None: if isinstance(node.op, (ast.Not, ast.Invert)): return UnaryOp("~", self.visit(node.operand)) elif isinstance(node.op, ast.USub): return self.const_type(-self.visit(node.operand).value, self.env) elif isinstance(node.op, ast.UAdd): raise NotImplementedError("Unary addition not supported") + # TODO: return None might never be reached + return None def visit_Index(self, node, **kwargs): return self.visit(node.value).value @@ -440,7 +450,7 @@ def visit_Assign(self, node, **kwargs): ) return self.visit(cmpr) - def visit_Subscript(self, node, **kwargs): + def visit_Subscript(self, node, **kwargs) -> ops.Term: # only allow simple subscripts value = self.visit(node.value) diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index e3e38961a200b..66f00a7e9a805 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -77,6 +77,7 @@ DtypeObj, IntervalClosedType, Ordered, + Self, npt, type_t, ) @@ -973,7 +974,7 @@ class PeriodDtype(PeriodDtypeBase, PandasExtensionDtype): __hash__ = PeriodDtypeBase.__hash__ _freq: BaseOffset - def __new__(cls, freq): + def __new__(cls, freq) -> PeriodDtype: # noqa: PYI034 """ Parameters ---------- @@ -1004,11 +1005,11 @@ def __new__(cls, freq): u._freq = freq return u - def __reduce__(self): + def __reduce__(self) -> tuple[type_t[Self], tuple[str_type]]: return type(self), (self.name,) @property - def freq(self): + def freq(self) -> BaseOffset: """ The frequency object of 
this PeriodDtype. @@ -1729,7 +1730,7 @@ def fill_value(self): """ return self._fill_value - def _check_fill_value(self): + def _check_fill_value(self) -> None: if not lib.is_scalar(self._fill_value): raise ValueError( f"fill_value must be a scalar. Got {self._fill_value} instead" diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py index 9c04e57be36fc..f551716772f61 100644 --- a/pandas/core/dtypes/inference.py +++ b/pandas/core/dtypes/inference.py @@ -401,7 +401,7 @@ def is_sequence(obj) -> bool: return False -def is_dataclass(item): +def is_dataclass(item) -> bool: """ Checks if the object is a data-class instance diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 6fae6273be998..4bfa8a4415785 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -12247,8 +12247,7 @@ def _to_dict_of_blocks(self, copy: bool = True): """ mgr = self._mgr # convert to BlockManager if needed -> this way support ArrayManager as well - mgr = mgr_to_mgr(mgr, "block") - mgr = cast(BlockManager, mgr) + mgr = cast(BlockManager, mgr_to_mgr(mgr, "block")) return { k: self._constructor_from_mgr(v, axes=v.axes).__finalize__(self) for k, v, in mgr.to_dict(copy=copy).items() diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 8bef167b747e2..dbabd04a87c36 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -721,8 +721,10 @@ def nunique(self, dropna: bool = True) -> Series | DataFrame: return self._reindex_output(result, fill_value=0) @doc(Series.describe) - def describe(self, **kwargs): - return super().describe(**kwargs) + def describe(self, percentiles=None, include=None, exclude=None) -> Series: + return super().describe( + percentiles=percentiles, include=include, exclude=exclude + ) def value_counts( self, @@ -770,6 +772,7 @@ def value_counts( mask = ids != -1 ids, val = ids[mask], val[mask] + lab: Index | np.ndarray if bins is None: lab, lev = algorithms.factorize(val, sort=True) llab = lambda lab, inc: lab[inc] @@ -1152,7 +1155,7 @@ def alt(obj): @property @doc(Series.plot.__doc__) - def plot(self): + def plot(self) -> GroupByPlot: result = GroupByPlot(self) return result diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 7f77c0c4826fb..11c97e30ab5cd 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -143,6 +143,7 @@ class providing the base-class of operations. if TYPE_CHECKING: from typing import Any + from pandas.core.resample import Resampler from pandas.core.window import ( ExpandingGroupby, ExponentialMovingWindowGroupby, @@ -2094,7 +2095,7 @@ def _obj_1d_constructor(self) -> Callable: @final @Substitution(name="groupby") @Substitution(see_also=_common_see_also) - def any(self, skipna: bool = True): + def any(self, skipna: bool = True) -> NDFrameT: """ Return True if any value in the group is truthful, else False. @@ -2150,7 +2151,7 @@ def any(self, skipna: bool = True): @final @Substitution(name="groupby") @Substitution(see_also=_common_see_also) - def all(self, skipna: bool = True): + def all(self, skipna: bool = True) -> NDFrameT: """ Return True if all values in the group are truthful, else False. @@ -2399,7 +2400,7 @@ def mean( return result.__finalize__(self.obj, method="groupby") @final - def median(self, numeric_only: bool = False): + def median(self, numeric_only: bool = False) -> NDFrameT: """ Compute median of groups, excluding missing values. 
@@ -2828,7 +2829,7 @@ def _value_counts( return result.__finalize__(self.obj, method="value_counts") @final - def sem(self, ddof: int = 1, numeric_only: bool = False): + def sem(self, ddof: int = 1, numeric_only: bool = False) -> NDFrameT: """ Compute standard error of the mean of groups, excluding missing values. @@ -3119,7 +3120,7 @@ def sum( 2 30 72""" ), ) - def prod(self, numeric_only: bool = False, min_count: int = 0): + def prod(self, numeric_only: bool = False, min_count: int = 0) -> NDFrameT: return self._agg_general( numeric_only=numeric_only, min_count=min_count, alias="prod", npfunc=np.prod ) @@ -3261,7 +3262,7 @@ def max( ) @final - def first(self, numeric_only: bool = False, min_count: int = -1): + def first(self, numeric_only: bool = False, min_count: int = -1) -> NDFrameT: """ Compute the first non-null entry of each column. @@ -3331,7 +3332,7 @@ def first(x: Series): ) @final - def last(self, numeric_only: bool = False, min_count: int = -1): + def last(self, numeric_only: bool = False, min_count: int = -1) -> NDFrameT: """ Compute the last non-null entry of each column. @@ -3518,7 +3519,7 @@ def describe( return result @final - def resample(self, rule, *args, **kwargs): + def resample(self, rule, *args, **kwargs) -> Resampler: """ Provide resampling when using a TimeGrouper. diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index add6c3ac4ec20..c51c17e04796a 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -289,12 +289,12 @@ def __init__( self.dropna = dropna self._grouper_deprecated = None - self._indexer_deprecated = None + self._indexer_deprecated: npt.NDArray[np.intp] | None = None self._obj_deprecated = None self._gpr_index = None self.binner = None self._grouper = None - self._indexer = None + self._indexer: npt.NDArray[np.intp] | None = None def _get_grouper( self, obj: NDFrameT, validate: bool = True @@ -329,8 +329,8 @@ def _get_grouper( @final def _set_grouper( - self, obj: NDFrame, sort: bool = False, *, gpr_index: Index | None = None - ): + self, obj: NDFrameT, sort: bool = False, *, gpr_index: Index | None = None + ) -> tuple[NDFrameT, Index, npt.NDArray[np.intp] | None]: """ given an object and the specifications, setup the internal grouper for this particular specification @@ -350,8 +350,6 @@ def _set_grouper( """ assert obj is not None - indexer = None - if self.key is not None and self.level is not None: raise ValueError("The Grouper cannot specify both a key and a level!") @@ -398,6 +396,7 @@ def _set_grouper( raise ValueError(f"The level {level} is not valid") # possibly sort + indexer: npt.NDArray[np.intp] | None = None if (self.sort or sort) and not ax.is_monotonic_increasing: # use stable sort to support first, last, nth # TODO: why does putting na_position="first" fix datetimelike cases? diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 71525c8c1a223..607059e5183ec 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -77,7 +77,7 @@ from pandas.core.generic import NDFrame -def check_result_array(obj, dtype): +def check_result_array(obj, dtype) -> None: # Our operation is supposed to be an aggregation/reduction. If # it returns an ndarray, this likely means an invalid operation has # been passed. 
See test_apply_without_aggregation, test_agg_must_agg diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py index 5134c506b8c61..af8fa441f8b3f 100644 --- a/pandas/core/indexes/accessors.py +++ b/pandas/core/indexes/accessors.py @@ -227,7 +227,7 @@ def to_pydatetime(self): ) return cast(ArrowExtensionArray, self._parent.array)._dt_to_pydatetime() - def isocalendar(self): + def isocalendar(self) -> DataFrame: from pandas import DataFrame result = ( diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index da16c3c54995a..6a397862712de 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1121,7 +1121,7 @@ def take( allow_fill: bool = True, fill_value=None, **kwargs, - ): + ) -> Self: if kwargs: nv.validate_take((), kwargs) if is_scalar(indices): @@ -1206,7 +1206,7 @@ def _maybe_disallow_fill(self, allow_fill: bool, fill_value, indices) -> bool: """ @Appender(_index_shared_docs["repeat"] % _index_doc_kwargs) - def repeat(self, repeats, axis: None = None): + def repeat(self, repeats, axis: None = None) -> Self: repeats = ensure_platform_int(repeats) nv.validate_repeat((), {"axis": axis}) res_values = self._values.repeat(repeats) @@ -3527,7 +3527,7 @@ def _intersection_via_get_indexer( Returns ------- - np.ndarray or ExtensionArray + np.ndarray or ExtensionArray or MultiIndex The returned array will be unique. """ left_unique = self.unique() @@ -3544,6 +3544,7 @@ def _intersection_via_get_indexer( # unnecessary in the case with sort=None bc we will sort later taker = np.sort(taker) + result: MultiIndex | ExtensionArray | np.ndarray if isinstance(left_unique, ABCMultiIndex): result = left_unique.take(taker) else: @@ -4445,7 +4446,7 @@ def _reindex_non_unique( indexer, missing = self.get_indexer_non_unique(target) check = indexer != -1 - new_labels = self.take(indexer[check]) + new_labels: Index | np.ndarray = self.take(indexer[check]) new_indexer = None if len(missing): @@ -5005,7 +5006,7 @@ def _wrap_joined_index( # expected "Self") mask = lidx == -1 join_idx = self.take(lidx) - right = other.take(ridx) + right = cast("MultiIndex", other.take(ridx)) join_index = join_idx.putmask(mask, right)._sort_levels_monotonic() return join_index.set_names(name) # type: ignore[return-value] else: @@ -6990,7 +6991,7 @@ def infer_objects(self, copy: bool = True) -> Index: result._references.add_index_reference(result) return result - def diff(self, periods: int = 1): + def diff(self, periods: int = 1) -> Self: """ Computes the difference between consecutive values in the Index object. @@ -7018,7 +7019,7 @@ def diff(self, periods: int = 1): """ return self._constructor(self.to_series().diff(periods)) - def round(self, decimals: int = 0): + def round(self, decimals: int = 0) -> Self: """ Round each value in the Index to the given number of decimals. 
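The Index methods above (take, repeat, diff, round) now return Self rather than a bare type. A minimal sketch of what Self buys subclasses (typing.Self needs Python 3.11; pandas gets Self from pandas._typing):

    from typing import Self  # Python 3.11+

    class Base:
        def copy(self) -> Self:
            # type(self) is type[Self], so the result is typed as Self
            return type(self)()

    class Sub(Base):
        pass

    s: Sub = Sub().copy()  # checks: copy() on a Sub is typed as returning Sub

The flip side appears further down in range.py: RangeIndex.take keeps -> Index with an explicit override ignore, since taking from a RangeIndex does not generally produce another RangeIndex.
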
diff --git a/pandas/core/indexes/frozen.py b/pandas/core/indexes/frozen.py index 2d1d2a81a8a71..9d528d34e3684 100644 --- a/pandas/core/indexes/frozen.py +++ b/pandas/core/indexes/frozen.py @@ -8,12 +8,18 @@ """ from __future__ import annotations -from typing import NoReturn +from typing import ( + TYPE_CHECKING, + NoReturn, +) from pandas.core.base import PandasObject from pandas.io.formats.printing import pprint_thing +if TYPE_CHECKING: + from pandas._typing import Self + class FrozenList(PandasObject, list): """ @@ -72,7 +78,7 @@ def __getitem__(self, n): return type(self)(super().__getitem__(n)) return super().__getitem__(n) - def __radd__(self, other): + def __radd__(self, other) -> Self: if isinstance(other, tuple): other = list(other) return type(self)(other + list(self)) @@ -84,7 +90,7 @@ def __eq__(self, other: object) -> bool: __req__ = __eq__ - def __mul__(self, other): + def __mul__(self, other) -> Self: return type(self)(super().__mul__(other)) __imul__ = __mul__ diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index c2dd16e550f6f..144045d40a086 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1030,7 +1030,7 @@ def levshape(self) -> Shape: # Codes Methods @property - def codes(self): + def codes(self) -> FrozenList: return self._codes def _set_codes( @@ -1074,7 +1074,9 @@ def _set_codes( self._reset_cache() - def set_codes(self, codes, *, level=None, verify_integrity: bool = True): + def set_codes( + self, codes, *, level=None, verify_integrity: bool = True + ) -> MultiIndex: """ Set new codes on MultiIndex. Defaults to returning new index. @@ -1199,7 +1201,7 @@ def copy( # type: ignore[override] names=None, deep: bool = False, name=None, - ): + ) -> Self: """ Make a copy of this object. @@ -1262,7 +1264,7 @@ def __array__(self, dtype=None) -> np.ndarray: """the array interface, return my values""" return self.values - def view(self, cls=None): + def view(self, cls=None) -> Self: """this is defined as a copy with the same identity""" result = self.copy() result._id = self._id @@ -1659,7 +1661,8 @@ def _get_level_values(self, level: int, unique: bool = False) -> Index: filled = algos.take_nd(lev._values, level_codes, fill_value=lev._na_value) return lev._shallow_copy(filled, name=name) - def get_level_values(self, level): + # error: Signature of "get_level_values" incompatible with supertype "Index" + def get_level_values(self, level) -> Index: # type: ignore[override] """ Return vector of label values for requested level. @@ -3296,7 +3299,7 @@ def convert_indexer(start, stop, step, indexer=indexer, codes=level_codes): raise KeyError(key) return slice(start, end) - def get_locs(self, seq): + def get_locs(self, seq) -> npt.NDArray[np.intp]: """ Get location for a sequence of labels. 
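frozen.py's FrozenList annotates __radd__ and __mul__ as -> Self because both rebuild the result with type(self)(...). A runnable sketch of the same idea on a toy list subclass (MyList is illustrative, not pandas code):

    from typing import Self  # Python 3.11+

    class MyList(list):
        def __radd__(self, other) -> Self:
            if isinstance(other, tuple):
                other = list(other)
            return type(self)(other + list(self))

        def __mul__(self, other) -> Self:
            return type(self)(super().__mul__(other))

    print((1, 2) + MyList([3]))  # MyList: [1, 2, 3]
    print(MyList([1]) * 2)       # MyList: [1, 1]
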
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 9576997af6641..aeb7bb1813fef 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -407,7 +407,7 @@ def inferred_type(self) -> str: # Indexing Methods @doc(Index.get_loc) - def get_loc(self, key): + def get_loc(self, key) -> int: if is_integer(key) or (is_float(key) and key.is_integer()): new_key = int(key) try: @@ -1107,14 +1107,16 @@ def _arith_method(self, other, op): # test_arithmetic_explicit_conversions return super()._arith_method(other, op) - def take( + # error: Return type "Index" of "take" incompatible with return type + # "RangeIndex" in supertype "Index" + def take( # type: ignore[override] self, indices, axis: Axis = 0, allow_fill: bool = True, fill_value=None, **kwargs, - ): + ) -> Index: if kwargs: nv.validate_take((), kwargs) if is_scalar(indices): diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 97578e3f1668b..871e5817fdf0d 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -82,6 +82,7 @@ Axis, AxisInt, Self, + npt, ) from pandas import ( @@ -1384,7 +1385,7 @@ def _getitem_axis(self, key, axis: AxisInt): # nested tuple slicing if is_nested_tuple(key, labels): locs = labels.get_locs(key) - indexer = [slice(None)] * self.ndim + indexer: list[slice | npt.NDArray[np.intp]] = [slice(None)] * self.ndim indexer[axis] = locs return self.obj.iloc[tuple(indexer)] diff --git a/pandas/core/internals/array_manager.py b/pandas/core/internals/array_manager.py index 14969425e75a7..b4e3fdb78b77b 100644 --- a/pandas/core/internals/array_manager.py +++ b/pandas/core/internals/array_manager.py @@ -1103,7 +1103,7 @@ def _verify_integrity(self) -> None: def _normalize_axis(axis): return axis - def make_empty(self, axes=None) -> SingleArrayManager: + def make_empty(self, axes=None) -> Self: """Return an empty ArrayManager with index/array of length 0""" if axes is None: axes = [Index([], dtype=object)] diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 43f4f527afc3a..61aa8549a6790 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -2126,8 +2126,9 @@ def is_view(self) -> bool: """Extension arrays are never treated as views.""" return False + # error: Cannot override writeable attribute with read-only property @cache_readonly - def is_numeric(self): + def is_numeric(self) -> bool: # type: ignore[override] return self.values.dtype._is_numeric def _slice( diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 3b9e546a99b32..6f30bc650aa36 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -193,7 +193,7 @@ def rec_array_to_mgr( return mgr -def mgr_to_mgr(mgr, typ: str, copy: bool = True): +def mgr_to_mgr(mgr, typ: str, copy: bool = True) -> Manager: """ Convert to specific type of Manager. Does not copy if the type is already correct. Does not guarantee a copy otherwise. `copy` keyword only controls diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index b315a318a9a0a..4cb7b610074ba 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -93,6 +93,8 @@ npt, ) + from pandas.api.extensions import ExtensionArray + class BaseBlockManager(DataManager): """ @@ -1046,7 +1048,7 @@ def iset( value: ArrayLike, inplace: bool = False, refs: BlockValuesRefs | None = None, - ): + ) -> None: """ Set new item in-place. Does not consolidate. 
Adds new Block if not contained in the current set of items @@ -1870,7 +1872,7 @@ def __getstate__(self): # compatibility with 0.13.1. return axes_array, block_values, block_items, extra_state - def __setstate__(self, state): + def __setstate__(self, state) -> None: def unpickle_block(values, mgr_locs, ndim: int) -> Block: # TODO(EA2D): ndim would be unnecessary with 2D EAs # older pickles may store e.g. DatetimeIndex instead of DatetimeArray @@ -1959,7 +1961,7 @@ def internal_values(self): """The array that Series._values returns""" return self._block.values - def array_values(self): + def array_values(self) -> ExtensionArray: """The array that Series.array returns""" return self._block.array_values diff --git a/pandas/core/missing.py b/pandas/core/missing.py index 58b0e2907b8ce..d275445983b6f 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -12,6 +12,7 @@ Any, Literal, cast, + overload, ) import numpy as np @@ -124,9 +125,33 @@ def mask_missing(arr: ArrayLike, values_to_mask) -> npt.NDArray[np.bool_]: return mask -def clean_fill_method(method: str, allow_nearest: bool = False): +@overload +def clean_fill_method( + method: Literal["ffill", "pad", "bfill", "backfill"], + *, + allow_nearest: Literal[False] = ..., +) -> Literal["pad", "backfill"]: + ... + + +@overload +def clean_fill_method( + method: Literal["ffill", "pad", "bfill", "backfill", "nearest"], + *, + allow_nearest: Literal[True], +) -> Literal["pad", "backfill", "nearest"]: + ... + + +def clean_fill_method( + method: Literal["ffill", "pad", "bfill", "backfill", "nearest"], + *, + allow_nearest: bool = False, +) -> Literal["pad", "backfill", "nearest"]: if isinstance(method, str): - method = method.lower() + # error: Incompatible types in assignment (expression has type "str", variable + # has type "Literal['ffill', 'pad', 'bfill', 'backfill', 'nearest']") + method = method.lower() # type: ignore[assignment] if method == "ffill": method = "pad" elif method == "bfill": @@ -252,7 +277,9 @@ def validate_limit_area(limit_area: str | None) -> Literal["inside", "outside"] return limit_area # type: ignore[return-value] -def infer_limit_direction(limit_direction, method): +def infer_limit_direction( + limit_direction: Literal["backward", "forward", "both"] | None, method: str +) -> Literal["backward", "forward", "both"]: # Set `limit_direction` depending on `method` if limit_direction is None: if method in ("backfill", "bfill"): diff --git a/pandas/core/resample.py b/pandas/core/resample.py index e45cff0b0679f..5ff18d8a25e36 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -1676,6 +1676,8 @@ def _gotitem(self, key, ndim, subset=None): class DatetimeIndexResampler(Resampler): + ax: DatetimeIndex + @property def _resampler_for_grouping(self): return DatetimeIndexResamplerGroupby @@ -1807,7 +1809,11 @@ def _wrap_result(self, result): return result -class DatetimeIndexResamplerGroupby(_GroupByMixin, DatetimeIndexResampler): +# error: Definition of "ax" in base class "_GroupByMixin" is incompatible +# with definition in base class "DatetimeIndexResampler" +class DatetimeIndexResamplerGroupby( # type: ignore[misc] + _GroupByMixin, DatetimeIndexResampler +): """ Provides a resample of a groupby implementation """ @@ -1818,6 +1824,10 @@ def _resampler_cls(self): class PeriodIndexResampler(DatetimeIndexResampler): + # error: Incompatible types in assignment (expression has type "PeriodIndex", base + # class "DatetimeIndexResampler" defined the type as "DatetimeIndex") + ax: PeriodIndex # type: 
ignore[assignment] + @property def _resampler_for_grouping(self): return PeriodIndexResamplerGroupby @@ -1924,7 +1934,11 @@ def _upsample(self, method, limit: int | None = None, fill_value=None): return self._wrap_result(new_obj) -class PeriodIndexResamplerGroupby(_GroupByMixin, PeriodIndexResampler): +# error: Definition of "ax" in base class "_GroupByMixin" is incompatible with +# definition in base class "PeriodIndexResampler" +class PeriodIndexResamplerGroupby( # type: ignore[misc] + _GroupByMixin, PeriodIndexResampler +): """ Provides a resample of a groupby implementation. """ @@ -1935,6 +1949,10 @@ def _resampler_cls(self): class TimedeltaIndexResampler(DatetimeIndexResampler): + # error: Incompatible types in assignment (expression has type "TimedeltaIndex", + # base class "DatetimeIndexResampler" defined the type as "DatetimeIndex") + ax: TimedeltaIndex # type: ignore[assignment] + @property def _resampler_for_grouping(self): return TimedeltaIndexResamplerGroupby @@ -1952,7 +1970,11 @@ def _adjust_binner_for_upsample(self, binner): return binner -class TimedeltaIndexResamplerGroupby(_GroupByMixin, TimedeltaIndexResampler): +# error: Definition of "ax" in base class "_GroupByMixin" is incompatible with +# definition in base class "DatetimeIndexResampler" +class TimedeltaIndexResamplerGroupby( # type: ignore[misc] + _GroupByMixin, TimedeltaIndexResampler +): """ Provides a resample of a groupby implementation. """ diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index ffa7199921298..1bc548de91f01 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -76,7 +76,7 @@ def concat( axis: Literal[0, "index"] = ..., join: str = ..., ignore_index: bool = ..., - keys=..., + keys: Iterable[Hashable] | None = ..., levels=..., names: list[HashableT] | None = ..., verify_integrity: bool = ..., @@ -93,7 +93,7 @@ def concat( axis: Literal[0, "index"] = ..., join: str = ..., ignore_index: bool = ..., - keys=..., + keys: Iterable[Hashable] | None = ..., levels=..., names: list[HashableT] | None = ..., verify_integrity: bool = ..., @@ -110,7 +110,7 @@ def concat( axis: Literal[0, "index"] = ..., join: str = ..., ignore_index: bool = ..., - keys=..., + keys: Iterable[Hashable] | None = ..., levels=..., names: list[HashableT] | None = ..., verify_integrity: bool = ..., @@ -127,7 +127,7 @@ def concat( axis: Literal[1, "columns"], join: str = ..., ignore_index: bool = ..., - keys=..., + keys: Iterable[Hashable] | None = ..., levels=..., names: list[HashableT] | None = ..., verify_integrity: bool = ..., @@ -144,7 +144,7 @@ def concat( axis: Axis = ..., join: str = ..., ignore_index: bool = ..., - keys=..., + keys: Iterable[Hashable] | None = ..., levels=..., names: list[HashableT] | None = ..., verify_integrity: bool = ..., @@ -160,7 +160,7 @@ def concat( axis: Axis = 0, join: str = "outer", ignore_index: bool = False, - keys=None, + keys: Iterable[Hashable] | None = None, levels=None, names: list[HashableT] | None = None, verify_integrity: bool = False, @@ -405,7 +405,7 @@ def __init__( objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame], axis: Axis = 0, join: str = "outer", - keys=None, + keys: Iterable[Hashable] | None = None, levels=None, names: list[HashableT] | None = None, ignore_index: bool = False, diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 13bc1008698b2..f3695fb87ea78 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -105,6 +105,7 @@ from pandas import 
DataFrame from pandas.core import groupby from pandas.core.arrays import DatetimeArray + from pandas.core.indexes.frozen import FrozenList _factorizers = { np.int64: libhashtable.Int64Factorizer, @@ -1738,7 +1739,7 @@ def restore_dropped_levels_multijoin( join_index: Index, lindexer: npt.NDArray[np.intp], rindexer: npt.NDArray[np.intp], -) -> tuple[list[Index], npt.NDArray[np.intp], list[Hashable]]: +) -> tuple[FrozenList, FrozenList, FrozenList]: """ *this is an internal non-public method* @@ -1814,7 +1815,7 @@ def _convert_to_multiindex(index: Index) -> MultiIndex: # error: Cannot determine type of "__add__" join_levels = join_levels + [restore_levels] # type: ignore[has-type] - join_codes = join_codes + [restore_codes] + join_codes = join_codes + [restore_codes] # type: ignore[has-type] join_names = join_names + [dropped_level_name] return join_levels, join_codes, join_names diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index 71e3ea5b2588e..924b56f7a14d5 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -522,6 +522,7 @@ def pivot( cols + columns_listlike, append=append # type: ignore[operator] ) else: + index_list: list[Index] | list[Series] if index is lib.no_default: if isinstance(data.index, MultiIndex): # GH 23955 diff --git a/pandas/core/series.py b/pandas/core/series.py index a44f6bc01328d..9b5c8829fd5ff 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -4398,7 +4398,7 @@ def explode(self, ignore_index: bool = False) -> Series: return result.reset_index(drop=True) if ignore_index else result if ignore_index: - index = default_index(len(values)) + index: Index = default_index(len(values)) else: index = self.index.repeat(counts) diff --git a/pandas/core/strings/accessor.py b/pandas/core/strings/accessor.py index 124ca546c4583..71d6f9c58e2c2 100644 --- a/pandas/core/strings/accessor.py +++ b/pandas/core/strings/accessor.py @@ -372,7 +372,7 @@ def cons_row(x): if expand: result = list(result) - out = MultiIndex.from_tuples(result, names=name) + out: Index = MultiIndex.from_tuples(result, names=name) if out.nlevels == 1: # We had all tuples of length-one, which are # better represented as a regular Index. 
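Looking back at the missing.py hunk: clean_fill_method gains two @overload signatures so the Literal return type tracks allow_nearest. A simplified sketch of that overload shape (names shortened; the alias normalization mirrors the real function, the error message does not):

    from typing import Literal, overload

    @overload
    def clean_fill(
        method: str, *, allow_nearest: Literal[False] = ...
    ) -> Literal["pad", "backfill"]:
        ...

    @overload
    def clean_fill(
        method: str, *, allow_nearest: Literal[True]
    ) -> Literal["pad", "backfill", "nearest"]:
        ...

    def clean_fill(
        method: str, *, allow_nearest: bool = False
    ) -> Literal["pad", "backfill", "nearest"]:
        # normalize the user-facing aliases to the internal names
        method = {"ffill": "pad", "bfill": "backfill"}.get(method.lower(), method.lower())
        valid = ("pad", "backfill", "nearest") if allow_nearest else ("pad", "backfill")
        if method not in valid:
            raise ValueError(f"Invalid fill method. Expecting {valid}. Got {method}")
        # mypy still sees `method` as str here, hence the same kind of ignore
        # the diff itself leans on
        return method  # type: ignore[return-value]
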
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index fb0354ef9df6c..c0a27ecfec803 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -2823,7 +2823,7 @@ def read( "cannot read on an abstract storer: subclasses should implement" ) - def write(self, **kwargs): + def write(self, obj, **kwargs) -> None: raise NotImplementedError( "cannot write on an abstract storer: subclasses should implement" ) @@ -2937,8 +2937,7 @@ def get_attrs(self) -> None: for n in self.attributes: setattr(self, n, _ensure_decoded(getattr(self.attrs, n, None))) - # error: Signature of "write" incompatible with supertype "Fixed" - def write(self, obj, **kwargs) -> None: # type: ignore[override] + def write(self, obj, **kwargs) -> None: self.set_attrs() def read_array(self, key: str, start: int | None = None, stop: int | None = None): @@ -3226,8 +3225,7 @@ def read( result = result.astype("string[pyarrow_numpy]") return result - # error: Signature of "write" incompatible with supertype "Fixed" - def write(self, obj, **kwargs) -> None: # type: ignore[override] + def write(self, obj, **kwargs) -> None: super().write(obj, **kwargs) self.write_index("index", obj.index) self.write_array("values", obj) @@ -3303,8 +3301,7 @@ def read( return DataFrame(columns=axes[0], index=axes[1]) - # error: Signature of "write" incompatible with supertype "Fixed" - def write(self, obj, **kwargs) -> None: # type: ignore[override] + def write(self, obj, **kwargs) -> None: super().write(obj, **kwargs) # TODO(ArrayManager) HDFStore relies on accessing the blocks @@ -4355,7 +4352,7 @@ def read( """ raise NotImplementedError("WORMTable needs to implement read") - def write(self, **kwargs) -> None: + def write(self, obj, **kwargs) -> None: """ write in a format that we can search later on (but cannot append to): write out the indices and the values using _write_array @@ -4712,12 +4709,13 @@ def is_transposed(self) -> bool: def get_object(cls, obj, transposed: bool): return obj - def write(self, obj, data_columns=None, **kwargs): + # error: Signature of "write" incompatible with supertype "Fixed" + def write(self, obj, data_columns=None, **kwargs) -> None: # type: ignore[override] """we are going to write this as a frame table""" if not isinstance(obj, DataFrame): name = obj.name or "values" obj = obj.to_frame(name) - return super().write(obj=obj, data_columns=obj.columns.tolist(), **kwargs) + super().write(obj=obj, data_columns=obj.columns.tolist(), **kwargs) def read( self, @@ -4750,7 +4748,8 @@ class AppendableMultiSeriesTable(AppendableSeriesTable): pandas_kind = "series_table" table_type = "appendable_multiseries" - def write(self, obj, **kwargs): + # error: Signature of "write" incompatible with supertype "Fixed" + def write(self, obj, **kwargs) -> None: # type: ignore[override] """we are going to write this as a frame table""" name = obj.name or "values" newobj, self.levels = self.validate_multiindex(obj) @@ -4758,7 +4757,7 @@ def write(self, obj, **kwargs): cols = list(self.levels) cols.append(name) newobj.columns = Index(cols) - return super().write(obj=newobj, **kwargs) + super().write(obj=newobj, **kwargs) class GenericTable(AppendableFrameTable): @@ -4823,7 +4822,8 @@ def indexables(self): return _indexables - def write(self, **kwargs): + # error: Signature of "write" incompatible with supertype "AppendableTable" + def write(self, **kwargs) -> None: # type: ignore[override] raise NotImplementedError("cannot write on an generic table") @@ -4839,7 +4839,8 @@ class AppendableMultiFrameTable(AppendableFrameTable): 
def table_type_short(self) -> str: return "appendable_multi" - def write(self, obj, data_columns=None, **kwargs): + # error: Signature of "write" incompatible with supertype "Fixed" + def write(self, obj, data_columns=None, **kwargs) -> None: # type: ignore[override] if data_columns is None: data_columns = [] elif data_columns is True: @@ -4849,7 +4850,7 @@ def write(self, obj, data_columns=None, **kwargs): for n in self.levels: if n not in data_columns: data_columns.insert(0, n) - return super().write(obj=obj, data_columns=data_columns, **kwargs) + super().write(obj=obj, data_columns=data_columns, **kwargs) def read( self,
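The pytables.py changes close the loop on Fixed.write: the abstract method now takes (obj, **kwargs), so most overrides drop their "Signature of 'write' incompatible with supertype" ignores, and every override returns None instead of forwarding super()'s result. A schematic sketch of the resulting shape (class names echo the diff; the bodies are stand-ins):

    from typing import Any

    class Fixed:
        def write(self, obj: Any, **kwargs: Any) -> None:
            raise NotImplementedError(
                "cannot write on an abstract storer: subclasses should implement"
            )

    class SeriesFixed(Fixed):
        # same parameter shape as the base class: no override ignore needed
        def write(self, obj: Any, **kwargs: Any) -> None:
            print(f"writing {obj!r}")

    SeriesFixed().write("values")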