From 2b2be7539e3995944d26edca794df4f6e091770b Mon Sep 17 00:00:00 2001
From: Kurt Stolle
Date: Thu, 21 Mar 2024 17:19:07 +0100
Subject: [PATCH] Feature: include CLI overrides in the WandB run name

The W&B run name is now composed of the configuration name followed by any
CLI overrides, so that runs launched from the same configuration with
different overrides can be told apart in the dashboard. To support this,
`CLI_OVERRIDES` is stored as a list of strings instead of a single
space-joined string. The remaining hunks are mechanical: stub bodies are
collapsed onto their signature line and `wandb` imports are regrouped.

---
 scripts/wandb_cleanup.py                      |   1 -
 sources/unipercept/_api_config.py             |   9 +-
 sources/unipercept/_api_data.py               |  27 +-
 sources/unipercept/cli/_config.py             |   2 +-
 sources/unipercept/config.py                  |  21 +-
 sources/unipercept/data/_loader.py            |   9 +-
 sources/unipercept/data/ops.py                |   3 +-
 sources/unipercept/data/tensors/helpers.py    |   6 +-
 sources/unipercept/engine/_engine.py          |  12 +-
 sources/unipercept/engine/accelerate.py       |   9 +-
 sources/unipercept/engine/callbacks.py        |   3 +-
 sources/unipercept/evaluators/_base.py        |   9 +-
 sources/unipercept/file_io.py                 |  63 ++--
 .../integrations/wandb_integration.py         |   2 +-
 sources/unipercept/model.py                   |   6 +-
 sources/unipercept/nn/backbones/fpn.py        |   3 +-
 sources/unipercept/nn/layers/conv/utils.py    |   6 +-
 sources/unipercept/state.py                   |   6 +-
 sources/unipercept/utils/decorators.py        |   6 +-
 sources/unipercept/utils/descriptors.py       |  11 +-
 sources/unipercept/utils/frozendict.py        |   9 +-
 sources/unipercept/utils/function.py          |  38 +--
 sources/unipercept/utils/missing.py           |   6 +-
 sources/unipercept/utils/tensorclass.py       | 278 ++++++------------
 .../utils/test_utils_iopath_handlers.py       |   2 +-
 25 files changed, 195 insertions(+), 352 deletions(-)

diff --git a/scripts/wandb_cleanup.py b/scripts/wandb_cleanup.py
index cab4b8c..3e9582e 100755
--- a/scripts/wandb_cleanup.py
+++ b/scripts/wandb_cleanup.py
@@ -8,7 +8,6 @@
 import pprint

 import wandb
-
 from unipercept.integrations.wandb_integration import artifact_historic_delete

 if __name__ == "__main__":
diff --git a/sources/unipercept/_api_config.py b/sources/unipercept/_api_config.py
index 1e97359..f8f3645 100644
--- a/sources/unipercept/_api_config.py
+++ b/sources/unipercept/_api_config.py
@@ -73,9 +73,10 @@ def _read_model_wandb(path: str) -> str:
     from unipercept import file_io

     run = _wandb_read_run(path)
-    import wandb
     from wandb.sdk.wandb_run import Run

+    import wandb
+
     assert path.startswith(WANDB_RUN_PREFIX)

     _logger.info("Reading W&B model checkpoint from %s", path)
@@ -390,8 +391,7 @@ def create_dataset(
     variant: T.Optional[str | re.Pattern],
     batch_size: int,
     return_loader: bool = True,
-) -> tuple[torch.utils.data.DataLoader[InputData], Metadata]:
-    ...
+) -> tuple[torch.utils.data.DataLoader[InputData], Metadata]: ...


 @T.overload
@@ -400,8 +400,7 @@ def create_dataset(
     variant: T.Optional[str | re.Pattern],
     batch_size: int,
     return_loader: bool = False,
-) -> tuple[T.Iterator[InputData], Metadata]:
-    ...
+) -> tuple[T.Iterator[InputData], Metadata]: ...


 def create_dataset(
diff --git a/sources/unipercept/_api_data.py b/sources/unipercept/_api_data.py
index 270d2df..0fed5a7 100644
--- a/sources/unipercept/_api_data.py
+++ b/sources/unipercept/_api_data.py
@@ -24,57 +24,48 @@ def __dir__() -> list[str]:
     @T.overload
     def get_dataset(
         query: T.Literal["cityscapes"],  # noqa: U100
-    ) -> type[unisets.cityscapes.CityscapesDataset]:
-        ...
+    ) -> type[unisets.cityscapes.CityscapesDataset]: ...

     @T.overload
     def get_dataset(
         query: T.Literal["cityscapes-vps"],  # noqa: U100
-    ) -> type[unisets.cityscapes.CityscapesVPSDataset]:
-        ...
+    ) -> type[unisets.cityscapes.CityscapesVPSDataset]: ...

     @T.overload
     def get_dataset(
         query: T.Literal["kitti-360"],  # noqa: U100
-    ) -> type[unisets.kitti_360.KITTI360Dataset]:
-        ...
+    ) -> type[unisets.kitti_360.KITTI360Dataset]: ...

     @T.overload
     def get_dataset(
         query: T.Literal["kitti-step"],  # noqa: U100
-    ) -> type[unisets.kitti_step.KITTISTEPDataset]:
-        ...
+    ) -> type[unisets.kitti_step.KITTISTEPDataset]: ...

     @T.overload
     def get_dataset(
         query: T.Literal["kitti-sem"],  # noqa: U100
-    ) -> type[unisets.kitti_sem.SemKITTIDataset]:
-        ...
+    ) -> type[unisets.kitti_sem.SemKITTIDataset]: ...

     @T.overload
     def get_dataset(
         query: T.Literal["vistas"],  # noqa: U100
-    ) -> type[unisets.vistas.VistasDataset]:
-        ...
+    ) -> type[unisets.vistas.VistasDataset]: ...

     @T.overload
     def get_dataset(
         query: T.Literal["wilddash"],  # noqa: U100
-    ) -> type[unisets.wilddash.WildDashDataset]:
-        ...
+    ) -> type[unisets.wilddash.WildDashDataset]: ...

     @T.overload
     def get_dataset(
         query: str,  # noqa: U100
-    ) -> type[unisets.PerceptionDataset]:
-        ...
+    ) -> type[unisets.PerceptionDataset]: ...

     @T.overload
     def get_dataset(
         query: None,  # noqa: U100
         **kwargs: T.Any,  # noqa: U100
-    ) -> type[unisets.PerceptionDataset]:
-        ...
+    ) -> type[unisets.PerceptionDataset]: ...

     def get_dataset(
diff --git a/sources/unipercept/cli/_config.py b/sources/unipercept/cli/_config.py
index 367dd77..c8e52db 100644
--- a/sources/unipercept/cli/_config.py
+++ b/sources/unipercept/cli/_config.py
@@ -63,7 +63,7 @@ def __call__(self, parser, namespace, values, option_string=None):
         cfg = up.read_config(name)
         cfg = self.apply_overrides(cfg, overrides)
         cfg["CLI"] = name
-        cfg["CLI_OVERRIDES"] = " ".join(overrides)
+        cfg["CLI_OVERRIDES"] = list(overrides)

         setattr(namespace, self.dest + "_path", name)
         setattr(namespace, self.dest + "_overrides", overrides)
diff --git a/sources/unipercept/config.py b/sources/unipercept/config.py
index 63bc476..ca9cd9a 100644
--- a/sources/unipercept/config.py
+++ b/sources/unipercept/config.py
@@ -118,8 +118,7 @@ def apply(f: EnvFilter | str, v: T.Any, /) -> bool:
 @T.overload
 def get_env(
     __type: type[_R], /, *keys: str, default: _R, filter: EnvFilter = EnvFilter.TRUTHY
-) -> _R:
-    ...
+) -> _R: ...


 @T.overload
@@ -129,8 +128,7 @@ def get_env(
     *keys: str,
     default: _R | None = None,
     filter: EnvFilter = EnvFilter.TRUTHY,
-) -> _R | None:
-    ...
+) -> _R | None: ...


 @functools.cache
@@ -534,12 +532,10 @@ def safe_update(cfg, key, value):
 if T.TYPE_CHECKING:

     class LazyObject(T.Generic[_L]):
-        def __getattr__(self, name: str) -> T.Any:
-            ...
+        def __getattr__(self, name: str) -> T.Any: ...

         @override
-        def __setattr__(self, __name: str, __value: Any) -> None:
-            ...
+        def __setattr__(self, __name: str, __value: Any) -> None: ...

 else:
     import types
@@ -628,18 +624,15 @@ def migrate_target(v: T.Any) -> T.Any:


 @T.overload
-def instantiate(cfg: T.Sequence[LazyObject[_L]], /) -> T.Sequence[_L]:
-    ...
+def instantiate(cfg: T.Sequence[LazyObject[_L]], /) -> T.Sequence[_L]: ...


 @T.overload
-def instantiate(cfg: LazyObject[_L], /) -> _L:
-    ...
+def instantiate(cfg: LazyObject[_L], /) -> _L: ...


 @T.overload
-def instantiate(cfg: T.Mapping[T.Any, LazyObject[_L]], /) -> T.Mapping[T.Any, _L]:
-    ...
+def instantiate(cfg: T.Mapping[T.Any, LazyObject[_L]], /) -> T.Mapping[T.Any, _L]: ...


 def instantiate(cfg: T.Any, /) -> T.Any:
diff --git a/sources/unipercept/data/_loader.py b/sources/unipercept/data/_loader.py
index 1a7ea05..4e33084 100644
--- a/sources/unipercept/data/_loader.py
+++ b/sources/unipercept/data/_loader.py
@@ -472,18 +472,15 @@ def queue_size(self) -> int:

     @property
     @abc.abstractmethod
-    def indices(self) -> T.Iterator[_I]:
-        ...
+    def indices(self) -> T.Iterator[_I]: ...

     @property
     @abc.abstractmethod
-    def sample_count(self) -> int:
-        ...
+    def sample_count(self) -> int: ...

     @property
     @abc.abstractmethod
-    def total_count(self) -> int:
-        ...
+    def total_count(self) -> int: ...

     @property
     def generator(self) -> torch.Generator:
diff --git a/sources/unipercept/data/ops.py b/sources/unipercept/data/ops.py
index b10954b..9f07d3d 100644
--- a/sources/unipercept/data/ops.py
+++ b/sources/unipercept/data/ops.py
@@ -77,8 +77,7 @@ def _run(self, inputs: InputData) -> InputData:
     if T.TYPE_CHECKING:

         @override
-        def __call__(self, inputs: InputData) -> InputData:
-            ...
+        def __call__(self, inputs: InputData) -> InputData: ...


 class CloneOp(Op):
diff --git a/sources/unipercept/data/tensors/helpers.py b/sources/unipercept/data/tensors/helpers.py
index 249fecf..30c4d0f 100644
--- a/sources/unipercept/data/tensors/helpers.py
+++ b/sources/unipercept/data/tensors/helpers.py
@@ -89,8 +89,7 @@ def multi_read(
     key: Any,
     *,
     no_entries: Literal[NoEntriesAction.ERROR] | Literal["error"],
-) -> Callable[Concatenate[Sequence[Mapping[Any, Any]], _ReadParams], _ReadReturn]:
-    ...
+) -> Callable[Concatenate[Sequence[Mapping[Any, Any]], _ReadParams], _ReadReturn]: ...


 @overload
@@ -101,8 +100,7 @@ def multi_read(
     no_entries: Literal[NoEntriesAction.NONE] | Literal["none"] = NoEntriesAction.NONE,
 ) -> Callable[
     Concatenate[Sequence[Mapping[Any, Any]], _ReadParams], _ReadReturn | None
-]:
-    ...
+]: ...


 def multi_read(
diff --git a/sources/unipercept/engine/_engine.py b/sources/unipercept/engine/_engine.py
index 06220b2..c354e48 100644
--- a/sources/unipercept/engine/_engine.py
+++ b/sources/unipercept/engine/_engine.py
@@ -28,7 +28,6 @@
 import torch.optim
 import torch.types
 import torch.utils.data
-import wandb
 from omegaconf import DictConfig, OmegaConf
 from PIL import Image as pil_image
 from tabulate import tabulate
@@ -37,6 +36,7 @@
 from torch.utils.data import Dataset
 from typing_extensions import override

+import wandb
 from unipercept import file_io
 from unipercept.data import DataLoaderFactory
 from unipercept.engine._params import EngineParams, EvaluationSuite, TrainingStage
@@ -634,8 +634,7 @@ def build_training_dataloader(
         dataloader: DataLoaderFactory,
         batch_size: int,
         gradient_accumulation: None = None,
-    ) -> tuple[torch.utils.data.DataLoader, int, None]:
-        ...
+    ) -> tuple[torch.utils.data.DataLoader, int, None]: ...

     @T.overload
     def build_training_dataloader(
@@ -643,8 +642,7 @@ def build_training_dataloader(
         dataloader: DataLoaderFactory,
         batch_size: int,
         gradient_accumulation: int,
-    ) -> tuple[torch.utils.data.DataLoader, int, int]:
-        ...
+    ) -> tuple[torch.utils.data.DataLoader, int, int]: ...

     def build_training_dataloader(
         self,
@@ -1247,7 +1245,9 @@ def _start_experiment_trackers(self, *, restart: bool = True) -> None:
         # Set up tracker-specific parameters
         specific_kwargs = {
             "wandb": {
-                "name": self.config_name,
+                "name": " ".join(
+                    [self.config_name, *self.config.get("CLI_OVERRIDES", [])]
+                ),
                 "job_type": job_type,
                 "reinit": True,
                 "group": group_name,
diff --git a/sources/unipercept/engine/accelerate.py b/sources/unipercept/engine/accelerate.py
index a2cf6d7..c457b81 100644
--- a/sources/unipercept/engine/accelerate.py
+++ b/sources/unipercept/engine/accelerate.py
@@ -102,23 +102,20 @@ def find_executable_batch_size(
         function: _Fin[_P, _R],
         *,
         starting_batch_size: int = 128,
-    ) -> _Fout[_P, _R]:
-        ...
+    ) -> _Fout[_P, _R]: ...

     @T.overload
     def find_executable_batch_size(
         function: None = None,
         *,
         starting_batch_size: int = 128,
-    ) -> T.Callable[[_Fin[_P, _R]], _Fout[_P, _R]]:
-        ...
+    ) -> T.Callable[[_Fin[_P, _R]], _Fout[_P, _R]]: ...

     def find_executable_batch_size(
         function: _Fin | None = None,
         *,
         starting_batch_size: int = 128,
-    ) -> T.Callable[[_Fin[_P, _R]], _Fout[_P, _R]] | _Fout[_P, _R]:
-        ...
+    ) -> T.Callable[[_Fin[_P, _R]], _Fout[_P, _R]] | _Fout[_P, _R]: ...

 else:
     find_executable_batch_size = accelerate.utils.find_executable_batch_size
diff --git a/sources/unipercept/engine/callbacks.py b/sources/unipercept/engine/callbacks.py
index ed45780..9e6b9e1 100644
--- a/sources/unipercept/engine/callbacks.py
+++ b/sources/unipercept/engine/callbacks.py
@@ -377,8 +377,7 @@ def __call__(
         state: State,
         control: Signal,
         **kwargs,
-    ) -> Signal | None:
-        ...
+    ) -> Signal | None: ...


 CallbackType: T.TypeAlias = CallbackProtocol | type[CallbackProtocol]
diff --git a/sources/unipercept/evaluators/_base.py b/sources/unipercept/evaluators/_base.py
index 90ec81c..c602457 100644
--- a/sources/unipercept/evaluators/_base.py
+++ b/sources/unipercept/evaluators/_base.py
@@ -56,23 +56,20 @@ def update(
         storage: TensorDictBase,  # noqa: U100
         inputs: TensorDictBase,  # noqa: U100
         outputs: TensorDictBase,  # noqa: U100
-    ) -> None:
-        ...
+    ) -> None: ...

     @abc.abstractmethod
     def compute(
         self,
         storage: TensorDictBase,  # noqa: U100
         **kwargs: T.Unpack[EvaluatorComputeKWArgs],  # noqa: U100
-    ) -> dict[str, int | float | str | bool | dict]:
-        ...
+    ) -> dict[str, int | float | str | bool | dict]: ...

     @abc.abstractmethod
     def plot(
         self,
         storage: TensorDictBase,  # noqa: U100
-    ) -> dict[str, pil_image.Image]:
-        ...
+    ) -> dict[str, pil_image.Image]: ...

     def _show_table(self, msg: str, tab: pd.DataFrame) -> None:
         from unipercept.log import create_table
diff --git a/sources/unipercept/file_io.py b/sources/unipercept/file_io.py
index 1752477..e0990dd 100644
--- a/sources/unipercept/file_io.py
+++ b/sources/unipercept/file_io.py
@@ -126,8 +126,7 @@ def with_local_path(
     *,
     manager: PathManager = _manager,
     **get_local_path_kwargs: T.Any,
-) -> T.Callable[[_PathStrCallable], _PathAnyCallable]:
-    ...
+) -> T.Callable[[_PathStrCallable], _PathAnyCallable]: ...


 @T.overload
@@ -136,8 +135,7 @@ def with_local_path(
     *,
     manager: PathManager = _manager,
     **get_local_path_kwargs: T.Any,
-) -> _PathAnyCallable:
-    ...
+) -> _PathAnyCallable: ...


 def with_local_path(
@@ -206,13 +204,11 @@ def __dir__():

     def opent(
         path: str, mode: str = "r", buffering: int = 32, **kwargs: T.Any
-    ) -> T.Iterable[T.Any]:
-        ...
+    ) -> T.Iterable[T.Any]: ...

     def open(
         path: str, mode: str = "r", buffering: int = -1, **kwargs: T.Any
-    ) -> T.IO[str] | T.IO[bytes]:
-        ...
+    ) -> T.IO[str] | T.IO[bytes]: ...

     def opena(
         self,
@@ -221,57 +217,40 @@ def __dir__():
         path: str,
         mode: str = "r",
         buffering: int = -1,
         callback_after_file_close: T.Optional[T.Callable[[None], None]] = None,
         **kwargs: T.Any,
-    ) -> T.IO[str] | T.IO[bytes]:
-        ...
+    ) -> T.IO[str] | T.IO[bytes]: ...

-    def async_join(*paths: str, **kwargs: T.Any) -> bool:
-        ...
+    def async_join(*paths: str, **kwargs: T.Any) -> bool: ...

-    def async_close(**kwargs: T.Any) -> bool:
-        ...
+    def async_close(**kwargs: T.Any) -> bool: ...

     def copy(
         src_path: str, dst_path: str, overwrite: bool = False, **kwargs: T.Any
-    ) -> bool:
-        ...
+    ) -> bool: ...

-    def mv(src_path: str, dst_path: str, **kwargs: T.Any) -> bool:
-        ...
+    def mv(src_path: str, dst_path: str, **kwargs: T.Any) -> bool: ...

     def copy_from_local(
         local_path: str, dst_path: str, overwrite: bool = False, **kwargs: T.Any
-    ) -> None:
-        ...
+    ) -> None: ...

-    def exists(path: str, **kwargs: T.Any) -> bool:
-        ...
+    def exists(path: str, **kwargs: T.Any) -> bool: ...

-    def isfile(path: str, **kwargs: T.Any) -> bool:
-        ...
+    def isfile(path: str, **kwargs: T.Any) -> bool: ...

-    def isdir(path: str, **kwargs: T.Any) -> bool:
-        ...
+    def isdir(path: str, **kwargs: T.Any) -> bool: ...

-    def ls(path: str, **kwargs: T.Any) -> list[str]:
-        ...
+    def ls(path: str, **kwargs: T.Any) -> list[str]: ...

-    def mkdirs(path: str, **kwargs: T.Any) -> None:
-        ...
+    def mkdirs(path: str, **kwargs: T.Any) -> None: ...

-    def rm(path: str, **kwargs: T.Any) -> None:
-        ...
+    def rm(path: str, **kwargs: T.Any) -> None: ...

-    def symlink(src_path: str, dst_path: str, **kwargs: T.Any) -> bool:
-        ...
+    def symlink(src_path: str, dst_path: str, **kwargs: T.Any) -> bool: ...

-    def set_cwd(path: T.Optional[str], **kwargs: T.Any) -> bool:
-        ...
+    def set_cwd(path: T.Optional[str], **kwargs: T.Any) -> bool: ...

-    def register_handler(handler: PathHandler, allow_override: bool = True) -> None:
-        ...
+    def register_handler(handler: PathHandler, allow_override: bool = True) -> None: ...

-    def set_strict_kwargs_checking(enable: bool) -> None:
-        ...
+    def set_strict_kwargs_checking(enable: bool) -> None: ...

-    def set_logging(enable_logging=True) -> None:
-        ...
+    def set_logging(enable_logging=True) -> None: ...
diff --git a/sources/unipercept/integrations/wandb_integration.py b/sources/unipercept/integrations/wandb_integration.py
index 42fc790..c5a4c51 100644
--- a/sources/unipercept/integrations/wandb_integration.py
+++ b/sources/unipercept/integrations/wandb_integration.py
@@ -13,9 +13,9 @@

 import torch.nn as nn
 import typing_extensions as TX
-import wandb
 import wandb.errors

+import wandb
 from unipercept import file_io
 from unipercept.config import get_env
 from unipercept.engine import EngineParams
diff --git a/sources/unipercept/model.py b/sources/unipercept/model.py
index 7fc569b..8e10ff0 100644
--- a/sources/unipercept/model.py
+++ b/sources/unipercept/model.py
@@ -308,12 +308,10 @@ def __init__(self, *args, **kwargs):

     @abc.abstractmethod
     @TX.override
-    def forward(self, inputs: ModelInput) -> ModelOutput:
-        ...
+    def forward(self, inputs: ModelInput) -> ModelOutput: ...

     @TX.override
-    def __call__(self, inputs: ModelInput) -> ModelOutput:
-        ...
+    def __call__(self, inputs: ModelInput) -> ModelOutput: ...


 class ModelFactory:
diff --git a/sources/unipercept/nn/backbones/fpn.py b/sources/unipercept/nn/backbones/fpn.py
index 4fee23e..5c737c2 100644
--- a/sources/unipercept/nn/backbones/fpn.py
+++ b/sources/unipercept/nn/backbones/fpn.py
@@ -221,8 +221,7 @@ class LastLevelP6P7(ExtraFPNBlock):

     if T.TYPE_CHECKING:
         # Backwards compatability
-        def __init__(self, channels: int):
-            ...
+        def __init__(self, channels: int): ...

     else:
diff --git a/sources/unipercept/nn/layers/conv/utils.py b/sources/unipercept/nn/layers/conv/utils.py
index 509455b..6f554a2 100644
--- a/sources/unipercept/nn/layers/conv/utils.py
+++ b/sources/unipercept/nn/layers/conv/utils.py
@@ -30,15 +30,13 @@
 @T.overload
 def get_output_channels(
     mod: nn.Module, *, use_weight: bool = True, none_ok: bool = False
-) -> int:
-    ...
+) -> int: ...


 @T.overload
 def get_output_channels(
     mod: nn.Module, *, use_weight: bool = True, none_ok: bool = True
-) -> int | None:
-    ...
+) -> int | None: ...


 def get_output_channels(
diff --git a/sources/unipercept/state.py b/sources/unipercept/state.py
index 47da0e6..72df919 100644
--- a/sources/unipercept/state.py
+++ b/sources/unipercept/state.py
@@ -125,13 +125,11 @@ def on_main_process():
         "_N", bound=torch.Tensor | dict[T.Any, torch.Tensor] | T.Sequence[torch.Tensor]
     )

-    def gather(tensor: _N) -> _N:
-        ...
+    def gather(tensor: _N) -> _N: ...

     def pad_across_processes(
         tensor: _N, dim: int = 0, pad_index: int = 0, pad_first: int = 0
-    ) -> _N:
-        ...
+    ) -> _N: ...

 else:
     gather = accelerate.utils.gather
diff --git a/sources/unipercept/utils/decorators.py b/sources/unipercept/utils/decorators.py
index 1509c4d..a685596 100644
--- a/sources/unipercept/utils/decorators.py
+++ b/sources/unipercept/utils/decorators.py
@@ -19,12 +19,10 @@
 class shadowmutate(Generic[_T, _P, _R]):

     @overload
-    def __new__(cls, fn: ShadowFunction, /, **kwargs) -> ShadowFunction:
-        ...
+    def __new__(cls, fn: ShadowFunction, /, **kwargs) -> ShadowFunction: ...

     @overload
-    def __new__(cls, **kwargs) -> Self:
-        ...
+    def __new__(cls, **kwargs) -> Self: ...

     def __new__(cls, *args, **kwargs) -> ShadowFunction | Self:
         # Case 1: Decorator as @shadowmutate without arguments
diff --git a/sources/unipercept/utils/descriptors.py b/sources/unipercept/utils/descriptors.py
index 978b0a3..956d9b3 100644
--- a/sources/unipercept/utils/descriptors.py
+++ b/sources/unipercept/utils/descriptors.py
@@ -34,8 +34,7 @@ class objectmagic(Generic[_T, _P, _R]):
     """

     @property
-    def __func__(self) -> Callable[Concatenate[_T, _P], _R]:
-        ...
+    def __func__(self) -> Callable[Concatenate[_T, _P], _R]: ...

     def __init__(self, fn: Callable[Concatenate[_T, _P], _R]) -> None:
         self.fn = fn
@@ -49,12 +48,12 @@ def __set_name__(self, owner: type[_T], name: str) -> None:
         self.owner = owner

     @overload
-    def __get__(self, obj: None, *args, **kwargs) -> Callable[_P, _R]:
-        ...
+    def __get__(self, obj: None, *args, **kwargs) -> Callable[_P, _R]: ...

     @overload
-    def __get__(self, obj: _T, *args, **kwargs) -> Callable[Concatenate[_T, _P], _R]:
-        ...
+    def __get__(
+        self, obj: _T, *args, **kwargs
+    ) -> Callable[Concatenate[_T, _P], _R]: ...

     def __get__(
         self, obj: _T | None, *args, **kwargs
diff --git a/sources/unipercept/utils/frozendict.py b/sources/unipercept/utils/frozendict.py
index 6bac38a..e8605a3 100644
--- a/sources/unipercept/utils/frozendict.py
+++ b/sources/unipercept/utils/frozendict.py
@@ -62,16 +62,13 @@ def fromkeys(cls, iterable: Iterable[_K], value: Optional[_V] = None) -> Self:
         return cls(super().fromkeys(iterable, value))

     @overload
-    def __new__(cls, map_it=None, /) -> Self:
-        ...
+    def __new__(cls, map_it=None, /) -> Self: ...

     @overload
-    def __new__(cls, map_it: Iterable[tuple[_K, _V]] | dict[_K, _V], /) -> Self:
-        ...
+    def __new__(cls, map_it: Iterable[tuple[_K, _V]] | dict[_K, _V], /) -> Self: ...

     @overload
-    def __new__(cls, map_it: None, /, **kwargs: _V) -> frozendict[str, _V]:
-        ...
+    def __new__(cls, map_it: None, /, **kwargs: _V) -> frozendict[str, _V]: ...

     def __new__(
         cls, map_it: Iterable[tuple[_K, _V]] | dict[_K, _V] | None = None, /, **kwargs
diff --git a/sources/unipercept/utils/function.py b/sources/unipercept/utils/function.py
index 04045eb..7a6cfc5 100644
--- a/sources/unipercept/utils/function.py
+++ b/sources/unipercept/utils/function.py
@@ -51,8 +51,7 @@ def multi_apply(
     list[_R6],
     list[_R7],
     list[_R8],
-]:
-    ...
+]: ...


 @overload
@@ -60,8 +59,9 @@ def multi_apply(
     func: Callable[_Ps, tuple[_R1, _R2, _R3, _R4, _R5, _R6, _R7]],
     *args: Iterable[Any],
     **kwargs: Any,
-) -> tuple[list[_R1], list[_R2], list[_R3], list[_R4], list[_R5], list[_R6], list[_R7]]:
-    ...
+) -> tuple[
+    list[_R1], list[_R2], list[_R3], list[_R4], list[_R5], list[_R6], list[_R7]
+]: ...


 @overload
@@ -69,8 +69,7 @@ def multi_apply(
     func: Callable[_Ps, tuple[_R1, _R2, _R3, _R4, _R5, _R6]],
     *args: Iterable[Any],
     **kwargs: Any,
-) -> tuple[list[_R1], list[_R2], list[_R3], list[_R4], list[_R5], list[_R6]]:
-    ...
+) -> tuple[list[_R1], list[_R2], list[_R3], list[_R4], list[_R5], list[_R6]]: ...


 @overload
@@ -78,8 +77,7 @@ def multi_apply(
     func: Callable[_Ps, tuple[_R1, _R2, _R3, _R4, _R5]],
     *args: Iterable[Any],
     **kwargs: Any,
-) -> tuple[list[_R1], list[_R2], list[_R3], list[_R4], list[_R5]]:
-    ...
+) -> tuple[list[_R1], list[_R2], list[_R3], list[_R4], list[_R5]]: ...


 @overload
@@ -87,8 +85,7 @@ def multi_apply(
     func: Callable[_Ps, tuple[_R1, _R2, _R3, _R4]],
     *args: Iterable[Any],
     **kwargs: Any,
-) -> tuple[list[_R1], list[_R2], list[_R3], list[_R4]]:
-    ...
+) -> tuple[list[_R1], list[_R2], list[_R3], list[_R4]]: ...


 @overload
@@ -96,8 +93,7 @@ def multi_apply(
     func: Callable[_Ps, tuple[_R1, _R2, _R3]],
     *args: Iterable[Any],
     **kwargs: Any,
-) -> tuple[list[_R1], list[_R2], list[_R3]]:
-    ...
+) -> tuple[list[_R1], list[_R2], list[_R3]]: ...


 @overload
@@ -105,8 +101,7 @@ def multi_apply(
     func: Callable[_Ps, tuple[_R1, _R2]],
     *args: Iterable[Any],
     **kwargs: Any,
-) -> tuple[list[_R1], list[_R2]]:
-    ...
+) -> tuple[list[_R1], list[_R2]]: ...


 @overload
@@ -114,8 +109,7 @@ def multi_apply(
     func: Callable[_Ps, _R1],
     *args: Iterable[Any],
     **kwargs: Any,
-) -> tuple[list[_R1]]:
-    ...
+) -> tuple[list[_R1]]: ...


 # @overload
@@ -175,25 +169,21 @@ def wrapper(*args: _Pt.args, **kwargs: _Pt.kwargs):


 @overload
-def to_ntuple(n: Literal[1]) -> Callable[[_T | Iterable[_T]], tuple[_T]]:
-    ...
+def to_ntuple(n: Literal[1]) -> Callable[[_T | Iterable[_T]], tuple[_T]]: ...


 @overload
-def to_ntuple(n: Literal[2]) -> Callable[[_T | Iterable[_T]], tuple[_T, _T]]:
-    ...
+def to_ntuple(n: Literal[2]) -> Callable[[_T | Iterable[_T]], tuple[_T, _T]]: ...


 @overload
-def to_ntuple(n: Literal[3]) -> Callable[[_T | Iterable[_T]], tuple[_T, _T, _T]]:
-    ...
+def to_ntuple(n: Literal[3]) -> Callable[[_T | Iterable[_T]], tuple[_T, _T, _T]]: ...


 @overload
 def to_ntuple(
     n: Literal[4],
-) -> Callable[[_T | Iterable[_T]], tuple[_T, _T, _T, _T]]:
-    ...
+) -> Callable[[_T | Iterable[_T]], tuple[_T, _T, _T, _T]]: ...


 def to_ntuple(n: int) -> Callable[[_T | Iterable[_T]], tuple[_T, ...]]:
diff --git a/sources/unipercept/utils/missing.py b/sources/unipercept/utils/missing.py
index 5559d43..48d58ee 100644
--- a/sources/unipercept/utils/missing.py
+++ b/sources/unipercept/utils/missing.py
@@ -19,9 +19,9 @@ class MissingValue:
     """

-    __sentinel_types__: T.ClassVar[
-        WeakValueDictionary[str, T.Self]
-    ] = WeakValueDictionary()
+    __sentinel_types__: T.ClassVar[WeakValueDictionary[str, T.Self]] = (
+        WeakValueDictionary()
+    )

     def __class_getitem__(cls, name: str) -> types.GenericAlias:
         return types.GenericAlias(cls, (name.upper()))
diff --git a/sources/unipercept/utils/tensorclass.py b/sources/unipercept/utils/tensorclass.py
index 9fc1fa1..c0a31bb 100644
--- a/sources/unipercept/utils/tensorclass.py
+++ b/sources/unipercept/utils/tensorclass.py
@@ -120,79 +120,63 @@ def from_tensordict(
         cls,
         tensordict: TensorDictBase,
         non_tensordict: dict[str, T.Any] | None = None,
-    ) -> T.Self:
-        ...
+    ) -> T.Self: ...

     # Inherited methods
     @property
-    def shape(self) -> torch.Size:
-        ...
+    def shape(self) -> torch.Size: ...

     @property
-    def names(self):
-        ...
+    def names(self): ...

     @names.setter
-    def names(self, value):
-        ...
+    def names(self, value): ...

-    def refine_names(self, *names):
-        ...
+    def refine_names(self, *names): ...

-    def rename(self, *names, **rename_map):
-        ...
+    def rename(self, *names, **rename_map): ...

-    def rename_(self, *names, **rename_map):
-        ...
+    def rename_(self, *names, **rename_map): ...

-    def size(self, dim: int | None = None) -> torch.Size | int:
-        ...
+    def size(self, dim: int | None = None) -> torch.Size | int: ...

     @property
-    def requires_grad(self) -> bool:
-        ...
+    def requires_grad(self) -> bool: ...

-    def ndimension(self) -> int:
-        ...
+    def ndimension(self) -> int: ...

     @property
-    def ndim(self) -> int:
-        ...
+    def ndim(self) -> int: ...

-    def dim(self) -> int:
-        ...
+    def dim(self) -> int: ...

-    def clear_device_(self) -> T.Self:
-        ...
+    def clear_device_(self) -> T.Self: ...

-    def is_shared(self) -> bool:
-        ...
+    def is_shared(self) -> bool: ...

-    def state_dict(self) -> dict[str, T.Any]:
-        ...
+    def state_dict(self) -> dict[str, T.Any]: ...

-    def load_state_dict(self, state_dict: dict[str, T.Any]) -> T.Self:
-        ...
+    def load_state_dict(self, state_dict: dict[str, T.Any]) -> T.Self: ...

-    def is_memmap(self) -> bool:
-        ...
+    def is_memmap(self) -> bool: ...

-    def numel(self) -> int:
-        ...
+    def numel(self) -> int: ...

-    def send(self, dst: int, init_tag: int = 0, pseudo_rand: bool = False) -> None:
-        ...
+    def send(
+        self, dst: int, init_tag: int = 0, pseudo_rand: bool = False
+    ) -> None: ...

-    def recv(self, src: int, init_tag: int = 0, pseudo_rand: bool = False) -> int:
-        ...
+    def recv(
+        self, src: int, init_tag: int = 0, pseudo_rand: bool = False
+    ) -> int: ...

-    def isend(self, dst: int, init_tag: int = 0, pseudo_rand: bool = False) -> int:
-        ...
+    def isend(
+        self, dst: int, init_tag: int = 0, pseudo_rand: bool = False
+    ) -> int: ...

     def irecv(
         self,
-    ) -> tuple[int, list[torch.Future]] | list[torch.Future] | None:
-        ...
+    ) -> tuple[int, list[torch.Future]] | list[torch.Future] | None: ...

     def reduce(
         self,
@@ -200,213 +184,147 @@ def reduce(
         op=torch.distributed.ReduceOp.SUM,
         async_op=False,
         return_premature=False,
-    ):
-        ...
+    ): ...

-    def pop(self) -> _CompatibleType:
-        ...
+    def pop(self) -> _CompatibleType: ...

-    def apply_(self, fn: T.Callable) -> T.Self:
-        ...
+    def apply_(self, fn: T.Callable) -> T.Self: ...

-    def apply(self) -> T.Self:
-        ...
+    def apply(self) -> T.Self: ...

-    def as_tensor(self):
-        ...
+    def as_tensor(self): ...

-    def update(self) -> T.Self:
-        ...
+    def update(self) -> T.Self: ...

-    def update_(self) -> T.Self:
-        ...
+    def update_(self) -> T.Self: ...

-    def update_at_(self) -> T.Self:
-        ...
+    def update_at_(self) -> T.Self: ...

-    def items(self) -> T.Iterator[tuple[str, _CompatibleType]]:
-        ...
+    def items(self) -> T.Iterator[tuple[str, _CompatibleType]]: ...

-    def values(self) -> T.Iterator[_CompatibleType]:
-        ...
+    def values(self) -> T.Iterator[_CompatibleType]: ...

     @property
-    def sorted_keys(self) -> list[NestedKey]:
-        ...
+    def sorted_keys(self) -> list[NestedKey]: ...

-    def flatten(self, start_dim=0, end_dim=-1):
-        ...
+    def flatten(self, start_dim=0, end_dim=-1): ...

-    def unflatten(self, dim, unflattened_size):
-        ...
+    def unflatten(self, dim, unflattened_size): ...

-    def exclude(self, *keys: str, inplace: bool = False) -> T.Self:
-        ...
+    def exclude(self, *keys: str, inplace: bool = False) -> T.Self: ...

-    def copy_(self, tensordict: T.Self) -> T.Self:
-        ...
+    def copy_(self, tensordict: T.Self) -> T.Self: ...

-    def copy_at_(self, tensordict: T.Self, idx: IndexType) -> T.Self:
-        ...
+    def copy_at_(self, tensordict: T.Self, idx: IndexType) -> T.Self: ...

-    def get_at(self) -> _CompatibleType:
-        ...
+    def get_at(self) -> _CompatibleType: ...

-    def memmap_like(self, prefix: str | None = None) -> T.Self:
-        ...
+    def memmap_like(self, prefix: str | None = None) -> T.Self: ...

-    def detach(self) -> T.Self:
-        ...
+    def detach(self) -> T.Self: ...

-    def to_h5(self):
-        ...
+    def to_h5(self): ...

-    def to_tensordict(self):
-        ...
+    def to_tensordict(self): ...

-    def zero_(self) -> T.Self:
-        ...
+    def zero_(self) -> T.Self: ...

-    def unbind(self, dim: int) -> tuple[T.Self, ...]:
-        ...
+    def unbind(self, dim: int) -> tuple[T.Self, ...]: ...

-    def chunk(self, chunks: int, dim: int = 0) -> tuple[T.Self, ...]:
-        ...
+    def chunk(self, chunks: int, dim: int = 0) -> tuple[T.Self, ...]: ...

-    def clone(self, recurse: bool = True) -> T.Self:
-        ...
+    def clone(self, recurse: bool = True) -> T.Self: ...

-    def cuda(self, device: int = 0) -> T.Self:
-        ...
+    def cuda(self, device: int = 0) -> T.Self: ...

-    def masked_select(self, mask: torch.Tensor) -> T.Self:
-        ...
+    def masked_select(self, mask: torch.Tensor) -> T.Self: ...

-    def to_dict(self) -> dict[str, T.Any]:
-        ...
+    def to_dict(self) -> dict[str, T.Any]: ...

-    def unsqueeze(self, dim: int) -> T.Self:
-        ...
+    def unsqueeze(self, dim: int) -> T.Self: ...

-    def squeeze(self, dim: int | None = None) -> T.Self:
-        ...
+    def squeeze(self, dim: int | None = None) -> T.Self: ...

-    def reshape(self) -> T.Self:
-        ...
+    def reshape(self) -> T.Self: ...

-    def split(self, split_size: int | list[int], dim: int = 0) -> list[T.Self]:
-        ...
+    def split(self, split_size: int | list[int], dim: int = 0) -> list[T.Self]: ...

-    def gather(self) -> T.Self:
-        ...
+    def gather(self) -> T.Self: ...

-    def view(self) -> T.Self:
-        ...
+    def view(self) -> T.Self: ...

-    def permute(self) -> T.Self:
-        ...
+    def permute(self) -> T.Self: ...

-    def all(self, dim: int | None = None) -> bool | T.Self:
-        ...
+    def all(self, dim: int | None = None) -> bool | T.Self: ...

-    def any(self, dim: int | None = None) -> bool | T.Self:
-        ...
+    def any(self, dim: int | None = None) -> bool | T.Self: ...

-    def get_sub_tensordict(self, idx: IndexType) -> T.Self:
-        ...
+    def get_sub_tensordict(self, idx: IndexType) -> T.Self: ...

-    def flatten_keys(self) -> T.Self:
-        ...
+    def flatten_keys(self) -> T.Self: ...

-    def unflatten_keys(self) -> T.Self:
-        ...
+    def unflatten_keys(self) -> T.Self: ...

-    def fill_(self, key: str, value: float | bool) -> T.Self:
-        ...
+    def fill_(self, key: str, value: float | bool) -> T.Self: ...

-    def empty(self) -> T.Self:
-        ...
+    def empty(self) -> T.Self: ...

-    def is_empty(self) -> bool:
-        ...
+    def is_empty(self) -> bool: ...

     @property
-    def is_locked(self) -> bool:
-        ...
+    def is_locked(self) -> bool: ...

     @is_locked.setter
-    def is_locked(self, value: bool) -> None:
-        ...
+    def is_locked(self, value: bool) -> None: ...

-    def lock_(self) -> T.Self:
-        ...
+    def lock_(self) -> T.Self: ...

-    def unlock_(self) -> T.Self:
-        ...
+    def unlock_(self) -> T.Self: ...

     @property
-    def batch_dims(self) -> int:
-        ...
+    def batch_dims(self) -> int: ...

-    def pin_memory(self) -> T.Self:
-        ...
+    def pin_memory(self) -> T.Self: ...

-    def expand(self, *shape: int) -> T.Self:
-        ...
+    def expand(self, *shape: int) -> T.Self: ...

-    def set(self) -> T.Self:
-        ...
+    def set(self) -> T.Self: ...

-    def set_(self) -> T.Self:
-        ...
+    def set_(self) -> T.Self: ...

-    def del_(self, key: str) -> T.Self:
-        ...
+    def del_(self, key: str) -> T.Self: ...

-    def rename_key_(self) -> T.Self:
-        ...
+    def rename_key_(self) -> T.Self: ...

-    def entry_class(self, key: NestedKey) -> type:
-        ...
+    def entry_class(self, key: NestedKey) -> type: ...

-    def set_at_(self) -> T.Self:
-        ...
+    def set_at_(self) -> T.Self: ...

-    def get(self) -> T.Any:
-        ...
+    def get(self) -> T.Any: ...

-    def share_memory_(self) -> T.Self:
-        ...
+    def share_memory_(self) -> T.Self: ...

-    def detach_(self) -> T.Self:
-        ...
+    def detach_(self) -> T.Self: ...

-    def memmap_(self) -> T.Self:
-        ...
+    def memmap_(self) -> T.Self: ...

     @classmethod
-    def load_memmap(cls, prefix: str) -> T.Self:
-        ...
+    def load_memmap(cls, prefix: str) -> T.Self: ...

     def to(
         self, dest: torch.types.Device | torch.Size | type, **kwargs: T.Any
-    ) -> T.Self:
-        ...
+    ) -> T.Self: ...

-    def masked_fill_(self, mask: torch.Tensor, value: float | int | bool) -> T.Self:
-        ...
+    def masked_fill_(
+        self, mask: torch.Tensor, value: float | int | bool
+    ) -> T.Self: ...

-    def masked_fill(self, mask: torch.Tensor, value: float | bool) -> T.Self:
-        ...
+    def masked_fill(self, mask: torch.Tensor, value: float | bool) -> T.Self: ...

-    def is_contiguous(self) -> bool:
-        ...
+    def is_contiguous(self) -> bool: ...

-    def contiguous(self) -> T.Self:
-        ...
+    def contiguous(self) -> T.Self: ...

-    def select(self) -> T.Self:
-        ...
+    def select(self) -> T.Self: ...

-    def keys(self) -> T.Sequence[str]:
-        ...
+    def keys(self) -> T.Sequence[str]: ...
diff --git a/tests/unipercept/utils/test_utils_iopath_handlers.py b/tests/unipercept/utils/test_utils_iopath_handlers.py
index a76b63d..6f6ce51 100644
--- a/tests/unipercept/utils/test_utils_iopath_handlers.py
+++ b/tests/unipercept/utils/test_utils_iopath_handlers.py
@@ -5,8 +5,8 @@

 from pathlib import Path

 import pytest
-import wandb

+import wandb
 from unipercept import file_io
 from unipercept.utils.iopath_handlers import WebDAVPathHandler
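
Below is a minimal sketch of the run naming this patch introduces, kept
outside the diff. The helper name `build_run_name`, the plain dict standing
in for the loaded `DictConfig`, and the example config name and override
strings are hypothetical; only the `CLI_OVERRIDES` key, the empty-list
default, and the `" ".join(...)` composition come from the patch itself:

    def build_run_name(config_name: str, cfg: dict) -> str:
        # cli/_config.py now stores CLI overrides as a list of "key=value"
        # strings, so each override can be appended to the W&B run name
        # individually instead of as one pre-joined string.
        overrides = cfg.get("CLI_OVERRIDES", [])
        return " ".join([config_name, *overrides])

    # Hypothetical usage:
    cfg = {"CLI_OVERRIDES": ["engine.params.batch_size=8"]}
    assert build_run_name("example", cfg) == "example engine.params.batch_size=8"

With no overrides the name degrades to the bare configuration name, which
matches the previous behavior of passing `self.config_name` directly.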