From 4954020c3ab9d3c56009768ce7b8aeeb2a955891 Mon Sep 17 00:00:00 2001
From: Xi Xia
Date: Sat, 16 Apr 2022 21:10:31 +0800
Subject: [PATCH] Add cuda module to core

---
 core/__init__.pyi         |  2 ++
 core.pyi => core/core.pyi | 28 +++++++++++++++++-----------
 core/cuda.pyi             |  7 +++++++
 3 files changed, 26 insertions(+), 11 deletions(-)
 create mode 100644 core/__init__.pyi
 rename core.pyi => core/core.pyi (92%)
 create mode 100644 core/cuda.pyi

diff --git a/core/__init__.pyi b/core/__init__.pyi
new file mode 100644
index 0000000..97acc20
--- /dev/null
+++ b/core/__init__.pyi
@@ -0,0 +1,2 @@
+from .core import *
+from . import cuda
diff --git a/core.pyi b/core/core.pyi
similarity index 92%
rename from core.pyi
rename to core/core.pyi
index 0e968cd..ad1840f 100644
--- a/core.pyi
+++ b/core/core.pyi
@@ -9,7 +9,7 @@ from typing import (
     Tuple,
     List,
 )
-from numpy import ndarray
+from numpy import ArrayLike, ndarray

 class Blob:
     def __init__(self, *args, **kwargs) -> None: ...
@@ -52,7 +52,8 @@ class Dtype:
     def byte_code(self) -> DtypeCode: ...
     def byte_size(self) -> int: ...

-bool = Dtype.Bool
+# bool = Dtype.Bool is omitted: the name would conflict with the builtin bool
+bool8 = Dtype.Bool
 float32 = Dtype.Float32
 float64 = Dtype.Float64
 int8 = Dtype.Int8
@@ -101,9 +102,9 @@ class HashMap:
         self,
         init_capacity: int,
         key_dtype: Dtype,
-        key_element_shape: SizeVector,
+        key_element_shape: Iterable,
         value_dtype: Dtype,
-        value_element_shape: SizeVector,
+        value_element_shape: Iterable,
         device: Device = Device("CPU:0"),
     ) -> None: ...
     @overload
@@ -111,9 +112,9 @@ class HashMap:
         self,
         init_capacity: int,
         key_dtype: Dtype,
-        key_element_shape: SizeVector,
-        value_dtypes: List[Dtype],
-        value_element_shapes: List[SizeVector],
+        key_element_shape: Iterable,
+        value_dtypes: Sequence[Dtype],
+        value_element_shapes: Sequence[Iterable],
         device: Device = Device("CPU:0"),
     ) -> None: ...
     def activate(self, keys: Tensor) -> Tuple[Tensor, Tensor]: ...
@@ -125,9 +126,9 @@ class HashMap:
     def erase(self, keys: Tensor) -> Tensor: ...
     def find(self, keys: Tensor) -> Tuple[Tensor, Tensor]: ...
     @overload
-    def insert(keys: Tensor, values: Tensor) -> Tuple[Tensor, Tensor]: ...
+    def insert(self, keys: Tensor, values: Tensor) -> Tuple[Tensor, Tensor]: ...
     @overload
-    def insert(keys: Tensor, list_values: Tensor) -> Tuple[Tensor, Tensor]: ...
+    def insert(self, keys: Tensor, list_values: Sequence[Tensor]) -> Tuple[Tensor, Tensor]: ...
     def key_tensor(self) -> Tensor: ...
     @classmethod
     def load(cls, file_name: str) -> HashMap: ...
@@ -172,7 +173,7 @@ class Tensor:
     @overload
     def __init__(
         self,
-        np_array: ndarray,
+        np_array: ArrayLike,
         dtype: Optional[Dtype] = None,
         device: Optional[Device] = None,
     ) -> None: ...
@@ -295,10 +296,15 @@ class Tensor:
     def max(self, dim: Optional[SizeVector] = None, keepdim: bool = False) -> Tensor: ...
     def mean(self, dim: Optional[SizeVector] = None, keepdim: bool = False) -> Tensor: ...
     def min(self, dim: Optional[SizeVector] = None, keepdim: bool = False) -> Tensor: ...
+    @overload
+    def to(self, dtype: Dtype, copy: bool = False) -> Tensor: ...
+    @overload
+    def to(self, device: Device, copy: bool = False) -> Tensor: ...
+    def __getitem__(self, indices: Tensor) -> Tensor: ...

 def addmm(input: Tensor, A: Tensor, B: Tensor, alpha: float, beta: float) -> Tensor: ...
 def append(self: Tensor, values: Tensor, axis: Optional[int] = None) -> Tensor: ...
-def concatenate(tensors: List[Tensor], axis: Optional[int] = None) -> Tensor: ...
+def concatenate(tensors: Sequence[Tensor], axis: Optional[int] = None) -> Tensor: ...
 def det(A: Tensor) -> float: ...
 def inv(A: Tensor) -> Tensor: ...
 def lstsq(A: Tensor, B: Tensor) -> Tensor: ...
diff --git a/core/cuda.pyi b/core/cuda.pyi
new file mode 100644
index 0000000..4be54d3
--- /dev/null
+++ b/core/cuda.pyi
@@ -0,0 +1,7 @@
+from typing import Optional
+from . import core
+
+def device_count() -> int: ...
+def is_available() -> bool: ...
+def release_cache() -> None: ...
+def synchronize(device: Optional[core.Device] = None) -> None: ...
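
For context, a minimal usage sketch of the API these stubs describe, showing how the new core.cuda module is reached through the package layout added above (core/__init__.pyi re-exports core.core and exposes core.cuda). It assumes the runtime package is importable as `core` and that Device accepts a "CUDA:0" string in the same "<type>:<index>" form as the "CPU:0" defaults in the stubs; pick_device and run are illustrative helper names, and only functions declared in this patch are called.

# Usage sketch only: assumes the runtime package matching these stubs is
# importable as `core`; pick_device/run are illustrative helper names.
import core
from core import cuda

def pick_device(prefer_cuda: bool = True) -> core.Device:
    # Prefer a CUDA device when one is usable, otherwise fall back to the CPU.
    if prefer_cuda and cuda.is_available() and cuda.device_count() > 0:
        return core.Device("CUDA:0")
    return core.Device("CPU:0")

def run() -> None:
    device = pick_device()
    # Matches the Tensor.__init__ overload taking an array-like plus optional
    # dtype and device.
    t = core.Tensor([[1.0, 2.0], [3.0, 4.0]], dtype=core.float32, device=device)
    print(t.mean())
    if cuda.is_available():
        cuda.synchronize(device)  # wait for queued device work to finish
        cuda.release_cache()      # free cached device memory

if __name__ == "__main__":
    run()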