Increase test coverage in backend Numpy #939

Closed
41 changes: 35 additions & 6 deletions keras_core/backend/common/variables.py
@@ -386,6 +386,7 @@ def initialize_all_variables():
    "int64",
    "bfloat16",
    "bool",
    "bool_",
    "string",
}
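Note: "bool_" is needed alongside "bool" because numpy's scalar boolean type stringifies with a trailing underscore, so the parsing path in standardize_dtype below yields "bool_". A quick illustration (editor's sketch, not part of the diff):

import numpy as np

# str() of the numpy scalar class keeps numpy's trailing underscore.
s = str(np.bool_)  # "<class 'numpy.bool_'>"
name = s.split(".")[-1].replace("'>", "")
assert name == "bool_"  # hence the new ALLOWED_DTYPES entry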

@@ -401,17 +402,45 @@ def initialize_all_variables():

@keras_core_export("keras_core.backend.standardize_dtype")
def standardize_dtype(dtype):
    """Standardizes the data type.

    Args:
        dtype: Data type to be standardized.

    Returns:
        A standardized string representation of the data type.

    Raises:
        ValueError: If the data type is not recognized.
    """
    # Return the default float type if dtype is None
    if dtype is None:
        return config.floatx()

    # Map to a standard Python type, if applicable
    dtype = PYTHON_DTYPES_MAP.get(dtype, dtype)

    # Convert dtype to string for further checks
    dtype_str = str(dtype)

    # Use the 'name' attribute for numpy dtypes
    if hasattr(dtype, "name"):
        dtype_str = dtype.name
    # Handle PyTorch dtypes
    elif "torch" in dtype_str:
        if dtype_str.startswith("<class 'torch."):
            dtype_str = dtype_str.split("'")[-2].split(".")[-1]
        else:
            dtype_str = dtype_str.split(".")[-1]
    # Handle numpy dtypes
    elif dtype_str.startswith("<class 'numpy."):
        dtype_str = dtype_str.split(".")[-1].replace("'>", "")

    # Check that the resulting dtype is allowed
    if dtype_str not in ALLOWED_DTYPES:
        raise ValueError(f"Invalid dtype: {dtype_str}")
    return dtype_str


def standardize_shape(shape):
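Before the test changes, a minimal sketch of what the rewritten standardize_dtype returns for the kinds of inputs exercised below (editor's sketch; assumes a torch install and the default floatx of "float32"):

import numpy as np
import torch

from keras_core.backend.common.variables import standardize_dtype

assert standardize_dtype(None) == "float32"  # falls back to config.floatx()
assert standardize_dtype(float) == "float32"  # via PYTHON_DTYPES_MAP
assert standardize_dtype(np.dtype("int32")) == "int32"  # uses the `name` attribute
assert standardize_dtype(torch.float32) == "float32"  # "torch.float32" split on "."
assert standardize_dtype("<class 'numpy.float64'>") == "float64"  # repr-style string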
113 changes: 101 additions & 12 deletions keras_core/backend/common/variables_test.py
@@ -1,15 +1,20 @@
import numpy as np
import torch

from keras_core import backend
from keras_core import initializers
from keras_core.backend import config
from keras_core.backend.common.variables import ALLOWED_DTYPES
from keras_core.backend.common.variables import AutocastScope
from keras_core.backend.common.variables import KerasVariable
from keras_core.backend.common.variables import standardize_dtype
from keras_core.backend.common.variables import standardize_shape
from keras_core.testing import test_case


class VariablesTest(test_case.TestCase):
    def test_deferred_initialization(self):
        """Test that variables are not initialized until they are used."""
        with backend.StatelessScope():
            v = backend.Variable(
                initializer=initializers.RandomNormal(), shape=(2, 2)
@@ -24,6 +29,7 @@ def test_deferred_initialization(self):
            v = backend.Variable(initializer=0)

    def test_deferred_assignment(self):
        """Test that variables are not assigned until they are used."""
        with backend.StatelessScope() as scope:
            v = backend.Variable(
                initializer=initializers.RandomNormal(), shape=(2, 2)
@@ -36,6 +42,7 @@ def test_deferred_assignment(self):
        self.assertAllClose(out, np.ones((2, 2)))

    def test_autocasting(self):
        """Test that variables are autocast inside an autocast scope."""
        v = backend.Variable(
            initializer=initializers.RandomNormal(),
            shape=(2, 2),
@@ -65,13 +72,14 @@ def test_autocasting(self):
        self.assertEqual(backend.standardize_dtype(v.value.dtype), "int32")

    def test_standardize_dtype_with_torch_dtype(self):
        """Test that torch dtypes are converted to strings."""
        import torch

        x = torch.randn(4, 4)
        backend.standardize_dtype(x.dtype)

    def test_name_validation(self):
        """Test that variable names are validated."""
        with self.assertRaisesRegex(
            ValueError, "Argument `name` must be a string"
        ):
@@ -84,12 +92,14 @@ def test_name_validation(self):
            )

    def test_standardize_shape_with_none(self):
        """Raises a ValueError when shape is None."""
        with self.assertRaisesRegex(
            ValueError, "Undefined shapes are not supported."
        ):
            standardize_shape(None)

    def test_standardize_shape_with_non_iterable(self):
        """Raises a ValueError when shape is not iterable."""
        with self.assertRaisesRegex(
            ValueError, "Cannot convert '42' to a shape."
        ):
@@ -100,28 +110,107 @@ def test_standardize_shape_with_valid_input(self):
        standardized_shape = standardize_shape(shape)
        self.assertEqual(standardized_shape, (3, 4, 5))

    def test_standardize_shape_with_non_integer_entry(self):
        """Raises a ValueError when shape contains non-integer entries."""
        with self.assertRaisesRegex(
            ValueError,
            "Cannot convert '\\(3, 4, 'a'\\)' to a shape. Found invalid",
        ):
            standardize_shape([3, 4, "a"])

    def test_standardize_shape_with_negative_entry(self):
        """Raises a ValueError when shape contains negative entries."""
        with self.assertRaisesRegex(
            ValueError,
            "Cannot convert '\\(3, 4, -5\\)' to a shape. Negative dimensions",
        ):
            standardize_shape([3, 4, -5])

    def test_autocast_scope_with_non_float_dtype(self):
        """Raises a ValueError when dtype is not a floating-point dtype."""
        with self.assertRaisesRegex(
            ValueError,
            "`AutocastScope` can only be used with a floating-point",
        ):
            _ = AutocastScope("int32")


class StandardizeDtypeTest(test_case.TestCase):
    def test_standardize_dtype_with_none(self):
        """Returns config.floatx() when dtype is None."""
        self.assertEqual(standardize_dtype(None), config.floatx())

    def test_standardize_dtype_with_float(self):
        """Returns "float32" when dtype is float."""
        self.assertEqual(standardize_dtype(float), "float32")

    def test_standardize_dtype_with_int(self):
        """Returns "int64" on TensorFlow, and "int32" on other backends."""
        expected = "int64" if config.backend() == "tensorflow" else "int32"
        self.assertEqual(standardize_dtype(int), expected)

    def test_standardize_dtype_with_str(self):
        """Returns "string" when dtype is str."""
        self.assertEqual(standardize_dtype(str), "string")

    def test_standardize_dtype_with_torch_dtype(self):
        """Returns "float32" for the string "<class 'torch.float32'>"."""
        torch_dtype = "<class 'torch.float32'>"
        self.assertEqual(standardize_dtype(torch_dtype), "float32")

    def test_standardize_dtype_with_numpy_dtype(self):
        """Returns "float64" for the string "<class 'numpy.float64'>"."""
        numpy_dtype = "<class 'numpy.float64'>"
        self.assertEqual(standardize_dtype(numpy_dtype), "float64")

    def test_standardize_dtype_with_invalid_dtype(self):
        """Raises a ValueError when dtype is invalid."""
        invalid_dtype = "invalid_dtype"
        with self.assertRaisesRegex(
            ValueError, f"Invalid dtype: {invalid_dtype}"
        ):
            standardize_dtype(invalid_dtype)

    def test_standardize_dtype_with_allowed_dtypes(self):
        """Returns the dtype unchanged when it is an allowed dtype string."""
        for dtype in ALLOWED_DTYPES:
            self.assertEqual(standardize_dtype(dtype), dtype)

    def test_standardize_dtype_with_torch_dtypes(self):
        """Returns the canonical name when dtype is a torch dtype."""
        self.assertEqual(standardize_dtype(torch.float32), "float32")
        self.assertEqual(standardize_dtype(torch.float64), "float64")
        self.assertEqual(standardize_dtype(torch.int32), "int32")
        self.assertEqual(standardize_dtype(torch.int64), "int64")
        self.assertEqual(standardize_dtype(torch.bool), "bool")

    def test_standardize_dtype_with_numpy_dtypes(self):
        """Returns the canonical name when dtype is a numpy scalar type."""
        self.assertEqual(standardize_dtype(np.float32), "float32")
        self.assertEqual(standardize_dtype(np.float64), "float64")
        self.assertEqual(standardize_dtype(np.int32), "int32")
        self.assertEqual(standardize_dtype(np.int64), "int64")
        self.assertEqual(standardize_dtype(np.bool_), "bool_")

    def test_standardize_dtype_with_builtin_types(self):
        """Returns the mapped dtype for builtin types such as int or float."""
        expected_int = "int64" if config.backend() == "tensorflow" else "int32"
        self.assertEqual(standardize_dtype(int), expected_int)
        self.assertEqual(standardize_dtype(float), "float32")

    def test_standardize_dtype_with_invalid_types(self):
        """Raises a ValueError when dtype is invalid."""
        with self.assertRaises(ValueError):
            standardize_dtype("invalid_type")

    def test_standardize_dtype_with_none_input(self):
        """Returns the default float type ("float32") when dtype is None."""
        self.assertEqual(standardize_dtype(None), "float32")

    def test_standardize_dtype_with_custom_types(self):
        """Raises a ValueError when dtype is a custom type."""

        class CustomType:
            pass

        with self.assertRaises(ValueError):
            standardize_dtype(CustomType)
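The quote-and-dot split chain used for repr-style torch strings is easy to misread; as a worked example (pure Python, editor's sketch):

s = "<class 'torch.float32'>"
inner = s.split("'")[-2]  # ["<class ", "torch.float32", ">"] -> "torch.float32"
name = inner.split(".")[-1]  # -> "float32"
assert name == "float32"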
77 changes: 77 additions & 0 deletions keras_core/backend/numpy/core_test.py
@@ -0,0 +1,77 @@
import numpy as np
import pytest

from keras_core.backend import config
from keras_core.backend.common.variables import standardize_dtype
from keras_core.backend.numpy import core as numpy_core
from keras_core.testing import test_case


class ConvertToTensorTest(test_case.TestCase):
    def test_convert_numpy_to_tensor(self):
        """Convert a numpy array to a tensor."""
        array = np.array([1, 2, 3])
        tensor = numpy_core.convert_to_tensor(array)
        self.assertTrue(numpy_core.is_tensor(tensor))

    def test_convert_variable_to_tensor(self):
        """Convert a Variable instance to a tensor."""
        value = [1, 2, 3]
        var = numpy_core.Variable(value)
        tensor = numpy_core.convert_to_tensor(var)
        self.assertTrue(numpy_core.is_tensor(tensor))
        self.assertTrue(np.array_equal(tensor, var.value))

    def test_convert_with_dtype_float64(self):
        """Convert with an explicit float64 dtype."""
        array = np.array([1, 2, 3])
        tensor = numpy_core.convert_to_tensor(array, dtype=np.float64)
        self.assertTrue(numpy_core.is_tensor(tensor))
        self.assertEqual(tensor.dtype, np.float64)

    def test_convert_with_dtype_int32(self):
        """Convert with an explicit int32 dtype."""
        array = np.array([1, 2, 3])
        tensor = numpy_core.convert_to_tensor(array, dtype=np.int32)
        self.assertEqual(tensor.dtype, np.int32)

    def test_convert_sparse_error(self):
        """Raise an error when `sparse=True` is requested."""
        with self.assertRaisesRegex(
            ValueError, "`sparse=True` is not supported with numpy backend"
        ):
            numpy_core.convert_to_tensor([1, 2, 3], sparse=True)

def test_standardize_dtype_with_known_python_types():
    # `int` standardizes to "int64" on TensorFlow and "int32" elsewhere.
    expected_int = "int64" if config.backend() == "tensorflow" else "int32"
    assert standardize_dtype(int) == expected_int
    assert standardize_dtype(float) == "float32"
    assert standardize_dtype(bool) == "bool"
    assert standardize_dtype(str) == "string"


def test_standardize_dtype_with_numpy_types():
    assert standardize_dtype(np.float32) == "float32"
    assert standardize_dtype(np.float64) == "float64"
    assert standardize_dtype(np.int32) == "int32"
    assert standardize_dtype(np.int64) == "int64"
    assert standardize_dtype(np.uint32) == "uint32"
    # np.bool_ standardizes to "bool_", matching the entry added to
    # ALLOWED_DTYPES in this change.
    assert standardize_dtype(np.bool_) == "bool_"


def test_standardize_dtype_with_string_values():
    assert standardize_dtype("float32") == "float32"
    assert standardize_dtype("int64") == "int64"
    assert standardize_dtype("bool") == "bool"
    assert standardize_dtype("string") == "string"


def test_standardize_dtype_with_unknown_types():
    with pytest.raises(ValueError, match="Invalid dtype"):
        standardize_dtype("unknown_dtype")


def test_standardize_dtype_with_torch_types(mocker):
    # Mock a torch dtype object for this test (requires pytest-mock).
    class MockTorchDtype:
        def __str__(self):
            return "torch.float32"

    mocker.patch("keras_core.backend.config.backend", return_value="torch")
    assert standardize_dtype(MockTorchDtype()) == "float32"
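As a quick usage sketch of the numpy-backend helpers exercised above (editor's sketch; mirrors the assertions in ConvertToTensorTest):

import numpy as np

from keras_core.backend.numpy import core as numpy_core

x = numpy_core.convert_to_tensor([1, 2, 3], dtype=np.float64)
assert numpy_core.is_tensor(x)
assert x.dtype == np.float64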
