From c77d58d25b8939609496f996b87e61013758b715 Mon Sep 17 00:00:00 2001
From: megalinter-bot <129584137+megalinter-bot@users.noreply.github.com>
Date: Fri, 12 Jul 2024 14:17:28 +0000
Subject: [PATCH] style: apply automated linter fixes

---
 src/safeds/ml/nn/_model.py                    |  4 ++--
 .../ml/nn/layers/_convolutional2d_layer.py    |  4 ++--
 src/safeds/ml/nn/layers/_flatten_layer.py     |  2 +-
 src/safeds/ml/nn/layers/_forward_layer.py     |  2 +-
 src/safeds/ml/nn/layers/_gru_layer.py         |  3 +--
 src/safeds/ml/nn/layers/_layer.py             |  2 +-
 src/safeds/ml/nn/layers/_lstm_layer.py        |  3 +--
 src/safeds/ml/nn/layers/_pooling2d_layer.py   |  2 +-
 src/safeds/ml/nn/typing/_tensor_shape.py      | 22 +++++++++----------
 .../nn/layers/test_convolutional2d_layer.py   | 22 +++++++++----------
 .../safeds/ml/nn/layers/test_flatten_layer.py |  1 -
 .../safeds/ml/nn/layers/test_forward_layer.py |  7 +++---
 tests/safeds/ml/nn/layers/test_gru_layer.py   |  7 +++---
 tests/safeds/ml/nn/layers/test_lstm_layer.py  |  7 +++---
 .../ml/nn/layers/test_pooling2d_layer.py      |  2 +-
 tests/safeds/ml/nn/test_model.py              | 12 +++++-----
 16 files changed, 51 insertions(+), 51 deletions(-)

diff --git a/src/safeds/ml/nn/_model.py b/src/safeds/ml/nn/_model.py
index f2a8c9c29..aa2927af2 100644
--- a/src/safeds/ml/nn/_model.py
+++ b/src/safeds/ml/nn/_model.py
@@ -117,7 +117,7 @@ def get_parameter_count(self) -> int:
         last_input_neurons = self.input_size if isinstance(self.input_size, int) else 0
         last_input_channels = self.input_size.channel if isinstance(self.input_size, ModelImageSize) else 0
         for layer in self._layers:
-            layer._set_input_size(last_input_neurons if last_type=="int" else last_input_channels)
+            layer._set_input_size(last_input_neurons if last_type == "int" else last_input_channels)
             summand += layer.get_parameter_count(TensorShape([last_input_neurons, last_input_channels]))
             last_input_neurons = layer.output_size if isinstance(layer.output_size, int) else 0
             last_input_channels = layer.output_size.channel if isinstance(layer.output_size, ModelImageSize) else 0
@@ -411,7 +411,7 @@ def get_parameter_count(self) -> int:
         last_input_neurons = self.input_size if isinstance(self.input_size, int) else 0
         last_input_channels = self.input_size.channel if isinstance(self.input_size, ModelImageSize) else 0
         for layer in self._layers:
-            layer._set_input_size(last_input_neurons if last_type=="int" else last_input_channels)
+            layer._set_input_size(last_input_neurons if last_type == "int" else last_input_channels)
             summand += layer.get_parameter_count(TensorShape([last_input_neurons, last_input_channels]))
             last_input_neurons = layer.output_size if isinstance(layer.output_size, int) else 0
             last_input_channels = layer.output_size.channel if isinstance(layer.output_size, ModelImageSize) else 0
diff --git a/src/safeds/ml/nn/layers/_convolutional2d_layer.py b/src/safeds/ml/nn/layers/_convolutional2d_layer.py
index bc4c4d4d7..c59f02480 100644
--- a/src/safeds/ml/nn/layers/_convolutional2d_layer.py
+++ b/src/safeds/ml/nn/layers/_convolutional2d_layer.py
@@ -158,7 +158,7 @@ def __sizeof__(self) -> int:
         )
 
     def get_parameter_count(self, input_size: TensorShape) -> int:
-        return int((self._kernel_size*self._kernel_size*input_size._dims[1]+1)*self._output_channel)
+        return int((self._kernel_size * self._kernel_size * input_size._dims[1] + 1) * self._output_channel)
 
 
 class ConvolutionalTranspose2DLayer(Convolutional2DLayer):
@@ -266,4 +266,4 @@ def __sizeof__(self) -> int:
         return sys.getsizeof(self._output_padding) + super().__sizeof__()
 
     def get_parameter_count(self, input_size: TensorShape) -> int:
-        return int((self._kernel_size*self._kernel_size*input_size._dims[1]+1)*self._output_channel)
+        return int((self._kernel_size * self._kernel_size * input_size._dims[1] + 1) * self._output_channel)
diff --git a/src/safeds/ml/nn/layers/_flatten_layer.py b/src/safeds/ml/nn/layers/_flatten_layer.py
index 9dd04df23..63a9e9cd2 100644
--- a/src/safeds/ml/nn/layers/_flatten_layer.py
+++ b/src/safeds/ml/nn/layers/_flatten_layer.py
@@ -86,6 +86,6 @@ def __eq__(self, other: object) -> bool:
 
     def __sizeof__(self) -> int:
         return sys.getsizeof(self._input_size) + sys.getsizeof(self._output_size)
-    
+
     def get_parameter_count(self, input_size: TensorShape) -> int:  # noqa: ARG002
         return 0
diff --git a/src/safeds/ml/nn/layers/_forward_layer.py b/src/safeds/ml/nn/layers/_forward_layer.py
index 52d5e7f26..9f9335e5a 100644
--- a/src/safeds/ml/nn/layers/_forward_layer.py
+++ b/src/safeds/ml/nn/layers/_forward_layer.py
@@ -98,4 +98,4 @@ def __sizeof__(self) -> int:
         return sys.getsizeof(self._input_size) + sys.getsizeof(self._output_size)
 
     def get_parameter_count(self, input_size: TensorShape) -> int:
-        return (input_size._dims[0]+1)*self._output_size
+        return (input_size._dims[0] + 1) * self._output_size
diff --git a/src/safeds/ml/nn/layers/_gru_layer.py b/src/safeds/ml/nn/layers/_gru_layer.py
index f9ae2e39c..3ada7082b 100644
--- a/src/safeds/ml/nn/layers/_gru_layer.py
+++ b/src/safeds/ml/nn/layers/_gru_layer.py
@@ -97,5 +97,4 @@ def __sizeof__(self) -> int:
         return sys.getsizeof(self._input_size) + sys.getsizeof(self._output_size)
 
     def get_parameter_count(self, input_size: TensorShape) -> int:
-        return (input_size._dims[0]+self._output_size+2)*self._output_size*3
-        
\ No newline at end of file
+        return (input_size._dims[0] + self._output_size + 2) * self._output_size * 3
diff --git a/src/safeds/ml/nn/layers/_layer.py b/src/safeds/ml/nn/layers/_layer.py
index 2f4e6cfa5..a95155da1 100644
--- a/src/safeds/ml/nn/layers/_layer.py
+++ b/src/safeds/ml/nn/layers/_layer.py
@@ -46,4 +46,4 @@ def __sizeof__(self) -> int:
 
     @abstractmethod
     def get_parameter_count(self, input_size: TensorShape) -> int:
-        pass # pragma: no cover
+        pass  # pragma: no cover
diff --git a/src/safeds/ml/nn/layers/_lstm_layer.py b/src/safeds/ml/nn/layers/_lstm_layer.py
index fa5db2a4b..06825d8af 100644
--- a/src/safeds/ml/nn/layers/_lstm_layer.py
+++ b/src/safeds/ml/nn/layers/_lstm_layer.py
@@ -97,5 +97,4 @@ def __sizeof__(self) -> int:
         return sys.getsizeof(self._input_size) + sys.getsizeof(self._output_size)
 
     def get_parameter_count(self, input_size: TensorShape) -> int:
-        return (input_size._dims[0]+self._output_size+2)*self._output_size*4
-        
\ No newline at end of file
+        return (input_size._dims[0] + self._output_size + 2) * self._output_size * 4
diff --git a/src/safeds/ml/nn/layers/_pooling2d_layer.py b/src/safeds/ml/nn/layers/_pooling2d_layer.py
index b9e277d74..7792b0044 100644
--- a/src/safeds/ml/nn/layers/_pooling2d_layer.py
+++ b/src/safeds/ml/nn/layers/_pooling2d_layer.py
@@ -133,7 +133,7 @@ def __sizeof__(self) -> int:
             + sys.getsizeof(self._stride)
             + sys.getsizeof(self._padding)
         )
-    
+
     def get_parameter_count(self, input_size: TensorShape) -> int:  # noqa: ARG002
         return 0
 
diff --git a/src/safeds/ml/nn/typing/_tensor_shape.py b/src/safeds/ml/nn/typing/_tensor_shape.py
index 6e87c0bd1..f6c4b9d4f 100644
--- a/src/safeds/ml/nn/typing/_tensor_shape.py
+++ b/src/safeds/ml/nn/typing/_tensor_shape.py
@@ -10,13 +10,13 @@ class TensorShape:
 
     Parameters
    ----------
-    dims: 
-        A list of integers where each integer represents 
+    dims:
+        A list of integers where each integer represents
         the size of the tensor in a particular dimension.
     """
-    
+
     def __init__(self, dims: list[int]) -> None:
-        self._dims = dims 
+        self._dims = dims
 
     def get_size(self, dimension: int | None = None) -> int:
         """
@@ -36,17 +36,17 @@ def get_size(self, dimension: int | None = None) -> int:
         OutOfBoundsError:
             If the actual value is outside its expected range.
         """
-        _check_bounds("dimension",dimension, lower_bound=_ClosedBound(0))
+        _check_bounds("dimension", dimension, lower_bound=_ClosedBound(0))
         if dimension is not None and dimension >= self.dimensionality:
-            #TODO maybe add error message indicating that the dimension is out of range
-            return 0 
-        if(dimension is None):
+            # TODO maybe add error message indicating that the dimension is out of range
+            return 0
+        if dimension is None:
             return self._dims[0]
         return self._dims[dimension]
-    
+
     def __hash__(self) -> int:
         return _structural_hash(self._dims)
-    
+
     @property
     def dimensionality(self) -> int:
         """
@@ -56,4 +56,4 @@ def dimensionality(self) -> int:
         -------
         int: The number of dimensions of the tensor.
         """
-        return len(self._dims)
\ No newline at end of file
+        return len(self._dims)
diff --git a/tests/safeds/ml/nn/layers/test_convolutional2d_layer.py b/tests/safeds/ml/nn/layers/test_convolutional2d_layer.py
index 3721f29fe..f79595919 100644
--- a/tests/safeds/ml/nn/layers/test_convolutional2d_layer.py
+++ b/tests/safeds/ml/nn/layers/test_convolutional2d_layer.py
@@ -157,22 +157,22 @@ def test_should_raise_if_input_size_is_set_with_int(
         layer = conv_type(output_channel, kernel_size, stride=stride, padding=padding)
         with pytest.raises(TypeError, match=r"The input_size of a convolution layer has to be of type ImageSize."):
             layer._set_input_size(1)
-    
+
     def test_conv_get_parameter_count_returns_right_amount(self) -> None:
-        kernel_size=5
-        input_channels=3
-        output_channels=3
-        expected_output = int((kernel_size*kernel_size*input_channels+1)*output_channels)
+        kernel_size = 5
+        input_channels = 3
+        output_channels = 3
+        expected_output = int((kernel_size * kernel_size * input_channels + 1) * output_channels)
         layer = Convolutional2DLayer(input_channels, kernel_size)
-        assert layer.get_parameter_count(TensorShape([1,input_channels])) == expected_output
+        assert layer.get_parameter_count(TensorShape([1, input_channels])) == expected_output
 
     def test_conv_transposed_get_parameter_count_returns_right_amount(self) -> None:
-        kernel_size=5
-        input_channels=3
-        output_channels=3
-        expected_output = int((kernel_size*kernel_size*input_channels+1)*output_channels)
+        kernel_size = 5
+        input_channels = 3
+        output_channels = 3
+        expected_output = int((kernel_size * kernel_size * input_channels + 1) * output_channels)
         layer = ConvolutionalTranspose2DLayer(input_channels, kernel_size)
-        assert layer.get_parameter_count(TensorShape([1,input_channels])) == expected_output
+        assert layer.get_parameter_count(TensorShape([1, input_channels])) == expected_output
 
 
 class TestEq:
diff --git a/tests/safeds/ml/nn/layers/test_flatten_layer.py b/tests/safeds/ml/nn/layers/test_flatten_layer.py
index dc9519d3e..3adb15236 100644
--- a/tests/safeds/ml/nn/layers/test_flatten_layer.py
+++ b/tests/safeds/ml/nn/layers/test_flatten_layer.py
@@ -41,7 +41,6 @@ def test_get_parameter_count_right_output(self) -> None:
         layer = FlattenLayer()
         assert layer.get_parameter_count(TensorShape([1])) == 0
 
-
 class TestEq:
     def test_should_be_equal(self) -> None:
         assert FlattenLayer() == FlattenLayer()
diff --git a/tests/safeds/ml/nn/layers/test_forward_layer.py b/tests/safeds/ml/nn/layers/test_forward_layer.py
index 265402ad7..1b250f5b7 100644
--- a/tests/safeds/ml/nn/layers/test_forward_layer.py
+++ b/tests/safeds/ml/nn/layers/test_forward_layer.py
@@ -179,9 +179,10 @@ def test_should_assert_that_different_forward_layers_have_different_hash(
 def test_should_assert_that_layer_size_is_greater_than_normal_object(layer: ForwardLayer) -> None:
     assert sys.getsizeof(layer) > sys.getsizeof(object())
 
+
 def test_conv_transposed_get_parameter_count_returns_right_amount() -> None:
-    input_neurons=3
-    output_neurons=3
-    expected_output = int((input_neurons+1)*output_neurons)
+    input_neurons = 3
+    output_neurons = 3
+    expected_output = int((input_neurons + 1) * output_neurons)
     layer = ForwardLayer(output_neurons)
     assert layer.get_parameter_count(TensorShape([input_neurons])) == expected_output
diff --git a/tests/safeds/ml/nn/layers/test_gru_layer.py b/tests/safeds/ml/nn/layers/test_gru_layer.py
index 1e5c43c8b..930805771 100644
--- a/tests/safeds/ml/nn/layers/test_gru_layer.py
+++ b/tests/safeds/ml/nn/layers/test_gru_layer.py
@@ -189,9 +189,10 @@ def test_internal_layer_should_raise_error() -> None:
     with pytest.raises(ValueError, match="The input_size is not yet set."):
         layer._get_internal_layer(activation_function="relu")
 
+
 def test_conv_transposed_get_parameter_count_returns_right_amount() -> None:
-    input_neurons=4
-    output_neurons=16
-    expected_output = int((input_neurons+output_neurons+2)*output_neurons*3)
+    input_neurons = 4
+    output_neurons = 16
+    expected_output = int((input_neurons + output_neurons + 2) * output_neurons * 3)
     layer = GRULayer(output_neurons)
     assert layer.get_parameter_count(TensorShape([input_neurons])) == expected_output
diff --git a/tests/safeds/ml/nn/layers/test_lstm_layer.py b/tests/safeds/ml/nn/layers/test_lstm_layer.py
index 85b638527..2230d1839 100644
--- a/tests/safeds/ml/nn/layers/test_lstm_layer.py
+++ b/tests/safeds/ml/nn/layers/test_lstm_layer.py
@@ -179,9 +179,10 @@ def test_should_assert_that_different_forward_layers_have_different_hash(
 def test_should_assert_that_layer_size_is_greater_than_normal_object(layer: LSTMLayer) -> None:
     assert sys.getsizeof(layer) > sys.getsizeof(object())
 
+
 def test_conv_transposed_get_parameter_count_returns_right_amount() -> None:
-    input_neurons=4
-    output_neurons=16
-    expected_output = int((input_neurons+output_neurons+2)*output_neurons*4)
+    input_neurons = 4
+    output_neurons = 16
+    expected_output = int((input_neurons + output_neurons + 2) * output_neurons * 4)
     layer = LSTMLayer(output_neurons)
     assert layer.get_parameter_count(TensorShape([input_neurons])) == expected_output
diff --git a/tests/safeds/ml/nn/layers/test_pooling2d_layer.py b/tests/safeds/ml/nn/layers/test_pooling2d_layer.py
index 981453317..e50d34221 100644
--- a/tests/safeds/ml/nn/layers/test_pooling2d_layer.py
+++ b/tests/safeds/ml/nn/layers/test_pooling2d_layer.py
@@ -58,7 +58,7 @@ def test_should_raise_if_input_size_is_set_with_int(self, strategy: Literal["max
             layer._set_input_size(1)
 
     @pytest.mark.parametrize(
-        "strategy", 
+        "strategy",
         [
             "max",
             "avg",
diff --git a/tests/safeds/ml/nn/test_model.py b/tests/safeds/ml/nn/test_model.py
index d2841d6ef..d14358e3f 100644
--- a/tests/safeds/ml/nn/test_model.py
+++ b/tests/safeds/ml/nn/test_model.py
@@ -526,13 +526,13 @@ def test_parameters_model_not_fitted(self, device: Device) -> None:
 
     def test_should_sum_parameters(self, device: Device) -> None:
         configure_test_with_device(device)
-        expected_output = 16+0+9
+        expected_output = 16 + 0 + 9
         model_fitted = NeuralNetworkClassifier(
             InputConversionTable(),
             [ForwardLayer(neuron_count=8), DropoutLayer(0.5), ForwardLayer(neuron_count=1)],
         ).fit(
-            Table.from_dict({"a": [1, 1, 1], "b": [2, 2, 2]}).to_tabular_dataset("a"), 
-            epoch_size=3, 
+            Table.from_dict({"a": [1, 1, 1], "b": [2, 2, 2]}).to_tabular_dataset("a"),
+            epoch_size=3,
         )
         assert expected_output == model_fitted.get_parameter_count()
 
@@ -913,12 +913,12 @@ def test_parameters_model_not_fitted(self, device: Device) -> None:
 
     def test_should_sum_parameters(self, device: Device) -> None:
         configure_test_with_device(device)
-        expected_output = 16+0+9
+        expected_output = 16 + 0 + 9
         model_fitted = NeuralNetworkRegressor(
             InputConversionTable(),
             [ForwardLayer(neuron_count=8), DropoutLayer(0.5), ForwardLayer(neuron_count=1)],
         ).fit(
-            Table.from_dict({"a": [1, 1, 1], "b": [2, 2, 2]}).to_tabular_dataset("a"), 
-            epoch_size=3, 
+            Table.from_dict({"a": [1, 1, 1], "b": [2, 2, 2]}).to_tabular_dataset("a"),
+            epoch_size=3,
        )
         assert expected_output == model_fitted.get_parameter_count()