diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index 6e406249d57a..1dbbb05c8eef 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -10,7 +10,7 @@
                     "source.organizeImports": true
                 },
                 "[python]": {
-                    "editor.defaultFormatter": "ms-python.black-formatter"
+                    "editor.defaultFormatter": "charliermarsh.ruff"
                 },
                 "editor.rulers": [
                     80
@@ -18,8 +18,7 @@
             },
             "extensions": [
                 "charliermarsh.ruff",
-                "ms-python.python",
-                "ms-python.black-formatter"
+                "ms-python.python"
             ]
         }
     },
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 28a7add98066..f508aec341fc 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -109,8 +109,7 @@ section of the README.
 
 ## Code style
 
-Keras uses [Black](https://black.readthedocs.io/en/stable/) and
-[Ruff](https://docs.astral.sh/ruff/) to format the code. Please refer to
+Keras uses [Ruff](https://docs.astral.sh/ruff/) to format the code. Please refer to
 [requirements-common.txt](https://github.com/keras-team/keras/blob/master/requirements-common.txt)
 for the required versions. Run the following command **at the root directory of
 the repo** to format your code.
diff --git a/examples/demo_custom_layer_backend_agnostic.py b/examples/demo_custom_layer_backend_agnostic.py
index 1b24aa5925cc..b3849c20cb50 100644
--- a/examples/demo_custom_layer_backend_agnostic.py
+++ b/examples/demo_custom_layer_backend_agnostic.py
@@ -47,9 +47,7 @@ def __init__(self, rate, name=None):
 
     def call(self, inputs):
         # Use `keras.random` for random ops.
-        return keras.random.dropout(
-            inputs, self.rate, seed=self.seed_generator
-        )
+        return keras.random.dropout(inputs, self.rate, seed=self.seed_generator)
 
 
 class MyModel(Model):
diff --git a/examples/demo_jax_distributed.py b/examples/demo_jax_distributed.py
index 3ea434017f9e..906dc47563de 100644
--- a/examples/demo_jax_distributed.py
+++ b/examples/demo_jax_distributed.py
@@ -27,9 +27,9 @@
 BATCH_SIZE = 192
 
-(x_train, train_labels), (
-    x_eval,
-    eval_labels,
+(
+    (x_train, train_labels),
+    (x_eval, eval_labels),
 ) = keras.datasets.mnist.load_data()
 x_train = np.expand_dims(x_train, axis=-1).astype(
     np.float32
diff --git a/guides/making_new_layers_and_models_via_subclassing.py b/guides/making_new_layers_and_models_via_subclassing.py
index 666e0cc0267f..76766763320a 100644
--- a/guides/making_new_layers_and_models_via_subclassing.py
+++ b/guides/making_new_layers_and_models_via_subclassing.py
@@ -643,7 +643,7 @@ def __init__(
         intermediate_dim=64,
         latent_dim=32,
         name="autoencoder",
-        **kwargs
+        **kwargs,
     ):
         super().__init__(name=name, **kwargs)
         self.original_dim = original_dim
diff --git a/integration_tests/dataset_tests/boston_housing_test.py b/integration_tests/dataset_tests/boston_housing_test.py
index 4d4c3399beb6..635738fe5f05 100644
--- a/integration_tests/dataset_tests/boston_housing_test.py
+++ b/integration_tests/dataset_tests/boston_housing_test.py
@@ -3,7 +3,6 @@
 
 
 class BostonHousingTest(testing.TestCase):
-
     def test_load_data(self):
         (x_train, y_train), (x_test, y_test) = boston_housing.load_data()
         self.assertEqual(x_train.shape[1], 13)
diff --git a/integration_tests/dataset_tests/california_housing_test.py b/integration_tests/dataset_tests/california_housing_test.py
index d49abb7c0142..7f0cc4566177 100644
--- a/integration_tests/dataset_tests/california_housing_test.py
+++ b/integration_tests/dataset_tests/california_housing_test.py
@@ -3,7 +3,6 @@
 
 
 class CaliforniaHousingTest(testing.TestCase):
-
     def test_load_data_large(self):
         (x_train, y_train), (x_test, y_test) = california_housing.load_data(
             version="large"
diff --git a/integration_tests/model_visualization_test.py b/integration_tests/model_visualization_test.py
index 14597d70ebb7..95b3daac280d 100644
--- a/integration_tests/model_visualization_test.py
+++ b/integration_tests/model_visualization_test.py
@@ -41,7 +41,6 @@ def get_edge_dict(dot):
 
 
 class ModelVisualizationTest(testing.TestCase):
-
     def test_plot_sequential_model(self):
         model = keras.Sequential(
             [
diff --git a/keras/src/backend/common/masking_test.py b/keras/src/backend/common/masking_test.py
index d0a2eb5eefa8..f1ac8a5c26d5 100644
--- a/keras/src/backend/common/masking_test.py
+++ b/keras/src/backend/common/masking_test.py
@@ -6,7 +6,6 @@
 
 
 class MaskingTest(testing.TestCase):
-
     def test_mask_on_eager_tensor(self):
         x = ops.zeros((2, 3))
         self.assertIsNone(get_keras_mask(x))
@@ -25,7 +24,6 @@ def test_mask_on_eager_tensor(self):
         self.assertIsNone(get_keras_mask(x))
 
     def test_mask_on_tracer_tensor(self):
-
         def fn(x):
             self.assertIsNone(get_keras_mask(x))
 
diff --git a/keras/src/backend/common/symbolic_scope_test.py b/keras/src/backend/common/symbolic_scope_test.py
index 092dcfe0748c..72b8746cb96e 100644
--- a/keras/src/backend/common/symbolic_scope_test.py
+++ b/keras/src/backend/common/symbolic_scope_test.py
@@ -8,7 +8,6 @@
 
 class TestSymbolicScope(testing.TestCase):
     def test_basic_flow(self):
-
         # Define a function that behaves differently according to
         # `in_symbolic_scope`.
         def compute_loss(y, y_pred):
diff --git a/keras/src/backend/jax/core.py b/keras/src/backend/jax/core.py
index 77afe324ee7b..9913085fde6b 100644
--- a/keras/src/backend/jax/core.py
+++ b/keras/src/backend/jax/core.py
@@ -153,7 +153,6 @@ def convert_keras_tensor_to_jax(x, fill_value=None):
         return x
 
     def wrapped_fn(*args, **kwargs):
-
         # Turn inputs that are sparse to BCOO tensors
         def to_bcoo_if_sparse(x, maybe_symbolic_x):
             if (
diff --git a/keras/src/backend/jax/optimizer.py b/keras/src/backend/jax/optimizer.py
index 74ec92fe81d8..cf803626515a 100644
--- a/keras/src/backend/jax/optimizer.py
+++ b/keras/src/backend/jax/optimizer.py
@@ -13,7 +13,6 @@
 
 
 class JaxOptimizer(base_optimizer.BaseOptimizer):
-
     def _backend_apply_gradients(self, grads, trainable_variables):
         if self.gradient_accumulation_steps:
             is_update_step = (
diff --git a/keras/src/backend/jax/trainer.py b/keras/src/backend/jax/trainer.py
index 7b9523ef5fbf..1e3490ac27b7 100644
--- a/keras/src/backend/jax/trainer.py
+++ b/keras/src/backend/jax/trainer.py
@@ -354,10 +354,9 @@ def fit(
             # Create the validation data using the training data. Only supported
             # for TF/numpy/jax arrays.
             (
-                x,
-                y,
-                sample_weight,
-            ), validation_data = array_slicing.train_validation_split(
+                (x, y, sample_weight),
+                validation_data,
+            ) = array_slicing.train_validation_split(
                 (x, y, sample_weight), validation_split=validation_split
             )
 
diff --git a/keras/src/backend/tensorflow/core.py b/keras/src/backend/tensorflow/core.py
index 3a4005e6096d..5782ae6774c9 100644
--- a/keras/src/backend/tensorflow/core.py
+++ b/keras/src/backend/tensorflow/core.py
@@ -499,7 +499,6 @@ def _base_case():
         )
 
     def _recursive_case():
-
         odd_elems = _scan(reduced_elems)
 
         def _even_length_case():
diff --git a/keras/src/backend/tensorflow/optimizer.py b/keras/src/backend/tensorflow/optimizer.py
index 1b0c6b9750f2..81e4241ffa01 100644
--- a/keras/src/backend/tensorflow/optimizer.py
+++ b/keras/src/backend/tensorflow/optimizer.py
@@ -18,7 +18,6 @@
 
 
 class TFOptimizer(KerasAutoTrackable, base_optimizer.BaseOptimizer):
-
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self._distribution_strategy = tf.distribute.get_strategy()
diff --git a/keras/src/backend/tensorflow/saved_model_test.py b/keras/src/backend/tensorflow/saved_model_test.py
index 0e3c9fd58c1f..bac8837499d0 100644
--- a/keras/src/backend/tensorflow/saved_model_test.py
+++ b/keras/src/backend/tensorflow/saved_model_test.py
@@ -150,22 +150,18 @@ def call(self, inputs):
         named_product(struct_type=["tuple", "array", "dict"])
     )
     def test_model_with_input_structure(self, struct_type):
-
         class TupleModel(models.Model):
-
             def call(self, inputs):
                 x, y = inputs
                 return x + ops.mean(y, axis=1)
 
         class ArrayModel(models.Model):
-
             def call(self, inputs):
                 x = inputs[0]
                 y = inputs[1]
                 return x + ops.mean(y, axis=1)
 
         class DictModel(models.Model):
-
             def call(self, inputs):
                 x = inputs["x"]
                 y = inputs["y"]
diff --git a/keras/src/backend/tensorflow/trainer.py b/keras/src/backend/tensorflow/trainer.py
index 0754f31e43c8..98055e6cfa69 100644
--- a/keras/src/backend/tensorflow/trainer.py
+++ b/keras/src/backend/tensorflow/trainer.py
@@ -99,7 +99,6 @@ def predict_step(self, data):
         return y_pred
 
     def _make_function(self, step_function):
-
         @tf.autograph.experimental.do_not_convert
         def one_step_on_data(data):
             """Runs a single training step on a batch of data."""
@@ -271,10 +270,9 @@ def fit(
             # Create the validation data using the training data. Only supported
             # for TF/numpy/jax arrays.
             (
-                x,
-                y,
-                sample_weight,
-            ), validation_data = array_slicing.train_validation_split(
+                (x, y, sample_weight),
+                validation_data,
+            ) = array_slicing.train_validation_split(
                 (x, y, sample_weight), validation_split=validation_split
             )
 
diff --git a/keras/src/backend/torch/trainer.py b/keras/src/backend/torch/trainer.py
index 1acc21145dc4..0eba8aa50ab1 100644
--- a/keras/src/backend/torch/trainer.py
+++ b/keras/src/backend/torch/trainer.py
@@ -195,10 +195,9 @@ def fit(
             # for TF/numpy/jax arrays.
             # TODO: Support torch tensors for validation data.
             (
-                x,
-                y,
-                sample_weight,
-            ), validation_data = array_slicing.train_validation_split(
+                (x, y, sample_weight),
+                validation_data,
+            ) = array_slicing.train_validation_split(
                 (x, y, sample_weight), validation_split=validation_split
             )
 
diff --git a/keras/src/datasets/reuters.py b/keras/src/datasets/reuters.py
index 998754d1c282..552b3997d441 100644
--- a/keras/src/datasets/reuters.py
+++ b/keras/src/datasets/reuters.py
@@ -124,8 +124,9 @@ def load_data(
     xs = [[w for w in x if skip_top <= w < num_words] for x in xs]
 
     idx = int(len(xs) * (1 - test_split))
-    x_train, y_train = np.array(xs[:idx], dtype="object"), np.array(
-        labels[:idx]
+    x_train, y_train = (
+        np.array(xs[:idx], dtype="object"),
+        np.array(labels[:idx]),
     )
     x_test, y_test = np.array(xs[idx:], dtype="object"), np.array(labels[idx:])
 
diff --git a/keras/src/export/export_lib.py b/keras/src/export/export_lib.py
index abcd1be609e8..923ca2e86e7a 100644
--- a/keras/src/export/export_lib.py
+++ b/keras/src/export/export_lib.py
@@ -166,7 +166,6 @@ def track(self, resource):
         # Variables in the lists below are actually part of the trackables
         # that get saved, because the lists are created in __init__.
         if backend.backend() == "jax":
-
             trainable_variables = tree.flatten(resource.trainable_variables)
             non_trainable_variables = tree.flatten(
                 resource.non_trainable_variables
@@ -328,7 +327,6 @@ def serving_fn(x):
                 fn, input_signature=input_signature, autograph=False
             )
         else:  # JAX backend
-
             # 1. Create a stateless wrapper for `fn`
             # 2. jax2tf the stateless wrapper
             # 3. Create a stateful function that binds the variables with
diff --git a/keras/src/export/export_lib_test.py b/keras/src/export/export_lib_test.py
index 6f185dacbfcb..7e5a9c520108 100644
--- a/keras/src/export/export_lib_test.py
+++ b/keras/src/export/export_lib_test.py
@@ -74,7 +74,6 @@ def test_standard_model_export(self, model_type):
         named_product(model_type=["sequential", "functional", "subclass"])
     )
     def test_model_with_rng_export(self, model_type):
-
         class RandomLayer(layers.Layer):
             def __init__(self):
                 super().__init__()
@@ -104,7 +103,6 @@ def call(self, inputs):
         named_product(model_type=["sequential", "functional", "subclass"])
     )
     def test_model_with_non_trainable_state_export(self, model_type):
-
         class StateLayer(layers.Layer):
             def __init__(self):
                 super().__init__()
@@ -151,22 +149,18 @@ def test_model_with_tf_data_layer(self, model_type):
         named_product(struct_type=["tuple", "array", "dict"])
     )
     def test_model_with_input_structure(self, struct_type):
-
         class TupleModel(models.Model):
-
             def call(self, inputs):
                 x, y = inputs
                 return ops.add(x, y)
 
         class ArrayModel(models.Model):
-
             def call(self, inputs):
                 x = inputs[0]
                 y = inputs[1]
                 return ops.add(x, y)
 
         class DictModel(models.Model):
-
             def call(self, inputs):
                 x = inputs["x"]
                 y = inputs["y"]
@@ -214,7 +208,6 @@ def call(self, inputs):
         export_lib.export_model(revived_model, self.get_temp_dir())
 
     def test_model_with_multiple_inputs(self):
-
         class TwoInputsModel(models.Model):
             def call(self, x, y):
                 return x + y
@@ -302,7 +295,6 @@ def test_low_level_model_export_with_alias(self):
         named_product(model_type=["sequential", "functional", "subclass"])
     )
     def test_low_level_model_export_with_dynamic_dims(self, model_type):
-
         class ReductionLayer(layers.Layer):
             def call(self, inputs):
                 return ops.max(inputs, axis=1)
@@ -382,7 +374,6 @@ def test_low_level_model_export_with_jax2tf_kwargs(self):
         reason="This test is only for the JAX backend.",
     )
     def test_low_level_model_export_with_jax2tf_polymorphic_shapes(self):
-
         class SquareLayer(layers.Layer):
             def call(self, inputs):
                 return ops.matmul(inputs, inputs)
diff --git a/keras/src/layers/activations/prelu.py b/keras/src/layers/activations/prelu.py
index f46d974df824..652b60e22067 100644
--- a/keras/src/layers/activations/prelu.py
+++ b/keras/src/layers/activations/prelu.py
@@ -37,7 +37,7 @@ def __init__(
         alpha_regularizer=None,
         alpha_constraint=None,
         shared_axes=None,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(**kwargs)
         self.supports_masking = True
diff --git a/keras/src/layers/convolutional/conv1d.py b/keras/src/layers/convolutional/conv1d.py
index d24320380aa5..ce1ced8c422b 100644
--- a/keras/src/layers/convolutional/conv1d.py
+++ b/keras/src/layers/convolutional/conv1d.py
@@ -110,7 +110,7 @@ def __init__(
         activity_regularizer=None,
         kernel_constraint=None,
         bias_constraint=None,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(
             rank=1,
@@ -130,7 +130,7 @@ def __init__(
             activity_regularizer=activity_regularizer,
             kernel_constraint=kernel_constraint,
             bias_constraint=bias_constraint,
-            **kwargs
+            **kwargs,
         )
 
     def _compute_causal_padding(self):
diff --git a/keras/src/layers/convolutional/conv1d_transpose.py b/keras/src/layers/convolutional/conv1d_transpose.py
index e14d04a878fd..466f1f19931f 100644
--- a/keras/src/layers/convolutional/conv1d_transpose.py
+++ b/keras/src/layers/convolutional/conv1d_transpose.py
@@ -108,7 +108,7 @@ def __init__(
         activity_regularizer=None,
         kernel_constraint=None,
         bias_constraint=None,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(
             rank=1,
@@ -127,5 +127,5 @@ def __init__(
             activity_regularizer=activity_regularizer,
             kernel_constraint=kernel_constraint,
             bias_constraint=bias_constraint,
-            **kwargs
+            **kwargs,
         )
diff --git a/keras/src/layers/convolutional/conv2d.py b/keras/src/layers/convolutional/conv2d.py
index 662de235b374..c46f8f9a0bc1 100644
--- a/keras/src/layers/convolutional/conv2d.py
+++ b/keras/src/layers/convolutional/conv2d.py
@@ -104,7 +104,7 @@ def __init__(
         activity_regularizer=None,
         kernel_constraint=None,
         bias_constraint=None,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(
             rank=2,
@@ -124,5 +124,5 @@ def __init__(
             activity_regularizer=activity_regularizer,
             kernel_constraint=kernel_constraint,
             bias_constraint=bias_constraint,
-            **kwargs
+            **kwargs,
         )
diff --git a/keras/src/layers/convolutional/conv2d_transpose.py b/keras/src/layers/convolutional/conv2d_transpose.py
index 633d57ff1665..ac13452f6263 100644
--- a/keras/src/layers/convolutional/conv2d_transpose.py
+++ b/keras/src/layers/convolutional/conv2d_transpose.py
@@ -110,7 +110,7 @@ def __init__(
         activity_regularizer=None,
         kernel_constraint=None,
         bias_constraint=None,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(
             rank=2,
@@ -129,5 +129,5 @@ def __init__(
             activity_regularizer=activity_regularizer,
             kernel_constraint=kernel_constraint,
             bias_constraint=bias_constraint,
-            **kwargs
+            **kwargs,
         )
diff --git a/keras/src/layers/convolutional/conv3d.py b/keras/src/layers/convolutional/conv3d.py
index e6ed74fed490..4badd2042c37 100644
--- a/keras/src/layers/convolutional/conv3d.py
+++ b/keras/src/layers/convolutional/conv3d.py
@@ -110,7 +110,7 @@ def __init__(
         activity_regularizer=None,
         kernel_constraint=None,
         bias_constraint=None,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(
             rank=3,
@@ -130,5 +130,5 @@ def __init__(
             activity_regularizer=activity_regularizer,
             kernel_constraint=kernel_constraint,
             bias_constraint=bias_constraint,
-            **kwargs
+            **kwargs,
         )
diff --git a/keras/src/layers/convolutional/conv3d_transpose.py b/keras/src/layers/convolutional/conv3d_transpose.py
index 953f0d278379..348ff5f5d800 100644
--- a/keras/src/layers/convolutional/conv3d_transpose.py
+++ b/keras/src/layers/convolutional/conv3d_transpose.py
@@ -115,7 +115,7 @@ def __init__(
         activity_regularizer=None,
         kernel_constraint=None,
         bias_constraint=None,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(
             rank=3,
@@ -134,5 +134,5 @@ def __init__(
             activity_regularizer=activity_regularizer,
             kernel_constraint=kernel_constraint,
             bias_constraint=bias_constraint,
-            **kwargs
+            **kwargs,
         )
diff --git a/keras/src/layers/convolutional/conv_test.py b/keras/src/layers/convolutional/conv_test.py
index 7b9ead0e941b..34707c4e278a 100644
--- a/keras/src/layers/convolutional/conv_test.py
+++ b/keras/src/layers/convolutional/conv_test.py
@@ -717,7 +717,6 @@ def test_enable_lora(
 
     @pytest.mark.requires_trainable_backend
     def test_lora_weight_name(self):
-
         class MyModel(models.Model):
             def __init__(self):
                 super().__init__(name="mymodel")
diff --git a/keras/src/layers/convolutional/depthwise_conv1d.py b/keras/src/layers/convolutional/depthwise_conv1d.py
index d787fcd0e304..51312d8447e2 100644
--- a/keras/src/layers/convolutional/depthwise_conv1d.py
+++ b/keras/src/layers/convolutional/depthwise_conv1d.py
@@ -114,7 +114,7 @@ def __init__(
         activity_regularizer=None,
         depthwise_constraint=None,
         bias_constraint=None,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(
             rank=1,
@@ -133,5 +133,5 @@ def __init__(
             activity_regularizer=activity_regularizer,
             depthwise_constraint=depthwise_constraint,
             bias_constraint=bias_constraint,
-            **kwargs
+            **kwargs,
         )
diff --git a/keras/src/layers/convolutional/depthwise_conv2d.py b/keras/src/layers/convolutional/depthwise_conv2d.py
index 585b37800279..71c950246e03 100644
--- a/keras/src/layers/convolutional/depthwise_conv2d.py
+++ b/keras/src/layers/convolutional/depthwise_conv2d.py
@@ -115,7 +115,7 @@ def __init__(
         activity_regularizer=None,
         depthwise_constraint=None,
         bias_constraint=None,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(
             rank=2,
@@ -134,5 +134,5 @@ def __init__(
             activity_regularizer=activity_regularizer,
             depthwise_constraint=depthwise_constraint,
             bias_constraint=bias_constraint,
-            **kwargs
+            **kwargs,
         )
diff --git a/keras/src/layers/core/dense_test.py b/keras/src/layers/core/dense_test.py
index fe0bbda83636..6ef9a55f42c0 100644
--- a/keras/src/layers/core/dense_test.py
+++ b/keras/src/layers/core/dense_test.py
@@ -273,7 +273,6 @@ def test_enable_lora(self):
 
     @pytest.mark.requires_trainable_backend
     def test_lora_weight_name(self):
-
         class MyModel(models.Model):
             def __init__(self):
                 super().__init__(name="mymodel")
diff --git a/keras/src/layers/core/einsum_dense.py b/keras/src/layers/core/einsum_dense.py
index 7dbfeccd4b01..90c7b9ca3386 100644
--- a/keras/src/layers/core/einsum_dense.py
+++ b/keras/src/layers/core/einsum_dense.py
@@ -877,7 +877,6 @@ def _analyze_split_string(
 
 
 def _analyze_quantization_info(equation, input_shape):
-
     def get_specs(equation, input_shape):
         possible_labels = string.ascii_letters
         dot_replaced_string = re.sub(r"\.\.\.", "0", equation)
diff --git a/keras/src/layers/layer_test.py b/keras/src/layers/layer_test.py
index facc0e8f9f7c..5fa8bca7354d 100644
--- a/keras/src/layers/layer_test.py
+++ b/keras/src/layers/layer_test.py
@@ -15,7 +15,6 @@
 
 
 class LayerTest(testing.TestCase):
-
     def test_compute_output_spec(self):
         # Test that implementing compute_output_shape
         # is enough to make compute_output_spec work.
@@ -1196,7 +1195,6 @@ def call(self, input):
                 return self.post_build_modify_layer(input)
 
         class PostBuildModifyLayer(layers.Layer):
-
             def call(self, input):
                 return self.var + input
 
@@ -1330,7 +1328,6 @@ def __init__(self):
         self.assertListEqual(layer1_names, layer2_names)
 
     def test_complex_dtype_support(self):
-
         class MyDenseLayer(layers.Layer):
             def __init__(self, num_outputs):
                 super(MyDenseLayer, self).__init__()
diff --git a/keras/src/layers/pooling/average_pooling1d.py b/keras/src/layers/pooling/average_pooling1d.py
index a52a031e17f9..0450149c0473 100644
--- a/keras/src/layers/pooling/average_pooling1d.py
+++ b/keras/src/layers/pooling/average_pooling1d.py
@@ -78,7 +78,7 @@ def __init__(
         padding="valid",
         data_format=None,
         name=None,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(
             pool_size,
diff --git a/keras/src/layers/pooling/average_pooling2d.py b/keras/src/layers/pooling/average_pooling2d.py
index ed56f32c0ade..005a0cb9b730 100644
--- a/keras/src/layers/pooling/average_pooling2d.py
+++ b/keras/src/layers/pooling/average_pooling2d.py
@@ -95,7 +95,7 @@ def __init__(
         padding="valid",
         data_format=None,
         name=None,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(
             pool_size,
diff --git a/keras/src/layers/pooling/average_pooling3d.py b/keras/src/layers/pooling/average_pooling3d.py
index 96541e2cd8a8..2e5c7448d332 100644
--- a/keras/src/layers/pooling/average_pooling3d.py
+++ b/keras/src/layers/pooling/average_pooling3d.py
@@ -71,7 +71,7 @@ def __init__(
         padding="valid",
         data_format=None,
         name=None,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(
             pool_size,
diff --git a/keras/src/layers/pooling/max_pooling1d.py b/keras/src/layers/pooling/max_pooling1d.py
index 7485393b5538..c6c35d105f8f 100644
--- a/keras/src/layers/pooling/max_pooling1d.py
+++ b/keras/src/layers/pooling/max_pooling1d.py
@@ -79,7 +79,7 @@ def __init__(
         padding="valid",
         data_format=None,
         name=None,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(
             pool_size,
diff --git a/keras/src/layers/pooling/max_pooling2d.py b/keras/src/layers/pooling/max_pooling2d.py
index 9d2ffdc437de..237da0670ab1 100644
--- a/keras/src/layers/pooling/max_pooling2d.py
+++ b/keras/src/layers/pooling/max_pooling2d.py
@@ -95,7 +95,7 @@ def __init__(
         padding="valid",
         data_format=None,
         name=None,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(
             pool_size,
diff --git a/keras/src/layers/pooling/max_pooling3d.py b/keras/src/layers/pooling/max_pooling3d.py
index 43be140c5aa3..d6487e87f321 100644
--- a/keras/src/layers/pooling/max_pooling3d.py
+++ b/keras/src/layers/pooling/max_pooling3d.py
@@ -71,7 +71,7 @@ def __init__(
         padding="valid",
         data_format=None,
         name=None,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(
             pool_size,
diff --git a/keras/src/layers/preprocessing/image_preprocessing/base_image_preprocessing_layer.py b/keras/src/layers/preprocessing/image_preprocessing/base_image_preprocessing_layer.py
index 88080621c9db..f869ac08c67e 100644
--- a/keras/src/layers/preprocessing/image_preprocessing/base_image_preprocessing_layer.py
+++ b/keras/src/layers/preprocessing/image_preprocessing/base_image_preprocessing_layer.py
@@ -6,7 +6,6 @@
 
 
 class BaseImagePreprocessingLayer(TFDataLayer):
-
     _USE_BASE_FACTOR = True
     _FACTOR_BOUNDS = (-1, 1)
 
diff --git a/keras/src/layers/preprocessing/image_preprocessing/random_flip.py b/keras/src/layers/preprocessing/image_preprocessing/random_flip.py
index 104b846c0546..07f8121181f7 100644
--- a/keras/src/layers/preprocessing/image_preprocessing/random_flip.py
+++ b/keras/src/layers/preprocessing/image_preprocessing/random_flip.py
@@ -48,7 +48,7 @@ def __init__(
         mode=HORIZONTAL_AND_VERTICAL,
         seed=None,
         data_format=None,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(data_format=data_format, **kwargs)
         self.seed = seed
diff --git a/keras/src/layers/preprocessing/image_preprocessing/resizing.py b/keras/src/layers/preprocessing/image_preprocessing/resizing.py
index 0653025cc530..c5213d283e1a 100644
--- a/keras/src/layers/preprocessing/image_preprocessing/resizing.py
+++ b/keras/src/layers/preprocessing/image_preprocessing/resizing.py
@@ -125,7 +125,6 @@ def transform_labels(self, labels, transformation=None, training=True):
         return labels
 
     def get_random_transformation(self, data, training=True, seed=None):
-
         if isinstance(data, dict):
             input_shape = self.backend.shape(data["images"])
         else:
diff --git a/keras/src/layers/preprocessing/stft_spectrogram_test.py b/keras/src/layers/preprocessing/stft_spectrogram_test.py
index 9178b191d46d..fa2eb878bb03 100644
--- a/keras/src/layers/preprocessing/stft_spectrogram_test.py
+++ b/keras/src/layers/preprocessing/stft_spectrogram_test.py
@@ -11,7 +11,6 @@
 
 
 class TestSpectrogram(testing.TestCase):
-
     DTYPE = "float32" if backend.backend() == "torch" else "float64"
 
     @staticmethod
@@ -106,7 +105,6 @@ def test_spectrogram_channels_broadcasting(self):
     )
     @pytest.mark.requires_trainable_backend
     def test_spectrogram_channels_first(self):
-
         rnd = np.random.RandomState(41)
         audio = rnd.uniform(-1, 1, size=(3, 16000, 7))
 
diff --git a/keras/src/layers/reshaping/up_sampling2d_test.py b/keras/src/layers/reshaping/up_sampling2d_test.py
index e5c12891c093..0edaa276a54b 100644
--- a/keras/src/layers/reshaping/up_sampling2d_test.py
+++ b/keras/src/layers/reshaping/up_sampling2d_test.py
@@ -106,8 +106,8 @@ def test_upsampling_2d_bilinear(self, data_format, length_row, length_col):
     def test_upsampling_2d_correctness(self):
         input_shape = (2, 2, 1, 3)
         x = np.arange(np.prod(input_shape)).reshape(input_shape)
+        # fmt: off
         expected_output = np.array(
-            # fmt: off
             [[[[ 0.,  1.,  2.],
               [ 0.,  1.,  2.]],
              [[ 3.,  4.,  5.],
@@ -116,8 +116,8 @@ def test_upsampling_2d_correctness(self):
               [ 6.,  7.,  8.]],
              [[ 9., 10., 11.],
               [ 9., 10., 11.]]]]
-            # fmt: on
         )
+        # fmt: on
         if backend.config.image_data_format() == "channels_first":
             expected_output = expected_output.transpose((0, 3, 1, 2))
             x = x.transpose((0, 3, 1, 2))
diff --git a/keras/src/layers/rnn/conv_lstm1d.py b/keras/src/layers/rnn/conv_lstm1d.py
index d0ad56b5ce26..2d68eb748a40 100644
--- a/keras/src/layers/rnn/conv_lstm1d.py
+++ b/keras/src/layers/rnn/conv_lstm1d.py
@@ -149,7 +149,7 @@ def __init__(
         return_state=False,
         go_backwards=False,
         stateful=False,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(
             rank=1,
@@ -180,5 +180,5 @@ def __init__(
             dropout=dropout,
             recurrent_dropout=recurrent_dropout,
             seed=seed,
-            **kwargs
+            **kwargs,
         )
diff --git a/keras/src/layers/rnn/conv_lstm2d.py b/keras/src/layers/rnn/conv_lstm2d.py
index 6837eea99298..5e14eadc25aa 100644
--- a/keras/src/layers/rnn/conv_lstm2d.py
+++ b/keras/src/layers/rnn/conv_lstm2d.py
@@ -149,7 +149,7 @@ def __init__(
         return_state=False,
         go_backwards=False,
         stateful=False,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(
             rank=2,
@@ -180,5 +180,5 @@ def __init__(
             dropout=dropout,
             recurrent_dropout=recurrent_dropout,
             seed=seed,
-            **kwargs
+            **kwargs,
         )
diff --git a/keras/src/layers/rnn/conv_lstm3d.py b/keras/src/layers/rnn/conv_lstm3d.py
index 534750abebef..a36ed1dddf92 100644
--- a/keras/src/layers/rnn/conv_lstm3d.py
+++ b/keras/src/layers/rnn/conv_lstm3d.py
@@ -148,7 +148,7 @@ def __init__(
         return_state=False,
         go_backwards=False,
         stateful=False,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(
             rank=3,
@@ -179,5 +179,5 @@ def __init__(
             dropout=dropout,
             recurrent_dropout=recurrent_dropout,
             seed=seed,
-            **kwargs
+            **kwargs,
         )
diff --git a/keras/src/legacy/preprocessing/text.py b/keras/src/legacy/preprocessing/text.py
index bd23e743fd65..44dcdae166a5 100644
--- a/keras/src/legacy/preprocessing/text.py
+++ b/keras/src/legacy/preprocessing/text.py
@@ -91,7 +91,7 @@ def __init__(
         char_level=False,
         oov_token=None,
         analyzer=None,
-        **kwargs
+        **kwargs,
     ):
         # Legacy support
         if "nb_words" in kwargs:
diff --git a/keras/src/models/cloning_test.py b/keras/src/models/cloning_test.py
index 64f1d2fd96d2..254b53fb3839 100644
--- a/keras/src/models/cloning_test.py
+++ b/keras/src/models/cloning_test.py
@@ -76,7 +76,6 @@ def call(self, x):
 
 @pytest.mark.requires_trainable_backend
 class CloneModelTest(testing.TestCase):
-
     def assert_models_equal(self, model1, model2, ref_input):
         result1 = model1(ref_input)
         result2 = model2(ref_input)
diff --git a/keras/src/models/model_test.py b/keras/src/models/model_test.py
index b6c70ec2fcb8..71289f6e103f 100644
--- a/keras/src/models/model_test.py
+++ b/keras/src/models/model_test.py
@@ -121,7 +121,6 @@ def _get_model_multi_outputs_dict_with_single_tensor():
 
 
 def _get_model_with_custom_compute_loss():
-
     class MyModel(Model):
         def __init__(self):
             inputs = Input(shape=(3,), name="inputs")
@@ -166,7 +165,6 @@ def _get_variable_value_by_path(variables, path):
 
 @pytest.mark.requires_trainable_backend
 class ModelTest(testing.TestCase):
-
     def test_functional_rerouting(self):
         model = _get_model()
         self.assertIsInstance(model, Functional)
@@ -1000,9 +998,8 @@ def loss_fn(y_true, y_pred):
                 structure,
                 y_pred,
             )
-            flat_y_pred, flat_y_true = tree.flatten(y_pred), tree.flatten(
-                y_true
-            )
+            flat_y_pred = tree.flatten(y_pred)
+            flat_y_true = tree.flatten(y_true)
             diff = 0
             for y_p, y_t in zip(flat_y_pred, flat_y_true):
                 diff += losses.mean_absolute_error(y_t, y_p)
diff --git a/keras/src/ops/core_test.py b/keras/src/ops/core_test.py
index f19d8c6cd75a..0aaf4a3b1e84 100644
--- a/keras/src/ops/core_test.py
+++ b/keras/src/ops/core_test.py
@@ -818,7 +818,6 @@ def test_is_tensor(self):
         reason=f"{backend.backend()} doesn't support `custom_gradient`.",
     )
     def test_custom_gradient(self):
-
         # function to test custom_gradient on
         @ops.custom_gradient
         def log1pexp(x):
diff --git a/keras/src/ops/image_test.py b/keras/src/ops/image_test.py
index 7f346abca962..22706989d16d 100644
--- a/keras/src/ops/image_test.py
+++ b/keras/src/ops/image_test.py
@@ -657,7 +657,6 @@ def test_resize_uint8_round_saturate(self):
                 [255, 255, 255, 255],
             ]
             if "torch" == backend.backend()
-            else
             # Resize without `round` and `saturate_cast` - differences in
             # 16 points
             # [
@@ -669,7 +668,7 @@ def test_resize_uint8_round_saturate(self):
             #
             # Resize with `round` and `saturate_cast` - differences in
             # 8 points
-            [
+            else [
                 [0, 0, 0, 0],
                 [53, 53, 53, 54],
                 [201, 202, 202, 202],
diff --git a/keras/src/ops/linalg.py b/keras/src/ops/linalg.py
index 0bf80c153f92..0ecd170c7737 100644
--- a/keras/src/ops/linalg.py
+++ b/keras/src/ops/linalg.py
@@ -47,7 +47,6 @@ def _cholesky(x):
 
 
 class Det(Operation):
-
     def __init__(self):
         super().__init__()
 
@@ -84,7 +83,6 @@ def _det(x):
 
 
 class Eig(Operation):
-
     def __init__(self):
         super().__init__()
 
@@ -124,7 +122,6 @@ def _eig(x):
 
 
 class Eigh(Operation):
-
     def __init__(self):
         super().__init__()
 
@@ -165,7 +162,6 @@ def _eigh(x):
 
 
 class Inv(Operation):
-
     def __init__(self):
         super().__init__()
 
@@ -202,7 +198,6 @@ def _inv(x):
 
 
 class LuFactor(Operation):
-
     def __init__(self):
         super().__init__()
 
@@ -445,7 +440,6 @@ def qr(x, mode="reduced"):
 
 
 class Solve(Operation):
-
     def __init__(self):
         super().__init__()
 
@@ -490,7 +484,6 @@ def _solve(a, b):
 
 
 class SolveTriangular(Operation):
-
     def __init__(self, lower=False):
         super().__init__()
         self.lower = lower
 
@@ -538,7 +531,6 @@ def _solve_triangular(a, b, lower=False):
 
 
 class SVD(Operation):
-
     def __init__(self, full_matrices=True, compute_uv=True):
         super().__init__()
         self.full_matrices = full_matrices
diff --git a/keras/src/ops/linalg_test.py b/keras/src/ops/linalg_test.py
index 63b362ae1671..c9f573778218 100644
--- a/keras/src/ops/linalg_test.py
+++ b/keras/src/ops/linalg_test.py
@@ -330,7 +330,6 @@ def test_svd(self):
 
 
 class LinalgOpsCorrectnessTest(testing.TestCase):
-
     def test_cholesky(self):
         x = np.random.rand(4, 3, 3).astype("float32")
         with self.assertRaises(ValueError):
diff --git a/keras/src/ops/math.py b/keras/src/ops/math.py
index fd0a41d5177b..c7b80ec54348 100644
--- a/keras/src/ops/math.py
+++ b/keras/src/ops/math.py
@@ -43,7 +43,6 @@ def compute_output_spec(self, data, _):
 
 
 class SegmentSum(SegmentReduction):
-
     def call(self, data, segment_ids):
         _segment_reduce_validation(data, segment_ids)
         return backend.math.segment_sum(
@@ -90,7 +89,6 @@ def segment_sum(data, segment_ids, num_segments=None, sorted=False):
 
 
 class SegmentMax(SegmentReduction):
-
     def call(self, data, segment_ids):
         _segment_reduce_validation(data, segment_ids)
         return backend.math.segment_max(
diff --git a/keras/src/ops/math_test.py b/keras/src/ops/math_test.py
index 09c87514c788..f7de5c40801a 100644
--- a/keras/src/ops/math_test.py
+++ b/keras/src/ops/math_test.py
@@ -145,7 +145,6 @@ def _max_reduce(left, right):
 
 
 class MathOpsDynamicShapeTest(testing.TestCase):
-
     @parameterized.parameters([(kmath.segment_sum,), (kmath.segment_max,)])
     def test_segment_reduce(self, segment_reduce_op):
         # 1D case
@@ -418,7 +417,6 @@ def test_logdet(self):
 
 
 class MathOpsCorrectnessTest(testing.TestCase):
-
     def run_segment_reduce_test(
         self,
         segment_reduce_op,
@@ -1345,7 +1343,6 @@ def test_undefined_fft_length_and_last_dimension(self):
 
 
 class TestMathErrors(testing.TestCase):
-
     @parameterized.parameters([(kmath.segment_sum,), (kmath.segment_max,)])
     @pytest.mark.skipif(
         backend.backend() != "jax", reason="Testing Jax errors only"
diff --git a/keras/src/optimizers/optimizer_sparse_test.py b/keras/src/optimizers/optimizer_sparse_test.py
index 5fbe0f56422c..1d1f73ebaa45 100644
--- a/keras/src/optimizers/optimizer_sparse_test.py
+++ b/keras/src/optimizers/optimizer_sparse_test.py
@@ -206,21 +206,29 @@ def mock_variable_assign(variable, value):
         # patch "_apply_weight_decay" to exclude this special case.
         # patch the optimizer "assign" methods to detect sparse updates.
         # patch the tf.Variable "assign" methods to detect direct assign calls.
-        with mock.patch.object(
-            optimizer_to_patch, "_apply_weight_decay", autospec=True
-        ), mock.patch.object(
-            optimizer_to_patch, "assign", autospec=True
-        ) as optimizer_assign, mock.patch.object(
-            optimizer_to_patch, "assign_add", autospec=True
-        ) as optimizer_assign_add, mock.patch.object(
-            optimizer_to_patch, "assign_sub", autospec=True
-        ) as optimizer_assign_sub, mock.patch.object(
-            variable_class, "assign", autospec=True
-        ) as variable_assign, mock.patch.object(
-            variable_class, "assign_add", autospec=True
-        ) as variable_assign_add, mock.patch.object(
-            variable_class, "assign_sub", autospec=True
-        ) as variable_assign_sub:
+        with (
+            mock.patch.object(
+                optimizer_to_patch, "_apply_weight_decay", autospec=True
+            ),
+            mock.patch.object(
+                optimizer_to_patch, "assign", autospec=True
+            ) as optimizer_assign,
+            mock.patch.object(
+                optimizer_to_patch, "assign_add", autospec=True
+            ) as optimizer_assign_add,
+            mock.patch.object(
+                optimizer_to_patch, "assign_sub", autospec=True
+            ) as optimizer_assign_sub,
+            mock.patch.object(
+                variable_class, "assign", autospec=True
+            ) as variable_assign,
+            mock.patch.object(
+                variable_class, "assign_add", autospec=True
+            ) as variable_assign_add,
+            mock.patch.object(
+                variable_class, "assign_sub", autospec=True
+            ) as variable_assign_sub,
+        ):
             optimizer_assign.side_effect = mock_optimizer_assign
             optimizer_assign_add.side_effect = mock_optimizer_assign
             optimizer_assign_sub.side_effect = mock_optimizer_assign
diff --git a/keras/src/saving/file_editor.py b/keras/src/saving/file_editor.py
index 36b9c1043ed4..09cd7b87c14c 100644
--- a/keras/src/saving/file_editor.py
+++ b/keras/src/saving/file_editor.py
@@ -552,7 +552,6 @@ def _weights_summary_cli(self):
         self._print_weights_structure(self.weights_dict, prefix=" " * 2)
 
     def _weights_summary_interactive(self):
-
         def _generate_html_weights(dictionary, margin_left=0, font_size=1):
             html = ""
             for key, value in dictionary.items():
diff --git a/keras/src/saving/file_editor_test.py b/keras/src/saving/file_editor_test.py
index 41986b866816..965c97ba863d 100644
--- a/keras/src/saving/file_editor_test.py
+++ b/keras/src/saving/file_editor_test.py
@@ -26,7 +26,6 @@ def get_target_model():
 
 
 class SavingTest(testing.TestCase):
-
     def test_basics(self):
         temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras")
 
diff --git a/keras/src/saving/serialization_lib_test.py b/keras/src/saving/serialization_lib_test.py
index 80df36f3eeb9..4891d961089d 100644
--- a/keras/src/saving/serialization_lib_test.py
+++ b/keras/src/saving/serialization_lib_test.py
@@ -352,7 +352,7 @@ def __init__(
         *,
         kernel_regularizer=None,
         kernel_initializer=None,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(**kwargs)
         self._units = units
@@ -364,7 +364,7 @@ def get_config(self):
             units=self._units,
             kernel_initializer=self._kernel_initializer,
             kernel_regularizer=self._kernel_regularizer,
-            **super().get_config()
+            **super().get_config(),
         )
 
     def build(self, input_shape):
diff --git a/keras/src/trainers/compile_utils_test.py b/keras/src/trainers/compile_utils_test.py
index a8a4515d111a..4bc2a2f4db03 100644
--- a/keras/src/trainers/compile_utils_test.py
+++ b/keras/src/trainers/compile_utils_test.py
@@ -20,9 +20,8 @@ def test_single_output_case(self):
             weighted_metrics=[metrics_module.MeanSquaredError()],
         )
         # Test symbolic build
-        y_true, y_pred = backend.KerasTensor((3, 4)), backend.KerasTensor(
-            (3, 4)
-        )
+        y_true = backend.KerasTensor((3, 4))
+        y_pred = backend.KerasTensor((3, 4))
         compile_metrics.build(y_true, y_pred)
         # Test eager build
         y_true = np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]])
@@ -243,9 +242,8 @@ def test_single_output_case(self):
             loss=losses_module.MeanSquaredError(),
         )
         # Test symbolic build
-        y_true, y_pred = backend.KerasTensor((3, 4)), backend.KerasTensor(
-            (3, 4)
-        )
+        y_true = backend.KerasTensor((3, 4))
+        y_pred = backend.KerasTensor((3, 4))
         compile_loss.build(y_true, y_pred)
         # Test eager build
         y_true = np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]])
@@ -258,9 +256,8 @@ def test_single_output_case_with_crossentropy_loss(self):
         compile_loss = CompileLoss(loss="crossentropy")
 
         # Test symbolic build
-        y_true, y_pred = backend.KerasTensor((3, 4)), backend.KerasTensor(
-            (3, 4)
-        )
+        y_true = backend.KerasTensor((3, 4))
+        y_pred = backend.KerasTensor((3, 4))
         compile_loss.build(y_true, y_pred)
         # Test eager build
         y_true = np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]])
diff --git a/keras/src/trainers/data_adapters/array_data_adapter.py b/keras/src/trainers/data_adapters/array_data_adapter.py
index 10b4dc37a93a..e732f28688bd 100644
--- a/keras/src/trainers/data_adapters/array_data_adapter.py
+++ b/keras/src/trainers/data_adapters/array_data_adapter.py
@@ -198,7 +198,6 @@ def slice_inputs(indices_dataset, inputs):
         )
 
         def grab_batch(i, data):
-
             def grab_one(x):
                 if isinstance(x, array_slicing.TensorflowSparseWrapper):
                     return array_slicing.slice_tensorflow_sparse_wrapper(
diff --git a/keras/src/trainers/data_adapters/py_dataset_adapter_test.py b/keras/src/trainers/data_adapters/py_dataset_adapter_test.py
index 71b27e8faadb..a70228f6aff4 100644
--- a/keras/src/trainers/data_adapters/py_dataset_adapter_test.py
+++ b/keras/src/trainers/data_adapters/py_dataset_adapter_test.py
@@ -24,7 +24,7 @@ def __init__(
         batch_size=32,
         delay=0,
         infinite=False,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(**kwargs)
         self.x, self.y = x_set, y_set
@@ -80,7 +80,6 @@ def __getitem__(self, idx):
 
 
 class ExceptionPyDataset(py_dataset_adapter.PyDataset):
-
     @property
     def num_batches(self):
         return 4
@@ -285,7 +284,6 @@ def test_dict_inputs(self):
         self.assertEqual(tuple(by.shape), (4, 2))
 
     def test_with_different_shapes(self):
-
         class TestPyDataset(py_dataset_adapter.PyDataset):
             @property
             def num_batches(self):
diff --git a/keras/src/trainers/data_adapters/torch_data_loader_adapter_test.py b/keras/src/trainers/data_adapters/torch_data_loader_adapter_test.py
index c763bb570b9d..591941300c83 100644
--- a/keras/src/trainers/data_adapters/torch_data_loader_adapter_test.py
+++ b/keras/src/trainers/data_adapters/torch_data_loader_adapter_test.py
@@ -57,7 +57,6 @@ def test_basic_dataloader(self):
         named_product(batch_size=[None, 3], implements_len=[True, False])
     )
     def test_dataloader_iterable_dataset(self, batch_size, implements_len):
-
         class TestIterableDataset(torch.utils.data.IterableDataset):
             def __init__(self):
                 self.x = torch.normal(2, 3, size=(16, 4))
diff --git a/keras/src/trainers/trainer_test.py b/keras/src/trainers/trainer_test.py
index d1ad16a4c579..19e51417443b 100644
--- a/keras/src/trainers/trainer_test.py
+++ b/keras/src/trainers/trainer_test.py
@@ -1999,7 +1999,6 @@ def call(self, x):
 
     @pytest.mark.requires_trainable_backend
     def test_callbacks_can_update_state_at_batch_boundary(self):
-
         class CounterModel(keras.Model):
             def __init__(self):
                 super().__init__()
@@ -2176,7 +2175,6 @@ def compute_loss(
 
     @pytest.mark.requires_trainable_backend
     def test_compute_loss_no_training_backwards_compatibility(self):
-
         class MyModel(keras.Model):
             def __init__(self):
                 super().__init__()
diff --git a/keras/src/tree/optree_impl.py b/keras/src/tree/optree_impl.py
index c6b7699cb9ea..1bc5d3466fb0 100644
--- a/keras/src/tree/optree_impl.py
+++ b/keras/src/tree/optree_impl.py
@@ -198,7 +198,6 @@ def truncate(value, length):
 
 
 def lists_to_tuples(structure):
-
     def sequence_fn(instance, args):
         if isinstance(instance, list):
             return tuple(args)
@@ -210,7 +209,6 @@ def sequence_fn(instance, args):
 
 
 def map_shape_structure(func, structure):
-
     def is_shape_tuple(x):
         return isinstance(x, (list, tuple)) and all(
             isinstance(e, (int, type(None))) for e in x
diff --git a/keras/src/tree/tree_test.py b/keras/src/tree/tree_test.py
index 23fdacb7bc09..cf6382e4b730 100644
--- a/keras/src/tree/tree_test.py
+++ b/keras/src/tree/tree_test.py
@@ -38,7 +38,6 @@
 
 @parameterized.named_parameters(TEST_CASES)
 class TreeTest(testing.TestCase):
-
     def test_is_nested(self, tree_impl, is_optree):
         self.assertFalse(tree_impl.is_nested("1234"))
         self.assertFalse(tree_impl.is_nested(b"1234"))
diff --git a/keras/src/utils/dataset_utils_test.py b/keras/src/utils/dataset_utils_test.py
index 7853f1592766..f2ebbc340448 100644
--- a/keras/src/utils/dataset_utils_test.py
+++ b/keras/src/utils/dataset_utils_test.py
@@ -11,7 +11,6 @@
 
 
 class MyTorchDataset(TorchDataset):
-
     def __init__(self, x, y):
         self.x = x
         self.y = y
diff --git a/keras/src/utils/jax_layer_test.py b/keras/src/utils/jax_layer_test.py
index 23d9d9983db4..2c85ecc2e11a 100644
--- a/keras/src/utils/jax_layer_test.py
+++ b/keras/src/utils/jax_layer_test.py
@@ -207,7 +207,6 @@ def _count_params(weights):
             return count
 
         def verify_weights_and_params(layer):
-
             self.assertEqual(trainable_weights, len(layer.trainable_weights))
             self.assertEqual(
                 trainable_params,
@@ -329,7 +328,6 @@ def verify_identical_model(model):
 
         # test subclass model building without a build method
         class TestModel(models.Model):
-
             def __init__(self, layer):
                 super().__init__()
                 self._layer = layer
diff --git a/pyproject.toml b/pyproject.toml
index 07a2111a41f0..773f53c68f18 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -47,19 +47,6 @@ version = {attr = "keras.src.version.__version__"}
 [tool.setuptools.packages.find]
 include = ["keras", "keras.*"]
 
-[tool.black]
-line-length = 80
-target-version = []
-
-# black needs this to be a regex
-# to add more exclude expressions
-# append `| ` (e.g. `| .*_test\\.py`) to this list
-extend-exclude = """
-(
-    examples/
-)
-"""
-
 [tool.ruff]
 line-length = 80
 
diff --git a/requirements-common.txt b/requirements-common.txt
index fbf4b7fce9f6..150324bf30d1 100644
--- a/requirements-common.txt
+++ b/requirements-common.txt
@@ -1,5 +1,4 @@
 namex>=0.0.8
-black>=22
 ruff
 pytest
 numpy
diff --git a/shell/format.sh b/shell/format.sh
index ac3fee4c529f..4e1d191dbda2 100755
--- a/shell/format.sh
+++ b/shell/format.sh
@@ -1,9 +1,9 @@
 #!/bin/bash
-set -Eeuo pipefail
+set -Euo pipefail
 
 base_dir=$(dirname $(dirname $0))
 
-ruff check --exit-zero --config "${base_dir}/pyproject.toml" --fix .
+ruff check --config "${base_dir}/pyproject.toml" --fix .
 
-black --config "${base_dir}/pyproject.toml" .
+ruff format --config "${base_dir}/pyproject.toml" .
diff --git a/shell/lint.sh b/shell/lint.sh
index 36d180549e17..37e9276f2257 100755
--- a/shell/lint.sh
+++ b/shell/lint.sh
@@ -1,9 +1,12 @@
 #!/bin/bash
-set -Eeuo pipefail
+set -Euo pipefail
 
 base_dir=$(dirname $(dirname $0))
 
-ruff check --exit-zero --config "${base_dir}/pyproject.toml" .
+ruff check --config "${base_dir}/pyproject.toml" .
+exitcode=$?
 
-black --config "${base_dir}/pyproject.toml" --check .
+ruff format --check --config "${base_dir}/pyproject.toml" .
+exitcode=$(($exitcode + $?))
+exit $exitcode
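
For trying the new workflow locally, the updated scripts amount to three Ruff invocations; a minimal sketch run from the repo root, assuming `ruff` is installed (it replaces `black` in requirements-common.txt):

    # lint and autofix; with --exit-zero removed, unfixable violations now fail the run
    ruff check --config pyproject.toml --fix .

    # rewrite files in place (takes over from `black .`)
    ruff format --config pyproject.toml .

    # check-only mode used by lint.sh; exits non-zero if any file would be reformatted
    ruff format --check --config pyproject.toml .

Both scripts drop `-e` from `set -Eeuo pipefail` so a failing Ruff command no longer aborts the script: lint.sh in particular runs both the lint and the format check, sums the two exit codes, and reports the total, so CI surfaces every class of failure in one pass.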