From 621113c5c04bcbdd2e2b66d9db0fca406ca8831f Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 13 Jan 2025 23:11:58 +0530
Subject: [PATCH] chore: update pre-commit hooks (#672)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 .pre-commit-config.yaml                     |  2 +-
 autograd/core.py                            |  2 +-
 autograd/numpy/linalg.py                    | 12 ++++--------
 autograd/numpy/numpy_vjps.py                |  8 +++-----
 autograd/numpy/numpy_wrapper.py             |  2 +-
 autograd/scipy/stats/multivariate_normal.py |  2 +-
 autograd/test_util.py                       |  2 +-
 examples/fluidsim/fluidsim.py               |  4 +---
 examples/fluidsim/wing.py                   |  4 +---
 9 files changed, 14 insertions(+), 24 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 0f772280..6cffff74 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -17,7 +17,7 @@ repos:
       - id: trailing-whitespace
 
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: "v0.8.6"
+    rev: "v0.9.1"
     hooks:
       - id: ruff
         args: ["--fix", "--show-fixes"]
diff --git a/autograd/core.py b/autograd/core.py
index e3740b80..2116589b 100644
--- a/autograd/core.py
+++ b/autograd/core.py
@@ -293,7 +293,7 @@ def vspace(value):
             return vspace(getval(value))
         else:
             raise TypeError(
-                "Can't find vector space for value {} of type {}. " "Valid types are {}".format(
+                "Can't find vector space for value {} of type {}. Valid types are {}".format(
                     value, type(value), VSpace.mappings.keys()
                 )
             )
diff --git a/autograd/numpy/linalg.py b/autograd/numpy/linalg.py
index c69199aa..b400a558 100644
--- a/autograd/numpy/linalg.py
+++ b/autograd/numpy/linalg.py
@@ -81,11 +81,9 @@ def check_implemented():
 
         if matrix_norm:
             if not (ord is None or ord == "fro" or ord == "nuc"):
-                raise NotImplementedError(
-                    "Gradient of matrix norm not " "implemented for ord={}".format(ord)
-                )
+                raise NotImplementedError("Gradient of matrix norm not implemented for ord={}".format(ord))
         elif not (ord is None or ord > 1):
-            raise NotImplementedError("Gradient of norm not " "implemented for ord={}".format(ord))
+            raise NotImplementedError("Gradient of norm not implemented for ord={}".format(ord))
 
     if axis is None:
         expand = lambda a: a
@@ -139,11 +137,9 @@ def check_implemented():
 
         if matrix_norm:
             if not (ord is None or ord == "fro" or ord == "nuc"):
-                raise NotImplementedError(
-                    "Gradient of matrix norm not " "implemented for ord={}".format(ord)
-                )
+                raise NotImplementedError("Gradient of matrix norm not implemented for ord={}".format(ord))
         elif not (ord is None or ord > 1):
-            raise NotImplementedError("Gradient of norm not " "implemented for ord={}".format(ord))
+            raise NotImplementedError("Gradient of norm not implemented for ord={}".format(ord))
 
     if axis is None:
         contract = lambda a: anp.sum(a)
diff --git a/autograd/numpy/numpy_vjps.py b/autograd/numpy/numpy_vjps.py
index ddcfc2ce..08d4e1b7 100644
--- a/autograd/numpy/numpy_vjps.py
+++ b/autograd/numpy/numpy_vjps.py
@@ -251,11 +251,11 @@ def grad_rollaxis(ans, a, axis, start=0):
     if axis < 0:
         raise NotImplementedError(
-            "Gradient of rollaxis not implemented for axis < 0. " "Please use moveaxis instead."
+            "Gradient of rollaxis not implemented for axis < 0. Please use moveaxis instead."
         )
     elif start < 0:
         raise NotImplementedError(
-            "Gradient of rollaxis not implemented for start < 0. " "Please use moveaxis instead."
+            "Gradient of rollaxis not implemented for start < 0. Please use moveaxis instead."
         )
     return lambda g: anp.rollaxis(g, start - 1, axis) if start > axis else anp.rollaxis(g, start, axis + 1)
 
 
@@ -293,9 +293,7 @@ def helper(g, n):
 def grad_gradient(ans, x, *vargs, **kwargs):
     axis = kwargs.pop("axis", None)
     if vargs or kwargs:
-        raise NotImplementedError(
-            "The only optional argument currently supported for np.gradient " "is axis."
-        )
+        raise NotImplementedError("The only optional argument currently supported for np.gradient is axis.")
     if axis is None:
         axis = range(x.ndim)
     elif type(axis) is int:
diff --git a/autograd/numpy/numpy_wrapper.py b/autograd/numpy/numpy_wrapper.py
index baa0aed3..6dfd22ec 100644
--- a/autograd/numpy/numpy_wrapper.py
+++ b/autograd/numpy/numpy_wrapper.py
@@ -76,7 +76,7 @@ def array(A, *args, **kwargs):
 def wrap_if_boxes_inside(raw_array, slow_op_name=None):
     if raw_array.dtype is _np.dtype("O"):
         if slow_op_name:
-            warnings.warn("{} is slow for array inputs. " "np.concatenate() is faster.".format(slow_op_name))
+            warnings.warn("{} is slow for array inputs. np.concatenate() is faster.".format(slow_op_name))
         return array_from_args((), {}, *raw_array.ravel()).reshape(raw_array.shape)
     else:
         return raw_array
diff --git a/autograd/scipy/stats/multivariate_normal.py b/autograd/scipy/stats/multivariate_normal.py
index 170823ee..6109fb52 100644
--- a/autograd/scipy/stats/multivariate_normal.py
+++ b/autograd/scipy/stats/multivariate_normal.py
@@ -25,7 +25,7 @@ def generalized_outer_product(x):
 def covgrad(x, mean, cov, allow_singular=False):
     if allow_singular:
         raise NotImplementedError(
-            "The multivariate normal pdf is not " "differentiable w.r.t. a singular covariance matix"
+            "The multivariate normal pdf is not differentiable w.r.t. a singular covariance matix"
         )
     J = np.linalg.inv(cov)
     solved = np.matmul(J, np.expand_dims(x - mean, -1))
diff --git a/autograd/test_util.py b/autograd/test_util.py
index 14437d64..d6eed7a1 100644
--- a/autograd/test_util.py
+++ b/autograd/test_util.py
@@ -39,7 +39,7 @@ def check_vjp(f, x):
     vjv_exact = x_vs.inner_prod(x_v, vjp_y)
     vjv_numeric = y_vs.inner_prod(y_v, jvp(x_v))
     assert scalar_close(vjv_numeric, vjv_exact), (
-        "Derivative (VJP) check of {} failed with arg {}:\n" "analytic: {}\nnumeric: {}".format(
+        "Derivative (VJP) check of {} failed with arg {}:\nanalytic: {}\nnumeric: {}".format(
             get_name(f), x, vjv_exact, vjv_numeric
         )
     )
diff --git a/examples/fluidsim/fluidsim.py b/examples/fluidsim/fluidsim.py
index 9e75875a..fc11e1a6 100644
--- a/examples/fluidsim/fluidsim.py
+++ b/examples/fluidsim/fluidsim.py
@@ -143,7 +143,5 @@ def callback(params):
     simulate(init_vx, init_vy, init_smoke, simulation_timesteps, ax, render=True)
 
     print("Converting frames to an animated GIF...")
-    os.system(
-        "convert -delay 5 -loop 0 step*.png" " -delay 250 step100.png surprise.gif"
-    )  # Using imagemagick.
+    os.system("convert -delay 5 -loop 0 step*.png -delay 250 step100.png surprise.gif")  # Using imagemagick.
     os.system("rm step*.png")
diff --git a/examples/fluidsim/wing.py b/examples/fluidsim/wing.py
index 22363df7..344ed829 100644
--- a/examples/fluidsim/wing.py
+++ b/examples/fluidsim/wing.py
@@ -183,8 +183,6 @@ def callback(weights):
 
     print("Converting frames to an animated GIF...")  # Using imagemagick.
     os.system(
-        "convert -delay 5 -loop 0 step*.png " "-delay 250 step{:03d}.png wing.gif".format(
-            simulation_timesteps
-        )
+        "convert -delay 5 -loop 0 step*.png -delay 250 step{:03d}.png wing.gif".format(simulation_timesteps)
     )
     os.system("rm step*.png")