From 30491612e3ce61032e7d98ae50f6555c0c8cb4b4 Mon Sep 17 00:00:00 2001 From: Sam Daulton Date: Fri, 27 Sep 2024 13:27:07 -0700 Subject: [PATCH 1/2] updates --- gpytorch/kernels/linear_kernel.py | 21 +++++++++------------ test/kernels/test_linear_kernel.py | 25 +++++++++++++++++++++---- 2 files changed, 30 insertions(+), 16 deletions(-) diff --git a/gpytorch/kernels/linear_kernel.py b/gpytorch/kernels/linear_kernel.py index d7ecd1014..a432190fd 100644 --- a/gpytorch/kernels/linear_kernel.py +++ b/gpytorch/kernels/linear_kernel.py @@ -1,6 +1,5 @@ #!/usr/bin/env python3 -import warnings from typing import Optional, Union import torch @@ -40,14 +39,15 @@ class LinearKernel(Kernel): \top} \mathbf v)`, where the base multiply :math:`\mathbf X \mathbf v` takes only :math:`\mathcal O(ND)` time and space. + :param ard_num_dims: Set this if you want a separate variance parameter for each weight. (Default: `None`) :param variance_prior: Prior over the variance parameter. (Default `None`.) :param variance_constraint: Constraint to place on variance parameter. (Default: `Positive`.) - :param active_dims: List of data dimensions to operate on. `len(active_dims)` should equal `num_dimensions`. + :param active_dims: List of data dimensions to operate on.
""" def __init__( self, - num_dimensions: Optional[int] = None, + ard_num_dims: Optional[int] = None, offset_prior: Optional[Prior] = None, variance_prior: Optional[Prior] = None, variance_constraint: Optional[Interval] = None, @@ -56,15 +56,12 @@ def __init__( super(LinearKernel, self).__init__(**kwargs) if variance_constraint is None: variance_constraint = Positive() - - if num_dimensions is not None: - # Remove after 1.0 - warnings.warn("The `num_dimensions` argument is deprecated and no longer used.", DeprecationWarning) - self.register_parameter(name="offset", parameter=torch.nn.Parameter(torch.zeros(1, 1, num_dimensions))) - if offset_prior is not None: - # Remove after 1.0 - warnings.warn("The `offset_prior` argument is deprecated and no longer used.", DeprecationWarning) - self.register_parameter(name="raw_variance", parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1, 1))) + self.register_parameter( + name="raw_variance", + parameter=torch.nn.Parameter( + torch.zeros(*self.batch_shape, 1, 1 if ard_num_dims is None else ard_num_dims) + ), + ) if variance_prior is not None: if not isinstance(variance_prior, Prior): raise TypeError("Expected gpytorch.priors.Prior but got " + type(variance_prior).__name__) diff --git a/test/kernels/test_linear_kernel.py b/test/kernels/test_linear_kernel.py index b520842fd..d4eb2a033 100644 --- a/test/kernels/test_linear_kernel.py +++ b/test/kernels/test_linear_kernel.py @@ -10,14 +10,16 @@ class TestLinearKernel(unittest.TestCase, BaseKernelTestCase): + kernel_kwargs = {} + def create_kernel_no_ard(self, **kwargs): - return LinearKernel(**kwargs) + return LinearKernel(**kwargs, **self.kernel_kwargs) def test_computes_linear_function_rectangular(self): a = torch.tensor([4, 2, 8], dtype=torch.float).view(3, 1) b = torch.tensor([0, 2, 1], dtype=torch.float).view(3, 1) - kernel = LinearKernel().initialize(variance=1.0) + kernel = self.create_kernel_no_ard().initialize(variance=1.0) kernel.eval() actual = torch.matmul(a, 
b.t()) res = kernel(a, b).to_dense() @@ -31,7 +33,7 @@ def test_computes_linear_function_rectangular(self): def test_computes_linear_function_square(self): a = torch.tensor([[4, 1], [2, 0], [8, 3]], dtype=torch.float) - kernel = LinearKernel().initialize(variance=3.14) + kernel = self.create_kernel_no_ard().initialize(variance=3.14) kernel.eval() actual = torch.matmul(a, a.t()) * 3.14 res = kernel(a, a).to_dense() @@ -57,7 +59,7 @@ def test_computes_linear_function_square(self): def test_computes_linear_function_square_batch(self): a = torch.tensor([[[4, 1], [2, 0], [8, 3]], [[1, 1], [2, 1], [1, 3]]], dtype=torch.float) - kernel = LinearKernel().initialize(variance=1.0) + kernel = self.create_kernel_no_ard().initialize(variance=1.0) kernel.eval() actual = torch.matmul(a, a.transpose(-1, -2)) res = kernel(a, a).to_dense() @@ -92,5 +94,20 @@ def test_prior_type(self): self.assertRaises(TypeError, self.create_kernel_with_prior, 1) +class TestLinearKernelARD(TestLinearKernel): + def test_kernel_ard(self) -> None: + self.kernel_kwargs = {"ard_num_dims": 2} + kernel = self.create_kernel_no_ard() + self.assertEqual(kernel.variance.shape, torch.Size([1, 2])) + + def test_computes_linear_function_rectangular(self): + self.kernel_kwargs = {"ard_num_dims": 1} + super().test_computes_linear_function_rectangular() + + def test_computes_linear_function_square_batch(self): + self.kernel_kwargs = {"ard_num_dims": 2} + super().test_computes_linear_function_square_batch() + + if __name__ == "__main__": unittest.main() From aedac78ae5f66d756ba3747be562aa9afc1928cb Mon Sep 17 00:00:00 2001 From: Sam Daulton Date: Fri, 27 Sep 2024 13:56:57 -0700 Subject: [PATCH 2/2] remove offset prior --- gpytorch/kernels/linear_kernel.py | 1 - 1 file changed, 1 deletion(-) diff --git a/gpytorch/kernels/linear_kernel.py b/gpytorch/kernels/linear_kernel.py index a432190fd..51936766e 100644 --- a/gpytorch/kernels/linear_kernel.py +++ b/gpytorch/kernels/linear_kernel.py @@ -48,7 +48,6 @@ class 
LinearKernel(Kernel): def __init__( self, ard_num_dims: Optional[int] = None, - offset_prior: Optional[Prior] = None, variance_prior: Optional[Prior] = None, variance_constraint: Optional[Interval] = None, **kwargs,