
Commit

Cleanup code, add testing for global optima with fitness 0, and add 2 more bbob function definitions
timothyatkinson committed Nov 1, 2022
1 parent f0248c6 commit 67c7df8
Showing 4 changed files with 90 additions and 62 deletions.
6 changes: 6 additions & 0 deletions src/evotorch/bbo/__init__.py
@@ -15,3 +15,9 @@
"""
Problem types for Black-box Optimisation
"""


__all__ = ("bbob_utilities", "bbob_problem", "bbob_noiseless_suite")


from . import bbob_noiseless_suite, bbob_problem, bbob_utilities
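
A minimal usage sketch of the newly exported submodules (this assumes the get_function_i helper changed later in this commit, and instantiation with a dimension as in the new test file below):

from evotorch.bbo import bbob_noiseless_suite

problem_cls = bbob_noiseless_suite.get_function_i(1)  # F1 is Sphere, the first entry of _functions
problem = problem_cls(10)  # a 10-dimensional instance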
76 changes: 61 additions & 15 deletions src/evotorch/bbo/bbob_noiseless_suite.py
@@ -397,8 +397,8 @@ def _initialize_meta_variables(self):
        self.R = self.make_random_orthogonal_matrix()

    def map_x_to_z(self, x: torch.Tensor) -> torch.Tensor:
-       # Lambda^10 Q T^0.5_asy ( (R(x - x_opt) )
-       return self.lambda_10 * bbob_utilities.apply_orthogonal_matrix(
+       # Lambda^1000 Q T^0.5_asy ( R(x - x_opt) )
+       return self.lambda_1000 * bbob_utilities.apply_orthogonal_matrix(
            bbob_utilities.T_beta_asy(
                values=bbob_utilities.apply_orthogonal_matrix(
                    x - self._x_opt.unsqueeze(0),
@@ -419,6 +419,63 @@ def _apply_function(self, z: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
        ).pow(2.0) + 10 * bbob_utilities.f_pen(x)
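
For reference, Lambda^alpha above is the standard BBOB diagonal scaling matrix. A hypothetical standalone construction of its diagonal, following the BBOB paper's definition rather than the library's own make_lambda_alpha:

import torch

def lambda_alpha_diag(alpha: float, d: int) -> torch.Tensor:
    # lambda_ii = alpha ** (0.5 * i / (d - 1)) for i = 0, ..., d - 1 (assumes d >= 2)
    i = torch.arange(d, dtype=torch.float32)
    return alpha ** (0.5 * i / (d - 1))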


class CompositeGriewankRosenbrock(BBOBProblem):
    def make_x_opt(self) -> torch.Tensor:
        # GriewankRosenbrock has the special global optimum R^T (ones) / (2 z_coeff)
        return (
            bbob_utilities.apply_orthogonal_matrix(self.make_ones(self.solution_length).unsqueeze(0), self.R.T)
            / (2 * self.z_coeff)
        )[0]

    def initialize_meta_variables(self):
        # x_opt must be set manually for this task (note that this is hidden in the source code of COCO)
        # see: https://github.com/numbbo/coco/blob/master/code-experiments/src/f_griewank_rosenbrock.c#L186
        # so we override initialize_meta_variables, rather than _initialize_meta_variables, so that x_opt can be set after R is initialized
        self.z_coeff = max(1, np.sqrt(self.solution_length) / 8)
        self.R = self.make_random_orthogonal_matrix()
        self._x_opt = self.make_x_opt()
        self._f_opt = self.make_f_opt()

    def map_x_to_z(self, x: torch.Tensor) -> torch.Tensor:
        # max(1, sqrt(d)/8) R x + 1/2
        return 1 / 2 + self.z_coeff * bbob_utilities.apply_orthogonal_matrix(x, self.R)

    def _apply_function(self, z: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
        z_starts_at_0 = z[:, : self.solution_length - 1]
        z_starts_at_1 = z[:, 1:]
        # Compute Rosenbrock values
        rosenbrock_rotated = 100 * (z_starts_at_0.pow(2.0) - z_starts_at_1).pow(2.0) + (z_starts_at_0 - 1).pow(2.0)
        return (10 / (self.solution_length - 1)) * torch.sum(
            rosenbrock_rotated / 4000 - torch.cos(rosenbrock_rotated), dim=-1
        ) + 10
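
A standalone sanity check of this optimum, in plain torch and independent of the BBOBProblem machinery: map_x_to_z sends x_opt to the all-ones vector, where the Rosenbrock term vanishes and the sum reduces to -(d - 1), cancelling the trailing + 10.

import torch

d = 10
z = torch.ones(1, d)  # z at the optimum, since R @ R.T = I
z0, z1 = z[:, :-1], z[:, 1:]
r = 100 * (z0.pow(2.0) - z1).pow(2.0) + (z0 - 1).pow(2.0)  # identically zero at z = ones
f = (10 / (d - 1)) * torch.sum(r / 4000 - torch.cos(r), dim=-1) + 10
print(f)  # tensor([0.])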


class Schwefel(BBOBProblem):
    def make_x_opt(self) -> torch.Tensor:
        return 4.2096874633 * self.random_binary[0] / 2

    def initialize_meta_variables(self):
        # x_opt must be set manually for this task
        self.lambda_10 = self.make_lambda_alpha(10, diagonal_only=True).unsqueeze(0)
        self.random_binary = self.make_random_binary_vector().unsqueeze(0)
        self._x_opt = self.make_x_opt()
        self._f_opt = self.make_f_opt()

    def map_x_to_z(self, x: torch.Tensor) -> torch.Tensor:
        x_hat = 2 * self.random_binary * x
        # Note: z_hat aliases x_hat, but the right-hand side below is evaluated in full
        # before the assignment, so the original x_hat values are used
        z_hat = x_hat
        z_hat[:, 1:] = z_hat[:, 1:] + 0.25 * (x_hat[:, :-1] - 2 * torch.abs(self._x_opt[:-1]).unsqueeze(0))
        z = 100 * (
            self.lambda_10 * (z_hat - 2 * torch.abs(self._x_opt).unsqueeze(0)) + 2 * torch.abs(self._x_opt).unsqueeze(0)
        )
        return z

    def _apply_function(self, z: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
        return (-1 / (100 * self.solution_length)) * torch.sum(z * torch.sin(torch.sqrt(torch.abs(z))), dim=-1) + (
            4.189828872724339 + 100 * bbob_utilities.f_pen(z / 100)
        )
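
Likewise for Schwefel: the constant 4.189828872724339 is 418.9828872724339 / 100, where 418.9828872724339 is the per-coordinate value of z * sin(sqrt(|z|)) at z = 420.96874633, which is where map_x_to_z sends x_opt. A standalone check:

import torch

d = 10
z = torch.full((1, d), 420.96874633, dtype=torch.float64)  # z at the optimum
f = (-1 / (100 * d)) * torch.sum(z * torch.sin(torch.sqrt(torch.abs(z))), dim=-1) + 4.189828872724339
print(f)  # ~0 up to floating-point error; f_pen vanishes here since |z / 100| < 5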


# Ordered array of functions, so that they can be accessed by index (e.g. 'F1') rather than by name
_functions = [
    Sphere,
@@ -439,6 +496,8 @@
    Weierstrass,
    SchaffersF7,
    SchaffersF7IllConditioned,
    CompositeGriewankRosenbrock,
    Schwefel,
]


@@ -453,16 +512,3 @@ def get_function_i(i: int) -> BBOBProblem:
        raise ValueError("The BBOB Noiseless suite defines only functions F1 ... F24")
    function_i = _functions[i - 1]
    return function_i


if __name__ == "__main__":

    for i in range(len(_functions)):
        func = get_function_i(i + 1)
        print("Function", func)
        obj = func(10)
        batch = obj.generate_batch(5)
        batch[0].set_values(obj._x_opt)
        print(batch)
        obj.evaluate(batch)
        print(batch.evals - obj._f_opt)
48 changes: 1 addition & 47 deletions src/evotorch/bbo/bbob_utilities.py
@@ -208,7 +208,7 @@ def T_beta_asy(values: torch.Tensor, beta: float) -> torch.Tensor:
def T_osz(values: torch.Tensor, epsilon: float = 1e-7) -> torch.Tensor:
    """The T_osz function
    Args:
-       values (torch.Tensor): The vaues to apply the T_osz function to, of shape [num_samples, dimension,]
+       values (torch.Tensor): The values to apply the T_osz function to, of shape [num_samples, dimension,]
        epsilon (float): Error threshold for assuming a value is zero. The paper states that xhat and sign(x) have specific behavior at x = 0.
            Here, we assume that when |x| < epsilon, that rule should apply.
    Returns:
@@ -247,49 +247,3 @@ def apply_orthogonal_matrix(values: torch.Tensor, orthogonal_matrix: torch.Tensor)
        transformed_values (torch.Tensor): The transformed values, of shape [num_samples, dimension,]
    """
    return torch.matmul(orthogonal_matrix, values.T).T
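
A standalone property check, not part of this diff: applying an orthogonal matrix this way preserves the norm of each row of values, which is what the BBOB transforms rely on.

import torch

Q, _ = torch.linalg.qr(torch.randn(5, 5))  # a random orthogonal matrix
values = torch.randn(3, 5)
out = torch.matmul(Q, values.T).T  # the same operation as apply_orthogonal_matrix
print(torch.allclose(values.norm(dim=-1), out.norm(dim=-1), atol=1e-5))  # True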


if __name__ == "__main__":

    x1 = torch.tensor([[0.0, 0.0], [0.0, 7.0]])
    print(f_pen(x1))

    for x in x1:
        penalty = 0.0
        for v in x:
            c1 = v - 5
            c2 = -5 - v
            if c1 > 0.0:
                penalty += c1 * c1
            elif c2 > 0:
                penalty += c2 * c2
        print(x, "->", penalty)
    print(torch.clamp(torch.abs(x1) - 5, min=0.0, max=None))
    # import matplotlib.pyplot as plt
    # import numpy as np

    # d = 10
    # v = torch.zeros((2, d))
    # betas = [0.1, 0.2, 0.5]

    # lims = [6, 1, 0.1, 0.01]

    # print(random_binary_vector(d))

    # for lim in lims:
    #     lim_low = -lim
    #     lim_high = lim
    #     xs = np.linspace(lim_low, lim_high, 1000)
    #     ys_osy = []
    #     ys_beta = {str(beta): [] for beta in betas}
    #     for x in xs:
    #         v[0, -1] = x
    #         ozy = T_osz(v)[0,-1]
    #         ys_osy.append(ozy)
    #         for beta in betas:
    #             asy = T_beta_asy(v, beta)[0,-1]
    #             ys_beta[str(beta)].append(asy)
    #     plt.plot(xs, ys_osy)
    #     for beta in betas:
    #         plt.plot(xs, ys_beta[str(beta)])
    #     plt.show()
22 changes: 22 additions & 0 deletions tests/test_bbo.py
@@ -0,0 +1,22 @@
import numpy as np
import torch

from evotorch.bbo import bbob_noiseless_suite, bbob_problem


def test_bbob_noiseless_suite_global_optima():

    n_functions = len(bbob_noiseless_suite._functions)
    dimensions = [2, 5, 10, 20, 40]

    for dimension in dimensions:
        for function_idx in range(1, n_functions + 1):
            func: bbob_problem.BBOBProblem = bbob_noiseless_suite.get_function_i(function_idx)(dimension)
            batch = func.generate_batch(5)
            batch[0].set_values(func._x_opt)
            func.evaluate(batch)
            eval_of_x_opt = float(batch.evals[0] - func._f_opt)

            assert np.abs(eval_of_x_opt - 0.0) < 1e-7
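
The new test runs under plain pytest, e.g. pytest tests/test_bbo.py: for every registered function and each dimension in {2, 5, 10, 20, 40}, evaluating x_opt must reproduce f_opt to within 1e-7.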
