UPDATE numerical table
xzhang2523 committed Aug 20, 2024 · 1 parent ec9c5d9 · commit e34c05f
Showing 15 changed files with 136 additions and 111 deletions.
1 change: 1 addition & 0 deletions libmoon/metrics/__init__.py
@@ -0,0 +1 @@
+from libmoon.metrics.metrics import compute_indicators
8 changes: 6 additions & 2 deletions libmoon/metrics/metrics.py
@@ -95,6 +95,11 @@ def compute_cross_angle(sols, prefs):
 
 
 def compute_indicators(objs, prefs):
+    '''
+    Input:
+        objs: objective arrays
+        prefs: preference arrays
+    '''
     lmin = compute_lmin(objs)
     soft_lmin = compute_soft_lmin(objs)
     spacing = compute_spacing(objs)
@@ -105,7 +110,7 @@ def compute_indicators(objs, prefs):
     pbi = compute_pbi(objs, prefs)
     return {
         'lmin': lmin,
-        'soft lmin': soft_lmin,
+        'soft_lmin': soft_lmin,
         'spacing': spacing,
         'sparsity': sparsity,
         'hv': hv,
@@ -114,7 +119,6 @@ def compute_indicators(objs, prefs):
         'pbi': pbi
     }
 
-
 if __name__ == '__main__':
     objs = np.random.rand(100, 2)
     prefs = np.random.rand(100, 2)
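For reference, a minimal sketch of calling the updated indicator API, mirroring the module's own __main__ block (the random prefs are placeholders; real preference vectors would live on the simplex):

    import numpy as np
    from libmoon.metrics import compute_indicators  # re-export added by this commit

    objs = np.random.rand(100, 2)   # 100 solutions, 2 objectives
    prefs = np.random.rand(100, 2)  # placeholder preferences
    indicators = compute_indicators(objs, prefs)
    print(indicators['soft_lmin'], indicators['hv'])  # note the renamed 'soft_lmin' key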
2 changes: 1 addition & 1 deletion libmoon/problem/synthetic/vlmop.py
@@ -23,7 +23,7 @@ def _evaluate_numpy(self, x):
         return np.stack((f1, f2), axis=1)
 
 
-    def get_pf(self):
+    def get_pf(self, n_pareto_points):
         x = torch.linspace(-1 / np.sqrt(self.n_var), 1 / np.sqrt(self.n_var), 100)
         x = torch.tile(x.unsqueeze(1), (1, self.n_var))
         with torch.no_grad():
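The new n_pareto_points parameter is not yet consumed in the visible body (the linspace size stays at 100). A sketch of how it would presumably be threaded through, with _evaluate_torch as an assumed evaluator name:

    def get_pf(self, n_pareto_points):
        # Assumption: sample n_pareto_points points instead of the hardcoded 100.
        x = torch.linspace(-1 / np.sqrt(self.n_var), 1 / np.sqrt(self.n_var),
                           n_pareto_points)
        x = torch.tile(x.unsqueeze(1), (1, self.n_var))
        with torch.no_grad():
            pf = self._evaluate_torch(x)  # hypothetical torch evaluator
        return pf.numpy()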
54 changes: 25 additions & 29 deletions libmoon/solver/gradient/methods/base_solver.py
@@ -7,13 +7,18 @@
 from pymoo.indicators.hv import HV
 import numpy as np
 
+from libmoon.util_global.grad_util import get_moo_Jacobian_batch
+
+
+
 class GradBaseSolver:
-    def __init__(self, step_size, n_iter, tol):
+    def __init__(self, step_size, epoch, tol, core_solver):
         self.step_size = step_size
-        self.n_iter = n_iter
+        self.epoch = epoch
         self.tol = tol
+        self.core_solver = core_solver
 
-    def solve(self, problem, x, prefs, weight_solver_cls=None):
+    def solve(self, problem, x , prefs):
         '''
         :param problem:
         :param x:
@@ -22,52 +27,43 @@ def solve(self, problem, x, prefs, weight_solver_cls=None):
         is a dict with keys: x, y.
         '''
         # The abstract class cannot be implemented directly.
-        n_prob = len(prefs)
-        n_obj = problem.n_obj
-        x_var = Variable(x, requires_grad=True)
-        optimizer = SGD([x_var], lr=self.step_size)
+        self.n_prob, self.n_obj = prefs.shape[0], prefs.shape[1]
+
+        xs_var = Variable(x, requires_grad=True)
+        optimizer = SGD([xs_var], lr=self.step_size)
         ind = HV(ref_point=get_hv_ref(problem.problem_name))
-        hv_arr = []
-        y_arr = []
-        for i in tqdm(range(self.n_iter)):
-            grad_arr = [0] * n_prob
-            y = problem.evaluate(x_var)
-            y_np = y.detach().numpy()
+        hv_arr, y_arr = [], []
+        for iter_idx in tqdm(range(self.epoch)):
+            fs_var = problem.evaluate(xs_var)
+            y_np = fs_var.detach().numpy()
             y_arr.append(y_np)
             hv_arr.append(ind.do(y_np))
-            for prob_idx in range(n_prob):
-                grad_arr[prob_idx] = [0] * n_obj
-                for obj_idx in range( n_obj ):
-                    y[prob_idx][obj_idx].backward(retain_graph=True)
-                    grad_arr[prob_idx][obj_idx] = x.grad[prob_idx].clone()
-                    x.grad.zero_()
-                grad_arr[prob_idx] = torch.stack(grad_arr[prob_idx])
-            grad_arr = torch.stack(grad_arr)
+            Jacobian_arr = get_moo_Jacobian_batch(xs_var, fs_var, self.n_obj)
+            y_detach = fs_var.detach()
             optimizer.zero_grad()
-            if weight_solver_cls.core_name in ['EPOCore', 'MGDACore', 'RandomCore']:
-                weights = torch.stack([weight_solver_cls.get_alpha(grad_arr[idx], y[idx], idx) for idx in range(len(y)) ])
+            if self.core_solver.core_name in ['EPOCore', 'MGDAUBCore', 'RandomCore']:
+                weights = torch.stack([self.core_solver.get_alpha(Jacobian_arr[idx], y_detach[idx], idx) for idx in range( self.n_prob) ])
             else:
                 assert False, 'Unknown core_name'
 
-            torch.sum(weights * y).backward()
+            torch.sum(weights * fs_var).backward()
             optimizer.step()
             if 'lbound' in dir(problem):
                 x.data = torch.clamp(x.data, torch.Tensor(problem.lbound) + solution_eps,
                                      torch.Tensor(problem.ubound) - solution_eps)
         res = {}
         res['x'] = x.detach().numpy()
-        res['y'] = y.detach().numpy()
+        res['y'] = y_np
         res['hv_arr'] = hv_arr
         res['y_arr'] = y_arr
         return res



 class GradAggSolver(GradBaseSolver):
-    def __init__(self, problem, step_size, n_iter, tol, agg):
+    def __init__(self, problem, step_size, epoch, tol, agg):
         self.agg = agg
         self.problem = problem
-        super().__init__(step_size, n_iter, tol)
+        super().__init__(step_size, epoch, tol)
 
     def solve(self, x, prefs):
         x = Variable(x, requires_grad=True)
@@ -79,7 +75,7 @@ def solve(self, x, prefs):
         optimizer = SGD([x], lr=self.step_size)
         agg_func = get_agg_func(self.agg)
         res = {}
-        for i in tqdm(range(self.n_iter)):
+        for i in tqdm(range(self.epoch)):
             y = self.problem.evaluate(x)
             hv_arr.append(ind.do(y.detach().numpy()))
             agg_val = agg_func(y, prefs)
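GradAggSolver reduces each subproblem to a scalar via get_agg_func(self.agg). The aggregation family itself is not shown in this diff; a hedged sketch of the simplest member, linear scalarization (weighted sum):

    import torch

    def ls_agg(y, prefs):
        # y: (n_prob, n_obj) objectives; prefs: (n_prob, n_obj) weights.
        # One scalar per subproblem; summing these lets a single backward()
        # update all subproblems at once.
        return torch.sum(y * prefs, dim=1)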
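The refactor in GradBaseSolver.solve replaces the per-objective backward() loop with one call to get_moo_Jacobian_batch, whose body is not part of this diff. A sketch that reproduces what the deleted loop computed (one gradient per objective per subproblem), assuming x has shape (n_prob, n_var), f has shape (n_prob, n_obj), and each subproblem's objectives depend only on its own row of x:

    import torch

    def get_moo_Jacobian_batch(x, f, n_obj):
        # Returns a (n_prob, n_obj, n_var) stack of objective gradients.
        jac_rows = []
        for obj_idx in range(n_obj):
            grad = torch.autograd.grad(f[:, obj_idx].sum(), x, retain_graph=True)[0]
            jac_rows.append(grad)            # (n_prob, n_var)
        return torch.stack(jac_rows, dim=1)  # (n_prob, n_obj, n_var)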
2 changes: 2 additions & 0 deletions libmoon/solver/gradient/methods/base_solver_mtl.py
@@ -4,6 +4,8 @@
 
 
 from libmoon.util_global.weight_factor import uniform_pref
+from libmoon.solver.gradient.methods.core.core_solver import EPOCore
+
 
 class BaseSolverMTL:
     def __init__(self):
19 changes: 6 additions & 13 deletions libmoon/solver/gradient/methods/core/core_solver.py
@@ -133,18 +133,11 @@ def solve_epo(Jacobian, losses, pref, epo_lp):
 
 class EPOCore():
-    # def __init__(self,n_var):
-    #     # pass
-    #     self.n_var = n_var
-    #     def set_prefs(self, prefs):
-    #         self.n_obj = 2
-    #         self.epo_lp_arr = [EPO_LP(m=n_obj, n=n_var, r=1 / pref.cpu().numpy()) for pref in prefs]
 
     def __init__(self, n_var, prefs):
         '''
         Input:
             n_var: int, number of variables.
             prefs: (n_prob, n_obj).
-            problem: Problem class.
         '''
         self.core_name = 'EPOCore'
         self.prefs = prefs
@@ -153,24 +146,24 @@ def __init__(self, n_var, prefs):
         prefs_np = prefs.cpu().numpy() if type(prefs) == torch.Tensor else prefs
         self.epo_lp_arr = [EPO_LP(m=self.n_obj, n = self.n_var, r=1/pref) for pref in prefs_np]
 
     def get_alpha(self, Jacobian, losses, idx):
         alpha = solve_epo(Jacobian, losses, self.prefs[idx], self.epo_lp_arr[idx])
         return torch.Tensor(alpha)
 
 '''
 MGDASolver.
 '''
-class MGDACore():
-    def __init__(self):
-        self.core_name = 'MGDACore'
+class MGDAUBCore():
+    def __init__(self, n_var, prefs):
+        self.core_name = 'MGDAUBCore'
 
     def get_alpha(self, Jacobian, losses, idx):
         _, alpha = solve_mgda(Jacobian, return_coeff=True)
         return alpha
 
 
 class RandomCore():
-    def __init__(self):
+    def __init__(self, n_var, prefs):
         self.core_name = 'RandomCore'
 
     def get_alpha(self, Jacobian, losses, idx):
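solve_mgda, used by MGDAUBCore.get_alpha, is defined elsewhere in this module. For two objectives the MGDA min-norm coefficient has a known closed form (Désidéri, 2012; Sener & Koltun, 2018); a sketch under that formulation, not the library's actual implementation:

    import torch

    def solve_mgda_2obj(jacobian):
        # jacobian: (2, n_var) rows g1, g2; find alpha minimizing
        # || alpha * g1 + (1 - alpha) * g2 ||^2 over alpha in [0, 1].
        g1, g2 = jacobian[0], jacobian[1]
        diff = g1 - g2
        alpha = torch.dot(g2 - g1, g2) / (torch.dot(diff, diff) + 1e-12)
        alpha = torch.clamp(alpha, 0.0, 1.0)
        return torch.stack([alpha, 1.0 - alpha])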
10 changes: 5 additions & 5 deletions libmoon/solver/gradient/methods/gradhv.py
@@ -8,18 +8,18 @@
 from libmoon.solver.gradient.methods.base_solver import GradBaseSolver
 from torch.autograd import Variable
 
-
 from tqdm import tqdm
 from pymoo.indicators.hv import HV
 from libmoon.util_global.constant import solution_eps, get_hv_ref
 
 """
-The class HVMaxSolver is based on the algorithm described by
-Wang, Hao, et al.
-"Hypervolume metrics gradient ascent multi-objective optimization."
-International conference on evolutionary multi-criterion optimization. Springer, Cham, 2017.
+The class HVMaxSolver is based on the algorithm described by
+Wang, Hao, et al.
+"Hypervolume metrics gradient ascent multi-objective optimization."
+International conference on evolutionary multi-criterion optimization. Springer, Cham, 2017.
 """
 
+
 import numpy as np
 import torch
 
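For context, the HV imported here is the same pymoo indicator the base solver logs each epoch; a minimal self-contained example (reference point and objective values are made up for illustration):

    import numpy as np
    from pymoo.indicators.hv import HV

    ref_point = np.array([1.1, 1.1])  # must be worse than every objective vector
    objs = np.array([[0.2, 0.8], [0.5, 0.5], [0.8, 0.2]])
    ind = HV(ref_point=ref_point)
    print(ind.do(objs))  # dominated hypervolume w.r.t. ref_point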
4 changes: 2 additions & 2 deletions libmoon/solver/gradient/methods/mgda_solver.py
@@ -11,7 +11,7 @@
 from pymoo.indicators.hv import HV
 
 from libmoon.problem.synthetic import ZDT1, ZDT2
-from libmoon.solver.gradient.methods.core.core_solver import MGDACore
+from libmoon.solver.gradient.methods.core.core_solver import MGDAUBCore
 from matplotlib import pyplot as plt
 
 
@@ -61,7 +61,7 @@
 class MGDAUBSolver(GradBaseSolver):
     def __init__(self, step_size, n_iter, tol, problem, prefs):
         super().__init__(step_size, n_iter, tol)
-        self.weight_solver_cls = MGDACore()
+        self.weight_solver_cls = MGDAUBCore()
         self.problem = problem
         self.prefs = prefs
     def solve(self, x):
2 changes: 0 additions & 2 deletions libmoon/solver/gradient/methods/pmgda_solver.py
@@ -53,10 +53,8 @@ def solve(self, x, prefs):
         for prob_idx in range( n_prob ):
             Jacobian = torch.autograd.functional.jacobian(lambda ph: self.problem.evaluate(ph).squeeze(),
                                                           x[prob_idx].unsqueeze(0))
-
             Jacobian = torch.squeeze(Jacobian)
             pref = prefs[prob_idx]
-
             # (Step 2). Get the gradient of the constraint.
             h = constraint( y[prob_idx].unsqueeze(0), pref=pref)
             h.backward(retain_graph=True)
7 changes: 0 additions & 7 deletions libmoon/solver/gradient/methods/random_solver.py
@@ -4,12 +4,8 @@
 import numpy as np
 from libmoon.problem.synthetic.zdt import ZDT1
 from matplotlib import pyplot as plt
 
 from libmoon.solver.gradient.methods.core.core_solver import RandomCore
-
-# def get_weight_func():
-#     return Tensor(np.random.rand(10,2))
-
 
 class CoreRandom:
     def __init__(self, args):
@@ -27,14 +23,11 @@ def __init__(self, step_size, n_iter, tol, problem, prefs):
         self.problem = problem
         self.prefs = prefs
         self.solver_cls = RandomCore()
-
-
     def solve(self, x):
         return super().solve(self.problem, x, self.prefs, self.solver_cls)
 
 
 
-
 if __name__ == '__main__':
     problem = ZDT1(n_var=10)
     solver = RandomSolver(0.1, 100, 1e-6)
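RandomCore.get_alpha's body is collapsed in this diff. Judging by its name and its use in the shared solve loop, it presumably returns a random weight vector over the objectives; a sketch under that assumption:

    import torch

    def get_alpha(self, Jacobian, losses, idx):
        # Assumption: random weights on the probability simplex, drawn per call.
        w = torch.rand(losses.shape[-1])
        return w / w.sum()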
1 change: 1 addition & 0 deletions libmoon/tester/run_mtl_psl.py
@@ -39,6 +39,7 @@ def plot_train():
     plt.savefig(save_fig_name)
     print('save to {}'.format(save_fig_name))
 
+
 if __name__ == '__main__':
     parse = argparse.ArgumentParser()
     parse.add_argument('--n-epoch', type=int, default=10)