Commit a052c7d: fix example1
YUYING07 committed Sep 18, 2024 (1 parent: bf47b4e)
Showing 5 changed files with 40 additions and 29 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -9,6 +9,7 @@ archive/
.vscode/
.idea/
output/
+test/

*.iml
*.ipynb_checkpoints/
18 changes: 9 additions & 9 deletions libmoon/solver/gradient/methods/base_solver.py
@@ -8,6 +8,7 @@
import numpy as np
from libmoon.util.gradient import get_moo_Jacobian_batch


class GradBaseSolver:
def __init__(self, step_size, epoch, tol, core_solver):
self.step_size = step_size
@@ -16,7 +17,7 @@ def __init__(self, step_size, epoch, tol, core_solver):
self.core_solver = core_solver
self.is_agg = (self.core_solver.core_name == 'AggCore')

-    def solve(self, problem, x , prefs):
+    def solve(self, problem, x, prefs):
'''
:param problem:
:param x:
@@ -38,15 +39,16 @@ def solve(self, problem, x , prefs):
y_detach = fs_var.detach()
optimizer.zero_grad()


if self.is_agg:
agg_name = self.core_solver.solver_name.split('_')[-1]
agg_func = get_agg_func(agg_name)
agg_val = agg_func(fs_var, torch.Tensor(prefs).to(fs_var.device))
torch.sum(agg_val).backward()
else:
if self.core_solver.core_name in ['EPOCore', 'MGDAUBCore', 'PMGDACore', 'RandomCore']:
-                alpha_array = torch.stack([self.core_solver.get_alpha(Jacobian_array[idx], y_detach[idx], idx) for idx in range( self.n_prob) ])
+                alpha_array = torch.stack(
+                    [self.core_solver.get_alpha(Jacobian_array[idx], y_detach[idx], idx) for idx in
+                     range(self.n_prob)])
elif self.core_solver.core_name in ['PMTLCore', 'MOOSVGDCore', 'HVGradCore']:
# assert False, 'Unknown core_name'
if self.core_solver.core_name == 'HVGradCore':
@@ -62,7 +64,6 @@ def solve(self, problem, x , prefs):

torch.sum(alpha_array * fs_var).backward()


optimizer.step()
if 'lbound' in dir(problem):
x.data = torch.clamp(x.data, torch.Tensor(problem.lbound) + solution_eps,
@@ -75,7 +76,6 @@ def solve(self, problem, x , prefs):
return res



class GradAggSolver(GradBaseSolver):
def __init__(self, problem, step_size, epoch, tol, agg):
self.agg = agg
@@ -84,7 +84,7 @@ def __init__(self, problem, step_size, epoch, tol, agg):

def solve(self, x, prefs):
x = Variable(x, requires_grad=True)
-        ind = HV(ref_point = get_hv_ref(self.problem.problem_name))
+        ind = HV(ref_point=get_hv_ref(self.problem.problem_name))
hv_arr = []
y_arr = []
x_arr = []
@@ -101,12 +101,12 @@ def solve(self, x, prefs):
optimizer.step()
y_arr.append(y.detach().numpy())
if 'lbound' in dir(self.problem):
-                x.data = torch.clamp(x.data, torch.Tensor(self.problem.lbound) + solution_eps, torch.Tensor(self.problem.ubound)-solution_eps)
+                x.data = torch.clamp(x.data, torch.Tensor(self.problem.lbound) + solution_eps,
+                                     torch.Tensor(self.problem.ubound) - solution_eps)

res['x'] = x.detach().numpy()
res['y'] = y.detach().numpy()
res['hv_history'] = np.array(hv_arr)
res['y_history'] = np.array(y_arr)
        res['x_history'] = np.array(y_arr)  # likely a bug: x_arr is collected above but y_arr is stored here
-            return res
+        return res
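
The key step in GradBaseSolver.solve is torch.sum(alpha_array * fs_var).backward(): backpropagating the alpha-weighted sum of the objectives accumulates exactly the alpha-weighted combination of the per-objective gradients. A minimal standalone sketch of that trick (toy objectives and hypothetical shapes, not part of this commit):

import torch

x = torch.randn(5, requires_grad=True)
# Two toy objectives stacked into one tensor, playing the role of fs_var above.
f = torch.stack([x.pow(2).sum(), (x - 1).pow(2).sum()])
# Weights as a core solver's get_alpha would produce (fixed by hand here).
alpha = torch.tensor([0.3, 0.7])
torch.sum(alpha * f).backward()
# x.grad now holds 0.3 * grad(f1) + 0.7 * grad(f2).
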
37 changes: 19 additions & 18 deletions libmoon/solver/gradient/methods/epo_solver.py
@@ -9,14 +9,14 @@
from numpy import array
from pymoo.indicators.hv import HV
import warnings

warnings.filterwarnings("ignore")
from libmoon.util.constant import solution_eps, get_hv_ref
from libmoon.util.gradient import get_moo_Jacobian
from libmoon.solver.gradient.methods.core.core_solver import EPOCore
from libmoon.problem.synthetic.zdt import ZDT1



class EPO_LP(object):
# Paper: https://proceedings.mlr.press/v119/mahapatra20a.html
# Paper: https://arxiv.org/abs/2010.06313
@@ -28,12 +28,12 @@ def __init__(self, m, n, r, eps=1e-4):
self.r = r
self.eps = eps
self.last_move = None
-        self.a = cp.Parameter(m) # Adjustments
-        self.C = cp.Parameter((m, m)) # C: Gradient inner products, G^T G
-        self.Ca = cp.Parameter(m) # d_bal^TG
-        self.rhs = cp.Parameter(m) # RHS of constraints for balancing
-        self.alpha = cp.Variable(m) # Variable to optimize
-        obj_bal = cp.Maximize(self.alpha @ self.Ca) # objective for balance
+        self.a = cp.Parameter(m)  # Adjustments
+        self.C = cp.Parameter((m, m))  # C: Gradient inner products, G^T G
+        self.Ca = cp.Parameter(m)  # d_bal^TG
+        self.rhs = cp.Parameter(m)  # RHS of constraints for balancing
+        self.alpha = cp.Variable(m)  # Variable to optimize
+        obj_bal = cp.Maximize(self.alpha @ self.Ca)  # objective for balance
constraints_bal = [self.alpha >= 0, cp.sum(self.alpha) == 1, # Simplex
self.C @ self.alpha >= self.rhs]
self.prob_bal = cp.Problem(obj_bal, constraints_bal) # LP balance
@@ -45,9 +45,8 @@ def __init__(self, m, n, r, eps=1e-4):
self.C @ self.alpha >= 0]
self.prob_dom = cp.Problem(obj_dom, constraints_res) # LP dominance
self.prob_rel = cp.Problem(obj_dom, constraints_rel) # LP dominance
-        self.gamma = 0 # Stores the latest Optimum value of the LP problem
-        self.mu_rl = 0 # Stores the latest non-uniformity
+        self.gamma = 0  # Stores the latest Optimum value of the LP problem
+        self.mu_rl = 0  # Stores the latest non-uniformity

def get_alpha(self, l, G, r=None, C=False, relax=False):
r = self.r if r is None else r
@@ -61,7 +60,7 @@ def get_alpha(self, l, G, r=None, C=False, relax=False):
if len(np.where(J)[0]) > 0:
J_star_idx = np.where(rl == np.max(rl))[0]
self.rhs.value = self.Ca.value.copy()
-                self.rhs.value[J] = -np.inf # Not efficient; but works.
+                self.rhs.value[J] = -np.inf  # Not efficient; but works.
self.rhs.value[J_star_idx] = 0
else:
self.rhs.value = np.zeros_like(self.Ca.value)
@@ -77,6 +76,7 @@ def get_alpha(self, l, G, r=None, C=False, relax=False):
self.last_move = "dom"
return self.alpha.value


def mu(rl, normed=False):
# Modified by Xiaoyuan to handle negative issue.
# if len(np.where(rl < 0)[0]):
@@ -95,8 +95,8 @@ def adjustments(l, r=1):
rl = r * l
l_hat = rl / rl.sum()
mu_rl = mu(l_hat, normed=True)
-    eps = 1e-3 # clipping by eps is to avoid log(0), zxy Dec. 5.
-    a = r * ( np.log( np.clip(l_hat * m, eps, np.inf) ) - mu_rl)
+    eps = 1e-3  # clipping by eps is to avoid log(0), zxy Dec. 5.
+    a = r * (np.log(np.clip(l_hat * m, eps, np.inf)) - mu_rl)
return rl, mu_rl, a


@@ -119,21 +119,22 @@ def solve_epo(grad_arr, losses, pref, epo_lp):
n = G.shape[1]
GG = G @ G.T
alpha = epo_lp.get_alpha(losses_np, G=GG, C=True)
-    if alpha is None: # A patch for the issue in cvxpy
+    if alpha is None:  # A patch for the issue in cvxpy
alpha = pref / pref.sum()
gw = alpha @ G
return torch.Tensor(gw), alpha



class EPOSolver(GradBaseSolver):
def __init__(self, step_size, n_iter, tol, problem, prefs):
self.problem = problem
self.prefs = prefs
-        super().__init__(step_size, n_iter, tol)
+        self.epo_core = EPOCore(n_var=problem.n_var, prefs=prefs)
+        super().__init__(step_size, n_iter, tol, self.epo_core)

def solve(self, x):
-        return super().solve(self.problem, x, self.prefs, weight_solver_cls = self.epo_core)
+        # return super().solve(self.problem, x, self.prefs, weight_solver_cls=self.epo_core)
+        return super().solve(self.problem, x, self.prefs)


if __name__ == '__main__':
@@ -147,8 +148,8 @@ def solve(self, x):
res = solver.solve(x=x0)

from matplotlib import pyplot as plt

y_arr = res['y']
# plt.plot(y_arr)
plt.scatter(y_arr[:, 0], y_arr[:, 1], color='black')
plt.show()
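
For context, solve_epo wraps the EPO_LP program of the EPO paper (Mahapatra & Rajan, 2020): adjustments computes a = r * (log(m * l_hat) - mu_rl), where mu is a KL-style non-uniformity of the preference-weighted losses, and the LP then chooses convex weights alpha over the gradient Gram matrix G G^T. A minimal sketch of calling it directly; the sizes are toy values, and r = 1 / pref follows the paper's convention but is an assumption here, since this file does not show how EPOCore constructs its EPO_LP:

from libmoon.solver.gradient.methods.epo_solver import EPO_LP, solve_epo
import numpy as np
import torch

m, n = 2, 10                  # n_obj, n_var (toy sizes)
grad_arr = torch.randn(m, n)  # one gradient row per objective
losses = torch.rand(m)
pref = np.array([0.2, 0.8])
epo_lp = EPO_LP(m=m, n=n, r=1.0 / pref)  # r = 1/pref is an assumption
gw, alpha = solve_epo(grad_arr, losses, pref, epo_lp)
# gw is the combined search direction; alpha are the simplex weights.
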

9 changes: 9 additions & 0 deletions libmoon/tester/run_quick.py
@@ -0,0 +1,9 @@
+from libmoon.util.synthetic import synthetic_init
+from libmoon.util.prefs import get_uniform_pref
+from libmoon.util.problems import get_problem
+from libmoon.solver.gradient.methods import EPOSolver
+
+problem = get_problem(problem_name='ZDT1')
+prefs = get_uniform_pref(n_prob=5, n_obj=problem.n_obj, clip_eps=1e-2)
+solver = EPOSolver(step_size=1e-2, n_iter=1000, tol=1e-2, problem=problem, prefs=prefs)
+res = solver.solve(x=synthetic_init(problem, prefs))
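
The res dict returned by EPOSolver.solve carries at least 'x' and 'y' (see the __main__ block of epo_solver.py above), so this quick run can be visualized the same way. A short follow-up sketch:

from matplotlib import pyplot as plt

y = res['y']  # objective values of the n_prob solutions, shape (n_prob, n_obj)
plt.scatter(y[:, 0], y[:, 1], color='black')
plt.show()
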
4 changes: 2 additions & 2 deletions setup.py
@@ -10,7 +10,7 @@
version='0.2.2',
author='Xiaoyuan Zhang et al.',
author_email='[email protected]',
-    description='Moon, Make MOO great again',
+    description='LibMOON: A Gradient-based MultiObjective OptimizatioN Library in PyTorch',
packages=find_packages(
# include=['solver.gradient.epo_solver',
# 'solver.gradient',
@@ -31,7 +31,7 @@
'cvxopt==1.3.2',
'cvxpy==1.4.2',
'ffmpeg-python',
-        'ffmpeg',
+        'ffmpeg',
'scikit-learn'
],
long_description=long_description,