diff --git a/dependencies.xml b/dependencies.xml
index 3f5d8d5a16..e7989d9cd3 100644
--- a/dependencies.xml
+++ b/dependencies.xml
@@ -96,4 +96,4 @@ Note all install methods after "main" take
removeremove
-
+
\ No newline at end of file
diff --git a/plugins/TEAL b/plugins/TEAL
index cf79281b57..8572ef2e6f 160000
--- a/plugins/TEAL
+++ b/plugins/TEAL
@@ -1 +1 @@
-Subproject commit cf79281b57afbeaed4bce0aa26be2376799ac336
+Subproject commit 8572ef2e6fa6b89b6651db40c9973b47d8b76a45
diff --git a/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py b/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py
index 817a7d5381..18044a736b 100644
--- a/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py
+++ b/ravenframework/CodeInterfaceClasses/SIMULATE3/SimulateData.py
@@ -43,6 +43,8 @@ def __init__(self,filen):
self.data["PinPowerPeaking"] = self.pinPeaking()
self.data["exposure"] = self.burnupEOC()
self.data["assembly_power"] = self.assemblyPeakingFactors()
+ self.data["fuel_type"] = self.fa_type()
# this is a dummy variable for demonstration with MOF
# check if something has been found
if all(v is None for v in self.data.values()):
@@ -211,7 +213,7 @@ def EOCEFPD(self):
if not list_:
return ValueError("No values returned. Check Simulate File executed correctly")
else:
- outputDict = {'info_ids':['MaxEFPD'], 'values': [list_[-1]] }
+ outputDict = {'info_ids':['MaxEFPD'], 'values': [list_[-1]]}
return outputDict
@@ -486,6 +488,45 @@ def burnupEOC(self):
return outputDict
+ def fa_type(self):
+    """
+      Extracts the fuel assembly types and computes the fuel cost based on the amount and enrichment of each fuel type.
+      @ In, None
+      @ Out, outputDict, dict, dictionary containing the computed fuel cost
+    """
+ FAlist = []
+ for line in self.lines:
+ if "'FUE.TYP'" in line:
+ p1 = line.index(",")
+ p2 = line.index("/")
+ search_space = line[p1:p2]
+ search_space = search_space.replace(",","")
+        tmp = search_space.split()
+ for ii in tmp:
+ FAlist.append(float(ii))
+ FAtype = list(set(FAlist))
+ FAlist_A = FAlist[0]
+ FAlist_B = FAlist[1:9] + FAlist[9:73:9]
+ FAlist_C = FAlist[10:18] + FAlist[19:27] + FAlist[28:36] + FAlist[37:45] + FAlist[46:54] + FAlist[55:63] + FAlist[64:72] + FAlist[73:81]
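+    # NOTE (assumed layout): the FUE.TYP map is read as a symmetric core map, so the
+    # center assembly (FAlist_A) is counted once, assemblies on the symmetry lines
+    # (FAlist_B) twice, and all remaining assemblies (FAlist_C) four times.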
+ FAcount_A = [float(fa == FAlist_A) for fa in FAtype]
+ FAcount_B = [float(FAlist_B.count(fa)*2) for fa in FAtype]
+ FAcount_C = [float(FAlist_C.count(fa)*4) for fa in FAtype]
+ FAcount = [FAcount_A[j] + FAcount_B[j] + FAcount_C[j] for j in range(len(FAtype))]
+    # Assumed fuel type mapping: type 0 is an empty location, type 1 is a reflector,
+    # type 2 has 2% enrichment, types 3 and 4 have 2.5% enrichment, and types 5 and 6
+    # have 3.2% enrichment. The cost of burnable absorbers is not considered.
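+    # NOTE: the numeric cost coefficients below are assumed relative per-assembly fuel
+    # costs for each enrichment level (units unspecified in the source).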
+ if len(FAcount) == 7:
+ fuel_cost = (FAcount[0] + FAcount[1])*0 + FAcount[2]*2.69520839 + (FAcount[3] + FAcount[4])*3.24678409 + (FAcount[5] + FAcount[6])*4.03739539
+ else:
+ fuel_cost = (FAcount[0] + FAcount[1])*0 + FAcount[2]*2.69520839 + (FAcount[3] + FAcount[4])*3.24678409 + (FAcount[5])*4.03739539
+ if not fuel_cost:
+ return ValueError("No values returned. Check Simulate File executed correctly")
+ else:
+ outputDict = {'info_ids':['fuel_cost'], 'values': [fuel_cost]}
+ return outputDict
+
def writeCSV(self, fileout):
"""
Print Data into CSV format
@@ -505,4 +546,3 @@ def writeCSV(self, fileout):
index=index+1
numpy.savetxt(fileObject, outputMatrix.T, delimiter=',', header=','.join(headers), comments='')
fileObject.close()
-
diff --git a/ravenframework/Optimizers/GeneticAlgorithm.py b/ravenframework/Optimizers/GeneticAlgorithm.py
index 8f90200ec9..fced405aed 100644
--- a/ravenframework/Optimizers/GeneticAlgorithm.py
+++ b/ravenframework/Optimizers/GeneticAlgorithm.py
@@ -17,12 +17,14 @@
Genetic Algorithm-based optimization. Multiple strategies for
mutations, cross-overs, etc. are available.
Created June,3,2020
- @authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi
+  Updated September 17, 2023
+ @authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi, Junyung Kim
References
----------
.. [1] Holland, John H. "Genetic algorithms." Scientific American 267.1 (1992): 66-73.
- [2] Z. Michalewicz, "Genetic Algorithms. + Data Structures. = Evolution Programs," Third, Revised
- and Extended Edition, Springer (1996).
+      [2] Z. Michalewicz, "Genetic Algorithms + Data Structures = Evolution Programs," Third, Revised and Extended Edition, Springer (1996).
+ [3] Deb, Kalyanmoy, et al. "A fast and elitist multiobjective genetic algorithm: NSGA-II." IEEE transactions on evolutionary computation 6.2 (2002): 182-197.
+ [4] Deb, Kalyanmoy. "An efficient constraint handling method for genetic algorithms." Computer methods in applied mechanics and engineering 186.2-4 (2000): 311-338.
"""
# External Modules----------------------------------------------------------------------------------
from collections import deque, defaultdict
@@ -32,10 +34,11 @@
# External Modules End------------------------------------------------------------------------------
# Internal Modules----------------------------------------------------------------------------------
-from ..utils import mathUtils, InputData, InputTypes
+from ..utils import mathUtils, InputData, InputTypes, frontUtils
from ..utils.gaUtils import dataArrayToDict, datasetToDataArray
from .RavenSampled import RavenSampled
from .parentSelectors.parentSelectors import returnInstance as parentSelectionReturnInstance
+from .parentSelectors.parentSelectors import countConstViolation
from .crossOverOperators.crossovers import returnInstance as crossoversReturnInstance
from .mutators.mutators import returnInstance as mutatorsReturnInstance
from .survivorSelectors.survivorSelectors import returnInstance as survivorSelectionReturnInstance
@@ -66,16 +69,26 @@ def __init__(self):
self._acceptRerun = {} # by traj, if True then override accept for point rerun
self._convergenceInfo = {} # by traj, the persistence and convergence information for most recent opt
self._requiredPersistence = 0 # consecutive persistence required to mark convergence
- self.needDenormalized() # the default in all optimizers is to normalize the data which is not the case here
+ self.needDenormalized() # the default in all optimizers is to normalize the data which is not the case here
self.batchId = 0
- self.population = None # panda Dataset container containing the population at the beginning of each generation iteration
- self.popAge = None # population age
- self.fitness = None # population fitness
- self.ahdp = np.NaN # p-Average Hausdorff Distance between populations
- self.ahd = np.NaN # Hausdorff Distance between populations
+ self.population = None # panda Dataset container containing the population at the beginning of each generation iteration
+ self.popAge = None # population age
+ self.fitness = None # population fitness
+ self.rank = None # population rank (for Multi-objective optimization only)
+ self.constraints = None
+ self.constraintsV = None
+ self.crowdingDistance = None # population crowding distance (for Multi-objective optimization only)
+ self.ahdp = np.NaN # p-Average Hausdorff Distance between populations
+ self.ahd = np.NaN # Hausdorff Distance between populations
self.bestPoint = None
self.bestFitness = None
self.bestObjective = None
+ self.multiBestPoint = None
+ self.multiBestFitness = None
+ self.multiBestObjective = None
+ self.multiBestConstraint = None
+ self.multiBestRank = None
+ self.multiBestCD = None
self.objectiveVal = None
self._populationSize = None
self._parentSelectionType = None
@@ -97,6 +110,7 @@ def __init__(self):
self._penaltyCoeff = None
self._fitnessInstance = None
self._repairInstance = None
+ self._canHandleMultiObjective = True
##########################
# Initialization Methods #
@@ -130,7 +144,7 @@ def getInputSpecification(cls):
\item tournamentSelection.
\item rankSelection.
\end{itemize}
- \item Reproduction:
+ \item Reproduction:
\begin{itemize}
\item crossover:
\begin{itemize}
@@ -146,11 +160,16 @@ def getInputSpecification(cls):
\item bitFlipMutator.
\end{itemize}
\end{itemize}
- \item survivorSelectors:
+ \item survivorSelectors:
\begin{itemize}
\item ageBased.
\item fitnessBased.
\end{itemize}
+ \item constraintHandling:
+ \begin{itemize}
+ \item hard.
+ \item soft.
+ \end{itemize}
\end{itemize}""")
# Population Size
populationSize = InputData.parameterInputFactory('populationSize', strictMode=True,
@@ -158,6 +177,14 @@ def getInputSpecification(cls):
printPriority=108,
descr=r"""The number of chromosomes in each population.""")
GAparams.addSub(populationSize)
+
+ # Constraint Handling
+ constraintHandling = InputData.parameterInputFactory('constraintHandling', strictMode=True,
+ contentType=InputTypes.StringType,
+ printPriority=108,
+                                                    descr=r"""a node indicating whether the GA treats constraints as hard or soft.""")
+ GAparams.addSub(constraintHandling)
+
# Parent Selection
parentSelection = InputData.parameterInputFactory('parentSelection', strictMode=True,
contentType=InputTypes.StringType,
@@ -238,27 +265,24 @@ def getInputSpecification(cls):
contentType=InputTypes.StringType,
printPriority=108,
descr=r"""a subnode containing the implemented fitness functions.
- This includes: \begin{itemize}
- \item invLinear:
- \[fitness = -a \times obj - b \times \sum\\_{j=1}^{nConstraint} max(0,-penalty\\_j) \].
-
- \item logistic:
- \[fitness = \frac{1}{1+e^{a\times(obj-b)}}\].
-
- \item
- feasibleFirst: \[fitness =
- -obj \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \text{for} \ \ g\\_j(x)\geq 0 \; \forall j\] and
- \[fitness = -obj\\_{worst} - \Sigma\\_{j=1}^{J} \ \ \ \ \ \ \ \ otherwise \]
- \end{itemize}.""")
+                                         This includes: a. invLinear: $fitness = -a \times obj - b \times \sum_{j=1}^{nConstraint} \max(0,-penalty_j)$.
+
+                                         b. logistic: $fitness = \frac{1}{1+e^{a\times(obj-b)}}$.
+
+                                         c. feasibleFirst: $fitness = \left\{\begin{matrix} -obj & g_j(x)\geq 0 \; \forall j \\ -obj_{worst}- \Sigma_{j=1}^{J} & otherwise \\ \end{matrix}\right.$
+
+                                         d. hardConstraint: $fitness$ is given by the number of constraints violated.
+
+                                         """)
fitness.addParam("type", InputTypes.StringType, True,
- descr=r"""[invLin, logistic, feasibleFirst]""")
+ descr=r"""[invLin, logistic, feasibleFirst, hardConstraint]""")
objCoeff = InputData.parameterInputFactory('a', strictMode=True,
- contentType=InputTypes.FloatType,
+ contentType=InputTypes.FloatListType,
printPriority=108,
descr=r""" a: coefficient of objective function.""")
fitness.addSub(objCoeff)
penaltyCoeff = InputData.parameterInputFactory('b', strictMode=True,
- contentType=InputTypes.FloatType,
+ contentType=InputTypes.FloatListType,
printPriority=108,
descr=r""" b: coefficient of constraint penalty.""")
fitness.addSub(penaltyCoeff)
@@ -296,12 +320,15 @@ def getSolutionExportVariableNames(cls):
new = {}
# new = {'': 'the size of step taken in the normalized input space to arrive at each optimal point'}
new['conv_{CONV}'] = 'status of each given convergence criteria'
+    new['rank'] = 'non-dominated sorting rank of the chromosome (multi-objective optimization only)'
+    new['CD'] = 'crowding distance of the chromosome (multi-objective optimization only)'
new['fitness'] = 'fitness of the current chromosome'
new['age'] = 'age of current chromosome'
new['batchId'] = 'Id of the batch to whom the chromosome belongs'
new['AHDp'] = 'p-Average Hausdorff Distance between populations'
new['AHD'] = 'Hausdorff Distance between populations'
new['ConstraintEvaluation_{CONSTRAINT}'] = 'Constraint function evaluation (negative if violating and positive otherwise)'
+ new['FitnessEvaluation_{OBJ}'] = 'Fitness evaluation of each objective'
ok.update(new)
return ok
@@ -313,56 +340,118 @@ def handleInput(self, paramInput):
@ Out, None
"""
RavenSampled.handleInput(self, paramInput)
- # GAparams
+ ####################################################################################
+ # GAparams #
+ ####################################################################################
gaParamsNode = paramInput.findFirst('GAparams')
- # populationSize
+
+ ####################################################################################
+ # populationSize #
+ ####################################################################################
populationSizeNode = gaParamsNode.findFirst('populationSize')
self._populationSize = populationSizeNode.value
- # parent selection
+
+ ####################################################################################
+ # parent selection node #
+ ####################################################################################
parentSelectionNode = gaParamsNode.findFirst('parentSelection')
self._parentSelectionType = parentSelectionNode.value
self._parentSelectionInstance = parentSelectionReturnInstance(self, name=parentSelectionNode.value)
- # reproduction node
+    if len(self._objectiveVar) >= 2 and self._parentSelectionType != 'tournamentSelection':
+      self.raiseAnError(IOError, f'tournamentSelection is the only parent selection mechanism supported for multi-objective optimization, whereas the provided parentSelection is {self._parentSelectionType}.')
+
+ ####################################################################################
+ # reproduction node #
+ ####################################################################################
reproductionNode = gaParamsNode.findFirst('reproduction')
self._nParents = int(np.ceil(1/2 + np.sqrt(1+4*self._populationSize)/2))
self._nChildren = int(2*comb(self._nParents,2))
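+    # NOTE: nChildren = 2*C(nParents,2) = nParents*(nParents-1); requiring at least
+    # populationSize children and solving nParents*(nParents-1) >= populationSize
+    # gives the root formula used for nParents above.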
- # crossover node
+
+ ####################################################################################
+ # crossover node #
+ ####################################################################################
crossoverNode = reproductionNode.findFirst('crossover')
self._crossoverType = crossoverNode.parameterValues['type']
+    if self._crossoverType not in ['onePointCrossover','twoPointsCrossover','uniformCrossover']:
+      self.raiseAnError(IOError, f'Currently, the Genetic Algorithm only supports onePointCrossover, twoPointsCrossover, and uniformCrossover as crossover types, whereas the provided crossover is {self._crossoverType}')
if crossoverNode.findFirst('points') is None:
self._crossoverPoints = None
else:
self._crossoverPoints = crossoverNode.findFirst('points').value
self._crossoverProb = crossoverNode.findFirst('crossoverProb').value
self._crossoverInstance = crossoversReturnInstance(self,name = self._crossoverType)
- # mutation node
+
+ ####################################################################################
+ # mutation node #
+ ####################################################################################
mutationNode = reproductionNode.findFirst('mutation')
self._mutationType = mutationNode.parameterValues['type']
+    if self._mutationType not in ['swapMutator','scrambleMutator','inversionMutator','bitFlipMutator','randomMutator']:
+      self.raiseAnError(IOError, f'Currently, the Genetic Algorithm only supports swapMutator, scrambleMutator, inversionMutator, bitFlipMutator, and randomMutator as mutator types, whereas the provided mutator is {self._mutationType}')
if mutationNode.findFirst('locs') is None:
self._mutationLocs = None
else:
self._mutationLocs = mutationNode.findFirst('locs').value
self._mutationProb = mutationNode.findFirst('mutationProb').value
self._mutationInstance = mutatorsReturnInstance(self,name = self._mutationType)
- # Survivor selection
+
+ ####################################################################################
+ # survivor selection node #
+ ####################################################################################
survivorSelectionNode = gaParamsNode.findFirst('survivorSelection')
self._survivorSelectionType = survivorSelectionNode.value
self._survivorSelectionInstance = survivorSelectionReturnInstance(self,name = self._survivorSelectionType)
- # Fitness
+    if self._survivorSelectionType not in ['ageBased','fitnessBased','rankNcrowdingBased']:
+      self.raiseAnError(IOError, f'Currently, the Genetic Algorithm only supports ageBased, fitnessBased, and rankNcrowdingBased as survivor selectors, whereas the provided survivorSelector is {self._survivorSelectionType}')
+    if len(self._objectiveVar) == 1 and self._survivorSelectionType == 'rankNcrowdingBased':
+      self.raiseAnError(IOError, 'rankNcrowdingBased survivor selection is only supported for multi-objective optimization (i.e., two or more objectives).')
+
+ ####################################################################################
+ # fitness node #
+ ####################################################################################
fitnessNode = gaParamsNode.findFirst('fitness')
self._fitnessType = fitnessNode.parameterValues['type']
-
- # Check if the fitness requested is among the constrained optimization fitnesses
- # Currently, only InvLin and feasibleFirst Fitnesses deal with constrained optimization
+
+ ####################################################################################
+ # constraint node #
+ ####################################################################################
# TODO: @mandd, please explore the possibility to convert the logistic fitness into a constrained optimization fitness.
- if 'Constraint' in self.assemblerObjects and self._fitnessType not in ['invLinear','feasibleFirst']:
- self.raiseAnError(IOError, f'Currently constrained Genetic Algorithms only support invLinear and feasibleFirst fitnesses, whereas provided fitness is {self._fitnessType}')
+    if 'Constraint' in self.assemblerObjects and self._fitnessType not in ['invLinear','logistic','feasibleFirst','hardConstraint']:
+      self.raiseAnError(IOError, f'Currently constrained Genetic Algorithms only support invLinear, logistic, feasibleFirst, and hardConstraint fitnesses, whereas the provided fitness is {self._fitnessType}')
+    self._expConstr = self.assemblerObjects['Constraint'][0] if 'Constraint' in self.assemblerObjects else None
+    self._impConstr = self.assemblerObjects['ImplicitConstraint'][0] if 'ImplicitConstraint' in self.assemblerObjects else None
+    if self._expConstr is not None and self._impConstr is not None:
+      self._numOfConst = len([ele for ele in self._expConstr if ele not in ('Functions','External')]) + len([ele for ele in self._impConstr if ele not in ('Functions','External')])
+    elif self._expConstr is None and self._impConstr is not None:
+      self._numOfConst = len([ele for ele in self._impConstr if ele not in ('Functions','External')])
+    elif self._expConstr is not None and self._impConstr is None:
+      self._numOfConst = len([ele for ele in self._expConstr if ele not in ('Functions','External')])
+    else:
+      self._numOfConst = 0
+    if self._expConstr is not None and self._impConstr is not None and self._penaltyCoeff is not None:
+      if len(self._penaltyCoeff) != len(self._objectiveVar) * self._numOfConst:
+        self.raiseAnError(IOError, 'The number of penalty coefficients (b) must equal the number of objectives times the number of constraints.')
self._objCoeff = fitnessNode.findFirst('a').value if fitnessNode.findFirst('a') is not None else None
- self._penaltyCoeff = fitnessNode.findFirst('b').value if fitnessNode.findFirst('b') is not None else None
+    #NOTE: the lines below currently apply only to the 'feasibleFirst' fitness; they will be generalized to invLinear as well.
+    if self._fitnessType == 'feasibleFirst':
+      if self._numOfConst != 0 and fitnessNode.findFirst('b') is not None:
+        self._penaltyCoeff = fitnessNode.findFirst('b').value
+      elif self._numOfConst == 0 and fitnessNode.findFirst('b') is not None:
+        self.raiseAnError(IOError, 'The number of constraints is zero, yet penalty coefficients (b) were provided.')
+      elif self._numOfConst != 0 and fitnessNode.findFirst('b') is None:
+        self._penaltyCoeff = list(np.repeat(1, self._numOfConst * len(self._objectiveVar))) #NOTE: if penaltyCoeff is not provided, all coefficients are assumed to be 1
+      else:
+        self._penaltyCoeff = list(np.repeat(0, len(self._objectiveVar)))
+ else:
+ self._penaltyCoeff = fitnessNode.findFirst('b').value if fitnessNode.findFirst('b') is not None else None
self._fitnessInstance = fitnessReturnInstance(self,name = self._fitnessType)
self._repairInstance = repairReturnInstance(self,name='replacementRepair') # currently only replacement repair is implemented.
- # Convergence Criterion
+ ####################################################################################
+ # convergence criterion node #
+ ####################################################################################
convNode = paramInput.findFirst('convergence')
if convNode is not None:
for sub in convNode.subparts:
@@ -418,60 +507,89 @@ def needDenormalized(self):
# overload as needed in inheritors
return True
- ###############
- # Run Methods #
- ###############
-
- def _useRealization(self, info, rlz):
- """
- Used to feedback the collected runs into actionable items within the sampler.
- This is called by localFinalizeActualSampling, and hence should contain the main skeleton.
- @ In, info, dict, identifying information about the realization
- @ In, rlz, xr.Dataset, new batched realizations
- @ Out, None
- """
- # The whole skeleton should be here, this should be calling all classes and _private methods.
+ def singleConstraint(self, info, rlz):
traj = info['traj']
for t in self._activeTraj[1:]:
self._closeTrajectory(t, 'cancel', 'Currently GA is single trajectory', 0)
self.incrementIteration(traj)
- info['step'] = self.counter
-
- # Developer note: each algorithm step is indicated by a number followed by the generation number
- # e.g., '5 @ n-1' refers to step 5 for generation n-1 (i.e., previous generation)
- # for more details refer to GRP-Raven-development/Disceret_opt channel on MS Teams
-
- # 5 @ n-1: Survivor Selection from previous iteration (children+parents merging from previous generation)
- # 5.1 @ n-1: fitnessCalculation(rlz)
- # perform fitness calculation for newly obtained children (rlz)
+ if not self._canHandleMultiObjective or len(self._objectiveVar) == 1: # This is for a single-objective Optimization case.
+ offSprings = datasetToDataArray(rlz, list(self.toBeSampled))
+ objectiveVal = list(np.atleast_1d(rlz[self._objectiveVar[0]].data))
+
+ # Collect parameters that the constraints functions need (neglecting the default params such as inputs and objective functions)
+ constraintData = {}
+ if self._constraintFunctions or self._impConstraintFunctions:
+ params = []
+ for y in (self._constraintFunctions + self._impConstraintFunctions):
+ params += y.parameterNames()
+ for p in list(set(params) -set([self._objectiveVar[0]]) -set(list(self.toBeSampled.keys()))):
+ constraintData[p] = list(np.atleast_1d(rlz[p].data))
+ # Compute constraint function g_j(x) for all constraints (j = 1 .. J) and all x's (individuals) in the population
+ g0 = np.zeros((np.shape(offSprings)[0],len(self._constraintFunctions)+len(self._impConstraintFunctions)))
+
+ g = xr.DataArray(g0,
+ dims=['chromosome','Constraint'],
+ coords={'chromosome':np.arange(np.shape(offSprings)[0]),
+ 'Constraint':[y.name for y in (self._constraintFunctions + self._impConstraintFunctions)]})
+ for index,individual in enumerate(offSprings):
+ newOpt = individual
+ opt = {self._objectiveVar[0]:objectiveVal[index]}
+ for p, v in constraintData.items():
+ opt[p] = v[index]
+
+ for constIndex, constraint in enumerate(self._constraintFunctions + self._impConstraintFunctions):
+ if constraint in self._constraintFunctions:
+ g.data[index, constIndex] = self._handleExplicitConstraints(newOpt, constraint)
+ else:
+ g.data[index, constIndex] = self._handleImplicitConstraints(newOpt, opt, constraint)
+
+ offSpringFitness = self._fitnessInstance(rlz,
+ objVar=self._objectiveVar[0],
+ a=self._objCoeff,
+ b=self._penaltyCoeff,
+ penalty=None,
+ constraintFunction=g,
+ constraintNum = self._numOfConst,
+ type=self._minMax)
+
+ self._collectOptPoint(rlz, offSpringFitness, objectiveVal, g)
+ self._resolveNewGeneration(traj, rlz, objectiveVal, offSpringFitness, g, info)
+ return traj, g, objectiveVal, offSprings, offSpringFitness
+
+ def multiConstraint(self, info, rlz):
+ traj = info['traj']
+ for t in self._activeTraj[1:]:
+ self._closeTrajectory(t, 'cancel', 'Currently GA is single trajectory', 0)
+ self.incrementIteration(traj)
+ objectiveVal = []
offSprings = datasetToDataArray(rlz, list(self.toBeSampled))
- objectiveVal = list(np.atleast_1d(rlz[self._objectiveVar].data))
+ for i in range(len(self._objectiveVar)):
+ objectiveVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data)))
- # collect parameters that the constraints functions need (neglecting the default params such as inputs and objective functions)
+ # Collect parameters that the constraints functions need (neglecting the default params such as inputs and objective functions)
constraintData = {}
if self._constraintFunctions or self._impConstraintFunctions:
params = []
for y in (self._constraintFunctions + self._impConstraintFunctions):
params += y.parameterNames()
- for p in list(set(params) -set([self._objectiveVar]) -set(list(self.toBeSampled.keys()))):
+ for p in list(set(params) -set(self._objectiveVar) -set(list(self.toBeSampled.keys()))):
constraintData[p] = list(np.atleast_1d(rlz[p].data))
- # Compute constraint function g_j(x) for all constraints (j = 1 .. J)
- # and all x's (individuals) in the population
+ # Compute constraint function g_j(x) for all constraints (j = 1 .. J) and all x's (individuals) in the population
g0 = np.zeros((np.shape(offSprings)[0],len(self._constraintFunctions)+len(self._impConstraintFunctions)))
g = xr.DataArray(g0,
- dims=['chromosome','Constraint'],
- coords={'chromosome':np.arange(np.shape(offSprings)[0]),
- 'Constraint':[y.name for y in (self._constraintFunctions + self._impConstraintFunctions)]})
- # FIXME The constraint handling is following the structure of the RavenSampled.py,
- # there are many utility functions that can be simplified and/or merged together
- # _check, _handle, and _apply, for explicit and implicit constraints.
- # This can be simplified in the near future in GradientDescent, SimulatedAnnealing, and here in GA
+ dims=['chromosome','Constraint'],
+ coords={'chromosome':np.arange(np.shape(offSprings)[0]),
+ 'Constraint':[y.name for y in (self._constraintFunctions + self._impConstraintFunctions)]})
+
for index,individual in enumerate(offSprings):
newOpt = individual
- opt = {self._objectiveVar:objectiveVal[index]}
+ objOpt = dict(zip(self._objectiveVar,
+ list(map(lambda x:-1 if x=="max" else 1 , self._minMax))))
+ opt = dict(zip(self._objectiveVar, [item[index] for item in objectiveVal]))
+ opt = {k: objOpt[k]*opt[k] for k in opt}
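+        # NOTE: maximization objectives are negated upstream (see RavenSampled) for internal
+        # minimization; the sign map above appears to restore user-space objective values
+        # before they are passed to the constraint functions.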
for p, v in constraintData.items():
opt[p] = v[index]
@@ -480,50 +598,177 @@ def _useRealization(self, info, rlz):
g.data[index, constIndex] = self._handleExplicitConstraints(newOpt, constraint)
else:
g.data[index, constIndex] = self._handleImplicitConstraints(newOpt, opt, constraint)
+
offSpringFitness = self._fitnessInstance(rlz,
- objVar=self._objectiveVar,
- a=self._objCoeff,
- b=self._penaltyCoeff,
- penalty=None,
- constraintFunction=g,
- type=self._minMax)
+ objVar=self._objectiveVar,
+ a=self._objCoeff,
+ b=self._penaltyCoeff,
+ constraintFunction=g,
+ constraintNum = self._numOfConst,
+ type =self._minMax)
+ return traj, g, objectiveVal, offSprings, offSpringFitness
+
+
+
+ #########################################################################################################
+ # Run Methods #
+ #########################################################################################################
+
+ #########################################################################################################
+ # Developer note:
+ # Each algorithm step is indicated by a number followed by the generation number
+ # e.g., '0 @ n-1' refers to step 0 for generation n-1 (i.e., previous generation)
+ # for more details refer to GRP-Raven-development/Disceret_opt channel on MS Teams.
+ #########################################################################################################
+
+ def _useRealization(self, info, rlz):
+ """
+ Used to feedback the collected runs into actionable items within the sampler.
+ This is called by localFinalizeActualSampling, and hence should contain the main skeleton.
+ @ In, info, dict, identifying information about the realization
+ @ In, rlz, xr.Dataset, new batched realizations
+ @ Out, None
+ """
+
+ info['step'] = self.counter
+
+ # 0 @ n-1: Survivor Selection from previous iteration (children+parents merging from previous generation)
+ # 0.1 @ n-1: fitnessCalculation(rlz): Perform fitness calculation for newly obtained children (rlz)
+
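+    # dispatch to the single- or multi-objective constraint/fitness handler based on the number of objectives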
+ objInd = 1 if len(self._objectiveVar) == 1 else 2
+ constraintFuncs: dict = {1: GeneticAlgorithm.singleConstraint, 2: GeneticAlgorithm.multiConstraint}
+ const = constraintFuncs.get(objInd, GeneticAlgorithm.singleConstraint)
+ traj, g, objectiveVal, offSprings, offSpringFitness = const(self, info, rlz)
- self._collectOptPoint(rlz, offSpringFitness, objectiveVal,g)
- self._resolveNewGeneration(traj, rlz, objectiveVal, offSpringFitness, g, info)
+ # 0.2@ n-1: Survivor selection(rlz): Update population container given obtained children
if self._activeTraj:
- # 5.2@ n-1: Survivor selection(rlz)
- # update population container given obtained children
- if self.counter > 1:
- self.population,self.fitness,age,self.objectiveVal = self._survivorSelectionInstance(age=self.popAge,
- variables=list(self.toBeSampled),
- population=self.population,
- fitness=self.fitness,
- newRlz=rlz,
- offSpringsFitness=offSpringFitness,
- popObjectiveVal=self.objectiveVal)
- self.popAge = age
- else:
- self.population = offSprings
- self.fitness = offSpringFitness
- self.objectiveVal = rlz[self._objectiveVar].data
+ if len(self._objectiveVar) == 1: # This is for a single-objective Optimization case.
+ if self.counter > 1:
+ self.population, self.fitness,\
+ age,self.objectiveVal = self._survivorSelectionInstance(age=self.popAge,
+ variables=list(self.toBeSampled),
+ population=self.population,
+ fitness=self.fitness,
+ newRlz=rlz,
+ offSpringsFitness=offSpringFitness,
+ popObjectiveVal=self.objectiveVal)
+ self.popAge = age
+ else:
+ self.population = offSprings
+ self.fitness = offSpringFitness
+ self.objectiveVal = rlz[self._objectiveVar[0]].data
+
+ else: # This is for a multi-objective Optimization case.
+ if self.counter > 1:
+ self.population,self.rank, \
+ self.popAge,self.crowdingDistance, \
+ self.objectiveVal,self.fitness, \
+ self.constraintsV = self._survivorSelectionInstance(age=self.popAge,
+ variables=list(self.toBeSampled),
+ population=self.population,
+ offsprings=rlz,
+ popObjectiveVal=self.objectiveVal,
+ offObjectiveVal=objectiveVal,
+ popFit = self.fitness,
+ offFit = offSpringFitness,
+ popConstV = self.constraintsV,
+ offConstV = g
+ )
+
+ self._collectOptPointMulti(self.population,
+ self.rank,
+ self.crowdingDistance,
+ self.objectiveVal,
+ self.fitness,
+ self.constraintsV)
+ self._resolveNewGenerationMulti(traj, rlz, info)
+
+ ##############################################################################
+ ##TODO: remove all the plots and maybe design new plots in outstreams if our current cannot be used
+ ## These are currently for debugging purposes
+ import matplotlib.pyplot as plt
+
+ signChange = list(map(lambda x:-1 if x=="max" else 1 , self._minMax))
+ for i in range(0, len(self.multiBestObjective)):
+ newMultiBestObjective = self.multiBestObjective * signChange
+
+ plt.title(str('BatchID = ' + str(self.batchId)))
+ plt.plot(newMultiBestObjective[:,0],
+ newMultiBestObjective[:,1],'*')
+
+ for i in range(len(self.multiBestObjective[:,0])):
+ plt.text(newMultiBestObjective[i,0],
+ newMultiBestObjective[i,1], str(self.batchId))
+ plt.savefig('PF'+str(i)+'_'+str(self.batchId)+'.png')
+ ##############################################################################
+
+ else:
+ self.population = offSprings
+ self.fitness = offSpringFitness
+ self.constraintsV = g
+
+ # offspringObjsVals for Rank and CD calculation
+ offObjVal = []
+ for i in range(len(self._objectiveVar)):
+ offObjVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data)))
+
+ # offspringFitVals for Rank and CD calculation
+ fitVal = datasetToDataArray(self.fitness, self._objectiveVar).data
+ offspringFitVals = fitVal.tolist()
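+        # NOTE: non-dominated sorting rank and crowding distance follow NSGA-II [3]: the rank
+        # indexes the Pareto fronts and the crowding distance preserves diversity within a front.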
+ offSpringRank = frontUtils.rankNonDominatedFrontiers(np.array(offspringFitVals))
+ self.rank = xr.DataArray(offSpringRank,
+ dims=['rank'],
+ coords={'rank': np.arange(np.shape(offSpringRank)[0])})
+ offSpringCD = frontUtils.crowdingDistance(rank=offSpringRank,
+ popSize=len(offSpringRank),
+ objectives=np.array(offspringFitVals))
+
+ self.crowdingDistance = xr.DataArray(offSpringCD,
+ dims=['CrowdingDistance'],
+ coords={'CrowdingDistance': np.arange(np.shape(offSpringCD)[0])})
+
+ self.objectiveVal = []
+ for i in range(len(self._objectiveVar)):
+ self.objectiveVal.append(list(np.atleast_1d(rlz[self._objectiveVar[i]].data)))
+
+ self._collectOptPointMulti(self.population,
+ self.rank,
+ self.crowdingDistance,
+ self.objectiveVal,
+ self.fitness,
+ self.constraintsV)
+ self._resolveNewGenerationMulti(traj, rlz, info)
# 1 @ n: Parent selection from population
# pair parents together by indexes
- parents = self._parentSelectionInstance(self.population,
- variables=list(self.toBeSampled),
- fitness=self.fitness,
- nParents=self._nParents)
+ if len(self._objectiveVar) == 1: # This is for a single-objective Optimization case.
+ parents = self._parentSelectionInstance(self.population,
+ variables=list(self.toBeSampled),
+ fitness=self.fitness,
+ nParents=self._nParents)
+
+ else: # This is for a multi-objective Optimization case.
+
+ parents = self._parentSelectionInstance(self.population,
+ variables=list(self.toBeSampled),
+ nParents=self._nParents,
+ rank = self.rank,
+ crowdDistance = self.crowdingDistance,
+ fitness = self.fitness
+ )
# 2 @ n: Crossover from set of parents
- # create childrenCoordinates (x1,...,xM)
+ # Create childrenCoordinates (x1,...,xM)
childrenXover = self._crossoverInstance(parents=parents,
variables=list(self.toBeSampled),
crossoverProb=self._crossoverProb,
points=self._crossoverPoints)
# 3 @ n: Mutation
- # perform random directly on childrenCoordinates
+ # Perform random directly on childrenCoordinates
childrenMutated = self._mutationInstance(offSprings=childrenXover,
distDict=self.distDict,
locs=self._mutationLocs,
@@ -531,7 +776,7 @@ def _useRealization(self, info, rlz):
variables=list(self.toBeSampled))
# 4 @ n: repair/replacement
- # repair should only happen if multiple genes in a single chromosome have the same values (),
+ # Repair should only happen if multiple genes in a single chromosome have the same values (),
# and at the same time the sampling of these genes should be with Out replacement.
needsRepair = False
for chrom in range(self._nChildren):
@@ -550,12 +795,12 @@ def _useRealization(self, info, rlz):
children = children[:self._populationSize, :]
daChildren = xr.DataArray(children,
- dims=['chromosome','Gene'],
- coords={'chromosome': np.arange(np.shape(children)[0]),
- 'Gene':list(self.toBeSampled)})
+ dims=['chromosome','Gene'],
+ coords={'chromosome': np.arange(np.shape(children)[0]),
+ 'Gene':list(self.toBeSampled)})
# 5 @ n: Submit children batch
- # submit children coordinates (x1,...,xm), i.e., self.childrenCoordinates
+ # Submit children coordinates (x1,...,xm), i.e., self.childrenCoordinates
for i in range(self.batch):
newRlz = {}
for _, var in enumerate(self.toBeSampled.keys()):
@@ -579,7 +824,6 @@ def _submitRun(self, point, traj, step, moreInfo=None):
})
# NOTE: Currently, GA treats explicit and implicit constraints similarly
# while box constraints (Boundary constraints) are automatically handled via limits of the distribution
- #
self.raiseADebug(f'Adding run to queue: {self.denormalizeData(point)} | {info}')
self._submissionQueue.append((point, info))
@@ -593,12 +837,20 @@ def flush(self):
self.population = None
self.popAge = None
self.fitness = None
+ self.rank = None
+ self.crowdingDistance = None
self.ahdp = np.NaN
self.ahd = np.NaN
self.bestPoint = None
self.bestFitness = None
self.bestObjective = None
self.objectiveVal = None
+ self.multiBestPoint = None
+ self.multiBestFitness = None
+ self.multiBestObjective = None
+ self.multiBestConstraint = None
+ self.multiBestRank = None
+ self.multiBestCD = None
# END queuing Runs
# * * * * * * * * * * * * * * * *
@@ -627,8 +879,8 @@ def _resolveNewGeneration(self, traj, rlz, objectiveVal, fitness, g, info):
for i in range(rlz.sizes['RAVEN_sample_ID']):
varList = self._solutionExport.getVars('input') + self._solutionExport.getVars('output') + list(self.toBeSampled.keys())
rlzDict = dict((var,np.atleast_1d(rlz[var].data)[i]) for var in set(varList) if var in rlz.data_vars)
- rlzDict[self._objectiveVar] = np.atleast_1d(rlz[self._objectiveVar].data)[i]
- rlzDict['fitness'] = np.atleast_1d(fitness.data)[i]
+ rlzDict[self._objectiveVar[0]] = np.atleast_1d(rlz[self._objectiveVar[0]].data)[i]
+ rlzDict['fitness'] = np.atleast_1d(fitness.to_array()[:,i])
for ind, consName in enumerate(g['Constraint'].values):
rlzDict['ConstraintEvaluation_'+consName] = g[i,ind]
self._updateSolutionExport(traj, rlzDict, acceptable, None)
@@ -636,7 +888,7 @@ def _resolveNewGeneration(self, traj, rlz, objectiveVal, fitness, g, info):
if acceptable in ['accepted', 'first']:
# record history
bestRlz = {}
- bestRlz[self._objectiveVar] = self.bestObjective
+ bestRlz[self._objectiveVar[0]] = self.bestObjective
bestRlz['fitness'] = self.bestFitness
bestRlz.update(self.bestPoint)
self._optPointHistory[traj].append((bestRlz, info))
@@ -645,6 +897,76 @@ def _resolveNewGeneration(self, traj, rlz, objectiveVal, fitness, g, info):
else: # e.g. rerun
pass # nothing to do, just keep moving
+  def _resolveNewGenerationMulti(self, traj, rlz, info):
+    """
+      Store a new generation after checking convergence
+      @ In, traj, int, trajectory for this new point
+      @ In, rlz, dict, realized realization
+      @ In, info, dict, identifying information about the realization
+      @ Out, None
+    """
+ self.raiseADebug('*'*80)
+ self.raiseADebug(f'Trajectory {traj} iteration {info["step"]} resolving new state ...')
+ # note the collection of the opt point
+ self._stepTracker[traj]['opt'] = (rlz, info)
+ acceptable = 'accepted' if self.counter > 1 else 'first'
+ old = self.population
+ converged = self._updateConvergence(traj, rlz, old, acceptable)
+ if converged:
+ self._closeTrajectory(traj, 'converge', 'converged', self.bestObjective)
+ # NOTE: the solution export needs to be updated BEFORE we run rejectOptPoint or extend the opt
+ # point history.
+ objVal = [[] for x in range(len(self.objectiveVal[0]))]
+ for i in range(len(self.objectiveVal[0])):
+ objVal[i] = [item[i] for item in self.objectiveVal]
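+    # the loop above transposes self.objectiveVal from one list per objective into one row per chromosome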
+
+ objVal = xr.DataArray(objVal,
+ dims=['chromosome','obj'],
+ coords={'chromosome':np.arange(np.shape(objVal)[0]),
+ 'obj': self._objectiveVar})
+ if self._writeSteps == 'every':
+      self.raiseADebug(f"rlz.sizes['RAVEN_sample_ID'] = {rlz.sizes['RAVEN_sample_ID']}")
+      self.raiseADebug(f"self.population.shape = {self.population.shape}")
+ for i in range(rlz.sizes['RAVEN_sample_ID']):
+ varList = self._solutionExport.getVars('input') + self._solutionExport.getVars('output') + list(self.toBeSampled.keys())
+ rlzDict = dict((var,self.population.data[i][j]) for j, var in enumerate(self.population.Gene.data))
+ rlzDict['batchId'] = rlz['batchId'].data[i]
+ for j in range(len(self._objectiveVar)):
+ rlzDict[self._objectiveVar[j]] = objVal.data[i][j]
+ rlzDict['rank'] = np.atleast_1d(self.rank.data)[i]
+ rlzDict['CD'] = np.atleast_1d(self.crowdingDistance.data)[i]
+ for ind, fitName in enumerate(list(self.fitness.keys())):
+ rlzDict['FitnessEvaluation_'+fitName] = self.fitness[fitName].data[i]
+ for ind, consName in enumerate([y.name for y in (self._constraintFunctions + self._impConstraintFunctions)]):
+ rlzDict['ConstraintEvaluation_'+consName] = self.constraintsV.data[i,ind]
+ self._updateSolutionExport(traj, rlzDict, acceptable, None)
+
+ # decide what to do next
+ if acceptable in ['accepted', 'first']:
+ # record history
+ bestRlz = {}
+ varList = self._solutionExport.getVars('input') + self._solutionExport.getVars('output') + list(self.toBeSampled.keys())
+ bestRlz = dict((var,np.atleast_1d(rlz[var].data)) for var in set(varList) if var in rlz.data_vars)
+ for i in range(len(self._objectiveVar)):
+ bestRlz[self._objectiveVar[i]] = [item[i] for item in self.multiBestObjective]
+
+ bestRlz['rank'] = self.multiBestRank
+ bestRlz['CD'] = self.multiBestCD
+      if len(self.multiBestConstraint) != 0: # if there are constraints
+ for ind, consName in enumerate(self.multiBestConstraint.Constraint):
+ bestRlz['ConstraintEvaluation_'+consName.values.tolist()] = self.multiBestConstraint[ind].values
+ for ind, fitName in enumerate(list(self.multiBestFitness.keys())):
+ bestRlz['FitnessEvaluation_'+ fitName] = self.multiBestFitness[fitName].data
+ bestRlz.update(self.multiBestPoint)
+ self._optPointHistory[traj].append((bestRlz, info))
+ elif acceptable == 'rejected':
+ self._rejectOptPoint(traj, info, old)
+ else: # e.g. rerun
+ pass # nothing to do, just keep moving
+
def _collectOptPoint(self, rlz, fitness, objectiveVal, g):
"""
Collects the point (dict) from a realization
@@ -653,12 +975,14 @@ def _collectOptPoint(self, rlz, fitness, objectiveVal, g):
@ In, fitness, xr.DataArray, fitness values at each chromosome of the realization
@ Out, point, dict, point used in this realization
"""
-
varList = list(self.toBeSampled.keys()) + self._solutionExport.getVars('input') + self._solutionExport.getVars('output')
varList = set(varList)
selVars = [var for var in varList if var in rlz.data_vars]
population = datasetToDataArray(rlz, selVars)
- optPoints,fit,obj,gOfBest = zip(*[[x,y,z,w] for x, y, z,w in sorted(zip(np.atleast_2d(population.data),np.atleast_1d(fitness.data),objectiveVal,np.atleast_2d(g.data)),reverse=True,key=lambda x: (x[1]))])
+ if self._fitnessType == 'hardConstraint':
+ optPoints,fit,obj,gOfBest = zip(*[[x,y,z,w] for x, y, z,w in sorted(zip(np.atleast_2d(population.data),datasetToDataArray(fitness, self._objectiveVar).data,objectiveVal,np.atleast_2d(g.data)),reverse=True,key=lambda x: (x[1],-x[2]))])
+ else:
+ optPoints,fit,obj,gOfBest = zip(*[[x,y,z,w] for x, y, z,w in sorted(zip(np.atleast_2d(population.data),datasetToDataArray(fitness, self._objectiveVar).data,objectiveVal,np.atleast_2d(g.data)),reverse=True,key=lambda x: (x[1]))])
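+    # NOTE: with reverse=True the chromosomes are ordered by descending fitness; for the
+    # hardConstraint fitness, ties are additionally broken by the lower objective value (the -x[2] key term).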
point = dict((var,float(optPoints[0][i])) for i, var in enumerate(selVars) if var in rlz.data_vars)
gOfBest = dict(('ConstraintEvaluation_'+name,float(gOfBest[0][i])) for i, name in enumerate(g.coords['Constraint'].values))
if (self.counter > 1 and obj[0] <= self.bestObjective and fit[0] >= self.bestFitness) or self.counter == 1:
@@ -669,6 +993,51 @@ def _collectOptPoint(self, rlz, fitness, objectiveVal, g):
return point
+ def _collectOptPointMulti(self, population, rank, CD, objVal, fitness, constraintsV):
+    """
+      Collects the rank-1 (Pareto front) points from the current population
+      @ In, population, xr.DataArray, container of the population
+      @ In, rank, xr.DataArray, rank values at each chromosome
+      @ In, CD, xr.DataArray, crowding distance values at each chromosome
+      @ In, objVal, list, objective values at each chromosome
+      @ In, fitness, xr.Dataset, fitness values at each chromosome
+      @ In, constraintsV, xr.DataArray, constraint evaluations at each chromosome
+      @ Out, optPointsDic, dict, dictionary of variables for the rank-1 points
+    """
+ rankOneIDX = [i for i, rankValue in enumerate(rank.data) if rankValue == 1]
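+    # rank 1 identifies the first non-dominated front, i.e., the current Pareto-front approximation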
+ optPoints = population[rankOneIDX]
+ optObjVal = np.array([list(ele) for ele in list(zip(*objVal))])[rankOneIDX]
+ count = 0
+ for i in list(fitness.keys()):
+ data = fitness[i][rankOneIDX]
+ if count == 0:
+ fitSet = data.to_dataset(name = i)
+ else:
+ fitSet[i] = data
+ count = count + 1
+ optConstraintsV = constraintsV.data[rankOneIDX]
+ optRank = rank.data[rankOneIDX]
+ optCD = CD.data[rankOneIDX]
+
+ optPointsDic = dict((var,np.array(optPoints)[:,i]) for i, var in enumerate(population.Gene.data))
+ optConstNew = []
+ for i in range(len(optConstraintsV)):
+ optConstNew.append(optConstraintsV[i])
+ optConstNew = list(map(list, zip(*optConstNew)))
+    if len(optConstNew) != 0:
+ optConstNew = xr.DataArray(optConstNew,
+ dims=['Constraint','Evaluation'],
+ coords={'Constraint':[y.name for y in (self._constraintFunctions + self._impConstraintFunctions)],
+ 'Evaluation':np.arange(np.shape(optConstNew)[1])})
+
+ self.multiBestPoint = optPointsDic
+ self.multiBestFitness = fitSet
+ self.multiBestObjective = optObjVal
+ self.multiBestConstraint = optConstNew
+ self.multiBestRank = optRank
+ self.multiBestCD = optCD
+
+ return optPointsDic
+
+
def _checkAcceptability(self, traj):
"""
This is an abstract method for all RavenSampled Optimizer, whereas for GA all children are accepted
@@ -685,16 +1054,26 @@ def checkConvergence(self, traj, new, old):
@ Out, any(convs.values()), bool, True of any of the convergence criteria was reached
@ Out, convs, dict, on the form convs[conv] = bool, where conv is in self._convergenceCriteria
"""
- convs = {}
- for conv in self._convergenceCriteria:
- fName = conv[:1].upper() + conv[1:]
- # get function from lookup
- f = getattr(self, f'_checkConv{fName}')
- # check convergence function
- okay = f(traj, new=new, old=old)
- # store and update
- convs[conv] = okay
-
+    # the convergence criteria are checked the same way for single- and multi-objective optimization
+    convs = {}
+    for conv in self._convergenceCriteria:
+      fName = conv[:1].upper() + conv[1:]
+      # get function from lookup
+      f = getattr(self, f'_checkConv{fName}')
+      # check convergence function
+      okay = f(traj, new=new, old=old)
+      # store and update
+      convs[conv] = okay
return any(convs.values()), convs
def _checkConvObjective(self, traj, **kwargs):
@@ -704,16 +1083,23 @@ def _checkConvObjective(self, traj, **kwargs):
@ In, kwargs, dict, dictionary of parameters for convergence criteria
@ Out, converged, bool, convergence state
"""
- if len(self._optPointHistory[traj]) < 2:
- return False
- o1, _ = self._optPointHistory[traj][-1]
- obj = o1[self._objectiveVar]
- converged = (obj == self._convergenceCriteria['objective'])
- self.raiseADebug(self.convFormat.format(name='objective',
- conv=str(converged),
- got=obj,
- req=self._convergenceCriteria['objective']))
-
+ if len(self._objectiveVar) == 1: # This is for a single-objective Optimization case.
+ if len(self._optPointHistory[traj]) < 2:
+ return False
+ o1, _ = self._optPointHistory[traj][-1]
+ obj = o1[self._objectiveVar[0]]
+ converged = (obj == self._convergenceCriteria['objective'])
+ self.raiseADebug(self.convFormat.format(name='objective',
+ conv=str(converged),
+ got=obj,
+ req=self._convergenceCriteria['objective']))
+ else: # This is for a multi-objective Optimization case.
+ if len(self._optPointHistory[traj]) < 2:
+ return False
+ o1, _ = self._optPointHistory[traj][-1]
+ obj1 = o1[self._objectiveVar[0]]
+ obj2 = o1[self._objectiveVar[1]]
+ converged = (obj1 == self._convergenceCriteria['objective'] and obj2 == self._convergenceCriteria['objective'])
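+      # NOTE: the multi-objective convergence check above currently assumes exactly two objectives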
return converged
def _checkConvAHDp(self, traj, **kwargs):
@@ -834,14 +1220,24 @@ def _updateConvergence(self, traj, new, old, acceptable):
@ Out, converged, bool, True if converged on ANY criteria
"""
# NOTE we have multiple "if acceptable" trees here, as we need to update soln export regardless
- if acceptable == 'accepted':
- self.raiseADebug(f'Convergence Check for Trajectory {traj}:')
- # check convergence
- converged, convDict = self.checkConvergence(traj, new, old)
- else:
- converged = False
- convDict = dict((var, False) for var in self._convergenceInfo[traj])
- self._convergenceInfo[traj].update(convDict)
+    # the update logic is identical for single- and multi-objective optimization
+    if acceptable == 'accepted':
+      self.raiseADebug(f'Convergence Check for Trajectory {traj}:')
+      # check convergence
+      converged, convDict = self.checkConvergence(traj, new, old)
+    else:
+      converged = False
+      convDict = dict((var, False) for var in self._convergenceInfo[traj])
+    self._convergenceInfo[traj].update(convDict)
return converged
@@ -877,8 +1273,9 @@ def _rejectOptPoint(self, traj, info, old):
"""
return
- # * * * * * * * * * * * *
- # Constraint Handling
+ ###############################
+ # Constraint Handling #
+ ###############################
def _handleExplicitConstraints(self, point, constraint):
"""
Computes explicit (i.e. input-based) constraints
@@ -941,9 +1338,9 @@ def _checkImpFunctionalConstraints(self, point, opt, impConstraint):
g = impConstraint.evaluate('implicitConstraint', inputs)
return g
-
- # END constraint handling
- # * * * * * * * * * * * *
+ ###############################
+ # END constraint handling #
+ ###############################
def _addToSolutionExport(self, traj, rlz, acceptable):
"""
Contributes additional entries to the solution export.
@@ -955,9 +1352,11 @@ def _addToSolutionExport(self, traj, rlz, acceptable):
# meta variables
toAdd = {'age': 0 if self.popAge is None else self.popAge,
'batchId': self.batchId,
- 'fitness': rlz['fitness'],
'AHDp': self.ahdp,
- 'AHD': self.ahd}
+ 'AHD': self.ahd,
+             'rank': 0 if isinstance(self._objectiveVar, str) or len(self._objectiveVar) == 1 else rlz['rank'],
+             'CD': 0 if isinstance(self._objectiveVar, str) or len(self._objectiveVar) == 1 else rlz['CD']}
for var, val in self.constants.items():
toAdd[var] = val
@@ -984,6 +1383,8 @@ def _formatSolutionExportVariableNames(self, acceptable):
new.extend([template.format(CONV=conv) for conv in self._convergenceCriteria])
elif '{VAR}' in template:
new.extend([template.format(VAR=var) for var in self.toBeSampled])
+ elif '{OBJ}' in template:
+ new.extend([template.format(OBJ=obj) for obj in self._objectiveVar])
elif '{CONSTRAINT}' in template:
new.extend([template.format(CONSTRAINT=constraint.name) for constraint in self._constraintFunctions + self._impConstraintFunctions])
else:
diff --git a/ravenframework/Optimizers/GradientDescent.py b/ravenframework/Optimizers/GradientDescent.py
index 452f579f4e..7a37205c28 100644
--- a/ravenframework/Optimizers/GradientDescent.py
+++ b/ravenframework/Optimizers/GradientDescent.py
@@ -212,6 +212,7 @@ def __init__(self):
self._followerProximity = 1e-2 # distance at which annihilation can start occurring, in ?normalized? space
self._trajectoryFollowers = defaultdict(list) # map of trajectories to the trajectories following them
self._functionalConstraintExplorationLimit = 500 # number of input-space explorations allowable for functional constraints
+ self._canHandleMultiObjective = False # Currently Gradient Descent cannot handle multiobjective optimization
# __private
# additional methods
# register adaptive sample identification criteria
@@ -338,7 +339,11 @@ def _useRealization(self, info, rlz):
@ Out, None
"""
traj = info['traj']
- optVal = rlz[self._objectiveVar]
+    if len(self._objectiveVar) > 1 and isinstance(self._objectiveVar, list):
+      self.raiseAnError(IOError, 'Gradient Descent does not support multi-objective optimization yet! The objective must be a single variable for now!')
+    optVal = rlz[self._objectiveVar[0]]
info['optVal'] = optVal
purpose = info['purpose']
if purpose.startswith('opt'):
@@ -353,13 +358,13 @@ def _useRealization(self, info, rlz):
gradMag, gradVersor, _ = self._gradientInstance.evaluate(opt,
grads,
gradInfos,
- self._objectiveVar)
+ self._objectiveVar[0])
self.raiseADebug(' ... gradient calculated ...')
self._gradHistory[traj].append((gradMag, gradVersor))
# get new step information
try:
newOpt, stepSize, stepInfo = self._stepInstance.step(opt,
- objVar=self._objectiveVar,
+ objVar=self._objectiveVar[0],
optHist=self._optPointHistory[traj],
gradientHist=self._gradHistory[traj],
prevStepSize=self._stepHistory[traj],
@@ -378,7 +383,7 @@ def _useRealization(self, info, rlz):
except NoConstraintResolutionFound:
# we've tried everything, but we just can't hack it
self.raiseAMessage(f'Optimizer "{self.name}" trajectory {traj} was unable to continue due to functional or boundary constraints.')
- self._closeTrajectory(traj, 'converge', 'no constraint resolution', opt[self._objectiveVar])
+ self._closeTrajectory(traj, 'converge', 'no constraint resolution', opt[self._objectiveVar[0]])
return
# update values if modified by constraint handling
@@ -598,7 +603,7 @@ def _checkAcceptability(self, traj, opt, optVal, info):
# Check acceptability
if self._optPointHistory[traj]:
old, _ = self._optPointHistory[traj][-1]
- oldVal = old[self._objectiveVar]
+ oldVal = old[self._objectiveVar[0]]
# check if following another trajectory
if self._terminateFollowers:
following = self._stepInstance.trajIsFollowing(traj, self.denormalizeData(opt), info,
@@ -815,7 +820,7 @@ def _checkConvObjective(self, traj):
return False
o1, _ = self._optPointHistory[traj][-1]
o2, _ = self._optPointHistory[traj][-2]
- delta = mathUtils.relativeDiff(o2[self._objectiveVar], o1[self._objectiveVar])
+ delta = mathUtils.relativeDiff(o2[self._objectiveVar[0]], o1[self._objectiveVar[0]])
converged = abs(delta) < self._convergenceCriteria['objective']
self.raiseADebug(self.convFormat.format(name='objective',
conv=str(converged),
diff --git a/ravenframework/Optimizers/Optimizer.py b/ravenframework/Optimizers/Optimizer.py
index bf5face36e..a9b5600615 100644
--- a/ravenframework/Optimizers/Optimizer.py
+++ b/ravenframework/Optimizers/Optimizer.py
@@ -78,9 +78,9 @@ def getInputSpecification(cls):
specs.description = 'Optimizers'
# objective variable
- specs.addSub(InputData.parameterInputFactory('objective', contentType=InputTypes.StringType, strictMode=True,
+ specs.addSub(InputData.parameterInputFactory('objective', contentType=InputTypes.StringListType, strictMode=True,
printPriority=90, # more important than
- descr=r"""Name of the response variable (or ``objective function'') that should be optimized
+                                           descr=r"""Name(s) of the objective variable(s) (or ``objective function(s)'') that should be optimized
(minimized or maximized)."""))
# modify Sampler variable nodes
@@ -103,7 +103,8 @@ def getInputSpecification(cls):
descr=r"""seed for random number generation. Note that by default RAVEN uses an internal seed,
so this seed must be changed to observe changed behavior. \default{RAVEN-determined}""")
minMaxEnum = InputTypes.makeEnumType('MinMax', 'MinMaxType', ['min', 'max'])
- minMax = InputData.parameterInputFactory('type', contentType=minMaxEnum,
+ minMaxList = InputTypes.StringListType()
+ minMax = InputData.parameterInputFactory('type', contentType=minMaxList,
descr=r"""the type of optimization to perform. \xmlString{min} will search for the lowest
\xmlNode{objective} value, while \xmlString{max} will search for the highest value.""")
init.addSub(seed)
@@ -161,7 +162,7 @@ def __init__(self):
# public
# _protected
self._seed = None # random seed to apply
- self._minMax = 'min' # maximization or minimization?
+ self._minMax = ['min'] # maximization or minimization?
self._activeTraj = [] # tracks live trajectories
self._cancelledTraj = {} # tracks cancelled trajectories, and reasons
self._convergedTraj = {} # tracks converged trajectories, and values obtained
@@ -249,7 +250,6 @@ def handleInput(self, paramInput):
@ Out, None
"""
# the reading of variables (dist or func) and constants already happened in _readMoreXMLbase in Sampler
- # objective var
self._objectiveVar = paramInput.findFirst('objective').value
# sampler init
@@ -264,6 +264,10 @@ def handleInput(self, paramInput):
minMax = init.findFirst('type')
if minMax is not None:
self._minMax = minMax.value
+      if len(self._minMax) != len(self._objectiveVar):
+        self.raiseAnError(IOError, 'The <type> node and the <objective> node must have the same number of entries!')
+      if list(set(self._minMax) - set(['min','max'])) != []:
+        self.raiseAnError(IOError, "Entries of the <type> node must be either 'min' or 'max'!")
# variables additional reading
for varNode in paramInput.findAll('variable'):
diff --git a/ravenframework/Optimizers/RavenSampled.py b/ravenframework/Optimizers/RavenSampled.py
index d85181657c..b3d13274bc 100644
--- a/ravenframework/Optimizers/RavenSampled.py
+++ b/ravenframework/Optimizers/RavenSampled.py
@@ -300,8 +300,16 @@ def localFinalizeActualSampling(self, jobObject, model, myInput):
# # testing suggests no big deal on smaller problem
# the sign of the objective function is flipped in case we do maximization
# so get the correct-signed value into the realization
- if self._minMax == 'max':
- rlz[self._objectiveVar] *= -1
+
+ if 'max' in self._minMax:
+ if not self._canHandleMultiObjective and len(self._objectiveVar) == 1:
+ rlz[self._objectiveVar[0]] *= -1
+ elif type(self._objectiveVar) == list:
+ for i in range(len(self._objectiveVar)):
+ if self._minMax[i] == 'max':
+ rlz[self._objectiveVar[i]] *= -1
+ else:
+ rlz[self._objectiveVar] *= -1
# TODO FIXME let normalizeData work on an xr.DataSet (batch) not just a dictionary!
rlz = self.normalizeData(rlz)
self._useRealization(info, rlz)
@@ -312,57 +320,127 @@ def finalizeSampler(self, failedRuns):
@ In, failedRuns, list, runs that failed as part of this sampling
@ Out, None
"""
- # get and print the best trajectory obtained
- bestValue = None
- bestTraj = None
- bestPoint = None
- s = -1 if self._minMax == 'max' else 1
- # check converged trajectories
- self.raiseAMessage('*' * 80)
- self.raiseAMessage('Optimizer Final Results:')
- self.raiseADebug('')
- self.raiseADebug(' - Trajectory Results:')
- self.raiseADebug(' TRAJ STATUS VALUE')
- statusTemplate = ' {traj:2d} {status:^11s} {val: 1.3e}'
- # print cancelled traj
- for traj, info in self._cancelledTraj.items():
- val = info['value']
- status = info['reason']
- self.raiseADebug(statusTemplate.format(status=status, traj=traj, val=s * val))
- # check converged traj
- for traj, info in self._convergedTraj.items():
+ if not self._canHandleMultiObjective or len(self._objectiveVar) == 1:
+ # get and print the best trajectory obtained
+ bestValue = None
+ bestTraj = None
+ bestPoint = None
+ s = -1 if 'max' in self._minMax else 1
+ # check converged trajectories
+ self.raiseAMessage('*' * 80)
+ self.raiseAMessage('Optimizer Final Results:')
+ self.raiseADebug('')
+ self.raiseADebug(' - Trajectory Results:')
+ self.raiseADebug(' TRAJ STATUS VALUE')
+ statusTemplate = ' {traj:2d} {status:^11s} {val: 1.3e}'
+ # print cancelled traj
+ for traj, info in self._cancelledTraj.items():
+ val = info['value']
+ status = info['reason']
+ self.raiseADebug(statusTemplate.format(status=status, traj=traj, val=s * val))
+ # check converged traj
+ for traj, info in self._convergedTraj.items():
+ opt = self._optPointHistory[traj][-1][0]
+ val = info['value']
+ self.raiseADebug(statusTemplate.format(status='converged', traj=traj, val=s * val))
+ if bestValue is None or val < bestValue:
+ bestTraj = traj
+ bestValue = val
+ # further check active unfinished trajectories
+ # FIXME why should there be any active, unfinished trajectories when we're cleaning up sampler?
+ traj = 0 # FIXME why only 0?? what if it's other trajectories that are active and unfinished?
+ # sanity check: if there's no history (we never got any answers) then report rather than crash
+ if len(self._optPointHistory[traj]) == 0:
+ self.raiseAnError(RuntimeError, f'There is no optimization history for traj {traj}! ' +
+ 'Perhaps the Model failed?')
opt = self._optPointHistory[traj][-1][0]
- val = info['value']
- self.raiseADebug(statusTemplate.format(status='converged', traj=traj, val=s * val))
+ val = opt[self._objectiveVar[0]]
+ self.raiseADebug(statusTemplate.format(status='active', traj=traj, val=s * val))
if bestValue is None or val < bestValue:
- bestTraj = traj
bestValue = val
- # further check active unfinished trajectories
- # FIXME why should there be any active, unfinished trajectories when we're cleaning up sampler?
- traj = 0 # FIXME why only 0?? what if it's other trajectories that are active and unfinished?
- # sanity check: if there's no history (we never got any answers) then report than rather than crash
- if len(self._optPointHistory[traj]) == 0:
- self.raiseAnError(RuntimeError, f'There is no optimization history for traj {traj}! ' +
- 'Perhaps the Model failed?')
- opt = self._optPointHistory[traj][-1][0]
- val = opt[self._objectiveVar]
- self.raiseADebug(statusTemplate.format(status='active', traj=traj, val=s * val))
- if bestValue is None or val < bestValue:
- bestValue = val
- bestTraj = traj
- bestOpt = self.denormalizeData(self._optPointHistory[bestTraj][-1][0])
- bestPoint = dict((var, bestOpt[var]) for var in self.toBeSampled)
- self.raiseADebug('')
- self.raiseAMessage(' - Final Optimal Point:')
- finalTemplate = ' {name:^20s} {value: 1.3e}'
- finalTemplateInt = ' {name:^20s} {value: 3d}'
- self.raiseAMessage(finalTemplate.format(name=self._objectiveVar, value=s * bestValue))
- self.raiseAMessage(finalTemplateInt.format(name='trajID', value=bestTraj))
- for var, val in bestPoint.items():
- self.raiseAMessage(finalTemplate.format(name=var, value=val))
- self.raiseAMessage('*' * 80)
- # write final best solution to soln export
- self._updateSolutionExport(bestTraj, self.normalizeData(bestOpt), 'final', 'None')
+ bestTraj = traj
+ bestOpt = self.denormalizeData(self._optPointHistory[bestTraj][-1][0])
+ bestPoint = dict((var, bestOpt[var]) for var in self.toBeSampled)
+ self.raiseADebug('')
+ self.raiseAMessage(' - Final Optimal Point:')
+ finalTemplate = ' {name:^20s} {value: 1.3e}'
+ finalTemplateInt = ' {name:^20s} {value: 3d}'
+ self.raiseAMessage(finalTemplate.format(name=self._objectiveVar[0], value=s * bestValue))
+ self.raiseAMessage(finalTemplateInt.format(name='trajID', value=bestTraj))
+ for var, val in bestPoint.items():
+ self.raiseAMessage(finalTemplate.format(name=var, value=val))
+ self.raiseAMessage('*' * 80)
+ # write final best solution to soln export
+ self._updateSolutionExport(bestTraj, self.normalizeData(bestOpt), 'final', 'None')
+ else:
+ # get and print the best trajectory obtained
+ bestValue = None
+ bestTraj = None
+ bestPoint = None
+ s = -1 if 'max' in self._minMax else 1
+ # check converged trajectories
+ self.raiseAMessage('*' * 80)
+ self.raiseAMessage('Optimizer Final Results:')
+ self.raiseADebug('')
+ self.raiseADebug(' - Trajectory Results:')
+ self.raiseADebug(' TRAJ STATUS VALUE')
+ statusTemplate = ' {traj:2d} {status:^11s} {val: 1.3e}'
+ statusTemplate_multi = ' {traj:2d} {status:^11s} {val1: ^11s} {val2: ^11s}'
+
+ # print cancelled traj
+ for traj, info in self._cancelledTraj.items():
+ val = info['value']
+ status = info['reason']
+ self.raiseADebug(statusTemplate.format(status=status, traj=traj, val=s * val))
+ # check converged traj
+ for traj, info in self._convergedTraj.items():
+ opt = self._optPointHistory[traj][-1][0]
+ val = info['value']
+ self.raiseADebug(statusTemplate.format(status='converged', traj=traj, val=s * val))
+ if bestValue is None or val < bestValue:
+ bestTraj = traj
+ bestValue = val
+ # further check active unfinished trajectories
+ # FIXME why should there be any active, unfinished trajectories when we're cleaning up sampler?
+ traj = 0 # FIXME why only 0?? what if it's other trajectories that are active and unfinished?
+ # sanity check: if there's no history (we never got any answers) then report rather than crash
+ if len(self._optPointHistory[traj]) == 0:
+ self.raiseAnError(RuntimeError, f'There is no optimization history for traj {traj}! ' +
+ 'Perhaps the Model failed?')
+
+ if len(self._objectiveVar) == 1:
+ opt = self._optPointHistory[traj][-1][0]
+ val = opt[self._objectiveVar[0]]
+ self.raiseADebug(statusTemplate.format(status='active', traj=traj, val=s * val))
+ if bestValue is None or val < bestValue:
+ bestValue = val
+ bestTraj = traj
+ bestOpt = self.denormalizeData(self._optPointHistory[bestTraj][-1][0])
+ bestPoint = dict((var, bestOpt[var]) for var in self.toBeSampled)
+ self.raiseADebug('')
+ self.raiseAMessage(' - Final Optimal Point:')
+ finalTemplate = ' {name:^20s} {value: 1.3e}'
+ finalTemplateInt = ' {name:^20s} {value: 3d}'
+ # self.raiseAMessage(finalTemplate.format(name=self._objectiveVar, value=s * bestValue))
+ self.raiseAMessage(finalTemplateInt.format(name='trajID', value=bestTraj))
+ for var, val in bestPoint.items():
+ self.raiseAMessage(finalTemplate.format(name=var, value=val))
+ self.raiseAMessage('*' * 80)
+ # write final best solution to soln export
+ self._updateSolutionExport(bestTraj, self.normalizeData(bestOpt), 'final', 'None')
+ else:
+ for i in range(len(self._optPointHistory[traj][-1][0][self._objectiveVar[0]])):
+ opt = self._optPointHistory[traj][-1][0]
+ key = list(opt.keys())
+ val = [item[i] for item in opt.values()]
+ optElm = {key[a]: val[a] for a in range(len(key))}
+ optVal = [(-1*(self._minMax[b]=='max')+(self._minMax[b]=='min'))*optElm[self._objectiveVar[b]] for b in range(len(self._objectiveVar))]
+
+ bestTraj = traj
+ bestOpt = self.denormalizeData(optElm)
+ bestPoint = dict((var, bestOpt[var]) for var in self.toBeSampled)
+
+ self._updateSolutionExport(bestTraj, self.normalizeData(bestOpt), 'final', 'None')
def flush(self):
"""
@@ -498,10 +576,10 @@ def _handleImplicitConstraints(self, previous):
@ Out, accept, bool, whether point was satisfied implicit constraints
"""
normed = copy.deepcopy(previous)
- oldVal = normed[self._objectiveVar]
- normed.pop(self._objectiveVar, oldVal)
+ oldVal = normed[self._objectiveVar[0]]
+ normed.pop(self._objectiveVar[0], oldVal)
denormed = self.denormalizeData(normed)
- denormed[self._objectiveVar] = oldVal
+ denormed[self._objectiveVar[0]] = oldVal
accept = self._checkImpFunctionalConstraints(denormed)
return accept
@@ -569,9 +647,12 @@ def _resolveNewOptPoint(self, traj, rlz, optVal, info):
# TODO could we ever use old rerun gradients to inform the gradient direction as well?
self._rerunsSinceAccept[traj] += 1
N = self._rerunsSinceAccept[traj] + 1
- oldVal = self._optPointHistory[traj][-1][0][self._objectiveVar]
+ oldVal = self._optPointHistory[traj][-1][0][self._objectiveVar[0]] # NOTE: rerun averaging currently tracks only the first objective
newAvg = ((N-1)*oldVal + optVal) / N
- self._optPointHistory[traj][-1][0][self._objectiveVar] = newAvg
+ self._optPointHistory[traj][-1][0][self._objectiveVar[0]] = newAvg
else:
self.raiseAnError(f'Unrecognized acceptability: "{acceptable}"')
@@ -635,15 +716,22 @@ def _updateSolutionExport(self, traj, rlz, acceptable, rejectReason):
'rejectReason': rejectReason
})
# optimal point input and output spaces
- objValue = rlz[self._objectiveVar]
- if self._minMax == 'max':
- objValue *= -1
- toExport[self._objectiveVar] = objValue
+ if len(self._objectiveVar) == 1: # Single Objective Optimization
+ objValue = rlz[self._objectiveVar[0]]
+ if 'max' in self._minMax:
+ objValue *= -1
+ toExport[self._objectiveVar[0]] = objValue
+ else: # Multi Objective Optimization
+ for i in range(len(self._objectiveVar)):
+ objValue = rlz[self._objectiveVar[i]]
+ if self._minMax[i] == 'max':
+ objValue *= -1
+ toExport[self._objectiveVar[i]] = objValue
toExport.update(self.denormalizeData(dict((var, rlz[var]) for var in self.toBeSampled)))
# constants and functions
toExport.update(self.constants)
toExport.update(dict((var, rlz[var]) for var in self.dependentSample if var in rlz))
- # additional from from inheritors
+ # additional from inheritors
toExport.update(self._addToSolutionExport(traj, rlz, acceptable))
# check for anything else that solution export wants that rlz might provide
for var in self._solutionExport.getVars():
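The sign handling that runs through this file follows one convention: RAVEN minimizes internally, so any objective marked 'max' is negated when a realization comes in and negated again on export. A minimal sketch of that convention (flipSigns is an illustrative helper, not RAVEN API):

def flipSigns(rlz, objectiveVars, minMax):
  # negate every objective marked 'max' so the optimizer can always minimize
  for var, direction in zip(objectiveVars, minMax):
    if direction == 'max':
      rlz[var] = -rlz[var]
  return rlz

print(flipSigns({'obj1': 3.0, 'obj2': -1.5}, ['obj1', 'obj2'], ['max', 'min']))
# {'obj1': -3.0, 'obj2': -1.5}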
diff --git a/ravenframework/Optimizers/SimulatedAnnealing.py b/ravenframework/Optimizers/SimulatedAnnealing.py
index 03f1ba1445..18364ac655 100644
--- a/ravenframework/Optimizers/SimulatedAnnealing.py
+++ b/ravenframework/Optimizers/SimulatedAnnealing.py
@@ -190,6 +190,7 @@ def __init__(self):
self._coolingMethod = None # initializing cooling method
self._coolingParameters = {} # initializing the cooling schedule parameters
self.info = {}
+ self._canHandleMultiObjective = False # Currently Simulated Annealing can only handle single objective
def handleInput(self, paramInput):
"""
@@ -300,9 +301,11 @@ def _useRealization(self, info, rlz):
@ Out, None
"""
traj = info['traj']
- info['optVal'] = rlz[self._objectiveVar]
+ if isinstance(self._objectiveVar, list) and len(self._objectiveVar) > 1:
+   self.raiseAnError(IOError, 'Simulated Annealing does not support multi-objective optimization yet! The objective must be a single variable for now!')
+ info['optVal'] = rlz[self._objectiveVar[0]]
self.incrementIteration(traj)
- self._resolveNewOptPoint(traj, rlz, rlz[self._objectiveVar], info)
+ self._resolveNewOptPoint(traj, rlz, rlz[self._objectiveVar[0]], info)
if self._stepTracker[traj]['opt'] is None:
# revert to the last accepted point
rlz = self._optPointHistory[traj][-1][0]
@@ -321,7 +324,7 @@ def _useRealization(self, info, rlz):
except NoConstraintResolutionFound:
# we've tried everything, but we just can't hack it
self.raiseAMessage(f'Optimizer "{self.name}" trajectory {traj} was unable to continue due to functional or boundary constraints.')
- self._closeTrajectory(traj, 'converge', 'no constraint resolution', newPoint[self._objectiveVar])
+ self._closeTrajectory(traj, 'converge', 'no constraint resolution', newPoint[self._objectiveVar[0]])
return
self._submitRun(suggested, traj, self.getIteration(traj))
@@ -393,7 +396,7 @@ def _checkConvObjective(self, traj):
return False
o1, _ = self._optPointHistory[traj][-1]
o2, _ = self._optPointHistory[traj][-2]
- delta = o2[self._objectiveVar]-o1[self._objectiveVar]
+ delta = o2[self._objectiveVar[0]]-o1[self._objectiveVar[0]]
converged = abs(delta) < self._convergenceCriteria['objective']
self.raiseADebug(self.convFormat.format(name='objective',
conv=str(converged),
@@ -442,9 +445,9 @@ def _checkAcceptability(self, traj, opt, optVal, info):
# NOTE: if self._optPointHistory[traj]: -> faster to use "try" for all but the first time
try:
old, _ = self._optPointHistory[traj][-1]
- oldVal = old[self._objectiveVar]
+ oldVal = old[self._objectiveVar[0]]
# check if same point
- self.raiseADebug(f' ... change: {opt[self._objectiveVar]-oldVal:1.3e} new objective: {opt[self._objectiveVar]:1.6e} old objective: {oldVal:1.6e}')
+ self.raiseADebug(f' ... change: {opt[self._objectiveVar[0]]-oldVal:1.3e} new objective: {opt[self._objectiveVar[0]]:1.6e} old objective: {oldVal:1.6e}')
# if this is an opt point rerun, accept it without checking.
if self._acceptRerun[traj]:
acceptable = 'rerun'
@@ -453,7 +456,7 @@ def _checkAcceptability(self, traj, opt, optVal, info):
# this is the classic "same point" trap; we accept the same point, and check convergence later
acceptable = 'accepted'
else:
- if self._acceptabilityCriterion(oldVal,opt[self._objectiveVar])>randomUtils.random(dim=1, samples=1): # TODO replace it back
+ if self._acceptabilityCriterion(oldVal,opt[self._objectiveVar[0]])>randomUtils.random(dim=1, samples=1): # TODO replace it back
acceptable = 'accepted'
else:
acceptable = 'rejected'
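The acceptance test above compares _acceptabilityCriterion(oldVal, newVal) against a uniform random number, which for a simulated annealer is typically the Metropolis rule. A minimal sketch under that assumption (the temperature handling is illustrative, not RAVEN's cooling schedule):

import math, random

def metropolisAccept(oldVal, newVal, temperature):
  # a better (lower) objective is always accepted; a worse one is accepted
  # with probability exp(-delta/T), which shrinks as the system cools
  if newVal <= oldVal:
    return True
  return math.exp(-(newVal - oldVal) / temperature) > random.random()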
diff --git a/ravenframework/Optimizers/fitness/fitness.py b/ravenframework/Optimizers/fitness/fitness.py
index 53a27ff1c6..3dc0625e75 100644
--- a/ravenframework/Optimizers/fitness/fitness.py
+++ b/ravenframework/Optimizers/fitness/fitness.py
@@ -16,11 +16,17 @@
currently the implemented fitness function is a linear combination of the objective function and penalty function for constraint violation:
Created June,16,2020
- @authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi
+ Updated September,17,2023
+ @authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi, Junyung Kim
"""
+# Internal Modules----------------------------------------------------------------------------------
+from ...utils import frontUtils
+from ..parentSelectors.parentSelectors import countConstViolation
+
# External Imports
import numpy as np
import xarray as xr
+import sys
# Internal Imports
# [MANDD] Note: the fitness function are bounded by 2 parameters: a and b
@@ -54,26 +60,66 @@ def invLinear(rlz,**kwargs):
@ Out, fitness, xr.DataArray, the fitness function of the given objective corresponding to a specific chromosome.
"""
if kwargs['a'] == None:
- a = 1.0
+ a = [1.0]
else:
a = kwargs['a']
if kwargs['b'] == None:
- b = 10.0
+ b = [10.0]
else:
b = kwargs['b']
if kwargs['constraintFunction'].all() == None:
penalty = 0.0
else:
penalty = kwargs['constraintFunction'].data
-
- objVar = kwargs['objVar']
- data = np.atleast_1d(rlz[objVar].data)
-
- fitness = -a * (rlz[objVar].data).reshape(-1,1) - b * np.sum(np.maximum(0,-penalty),axis=-1).reshape(-1,1)
- fitness = xr.DataArray(np.squeeze(fitness),
+ if isinstance(kwargs['objVar'], str):
+ objVar = [kwargs['objVar']]
+ else:
+ objVar = kwargs['objVar']
+ for j in range(len(objVar)):
+ data = np.atleast_1d(rlz[objVar][objVar[j]].data)
+ fitness = -a[0] * (rlz[objVar][objVar[j]].data).reshape(-1,1) - b[0] * np.sum(np.maximum(0,-penalty),axis=-1).reshape(-1,1)
+ fitness = xr.DataArray(np.squeeze(fitness),
dims=['chromosome'],
coords={'chromosome': np.arange(len(data))})
- return fitness
+ if j == 0:
+ fitnessSet = fitness.to_dataset(name = objVar[j])
+ else:
+ fitnessSet[objVar[j]] = fitness
+ return fitnessSet
+
+def hardConstraint(rlz,**kwargs):
+ r"""
+ Fitness method counting the number of constraints violated
+
+ @ In, rlz, xr.Dataset, containing the evaluation of a certain
+ set of individuals (can be the initial population for the very first iteration,
+ or a population of offsprings)
+ @ In, kwargs, dict, dictionary of parameters for this fitness method:
+       objVar, list, the names of the objective variables
+ @ Out, fitnessSet, xr.Dataset, for each objective, the negative count of constraints violated by each chromosome
+ """
+ if isinstance(kwargs['objVar'], str):
+ objVar = [kwargs['objVar']]
+ else:
+ objVar = kwargs['objVar']
+ g = kwargs['constraintFunction']
+
+ for j in range(len(objVar)):
+ fitness = np.zeros((len(g.data), 1))
+ for i in range(len(fitness)):
+ fitness[i] = countConstViolation(g.data[i])
+ fitness = [-item for sublist in fitness.tolist() for item in sublist]
+ fitness = xr.DataArray(fitness,
+ dims=['NumOfConstraintViolated'],
+ coords={'NumOfConstraintViolated':np.arange(np.shape(fitness)[0])})
+ if j == 0:
+ fitnessSet = fitness.to_dataset(name = objVar[j])
+ else:
+ fitnessSet[objVar[j]] = fitness
+
+ return fitnessSet
+
def feasibleFirst(rlz,**kwargs):
r"""
@@ -83,11 +129,13 @@ def feasibleFirst(rlz,**kwargs):
1. As the objective function decreases (comes closer to the min value), the fitness value increases
2. As the objective function increases (away from the min value), the fitness value decreases
3. As the solution violates the constraints the fitness should decrease and hence the solution is less favored by the algorithm.
- 4. For the violating solutions, the fitness is starts from the worst solution in the population
+ 4. For the violating solutions, the fitness starts from the worst solution in the population
(i.e., max objective in minimization problems and min objective in maximization problems)
For maximization problems the objective value is multiplied by -1 and hence the previous trends are inverted.
A great quality of this fitness is that if the objective value is equal for multiple solutions it selects the furthest from constraint violation.
+
+ Reference: Deb, Kalyanmoy. "An efficient constraint handling method for genetic algorithms." Computer methods in applied mechanics and engineering 186.2-4 (2000): 311-338.
.. math::
@@ -105,23 +153,38 @@ def feasibleFirst(rlz,**kwargs):
'constraintFunction', xr.Dataarray, containing all constraint functions (explicit and implicit) evaluations for the whole population
@ Out, fitness, xr.DataArray, the fitness function of the given objective corresponding to a specific chromosome.
"""
- objVar = kwargs['objVar']
- g = kwargs['constraintFunction']
- data = np.atleast_1d(rlz[objVar].data)
- worstObj = max(data)
- fitness = []
- for ind in range(data.size):
- if np.all(g.data[ind, :]>=0):
- fit=(data[ind])
+ if isinstance(kwargs['objVar'], str):
+ objVar = [kwargs['objVar']]
+ else:
+ objVar = kwargs['objVar']
+ if kwargs['constraintNum'] == 0:
+ pen = kwargs['b']
+ else:
+ g = kwargs['constraintFunction']
+ penalty = kwargs['b']
+ pen = [penalty[i:i+len(g['Constraint'].data)] for i in range(0, len(penalty), len(g['Constraint'].data))]
+
+ objPen = dict(zip(objVar, pen))
+ for i in range(len(objVar)):
+ data = np.atleast_1d(rlz[objVar][objVar[i]].data)
+ worstObj = max(data)
+ fitness = []
+ for ind in range(data.size):
+ if kwargs['constraintNum'] == 0 or np.all(g.data[ind, :]>=0):
+ fit=(data[ind])
+ else:
+ fit = worstObj
+ for constInd,_ in enumerate(g['Constraint'].data):
+ fit+= objPen[objVar[i]][constInd]*(max(0,-1*g.data[ind, constInd])) #NOTE: objPen[objVar[i]][constInd] is "objective & Constraint specific penalty."
+ fitness.append(-1*fit)
+ fitness = xr.DataArray(np.array(fitness),
+ dims=['chromosome'],
+ coords={'chromosome': np.arange(len(data))})
+ if i == 0:
+ fitnessSet = fitness.to_dataset(name = objVar[i])
else:
- fit = worstObj
- for constInd,_ in enumerate(g['Constraint'].data):
- fit+=(max(0,-1 * g.data[ind, constInd]))
- fitness.append(-1 * fit)
- fitness = xr.DataArray(np.array(fitness),
- dims=['chromosome'],
- coords={'chromosome': np.arange(len(data))})
- return fitness
+ fitnessSet[objVar[i]] = fitness
+ return fitnessSet
def logistic(rlz,**kwargs):
"""
@@ -141,31 +204,38 @@ def logistic(rlz,**kwargs):
@ Out, fitness, xr.DataArray, the fitness function of the given objective corresponding to a specific chromosome.
"""
if kwargs['a'] == None:
- a = 1.0
+ a = [1.0]
else:
a = kwargs['a']
-
if kwargs['b'] == None:
- b = 0.0
+ b = [0.0]
else:
b = kwargs['b']
+ if isinstance(kwargs['objVar'], str):
+ objVar = [kwargs['objVar']]
+ else:
+ objVar = kwargs['objVar']
+ for i in range(len(objVar)):
+ val = rlz[objVar][objVar[i]].data
+ data = np.atleast_1d(rlz[objVar][objVar[i]].data)
+ denom = 1.0 + np.exp(-a[0] * (val - b[0]))
+ fitness = 1.0 / denom
+ fitness = xr.DataArray(fitness.data,
+ dims=['chromosome'],
+ coords={'chromosome': np.arange(len(data))})
+ if i == 0:
+ fitnessSet = fitness.to_dataset(name = objVar[i])
+ else:
+ fitnessSet[objVar[i]] = fitness
- objVar = kwargs['objVar']
- val = rlz[objVar]
- data = np.atleast_1d(rlz[objVar].data)
- denom = 1.0 + np.exp(-a * (val - b))
- fitness = 1.0 / denom
- fitness = xr.DataArray(np.array(fitness),
- dims=['chromosome'],
- coords={'chromosome': np.arange(len(data))})
-
- return fitness
+ return fitnessSet
__fitness = {}
__fitness['invLinear'] = invLinear
__fitness['logistic'] = logistic
__fitness['feasibleFirst'] = feasibleFirst
+__fitness['hardConstraint'] = hardConstraint
def returnInstance(cls, name):
@@ -176,5 +246,5 @@ def returnInstance(cls, name):
@ Out, __crossovers[name], instance of class
"""
if name not in __fitness:
- cls.raiseAnError (IOError, "{} FITNESS FUNCTION NOT IMPLEMENTED!!!!!".format(name))
+ cls.raiseAnError(IOError, "{} is not a supported fitness function!".format(name))
return __fitness[name]
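The reworked feasibleFirst above applies Deb's feasibility rule per objective: feasible chromosomes keep their objective value, infeasible ones start from the worst objective in the population plus objective-specific penalties per violated constraint. A self-contained numpy sketch of that rule (names and the penalty layout are illustrative):

import numpy as np

def feasibleFirstSketch(obj, constraints, penalties):
  # obj: (nPop,) objective values (minimization)
  # constraints: (nPop, nConstraints), >= 0 means satisfied
  # penalties: (nConstraints,) penalty weights
  worst = obj.max()
  fitness = np.empty_like(obj)
  for ind in range(obj.size):
    if np.all(constraints[ind] >= 0):
      fit = obj[ind]
    else:
      fit = worst + np.sum(penalties * np.maximum(0.0, -constraints[ind]))
    fitness[ind] = -fit  # negate so that larger fitness = better
  return fitness

obj = np.array([1.0, 0.5, 2.0])
g = np.array([[0.2], [-0.3], [0.1]])  # the second chromosome violates the constraint
print(feasibleFirstSketch(obj, g, np.array([10.0])))  # [-1. -5. -2.]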
diff --git a/ravenframework/Optimizers/mutators/mutators.py b/ravenframework/Optimizers/mutators/mutators.py
index 11d0aec836..d5e58e4661 100644
--- a/ravenframework/Optimizers/mutators/mutators.py
+++ b/ravenframework/Optimizers/mutators/mutators.py
@@ -33,14 +33,19 @@ def swapMutator(offSprings, distDict, **kwargs):
E.g.:
child=[a,b,c,d,e] --> b and d are selected --> child = [a,d,c,b,e]
@ In, offSprings, xr.DataArray, children resulting from the crossover process
@ In, distDict, dict, dictionary containing distribution associated with each gene
@ In, kwargs, dict, dictionary of parameters for this mutation method:
locs, list, the 2 locations of the genes to be swapped
mutationProb, float, probability that governs the mutation process, i.e., if prob < random number, then the mutation will occur
variables, list, variables names.
@ Out, children, xr.DataArray, the mutated chromosome, i.e., the child.
"""
- loc1,loc2 = locationsGenerator(offSprings, kwargs['locs'])
+ if kwargs['locs'] is None:
+ locs = list(set(randomUtils.randomChoice(list(np.arange(offSprings.data.shape[1])),size=2,replace=False)))
+ loc1 = np.minimum(locs[0], locs[1])
+ loc2 = np.maximum(locs[0], locs[1])
+ else:
+ loc1 = np.minimum(kwargs['locs'][0], kwargs['locs'][1])
+ loc2 = np.maximum(kwargs['locs'][0], kwargs['locs'][1])
# initializing children
children = xr.DataArray(np.zeros((np.shape(offSprings))),
@@ -64,7 +69,6 @@ def scrambleMutator(offSprings, distDict, **kwargs):
This method performs the scramble mutator. For each child, a subset of genes is chosen
and their values are shuffled randomly.
@ In, offSprings, xr.DataArray, offsprings after crossover
@ In, distDict, dict, dictionary containing distribution associated with each gene
@ In, kwargs, dict, dictionary of parameters for this mutation method:
chromosome, numpy.array, the chromosome that will mutate to the new child
locs, list, the locations of the genes to be randomly scrambled
@@ -72,7 +76,12 @@ def scrambleMutator(offSprings, distDict, **kwargs):
variables, list, variables names.
@ Out, child, np.array, the mutated chromosome, i.e., the child.
"""
- loc1,loc2 = locationsGenerator(offSprings, kwargs['locs'])
+ if kwargs['locs'] is None:
+ locs = list(set(randomUtils.randomChoice(list(np.arange(offSprings.data.shape[1])),size=2,replace=False)))
+ locs.sort()
+ else:
+ locs = [kwargs['locs'][0], kwargs['locs'][1]]
+ locs.sort()
# initializing children
children = xr.DataArray(np.zeros((np.shape(offSprings))),
@@ -85,9 +94,9 @@ def scrambleMutator(offSprings, distDict, **kwargs):
children[i,j] = distDict[offSprings[i].coords['Gene'].values[j]].cdf(float(offSprings[i,j].values))
for i in range(np.shape(offSprings)[0]):
- for ind,element in enumerate([loc1,loc2]):
+ for ind,element in enumerate(locs):
if randomUtils.random(dim=1,samples=1)< kwargs['mutationProb']:
- children[i,loc1:loc2+1] = randomUtils.randomPermutation(list(children.data[i,loc1:loc2+1]),None)
+ children[i,locs[0]:locs[-1]+1] = randomUtils.randomPermutation(list(children.data[i,locs[0]:locs[-1]+1]),None)
for i in range(np.shape(offSprings)[0]):
for j in range(np.shape(offSprings)[1]):
@@ -102,7 +111,6 @@ def bitFlipMutator(offSprings, distDict, **kwargs):
The gene to be flipped is completely random.
The new value of the flipped gene is is completely random.
@ In, offSprings, xr.DataArray, children resulting from the crossover process
@ In, distDict, dict, dictionary containing distribution associated with each gene
@ In, kwargs, dict, dictionary of parameters for this mutation method:
mutationProb, float, probability that governs the mutation process, i.e., if prob < random number, then the mutation will occur
@ Out, offSprings, xr.DataArray, children resulting from the crossover process
@@ -128,7 +136,6 @@ def randomMutator(offSprings, distDict, **kwargs):
"""
This method is designed to randomly mutate a single gene in each chromosome with probability = mutationProb.
@ In, offSprings, xr.DataArray, children resulting from the crossover process
@ In, distDict, dict, dictionary containing distribution associated with each gene
@ In, kwargs, dict, dictionary of parameters for this mutation method:
mutationProb, float, probability that governs the mutation process, i.e., if prob < random number, then the mutation will occur
@ Out, offSprings, xr.DataArray, children resulting from the crossover process
@@ -155,13 +162,18 @@ def inversionMutator(offSprings, distDict, **kwargs):
E.g. given chromosome C = [0,1,2,3,4,5,6,7,8,9] and sampled locL=2 locU=6;
New chromosome C' = [0,1,6,5,4,3,2,7,8,9]
@ In, offSprings, xr.DataArray, children resulting from the crossover process
@ In, distDict, dict, dictionary containing distribution associated with each gene
@ In, kwargs, dict, dictionary of parameters for this mutation method:
mutationProb, float, probability that governs the mutation process, i.e., if prob < random number, then the mutation will occur
@ Out, offSprings, xr.DataArray, children resulting from the crossover process
"""
# sample gene locations: i.e., determine locL and locU
- locL,locU = locationsGenerator(offSprings, kwargs['locs'])
+ if kwargs['locs'] is None:
+ locs = list(set(randomUtils.randomChoice(list(np.arange(offSprings.data.shape[1])),size=2,replace=False)))
+ locL = np.minimum(locs[0], locs[1])
+ locU = np.maximum(locs[0], locs[1])
+ else:
+ locL = np.minimum(kwargs['locs'][0], kwargs['locs'][1])
+ locU = np.maximum(kwargs['locs'][0], kwargs['locs'][1])
for child in offSprings:
# the mutation is performed for each child independently
@@ -170,7 +182,7 @@ def inversionMutator(offSprings, distDict, **kwargs):
seq = np.arange(locL,locU+1)
allElems = []
for i,elem in enumerate(seq):
- allElems.append(distDict[child.coords['Gene'].values[i]].cdf(float(child[elem].values)))
+ allElems.append(distDict[child.coords['Gene'].values[i]].cdf(float(child[elem].values)))
mirrSeq = allElems[::-1]
mirrElems = []
@@ -181,23 +193,6 @@ def inversionMutator(offSprings, distDict, **kwargs):
return offSprings
-def locationsGenerator(offSprings,locs):
- """
- Methods designed to process the locations for the mutators. These locations can be either user specified or
- randomly generated.
- @ In, offSprings, xr.DataArray, children resulting from the crossover process
- @ In, locs, list, the two locations of the genes to be swapped
- @ Out, loc1, loc2, int, the two ordered processed locations required by the mutators
- """
- if locs == None:
- locs = list(set(randomUtils.randomChoice(list(np.arange(offSprings.data.shape[1])),size=2,replace=False)))
- loc1 = np.minimum(locs[0], locs[1])
- loc2 = np.maximum(locs[0], locs[1])
- else:
- loc1 = np.minimum(locs[0], locs[1])
- loc2 = np.maximum(locs[0], locs[1])
- return loc1, loc2
-
__mutators = {}
__mutators['swapMutator'] = swapMutator
__mutators['scrambleMutator'] = scrambleMutator
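With locationsGenerator removed, each mutator now inlines the same two-index sampling; reduced to its essentials it is (numpy stands in for RAVEN's randomUtils.randomChoice):

import numpy as np

def sampleLocs(nGenes, locs=None):
  # return two ordered, distinct gene indices, user-given or random
  if locs is None:
    locs = np.random.choice(nGenes, size=2, replace=False)
  return min(locs), max(locs)

print(sampleLocs(10))          # e.g. (2, 7)
print(sampleLocs(10, [6, 3]))  # (3, 6)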
diff --git a/ravenframework/Optimizers/parentSelectors/parentSelectors.py b/ravenframework/Optimizers/parentSelectors/parentSelectors.py
index fc82522271..5ddc0d87ae 100644
--- a/ravenframework/Optimizers/parentSelectors/parentSelectors.py
+++ b/ravenframework/Optimizers/parentSelectors/parentSelectors.py
@@ -21,10 +21,15 @@
Created June,16,2020
@authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi
"""
-
+# External Modules----------------------------------------------------------------------------------
import numpy as np
import xarray as xr
from ...utils import randomUtils
+# External Modules End------------------------------------------------------------------------------
+
+# Internal Modules----------------------------------------------------------------------------------
+from ...utils.gaUtils import dataArrayToDict, datasetToDataArray
+# Internal Modules End------------------------------------------------------------------------------
# For mandd: to be updated with RAVEN official tools
from itertools import combinations
@@ -42,7 +47,7 @@ def rouletteWheel(population,**kwargs):
"""
# Arguments
pop = population
- fitness = kwargs['fitness']
+ fitness = np.array([item for sublist in datasetToDataArray(kwargs['fitness'], list(kwargs['fitness'].keys())).data for item in sublist])
nParents= kwargs['nParents']
# if nparents = population size then do nothing (whole population are parents)
if nParents == pop.shape[0]:
@@ -62,11 +67,11 @@ def rouletteWheel(population,**kwargs):
roulettePointer = randomUtils.random(dim=1, samples=1)
# initialize Probability
counter = 0
- if np.all(fitness.data>=0) or np.all(fitness.data<=0):
- selectionProb = fitness.data/np.sum(fitness.data) # Share of the pie (rouletteWheel)
+ if np.all(fitness>=0) or np.all(fitness<=0):
+ selectionProb = fitness/np.sum(fitness) # Share of the pie (rouletteWheel)
else:
# shift the fitness to be all positive
- shiftedFitness = fitness.data + abs(min(fitness.data))
+ shiftedFitness = fitness + abs(min(fitness))
selectionProb = shiftedFitness/np.sum(shiftedFitness) # Share of the pie (rouletteWheel)
sumProb = selectionProb[counter]
@@ -78,6 +83,9 @@ def rouletteWheel(population,**kwargs):
fitness = np.delete(fitness,counter,axis=0)
return selectedParent
+def countConstViolation(const):
+  """
+    Counts the constraint violations for a single chromosome
+    @ In, const, iterable, constraint evaluations (negative means violated)
+    @ Out, count, int, number of violated constraints
+  """
+  return sum(1 for i in const if i < 0)
+
def tournamentSelection(population,**kwargs):
"""
Tournament Selection mechanism for parent selection
@@ -88,25 +96,29 @@ def tournamentSelection(population,**kwargs):
variables, list, variable names
@ Out, newPopulation, xr.DataArray, selected parents,
"""
- fitness = kwargs['fitness']
+
nParents= kwargs['nParents']
pop = population
popSize = population.values.shape[0]
if 'rank' in kwargs:
# the key rank is used in multi-objective optimization where rank identifies which front the point belongs to
rank = kwargs['rank']
+ crowdDistance = kwargs['crowdDistance']
+ # constraintInfo = kwargs['constraint']
multiObjectiveRanking = True
- matrixOperationRaw = np.zeros((popSize,3))
+ matrixOperationRaw = np.zeros((popSize, 3)) #NOTE a fourth column is needed here if constraint violations are used to eliminate chromosomes.
matrixOperationRaw[:,0] = np.transpose(np.arange(popSize))
- matrixOperationRaw[:,1] = np.transpose(fitness.data)
+ matrixOperationRaw[:,1] = np.transpose(crowdDistance.data)
matrixOperationRaw[:,2] = np.transpose(rank.data)
- matrixOperation = np.zeros((popSize,3))
+ # matrixOperationRaw[:,3] = np.transpose(constraintInfo.data)
+ matrixOperation = np.zeros((popSize,len(matrixOperationRaw[0])))
else:
+ fitness = np.array([item for sublist in datasetToDataArray(kwargs['fitness'], list(kwargs['fitness'].keys())).data for item in sublist])
multiObjectiveRanking = False
matrixOperationRaw = np.zeros((popSize,2))
matrixOperationRaw[:,0] = np.transpose(np.arange(popSize))
- matrixOperationRaw[:,1] = np.transpose(fitness.data)
+ matrixOperationRaw[:,1] = np.transpose(fitness)
matrixOperation = np.zeros((popSize,2))
indexes = list(np.arange(popSize))
@@ -118,30 +130,27 @@ def tournamentSelection(population,**kwargs):
for idx, val in enumerate(indexesShuffled):
matrixOperation[idx,:] = matrixOperationRaw[val,:]
- selectedParent = xr.DataArray(
- np.zeros((nParents,np.shape(pop)[1])),
- dims=['chromosome','Gene'],
- coords={'chromosome':np.arange(nParents),
- 'Gene': kwargs['variables']})
+ selectedParent = xr.DataArray(np.zeros((nParents,np.shape(pop)[1])),
+ dims=['chromosome','Gene'],
+ coords={'chromosome':np.arange(nParents),
+ 'Gene': kwargs['variables']})
- if not multiObjectiveRanking: # single-objective implementation of tournamentSelection
+ if not multiObjectiveRanking: # single-objective implementation of tournamentSelection
for i in range(nParents):
if matrixOperation[2*i,1] > matrixOperation[2*i+1,1]:
index = int(matrixOperation[2*i,0])
else:
index = int(matrixOperation[2*i+1,0])
selectedParent[i,:] = pop.values[index,:]
- else: # multi-objective implementation of tournamentSelection
- for i in range(nParents-1):
- if matrixOperation[2*i,2] > matrixOperation[2*i+1,2]:
- index = int(matrixOperation[i,0])
- elif matrixOperation[2*i,2] < matrixOperation[2*i+1,2]:
- index = int(matrixOperation[i+1,0])
- else: # same rank case
- if matrixOperation[2*i,1] > matrixOperation[2*i+1,1]:
- index = int(matrixOperation[i,0])
- else:
- index = int(matrixOperation[i+1,0])
+ else: # multi-objective implementation of tournamentSelection
+ for i in range(nParents):
+ if matrixOperation[2*i,2] > matrixOperation[2*i+1,2]: index = int(matrixOperation[2*i+1,0])
+ elif matrixOperation[2*i,2] < matrixOperation[2*i+1,2]: index = int(matrixOperation[2*i,0])
+ elif matrixOperation[2*i,2] == matrixOperation[2*i+1,2]: # if same rank, then compare CD
+ if matrixOperation[2*i,1] > matrixOperation[2*i+1,1]: index = int(matrixOperation[2*i,0])
+ elif matrixOperation[2*i,1] < matrixOperation[2*i+1,1]: index = int(matrixOperation[2*i+1,0])
+ else: # same rank and same CD
+ index = int(matrixOperation[2*i+1,0]) #NOTE if rank and CD are same, then any chromosome can be selected.
selectedParent[i,:] = pop.values[index,:]
return selectedParent
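The multi-objective branch above is NSGA-II's crowded-comparison operator: the lower (better) front wins, and a rank tie is broken by the larger crowding distance. Distilled (crowdedCompare is an illustrative helper):

def crowdedCompare(rank1, cd1, rank2, cd2):
  # True if candidate 1 beats candidate 2
  if rank1 != rank2:
    return rank1 < rank2
  return cd1 > cd2

print(crowdedCompare(1, 0.2, 2, 5.0))  # True: better front wins regardless of CD
print(crowdedCompare(1, 0.2, 1, 5.0))  # False: same front, larger CD wins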
diff --git a/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py b/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py
index 1b754af494..9702a91192 100644
--- a/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py
+++ b/ravenframework/Optimizers/survivorSelectors/survivorSelectors.py
@@ -21,9 +21,15 @@
Created June,16,2020
@authors: Mohammad Abdo, Diego Mandelli, Andrea Alfonsi
"""
-
+# External Modules----------------------------------------------------------------------------------
import numpy as np
import xarray as xr
+from ravenframework.utils import frontUtils
+# External Modules End------------------------------------------------------------------------------
+
+# Internal Modules----------------------------------------------------------------------------------
+from ...utils.gaUtils import dataArrayToDict, datasetToDataArray
+# Internal Modules End------------------------------------------------------------------------------
# @profile
def ageBased(newRlz,**kwargs):
@@ -80,7 +86,7 @@ def fitnessBased(newRlz,**kwargs):
It combines the parents and children/offsprings then keeps the fittest individuals
to revert to the same population size.
@ In, newRlz, xr.DataSet, containing either a single realization, or a batch of realizations.
- @ In, kwargs, dict, dictionary of parameters for this mutation method:
+ @ In, kwargs, dict, dictionary of parameters for this survivor selection method:
age, list, ages of each chromosome in the population of the previous generation
offSpringsFitness, xr.DataArray, fitness of each new child, i.e., np.shape(offSpringsFitness) = nChildren x nGenes
variables
@@ -96,11 +102,12 @@ def fitnessBased(newRlz,**kwargs):
else:
popAge = kwargs['age']
- offSpringsFitness = np.atleast_1d(kwargs['offSpringsFitness'])
+ offSpringsFitness = datasetToDataArray(kwargs['offSpringsFitness'], list(kwargs['offSpringsFitness'].keys())).data
+ offSpringsFitness = np.array([item for sublist in offSpringsFitness for item in sublist])
offSprings = np.atleast_2d(newRlz[kwargs['variables']].to_array().transpose().data)
population = np.atleast_2d(kwargs['population'].data)
- popFitness = np.atleast_1d(kwargs['fitness'].data)
-
+ popFitness = datasetToDataArray(kwargs['fitness'], list(kwargs['fitness'].keys())).data
+ popFitness = np.array([item for sublist in popFitness for item in sublist])
newPopulation = population
newFitness = popFitness
newAge = list(map(lambda x:x+1, popAge))
@@ -116,19 +123,121 @@ def fitnessBased(newRlz,**kwargs):
newAge = sortedAgeT[:-len(offSprings)]
newPopulationArray = xr.DataArray(newPopulationSorted,
- dims=['chromosome','Gene'],
- coords={'chromosome':np.arange(np.shape(newPopulationSorted)[0]),
- 'Gene': kwargs['variables']})
+ dims=['chromosome','Gene'],
+ coords={'chromosome':np.arange(np.shape(newPopulationSorted)[0]),
+ 'Gene': kwargs['variables']})
newFitness = xr.DataArray(newFitness,
dims=['chromosome'],
coords={'chromosome':np.arange(np.shape(newFitness)[0])})
+ newFitness = newFitness.to_dataset(name = list(kwargs['fitness'].keys())[0])
#return newPopulationArray,newFitness,newAge
return newPopulationArray,newFitness,newAge,kwargs['popObjectiveVal']
+# @profile
+def rankNcrowdingBased(offsprings, **kwargs):
+ """
+ rankNcrowdingBased survivorSelection mechanism for new generation selection
+ It combines the parents and children/offsprings then calculates their rank and crowding distance.
+ After having ranks and crowding distances, it keeps the lowest ranks (and the highest crowding distance if individuals have the same rank).
+ @ In, offsprings, xr.DataSet, containing either a single realization, or a batch of realizations.
+ @ In, kwargs, dict, dictionary of parameters for this survivor selection method:
+ variables
+ population
+ @ Out, newPopulation, xr.DataArray, newPopulation for the new generation, i.e. np.shape(newPopulation) = populationSize x nGenes.
+ @ Out, newRank, xr.DataArray, rank of each chromosome in the new population
+ @ Out, newCD, xr.DataArray, crowding distance of each chromosome in the new population.
+ """
+ popSize = np.shape(kwargs['population'])[0]
+ if 'age' not in kwargs or kwargs['age'] is None:
+ popAge = [0]*popSize
+ else:
+ popAge = kwargs['age']
+
+ population = np.atleast_2d(kwargs['population'].data)
+ offSprings = np.atleast_2d(offsprings[kwargs['variables']].to_array().transpose().data)
+ popObjectiveVal = kwargs['popObjectiveVal']
+ offObjectiveVal = kwargs['offObjectiveVal']
+ popFit = kwargs['popFit']
+ popFitArray = []
+ offFit = kwargs['offFit']
+ offFitArray = []
+ for i in list(popFit.keys()): #NOTE popFit.keys() and offFit.keys() must be same.
+ popFitArray.append(popFit[i].data.tolist())
+ offFitArray.append(offFit[i].data.tolist())
+
+ newFitMerged = np.array([i + j for i, j in zip(popFitArray, offFitArray)])
+ newFitMerged_pair = [list(ele) for ele in list(zip(*newFitMerged))]
+
+ popConstV = kwargs['popConstV'].data
+ offConstV = kwargs['offConstV'].data
+ newConstVMerged = np.array(popConstV.tolist() + offConstV.tolist())
+
+ newObjectivesMerged = np.array([i + j for i, j in zip(popObjectiveVal, offObjectiveVal)])
+ newObjectivesMerged_pair = [list(ele) for ele in list(zip(*newObjectivesMerged))]
+
+ newPopRank = frontUtils.rankNonDominatedFrontiers(np.array(newFitMerged_pair))
+ newPopRank = xr.DataArray(newPopRank,
+ dims=['rank'],
+ coords={'rank': np.arange(np.shape(newPopRank)[0])})
+
+ newPopCD = frontUtils.crowdingDistance(rank=newPopRank, popSize=len(newPopRank), objectives=np.array(newFitMerged_pair))
+ newPopCD = xr.DataArray(newPopCD,
+ dims=['CrowdingDistance'],
+ coords={'CrowdingDistance': np.arange(np.shape(newPopCD)[0])})
+
+ newAge = list(map(lambda x:x+1, popAge))
+ newPopulationMerged = np.concatenate([population,offSprings])
+ newAge.extend([0]*len(offSprings))
+
+ sortedRank,sortedCD,sortedAge,sortedPopulation,sortedFit,sortedObjectives,sortedConstV = \
+ zip(*[(x,y,z,i,j,k,a) for x,y,z,i,j,k,a in \
+ sorted(zip(newPopRank.data, newPopCD.data, newAge, newPopulationMerged.tolist(), newFitMerged_pair, newObjectivesMerged_pair, newConstVMerged),reverse=False,key=lambda x: (x[0], -x[1]))])
+ sortedRankT, sortedCDT, sortedAgeT, sortedPopulationT, sortedFitT, sortedObjectivesT, sortedConstVT = \
+ np.atleast_1d(list(sortedRank)), list(sortedCD), list(sortedAge),np.atleast_1d(list(sortedPopulation)),np.atleast_1d(list(sortedFit)),np.atleast_1d(list(sortedObjectives)),np.atleast_1d(list(sortedConstV))
+
+ newPopulation = sortedPopulationT[:-len(offSprings)]
+ newObjectives = sortedObjectivesT[:-len(offSprings)]
+ newFit = sortedFitT[:-len(offSprings)]
+
+ newRank = frontUtils.rankNonDominatedFrontiers(newObjectives)
+ newRank = xr.DataArray(newRank,
+ dims=['rank'],
+ coords={'rank': np.arange(np.shape(newRank)[0])})
+
+ newObjectivesP = [list(ele) for ele in list(zip(*newObjectives))]
+ newCD = frontUtils.crowdingDistance(rank=newRank, popSize=len(newRank), objectives=newObjectives)
+ newCD = xr.DataArray(newCD,
+ dims=['CrowdingDistance'],
+ coords={'CrowdingDistance': np.arange(np.shape(newCD)[0])})
+
+ newAge = sortedAgeT[:-len(offSprings)]
+ newConstV = sortedConstVT[:-len(offSprings)]
+
+ for i in range(len(list(popFit.keys()))):
+ fitness = xr.DataArray(newFit[:,i],
+ dims=['chromosome'],
+ coords={'chromosome': np.arange(len(newFit[:,i]))})
+ if i == 0:
+ newFitnessSet = fitness.to_dataset(name = list(popFit.keys())[i])
+ else:
+ newFitnessSet[list(popFit.keys())[i]] = fitness
+
+ newPopulationArray = xr.DataArray(newPopulation,
+ dims=['chromosome','Gene'],
+ coords={'chromosome':np.arange(np.shape(newPopulation)[0]),
+ 'Gene': kwargs['variables']})
+ newConstV = xr.DataArray(newConstV,
+ dims=['chromosome','ConstEvaluation'],
+ coords={'chromosome':np.arange(np.shape(newPopulation)[0]),
+ 'ConstEvaluation':np.arange(np.shape(newConstV)[1])})
+
+ return newPopulationArray,newRank,newAge,newCD,newObjectivesP,newFitnessSet,newConstV
+
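Stripped of the bookkeeping, rankNcrowdingBased performs the standard NSGA-II survivor truncation: merge parents and offspring, sort by (rank ascending, crowding distance descending), and keep the first popSize individuals. A minimal sketch, where ranks and cds would come from frontUtils.rankNonDominatedFrontiers and frontUtils.crowdingDistance:

def truncateByRankAndCD(merged, ranks, cds, popSize):
  # lower rank first; within a rank, larger crowding distance first
  order = sorted(range(len(merged)), key=lambda i: (ranks[i], -cds[i]))
  return [merged[i] for i in order[:popSize]]

merged = ['p1', 'p2', 'c1', 'c2']
print(truncateByRankAndCD(merged, ranks=[1, 2, 1, 1],
                          cds=[0.5, 9.9, float('inf'), 0.1], popSize=2))
# ['c1', 'p1'] -> rank-1 individuals, highest crowding distance first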
__survivorSelectors = {}
__survivorSelectors['ageBased'] = ageBased
__survivorSelectors['fitnessBased'] = fitnessBased
+__survivorSelectors['rankNcrowdingBased'] = rankNcrowdingBased
def returnInstance(cls, name):
"""
diff --git a/ravenframework/utils/frontUtils.py b/ravenframework/utils/frontUtils.py
index c63eca57fc..8bb0e11c72 100644
--- a/ravenframework/utils/frontUtils.py
+++ b/ravenframework/utils/frontUtils.py
@@ -44,6 +44,7 @@ def nonDominatedFrontier(data, returnMask, minMask=None):
Reference: the following code has been adapted from https://stackoverflow.com/questions/32791911/fast-calculation-of-pareto-front-in-python
"""
+
if minMask is None:
pass
elif minMask is not None and minMask.shape[0] != data.shape[1]:
@@ -56,8 +57,8 @@ def nonDominatedFrontier(data, returnMask, minMask=None):
isEfficient = np.arange(data.shape[0])
nPoints = data.shape[0]
nextPointIndex = 0
-    while nextPointIndex<len(data):
diff --git a/tests/framework/AnalyticModels/optimizing/myConstraints.py b/tests/framework/AnalyticModels/optimizing/myConstraints.py
--- a/tests/framework/AnalyticModels/optimizing/myConstraints.py
+++ b/tests/framework/AnalyticModels/optimizing/myConstraints.py
+def implicitConstraint(Input):
+  """
+    Evaluates the implicit constraint function at a given point/solution ($\vec(x)$)
+    @ In, Input, object, RAVEN container
+    @ Out, g, float, implicit constraint evaluation function;
+          the constraint is designed so that the constraint function has to be >= 0,
+ so if:
+ 1) f(x,y) >= 0 then g = f
+ 2) f(x,y) >= a then g = f - a
+ 3) f(x,y) <= b then g = b - f
+ 4) f(x,y) = c then g = 1e-6 - abs((f(x,y) - c)) (equality constraint)
+ """
+ g = eval(Input.name)(Input)
+ return g
+
+
+def expConstr1(Input): # the function name is free, but it must match the name referenced in the xml input
+ """
+ Let's assume that the constraint is:
+ $ x3+x4 < 8 $
+ then g, the constraint evaluation function (which has to be > 0), is taken to be:
+ g = 8 - (x3+x4)
+ in this case if g(\vec(x)) < 0 then this x violates the constraint and vice versa
+ @ In, Input, object, RAVEN container
+ @ out, g, float, explicit constraint 1 evaluation function
+ """
+ g = 8 - Input.x3 - Input.x4
+ return g
+
+def expConstr2(Input):
+ """
+ Explicit Equality Constraint:
+ let's consider the constraint x1**2 + x2**2 = 25
+ The way to write g is to use a very small number for instance, epsilon = 1e-12
+ and then g = epsilon - abs(constraint)
+ @ In, Input, object, RAVEN container
+ @ Out, g, float, explicit constraint 2 evaluation function
+ """
+ g = 1e-12 - abs(Input.x1**2 + Input.x2**2 - 25)
+ return g
+
+def expConstr3(Input):
+ """
+ @ In, Input, object, RAVEN container
+ @ out, g, float, explicit constraint 3 evaluation function
+ """
+ g = 10 - Input.x3 - Input.x4
+ return g
+
+def impConstr1(Input):
+ """
+ The implicit constraint involves variables from the output space, for example the objective variable or
+ a dependent variable that is not in the optimization search space
+ @ In, Input, object, RAVEN container
+ @ Out, g, float, implicit constraint 1 evaluation function
+ """
+ g = 10 - Input.x1**2 - Input.obj
+ return g
+
+def impConstr2(Input):
+ """
+ The implicit constraint involves variables from the output space, for example the objective variable or
+ a dependent variable that is not in the optimization search space
+ @ In, Input, object, RAVEN container
+ @ Out, g, float, implicit constraint 2 evaluation function
+ """
+ g = Input.x1**2 + Input.obj1 - 10
+ return g
+
+def impConstr3(Input):
+ """
+ The implicit constraint involves variables from the output space, for example the objective variable or
+ a dependent variable that is not in the optimization search space
+ @ In, Input, object, RAVEN container
+ @ Out, g, float, implicit constraint 3 evaluation function
+ """
+ g = 100 - Input.obj1
+ return g
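A quick check of the g >= 0 convention used by all the constraints above (a stand-in Box object replaces the RAVEN container):

class Box:
  # stand-in for the RAVEN container; only attribute access is needed
  pass

Input = Box()
Input.x3, Input.x4 = 3, 4
print(expConstr3(Input))  # 10 - 3 - 4 = 3   -> satisfied (g > 0)
Input.obj1 = 120
print(impConstr3(Input))  # 100 - 120 = -20  -> violated (g < 0)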
diff --git a/tests/framework/AnalyticModels/optimizing/myLocalSum.py b/tests/framework/AnalyticModels/optimizing/myLocalSum.py
index faec353eb6..d4b6fec246 100644
--- a/tests/framework/AnalyticModels/optimizing/myLocalSum.py
+++ b/tests/framework/AnalyticModels/optimizing/myLocalSum.py
@@ -81,5 +81,4 @@ def constrain(self):
and negative if violated.
"""
explicitConstrain = constraint(self)
- return explicitConstrain
-
+ return explicitConstrain
\ No newline at end of file
diff --git a/tests/framework/AnalyticModels/optimizing/myLocalSum_multi.py b/tests/framework/AnalyticModels/optimizing/myLocalSum_multi.py
new file mode 100644
index 0000000000..2bad9f6b44
--- /dev/null
+++ b/tests/framework/AnalyticModels/optimizing/myLocalSum_multi.py
@@ -0,0 +1,37 @@
+# Copyright 2017 Battelle Energy Alliance, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# @author: Mohammad Abdo (@Jimmy-INL)
+
+def evaluate(Inputs):
+  """
+    Evaluates one global weighted sum and two local weighted sums of the inputs
+    @ In, Inputs, dict, RAVEN input variables
+    @ Out, (Sum, LocalSum1, LocalSum2), tuple, the three objective values
+  """
+  Sum = 0
+ LocalSum1 = 0
+ LocalSum2 = 0
+ for ind,var in enumerate(Inputs.keys()):
+ # write the objective function here
+ Sum += (ind + 1) * Inputs[var]
+ if (ind == 0) or (ind == 1):
+ LocalSum1 += (ind + 1) * Inputs[var]
+ if (ind == 2) or (ind == 3):
+ LocalSum2 += (ind + 1) * Inputs[var]
+ return Sum[:], LocalSum1[:], LocalSum2[:]
+
+def run(self,Inputs):
+ """
+ RAVEN API
+ @ In, self, object, RAVEN container
+ @ In, Inputs, dict, additional inputs
+ @ Out, None
+ """
+ self.obj1,self.obj2,self.obj3 = evaluate(Inputs)
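A worked example of these objectives, assuming the six inputs arrive as length-1 arrays (the [:] slicing in the return implies array-valued inputs):

import numpy as np

Inputs = {f'x{i+1}': np.array([float(i + 1)]) for i in range(6)}  # x1..x6 = 1..6
Sum, local1, local2 = evaluate(Inputs)
print(Sum)     # [91.] = 1*1 + 2*2 + 3*3 + 4*4 + 5*5 + 6*6
print(local1)  # [5.]  = 1*1 + 2*2
print(local2)  # [25.] = 3*3 + 4*4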
diff --git a/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/ZDT1.xml b/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/ZDT1.xml
new file mode 100644
index 0000000000..d100a928b0
--- /dev/null
+++ b/tests/framework/Optimizers/GeneticAlgorithms/continuous/unconstrained/ZDT1.xml
@@ -0,0 +1,132 @@
+
+
+
+ raven/tests/framework/Optimizers/GA.MultiObjZDT1
+ Junyung Kim
+ 2023-02-21
+
+ ZDT1 test using NSGA-II
+
+
+
+ ZDT1
+ optimize,print
+ 1
+
+
+
+
+ placeholder
+ ZDT
+ GAopt
+ opt_export
+
+
+
+
+ opt_export
+ optOut
+
+
+
+
+
+
+
+ x1,x2,x3,obj1,obj2
+
+
+
+
+
+ 0
+ 1
+
+
+
+
+
+
+ 15
+ 42
+ every
+ min,min
+
+
+
+ 10
+ tournamentSelection
+
+
+ 1.0
+
+
+ 1.0
+
+
+
+
+ rankNcrowdingBased
+
+
+
+ 0.0
+
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+ obj1, obj2
+ optOut
+ MC_samp
+
+
+
+
+
+
+ 10
+ 050877
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+
+
+
+
+
+ x1,x2,x3
+
+
+
+ trajID
+
+
+
+
+
+
+ csv
+
+
+
+ csv
+
+ trajID
+
+
+
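The obj1/obj2 values in the ZDT1 gold CSV below are reproduced by this three-variable form; note the divisor in g is the number of variables n rather than the textbook n-1, which is inferred from the gold data and should be read as this test's convention:

import numpy as np

def zdt1(x):
  f1 = x[0]
  g = 1.0 + 9.0 * np.sum(x[1:]) / len(x)
  f2 = g * (1.0 - np.sqrt(f1 / g))
  return f1, f2

print(zdt1(np.array([0.902940987587, 0.947612243227, 0.840374259707])))
# (0.902940987587, ~3.96682) -- matches the first gold row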
diff --git a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml
new file mode 100644
index 0000000000..1483ffa8f5
--- /dev/null
+++ b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/MinwoRepMultiObjective.xml
@@ -0,0 +1,164 @@
+
+
+
+ raven/tests/framework/Optimizers/GeneticAlgorithms.NSGAII
+ Junyung Kim
+ 2022-12-21
+
+ NSGA-II min-min test
+
+
+
+ Multi_MinwoReplacement_wo_constraints_min_max_min_50_20
+ optimize,print
+ 4
+
+
+
+
+ placeholder
+ myLocalSum
+ GAopt
+ opt_export
+
+
+
+
+ opt_export
+ optOut
+
+
+
+
+
+
+
+ x1,x2,x3,x4,x5,x6,obj1,obj2,obj3
+
+
+
+
+
+ x1,x2,x3,x4,x5,x6
+
+
+
+ x1,x2,x3,x4,x5,x6,obj1
+
+
+
+
+
+ 2
+ 7
+ withoutReplacement
+
+
+
+
+
+
+ 5
+ 42
+ every
+ min, max, min
+
+
+ 50
+ tournamentSelection
+
+
+ 0.8
+
+
+ 0.8
+
+
+
+ 50, 50, 100, 100, 100, 150
+
+ rankNcrowdingBased
+
+
+ 0.0
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+ obj1, obj2, obj3
+ optOut
+ MC_samp
+ expConstr3
+
+ impConstr3
+
+
+
+
+
+
+ 50
+ 050877
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+
+
+
+
+
+
+ x1,x2,x3,x4,x5,x6
+
+
+
+ trajID
+
+
+
+
+
+
+ csv
+
+
+
+ csv
+
+ trajID
+
+
+
diff --git a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/testGAMinwRepConstrained.xml b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/testGAMinwRepConstrained.xml
index 4da8636350..08267e5c89 100644
--- a/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/testGAMinwRepConstrained.xml
+++ b/tests/framework/Optimizers/GeneticAlgorithms/discrete/constrained/testGAMinwRepConstrained.xml
@@ -55,14 +55,14 @@
- 20
+ 542every20
- rouletteWheel
+ tournamentSelection0.8
@@ -71,7 +71,7 @@
0.9
-
+ fitnessBased
diff --git a/tests/framework/Optimizers/GeneticAlgorithms/gold/continuous/unconstrained/ZDT1/opt_export_0.csv b/tests/framework/Optimizers/GeneticAlgorithms/gold/continuous/unconstrained/ZDT1/opt_export_0.csv
new file mode 100644
index 0000000000..25ec63951b
--- /dev/null
+++ b/tests/framework/Optimizers/GeneticAlgorithms/gold/continuous/unconstrained/ZDT1/opt_export_0.csv
@@ -0,0 +1,160 @@
+x1,x2,x3,obj1,obj2,age,batchId,rank,CD,fitness,accepted
+0.902940987587,0.947612243227,0.840374259707,0.902940987587,3.96681957049,0.0,1.0,3.0,inf,0.0,first
+0.227236453264,0.847510234417,0.362760231915,0.227236453264,3.60499993579,0.0,1.0,2.0,inf,0.0,first
+0.766431005617,0.192211290866,0.39961784645,0.766431005617,1.3169883176,0.0,1.0,1.0,inf,0.0,first
+0.633202111729,0.793545654927,0.564774226762,0.633202111729,3.28234279694,0.0,1.0,2.0,2.0,0.0,first
+0.306377726911,0.00975325447734,0.613563748918,0.306377726911,1.93224686343,0.0,1.0,1.0,1.32735741676,0.0,first
+0.110044764846,0.604534715322,0.738899003886,0.110044764846,4.28628616584,0.0,1.0,1.0,inf,0.0,first
+0.331692186261,0.571854743308,0.965348788995,0.331692186261,4.24730587019,0.0,1.0,3.0,inf,0.0,first
+0.267873673297,0.166777967281,0.847808119107,0.267873673297,3.00298144409,0.0,1.0,1.0,0.749061564967,0.0,first
+0.713407223745,0.641621648018,0.334449647072,0.713407223745,2.25417202135,0.0,1.0,2.0,inf,0.0,first
+0.13264102096,0.630711117673,0.405532905694,0.13264102096,3.37050011696,0.0,1.0,1.0,0.672642583243,0.0,first
+0.766431005617,0.192211290866,0.39961784645,0.766431005617,1.3169883176,0.0,2.0,1.0,inf,0.0,accepted
+0.110044764846,0.604534715322,0.738899003886,0.110044764846,4.28628616584,0.0,2.0,1.0,inf,0.0,accepted
+0.306377726911,0.00975325447734,0.613563748918,0.306377726911,1.93224686343,0.0,2.0,1.0,1.17548106192,0.0,accepted
+0.13264102096,0.641621648018,0.000778764719325,0.13264102096,2.3040905251,0.0,2.0,1.0,1.09190519404,0.0,accepted
+0.713407223745,0.192211290866,0.431945021132,0.713407223745,1.44095222672,0.0,2.0,1.0,0.908094805963,0.0,accepted
+0.713407223745,0.604534715322,0.183404509952,0.713407223745,1.81469798087,0.0,2.0,2.0,inf,0.0,accepted
+0.13264102096,0.192211290866,0.4560699904,0.13264102096,2.31985816876,0.0,2.0,2.0,inf,0.0,accepted
+0.713407223745,0.641621648018,0.334449647072,0.713407223745,2.25417202135,0.0,2.0,3.0,inf,0.0,accepted
+0.13264102096,0.630711117673,0.405532905694,0.13264102096,3.37050011696,0.0,2.0,3.0,inf,0.0,accepted
+0.524774661876,0.641621648018,0.39961784645,0.524774661876,2.65265663127,0.0,2.0,3.0,2.0,0.0,accepted
+0.766431005617,0.192211290866,0.39961784645,0.766431005617,1.3169883176,0.0,3.0,1.0,inf,0.0,accepted
+0.110044764846,0.567700327273,0.738899003886,0.110044764846,4.18400045796,0.0,3.0,1.0,inf,0.0,accepted
+0.13264102096,0.192211290866,0.173364647239,0.13264102096,1.56936445005,0.0,3.0,1.0,1.17365382082,0.0,accepted
+0.258779981001,0.00975325447734,0.39961784645,0.258779981001,1.46877733092,0.0,3.0,1.0,0.929582947246,0.0,accepted
+0.713407223745,0.192211290866,0.431945021132,0.713407223745,1.44095222672,0.0,3.0,1.0,0.826346179184,0.0,accepted
+0.110044764846,0.604534715322,0.738899003886,0.110044764846,4.28628616584,0.0,3.0,2.0,inf,0.0,accepted
+0.713407223745,0.604534715322,0.183404509952,0.713407223745,1.81469798087,0.0,3.0,2.0,inf,0.0,accepted
+0.13264102096,0.641621648018,0.000778764719325,0.13264102096,2.3040905251,0.0,3.0,2.0,1.2778379808,0.0,accepted
+0.306377726911,0.00975325447734,0.613563748918,0.306377726911,1.93224686343,0.0,3.0,2.0,1.1605567663,0.0,accepted
+0.306377726911,0.192211290866,0.449754129036,0.306377726911,1.97909667942,0.0,3.0,3.0,inf,0.0,accepted
+0.110044764846,0.567700327273,0.738899003886,0.110044764846,4.18400045796,0.0,4.0,1.0,inf,0.0,accepted
+0.772244771889,0.00975325447734,0.39961784645,0.772244771889,0.916378249788,0.0,4.0,1.0,inf,0.0,accepted
+0.13264102096,0.192211290866,0.173364647239,0.13264102096,1.56936445005,0.0,4.0,1.0,1.05555538382,0.0,accepted
+0.258779981001,0.00975325447734,0.39961784645,0.258779981001,1.46877733092,0.0,4.0,1.0,0.916323733867,0.0,accepted
+0.713407223745,0.192211290866,0.431945021132,0.713407223745,1.44095222672,0.0,4.0,1.0,0.813065271377,0.0,accepted
+0.766431005617,0.192211290866,0.39961784645,0.766431005617,1.3169883176,0.0,4.0,1.0,0.249388538585,0.0,accepted
+0.110044764846,0.604534715322,0.738899003886,0.110044764846,4.28628616584,0.0,4.0,2.0,inf,0.0,accepted
+0.13264102096,0.00975325447734,0.37081825509,0.13264102096,1.60872372044,0.0,4.0,2.0,inf,0.0,accepted
+0.13264102096,0.641621648018,0.000778764719325,0.13264102096,2.3040905251,0.0,4.0,3.0,inf,0.0,accepted
+0.258779981001,0.192211290866,0.293488176375,0.258779981001,1.65969770087,0.0,4.0,3.0,inf,0.0,accepted
+0.107891428309,0.192211290866,0.173364647239,0.107891428309,1.6211030056,4.0,5.0,1.0,inf,0.0,accepted
+0.713407223745,0.00975325447734,0.228798159219,0.713407223745,0.609326925579,4.0,5.0,1.0,inf,0.0,accepted
+0.314355981377,0.00975325447734,0.173364647239,0.314355981377,0.851465028867,4.0,5.0,1.0,1.6002571283,0.0,accepted
+0.258779981001,0.00975325447734,0.39961784645,0.258779981001,1.46877733092,4.0,5.0,1.0,1.00964323224,0.0,accepted
+0.13264102096,0.192211290866,0.173364647239,0.13264102096,1.56936445005,4.0,5.0,1.0,0.399742871701,0.0,accepted
+0.772244771889,0.00975325447734,0.39961784645,0.772244771889,0.916378249788,4.0,5.0,2.0,inf,0.0,accepted
+0.110044764846,0.192211290866,0.624354044354,0.110044764846,2.83356210322,4.0,5.0,2.0,inf,0.0,accepted
+0.13264102096,0.00975325447734,0.37081825509,0.13264102096,1.60872372044,4.0,5.0,2.0,1.67384162643,0.0,accepted
+0.713407223745,0.161221285621,0.431945021132,0.713407223745,1.37133891227,4.0,5.0,2.0,1.10926623497,0.0,accepted
+0.766431005617,0.192211290866,0.39961784645,0.766431005617,1.3169883176,4.0,5.0,2.0,0.326158373566,0.0,accepted
+0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,1.0,6.0,1.0,inf,0.0,accepted
+0.696304270694,0.00975325447734,0.173364647239,0.696304270694,0.510690332486,1.0,6.0,1.0,inf,0.0,accepted
+0.314355981377,0.00975325447734,0.173364647239,0.314355981377,0.851465028867,1.0,6.0,1.0,1.10932398766,0.0,accepted
+0.13264102096,0.00975325447734,0.269412333441,0.13264102096,1.34380916339,1.0,6.0,1.0,0.890676012342,0.0,accepted
+0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,1.0,6.0,1.0,0.712923687869,0.0,accepted
+0.713407223745,0.00975325447734,0.228798159219,0.713407223745,0.609326925579,1.0,6.0,2.0,inf,0.0,accepted
+0.0648922466358,0.00975325447734,0.37081825509,0.0648922466358,1.76891341897,1.0,6.0,2.0,inf,0.0,accepted
+0.222107806295,0.00975325447734,0.228798159219,0.222107806295,1.09835350413,1.0,6.0,2.0,1.72344612754,0.0,accepted
+0.13264102096,0.192211290866,0.173364647239,0.13264102096,1.56936445005,1.0,6.0,2.0,0.626926698592,0.0,accepted
+0.107891428309,0.192211290866,0.173364647239,0.107891428309,1.6211030056,1.0,6.0,2.0,0.27655387246,0.0,accepted
+0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,1.0,7.0,1.0,inf,0.0,accepted
+0.696304270694,0.00975325447734,0.173364647239,0.696304270694,0.510690332486,1.0,7.0,1.0,inf,0.0,accepted
+0.535774680445,0.00975325447734,0.228798159219,0.535774680445,0.756902916305,1.0,7.0,1.0,0.865064939691,0.0,accepted
+0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,1.0,7.0,1.0,0.712923687869,0.0,accepted
+0.314355981377,0.00975325447734,0.173364647239,0.314355981377,0.851465028867,1.0,7.0,1.0,0.651756198699,0.0,accepted
+0.13264102096,0.00975325447734,0.269412333441,0.13264102096,1.34380916339,1.0,7.0,1.0,0.634832572855,0.0,accepted
+0.0648922466358,0.00975325447734,0.242159936633,0.0648922466358,1.41819863445,1.0,7.0,1.0,0.422011372441,0.0,accepted
+0.713407223745,0.00975325447734,0.228798159219,0.713407223745,0.609326925579,1.0,7.0,2.0,inf,0.0,accepted
+0.00695213070301,0.00975325447734,0.367783134656,0.00695213070301,2.01084637477,1.0,7.0,2.0,inf,0.0,accepted
+0.222107806295,0.00975325447734,0.228798159219,0.222107806295,1.09835350413,1.0,7.0,2.0,2.0,0.0,accepted
+0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,2.0,8.0,1.0,inf,0.0,accepted
+0.696304270694,0.00975325447734,0.173364647239,0.696304270694,0.510690332486,2.0,8.0,1.0,inf,0.0,accepted
+0.535774680445,0.00975325447734,0.228798159219,0.535774680445,0.756902916305,2.0,8.0,1.0,0.865064939691,0.0,accepted
+0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,2.0,8.0,1.0,0.712923687869,0.0,accepted
+0.314355981377,0.00975325447734,0.173364647239,0.314355981377,0.851465028867,2.0,8.0,1.0,0.651756198699,0.0,accepted
+0.13264102096,0.00975325447734,0.269412333441,0.13264102096,1.34380916339,2.0,8.0,1.0,0.634832572855,0.0,accepted
+0.0648922466358,0.00975325447734,0.242159936633,0.0648922466358,1.41819863445,2.0,8.0,1.0,0.422011372441,0.0,accepted
+0.713407223745,0.00975325447734,0.228798159219,0.713407223745,0.609326925579,2.0,8.0,2.0,inf,0.0,accepted
+0.00695213070301,0.00975325447734,0.367783134656,0.00695213070301,2.01084637477,2.0,8.0,2.0,inf,0.0,accepted
+0.222107806295,0.00975325447734,0.228798159219,0.222107806295,1.09835350413,2.0,8.0,2.0,2.0,0.0,accepted
+0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,0.0,9.0,1.0,inf,0.0,accepted
+0.696304270694,0.00975325447734,0.173364647239,0.696304270694,0.510690332486,0.0,9.0,1.0,inf,0.0,accepted
+0.314355981377,0.00975325447734,0.173364647239,0.314355981377,0.851465028867,0.0,9.0,1.0,0.651756198699,0.0,accepted
+0.13264102096,0.00975325447734,0.269412333441,0.13264102096,1.34380916339,0.0,9.0,1.0,0.513796266194,0.0,accepted
+0.178822707194,0.00975325447734,0.173364647239,0.178822707194,1.02298916991,0.0,9.0,1.0,0.468664639901,0.0,accepted
+0.535774680445,0.00975325447734,0.228798159219,0.535774680445,0.756902916305,0.0,9.0,1.0,0.454750192604,0.0,accepted
+0.0648922466358,0.00975325447734,0.237249083872,0.0648922466358,1.40488522668,0.0,9.0,1.0,0.422011372441,0.0,accepted
+0.649632900872,0.00975325447734,0.242159936633,0.649632900872,0.68775727376,0.0,9.0,1.0,0.410314747086,0.0,accepted
+0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,0.0,9.0,1.0,0.353145324752,0.0,accepted
+0.568308599426,0.00975325447734,0.242159936633,0.568308599426,0.756839229015,0.0,9.0,1.0,0.228270418055,0.0,accepted
+0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,1.0,10.0,1.0,inf,0.0,accepted
+0.696304270694,0.00975325447734,0.173364647239,0.696304270694,0.510690332486,1.0,10.0,1.0,inf,0.0,accepted
+0.314355981377,0.00975325447734,0.173364647239,0.314355981377,0.851465028867,1.0,10.0,1.0,0.651756198699,0.0,accepted
+0.13264102096,0.00975325447734,0.269412333441,0.13264102096,1.34380916339,1.0,10.0,1.0,0.513796266194,0.0,accepted
+0.535774680445,0.00975325447734,0.228798159219,0.535774680445,0.756902916305,1.0,10.0,1.0,0.454750192604,0.0,accepted
+0.0648922466358,0.00975325447734,0.237249083872,0.0648922466358,1.40488522668,1.0,10.0,1.0,0.422011372441,0.0,accepted
+0.649632900872,0.00975325447734,0.242159936633,0.649632900872,0.68775727376,1.0,10.0,1.0,0.410314747086,0.0,accepted
+0.178822707194,0.00975325447734,0.173364647239,0.178822707194,1.02298916991,1.0,10.0,1.0,0.468664639901,0.0,accepted
+0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,1.0,10.0,1.0,0.353145324752,0.0,accepted
+0.568308599426,0.00975325447734,0.242159936633,0.568308599426,0.756839229015,1.0,10.0,1.0,0.228270418055,0.0,accepted
+0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,2.0,11.0,1.0,inf,0.0,accepted
+0.696304270694,0.00518486043559,0.173364647239,0.696304270694,0.501589228366,2.0,11.0,1.0,inf,0.0,accepted
+0.314355981377,0.00975325447734,0.173364647239,0.314355981377,0.851465028867,2.0,11.0,1.0,0.650175533333,0.0,accepted
+0.535774680445,0.00975325447734,0.228798159219,0.535774680445,0.756902916305,2.0,11.0,1.0,0.634536882707,0.0,accepted
+0.0648922466358,0.00975325447734,0.237249083872,0.0648922466358,1.40488522668,2.0,11.0,1.0,0.420037013968,0.0,accepted
+0.649632900872,0.00975325447734,0.242159936633,0.649632900872,0.68775727376,2.0,11.0,1.0,0.463954261861,0.0,accepted
+0.155041618542,0.00975325447734,0.228798159219,0.155041618542,1.19990448549,2.0,11.0,1.0,0.357366573279,0.0,accepted
+0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,2.0,11.0,1.0,0.351855878369,0.0,accepted
+0.13264102096,0.00975325447734,0.269412333441,0.13264102096,1.34380916339,2.0,11.0,1.0,0.31630181039,0.0,accepted
+0.178822707194,0.00975325447734,0.173364647239,0.178822707194,1.02298916991,2.0,11.0,1.0,0.303090139359,0.0,accepted
+0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,6.0,12.0,1.0,inf,0.0,accepted
+0.696304270694,0.00518486043559,0.173364647239,0.696304270694,0.501589228366,6.0,12.0,1.0,inf,0.0,accepted
+0.314355981377,0.00975325447734,0.173364647239,0.314355981377,0.851465028867,6.0,12.0,1.0,0.650175533333,0.0,accepted
+0.649632900872,0.00518486043559,0.184333673023,0.649632900872,0.559107570583,6.0,12.0,1.0,0.444060435829,0.0,accepted
+0.535774680445,0.00975325447734,0.228798159219,0.535774680445,0.756902916305,6.0,12.0,1.0,0.426680098555,0.0,accepted
+0.0648922466358,0.00975325447734,0.237249083872,0.0648922466358,1.40488522668,6.0,12.0,1.0,0.420037013968,0.0,accepted
+0.155041618542,0.00975325447734,0.228798159219,0.155041618542,1.19990448549,6.0,12.0,1.0,0.465833190838,0.0,accepted
+0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,6.0,12.0,1.0,0.546479400168,0.0,accepted
+0.540635119784,0.00518486043559,0.228798159219,0.540635119784,0.742713199876,6.0,12.0,1.0,0.344191262312,0.0,accepted
+0.13264102096,0.00975325447734,0.269412333441,0.13264102096,1.34380916339,6.0,12.0,1.0,0.31630181039,0.0,accepted
+0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,7.0,13.0,1.0,inf,0.0,accepted
+0.696304270694,0.00518486043559,0.173364647239,0.696304270694,0.501589228366,7.0,13.0,1.0,inf,0.0,accepted
+0.314355981377,0.00518486043559,0.0460026422623,0.314355981377,0.551375432252,7.0,13.0,1.0,1.11412979519,0.0,accepted
+0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,7.0,13.0,1.0,0.818090041033,0.0,accepted
+0.155041618542,0.00975325447734,0.228798159219,0.155041618542,1.19990448549,7.0,13.0,1.0,0.527591261357,0.0,accepted
+0.0480589254405,0.00975325447734,0.173364647239,0.0480589254405,1.27647960561,7.0,13.0,1.0,0.368715073082,0.0,accepted
+0.0171611045993,0.00975325447734,0.173364647239,0.0171611045993,1.38629352915,7.0,13.0,1.0,0.358278943448,0.0,accepted
+0.649632900872,0.00518486043559,0.184333673023,0.649632900872,0.559107570583,7.0,13.0,2.0,inf,0.0,accepted
+0.00695213070301,0.00975325447734,0.34187967245,0.00695213070301,1.93537503877,7.0,13.0,2.0,inf,0.0,accepted
+0.13264102096,0.00975325447734,0.269412333441,0.13264102096,1.34380916339,7.0,13.0,2.0,2.0,0.0,accepted
+0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,1.0,14.0,1.0,inf,0.0,accepted
+0.649632900872,0.00518486043559,0.0944429603625,0.649632900872,0.380298990737,1.0,14.0,1.0,inf,0.0,accepted
+0.314355981377,0.00518486043559,0.0460026422623,0.314355981377,0.551375432252,1.0,14.0,1.0,1.149052421,0.0,accepted
+0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,1.0,14.0,1.0,0.776808582325,0.0,accepted
+0.155041618542,0.00975325447734,0.228798159219,0.155041618542,1.19990448549,1.0,14.0,1.0,0.46393693418,0.0,accepted
+0.0171611045993,0.00975325447734,0.173364647239,0.0171611045993,1.38629352915,1.0,14.0,1.0,0.333067034374,0.0,accepted
+0.0693613020865,0.00975325447734,0.184333673023,0.0693613020865,1.2509789265,1.0,14.0,1.0,0.228915255524,0.0,accepted
+0.0480589254405,0.00975325447734,0.173364647239,0.0480589254405,1.27647960561,1.0,14.0,1.0,0.191580577061,0.0,accepted
+0.696304270694,0.00518486043559,0.173364647239,0.696304270694,0.501589228366,1.0,14.0,2.0,inf,0.0,accepted
+0.00695213070301,0.00975325447734,0.34187967245,0.00695213070301,1.93537503877,1.0,14.0,2.0,inf,0.0,accepted
+0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,4.0,15.0,1.0,inf,0.0,accepted
+0.902552906634,0.00518486043559,0.0460026422623,0.902552906634,0.13319434186,4.0,15.0,1.0,inf,0.0,accepted
+0.649632900872,0.00518486043559,0.0944429603625,0.649632900872,0.380298990737,4.0,15.0,1.0,0.94061235925,0.0,accepted
+0.314355981377,0.00518486043559,0.0460026422623,0.314355981377,0.551375432252,4.0,15.0,1.0,0.879443568797,0.0,accepted
+0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,4.0,15.0,1.0,0.618089346206,0.0,accepted
+0.155041618542,0.00975325447734,0.228798159219,0.155041618542,1.19990448549,4.0,15.0,1.0,0.35946352349,0.0,accepted
+0.0171611045993,0.00975325447734,0.173364647239,0.0171611045993,1.38629352915,4.0,15.0,1.0,0.269867635023,0.0,accepted
+0.0693613020865,0.00975325447734,0.184333673023,0.0693613020865,1.2509789265,4.0,15.0,1.0,0.171430659521,0.0,accepted
+0.0480589254405,0.00975325447734,0.173364647239,0.0480589254405,1.27647960561,4.0,15.0,1.0,0.150132989794,0.0,accepted
+0.696304270694,0.00518486043559,0.173364647239,0.696304270694,0.501589228366,4.0,15.0,2.0,inf,0.0,accepted
+0.00695213070301,0.00975325447734,0.228798159219,0.00695213070301,1.60644136398,4.0,15.0,1.0,inf,0.0,final
+0.902552906634,0.00518486043559,0.0460026422623,0.902552906634,0.13319434186,4.0,15.0,1.0,inf,0.0,final
+0.649632900872,0.00518486043559,0.0944429603625,0.649632900872,0.380298990737,4.0,15.0,1.0,0.94061235925,0.0,final
+0.314355981377,0.00518486043559,0.0460026422623,0.314355981377,0.551375432252,4.0,15.0,1.0,0.879443568797,0.0,final
+0.218764222976,0.00975325447734,0.173364647239,0.218764222976,0.967165454303,4.0,15.0,1.0,0.618089346206,0.0,final
+0.155041618542,0.00975325447734,0.228798159219,0.155041618542,1.19990448549,4.0,15.0,1.0,0.35946352349,0.0,final
+0.0171611045993,0.00975325447734,0.173364647239,0.0171611045993,1.38629352915,4.0,15.0,1.0,0.269867635023,0.0,final
+0.0693613020865,0.00975325447734,0.184333673023,0.0693613020865,1.2509789265,4.0,15.0,1.0,0.171430659521,0.0,final
+0.0480589254405,0.00975325447734,0.173364647239,0.0480589254405,1.27647960561,4.0,15.0,1.0,0.150132989794,0.0,final
diff --git a/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv b/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv
new file mode 100644
index 0000000000..dc39c524eb
--- /dev/null
+++ b/tests/framework/Optimizers/GeneticAlgorithms/gold/discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv
@@ -0,0 +1,23 @@
+x1,x2,x3,x4,x5,x6,obj1,obj2,age,batchId,rank,CD,ConstraintEvaluation_expConstr3,ConstraintEvaluation_impConstr3,fitness,accepted
+4.0,3.0,5.0,7.0,2.0,6.0,99.0,10.0,0.0,1.0,2.0,inf,-2.0,1.0,1.0,first
+7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,0.0,1.0,2.0,inf,3.0,11.0,0.0,first
+4.0,3.0,6.0,7.0,2.0,5.0,96.0,10.0,0.0,1.0,1.0,inf,-3.0,4.0,1.0,first
+7.0,4.0,3.0,5.0,6.0,2.0,86.0,15.0,0.0,1.0,1.0,inf,2.0,14.0,0.0,first
+4.0,6.0,7.0,2.0,3.0,5.0,90.0,16.0,0.0,1.0,3.0,inf,1.0,10.0,0.0,first
+4.0,6.0,2.0,5.0,7.0,3.0,95.0,16.0,0.0,1.0,4.0,inf,3.0,5.0,0.0,first
+4.0,7.0,2.0,5.0,3.0,6.0,95.0,18.0,0.0,1.0,5.0,inf,3.0,5.0,0.0,first
+6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,0.0,1.0,2.0,2.0,3.0,3.0,0.0,first
+4.0,5.0,3.0,6.0,7.0,2.0,94.0,14.0,0.0,1.0,1.0,2.0,1.0,6.0,0.0,first
+2.0,4.0,6.0,5.0,3.0,7.0,105.0,10.0,0.0,1.0,3.0,inf,-1.0,-5.0,2.0,first
+7.0,3.0,5.0,4.0,6.0,2.0,86.0,13.0,1.0,2.0,1.0,inf,1.0,14.0,0.0,accepted
+7.0,4.0,3.0,5.0,6.0,2.0,86.0,15.0,1.0,2.0,2.0,inf,2.0,14.0,0.0,accepted
+6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,1.0,2.0,1.0,inf,3.0,3.0,0.0,accepted
+6.0,4.0,7.0,2.0,3.0,5.0,88.0,14.0,1.0,2.0,2.0,inf,1.0,12.0,0.0,accepted
+6.0,4.0,3.0,7.0,5.0,2.0,88.0,14.0,1.0,2.0,3.0,inf,0.0,12.0,0.0,accepted
+7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,1.0,2.0,4.0,inf,3.0,11.0,0.0,accepted
+4.0,5.0,3.0,6.0,7.0,2.0,94.0,14.0,1.0,2.0,4.0,inf,1.0,6.0,0.0,accepted
+4.0,5.0,7.0,2.0,3.0,6.0,94.0,14.0,1.0,2.0,5.0,inf,1.0,6.0,0.0,accepted
+7.0,4.0,5.0,2.0,3.0,6.0,89.0,15.0,1.0,2.0,5.0,inf,3.0,11.0,0.0,accepted
+4.0,6.0,7.0,2.0,3.0,5.0,90.0,16.0,1.0,2.0,6.0,inf,1.0,10.0,0.0,accepted
+7.0,3.0,5.0,4.0,6.0,2.0,86.0,13.0,1.0,2.0,1.0,inf,1.0,14.0,0.0,final
+6.0,3.0,5.0,2.0,4.0,7.0,97.0,12.0,1.0,2.0,1.0,inf,3.0,3.0,0.0,final
diff --git a/tests/framework/Optimizers/GeneticAlgorithms/tests b/tests/framework/Optimizers/GeneticAlgorithms/tests
index 84ec37dbd1..a983de57bd 100644
--- a/tests/framework/Optimizers/GeneticAlgorithms/tests
+++ b/tests/framework/Optimizers/GeneticAlgorithms/tests
@@ -331,4 +331,24 @@
rel_err = 0.001
[../]
[../]
+
+ [./NSGA-II_MinwoRepMultiObjective]
+ type = 'RavenFramework'
+ input = 'discrete/constrained/MinwoRepMultiObjective.xml'
+ [./csv]
+ type = OrderedCSV
+ output = 'discrete/constrained/Multi_MinwoReplacement/opt_export_0.csv'
+ rel_err = 0.001
+ [../]
+ [../]
+
+ [./NSGA-II_ZDT1]
+ type = 'RavenFramework'
+ input = 'continuous/unconstrained/ZDT1.xml'
+ [./csv]
+ type = OrderedCSV
+ output = 'continuous/unconstrained/ZDT1/opt_export_0.csv'
+ rel_err = 0.001
+ [../]
+ [../]
[]
diff --git a/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/MinwoRepMultiObjective.xml b/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/MinwoRepMultiObjective.xml
new file mode 100644
index 0000000000..700d248103
--- /dev/null
+++ b/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/MinwoRepMultiObjective.xml
@@ -0,0 +1,160 @@
+
+
+
raven\tests\framework\Optimizers\NSGAII\discrete\constrained\
+ Junyung Kim, Mohammad Abdo
+ 2022-12-21
+
+ NSGA-II min-min test
+
+
+
+ Multi_MinwoReplacement_Figure_720
+ optimize,print
+ 2
+
+
+
+
+ placeholder
+ myLocalSum
+ GAopt
+ opt_export
+
+
+
+
+ opt_export
+ optOut
+
+
+
+
+
+
+
+ x1,x2,x3,x4,x5,x6,obj1,obj2
+
+
+
+
+
+ x1,x2,x3,x4,x5,x6
+
+
+ x1,x2,x3,x4,x5,x6,obj1
+
+
+
+
+
+ 2
+ 7
+ withoutReplacement
+
+
+
+
+
+
+ 3
+ 42
+ every
+ min, max
+
+
+
+ 15
+ tournamentSelection
+
+
+ 0.7
+
+
+ 0.7
+
+
+
+
+ rankNcrowdingBased
+
+
+ 0.0
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+ obj1, obj2
+ optOut
+ MC_samp
+ expConstr3
+ impConstr3
+
+
+
+
+
+
+ 15
+ 050877
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+
+ woRep_dist
+
+
+
+
+
+
+
+ x1,x2,x3,x4,x5,x6
+
+
+
+ trajID
+
+
+
+
+
+
+ csv
+
+
+
+ csv
+
+ trajID
+
+
+
diff --git a/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/myConstraints.py b/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/myConstraints.py
new file mode 100644
index 0000000000..4d3b5f51c9
--- /dev/null
+++ b/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/myConstraints.py
@@ -0,0 +1,150 @@
+# Copyright 2017 Battelle Energy Alliance, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# @ author: Mohammad Abdo (@Jimmy-INL)
+
+import numpy as np
+
+def constrain(Input):
+  """
+    Dispatches to the explicit constraint whose name is passed through Input.name.
+    The evaluation g is negative if the explicit constraint is violated and
+    positive otherwise. This suits the constraint handling in the Genetic
+    Algorithms, but not the Gradient Descent, which expects True if the solution
+    passes the constraint and False if it violates it.
+    @ In, Input, object, RAVEN container
+    @ Out, g, float, explicit constraint evaluation (negative if violated, positive otherwise)
+  """
+  g = eval(Input.name)(Input)
+  return g
+
+def implicitConstraint(Input):
+  """
+    Evaluates the implicit constraint function at a given point/solution $\vec{x}$.
+    The constraint is designed so that its evaluation function g has to be >= 0:
+      1) f(x,y) >= 0  gives  g = f
+      2) f(x,y) >= a  gives  g = f - a
+      3) f(x,y) <= b  gives  g = b - f
+      4) f(x,y) == c  gives  g = 1e-6 - abs(f(x,y) - c)  (equality constraint)
+    @ In, Input, object, RAVEN container
+    @ Out, g, float, implicit constraint evaluation (negative if violated, positive otherwise)
+  """
+  g = eval(Input.name)(Input)
+  return g
+
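+# A minimal sketch of the four g >= 0 cases listed above as a standalone helper.
+# This helper is hypothetical and purely illustrative; RAVEN calls the named
+# constraint functions below directly and never sees it.
+def _gFromBound(f, lower=None, upper=None, equal=None, tol=1e-6):
+  """
+    Maps a raw constraint value f onto the g >= 0 convention.
+    @ In, f, float, raw constraint function value f(x, y)
+    @ In, lower, float, optional, enforce f >= lower (cases 1 and 2)
+    @ In, upper, float, optional, enforce f <= upper (case 3)
+    @ In, equal, float, optional, enforce f == equal within tol (case 4)
+    @ Out, g, float, non-negative exactly when the constraint is satisfied
+  """
+  if equal is not None:
+    return tol - abs(f - equal)
+  if lower is not None:
+    return f - lower
+  return upper - f
+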
+
+def expConstr1(Input): # the function name is free to pick, but it must match the name referenced in the XML input
+  """
+    Assume the constraint is $x3 + x4 < 8$; then g, the constraint evaluation
+    function (which has to be > 0), is taken to be g = 8 - (x3 + x4). If
+    g(\vec{x}) < 0, then this x violates the constraint, and vice versa.
+    @ In, Input, object, RAVEN container
+    @ Out, g, float, explicit constraint 1 evaluation
+  """
+  g = 8 - Input.x3 - Input.x4
+  return g
+
+def expConstr2(Input):
+  """
+    Explicit equality constraint: consider x1**2 + x2**2 == 25. Write g with a
+    very small tolerance, for instance epsilon = 1e-12, so that
+    g = epsilon - abs(constraint).
+    @ In, Input, object, RAVEN container
+    @ Out, g, float, explicit constraint 2 evaluation
+  """
+  g = 1e-12 - abs(Input.x1**2 + Input.x2**2 - 25)
+  return g
+
+def expConstr3(Input):
+  """
+    Explicit constraint $x3 + x4 < 10$, i.e. g = 10 - (x3 + x4).
+    @ In, Input, object, RAVEN container
+    @ Out, g, float, explicit constraint 3 evaluation
+  """
+  g = 10 - Input.x3 - Input.x4
+  return g
+
+def impConstr1(Input):
+  """
+    An implicit constraint involves variables from the output space, e.g. the
+    objective variable or a dependent variable outside the optimization search
+    space. Here: x1**2 + obj <= 10, i.e. g = 10 - x1**2 - obj.
+    @ In, Input, object, RAVEN container
+    @ Out, g, float, implicit constraint 1 evaluation
+  """
+  g = 10 - Input.x1**2 - Input.obj
+  return g
+
+def impConstr2(Input):
+  """
+    Implicit constraint x1**2 + obj >= 10, i.e. g = x1**2 + obj - 10.
+    @ In, Input, object, RAVEN container
+    @ Out, g, float, implicit constraint 2 evaluation
+  """
+  g = Input.x1**2 + Input.obj - 10
+  return g
+
+def impConstr3(Input):
+  """
+    Implicit constraint obj1 <= 100, i.e. g = 100 - obj1.
+    @ In, Input, object, RAVEN container
+    @ Out, g, float, implicit constraint 3 evaluation
+  """
+  g = 100 - Input.obj1
+  return g
+
+def impConstr4(Input):
+  """
+    Implicit constraint obj2 >= 16, i.e. g = obj2 - 16.
+    @ In, Input, object, RAVEN container
+    @ Out, g, float, implicit constraint 4 evaluation
+  """
+  g = Input.obj2 - 16
+  return g
+
+def impConstr5(Input):
+  """
+    Implicit constraint obj1 <= 200, i.e. g = 200 - obj1.
+    @ In, Input, object, RAVEN container
+    @ Out, g, float, implicit constraint 5 evaluation
+  """
+  g = 200 - Input.obj1
+  return g
+
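+# A hypothetical standalone illustration of the Input.name dispatch used by
+# constrain() and implicitConstraint() above (RAVEN builds the real container):
+if __name__ == '__main__':
+  class _FakeInput:
+    """Stand-in for the RAVEN container, carrying only what expConstr3 reads."""
+    name = 'expConstr3'
+    x3 = 2.0
+    x4 = 3.0
+  print(constrain(_FakeInput())) # 5.0 > 0, so the constraint is satisfied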
+ """
+ Evaluates the implicit constraint function at a given point/solution ($\vec(x)$)
+ @ In, Input, object, RAVEN container
+ @ Out, g(inputs x1,x2,..,output or dependent variable), float, implicit constraint evaluation function
+ the way the constraint is designed is that
+ the constraint function has to be >= 0,
+ so if:
+ 1) f(x,y) >= 0 then g = f
+ 2) f(x,y) >= a then g = f - a
+ 3) f(x,y) <= b then g = b - f
+ 4) f(x,y) = c then g = 1e-6 - abs((f(x,y) - c)) (equality constraint)
+ """
+ """
+ Let's assume that the constraint is:
+ $ x3+x4 < 8 $
+ then g the constraint evaluation function (which has to be > 0) is taken to be:
+ g = 8 - (x3+x4)
+ in this case if g(\vec(x)) < 0 then this x violates the constraint and vice versa
+ @ In, Input, object, RAVEN container
+ @ out, g, float, explicit constraint 1 evaluation function
+ """
\ No newline at end of file
diff --git a/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/myLocalSum_multi.py b/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/myLocalSum_multi.py
new file mode 100644
index 0000000000..86ef17bdeb
--- /dev/null
+++ b/tests/framework/Optimizers/NSGAII/discrete/constrained/MultiSumwConst/myLocalSum_multi.py
@@ -0,0 +1,43 @@
+# Copyright 2017 Battelle Energy Alliance, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# @author: Mohammad Abdo (@Jimmy-INL)
+
+def evaluate(Inputs):
+  """
+    Evaluates the position-weighted sum of the design variables plus two
+    partial (local) sums used as the competing objectives.
+    @ In, Inputs, dict, RAVEN dictionary of the input variables (x1..x6)
+    @ Out, (Sum, LocalSum1, LocalSum2), tuple, total weighted sum and the
+          weighted sums over (x1, x2) and (x3, x4) respectively
+  """
+  Sum = 0
+  LocalSum1 = 0
+  LocalSum2 = 0
+  for ind, var in enumerate(Inputs.keys()):
+    # objective: each variable is weighted by its one-based position
+    Sum += (ind + 1) * Inputs[var]
+    if ind in (0, 1):
+      LocalSum1 += (ind + 1) * Inputs[var]
+    if ind in (2, 3):
+      LocalSum2 += (ind + 1) * Inputs[var]
+  return Sum[:], LocalSum1[:], LocalSum2[:]
+
+def run(self, Inputs):
+  """
+    RAVEN API entry point.
+    @ In, self, object, RAVEN container
+    @ In, Inputs, dict, additional inputs
+    @ Out, None
+  """
+  self.obj1, self.obj2, self.obj3 = evaluate(Inputs) # names must match the objectives declared in the RAVEN XML (obj1, obj2, obj3)
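+
+# A hedged standalone usage sketch (assumed inputs; RAVEN normally calls run()):
+if __name__ == '__main__':
+  import numpy as np
+  demo = {'x%i' % i: np.array([float(i)]) for i in range(1, 7)}
+  total, local1, local2 = evaluate(demo)
+  print(total, local1, local2) # [91.] [5.] [25.]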
diff --git a/tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT1/ZDT1.xml b/tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT1/ZDT1.xml
new file mode 100644
index 0000000000..80ad0c28a7
--- /dev/null
+++ b/tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT1/ZDT1.xml
@@ -0,0 +1,295 @@
+
+
+
+ raven\tests\framework\Optimizers\NSGAII\discrete\constrained\
+ Junyung Kim, Mohammad Abdo
+ 2023-02-21
+
+ ZDT1 test using NSGA-II
+
+
+
+ ZDT1_result_300iter_150Popu
+ optimize,print
+ 1
+
+
+
+
+ placeholder
+ ZDT
+ GAopt
+ opt_export
+
+
+
+
+ opt_export
+ optOut
+
+
+
+
+
+
+
+ x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20,x21,x22,x23,x24,x25,x26,x27,x28,x29,x30,obj1,obj2
+
+
+
+
+
+ 0
+ 1
+
+
+
+
+
+
+ 300
+ 42
+ every
+ min
+
+
+
+ 150
+ tournamentSelection
+
+
+ 1.0
+
+
+ 1.0
+
+
+
+
+ rankNcrowdingBased
+
+
+
+ 0.0
+
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+ obj1, obj2
+ optOut
+ MC_samp
+
+
+
+
+
+
+ 150
+ 050877
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+ unifDist
+
+
+
+
+
+
+
+ x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20,x21,x22,x23,x24,x25,x26,x27,x28,x29,x30
+
+
+
+ trajID
+
+
+
+
+
+
+ csv
+
+
+
+ csv
+
+ trajID
+
+
+
diff --git a/tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT1/ZDT_model.py b/tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT1/ZDT_model.py
new file mode 100644
index 0000000000..829307f73e
--- /dev/null
+++ b/tests/framework/Optimizers/NSGAII/discrete/constrained/ZDT1/ZDT_model.py
@@ -0,0 +1,43 @@
+# Copyright 2017 Battelle Energy Alliance, LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# @author: Junyung Kim (@JunyungKim-INL) and Mohammad Abdo (@Jimmy-INL)
+
+import math
+
+def evaluate(Inputs):
+  """
+    Evaluates the two ZDT1 objectives: the first input is returned as obj1 and
+    the remaining inputs feed the g function. Note that this implementation
+    normalizes g by n rather than the textbook n - 1.
+    @ In, Inputs, dict, RAVEN dictionary of the input variables (x1..x30)
+    @ Out, (obj1, obj2), tuple, the two ZDT1 objective values
+  """
+  Sum = 0
+  obj1 = 0
+  for ind, var in enumerate(Inputs.keys()):
+    if ind == 0:
+      obj1 = Inputs[var]
+    else:
+      Sum += Inputs[var]
+  g = 1 + (9 / len(Inputs.keys())) * Sum
+  h = 1 - math.sqrt(obj1 / g)
+  obj2 = g * h
+  return obj1[:], obj2[:]
+
+def run(self, Inputs):
+  """
+    RAVEN API entry point.
+    @ In, self, object, RAVEN container
+    @ In, Inputs, dict, additional inputs
+    @ Out, None
+  """
+  self.obj1, self.obj2 = evaluate(Inputs)
+
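+# A hedged sanity check (assumed standalone use): on the ZDT1 Pareto front
+# x2..x30 are all zero, so g = 1 and obj2 = 1 - sqrt(obj1).
+if __name__ == '__main__':
+  import numpy as np
+  demo = {'x%i' % i: np.array([0.0]) for i in range(1, 31)}
+  demo['x1'] = np.array([0.25])
+  f1, f2 = evaluate(demo)
+  print(f1, f2) # [0.25] [0.5]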
+