Merge pull request #970 from numbbo/development
merge of development branch into master before release
brockho committed Apr 14, 2016
2 parents b805f3c + 70f68b2 commit f1f9162
Showing 25 changed files with 603 additions and 565 deletions.
7 changes: 3 additions & 4 deletions README.md
@@ -69,11 +69,10 @@ Getting Started <a name="Getting-Started"></a>
 ---------------
 0. Check out the [_Requirements_](#Requirements) above.
 
-1. **Download** the [COCO framework code](https://github.com/numbbo/coco) from
-   [github](https://github.com),
+1. **Download** the COCO framework code from github,
 
-  - either by clicking [here](https://github.com/numbbo/coco/archive/master.zip) and unzip the
-    `zip` file,
+  - either by clicking the [Download ZIP button](https://github.com/numbbo/coco/archive/master.zip)
+    and unzip the `zip` file,
   - or (preferred) by typing `git clone https://github.com/numbbo/coco.git`. This way
     allows to remain up-to-date easily (but needs `git` to be installed). After
     cloning, `git pull` keeps the code up-to-date with the latest release.
8 changes: 4 additions & 4 deletions code-experiments/src/coco_problem.c
@@ -342,17 +342,17 @@ size_t coco_problem_get_evaluations(const coco_problem_t *problem) {
  */
 static int coco_problem_best_parameter_not_zero(const coco_problem_t *problem) {
   size_t i = 0;
-  int zero = 0;
+  int best_is_zero = 1;
 
   if (coco_vector_contains_nan(problem->best_parameter, problem->number_of_variables))
     return 1;
 
-  while (i < problem->number_of_variables && !zero) {
-    zero = coco_double_almost_equal(problem->best_parameter[i], 0, 1e-9);
+  while (i < problem->number_of_variables && best_is_zero) {
+    best_is_zero = coco_double_almost_equal(problem->best_parameter[i], 0, 1e-9);
     i++;
   }
 
-  return zero;
+  return !best_is_zero;
 }
 
 /**
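Together with the `coco_double_almost_equal` fix below, the function now does what its name says: it scans the components of `best_parameter` and reports 1 only if at least one of them deviates from zero. A minimal Python sketch of the corrected logic (standalone stand-ins, not COCO's API):

def almost_equal(a, b, accuracy=1e-9):
    # mirrors the fixed coco_double_almost_equal below
    return abs(a - b) < accuracy

def best_parameter_not_zero(best_parameter):
    # True iff some component deviates from zero by more than the accuracy
    best_is_zero = True
    for x in best_parameter:
        best_is_zero = almost_equal(x, 0.0)
        if not best_is_zero:
            break
    return not best_is_zero

assert not best_parameter_not_zero([0.0, 1e-12, -1e-10])  # optimum at origin
assert best_parameter_not_zero([0.0, 0.5, 0.0])           # shifted optimum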
2 changes: 1 addition & 1 deletion code-experiments/src/coco_utilities.c
@@ -851,7 +851,7 @@ static size_t coco_double_to_size_t(const double number) {
  * @brief Returns 1 if |a - b| < accuracy and 0 otherwise.
  */
 static int coco_double_almost_equal(const double a, const double b, const double accuracy) {
-  return ((fabs(a - b) < accuracy) == 0);
+  return (fabs(a - b) < accuracy);
 }
 
 /**@}*/
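The deleted `== 0` negated the comparison, so the function returned 1 precisely when the two values were *not* within `accuracy` of each other — the opposite of its `@brief`. The truth table, sketched in Python:

from math import fabs

def almost_equal_old(a, b, accuracy):
    return (fabs(a - b) < accuracy) == 0  # buggy: True means NOT almost equal

def almost_equal_new(a, b, accuracy):
    return fabs(a - b) < accuracy         # matches the documentation

assert almost_equal_old(1.0, 1.0 + 1e-12, 1e-9) is False  # wrong answer
assert almost_equal_new(1.0, 1.0 + 1e-12, 1e-9) is True   # within accuracy
assert almost_equal_new(1.0, 1.1, 1e-9) is False          # clearly different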
7 changes: 5 additions & 2 deletions code-experiments/src/mo_utilities.c
@@ -11,21 +11,24 @@
  * @brief Checks the dominance relation in the unconstrained minimization case between objectives1 and
  * objectives2.
  *
+ * If two values are closer together than 1e-13, they are treated as equal.
+ *
  * @return
  *   1 if objectives1 dominates objectives2 <br>
  *   0 if objectives1 and objectives2 are non-dominated <br>
  *  -1 if objectives2 dominates objectives1 <br>
  *  -2 if objectives1 is identical to objectives2
  */
 static int mo_get_dominance(const double *objectives1, const double *objectives2, const size_t num_obj) {
-  /* TODO: Should we care about comparison precision? */
   size_t i;
 
   int flag1 = 0;
   int flag2 = 0;
 
   for (i = 0; i < num_obj; i++) {
-    if (objectives1[i] < objectives2[i]) {
+    if (coco_double_almost_equal(objectives1[i], objectives2[i], 1e-13)) {
+      continue;
+    } else if (objectives1[i] < objectives2[i]) {
       flag1 = 1;
     } else if (objectives1[i] > objectives2[i]) {
       flag2 = 1;
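The patched loop now skips objective pairs that differ by less than 1e-13 before setting either dominance flag, so numerical noise no longer turns effectively equal points into mutually non-dominated ones. A self-contained Python sketch of the whole relation; the hunk ends before the final flag-to-return-code mapping, so that part is reconstructed here from the doc comment above:

def get_dominance(objectives1, objectives2, eps=1e-13):
    # values closer together than eps are treated as equal
    flag1 = flag2 = False  # flag1/flag2: vector 1/2 is strictly better somewhere
    for a, b in zip(objectives1, objectives2):
        if abs(a - b) < eps:
            continue
        elif a < b:
            flag1 = True
        else:
            flag2 = True
    if flag1 and not flag2:
        return 1    # objectives1 dominates objectives2
    if flag2 and not flag1:
        return -1   # objectives2 dominates objectives1
    return 0 if flag1 else -2  # non-dominated, or identical within eps

assert get_dominance([1.0, 2.0], [1.0 + 1e-14, 2.5]) == 1  # tie in f1, better in f2
assert get_dominance([1.0, 2.0], [1.0, 2.0]) == -2         # identical
assert get_dominance([1.0, 3.0], [2.0, 2.0]) == 0          # trade-off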
12 changes: 3 additions & 9 deletions code-postprocessing/bbob_pproc/bestalg.py
@@ -21,16 +21,14 @@
 
 import os
 import sys
-import glob
-import getopt
 import pickle
 import gzip
 from pdb import set_trace
+import warnings
 import numpy as np
 
-from . import genericsettings, readalign, pproc
+from . import readalign, pproc
 from .toolsdivers import print_done
-from .ppfig import Usage
 from . import toolsstats
 
 bestalgentries2009 = {}
@@ -61,13 +59,9 @@
 # o a best algorithm and an algorithm portfolio are almost the same,
 #   they should derive from a CombinedAlgorithmDataSet?
 
-#CLASS DEFINITIONS
+# CLASS DEFINITIONS
 
 
-class Usage(Exception):
-    def __init__(self, msg):
-        self.msg = msg
-
 class BestAlgSet():
     """Unit element of best algorithm data set.
115 changes: 62 additions & 53 deletions code-postprocessing/bbob_pproc/comp2/ppscatter.py
@@ -32,6 +32,7 @@
 import os
 import numpy
 import numpy as np
+import warnings
 from pdb import set_trace
 from matplotlib import pyplot as plt
 try:
@@ -45,10 +46,6 @@
 from .. import pproc
 
 dimensions = (2, 3, 5, 10, 20, 40)
-fixed_targets = pproc.TargetValues(np.logspace(-8, 2, 46))
-#runlength_based_targets = pproc.RunlengthBasedTargetValues(np.logspace(numpy.log10(0.5), numpy.log10(50), 8))
-# runlength_based_targets = pproc.RunlengthBasedTargetValues([0.5, 1, 3, 10, 50])
-targets = fixed_targets # default
 
 # formattings
 markersize = 14 # modified in config.py
@@ -58,57 +55,68 @@
 max_evals_line_length = 9 # length away from the diagonal as a factor, line indicates maximal evaluations for each data
 offset = 0. #0.02 offset provides a way to move away the box boundaries to display the outer markers fully, clip_on=False is more effective
 
-caption_start_fixed = r"""Average running time (\aRT\ in $\log_{10}$ of number of function evaluations)
-    of \algorithmB\ ($x$-axis) versus \algorithmA\ ($y$-axis) for $NBTARGETS$ target values
-    $\Df \in [NBLOW, NBUP]$ in each dimension on functions #1. """
-caption_start_rlbased = r"""Average running time (\aRT\ in $\log_{10}$ of number of function evaluations)
-    of \algorithmA\ ($y$-axis) versus \algorithmB\ ($x$-axis) for $NBTARGETS$ runlength-based target
-    function values for budgets between $NBLOW$ and $NBUP$ evaluations.
-    Each runlength-based target $f$-value is chosen such that the \aRT{}s of the
-    REFERENCE_ALGORITHM artificial algorithm for the given and a slightly easier
-    target bracket the reference budget. """
-caption_finish = r"""Markers on the upper or right edge indicate that the respective target
-    value was never reached. Markers represent dimension:
-    2:{\color{cyan}+},
-    3:{\color{green!45!black}$\triangledown$},
-    5:{\color{blue}$\star$},
-    10:$\circ$,
-    20:{\color{red}$\Box$},
-    40:{\color{magenta}$\Diamond$}. """
-
-def figure_caption():
-    if isinstance(targets, pproc.RunlengthBasedTargetValues):
-        s = caption_start_rlbased
-        s = s.replace('NBTARGETS', str(len(targets)))
-        s = s.replace('NBLOW', toolsdivers.number_to_latex(targets.label(0)) +
-                      r'\times\DIM' if targets.times_dimension else '')
-        s = s.replace('NBUP', toolsdivers.number_to_latex(targets.label(-1)) +
-                      r'\times\DIM' if targets.times_dimension else '')
-        s = s.replace('REFERENCE_ALGORITHM', targets.reference_algorithm)
-    else:
-        s = caption_start_fixed
-        s = s.replace('NBTARGETS', str(len(targets)))
-        s = s.replace('NBLOW', toolsdivers.number_to_latex(targets.label(0)))
-        s = s.replace('NBUP', toolsdivers.number_to_latex(targets.label(-1)))
-    s += caption_finish
-    return s
-
-def figure_caption_html():
-    if isinstance(targets, pproc.RunlengthBasedTargetValues):
-        s = htmldesc.getValue('##bbobppscatterlegendrlbased##')
-        s = s.replace('NBTARGETS', str(len(targets)))
-        s = s.replace('NBLOW', toolsdivers.number_to_html(targets.label(0)) +
-                      r'\times\DIM' if targets.times_dimension else '')
-        s = s.replace('NBUP', toolsdivers.number_to_html(targets.label(-1)) +
-                      r'\times\DIM' if targets.times_dimension else '')
-        s = s.replace('REFERENCEALGORITHM', targets.reference_algorithm)
-    else:
-        s = htmldesc.getValue('##bbobppscatterlegendfixed##')
-        s = s.replace('NBTARGETS', str(len(targets)))
-        s = s.replace('NBLOW', toolsdivers.number_to_html(targets.label(0)))
-        s = s.replace('NBUP', toolsdivers.number_to_html(targets.label(-1)))
-        s += htmldesc.getValue('##bbobppscatterlegendend##')
-    return s
+def prepare_figure_caption():
+
+    caption_start_fixed = r"""Average running time (\aRT\ in $\log_{10}$ of number of function evaluations)
+        of \algorithmB\ ($x$-axis) versus \algorithmA\ ($y$-axis) for $NBTARGETS$ target values
+        $\Df \in [NBLOW, NBUP]$ in each dimension on functions #1. """
+
+    caption_start_rlbased = r"""Average running time (\aRT\ in $\log_{10}$ of number of function evaluations)
+        of \algorithmA\ ($y$-axis) versus \algorithmB\ ($x$-axis) for $NBTARGETS$ runlength-based target
+        function values for budgets between $NBLOW$ and $NBUP$ evaluations.
+        Each runlength-based target $f$-value is chosen such that the \aRT{}s of the
+        REFERENCE_ALGORITHM artificial algorithm for the given and a slightly easier
+        target bracket the reference budget. """
+
+    caption_finish = r"""Markers on the upper or right edge indicate that the respective target
+        value was never reached. Markers represent dimension:
+        2:{\color{cyan}+},
+        3:{\color{green!45!black}$\triangledown$},
+        5:{\color{blue}$\star$},
+        10:$\circ$,
+        20:{\color{red}$\Box$},
+        40:{\color{magenta}$\Diamond$}. """
+
+    if genericsettings.current_testbed.name == genericsettings.testbed_name_bi:
+        # NOTE: no runlength-based targets supported yet
+        caption = caption_start_fixed + caption_finish
+    elif genericsettings.current_testbed.name == genericsettings.testbed_name_single:
+        if genericsettings.runlength_based_targets:
+            caption = caption_start_rlbased + caption_finish
+        else:
+            caption = caption_start_fixed + caption_finish
+    else:
+        warnings.warn("Current settings do not support ppfigdim caption.")
+
+    return caption
+
+
+def figure_caption(for_html = False):
+
+    targets = genericsettings.current_testbed.ppscatter_target_values
+    if for_html:
+        caption = htmldesc.getValue('##bbobppscatterlegend' +
+                                    genericsettings.current_testbed.scenario + '##')
+    else:
+        caption = prepare_figure_caption()
+
+    caption = caption.replace('NBTARGETS', str(len(targets)))
+
+    if genericsettings.runlength_based_targets:
+        caption = caption.replace('NBLOW', toolsdivers.number_to_latex(targets.label(0)) +
+                                  r'\times\DIM' if targets.times_dimension else '')
+        caption = caption.replace('NBUP', toolsdivers.number_to_latex(targets.label(-1)) +
+                                  r'\times\DIM' if targets.times_dimension else '')
+        caption = caption.replace('REFERENCE_ALGORITHM', targets.reference_algorithm)
+        caption = caption.replace('REFERENCEALGORITHM', targets.reference_algorithm)
+    else:
+        caption = caption.replace('NBLOW', toolsdivers.number_to_latex(targets.label(0)))
+        caption = caption.replace('NBUP', toolsdivers.number_to_latex(targets.label(-1)))
+
+    return caption
 
 
 def beautify():
     a = plt.gca()
@@ -159,6 +167,7 @@ def main(dsList0, dsList1, outputdir, verbose=True):
     dictFunc1 = dsList1.dictByFunc()
     funcs = set(dictFunc0.keys()) & set(dictFunc1.keys())
 
+    targets = genericsettings.current_testbed.ppscatter_target_values
     if isinstance(targets, pproc.RunlengthBasedTargetValues):
        linewidth = linewidth_rld_based
     else:
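The net effect of this refactoring: target values and caption templates now come from `genericsettings.current_testbed` instead of module-level globals, and a single `figure_caption(for_html)` serves both output formats through shared placeholder substitution. The substitution step in isolation (dummy template, values, and the name `fill_caption` are illustrative only):

def fill_caption(template, n_targets, low, up,
                 runlength_based=False, times_dimension=False,
                 reference_algorithm='best 2009'):
    # same NBTARGETS/NBLOW/NBUP replacement pattern as in figure_caption
    caption = template.replace('NBTARGETS', str(n_targets))
    suffix = r'\times\DIM' if (runlength_based and times_dimension) else ''
    caption = caption.replace('NBLOW', low + suffix)
    caption = caption.replace('NBUP', up + suffix)
    if runlength_based:
        caption = caption.replace('REFERENCE_ALGORITHM', reference_algorithm)
    return caption

print(fill_caption('$NBTARGETS$ target values $\\Df \\in [NBLOW, NBUP]$',
                   46, '10^{-8}', '10^{2}'))
# -> $46$ target values $\Df \in [10^{-8}, 10^{2}]$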
40 changes: 27 additions & 13 deletions code-postprocessing/bbob_pproc/comp2/pptable2.py
@@ -21,8 +21,7 @@
 
 from pdb import set_trace
 
-targetsOfInterest = pproc.TargetValues((1e+1, 1e-1, 1e-3, 1e-5, 1e-7))
-targetf = 1e-8 # value for determining the success ratio
+
 samplesize = genericsettings.simulated_runlength_bootstrap_sample_size
 
 def get_table_caption():
@@ -48,24 +47,32 @@ def get_table_caption():
     (preceded by the target \Df-value in \textit{italics}) in the first row.
     \#succ is the number of trials that reached the target value of the last column.
     """
-    table_caption_two_bi = r"""%
-        target, the corresponding best aRT
-        in the first row. The different target \Df-values are shown in the top row.
-        \#succ is the number of trials that reached the (final) target
+    table_caption_bi = r"""%
+        Average runtime (\aRT) to reach given targets, measured
+        in number of function evaluations in dimensions 5 (left) and 20 (right).
+        For each function, the \aRT\
+        and, in braces as dispersion measure, the half difference between 10 and
+        90\%-tile of (bootstrapped) runtimes is shown for the different
+        target \Df-values as shown in the top row.
+        \#succ is the number of trials that reached the last target
         $\hvref + """ + genericsettings.current_testbed.hardesttargetlatex + r"""$.
         """
-    table_caption_rest = r"""%
+    table_caption_rest = (r"""%
         The median number of conducted function evaluations is additionally given in
-        \textit{italics}, if the target in the last column was never reached.
+        \textit{italics}, if the last target was never reached.
         1:\algorithmAshort\ is \algorithmA\ and 2:\algorithmBshort\ is \algorithmB.
         Bold entries are statistically significantly better compared to the other algorithm,
         with $p=0.05$ or $p=10^{-k}$ where $k\in\{2,3,4,\dots\}$ is the number
-        following the $\star$ symbol, with Bonferroni correction of #1.
-        A $\downarrow$ indicates the same tested against the best algorithm of BBOB-2009.
-        """
+        following the $\star$ symbol, with Bonferroni correction of #1.""" +
+        (r"""A $\downarrow$ indicates the same tested against the best
+        algorithm of BBOB-2009."""
+         if not (genericsettings.current_testbed.name == genericsettings.testbed_name_bi)
+         else "")
+        )
 
     if genericsettings.current_testbed.name == genericsettings.testbed_name_bi:
         # NOTE: no runlength-based targets supported yet
-        table_caption = table_caption_one + table_caption_two_bi + table_caption_rest
+        table_caption = table_caption_bi + table_caption_rest
     elif genericsettings.current_testbed.name == genericsettings.testbed_name_single:
         if genericsettings.runlength_based_targets:
             table_caption = table_caption_one + table_caption_two2 + table_caption_rest
@@ -82,6 +89,10 @@ def main(dsList0, dsList1, dimsOfInterest, outputdir, info='', verbose=True):
 
     #TODO: method is long, split if possible
 
+    testbed = genericsettings.current_testbed
+    targetsOfInterest = testbed.pptable2_targetsOfInterest
+
+
     dictDim0 = dsList0.dictByDim()
     dictDim1 = dsList1.dictByDim()
 
@@ -130,7 +141,10 @@
     for f in sorted(funcs):
         tableHtml.append('<tr>\n')
         targets = targetsOfInterest((f, d))
-        targetf = targets[-1]
+        if isinstance(targetsOfInterest, pproc.RunlengthBasedTargetValues):
+            targetf = targets[-1]
+        else:
+            targetf = testbed.pptable_ftarget
 
         curline = [r'${\bf f_{%d}}$' % f]
         curlineHtml = ['<th><b>f<sub>%d</sub></b></th>\n' % f]
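The hard-coded `targetf = 1e-8` is gone; the target used for the \#succ column is now resolved per testbed and per (function, dimension) pair. The selection rule as a tiny standalone sketch (stub values and the name `success_target` are illustrative only):

def success_target(targets, runlength_based, pptable_ftarget=1e-8):
    # as in the patched main(): last generated target for runlength-based
    # settings, otherwise the testbed's fixed final target
    return targets[-1] if runlength_based else pptable_ftarget

assert success_target([10.0, 0.1, 1e-7], runlength_based=True) == 1e-7
assert success_target([10.0, 0.1, 1e-7], runlength_based=False) == 1e-8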
(Diffs of the remaining 18 changed files are not shown.)
