Skip to content

Commit

Permalink
NgIoh variants (#1551)
Browse files Browse the repository at this point in the history
* po

* newplots

* Dactoid with many new methods (#1546)

* bobyqa_coming

* black_and_static

* Update recastlib.py

* Add_ax

* smac

* scipyplus

* scipyplus

* fix

* fix

* fix

* fix

* fix

* fix

* black

* Update recastlib.py

* Update mypy.ini

* Update recastlib.py

* Update mypy.ini

* fix

* fix

* fix

* fix

* wtf

* fix

* fix

* flute

* fix

* fix

* wtf

* black

* smac

* fix

* fix

* fix

* fix

* so_dirty

* smac2

* Add a bound option to ArtificialFunction

* Use normalizer for BO optimizers

* changelo

* fix

* Polishing

* Changes in experiments.py for xp in bounded domains

* black

* Add normalizer to AX

* Add normalizer to SMAC

* smac / no-transform: fix bounds

thx Jeremy

* fix bounds ax

* risky

* fixadcas

* fix

* fix

* ouie

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* po

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* po

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* okarc

* fix

* fix

* fix

* fix

* fix

* fix

* bfgs (#1532)

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* pou

* fix

* fix

* fix

* fix

* fix

* oldstuff

* qopso

* fix

* fix

* fix

* fix

* po

* po

* fix

* fix

* fix

* noslowcircleci

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* po

* po

* fo

---------

Co-authored-by: Elena Raponi <[email protected]>
Co-authored-by: Jeremy Rapin <[email protected]>

* fix

* ZZpo

* rmrfvenv

* fixbudgetnone

* black

* po

* fix

* fix

* po

* fix

* fix

* fix

---------

Co-authored-by: Elena Raponi <[email protected]>
Co-authored-by: Jeremy Rapin <[email protected]>
  • Loading branch information
3 people authored Sep 19, 2023
1 parent bb67982 commit 144c999
Show file tree
Hide file tree
Showing 10 changed files with 390 additions and 50 deletions.
7 changes: 6 additions & 1 deletion nevergrad/benchmark/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,12 @@ def save_or_append_to_csv(df: pd.DataFrame, path: Path) -> None:
"""Saves a dataframe to a file in append mode"""
if path.exists():
print("Appending to existing file")
predf = pd.read_csv(str(path))
try:
predf = pd.read_csv(
str(path), on_bad_lines="warn"
) # , error_bad_lines=False)#, error_bad_lines=False)
except: # fallback for older pandas versions that lack the on_bad_lines keyword.
predf = pd.read_csv(str(path)) # , error_bad_lines=False)#, error_bad_lines=False)
df = pd.concat([predf, df], sort=False)
df.to_csv(path, index=False)

Expand Down
37 changes: 7 additions & 30 deletions nevergrad/benchmark/experiments.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@

def refactor_optims(x: tp.List[tp.Any]) -> tp.List[tp.Any]:
# return ["RandomSearch", "OnePlusOne", "DE", "PSO"]
return ["NgIoh", "Shiwa", "NGOpt"]

algos = {}
algos["aquacrop_fao"] = [
"CMA",
Expand Down Expand Up @@ -409,7 +409,7 @@ def refactor_optims(x: tp.List[tp.Any]) -> tp.List[tp.Any]:

# Below, we use the best in the records above.
benchmark = str(inspect.stack()[1].function)
if benchmark in algos and "tunin" in benchmark and np.random.randint(2) > 0 and False:
if benchmark in algos and "tunin" in benchmark and np.random.randint(2) > 0:
return algos[benchmark][:5]

# Here, we pseudo-randomly draw one optim in the provided list,
Expand Down Expand Up @@ -462,34 +462,11 @@ def doint(s): # Converting a string into an int.
import socket

host = socket.gethostname()
list_optims = ["Carola1", "Carola2", "Carola3", "NgIoh", "NgIoh", "NgIoh", "MetaModel", "Cobyla"]
list_optims = ["NgIoh", "NgOpt"]
if "tuning" in benchmark:
list_optims = [
"NgIoh",
"NgIoh",
"NgIoh",
"NgIoh",
"NGOpt",
"NGOpt",
"NGOpt",
"NGOpt",
"HyperOpt",
"RandomSearch",
"PSO",
"DE",
"SQOPSO",
"Cobyla",
# "AX",
"LHSSearch",
"QODE",
"SODE",
]
if "iscre" in benchmark:
list_optims = ["RecombiningPortfolioDiscreteOnePlusOne"]

return [list_optims[doint(host) % len(list_optims)]]
return x # ["Zero"] #return x


# return x # ["Zero"] #return x


# return ["MultiSQP", "MultiCobyla", "MultiBFGS"]
Expand Down Expand Up @@ -3373,8 +3350,8 @@ def pbo_suite(seed: tp.Optional[int] = None, reduced: bool = False) -> tp.Iterat
for x in ng.optimizers.registry.keys()
if "iscre" in x and "ois" not in x and "ptim" not in x and "oerr" not in x
]
optims = ["NGOpt", "NGOptRW"]
optims = refactor_optims(optims)
list_optims = ["NGOpt", "NGOptRW"]
list_optims = refactor_optims(list_optims)
for dim in [16, 64, 100]:
for fid in range(1, 24):
for iid in range(1, 5):
Expand Down
15 changes: 10 additions & 5 deletions nevergrad/benchmark/plotting.py
Original file line number Diff line number Diff line change
Expand Up @@ -367,7 +367,7 @@ def create_plots(
print("\n# new case #", fixed, case)
casedf = df.select(**dict(zip(fixed, case)))
data_df = FightPlotter.winrates_from_selection(
casedf, fight_descriptors, num_rows=num_rows, num_cols=50
casedf, fight_descriptors, num_rows=num_rows, num_cols=350
)
fplotter = FightPlotter(data_df)
# Competence maps: we find out the best algorithm for each attribute1=valuei/attribute2=valuej.
Expand Down Expand Up @@ -403,7 +403,7 @@ def create_plots(
fplotter.save(str(output_folder / "fight_all_pure.png"), dpi=_DPI)
else:
fplotter.save(str(output_folder / name) + "_pure.png", dpi=_DPI)
print(f"# {len(data_df.columns[:])}")
print(f"# {len(data_df.columns[:])} {data_df.columns[:]}")
if order == 2 and competencemaps and best_algo: # With order 2 we can create a competence map.
print("\n# Competence map")
name = "competencemap_" + ",".join("{}".format(x) for x in fixed) + ".tex"
Expand Down Expand Up @@ -779,10 +779,15 @@ def __init__(self, winrates_df: pd.DataFrame) -> None:
self.winrates = winrates_df
self._fig = plt.figure()
self._ax = self._fig.add_subplot(111)
max_cols = 25
self._cax = self._ax.imshow(
100 * np.array(self.winrates), cmap=cm.seismic, interpolation="none", vmin=0, vmax=100
100 * np.array(self.winrates)[:, :max_cols],
cmap=cm.seismic,
interpolation="none",
vmin=0,
vmax=100,
)
x_names = self.winrates.columns
x_names = self.winrates.columns[:max_cols] # we plot only the 25 best (max_cols)
self._ax.set_xticks(list(range(len(x_names))))
self._ax.set_xticklabels(x_names, rotation=45, ha="right", fontsize=7)
y_names = self.winrates.index
Expand All @@ -799,7 +804,7 @@ def winrates_from_selection(
df: utils.Selector,
categories: tp.List[str],
num_rows: int = 5,
num_cols: int = 50,
num_cols: int = 350,
complete_runs_only: bool = False,
) -> pd.DataFrame:
"""Creates a fight plot win rate data out of the given run dataframe,
Expand Down
17 changes: 13 additions & 4 deletions nevergrad/functions/ml/mlfunctionlib.py
Original file line number Diff line number Diff line change
Expand Up @@ -244,9 +244,15 @@ def make_dataset(self, data_dimension: tp.Optional[int], dataset: str) -> None:
assert dataset in ["diabetes", "kerasBoston", "auto-mpg", "red-wine", "white-wine"]
assert data_dimension is None
sets_url = {
"auto-mpg": "http://www-lisic.univ-littoral.fr/~teytaud/files/Cours/Apprentissage/data/auto-mpg.data",
"red-wine": "http://www-lisic.univ-littoral.fr/~teytaud/files/Cours/Apprentissage/data/winequality-red.csv",
"white-wine": "http://www-lisic.univ-littoral.fr/~teytaud/files/Cours/Apprentissage/data/winequality-white.csv",
# "auto-mpg": "http://www-lisic.univ-littoral.fr/~teytaud/files/Cours/Apprentissage/data/auto-mpg.data",
# "red-wine": "http://www-lisic.univ-littoral.fr/~teytaud/files/Cours/Apprentissage/data/winequality-red.csv",
# "white-wine": "http://www-lisic.univ-littoral.fr/~teytaud/files/Cours/Apprentissage/data/winequality-white.csv",
# "auto-mpg": "https://github.com/plotly/datasets/blob/master/auto-mpg.csv",
# "red-wine": "https://github.com/plotly/datasets/blob/master/winequality-red.csv",
# "white-wine": "https://github.com/stedy/Machine-Learning-with-R-datasets/blob/master/winequality-white.csv",
"auto-mpg": "https://raw.githubusercontent.com/plotly/datasets/master/auto-mpg.csv",
"red-wine": "https://raw.githubusercontent.com/plotly/datasets/master/winequality-red.csv",
"white-wine": "https://raw.githubusercontent.com/stedy/Machine-Learning-with-R-datasets/master/winequality-white.csv",
}
sets_tag = {"auto-mpg": "mpg", "red-wine": "quality", "white-wine": "quality"}
if dataset == "kerasBoston":
Expand All @@ -259,7 +265,10 @@ def make_dataset(self, data_dimension: tp.Optional[int], dataset: str) -> None:

data = keras.datasets.boston_housing
elif dataset in sets_tag:
data = pd.read_csv(sets_url[dataset])
try:
data = pd.read_csv(sets_url[dataset])
except Exception as e:
assert False, f"failing with error {e} for dataset {dataset}"
else:
data = {"diabetes": sklearn.datasets.load_diabetes,}[
# data = {"boston": sklearn.datasets.load_boston, "diabetes": sklearn.datasets.load_diabetes,}[
Expand Down
2 changes: 1 addition & 1 deletion nevergrad/functions/rocket/rocket.py
Original file line number Diff line number Diff line change
Expand Up @@ -228,7 +228,7 @@ def net_force(Ex, Ey, Ez, Evx, Evy, Evz, m):
percentage = row[0] / np.sum(
thrust_list
) # percentage of total thrust to find percentage of mass lost
assert percentage >= 0.0
assert percentage >= -1e-5, f"percentage is {percentage}"
assert percentage <= 1.0
mass_loss = mass_reman * percentage
mass_reman -= mass_loss
Expand Down
Loading

0 comments on commit 144c999

Please sign in to comment.