diff --git a/mloptimizer/test/test_genoptimizer/test_optimizers.py b/mloptimizer/test/test_genoptimizer/test_optimizers.py
index 62bf5b1..18cec19 100644
--- a/mloptimizer/test/test_genoptimizer/test_optimizers.py
+++ b/mloptimizer/test/test_genoptimizer/test_optimizers.py
@@ -5,6 +5,7 @@
     GradientBoostingOptimizer, SVCOptimizer, XGBClassifierOptimizer, KerasClassifierOptimizer, \
     CustomXGBClassifierOptimizer, CatBoostClassifierOptimizer, \
     BaseOptimizer
+from mloptimizer.evaluation import kfold_score
 from sklearn.metrics import accuracy_score, f1_score, roc_auc_score, \
     balanced_accuracy_score, precision_score, recall_score, \
     average_precision_score, log_loss, mean_squared_error, mean_absolute_error, \
@@ -39,3 +40,28 @@ def test_get_subclasses():
     ]
     assert all([subclass.__name__ in subclasses_names for subclass in subclasses]) and \
            len(subclasses) == len(subclasses_names)
+
+
+@pytest.mark.parametrize('optimizer',
+                         (TreeOptimizer, ForestOptimizer,
+                          # ExtraTreesOptimizer, GradientBoostingOptimizer,
+                          XGBClassifierOptimizer,
+                          # SVCOptimizer, KerasClassifierOptimizer
+                          ))
+def test_reproducibility(optimizer):
+    X, y = load_iris(return_X_y=True)
+    population = 2
+    generations = 2
+    seed = 25
+    distinct_seed = 2
+    optimizer1 = optimizer(X, y, score_function=balanced_accuracy_score,
+                           eval_function=kfold_score, seed=seed)
+    result1 = optimizer1.optimize_clf(population=population, generations=generations)
+    optimizer2 = optimizer(X, y, score_function=balanced_accuracy_score,
+                           eval_function=kfold_score, seed=seed)
+    result2 = optimizer2.optimize_clf(population=population, generations=generations)
+    optimizer3 = optimizer(X, y, score_function=balanced_accuracy_score,
+                           eval_function=kfold_score, seed=distinct_seed)
+    result3 = optimizer3.optimize_clf(population=population, generations=generations)
+    assert str(result1) == str(result2)
+    assert str(result1) != str(result3)
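
For context, here is a minimal sketch of the kind of k-fold evaluation function the new test wires in via `eval_function=kfold_score`. The function name, signature, and fold count below are assumptions made for illustration only; the actual implementation lives in `mloptimizer.evaluation` and may differ.

```python
# Hypothetical sketch in the spirit of mloptimizer.evaluation.kfold_score;
# the real signature and behavior may differ.
import numpy as np
from sklearn.model_selection import StratifiedKFold


def kfold_score_sketch(features, labels, clf, score_function, n_splits=4, seed=None):
    """Average score_function over stratified test folds, refitting clf per fold."""
    skf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
    fold_scores = []
    for train_idx, test_idx in skf.split(features, labels):
        clf.fit(features[train_idx], labels[train_idx])
        predictions = clf.predict(features[test_idx])
        fold_scores.append(score_function(labels[test_idx], predictions))
    return np.mean(fold_scores)
```

The reproducibility test itself only relies on the string representation of `optimize_clf`'s result being stable: two runs with the same seed must produce identical results, while a different seed should change them. Running `pytest mloptimizer/test/test_genoptimizer/test_optimizers.py -k test_reproducibility` exercises just this test.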