Separated scoring and ML predicting into own modules, added simple tests for scoring, updated relevant tests and notebook
Showing 7 changed files with 295 additions and 486 deletions.
@@ -0,0 +1,58 @@
import numpy as np
import pandas as pd
from beartype import beartype
from beartype.typing import Tuple, Union
from sklearn.base import BaseEstimator
from tensorflow import keras


@beartype
def predict_classifier(
    data: Union[np.ndarray, pd.DataFrame], model: Union[BaseEstimator, keras.Model], include_probabilities: bool = True
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
    """
    Predict with a trained model.

    Args:
        data: Data used to make predictions.
        model: Trained classifier or regressor. Can be any machine learning model trained with
            EIS Toolkit (Sklearn and Keras models).
        include_probabilities: If the probability array should be returned too. Defaults to True.

    Returns:
        Predicted labels and optionally predicted probabilities by a classifier model.
    """
    if isinstance(model, keras.Model):
        probabilities = model.predict(data)
        labels = probabilities.argmax(axis=-1)
        if include_probabilities:
            return labels, probabilities
        else:
            return labels
    elif include_probabilities:
        probabilities = model.predict_proba(data)
        labels = model.predict(data)
        return labels, probabilities
    else:
        labels = model.predict(data)
        return labels


@beartype
def predict_regressor(
    data: Union[np.ndarray, pd.DataFrame],
    model: Union[BaseEstimator, keras.Model],
) -> np.ndarray:
    """
    Predict with a trained model.

    Args:
        data: Data used to make predictions.
        model: Trained classifier or regressor. Can be any machine learning model trained with
            EIS Toolkit (Sklearn and Keras models).

    Returns:
        Regression model prediction array.
    """
    result = model.predict(data)
    return result
@@ -0,0 +1,82 @@
from numbers import Number

import numpy as np
import pandas as pd
from beartype import beartype
from beartype.typing import Dict, Sequence, Union
from sklearn.metrics import (
    accuracy_score,
    f1_score,
    mean_absolute_error,
    mean_squared_error,
    precision_score,
    r2_score,
    recall_score,
)

from eis_toolkit.exceptions import InvalidParameterValueException


@beartype
def score_predictions(
    y_true: Union[np.ndarray, pd.Series], y_pred: Union[np.ndarray, pd.Series], metrics: Union[str, Sequence[str]]
) -> Union[Number, Dict[str, Number]]:
    """
    Score model predictions with given metrics.

    One or multiple metrics can be defined for scoring.

    Supported classifier metrics: "accuracy", "precision", "recall", "f1".
    Supported regressor metrics: "mse", "rmse", "mae", "r2".

    Args:
        y_true: Target values ("ground truth") against which scoring is performed.
        y_pred: Predicted labels.
        metrics: The metrics to use for scoring the model. Select only metrics applicable
            for the model type.

    Returns:
        Metric scores as a dictionary if multiple metrics, otherwise just the metric value.
    """
    if isinstance(metrics, str):
        score = _score_predictions(y_true, y_pred, metrics)
        return score
    else:
        out_metrics = {}
        for metric in metrics:
            score = _score_predictions(y_true, y_pred, metric)
            out_metrics[metric] = score
        return out_metrics


@beartype
def _score_predictions(
    y_true: Union[np.ndarray, pd.Series], y_pred: Union[np.ndarray, pd.Series], metric: str
) -> Number:
    # Multiclass classification
    if len(y_true) > 2:
        average_method = "micro"
    # Binary classification
    else:
        average_method = "binary"

    if metric == "mae":
        score = mean_absolute_error(y_true, y_pred)
    elif metric == "mse":
        score = mean_squared_error(y_true, y_pred)
    elif metric == "rmse":
        score = mean_squared_error(y_true, y_pred, squared=False)
    elif metric == "r2":
        score = r2_score(y_true, y_pred)
    elif metric == "accuracy":
        score = accuracy_score(y_true, y_pred)
    elif metric == "precision":
        score = precision_score(y_true, y_pred, average=average_method)
    elif metric == "recall":
        score = recall_score(y_true, y_pred, average=average_method)
    elif metric == "f1":
        score = f1_score(y_true, y_pred, average=average_method)
    else:
        raise InvalidParameterValueException(f"Unrecognized metric: {metric}")

    return score
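Again as an illustration only (not part of the diff), score_predictions could be exercised like this; the toy arrays and the expected accuracy value are assumptions for demonstration, and the function is assumed to be imported from the new scoring module.

# Hypothetical usage sketch; not part of this commit.
import numpy as np

y_true = np.array([1, 0, 1, 1, 0])
y_pred = np.array([1, 0, 0, 1, 0])

# A single metric name returns a plain number.
acc = score_predictions(y_true, y_pred, "accuracy")  # 0.8 for this toy data

# A sequence of metric names returns a dict keyed by metric name.
scores = score_predictions(y_true, y_pred, ["accuracy", "precision", "recall", "f1"])
# -> {"accuracy": ..., "precision": ..., "recall": ..., "f1": ...}

Note that _score_predictions picks the averaging method from len(y_true), i.e. the number of samples, so any input with more than two samples is scored with micro averaging for precision, recall and f1.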