
Commit

Return average and individual scores
silvanocerza committed Mar 20, 2024
1 parent eb1a48c commit 9e03a54
Showing 2 changed files with 14 additions and 11 deletions.
19 changes: 11 additions & 8 deletions haystack/components/evaluators/answer_f1.py
@@ -1,12 +1,12 @@
-from typing import Dict, List
+from typing import Any, Dict, List

from haystack.core.component import component


@component
class AnswerF1Evaluator:
"""
-Evaluator that calculates the F1 score of the matches between the predicted and the ground truth answers.
+Evaluator that calculates the average F1 score of the matches between the predicted and the ground truth answers.
We first calculate the F1 score for each question, sum all the scores and divide by the number of questions.
The result is a number from 0.0 to 1.0.
@@ -22,15 +22,19 @@ class AnswerF1Evaluator:
ground_truth_answers=[["Berlin"], ["Paris"]],
predicted_answers=[["Berlin"], ["London"]],
)
+print(result["scores"])
+# [1.0, 0.0]
+print(result["average"])
+# 0.5
```
"""

-@component.output_types(result=float)
+@component.output_types(scores=List[float], average=float)
def run(
self, questions: List[str], ground_truth_answers: List[List[str]], predicted_answers: List[List[str]]
-) -> Dict[str, float]:
+) -> Dict[str, Any]:
"""
Run the AnswerF1Evaluator on the given inputs.
All lists must have the same length.
@@ -43,7 +47,8 @@ def run(
A list of predicted answers for each question.
:returns:
A dictionary with the following outputs:
-- `result`: A number from 0.0 to 1.0 that represents the average F1 score of the predicted
+- `scores`: A list of numbers from 0.0 to 1.0, one F1 score per question.
+- `average`: A number from 0.0 to 1.0 that represents the average F1 score of the predicted
answers matched against the ground truth answers.
"""
if not len(questions) == len(ground_truth_answers) == len(predicted_answers):
@@ -64,6 +69,4 @@ def run(
f1 = 0.0
scores.append(f1)

-result = sum(scores) / len(questions)
-
-return {"result": result}
+return {"scores": scores, "average": sum(scores) / len(questions)}
6 changes: 3 additions & 3 deletions test/components/evaluators/test_answer_f1.py
@@ -11,7 +11,7 @@ def test_run_with_all_matching():
predicted_answers=[["Berlin"], ["Paris"]],
)

-assert result["result"] == 1.0
+assert result == {"scores": [1.0, 1.0], "average": 1.0}


def test_run_with_no_matching():
@@ -22,7 +22,7 @@ def test_run_with_no_matching():
predicted_answers=[["Paris"], ["London"]],
)

-assert result["result"] == 0.0
+assert result == {"scores": [0.0, 0.0], "average": 0.0}


def test_run_with_partial_matching():
@@ -33,7 +33,7 @@ def test_run_with_partial_matching():
predicted_answers=[["Berlin"], ["London"]],
)

-assert result["result"] == 0.5
+assert result == {"scores": [1.0, 0.0], "average": 0.5}


def test_run_with_different_lengths():
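
For code that consumes the evaluator's output, the change amounts to reading two keys instead of one. A minimal sketch of the before/after access pattern, assuming `evaluator` is an `AnswerF1Evaluator` instance constructed elsewhere; the question strings are placeholders, since the `questions` argument is not visible in the diff context.

```python
result = evaluator.run(
    questions=["What is the capital of Germany?", "What is the capital of France?"],
    ground_truth_answers=[["Berlin"], ["Paris"]],
    predicted_answers=[["Berlin"], ["London"]],
)

# Before this commit the component exposed a single aggregate value:
#     average_f1 = result["result"]
# After this commit it exposes both the per-question scores and their average:
per_question_f1 = result["scores"]   # [1.0, 0.0]
average_f1 = result["average"]       # 0.5
```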
