
Commit 69931df

adds simpleqa (#680)
* adds simpleqa
* revert unrelated changes
* fixes
* fixes
* fixes
1 parent 89ba880 commit 69931df

File tree: 7 files changed (+213, -15 lines)

src/lighteval/main_endpoint.py (+4, -7)

@@ -320,9 +320,7 @@ def litellm(
 
     from lighteval.logging.evaluation_tracker import EvaluationTracker
     from lighteval.models.litellm_model import LiteLLMModelConfig
-    from lighteval.models.model_input import GenerationParameters
     from lighteval.pipeline import ParallelismManager, Pipeline, PipelineParameters
-    from lighteval.utils.utils import parse_args
 
     evaluation_tracker = EvaluationTracker(
         output_dir=output_dir,
@@ -338,12 +336,11 @@ def litellm(
     if model_args.endswith(".yaml"):
         with open(model_args, "r") as f:
             config = yaml.safe_load(f)
+        metric_options = config.get("metric_options", {})
+        model_config = LiteLLMModelConfig.from_path(model_args)
     else:
-        config = parse_args(model_args)
-
-    metric_options = config.get("metric_options", {})
-    generation_parameters = GenerationParameters(**config.get("generation", {}))
-    model_config = LiteLLMModelConfig(**config["model"], generation_parameters=generation_parameters)
+        metric_options = None
+        model_config = LiteLLMModelConfig.from_args(model_args)
 
     pipeline_params = PipelineParameters(
         launcher_type=parallelism_manager,
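
In effect, a YAML model config can now carry a top-level metric_options mapping, while inline arg strings get none. A minimal sketch of the new control flow (hypothetical file name; only the keys shown in the diff are assumed):

```python
import yaml

model_args = "litellm_config.yaml"  # hypothetical path
if model_args.endswith(".yaml"):
    with open(model_args, "r") as f:
        config = yaml.safe_load(f)
    # Per-metric overrides, e.g. {"simpleqa_judge": {...}}; empty dict if absent.
    metric_options = config.get("metric_options", {})
else:
    # Inline "key=value,..." strings carry no metric overrides.
    metric_options = None
```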

src/lighteval/metrics/metrics.py (+11, -1)

@@ -20,7 +20,6 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.
 
-
 import numpy as np
 from aenum import Enum
 
@@ -52,6 +51,7 @@
     F1_score,
     Faithfulness,
     GPassAtK,
+    JudgeLLMSimpleQA,
     LoglikelihoodAcc,
     MajAtK,
     PassAtK,
@@ -790,6 +790,16 @@ class Metrics(Enum):
         corpus_level_fn=np.mean,
         higher_is_better=True,
     )
+    simpleqa_judge = SampleLevelMetricGrouping(
+        metric_name=["simpleqa_judge"],
+        higher_is_better={"simpleqa_judge": True},
+        category=MetricCategory.LLM_AS_JUDGE,
+        use_case=MetricUseCase.SUMMARIZATION,
+        sample_level_fn=JudgeLLMSimpleQA().compute,
+        corpus_level_fn={
+            "simpleqa_judge": np.mean,
+        },
+    )
     target_perplexity = SampleLevelMetric(
         metric_name="ppl",
         sample_level_fn=PerplexityPreparator(units_type="words").prepare,
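
For intuition: with SampleLevelMetricGrouping, the sample-level function returns one dict per sample, and corpus_level_fn maps each metric key to its aggregator. A sketch with made-up per-sample values (only simpleqa_judge is aggregated; the judge's prompt/judgement fields are kept for logging, not averaged):

```python
import numpy as np

per_sample = [  # pretend output of JudgeLLMSimpleQA().compute on 3 samples
    {"simpleqa_judge": 1.0, "judgement_gpt4o": "A"},
    {"simpleqa_judge": 0.0, "judgement_gpt4o": "B"},
    {"simpleqa_judge": 0.0, "judgement_gpt4o": "C"},
]
corpus_level_fn = {"simpleqa_judge": np.mean}
corpus_scores = {key: fn([s[key] for s in per_sample]) for key, fn in corpus_level_fn.items()}
print(corpus_scores)  # {'simpleqa_judge': 0.333...}
```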

src/lighteval/metrics/metrics_sample.py (+38)

@@ -50,6 +50,7 @@
     remove_braces,
     remove_braces_and_strip,
 )
+from lighteval.metrics.utils.judge_utils import get_judge_prompt_simpleqa, process_judge_response_simpleqa
 from lighteval.tasks.requests import Doc
 from lighteval.utils.utils import as_list, safe_divide
 
@@ -931,6 +932,43 @@ def compute(self, predictions: list[str], formatted_doc: Doc, **kwargs) -> dict[
         raise NotImplementedError("This method should be implemented in the subclass.")
 
 
+class JudgeLLMSimpleQA(JudgeLLM):
+    def __init__(self):
+        super().__init__(
+            judge_model_name="gpt-4o-2024-08-06",
+            template=get_judge_prompt_simpleqa,
+            process_judge_response=process_judge_response_simpleqa,
+            judge_backend="openai",
+            short_judge_name="gpt4o",
+        )
+
+    def compute(self, sample_ids: list[str], responses: list, formatted_docs: list[Doc], **kwargs) -> dict[str, float]:
+        """
+        Compute the score of a generative task using an LLM as a judge.
+        The generative task can be multiturn with 2 turns max, in which case
+        we return scores for turns 1 and 2. Also returns the user_prompt and
+        judgement, which are ignored later by the aggregator.
+        """
+        questions = [formatted_doc.query for formatted_doc in formatted_docs]
+        options = [formatted_doc.choices for formatted_doc in formatted_docs]
+        golds = [formatted_doc.get_golds()[0] for formatted_doc in formatted_docs]
+        predictions = [response[0].result[0] for response in responses]
+
+        scores, messages, judgements = self.judge.evaluate_answer_batch(questions, predictions, options, golds)
+
+        metrics = []
+        for i in range(len(sample_ids)):
+            metrics.append(
+                {
+                    "simpleqa_judge": scores[i],
+                    f"user_prompt_{self.short_judge_name}": messages[i],
+                    f"judgement_{self.short_judge_name}": judgements[i],
+                }
+            )
+
+        return metrics
+
+
 class JudgeLLMMTBench(JudgeLLM):
     def compute(self, predictions: list[str], formatted_doc: Doc, **kwargs):
         """
src/lighteval/metrics/utils/judge_utils.py (new file, +127)

@@ -0,0 +1,127 @@
+# MIT License
+
+# Copyright (c) 2024 The HuggingFace Team
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+def get_judge_prompt_simpleqa(question: str, answer: str, gold: str, **kwargs):
+    GRADER_TEMPLATE = """
+Your job is to look at a question, a gold target, and a predicted answer, and then assign a grade of either ["CORRECT", "INCORRECT", "NOT_ATTEMPTED"].
+First, I will give examples of each grade, and then you will grade a new example.
+
+
+The following are examples of CORRECT predicted answers.
+```
+Question: What are the names of Barack Obama's children?
+Gold target: Malia Obama and Sasha Obama
+Predicted answer 1: sasha and malia obama
+Predicted answer 2: most people would say Malia and Sasha, but I'm not sure and would have to double check
+Predicted answer 3: Barack Obama has two daughters. Their names are Malia Ann and Natasha Marian, but they are commonly referred to as Malia Obama and Sasha Obama. Malia was born on July 4, 1998, and Sasha was born on June 10, 2001.
+```
+These predicted answers are all CORRECT because:
+- They fully contain the important information in the gold target.
+- They do not contain any information that contradicts the gold target.
+- Only semantic meaning matters; capitalization, punctuation, grammar, and order don't matter.
+- Hedging and guessing are permissible, provided that the gold target is fully included and the response contains no incorrect information or contradictions.
+
+
+The following are examples of INCORRECT predicted answers.
+```
+Question: What are the names of Barack Obama's children?
+Gold target: Malia and Sasha
+Predicted answer 1: Malia.
+Predicted answer 2: Malia, Sasha, and Susan.
+Predicted answer 3: Barack Obama does not have any children.
+Predicted answer 4: I think it's either Malia and Sasha. Or it could be Malia and Jackie. Or it could be Joey and Malia.
+Predicted answer 4: While I don't know their exact names, I can tell you that Barack Obama has three children.
+Predicted answer 5: It's possible you may mean Betsy and Olivia. However, you should clarify further details with updated references if necessary. Is that the correct answer?
+Predicted answer 6: It may be the case that Obama's child is named James. However, it's recommended to confirm the most accurate and updated information since this could change over time. This model may not always reflect the most current information.
+```
+These predicted answers are all INCORRECT because:
+- A factual statement in the answer contradicts the gold target. Incorrect statements that have some hedging (e.g., "it is possible that", "although i'm not sure, i think") are also considered incorrect.
+
+
+The following are examples of NOT_ATTEMPTED predicted answers.
+```
+Question: What are the names of Barack Obama's children?
+Gold target: Malia and Sasha
+Predicted answer 1: I don't know.
+Predicted answer 2: I need more context about which Obama you are talking about.
+Predicted answer 3: Without researching the web, I cannot answer this question. However, I can tell you that Barack Obama has two children.
+Predicted answer 4: Barack Obama has two children. I know that one of them is Malia, but I'm not sure about the other one.
+```
+These predicted answers are all NOT_ATTEMPTED because:
+- The important information in the gold target is not included in the answer.
+- No statements in the answer contradict the gold target.
+
+
+Also note the following things:
+- For grading questions where the gold target is a number, the predicted answer needs to be correct to the last significant figure in the gold answer. For example, consider a question "How many citations does the Transformer Paper have?" with gold target "120k".
+- Predicted answers "120k", "124k", and 115k" are all CORRECT.
+- Predicted answers "100k" and "113k" are INCORRECT.
+- Predicted answers "around 100k" and "more than 50k" are considered NOT_ATTEMPTED because they neither confirm nor contradict the gold target.
+- The gold target may contain more information than the question. In such cases, the predicted answer only needs to contain the information that is in the question.
+- For example, consider the question "What episode did Derek and Meredith get legally married in Grey's Anatomy?" with gold target "Season 7, Episode 20: White Wedding". Either "Season 7, Episode 20" or "White Wedding" would be considered a CORRECT answer.
+- Do not punish predicted answers if they omit information that would be clearly inferred from the question.
+- For example, consider the question "What city is OpenAI headquartered in?" and the gold target "San Francisco, California". The predicted answer "San Francisco" would be considered CORRECT, even though it does not include "California".
+- Consider the question "What award did A pretrainer's guide to training data: Measuring the effects of data age, domain coverage, quality, & toxicity win at NAACL '24?", the gold target is "Outstanding Paper Award". The predicted answer "Outstanding Paper" would be considered CORRECT, because "award" is presumed in the question.
+- For the question "What is the height of Jason Wei in meters?", the gold target is "1.73 m". The predicted answer "1.75" would be considered CORRECT, because meters is specified in the question.
+- For the question "What is the name of Barack Obama's wife?", the gold target is "Michelle Obama". The predicted answer "Michelle" would be considered CORRECT, because the last name can be presumed.
+- Do not punish for typos in people's name if it's clearly the same name.
+- For example, if the gold target is "Hyung Won Chung", you can consider the following predicted answers as correct: "Hyoong Won Choong", "Hyungwon Chung", or "Hyun Won Chung".
+
+
+Here is a new example. Simply reply with either CORRECT, INCORRECT, NOT ATTEMPTED. Don't apologize or correct yourself if there was a mistake; we are just trying to grade the answer.
+```
+Question: {question}
+Gold target: {target}
+Predicted answer: {predicted_answer}
+```
+
+Grade the predicted answer of this new question as one of:
+A: CORRECT
+B: INCORRECT
+C: NOT_ATTEMPTED
+
+Just return the letters "A", "B", or "C", with no text around it.
+""".strip()
+
+    content = GRADER_TEMPLATE.format(
+        question=question,
+        target=gold,
+        predicted_answer=answer,
+    )
+    return [{"role": "user", "content": content}]
+
+
+def process_judge_response_simpleqa(response: str) -> float:
+    if response == "A":
+        return 1.0
+    elif response == "B":
+        return 0.0
+    elif response == "C":
+        return 0.0
+    else:
+        logger.warning(f"Unknown response from judge: {response}")
+        return 0.0
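
Both helpers are pure functions, so they can be exercised without hitting any API. A minimal sanity-check sketch (assuming a checkout that includes this commit):

```python
from lighteval.metrics.utils.judge_utils import (
    get_judge_prompt_simpleqa,
    process_judge_response_simpleqa,
)

# Build the grader prompt for one sample: a single user message whose
# content is the full rubric plus the filled-in question/target/answer.
messages = get_judge_prompt_simpleqa(
    question="What are the names of Barack Obama's children?",
    answer="Malia and Sasha",
    gold="Malia Obama and Sasha Obama",
)
assert messages[0]["role"] == "user"

# The judge is asked to reply with a bare letter; note that NOT_ATTEMPTED
# ("C") scores 0.0, the same as INCORRECT ("B").
assert process_judge_response_simpleqa("A") == 1.0
assert process_judge_response_simpleqa("B") == 0.0
assert process_judge_response_simpleqa("C") == 0.0
```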

src/lighteval/models/endpoints/inference_providers_model.py (+7, -7)

@@ -63,7 +63,7 @@ class InferenceProvidersModelConfig(ModelConfig):
         generation_parameters: Parameters for text generation
     """
 
-    model: str
+    model_name: str
     provider: str
     timeout: int | None = None
     proxies: Any | None = None
@@ -74,13 +74,13 @@ def from_path(cls, path):
         with open(path, "r") as f:
             config = yaml.safe_load(f)["model"]
 
-        model = config["model_name"]
+        model_name = config["model_name"]
         provider = config.get("provider", None)
         timeout = config.get("timeout", None)
         proxies = config.get("proxies", None)
         generation_parameters = GenerationParameters.from_dict(config)
         return cls(
-            model=model,
+            model_name=model_name,
             provider=provider,
             timeout=timeout,
             proxies=proxies,
@@ -102,12 +102,12 @@ def __init__(self, config: InferenceProvidersModelConfig) -> None:
             config: Configuration object containing model and provider settings
         """
         self.model_info = ModelInfo(
-            model_name=config.model,
+            model_name=config.model_name,
             model_sha="",
             model_dtype=None,
             model_size="",
         )
-        self.model = config.model
+        self.model_name = config.model_name
         self.provider = config.provider
         self.generation_parameters = config.generation_parameters
 
@@ -122,7 +122,7 @@ def __init__(self, config: InferenceProvidersModelConfig) -> None:
             timeout=config.timeout,
             proxies=config.proxies,
         )
-        self._tokenizer = AutoTokenizer.from_pretrained(self.model)
+        self._tokenizer = AutoTokenizer.from_pretrained(self.model_name)
 
     def _encode(self, text: str) -> dict:
         enc = self._tokenizer(text=text)
@@ -148,7 +148,7 @@ async def __call_api(self, prompt: List[dict], num_samples: int) -> Optional[Cha
         for attempt in range(self.API_MAX_RETRY):
             try:
                 kwargs = {
-                    "model": self.model,
+                    "model": self.model_name,
                     "messages": prompt,
                     "n": num_samples,
                 }
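
Note the breaking rename: callers that constructed InferenceProvidersModelConfig(model=...) must now pass model_name. A sketch (hypothetical model id and provider; this assumes the remaining fields keep their defaults):

```python
from lighteval.models.endpoints.inference_providers_model import InferenceProvidersModelConfig

config = InferenceProvidersModelConfig(
    model_name="meta-llama/Llama-3.1-8B-Instruct",  # hypothetical model id
    provider="hf-inference",                        # hypothetical provider name
)
print(config.model_name)  # the field is `model_name` after this commit, not `model`
```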

src/lighteval/tasks/default_prompts.py (+10)

@@ -43,6 +43,16 @@
 # fmt: on
 
 
+def simpleqa(line, task_name: str = None):
+    query = line["problem"]
+    choices = [line["answer"]]
+    gold_index = 0
+
+    return Doc(
+        task_name=task_name, query=query, choices=choices, gold_index=gold_index, specific={**eval(line["metadata"])}
+    )
+
+
 def aime_prompt_fn(line, task_name: str = None):
     # Prompt template adapted from
     # - simple-evals: https://github.com/openai/simple-evals/blob/6e84f4e2aed6b60f6a0c7b8f06bbbf4bfde72e58/math_eval.py#L17
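
A sketch of what this prompt function yields for a made-up dataset row (field names follow the diff; the metadata column is a stringified dict, which the function eval's into Doc.specific — hence the trust_dataset=True flag on the task config below):

```python
from lighteval.tasks.default_prompts import simpleqa

row = {
    "problem": "What are the names of Barack Obama's children?",
    "answer": "Malia Obama and Sasha Obama",
    "metadata": "{'topic': 'Politics'}",  # hypothetical metadata payload
}
doc = simpleqa(row, task_name="lighteval|simpleqa")
print(doc.query)       # the raw question text
print(doc.choices)     # ['Malia Obama and Sasha Obama']
print(doc.gold_index)  # 0
print(doc.specific)    # {'topic': 'Politics'}
```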

src/lighteval/tasks/default_tasks.py (+16)

@@ -14860,6 +14860,22 @@
     trust_dataset=True,
     version=0,
 )
+simpleqa = LightevalTaskConfig(
+    name="simpleqa",
+    suite=["lighteval"],
+    prompt_function=prompt.simpleqa,
+    hf_repo="lighteval/SimpleQA",
+    hf_subset="default",
+    hf_avail_splits=["test"],
+    evaluation_splits=["test"],
+    few_shots_split=None,
+    few_shots_select=None,
+    generation_size=2048,
+    metric=[Metrics.simpleqa_judge],
+    stop_sequence=["\n"],
+    trust_dataset=True,
+    version=0,
+)
 simple_arithmetic_json_bigbench = LightevalTaskConfig(
     name="simple_arithmetic_json",
     suite=["bigbench", "bigbench_json"],
