diff --git a/bigcode_eval/tasks/humanevalpack.py b/bigcode_eval/tasks/humanevalpack.py
index 64a5f673f..6f7fe2741 100644
--- a/bigcode_eval/tasks/humanevalpack.py
+++ b/bigcode_eval/tasks/humanevalpack.py
@@ -245,6 +245,8 @@ def get_prompt(self, prompt_base, instruction, context=None):
             prompt = f"Source: user\n\n {inp.strip()} Source: assistant\nDestination: user \n\n{prompt_base}"
         elif self.prompt == "aurora-m":
             prompt = f'### Instruction:\n{inp}\n### Response:\n{prompt_base}'
+        elif self.prompt == "llama3":
+            prompt = f"<|start_header_id|>user<|end_header_id|>\n\n{inp}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n{prompt_base}"
         else:
             raise ValueError(f"The --prompt argument {self.prompt} wasn't provided or isn't supported")
         # Strip off the final \n to make the tokens more natural