From 9045e1c5f570fab3e123a8cf94bb4ca2bafb1e98 Mon Sep 17 00:00:00 2001
From: Rishabh Srivastava
Date: Fri, 11 Aug 2023 22:14:44 +0000
Subject: [PATCH] added latency of hf evaluator

---
 eval/hf_runner.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/eval/hf_runner.py b/eval/hf_runner.py
index 876c7ad..4817dcf 100644
--- a/eval/hf_runner.py
+++ b/eval/hf_runner.py
@@ -5,6 +5,7 @@ from utils.pruning import prune_metadata_str
 from tqdm import tqdm
 from psycopg2.extensions import QueryCanceledError
+from time import time
 
 
 def prepare_questions_df(questions_file, num_questions):
     question_query_df = pd.read_csv(questions_file, nrows=num_questions)
@@ -69,6 +70,7 @@ def run_hf_eval(
     with tqdm(total=len(df)) as pbar:
         for row in df.to_dict("records"):
             total_tried += 1
+            start_time = time()
             generated_query = pipe(
                 row['prompt'],
                 max_new_tokens=600,
@@ -78,8 +80,10 @@
                 eos_token_id=eos_token_id,
                 pad_token_id=eos_token_id,
             )[0]['generated_text'].split("```sql")[-1].split(";")[0].strip()
+            end_time = time()
 
             row["generated_query"] = generated_query
+            row["latency_seconds"] = end_time - start_time
             golden_query = row["query"]
             db_name = row["db_name"]
             question = row["question"]