From 336dd4fa5d42e6e30312ac2372ad7876c2e007ae Mon Sep 17 00:00:00 2001
From: Emmett McFaralne
Date: Wed, 4 Sep 2024 16:19:29 -0400
Subject: [PATCH] reduced LLM temperatures

---
 thepipe/extract.py | 2 +-
 thepipe/scraper.py | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/thepipe/extract.py b/thepipe/extract.py
index 7323f64..b13b1ca 100644
--- a/thepipe/extract.py
+++ b/thepipe/extract.py
@@ -75,7 +75,7 @@ def extract_from_chunk(chunk: Chunk, chunk_index: int, schema: str, ai_model: st
             model=ai_model,
             messages=messages,
             response_format={"type": "json_object"},
-            temperature=0.2
+            temperature=0.1,
         )
         llm_response = response.choices[0].message.content
         input_tokens = calculate_tokens([chunk])
diff --git a/thepipe/scraper.py b/thepipe/scraper.py
index 0a7418d..b9c25a2 100644
--- a/thepipe/scraper.py
+++ b/thepipe/scraper.py
@@ -190,7 +190,7 @@ def process_page(page_num):
         response = openrouter_client.chat.completions.create(
             model=ai_model,
             messages=messages,
-            temperature=0.2
+            temperature=0.1
         )
         try:
             llm_response = response.choices[0].message.content.strip()
@@ -361,7 +361,7 @@ def ai_extract_webpage_content(url: str, text_only: Optional[bool] = False, verb
        response = openrouter_client.chat.completions.create(
            model=ai_model,
            messages=messages,
-           temperature=0.2
+           temperature=0.1
        )
        llm_response = response.choices[0].message.content
        chunk = Chunk(path=url, texts=[llm_response], images=[stacked_image])
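
Note: for context, below is a minimal sketch of the call pattern this patch tunes, assuming an OpenAI-compatible client (the openai package) pointed at OpenRouter; the model name, API key handling, and message contents are illustrative placeholders, not from the patch itself. Lowering temperature from 0.2 to 0.1 makes token sampling less random, so extraction and scraping runs become more repeatable.

    # sketch only: assumes the openai package's OpenAI client against OpenRouter
    from openai import OpenAI

    openrouter_client = OpenAI(
        base_url="https://openrouter.ai/api/v1",
        api_key="sk-...",  # placeholder key, supply your own
    )

    response = openrouter_client.chat.completions.create(
        model="openai/gpt-4o",  # hypothetical model name for illustration
        messages=[{"role": "user", "content": "Summarize this page."}],
        temperature=0.1,  # lower temperature -> less sampling randomness, more deterministic output
    )
    print(response.choices[0].message.content)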