Skip to content

Commit

Permalink
Update model used in test
Browse files Browse the repository at this point in the history
  • Loading branch information
Winston-503 committed Dec 12, 2024
1 parent a0a21eb commit e31d851
Showing 1 changed file with 7 additions and 7 deletions.
14 changes: 7 additions & 7 deletions tests/integration/llm/test_groq_llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,14 +10,14 @@

class TestGroqLLM(unittest.TestCase):
@staticmethod
def get_gemma():
    """Build a GroqLLM configured to use the gemma2-9b-it model.

    Loads environment variables from .env first (so the Groq API key is
    available), then sets GROQ_LLM_MODEL only for the duration of the
    OsEnviron context while constructing the client from the environment.
    """
    dotenv.load_dotenv()
    with OsEnviron("GROQ_LLM_MODEL", "gemma2-9b-it"):
        return GroqLLM.from_env()

def test_message(self):
messages = [LLMMessage.user_message("what is the capital of France?")]
llm = self.get_gemma_7b()
llm = self.get_gemma()

result = llm.post_chat_request(LLMContext.empty(), messages)
assert "Paris" in result.choices[0]
Expand All @@ -29,15 +29,15 @@ def test_message(self):

def test_consumptions(self):
    """A chat request must report the full set of consumption metrics,
    all keyed by the configured model name."""
    messages = [LLMMessage.user_message("Hello how are you?")]
    llm = self.get_gemma()
    result = llm.post_chat_request(LLMContext.empty(), messages)

    # call, duration, 3 token kinds, 3 cost kinds and 4 groq durations
    assert len(result.consumptions) == 12
    for consumption in result.consumptions:
        assert consumption.kind.startswith("gemma2-9b-it")

def test_max_tokens_param(self):
llm = self.get_gemma_7b()
llm = self.get_gemma()
llm.configuration.temperature.set(0.8)
llm.configuration.max_tokens.set(7)

Expand All @@ -47,7 +47,7 @@ def test_max_tokens_param(self):

def test_json_mode(self):
messages = [LLMMessage.user_message("Output a JSON object with the data about RPG character.")]
llm = self.get_gemma_7b()
llm = self.get_gemma()
result = llm.post_chat_request(LLMContext.empty(), messages, response_format={"type": "json_object"})

data = json.loads(result.first_choice)
Expand Down

0 comments on commit e31d851

Please sign in to comment.