From e2b3a20f438f6f3af5428d0831162b590df1037d Mon Sep 17 00:00:00 2001
From: Mayk Caldas
Date: Wed, 29 Jan 2025 11:59:51 -0800
Subject: [PATCH] Fixed tool_choice injection in test_sucessful_memory_agent

---
 tests/test_agents.py  | 2 +-
 tests/test_paperqa.py | 4 +---
 2 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/tests/test_agents.py b/tests/test_agents.py
index af94cf026..bc0eb8871 100644
--- a/tests/test_agents.py
+++ b/tests/test_agents.py
@@ -353,7 +353,7 @@ async def llm_model_call(*args, **kwargs):
         # https://docs.anthropic.com/en/docs/build-with-claude/tool-use#chain-of-thought
         # kwargs.pop("tool_choice", LiteLLMModel.TOOL_CHOICE_REQUIRED)
         # tool_choice is now a arg, not a kwarg
-        args.pop()  # removing it from args  # ASK: I accept ideas on how to handle this better
+        args = args[:-1]  # removing last element from args
         return await orig_llm_model_call(*args, tool_choice="auto", **kwargs)  # type: ignore[misc]
 
     with patch.object(LiteLLMModel, "call", side_effect=llm_model_call, autospec=True):
diff --git a/tests/test_paperqa.py b/tests/test_paperqa.py
index b09b23313..ac73e8750 100644
--- a/tests/test_paperqa.py
+++ b/tests/test_paperqa.py
@@ -427,9 +427,7 @@ def test_llm_parse_json_newlines() -> None:
 
 @pytest.mark.asyncio
 async def test_chain_completion() -> None:
-    s = Settings(
-        llm="babbage-002", temperature=0.2
-    )
+    s = Settings(llm="babbage-002", temperature=0.2)
     outputs = []
 
     def accum(x) -> None:
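
Note (commentary, not part of the patch): the removed line called args.pop(), but the *args
parameter inside a function is collected as a tuple, which has no pop method and cannot be
mutated; slicing off the trailing positional argument returns a new tuple instead. A minimal
standalone sketch of the pattern, where orig_call is a hypothetical stand-in for the wrapped
orig_llm_model_call and the argument shapes are assumptions for illustration only:

import asyncio


async def orig_call(messages, tool_choice="required"):
    # Hypothetical stand-in for the original call being wrapped in the test.
    return f"messages={messages!r}, tool_choice={tool_choice!r}"


async def patched_call(*args, **kwargs):
    # args arrives as a tuple, so args.pop() would raise AttributeError;
    # args[:-1] drops the trailing tool_choice positional without mutation.
    args = args[:-1]
    return await orig_call(*args, tool_choice="auto", **kwargs)


# The trailing "required" positional is stripped and replaced by tool_choice="auto".
print(asyncio.run(patched_call(["hello"], "required")))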