diff --git a/cookbook/examples/apps/llm_os/app.py b/cookbook/examples/apps/llm_os/app.py
index 10fdf907a0..e9971c8905 100644
--- a/cookbook/examples/apps/llm_os/app.py
+++ b/cookbook/examples/apps/llm_os/app.py
@@ -183,7 +183,7 @@ def main() -> None:
st.session_state["llm_os_run_id"] = None
else:
st.session_state["llm_os_run_id"] = llm_os.new_session()
- except Exception:
+ except Exception as e:
st.session_state["llm_os_run_id"] = None
# Modify the chat history loading to work without storage
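Note: the hunk above binds the caught exception to e, but the visible lines never use it. A minimal sketch of how the bound exception could be surfaced in the Streamlit UI, assuming the app's usual import streamlit as st; the warning message is illustrative and not part of this diff:

    import streamlit as st

    # llm_os is the agent object built earlier in app.py
    try:
        st.session_state["llm_os_run_id"] = llm_os.new_session()
    except Exception as e:
        # surface the failure instead of silently resetting the run id
        st.warning(f"Could not create a new session: {e}")
        st.session_state["llm_os_run_id"] = None
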
diff --git a/cookbook/examples/apps/parallel_world_builder/app.py b/cookbook/examples/apps/parallel_world_builder/app.py
index bd50ea4621..df71ee8f5e 100644
--- a/cookbook/examples/apps/parallel_world_builder/app.py
+++ b/cookbook/examples/apps/parallel_world_builder/app.py
@@ -1,3 +1,5 @@
+from typing import Optional
+
import streamlit as st
from agents import World, get_world_builder
from agno.agent import Agent
diff --git a/cookbook/examples/apps/parallel_world_builder/utils.py b/cookbook/examples/apps/parallel_world_builder/utils.py
index f25ec65d46..df888b707e 100644
--- a/cookbook/examples/apps/parallel_world_builder/utils.py
+++ b/cookbook/examples/apps/parallel_world_builder/utils.py
@@ -91,7 +91,7 @@ def display_tool_calls(tool_calls_container, tools):
st.markdown("**Results:**")
try:
st.json(_content)
- except Exception:
+ except Exception as e:
st.markdown(_content)
if _metrics:
diff --git a/cookbook/examples/apps/sql_agent/agents.py b/cookbook/examples/apps/sql_agent/agents.py
index 1e927c691d..153d128f8a 100644
--- a/cookbook/examples/apps/sql_agent/agents.py
+++ b/cookbook/examples/apps/sql_agent/agents.py
@@ -178,7 +178,7 @@ def get_sql_agent(
- Race strategy evaluation
You combine deep F1 knowledge with advanced SQL expertise to uncover insights from decades of racing data."""),
- instructions=dedent("""\
+ instructions=dedent(f"""\
You are a SQL expert focused on writing precise, efficient queries.
When a user messages you, determine if you need query the database or can respond directly.
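Note: switching dedent(""" to dedent(f""" only matters if the instruction text interpolates a variable. A hypothetical sketch of that pattern; the semantic_model_str name is an assumption, not taken from this diff:

    from textwrap import dedent

    semantic_model_str = '{"tables": ["races", "drivers", "results"]}'  # hypothetical schema description
    instructions = dedent(f"""\
        You are a SQL expert focused on writing precise, efficient queries.
        The tables you can query are described here: {semantic_model_str}
        When a user messages you, determine if you need to query the database or can respond directly.
        """)
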
diff --git a/cookbook/examples/apps/sql_agent/utils.py b/cookbook/examples/apps/sql_agent/utils.py
index 3054323a7f..fbb531d0ae 100644
--- a/cookbook/examples/apps/sql_agent/utils.py
+++ b/cookbook/examples/apps/sql_agent/utils.py
@@ -81,7 +81,7 @@ def display_tool_calls(tool_calls_container, tools):
st.markdown("**Results:**")
try:
st.json(_content)
- except Exception:
+ except Exception as e:
st.markdown(_content)
if _metrics:
diff --git a/cookbook/models/anthropic/memory.py b/cookbook/models/anthropic/memory.py
index 5159fef5db..9821634fdc 100644
--- a/cookbook/models/anthropic/memory.py
+++ b/cookbook/models/anthropic/memory.py
@@ -10,6 +10,7 @@
from agno.memory.db.postgres import PgMemoryDb
from agno.models.anthropic import Claude
from agno.storage.agent.postgres import PostgresAgentStorage
+from rich.pretty import pprint
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
agent = Agent(
diff --git a/cookbook/models/huggingface/llama_essay_writer.py b/cookbook/models/huggingface/llama_essay_writer.py
index 00cd885718..0d22fe2192 100644
--- a/cookbook/models/huggingface/llama_essay_writer.py
+++ b/cookbook/models/huggingface/llama_essay_writer.py
@@ -1,3 +1,6 @@
+import os
+from getpass import getpass
+
from agno.agent import Agent
from agno.models.huggingface import HuggingFace
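Note: the new os/getpass imports suggest the script now prompts for a HuggingFace token when it is not already set. A minimal sketch of that pattern; the HF_TOKEN variable name is an assumption:

    import os
    from getpass import getpass

    # prompt once and export the token so the HuggingFace model can read it from the environment
    if not os.getenv("HF_TOKEN"):
        os.environ["HF_TOKEN"] = getpass("Enter your HuggingFace access token: ")
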
diff --git a/cookbook/models/mistral/basic_stream.py b/cookbook/models/mistral/basic_stream.py
index 90c2c4a2e4..535d89f6b8 100644
--- a/cookbook/models/mistral/basic_stream.py
+++ b/cookbook/models/mistral/basic_stream.py
@@ -1,3 +1,5 @@
+import os
+
from agno.agent import Agent, RunResponse # noqa
from agno.models.mistral import MistralChat
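Note: the os import added to the Mistral (and NVIDIA) examples is the usual read-the-API-key-from-the-environment pattern. A sketch, assuming MistralChat accepts an api_key argument; the environment variable name and model id are illustrative:

    import os

    from agno.agent import Agent, RunResponse  # noqa
    from agno.models.mistral import MistralChat

    api_key = os.getenv("MISTRAL_API_KEY")  # assumed variable name
    agent = Agent(model=MistralChat(id="mistral-large-latest", api_key=api_key), markdown=True)
    agent.print_response("Share a 2 sentence horror story", stream=True)
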
diff --git a/cookbook/models/mistral/structured_output.py b/cookbook/models/mistral/structured_output.py
index 9d3ccef4d3..d1dcf3610f 100644
--- a/cookbook/models/mistral/structured_output.py
+++ b/cookbook/models/mistral/structured_output.py
@@ -1,3 +1,4 @@
+import os
from typing import List
from agno.agent import Agent, RunResponse # noqa
diff --git a/cookbook/models/nvidia/tool_use.py b/cookbook/models/nvidia/tool_use.py
index 4d58b7d353..aff7dcadad 100644
--- a/cookbook/models/nvidia/tool_use.py
+++ b/cookbook/models/nvidia/tool_use.py
@@ -1,5 +1,7 @@
"""Run `pip install duckduckgo-search` to install dependencies."""
+import os
+
from agno.agent import Agent
from agno.models.nvidia import Nvidia
from agno.tools.duckduckgo import DuckDuckGoTools
diff --git a/cookbook/models/ollama/demo_phi4.py b/cookbook/models/ollama/demo_phi4.py
index da6f2328f3..628c4ad338 100644
--- a/cookbook/models/ollama/demo_phi4.py
+++ b/cookbook/models/ollama/demo_phi4.py
@@ -1,5 +1,6 @@
from agno.agent import Agent, RunResponse # noqa
from agno.models.ollama import Ollama
+from agno.tools.duckduckgo import DuckDuckGoTools
agent = Agent(model=Ollama(id="phi4"), markdown=True)
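Note: the Ollama demo now imports DuckDuckGoTools, but the agent line shown here never passes it. If the intent is to give the phi4 agent web search, the wiring would look roughly like this; the tools= argument is an assumption, not visible in the hunk:

    from agno.agent import Agent, RunResponse  # noqa
    from agno.models.ollama import Ollama
    from agno.tools.duckduckgo import DuckDuckGoTools

    agent = Agent(model=Ollama(id="phi4"), tools=[DuckDuckGoTools()], markdown=True)
    agent.print_response("What is happening in France today?", stream=True)
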
diff --git a/cookbook/models/openai/audio_input_agent.py b/cookbook/models/openai/audio_input_agent.py
index 0de51acfc9..a1132c6bb9 100644
--- a/cookbook/models/openai/audio_input_agent.py
+++ b/cookbook/models/openai/audio_input_agent.py
@@ -1,3 +1,5 @@
+import base64
+
import requests
from agno.agent import Agent, RunResponse # noqa
from agno.media import Audio
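Note: the audio examples now import base64, which usually means the downloaded audio is base64-encoded before being handed to the model. A sketch under that assumption; the URL and the Audio(...) parameter names are illustrative:

    import base64

    import requests
    from agno.media import Audio

    wav_bytes = requests.get("https://example.com/sample.wav").content  # placeholder URL
    encoded_audio = base64.b64encode(wav_bytes).decode("utf-8")
    audio_input = Audio(content=encoded_audio, format="wav")  # parameter names assumed
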
diff --git a/cookbook/models/openai/audio_input_output_output.py b/cookbook/models/openai/audio_input_output_output.py
index df5b059f9f..24e14e8e37 100644
--- a/cookbook/models/openai/audio_input_output_output.py
+++ b/cookbook/models/openai/audio_input_output_output.py
@@ -1,3 +1,5 @@
+import base64
+
import requests
from agno.agent import Agent
from agno.media import Audio
diff --git a/evals/performance/instantiation.py b/evals/performance/instantiation.py
index bd131aa744..b73d7c9f8b 100644
--- a/evals/performance/instantiation.py
+++ b/evals/performance/instantiation.py
@@ -3,10 +3,8 @@
from agno.agent import Agent
from agno.eval.perf import PerfEval
-
def instantiate_agent():
- return Agent(system_message="Be concise, reply with one sentence.")
-
+ return Agent(system_message='Be concise, reply with one sentence.')
instantiation_perf = PerfEval(func=instantiate_agent, num_iterations=1000)
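Note: after constructing a PerfEval, these eval scripts execute it under an if __name__ == "__main__": guard. A sketch of the full file, assuming PerfEval exposes a run() method with a print_results flag as in the other agno eval examples:

    from agno.agent import Agent
    from agno.eval.perf import PerfEval

    def instantiate_agent():
        return Agent(system_message='Be concise, reply with one sentence.')

    instantiation_perf = PerfEval(func=instantiate_agent, num_iterations=1000)

    if __name__ == "__main__":
        instantiation_perf.run(print_results=True)  # method name and flag assumed from similar eval scripts
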
diff --git a/evals/performance/instantiation_with_tool.py b/evals/performance/instantiation_with_tool.py
index b8e82a1f8d..45ca8a2c9b 100644
--- a/evals/performance/instantiation_with_tool.py
+++ b/evals/performance/instantiation_with_tool.py
@@ -6,7 +6,6 @@
from agno.models.openai import OpenAIChat
from agno.eval.perf import PerfEval
-
def get_weather(city: Literal["nyc", "sf"]):
"""Use this to get weather information."""
if city == "nyc":
@@ -16,13 +15,10 @@ def get_weather(city: Literal["nyc", "sf"]):
else:
raise AssertionError("Unknown city")
-
tools = [get_weather]
-
def instantiate_agent():
- return Agent(model=OpenAIChat(id="gpt-4o"), tools=tools)
-
+ return Agent(model=OpenAIChat(id='gpt-4o'), tools=tools)
instantiation_perf = PerfEval(func=instantiate_agent, num_iterations=1000)
diff --git a/evals/performance/other/crewai_instantiation.py b/evals/performance/other/crewai_instantiation.py
index fe9498ed10..d1e3d4e741 100644
--- a/evals/performance/other/crewai_instantiation.py
+++ b/evals/performance/other/crewai_instantiation.py
@@ -1,5 +1,4 @@
"""Run `pip install openai memory_profiler crewai crewai[tools]` to install dependencies."""
-
from typing import Literal
from crewai.agent import Agent
@@ -17,19 +16,14 @@ def get_weather(city: Literal["nyc", "sf"]):
else:
raise AssertionError("Unknown city")
-
tools = [get_weather]
-
def instantiate_agent():
- return Agent(
- llm="gpt-4o",
- role="Test Agent",
- goal="Be concise, reply with one sentence.",
- tools=tools,
- backstory="Test",
- )
-
+ return Agent(llm='gpt-4o',
+ role='Test Agent',
+ goal='Be concise, reply with one sentence.',
+ tools=tools,
+ backstory='Test')
crew_instantiation = PerfEval(func=instantiate_agent, num_iterations=1000)
diff --git a/evals/performance/other/langgraph_instantiation.py b/evals/performance/other/langgraph_instantiation.py
index a7ff153671..6c550f5b13 100644
--- a/evals/performance/other/langgraph_instantiation.py
+++ b/evals/performance/other/langgraph_instantiation.py
@@ -8,7 +8,6 @@
from agno.eval.perf import PerfEval
-
@tool
def get_weather(city: Literal["nyc", "sf"]):
"""Use this to get weather information."""
@@ -19,14 +18,11 @@ def get_weather(city: Literal["nyc", "sf"]):
else:
raise AssertionError("Unknown city")
-
tools = [get_weather]
-
def instantiate_agent():
return create_react_agent(model=ChatOpenAI(model="gpt-4o"), tools=tools)
-
langgraph_instantiation = PerfEval(func=instantiate_agent, num_iterations=1000)
if __name__ == "__main__":
diff --git a/evals/performance/other/pydantic_ai_instantiation.py b/evals/performance/other/pydantic_ai_instantiation.py
index 5872f97dcb..85a6839b86 100644
--- a/evals/performance/other/pydantic_ai_instantiation.py
+++ b/evals/performance/other/pydantic_ai_instantiation.py
@@ -1,5 +1,4 @@
"""Run `pip install openai pydantic-ai` to install dependencies."""
-
from typing import Literal
from pydantic_ai import Agent
@@ -7,7 +6,7 @@
def instantiate_agent():
- agent = Agent("openai:gpt-4o", system_prompt="Be concise, reply with one sentence.")
+ agent = Agent('openai:gpt-4o', system_prompt='Be concise, reply with one sentence.')
@agent.tool_plain
def get_weather(city: Literal["nyc", "sf"]):
@@ -21,7 +20,6 @@ def get_weather(city: Literal["nyc", "sf"]):
return agent
-
pydantic_instantiation = PerfEval(func=instantiate_agent, num_iterations=1000)
if __name__ == "__main__":
diff --git a/evals/performance/other/smolagents_instantiation.py b/evals/performance/other/smolagents_instantiation.py
index 38dc1113b4..ee9e4711b6 100644
--- a/evals/performance/other/smolagents_instantiation.py
+++ b/evals/performance/other/smolagents_instantiation.py
@@ -1,4 +1,5 @@
"""Run `pip install memory_profiler smolagents` to install dependencies."""
+from typing import Literal
from agno.eval.perf import PerfEval
from smolagents import ToolCallingAgent, HfApiModel, Tool
@@ -26,12 +27,9 @@ def forward(self, city: str):
raise AssertionError("Unknown city")
-def instantiate_agent():
- return ToolCallingAgent(
- tools=[WeatherTool()],
- model=HfApiModel(model_id="meta-llama/Llama-3.3-70B-Instruct"),
- )
+def instantiate_agent():
+ return ToolCallingAgent(tools=[WeatherTool()], model=HfApiModel(model_id="meta-llama/Llama-3.3-70B-Instruct"))
smolagents_instantiation = PerfEval(func=instantiate_agent, num_iterations=1000)
diff --git a/evals/performance/simple_response.py b/evals/performance/simple_response.py
index ad534f22a2..851697b1c3 100644
--- a/evals/performance/simple_response.py
+++ b/evals/performance/simple_response.py
@@ -4,17 +4,12 @@
from agno.models.openai import OpenAIChat
from agno.eval.perf import PerfEval
-
def simple_response():
- agent = Agent(
- model=OpenAIChat(id="gpt-4o-mini"),
- system_message="Be concise, reply with one sentence.",
- )
- response = agent.run("What is the capital of France?")
+ agent = Agent(model=OpenAIChat(id='gpt-4o-mini'), system_message='Be concise, reply with one sentence.')
+ response = agent.run('What is the capital of France?')
print(response.content)
return response
-
simple_response_perf = PerfEval(func=simple_response, num_iterations=10)
if __name__ == "__main__":
diff --git a/evals/reliability/multiple_tool_calls/openai/calculator.py b/evals/reliability/multiple_tool_calls/openai/calculator.py
index df85b40e79..39daba9340 100644
--- a/evals/reliability/multiple_tool_calls/openai/calculator.py
+++ b/evals/reliability/multiple_tool_calls/openai/calculator.py
@@ -8,13 +8,12 @@
def multiply_and_exponentiate():
- agent = Agent(
+
+ agent=Agent(
model=OpenAIChat(id="gpt-4o-mini"),
tools=[CalculatorTools(add=True, multiply=True, exponentiate=True)],
)
- response: RunResponse = agent.run(
- "What is 10*5 then to the power of 2? do it step by step"
- )
+ response: RunResponse = agent.run("What is 10*5 then to the power of 2? do it step by step")
evaluation = ReliabilityEval(
agent_response=response,
expected_tool_calls=["multiply", "exponentiate"],
diff --git a/evals/reliability/single_tool_calls/openai/calculator.py b/evals/reliability/single_tool_calls/openai/calculator.py
index 26c56cebb0..b0d71adfc5 100644
--- a/evals/reliability/single_tool_calls/openai/calculator.py
+++ b/evals/reliability/single_tool_calls/openai/calculator.py
@@ -8,7 +8,8 @@
def factorial():
- agent = Agent(
+
+ agent=Agent(
model=OpenAIChat(id="gpt-4o-mini"),
tools=[CalculatorTools(factorial=True)],
)