Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add LLM Integration Tests #603

Open
wants to merge 16 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
16 commits
Select commit Hold shift + click to select a range
6c3c203
Add integration tests for additional LLM providers
devin-ai-integration[bot] Dec 24, 2024
5b39eda
Fix style issues in test files
devin-ai-integration[bot] Dec 24, 2024
2a5e4fa
Remove extra blank lines between imports and decorators
devin-ai-integration[bot] Dec 24, 2024
e64d0e6
Fix style: Add single blank line between imports and decorators
devin-ai-integration[bot] Dec 24, 2024
d70231f
Fix style: Remove double blank lines between imports and decorators
devin-ai-integration[bot] Dec 24, 2024
36c5199
Fix style: Remove trailing whitespace and fix blank lines
devin-ai-integration[bot] Dec 24, 2024
fb460b3
Fix style: Replace print statements with pass in Mistral integration …
devin-ai-integration[bot] Dec 24, 2024
19f2aaa
Fix style: Remove extra blank line in test_anthropic_integration.py
devin-ai-integration[bot] Dec 24, 2024
78c0154
Fix style: Remove extra blank lines in remaining test files
devin-ai-integration[bot] Dec 24, 2024
d94c01a
Fix style: Remove extra blank lines between imports and decorators
devin-ai-integration[bot] Dec 24, 2024
4439cff
style: Apply ruff-format changes to maintain consistent spacing
devin-ai-integration[bot] Dec 24, 2024
83b840a
ci: Add AGENTOPS_API_KEY to top-level workflow environment
devin-ai-integration[bot] Dec 24, 2024
b21b239
revert: Restore original OpenAI test file to maintain existing test b…
devin-ai-integration[bot] Dec 24, 2024
dff9ca6
fix: Update Mistral provider to use new API methods (create, create_a…
devin-ai-integration[bot] Dec 24, 2024
f2f6186
style: Apply ruff-format changes and remove try-except blocks
devin-ai-integration[bot] Dec 24, 2024
fc2438e
fix: Remove try-except blocks and fix formatting in Mistral provider
devin-ai-integration[bot] Dec 24, 2024
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 13 additions & 0 deletions .github/workflows/python-testing.yml
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,13 @@ jobs:
runs-on: ubuntu-latest
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
AGENTOPS_API_KEY: ${{ secrets.AGENTOPS_API_KEY }}
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
COHERE_API_KEY: ${{ secrets.COHERE_API_KEY }}
GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
LITELLM_API_KEY: ${{ secrets.LITELLM_API_KEY }}
MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
AI21_API_KEY: ${{ secrets.AI21_API_KEY }}

strategy:
matrix:
Expand All @@ -42,3 +49,9 @@ jobs:
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
AGENTOPS_API_KEY: ${{ secrets.AGENTOPS_API_KEY }}
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
COHERE_API_KEY: ${{ secrets.COHERE_API_KEY }}
GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
LITELLM_API_KEY: ${{ secrets.LITELLM_API_KEY }}
MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
AI21_API_KEY: ${{ secrets.AI21_API_KEY }}
41 changes: 41 additions & 0 deletions agentops/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,47 @@
except ModuleNotFoundError:
pass

from .llms.providers import (
ai21,
anthropic,
cohere,
groq,
litellm,
mistral,
openai,
)

# Initialize providers when imported
if "ai21" in sys.modules:
from ai21 import AI21Client

ai21.AI21Provider(client=AI21Client()).override()

if "anthropic" in sys.modules:
from anthropic import Anthropic

anthropic.AnthropicProvider(client=Anthropic()).override()

if "cohere" in sys.modules:
import cohere as cohere_sdk

cohere.CohereProvider(client=cohere_sdk).override()

if "groq" in sys.modules:
from groq import Groq

groq.GroqProvider(client=Groq()).override()

if "litellm" in sys.modules:
import litellm as litellm_sdk

litellm.LiteLLMProvider(client=litellm_sdk).override()

if "mistralai" in sys.modules:
from mistralai import Mistral

mistral.MistralProvider(client=Mistral()).override()

if "autogen" in sys.modules:
Client().configure(instrument_llm_calls=False)
Client()._initialize_autogen_logger()
Expand Down
81 changes: 16 additions & 65 deletions agentops/llms/providers/ai21.py
Original file line number Diff line number Diff line change
Expand Up @@ -145,98 +145,49 @@ async def async_generator():
def override(self):
self._override_completion()
self._override_completion_async()
self._override_answer()
self._override_answer_async()

def _override_completion(self):
from ai21.clients.studio.resources.chat import ChatCompletions
from ai21.clients.studio.ai21_client import AI21Client

global original_create
original_create = ChatCompletions.create
# Store original method
self.original_create = AI21Client.chat.completions.create

def patched_function(*args, **kwargs):
# Call the original function with its original arguments
init_timestamp = get_ISO_time()
session = kwargs.get("session", None)
if "session" in kwargs.keys():
del kwargs["session"]
result = original_create(*args, **kwargs)
result = self.original_create(*args, **kwargs)
return self.handle_response(result, kwargs, init_timestamp, session=session)

# Override the original method with the patched one
ChatCompletions.create = patched_function
AI21Client.chat.completions.create = patched_function

def _override_completion_async(self):
from ai21.clients.studio.resources.chat import AsyncChatCompletions
from ai21.clients.studio.async_ai21_client import AsyncAI21Client

global original_create_async
original_create_async = AsyncChatCompletions.create
# Store original method
self.original_create_async = AsyncAI21Client.chat.completions.create

async def patched_function(*args, **kwargs):
# Call the original function with its original arguments
init_timestamp = get_ISO_time()
session = kwargs.get("session", None)
if "session" in kwargs.keys():
del kwargs["session"]
result = await original_create_async(*args, **kwargs)
result = await self.original_create_async(*args, **kwargs)
return self.handle_response(result, kwargs, init_timestamp, session=session)

# Override the original method with the patched one
AsyncChatCompletions.create = patched_function
AsyncAI21Client.chat.completions.create = patched_function

def _override_answer(self):
from ai21.clients.studio.resources.studio_answer import StudioAnswer

global original_answer
original_answer = StudioAnswer.create

def patched_function(*args, **kwargs):
# Call the original function with its original arguments
init_timestamp = get_ISO_time()

session = kwargs.get("session", None)
if "session" in kwargs.keys():
del kwargs["session"]
result = original_answer(*args, **kwargs)
return self.handle_response(result, kwargs, init_timestamp, session=session)

StudioAnswer.create = patched_function

def _override_answer_async(self):
from ai21.clients.studio.resources.studio_answer import AsyncStudioAnswer

global original_answer_async
original_answer_async = AsyncStudioAnswer.create

async def patched_function(*args, **kwargs):
# Call the original function with its original arguments
init_timestamp = get_ISO_time()

session = kwargs.get("session", None)
if "session" in kwargs.keys():
del kwargs["session"]
result = await original_answer_async(*args, **kwargs)
return self.handle_response(result, kwargs, init_timestamp, session=session)

AsyncStudioAnswer.create = patched_function
# Answer functionality removed as it's not available in current version

def undo_override(self):
if (
self.original_create is not None
and self.original_create_async is not None
and self.original_answer is not None
and self.original_answer_async is not None
):
from ai21.clients.studio.resources.chat import (
ChatCompletions,
AsyncChatCompletions,
)
from ai21.clients.studio.resources.studio_answer import (
StudioAnswer,
AsyncStudioAnswer,
)
if self.original_create is not None and self.original_create_async is not None:
from ai21.clients.studio.ai21_client import AI21Client
from ai21.clients.studio.async_ai21_client import AsyncAI21Client

ChatCompletions.create = self.original_create
AsyncChatCompletions.create = self.original_create_async
StudioAnswer.create = self.original_answer
AsyncStudioAnswer.create = self.original_answer_async
AI21Client.chat.completions.create = self.original_create
AsyncAI21Client.chat.completions.create = self.original_create_async
8 changes: 7 additions & 1 deletion agentops/llms/providers/groq.py
Original file line number Diff line number Diff line change
Expand Up @@ -168,8 +168,14 @@ def _override_async_chat(self):
async def patched_function(*args, **kwargs):
# Call the original function with its original arguments
init_timestamp = get_ISO_time()
session = kwargs.get("session", None)
if "session" in kwargs.keys():
del kwargs["session"]
areibman marked this conversation as resolved.
Show resolved Hide resolved
result = await self.original_async_create(*args, **kwargs)
return self.handle_response(result, kwargs, init_timestamp)
# Convert the result to a coroutine if it's not already awaitable
if not hasattr(result, "__await__"):
result = completions.ChatCompletion.model_validate(result)
areibman marked this conversation as resolved.
Show resolved Hide resolved
return self.handle_response(result, kwargs, init_timestamp, session=session)

# Override the original method with the patched one
completions.AsyncCompletions.create = patched_function
44 changes: 37 additions & 7 deletions agentops/llms/providers/litellm.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import inspect
import pprint
from typing import Optional

Expand Down Expand Up @@ -113,13 +114,42 @@ def generator():

# litellm uses a CustomStreamWrapper
if isinstance(response, CustomStreamWrapper):

def generator():
for chunk in response:
handle_stream_chunk(chunk)
yield chunk

return generator()
if inspect.isasyncgen(response):

async def async_generator():
try:
async for chunk in response:
handle_stream_chunk(chunk)
yield chunk
except Exception as e:
logger.warning(f"Error in async stream: {e}")
raise

return async_generator()
elif hasattr(response, "__aiter__"):

async def async_generator():
try:
async for chunk in response:
handle_stream_chunk(chunk)
yield chunk
except Exception as e:
logger.warning(f"Error in async stream: {e}")
raise

return async_generator()
else:

def generator():
try:
for chunk in response:
handle_stream_chunk(chunk)
yield chunk
except Exception as e:
logger.warning(f"Error in sync stream: {e}")
raise

return generator()

# For asynchronous AsyncStream
elif isinstance(response, AsyncStream):
Expand Down
Loading
Loading