feat: DIA-1362: Add custom LLM endpoint testing scripts #210

Draft · wants to merge 3 commits into master
2 changes: 2 additions & 0 deletions adala/runtimes/_litellm.py
@@ -134,6 +134,7 @@ class LiteLLMChatRuntime(Runtime):
with the provider of your specified model.
base_url (Optional[str]): Base URL, optional. If provided, will be used to talk to an OpenAI-compatible API provider besides OpenAI.
api_version (Optional[str]): API version, optional except for Azure.
extra_headers (Optional[Dict[str, str]]): Extra headers to be sent with the request.
timeout: Timeout in seconds.
"""

@@ -303,6 +304,7 @@ class AsyncLiteLLMChatRuntime(AsyncRuntime):
with the provider of your specified model.
base_url (Optional[str]): Base URL, optional. If provided, will be used to talk to an OpenAI-compatible API provider besides OpenAI.
api_version (Optional[str]): API version, optional except for Azure.
extra_headers (Optional[Dict[str, str]]): Extra headers to be sent with the request.
timeout: Timeout in seconds.
"""

75 changes: 75 additions & 0 deletions tests/manual_test_scripts/auth_proxy_server.py
@@ -0,0 +1,75 @@
import os
import httpx
from loguru import logger
from fastapi import FastAPI, Request, HTTPException
from fastapi.responses import StreamingResponse

"""
This script is a simple HTTP proxy server that forwards all incoming requests to a target URL.
It requires the TARGET_URL environment variable to be set to that target, and the
EXPECTED_HEADER environment variable to be set to the expected Authorization header value.

To install the dependencies, run the following command:
```
pip install fastapi httpx loguru
```

To run the server:
```
TARGET_URL=https://example.com EXPECTED_HEADER=secret uvicorn auth_proxy_server:app
```

This will forward all requests to `https://example.com` and require the `Authorization` header to equal `secret`.
"""
app = FastAPI()

TARGET_URL = os.getenv('TARGET_URL')
Review comment: Add a check to ensure TARGET_URL is set. If not, raise an exception or log an error to prevent runtime issues.

EXPECTED_HEADER = os.getenv('EXPECTED_HEADER')


async def proxy_request(request: Request):
    # Check for the authentication header
    auth_header = request.headers.get("Authorization")
    if not auth_header:
        raise HTTPException(status_code=401, detail="Authorization header missing")
    if EXPECTED_HEADER and auth_header != EXPECTED_HEADER:
        raise HTTPException(
            status_code=403,
            detail=f"Invalid Authorization header."
            f" Provided: {auth_header}. Required: {EXPECTED_HEADER}",
        )

    # Prepare the URL for the proxied request
    path = request.url.path
    if request.url.query:
        path += f"?{request.url.query}"
    url = f"{TARGET_URL}{path}"

    # Prepare headers, rewriting Host to match the target
    # (assumes TARGET_URL contains no path component)
    headers = dict(request.headers)
    headers["host"] = TARGET_URL.split("://")[1]

    logger.info(f"Forwarding request to {url}, headers: {headers}")

    # Forward the request with httpx
    async with httpx.AsyncClient(timeout=60) as client:
        response = await client.request(
            method=request.method,
            url=url,
            headers=headers,
            content=await request.body(),
        )

    # Relay the response back to the client; client.request has already
    # buffered the body, so iterating it after the client closes is safe.
    return StreamingResponse(
        response.aiter_bytes(),
        status_code=response.status_code,
        headers=response.headers,
    )


@app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE", "OPTIONS", "HEAD", "PATCH", "TRACE"])
async def catch_all(request: Request, path: str):
    return await proxy_request(request)


if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8010)
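Once the proxy is running, it can be exercised with any OpenAI-compatible client. A quick smoke test, assuming the proxy is on localhost:8010 with TARGET_URL=http://localhost:11434 (an Ollama server) and EXPECTED_HEADER=SECRET-TEST-TOKEN; all values are examples:

```python
import httpx

# Example request through the proxy; endpoint, model, and token are
# assumptions matching the env values shown above.
resp = httpx.post(
    "http://localhost:8010/v1/chat/completions",
    headers={"Authorization": "SECRET-TEST-TOKEN"},
    json={
        "model": "llama3.1",
        "messages": [{"role": "user", "content": "Classify: I'm happy"}],
    },
)
print(resp.status_code, resp.json())
```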
47 changes: 47 additions & 0 deletions tests/test_agent_custom_model.py
@@ -0,0 +1,47 @@
import pytest
import pandas as pd
import responses
import asyncio


@responses.activate
def test_agent_with_custom_base_url():
    from adala.agents import Agent  # type: ignore

    agent_json = {
        "skills": [
            {
                "type": "ClassificationSkill",
                "name": "ClassificationResult",
                "instructions": "",
                "input_template": "Classify sentiment of the input text: {input}",
                "field_schema": {
                    "output": {
                        "type": "string",
                        "enum": ["positive", "negative", "neutral"],
                    }
                },
            }
        ],
        "runtimes": {
            "default": {
                "type": "AsyncLiteLLMChatRuntime",
                "api_version": "v1",
                "max_tokens": 4096,
                "model": "openai/llama3.1",
                "temperature": 0,
                "batch_size": 100,
                "timeout": 120,
                "verbose": False,
                "base_url": "http://localhost:11434/v1/",
                "api_key": "ollama",
                "auth_token": "SECRET-TEST-TOKEN",
            }
        },
    }
    agent = Agent(**agent_json)

    df = pd.DataFrame([["I'm happy"], ["I'm sad"], ["I'm neutral"]], columns=["input"])

    results = asyncio.run(agent.arun(input=df))
    print(results)
Review comment: Add assertions to verify the expected behavior of the agent. Currently, the test only prints results without checking for correctness.
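A minimal sketch of what those assertions could look like, assuming `arun` returns a DataFrame whose `output` column holds the predicted labels (as configured in the skill's `field_schema`):

```python
# Sketch of the assertions suggested above; assumes `results` is a
# DataFrame with one row per input and an "output" label column.
assert len(results) == len(df)
assert set(results["output"]).issubset({"positive", "negative", "neutral"})
```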
