Skip to content

Commit cc60fad

Browse files
committed
🚧(back) manage streaming with the ai service
We want to handle both streaming and non-streaming responses when interacting with the AI backend service.
1 parent 70956c4 commit cc60fad

File tree

5 files changed

+95
-26
lines changed

5 files changed

+95
-26
lines changed

src/backend/core/api/viewsets.py

Lines changed: 15 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1358,8 +1358,20 @@ def ai_proxy(self, request, *args, **kwargs):
13581358
serializer = serializers.AIProxySerializer(data=request.data)
13591359
serializer.is_valid(raise_exception=True)
13601360

1361-
response = AIService().proxy(request.data)
1362-
return drf.response.Response(response, status=drf.status.HTTP_200_OK)
1361+
ai_service = AIService()
1362+
1363+
if settings.AI_STREAM:
1364+
return StreamingHttpResponse(
1365+
ai_service.stream(request.data),
1366+
content_type="text/event-stream",
1367+
status=drf.status.HTTP_200_OK,
1368+
)
1369+
else:
1370+
ai_response = ai_service.proxy(request.data)
1371+
return drf.response.Response(
1372+
ai_response.model_dump(),
1373+
status=drf.status.HTTP_200_OK,
1374+
)
13631375

13641376
@drf.decorators.action(
13651377
detail=True,
@@ -1741,6 +1753,7 @@ def get(self, request):
17411753
"AI_BOT",
17421754
"AI_FEATURE_ENABLED",
17431755
"AI_MODEL",
1756+
"AI_STREAM",
17441757
"COLLABORATION_WS_URL",
17451758
"COLLABORATION_WS_NOT_CONNECTED_READY_ONLY",
17461759
"CRISP_WEBSITE_ID",

src/backend/core/services/ai_services.py

Lines changed: 11 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
"""AI services."""
22

33
import logging
4+
from typing import Generator
45

56
from django.conf import settings
67
from django.core.exceptions import ImproperlyConfigured
@@ -23,9 +24,15 @@ def __init__(self):
2324
raise ImproperlyConfigured("AI configuration not set")
2425
self.client = OpenAI(base_url=settings.AI_BASE_URL, api_key=settings.AI_API_KEY)
2526

26-
def proxy(self, data: dict) -> dict:
27+
def proxy(self, data: dict, stream: bool = False) -> Generator[str, None, None]:
2728
"""Proxy AI API requests to the configured AI provider."""
28-
data["stream"] = False
29+
data["stream"] = stream
30+
return self.client.chat.completions.create(**data)
2931

30-
response = self.client.chat.completions.create(**data)
31-
return response.model_dump()
32+
def stream(self, data: dict) -> Generator[str, None, None]:
33+
"""Stream AI API requests to the configured AI provider."""
34+
stream = self.proxy(data, stream=True)
35+
for chunk in stream:
36+
yield (f"data: {chunk.model_dump_json()}\n\n")
37+
38+
yield ("data: [DONE]\n\n")

src/backend/core/tests/test_api_config.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121
AI_BOT={"name": "Test Bot", "color": "#000000"},
2222
AI_FEATURE_ENABLED=False,
2323
AI_MODEL="test-model",
24+
AI_STREAM=False,
2425
COLLABORATION_WS_URL="http://testcollab/",
2526
COLLABORATION_WS_NOT_CONNECTED_READY_ONLY=True,
2627
CRISP_WEBSITE_ID="123",
@@ -46,6 +47,7 @@ def test_api_config(is_authenticated):
4647
"AI_BOT": {"name": "Test Bot", "color": "#000000"},
4748
"AI_FEATURE_ENABLED": False,
4849
"AI_MODEL": "test-model",
50+
"AI_STREAM": False,
4951
"COLLABORATION_WS_URL": "http://testcollab/",
5052
"COLLABORATION_WS_NOT_CONNECTED_READY_ONLY": True,
5153
"CRISP_WEBSITE_ID": "123",

src/backend/core/tests/test_services_ai_services.py

Lines changed: 64 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -2,10 +2,9 @@
22
Test ai API endpoints in the impress core app.
33
"""
44

5-
from unittest.mock import MagicMock, patch
5+
from unittest.mock import patch
66

77
from django.core.exceptions import ImproperlyConfigured
8-
from django.test.utils import override_settings
98

109
import pytest
1110
from openai import OpenAIError
@@ -15,6 +14,15 @@
1514
pytestmark = pytest.mark.django_db
1615

1716

17+
@pytest.fixture(autouse=True)
18+
def ai_settings(settings):
19+
"""Fixture to set AI settings."""
20+
settings.AI_MODEL = "llama"
21+
settings.AI_BASE_URL = "http://example.com"
22+
settings.AI_API_KEY = "test-key"
23+
settings.AI_FEATURE_ENABLED = True
24+
25+
1826
@pytest.mark.parametrize(
1927
"setting_name, setting_value",
2028
[
@@ -23,22 +31,19 @@
2331
("AI_MODEL", None),
2432
],
2533
)
26-
def test_api_ai_setting_missing(setting_name, setting_value):
34+
def test_services_ai_setting_missing(setting_name, setting_value, settings):
2735
"""Setting should be set"""
36+
setattr(settings, setting_name, setting_value)
2837

29-
with override_settings(**{setting_name: setting_value}):
30-
with pytest.raises(
31-
ImproperlyConfigured,
32-
match="AI configuration not set",
33-
):
34-
AIService()
38+
with pytest.raises(
39+
ImproperlyConfigured,
40+
match="AI configuration not set",
41+
):
42+
AIService()
3543

3644

37-
@override_settings(
38-
AI_BASE_URL="http://example.com", AI_API_KEY="test-key", AI_MODEL="test-model"
39-
)
4045
@patch("openai.resources.chat.completions.Completions.create")
41-
def test_api_ai__client_error(mock_create):
46+
def test_services_ai_proxy_client_error(mock_create):
4247
"""Fail when the client raises an error"""
4348

4449
mock_create.side_effect = OpenAIError("Mocked client error")
@@ -50,15 +55,11 @@ def test_api_ai__client_error(mock_create):
5055
AIService().proxy({"messages": [{"role": "user", "content": "hello"}]})
5156

5257

53-
@override_settings(
54-
AI_BASE_URL="http://example.com", AI_API_KEY="test-key", AI_MODEL="test-model"
55-
)
5658
@patch("openai.resources.chat.completions.Completions.create")
57-
def test_api_ai__success(mock_create):
59+
def test_services_ai_proxy_success(mock_create):
5860
"""The AI request should work as expect when called with valid arguments."""
5961

60-
mock_response = MagicMock()
61-
mock_response.model_dump.return_value = {
62+
mock_create.return_value = {
6263
"id": "chatcmpl-test",
6364
"object": "chat.completion",
6465
"created": 1234567890,
@@ -71,7 +72,6 @@ def test_api_ai__success(mock_create):
7172
}
7273
],
7374
}
74-
mock_create.return_value = mock_response
7575

7676
response = AIService().proxy({"messages": [{"role": "user", "content": "hello"}]})
7777

@@ -89,3 +89,47 @@ def test_api_ai__success(mock_create):
8989
],
9090
}
9191
assert response == expected_response
92+
mock_create.assert_called_once_with(
93+
messages=[{"role": "user", "content": "hello"}], stream=False
94+
)
95+
96+
97+
@patch("openai.resources.chat.completions.Completions.create")
98+
def test_services_ai_proxy_with_stream(mock_create):
99+
"""The AI request should work as expect when called with valid arguments."""
100+
101+
mock_create.return_value = {
102+
"id": "chatcmpl-test",
103+
"object": "chat.completion",
104+
"created": 1234567890,
105+
"model": "test-model",
106+
"choices": [
107+
{
108+
"index": 0,
109+
"message": {"role": "assistant", "content": "Salut"},
110+
"finish_reason": "stop",
111+
}
112+
],
113+
}
114+
115+
response = AIService().proxy(
116+
{"messages": [{"role": "user", "content": "hello"}]}, stream=True
117+
)
118+
119+
expected_response = {
120+
"id": "chatcmpl-test",
121+
"object": "chat.completion",
122+
"created": 1234567890,
123+
"model": "test-model",
124+
"choices": [
125+
{
126+
"index": 0,
127+
"message": {"role": "assistant", "content": "Salut"},
128+
"finish_reason": "stop",
129+
}
130+
],
131+
}
132+
assert response == expected_response
133+
mock_create.assert_called_once_with(
134+
messages=[{"role": "user", "content": "hello"}], stream=True
135+
)

src/backend/impress/settings.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -619,6 +619,9 @@ class Base(Configuration):
619619
default=False, environ_name="AI_FEATURE_ENABLED", environ_prefix=None
620620
)
621621
AI_MODEL = values.Value(None, environ_name="AI_MODEL", environ_prefix=None)
622+
AI_STREAM = values.BooleanValue(
623+
default=False, environ_name="AI_STREAM", environ_prefix=None
624+
)
622625
AI_USER_RATE_THROTTLE_RATES = {
623626
"minute": 3,
624627
"hour": 50,

0 commit comments

Comments
 (0)