-
Notifications
You must be signed in to change notification settings - Fork 92
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
fix: add llm coverage and fix azure openai calling
- Loading branch information
Showing
2 changed files
with
135 additions
and
17 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,116 @@ | ||
import pytest | ||
import numpy as np | ||
from unittest.mock import AsyncMock, Mock, patch | ||
from nano_graphrag import _llm | ||
|
||
|
||
@pytest.fixture
def mock_openai_client():
    """Replace ``AsyncOpenAI`` in ``nano_graphrag._llm`` and yield the mocked client.

    Any code under test that instantiates ``AsyncOpenAI`` receives this
    ``AsyncMock`` instead, so no real network calls are made.
    """
    with patch("nano_graphrag._llm.AsyncOpenAI") as patched_cls:
        fake_client = AsyncMock()
        patched_cls.return_value = fake_client
        yield fake_client
|
||
|
||
@pytest.fixture
def mock_azure_openai_client():
    """Replace ``AsyncAzureOpenAI`` in ``nano_graphrag._llm`` and yield the mocked client.

    Mirrors ``mock_openai_client`` but targets the Azure client class.
    """
    with patch("nano_graphrag._llm.AsyncAzureOpenAI") as patched_cls:
        fake_client = AsyncMock()
        patched_cls.return_value = fake_client
        yield fake_client
|
||
|
||
@pytest.mark.asyncio
async def test_openai_gpt4o(mock_openai_client):
    """``gpt_4o_complete`` sends system + user messages to the ``gpt-4o`` model."""
    fake_completion = AsyncMock()
    fake_completion.choices = [Mock(message=Mock(content="1"))]
    mock_openai_client.chat.completions.create.return_value = fake_completion

    result = await _llm.gpt_4o_complete("2", system_prompt="3")

    expected_messages = [
        {"role": "system", "content": "3"},
        {"role": "user", "content": "2"},
    ]
    mock_openai_client.chat.completions.create.assert_awaited_once_with(
        model="gpt-4o",
        messages=expected_messages,
    )
    assert result == "1"
|
||
|
||
@pytest.mark.asyncio
async def test_openai_gpt4o_mini(mock_openai_client):
    """``gpt_4o_mini_complete`` sends system + user messages to the ``gpt-4o-mini`` model."""
    fake_completion = AsyncMock()
    fake_completion.choices = [Mock(message=Mock(content="1"))]
    mock_openai_client.chat.completions.create.return_value = fake_completion

    result = await _llm.gpt_4o_mini_complete("2", system_prompt="3")

    expected_messages = [
        {"role": "system", "content": "3"},
        {"role": "user", "content": "2"},
    ]
    mock_openai_client.chat.completions.create.assert_awaited_once_with(
        model="gpt-4o-mini",
        messages=expected_messages,
    )
    assert result == "1"
|
||
|
||
@pytest.mark.asyncio
async def test_azure_openai_gpt4o(mock_azure_openai_client):
    """``azure_gpt_4o_complete`` routes through the Azure client with model ``gpt-4o``."""
    fake_completion = AsyncMock()
    fake_completion.choices = [Mock(message=Mock(content="1"))]
    mock_azure_openai_client.chat.completions.create.return_value = fake_completion

    result = await _llm.azure_gpt_4o_complete("2", system_prompt="3")

    expected_messages = [
        {"role": "system", "content": "3"},
        {"role": "user", "content": "2"},
    ]
    mock_azure_openai_client.chat.completions.create.assert_awaited_once_with(
        model="gpt-4o",
        messages=expected_messages,
    )
    assert result == "1"
|
||
|
||
@pytest.mark.asyncio
async def test_azure_openai_gpt4o_mini(mock_azure_openai_client):
    """``azure_gpt_4o_mini_complete`` routes through the Azure client with model ``gpt-4o-mini``."""
    fake_completion = AsyncMock()
    fake_completion.choices = [Mock(message=Mock(content="1"))]
    mock_azure_openai_client.chat.completions.create.return_value = fake_completion

    result = await _llm.azure_gpt_4o_mini_complete("2", system_prompt="3")

    expected_messages = [
        {"role": "system", "content": "3"},
        {"role": "user", "content": "2"},
    ]
    mock_azure_openai_client.chat.completions.create.assert_awaited_once_with(
        model="gpt-4o-mini",
        messages=expected_messages,
    )
    assert result == "1"
|
||
|
||
@pytest.mark.asyncio
async def test_openai_embedding(mock_openai_client):
    """``openai_embedding`` calls the embeddings API and returns the vectors as an array."""
    fake_response = AsyncMock()
    fake_response.data = [Mock(embedding=[1, 1, 1])]
    mock_openai_client.embeddings.create.return_value = fake_response

    inputs = ["Hello world"]
    result = await _llm.openai_embedding(inputs)

    mock_openai_client.embeddings.create.assert_awaited_once_with(
        model="text-embedding-3-small",
        input=inputs,
        encoding_format="float",
    )
    assert np.allclose(result, np.array([[1, 1, 1]]))
|
||
|
||
@pytest.mark.asyncio
async def test_azure_openai_embedding(mock_azure_openai_client):
    """``azure_openai_embedding`` calls the Azure embeddings API and returns the vectors."""
    fake_response = AsyncMock()
    fake_response.data = [Mock(embedding=[1, 1, 1])]
    mock_azure_openai_client.embeddings.create.return_value = fake_response

    inputs = ["Hello world"]
    result = await _llm.azure_openai_embedding(inputs)

    mock_azure_openai_client.embeddings.create.assert_awaited_once_with(
        model="text-embedding-3-small",
        input=inputs,
        encoding_format="float",
    )
    assert np.allclose(result, np.array([[1, 1, 1]]))