Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Cli(chat): stream answer, add DDGS.chat_yield (response message generator) #283

Merged
merged 3 commits into from
Feb 16, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 7 additions & 5 deletions duckduckgo_search/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -181,16 +181,18 @@ def chat(load, proxy, multiline, timeout, verify, model):
client._chat_tokens_count = cache.get("tokens", 0)

while True:
print(f"{'-' * 78}\nYou[{model=} tokens={client._chat_tokens_count}]: ", end="")
click.secho(f"You[{model=} tokens={client._chat_tokens_count}]: ", fg="blue", nl=False)
if multiline:
print(f"""[multiline, send message: ctrl+{"Z" if sys.platform == "win32" else "D"}]""")
click.secho(f"""[multiline, send message: ctrl+{"Z" if sys.platform == "win32" else "D"}]""", fg="green")
user_input = sys.stdin.read()
print("...")
print()
else:
user_input = input()
if user_input.strip():
resp_answer = client.chat(keywords=user_input, model=model, timeout=timeout)
click.secho(f"AI: {resp_answer}", fg="green")
click.secho("AI: ", fg="red", nl=False)
for chunk in client.chat_yield(keywords=user_input, model=model, timeout=timeout):
print(chunk, end="")
print()

cache = {"vqd": client._chat_vqd, "tokens": client._chat_tokens_count, "messages": client._chat_messages}
_save_json(cache_file, cache)
Expand Down
75 changes: 46 additions & 29 deletions duckduckgo_search/duckduckgo_search.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
import logging
import os
import warnings
from collections.abc import Iterator
from datetime import datetime, timezone
from functools import cached_property
from itertools import cycle
Expand Down Expand Up @@ -128,7 +129,7 @@ def _get_vqd(self, keywords: str) -> str:
resp_content = self._get_url("GET", "https://duckduckgo.com", params={"q": keywords})
return _extract_vqd(resp_content, keywords)

def chat(self, keywords: str, model: str = "gpt-4o-mini", timeout: int = 30) -> str:
def chat_yield(self, keywords: str, model: str = "gpt-4o-mini", timeout: int = 30) -> Iterator[str]:
"""Initiates a chat session with DuckDuckGo AI.

Args:
Expand All @@ -137,8 +138,8 @@ def chat(self, keywords: str, model: str = "gpt-4o-mini", timeout: int = 30) ->
"o3-mini", "mixtral-8x7b". Defaults to "gpt-4o-mini".
timeout (int): Timeout value for the HTTP client. Defaults to 30.

Returns:
str: The response from the AI.
Yields:
str: Chunks of the response from the AI.
"""
# vqd
if not self._chat_vqd:
Expand All @@ -154,36 +155,52 @@ def chat(self, keywords: str, model: str = "gpt-4o-mini", timeout: int = 30) ->
"model": self._chat_models[model],
"messages": self._chat_messages,
}
resp = self.client.post(
"https://duckduckgo.com/duckchat/v1/chat",
with self.client.stream(
method="POST",
url="https://duckduckgo.com/duckchat/v1/chat",
headers={"x-vqd-4": self._chat_vqd},
json=json_data,
timeout=timeout,
)
self._chat_vqd = resp.headers.get("x-vqd-4", "")

data = ",".join(x for line in resp.text.rstrip("[DONE]LIMT_CVRSA\n").split("data:") if (x := line.strip()))
data = json_loads("[" + data + "]")

results: list[str] = []
for x in data:
if isinstance(x, dict):
if x.get("action") == "error":
err_message = x.get("type", "")
if x.get("status") == 429:
raise (
ConversationLimitException(err_message)
if err_message == "ERR_CONVERSATION_LIMIT"
else RatelimitException(err_message)
)
raise DuckDuckGoSearchException(err_message)
elif message := x.get("message"):
results.append(message)
result = "".join(results)

) as resp:
self._chat_vqd = resp.headers.get("x-vqd-4", "")
chunks = []
for line in resp.iter_lines():
if line and line.startswith("data:"):
if line == "data: [DONE]":
break
x = json_loads(line[5:].strip())
if isinstance(x, dict):
if x.get("action") == "error":
err_message = x.get("type", "")
if x.get("status") == 429:
raise (
ConversationLimitException(err_message)
if err_message == "ERR_CONVERSATION_LIMIT"
else RatelimitException(err_message)
)
raise DuckDuckGoSearchException(err_message)
elif message := x.get("message"):
chunks.append(message)
yield message

result = "".join(chunks)
self._chat_messages.append({"role": "assistant", "content": result})
self._chat_tokens_count += len(results)
return result
self._chat_tokens_count += len(result)

def chat(self, keywords: str, model: str = "gpt-4o-mini", timeout: int = 30) -> str:
    """Initiates a chat session with DuckDuckGo AI.

    Args:
        keywords (str): The initial message or question to send to the AI.
        model (str): The model to use: "gpt-4o-mini", "llama-3.3-70b", "claude-3-haiku",
            "o3-mini", "mixtral-8x7b". Defaults to "gpt-4o-mini".
        timeout (int): Timeout value for the HTTP client. Defaults to 30.

    Returns:
        str: The complete response from the AI, assembled from the streamed chunks.
    """
    # Delegate to the streaming generator and concatenate all yielded
    # chunks into the final answer string.
    answer_generator = self.chat_yield(keywords, model, timeout)
    return "".join(answer_generator)

def text(
self,
Expand Down
2 changes: 1 addition & 1 deletion tests/test_cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@

@pytest.fixture(autouse=True)
def pause_between_tests():
time.sleep(1)
time.sleep(2)


def test_version_command():
Expand Down
2 changes: 1 addition & 1 deletion tests/test_duckduckgo_search.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

@pytest.fixture(autouse=True)
def pause_between_tests():
time.sleep(1)
time.sleep(2)


def test_context_manager():
Expand Down