-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathsearch_handler.py
124 lines (104 loc) · 4.94 KB
/
search_handler.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
import asyncio
from openai import OpenAI, AsyncOpenAI
from models import RelatedQuestion
from termcolor import colored
import re
from config import OPENAI_API_KEY, PERPLEXITY_API_KEY
# Initialize API clients:
# - gpt_client: synchronous OpenAI client used for question generation and
#   final-answer synthesis.
# - perplexity_client: async client pointed at Perplexity's OpenAI-compatible
#   endpoint, used for web-backed search answers.
gpt_client = OpenAI(api_key=OPENAI_API_KEY)
perplexity_client = AsyncOpenAI(api_key=PERPLEXITY_API_KEY, base_url="https://api.perplexity.ai")
async def generate_related_questions(question: str, count: int) -> list[str]:
    """
    Generates related questions using GPT.

    The OpenAI client here is synchronous, so the request is dispatched to a
    worker thread via ``asyncio.to_thread`` instead of being awaited directly —
    calling it inline would block the running event loop for the full duration
    of the API round-trip.

    Args:
        question (str): The original question provided by the user.
        count (int): The number of related questions to generate.

    Returns:
        list[str]: At most ``count`` related questions; an empty list if the
            API call fails for any reason.
    """
    try:
        print(colored(f"\n🤔 Generating {count} related questions for: ", 'cyan') +
              colored(question, 'yellow', attrs=['bold']))
        # Run the blocking structured-output call off the event loop thread.
        response = await asyncio.to_thread(
            gpt_client.beta.chat.completions.parse,
            model="gpt-4o",
            messages=[
                {"role": "system", "content": f"Generate {count} specific, focused questions to provide a comprehensive answer."},
                {"role": "user", "content": question}
            ],
            response_format=RelatedQuestion
        )
        questions = response.choices[0].message.parsed.questions
        print(colored("\n📝 Generated questions:", 'green'))
        for i, q in enumerate(questions, 1):
            print(colored(f" {i}. ", 'green') + colored(q, 'white'))
        # The model may produce more items than requested; enforce the cap.
        return questions[:count]
    except Exception as e:
        # Best-effort: log and return an empty list so the pipeline continues.
        print(colored(f"\n❌ Error generating questions: {str(e)}", 'red'))
        return []
async def search_perplexity(question: str) -> str:
    """
    Look up a single question through the Perplexity API.

    Args:
        question (str): The question to search.

    Returns:
        str: The model's answer text, or a ⚠️-prefixed warning string if the
            request times out or raises.
    """
    # Pre-render the truncated-question label reused by both status prints.
    label = colored(f" for: {question[:50]}...", 'yellow')
    try:
        print(colored(f"\n🔍 Searching Perplexity for: ", 'magenta') + colored(question, 'yellow'))
        # Bound the whole request to 15 seconds via the async timeout context.
        async with asyncio.timeout(15.0):
            completion = await perplexity_client.chat.completions.create(
                model="llama-3.1-sonar-large-128k-online",
                messages=[
                    {"role": "system", "content": "Provide a concise answer to the finance question."},
                    {"role": "user", "content": question},
                ],
            )
    except asyncio.TimeoutError:
        print(colored("⚠️ Timeout", 'red') + label)
        return "⚠️ Timeout occurred. Question skipped."
    except Exception as err:
        print(colored(f"❌ Error during Perplexity search: {str(err)}", 'red'))
        return f"⚠️ Error occurred: {str(err)}"
    print(colored("✅ Answer received", 'green') + label)
    return completion.choices[0].message.content
async def generate_final_answer(original_question: str, related_answers: list[dict]) -> str:
    """
    Generates a final synthesized answer using GPT.

    The synchronous OpenAI call is executed in a worker thread via
    ``asyncio.to_thread`` so it does not block the event loop.

    Args:
        original_question (str): The user's original question.
        related_answers (list[dict]): Dicts with ``'question'`` and
            ``'answer'`` keys produced by the research step.

    Returns:
        str: A synthesized markdown answer, or a ⚠️-prefixed warning string
            on error.
    """
    try:
        print(colored("\n🤖 Generating final answer...", 'cyan', attrs=['bold']))
        context = "\n\n".join([
            f"Question: {qa['question']}\nAnswer: {qa['answer']}"
            for qa in related_answers
        ])
        # The prompt explicitly instructs the model to wrap its answer in
        # <response></response> tags; previously the tags were never requested
        # (only a dangling </response> fragment), so the extraction regex
        # below almost never matched.
        prompt = f"""You are a research expert tasked with synthesizing information from multiple sources.
Based on the following research related to the user's question: "{original_question}":
{context}
Provide a concise and informative answer that directly addresses the user's question.
Synthesize the information from the provided research, drawing connections between the different sources where appropriate.
Format your response using markdown to enhance readability. You can use:
* Headings to structure the answer.
* Bullet points or numbered lists to organize key findings.
* Bold text to highlight important terms or concepts.
* Tables to compare and contrast information.
Focus on providing a factual and objective summary based solely on the provided research.
Wrap your final markdown answer in <response> and </response> tags, for example:
<response>
**Key fact**: Value
</response>"""
        # o1-mini does not accept system messages, hence the single user turn.
        # The client is synchronous: run it off the event loop thread.
        response = await asyncio.to_thread(
            gpt_client.chat.completions.create,
            model="o1-mini",
            messages=[{"role": "user", "content": prompt}]
        )
        final_answer = response.choices[0].message.content
        print(colored("✨ Final answer generated", 'green'))
        # Extract the tagged markdown; fall back to the raw reply if the
        # model ignored the tag instruction.
        match = re.search(r'<response>(.*?)</response>', final_answer, re.DOTALL)
        return match.group(1).strip() if match else final_answer
    except Exception as e:
        print(colored(f"❌ Error generating final answer: {str(e)}", 'red'))
        return f"⚠️ Error occurred: {str(e)}"