Skip to content
This repository has been archived by the owner on Apr 29, 2024. It is now read-only.

Commit

Permalink
Fix reading multi-message assistant response
Browse files Browse the repository at this point in the history
  • Loading branch information
id-ilych committed Apr 5, 2024
1 parent 3fb020d commit d442621
Show file tree
Hide file tree
Showing 4 changed files with 35 additions and 32 deletions.
16 changes: 6 additions & 10 deletions interactions/identify_knowledge_gap.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
from configuration import interaction_retrieval_count, interactions_collection_name
from configuration import quizz_assistant_id
from open_ai.embedding.embed_manager import embed_text
from open_ai.assistants.utility import initiate_client
from open_ai.assistants.utility import extract_assistant_response, initiate_client
from open_ai.assistants.thread_manager import ThreadManager
from open_ai.assistants.assistant_manager import AssistantManager
from database.interaction_manager import QAInteractionManager, QAInteractions
Expand Down Expand Up @@ -144,25 +144,25 @@ def query_assistant_with_context(context, formatted_interactions, thread_id=None

# Format the question with context and query the assistant

formatted_question = (f"""After analyzing the provided questions_text,
formatted_question = (f"""After analyzing the provided questions_text,
Keep only the questions that are related to {context}
From these identify those that were not provided a satisfactory answer in the answer_text
These questions should reflect gaps in our current knowledge or documentation.
Compile these questions so we can ask them to the domain experts and recover that knowledge.
Provide them strictly in a JSON array, following the specified structure.
Each entry should include the question and a brief explanation of why it was
Each entry should include the question and a brief explanation of why it was
included, how it relates to the {context} domain, and what part of the question wasn't covered.
Only include questions relevant to this domain:{context}\n
Only include questions relevant to this domain:{context}\n
f"Context:{formatted_interactions}\n
""")

print(f"Formatted question:\n{formatted_question}\n")

# Query the assistant
messages, thread_id = thread_manager.add_message_and_wait_for_reply(formatted_question, [])
messages, thread_id = thread_manager.add_message_and_wait_for_reply(formatted_question)
print(f"The thread_id is: {thread_id}\n Messages received: {messages}\n")
if messages and messages.data:
assistant_response = messages.data[0].content[0].text.value
assistant_response = extract_assistant_response(messages)
print(f"Assistant full response: {assistant_response}\n")
else:
assistant_response = "No response received."
Expand Down Expand Up @@ -246,7 +246,3 @@ def identify_knowledge_gaps(context):

# Updated function call to match the expected input
quiz_questions = post_questions_to_slack(channel_id=channel_id, quiz_question_dtos=quiz_question_dtos, user_ids=user_ids)


if __name__ == "__main__":
identify_knowledge_gaps("infrastructure")
19 changes: 2 additions & 17 deletions open_ai/assistants/query_assistant_from_documents.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
# ./oai_assistants/query_assistant_from_documents.py
from open_ai.assistants.utility import initiate_client
from open_ai.assistants.utility import extract_assistant_response, initiate_client
from open_ai.assistants.thread_manager import ThreadManager
from open_ai.assistants.assistant_manager import AssistantManager
from configuration import qa_assistant_id, file_system_path
Expand Down Expand Up @@ -98,25 +98,10 @@ def query_assistant_with_context(question, page_ids, thread_id=None):
messages, thread_id = thread_manager.add_message_and_wait_for_reply(formatted_question)
print(f"The thread_id is: {thread_id}\n Messages received: {messages}\n")
if messages and messages.data:
assistant_response = messages.data[0].content[0].text.value
assistant_response = extract_assistant_response(messages)
print(f"Assistant full response: {assistant_response}\n")
else:
assistant_response = "No response received."
print(f"No response received.\n")

return assistant_response, thread_id


if __name__ == "__main__":
# First query - introduce a piece of information
initial_question = "My name is Roland, what do you know about my name?"
initial_response, thread_id = query_assistant_with_context(initial_question, [])

print("Initial Response:", initial_response)
print("Thread ID from Initial Query:", thread_id)

# Second query - follow-up question using the thread ID from the first query
follow_up_question = "What was my name?"
follow_up_response, _ = query_assistant_with_context(follow_up_question, [], thread_id=thread_id)

print("Follow-up Response:", follow_up_response)
9 changes: 4 additions & 5 deletions open_ai/assistants/thread_manager.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,9 @@
# ./oai_assistants/thread_manager.py
import time
import json
from context.prepare_context import get_context
from openai import OpenAI

from context.prepare_context import get_context

class ThreadManager:
"""
Expand All @@ -15,7 +16,7 @@ class ThreadManager:
Attributes:
client (OpenAI_Client): An instance of the client used for handling thread operations.
"""
def __init__(self, client, assistant_id, thread_id=None):
def __init__(self, client: OpenAI, assistant_id, thread_id=None):
"""
Initializes the ThreadManager with a client to manage threads.
Expand Down Expand Up @@ -69,7 +70,7 @@ def add_message_and_wait_for_reply(self, user_message):
# If the run was successful, display messages as usual
self.display_messages(messages)
print("\nAssistant run completed.")
break
return messages, self.thread_id
elif run_status.status == "failed":
print("\nAssistant run failed.")
# If there's a last_error, use it to inform the user
Expand All @@ -94,8 +95,6 @@ def add_message_and_wait_for_reply(self, user_message):
print("Waiting for run to complete...")
time.sleep(5) # Adjust sleep time as needed

return messages, self.thread_id

def check_run_status(self, run_id):
"""
Checks the status of a thread run.
Expand Down
23 changes: 23 additions & 0 deletions open_ai/assistants/utility.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,29 @@ def initiate_client():
return client


def extract_assistant_response(messages) -> str:
    """
    Extract the assistant's full reply text from a thread message listing.

    The API returns messages in reverse chronological order, so the most
    recent assistant reply is the leading run of ``role == 'assistant'``
    messages in ``messages.data``. Those messages are collected and joined
    back in chronological order, one message per line.

    Args:
        messages: A message page object (e.g. the OpenAI messages list
            response) whose ``data`` attribute is a sequence of message
            objects, newest first. Each message exposes ``role`` and
            ``content[0].text.value``.
            # NOTE(review): assumes content[0] is always a text block —
            # non-text (e.g. image) content would raise; confirm upstream.

    Returns:
        str: The assistant's reply, with consecutive assistant messages
        joined by newlines; empty string if the newest message is not from
        the assistant or there are no messages at all.
    """
    parts = []
    for message in messages.data:
        if message.role != 'assistant':
            # Reached the preceding (user) message — the assistant's
            # most-recent reply block is fully collected.
            break
        parts.append(message.content[0].text.value)
    # parts was gathered newest-first; reverse to chronological order.
    return '\n'.join(reversed(parts))


# Sample assistant template used for creating new assistants in the system.
new_assistant_with_tools = {
"model": model_id,
Expand Down

0 comments on commit d442621

Please sign in to comment.