From d44262116a6f83f167877fa008f7afa99e241b2d Mon Sep 17 00:00:00 2001
From: Ilya Denisov
Date: Fri, 5 Apr 2024 11:56:44 +0300
Subject: [PATCH] Fix reading multi-message assistant response

---
 interactions/identify_knowledge_gap.py       | 16 +++++--------
 .../query_assistant_from_documents.py        | 19 ++-------------
 open_ai/assistants/thread_manager.py         |  9 ++++----
 open_ai/assistants/utility.py                | 23 +++++++++++++++++++
 4 files changed, 35 insertions(+), 32 deletions(-)

diff --git a/interactions/identify_knowledge_gap.py b/interactions/identify_knowledge_gap.py
index 7ecb37d5..fa5b74d2 100644
--- a/interactions/identify_knowledge_gap.py
+++ b/interactions/identify_knowledge_gap.py
@@ -7,7 +7,7 @@
 from configuration import interaction_retrieval_count, interactions_collection_name
 from configuration import quizz_assistant_id
 from open_ai.embedding.embed_manager import embed_text
-from open_ai.assistants.utility import initiate_client
+from open_ai.assistants.utility import extract_assistant_response, initiate_client
 from open_ai.assistants.thread_manager import ThreadManager
 from open_ai.assistants.assistant_manager import AssistantManager
 from database.interaction_manager import QAInteractionManager, QAInteractions
@@ -144,25 +144,25 @@ def query_assistant_with_context(context, formatted_interactions, thread_id=None
     # Format the question with context and query the assistant
-    formatted_question = (f"""After analyzing the provided questions_text, 
+    formatted_question = (f"""After analyzing the provided questions_text,
     Keep only the questions that are related to {context}
     From these identify those that were not provided a satisfactory answer in the answer_text
     These questions should reflect gaps in our current knowledge or documentation.
     Compile these questions so we can ask them to the domain experts and recover that knowledge.
     Provide them strictly in a JSON array, following the specified structure.
-    Each entry should include the question and a brief explanation of why it was 
+    Each entry should include the question and a brief explanation of why it was
     included, how it relates to the {context} domain, and what part of the question wasn't covered.
-    Only include questions relevant to this domain:{context}\n 
+    Only include questions relevant to this domain:{context}\n
     f"Context:{formatted_interactions}\n
     """)
     print(f"Formatted question:\n{formatted_question}\n")
 
     # Query the assistant
-    messages, thread_id = thread_manager.add_message_and_wait_for_reply(formatted_question, [])
+    messages, thread_id = thread_manager.add_message_and_wait_for_reply(formatted_question)
     print(f"The thread_id is: {thread_id}\n Messages received: {messages}\n")
     if messages and messages.data:
-        assistant_response = messages.data[0].content[0].text.value
+        assistant_response = extract_assistant_response(messages)
         print(f"Assistant full response: {assistant_response}\n")
     else:
         assistant_response = "No response received."
@@ -246,7 +246,3 @@ def identify_knowledge_gaps(context):
     # Updated function call to match the expected input
     quiz_questions = post_questions_to_slack(channel_id=channel_id, quiz_question_dtos=quiz_question_dtos, user_ids=user_ids)
-
-
-if __name__ == "__main__":
-    identify_knowledge_gaps("infrastructure")
\ No newline at end of file
diff --git a/open_ai/assistants/query_assistant_from_documents.py b/open_ai/assistants/query_assistant_from_documents.py
index 62841ff8..9203cdae 100644
--- a/open_ai/assistants/query_assistant_from_documents.py
+++ b/open_ai/assistants/query_assistant_from_documents.py
@@ -1,5 +1,5 @@
 # ./oai_assistants/query_assistant_from_documents.py
-from open_ai.assistants.utility import initiate_client
+from open_ai.assistants.utility import extract_assistant_response, initiate_client
 from open_ai.assistants.thread_manager import ThreadManager
 from open_ai.assistants.assistant_manager import AssistantManager
 from configuration import qa_assistant_id, file_system_path
@@ -98,25 +98,10 @@ def query_assistant_with_context(question, page_ids, thread_id=None):
     messages, thread_id = thread_manager.add_message_and_wait_for_reply(formatted_question)
     print(f"The thread_id is: {thread_id}\n Messages received: {messages}\n")
     if messages and messages.data:
-        assistant_response = messages.data[0].content[0].text.value
+        assistant_response = extract_assistant_response(messages)
         print(f"Assistant full response: {assistant_response}\n")
     else:
         assistant_response = "No response received."
         print(f"No response received.\n")
 
     return assistant_response, thread_id
-
-
-if __name__ == "__main__":
-    # First query - introduce a piece of information
-    initial_question = "My name is Roland, what do you know about my name?"
-    initial_response, thread_id = query_assistant_with_context(initial_question, [])
-
-    print("Initial Response:", initial_response)
-    print("Thread ID from Initial Query:", thread_id)
-
-    # Second query - follow-up question using the thread ID from the first query
-    follow_up_question = "What was my name?"
-    follow_up_response, _ = query_assistant_with_context(follow_up_question, [], thread_id=thread_id)
-
-    print("Follow-up Response:", follow_up_response)
diff --git a/open_ai/assistants/thread_manager.py b/open_ai/assistants/thread_manager.py
index 2330da12..507ba406 100644
--- a/open_ai/assistants/thread_manager.py
+++ b/open_ai/assistants/thread_manager.py
@@ -1,8 +1,9 @@
 # ./oai_assistants/thread_manager.py
 import time
 import json
-from context.prepare_context import get_context
+from openai import OpenAI
+from context.prepare_context import get_context
 
 
 class ThreadManager:
     """
@@ -15,7 +16,7 @@ class ThreadManager:
     Attributes:
         client (OpenAI_Client): An instance of the client used for handling thread operations.
     """
-    def __init__(self, client, assistant_id, thread_id=None):
+    def __init__(self, client: OpenAI, assistant_id, thread_id=None):
         """
         Initializes the ThreadManager with a client to manage threads.
@@ -69,7 +70,7 @@ def add_message_and_wait_for_reply(self, user_message):
                 # If the run was successful, display messages as usual
                 self.display_messages(messages)
                 print("\nAssistant run completed.")
-                break
+                return messages, self.thread_id
             elif run_status.status == "failed":
                 print("\nAssistant run failed.")
                 # If there's a last_error, use it to inform the user
@@ -94,8 +95,6 @@ def add_message_and_wait_for_reply(self, user_message):
             print("Waiting for run to complete...")
             time.sleep(5)  # Adjust sleep time as needed
 
-        return messages, self.thread_id
-
     def check_run_status(self, run_id):
         """
         Checks the status of a thread run.
diff --git a/open_ai/assistants/utility.py b/open_ai/assistants/utility.py
index ba199450..b1284696 100644
--- a/open_ai/assistants/utility.py
+++ b/open_ai/assistants/utility.py
@@ -17,6 +17,29 @@ def initiate_client():
     return client
 
 
+def extract_assistant_response(messages) -> str:
+    """
+    Extracts the assistant's reply from the messages of a thread.
+
+    Args:
+        messages: The thread's message listing (most recent first), including the assistant's reply to the question.
+
+    Returns:
+        str: The assistant's reply extracted from the messages.
+    """
+    result = []
+    # Messages are returned in reverse chronological order, so to build
+    # the full reply we take the most recent block of assistant messages
+    # and concatenate them in reverse (i.e. chronological) order.
+    for message in messages.data:
+        if message.role != 'assistant':
+            break
+        result.append(message.content[0].text.value)
+    result.reverse()
+    result = '\n'.join(result)
+    return result
+
+
 # Sample assistant template used for creating new assistants in the system.
 new_assistant_with_tools = {
     "model": model_id,
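
The sketch below is not part of the diff; it is a minimal illustration of what the new extract_assistant_response helper in open_ai/assistants/utility.py is meant to do when the assistant splits one reply across several thread messages: walk the newest-first message list, collect the leading block of assistant messages, and join them back in chronological order. The _msg helper and the SimpleNamespace objects are hypothetical stand-ins for the OpenAI SDK message objects (same .data / .role / .content[0].text.value shape); running it assumes the patched repository and its configuration module are importable.

# A minimal usage sketch, assuming the patched repository is importable.
from types import SimpleNamespace

from open_ai.assistants.utility import extract_assistant_response


def _msg(role, text):
    # Hypothetical stand-in that mimics the attribute layout of a thread message.
    return SimpleNamespace(
        role=role,
        content=[SimpleNamespace(text=SimpleNamespace(value=text))],
    )


# Thread messages arrive newest first, so the two assistant messages that
# make up one reply come before the user question that triggered them.
messages = SimpleNamespace(data=[
    _msg("assistant", "...and here is the second half of the answer."),
    _msg("assistant", "Here is the first half of the answer..."),
    _msg("user", "Please summarise the document."),
])

print(extract_assistant_response(messages))
# Prints the reply reassembled in chronological order:
# Here is the first half of the answer...
# ...and here is the second half of the answer.

Reversing the collected block restores chronological order because the Assistants API lists thread messages newest first by default, which is also why the previous code's messages.data[0] only ever saw the last chunk of a multi-message reply.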