From 4e0db03fdebc02618cb38c1f457e3308b0d45516 Mon Sep 17 00:00:00 2001 From: Quinten Steenhuis Date: Wed, 28 Aug 2024 18:00:22 -0400 Subject: [PATCH] Fix issue where json_mode was forcefully enabled in chat_completion when the messages parameter was set --- docassemble/ALToolbox/llms.py | 46 +++++++++++++++++++++++++++++------ 1 file changed, 38 insertions(+), 8 deletions(-) diff --git a/docassemble/ALToolbox/llms.py b/docassemble/ALToolbox/llms.py index 3e78cf3..07abb13 100644 --- a/docassemble/ALToolbox/llms.py +++ b/docassemble/ALToolbox/llms.py @@ -159,14 +159,17 @@ def chat_completion( f"Warning: { system_message } does not contain the word 'json' but json_mode is set to True. Adding 'json' silently" ) system_message = f"{ system_message }\n\nRespond only with a JSON object" - elif messages: - if not any("json" in message["content"].lower() for message in messages): - log( - f"Warning: None of the messages contain the word 'json' but json_mode is set to True. Adding 'json' silently" - ) - messages.append( - {"role": "system", "content": "Respond only with a JSON object"} - ) + elif ( + messages + and json_mode + and not any("json" in message["content"].lower() for message in messages) + ): + log( + f"Warning: None of the messages contain the word 'json' but json_mode is set to True. Adding 'json' silently" + ) + messages.append( + {"role": "system", "content": "Respond only with a JSON object"} + ) if not messages: assert isinstance(system_message, str) @@ -774,6 +777,33 @@ def synthesize_draft_response(self): model=self.model, ) + def provide_feedback(self, feedback_prompt: str = ""): + """Returns feedback to the user based on the goals they satisfied.""" + if not feedback_prompt: + feedback_prompt = """ + You are a helpful instructor who is providing feedback to a student + based on their reflection and response to any questions you asked. + + Review the student's response and provide feedback on how well they + addressed the goals you set out for them. If they met the goals but + could dig deeper, offer specific feedback on how they could do so + in their next reflection. + """ + messages = [ + {"role": "assistant", "content": self.initial_question}, + {"role": "user", "content": self.initial_draft}, + ] + for question in self.elements: + messages.append({"role": "assistant", "content": question.question}) + messages.append({"role": "user", "content": question.response}) + + messages.append({"role": "assistant", "content": feedback_prompt}) + + return chat_completion( + messages=messages, + model=self.model, + ) + class IntakeQuestion(DAObject): """A class to represent a question in an LLM-assisted intake questionnaire.