Skip to content

Commit

Permalink
Merge pull request #266 from SuffolkLITLab/patch-multiple-messages-not-json-mode
Browse files Browse the repository at this point in the history

Fix issue where json_mode was forcefully enabled in chat_completion when the messages parameter was set
  • Loading branch information
nonprofittechy authored Aug 28, 2024
2 parents 892107b + 4e0db03 commit 5f8ce90
Showing 1 changed file with 38 additions and 8 deletions.
46 changes: 38 additions & 8 deletions docassemble/ALToolbox/llms.py
Original file line number Diff line number Diff line change
Expand Up @@ -159,14 +159,17 @@ def chat_completion(
f"Warning: { system_message } does not contain the word 'json' but json_mode is set to True. Adding 'json' silently"
)
system_message = f"{ system_message }\n\nRespond only with a JSON object"
elif messages:
if not any("json" in message["content"].lower() for message in messages):
log(
f"Warning: None of the messages contain the word 'json' but json_mode is set to True. Adding 'json' silently"
)
messages.append(
{"role": "system", "content": "Respond only with a JSON object"}
)
elif (
messages
and json_mode
and not any("json" in message["content"].lower() for message in messages)
):
log(
f"Warning: None of the messages contain the word 'json' but json_mode is set to True. Adding 'json' silently"
)
messages.append(
{"role": "system", "content": "Respond only with a JSON object"}
)

if not messages:
assert isinstance(system_message, str)
Expand Down Expand Up @@ -774,6 +777,33 @@ def synthesize_draft_response(self):
model=self.model,
)

def provide_feedback(self, feedback_prompt: str = ""):
"""Returns feedback to the user based on the goals they satisfied."""
if not feedback_prompt:
feedback_prompt = """
You are a helpful instructor who is providing feedback to a student
based on their reflection and response to any questions you asked.
Review the student's response and provide feedback on how well they
addressed the goals you set out for them. If they met the goals but
could dig deeper, offer specific feedback on how they could do so
in their next reflection.
"""
messages = [
{"role": "assistant", "content": self.initial_question},
{"role": "user", "content": self.initial_draft},
]
for question in self.elements:
messages.append({"role": "assistant", "content": question.question})
messages.append({"role": "user", "content": question.response})

messages.append({"role": "assistant", "content": feedback_prompt})

return chat_completion(
messages=messages,
model=self.model,
)


class IntakeQuestion(DAObject):
"""A class to represent a question in an LLM-assisted intake questionnaire.
Expand Down

0 comments on commit 5f8ce90

Please sign in to comment.