Retry refactor #1043
Merged: 16 commits, Sep 16, 2024
9 changes: 6 additions & 3 deletions edsl/agents/Invigilator.py
@@ -2,14 +2,13 @@

from typing import Dict, Any, Optional

- from edsl.exceptions import AgentRespondedWithBadJSONError
from edsl.prompts.Prompt import Prompt
from edsl.utilities.decorators import sync_wrapper, jupyter_nb_handler
from edsl.prompts.registry import get_classes as prompt_lookup
from edsl.exceptions.questions import QuestionAnswerValidationError
- from edsl.agents.PromptConstructionMixin import PromptConstructorMixin
from edsl.agents.InvigilatorBase import InvigilatorBase
from edsl.data_transfer_models import AgentResponseDict, EDSLResultObjectInput
+ from edsl.agents.PromptConstructor import PromptConstructor


class NotApplicable(str):
@@ -19,9 +18,13 @@ def __new__(cls):
return instance


- class InvigilatorAI(PromptConstructorMixin, InvigilatorBase):
+ class InvigilatorAI(InvigilatorBase):
"""An invigilator that uses an AI model to answer questions."""

+     def get_prompts(self) -> Dict[str, Prompt]:
+         """Return the prompts used."""
+         return self.prompt_constructor.get_prompts()
+
async def async_answer_question(self) -> AgentResponseDict:
"""Answer a question using the AI model.

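The net effect in this file: InvigilatorAI no longer inherits prompt construction from a mixin; it delegates to a PromptConstructor. A minimal usage sketch, assuming the example() helper exercised by the doctests in this PR:

from edsl.agents.InvigilatorBase import InvigilatorBase

# Build the example invigilator used throughout the doctests below.
i = InvigilatorBase.example()

# Prompt construction is delegated to a dedicated object rather than
# inherited from a mixin; get_prompts() returns a dict of Prompt objects.
prompts = i.prompt_constructor.get_prompts()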
35 changes: 8 additions & 27 deletions edsl/agents/InvigilatorBase.py
@@ -14,6 +14,7 @@
from edsl.language_models.LanguageModel import LanguageModel

from edsl.data_transfer_models import EDSLResultObjectInput
+ from edsl.agents.PromptConstructor import PromptConstructor


class InvigilatorBase(ABC):
@@ -27,16 +28,7 @@ class InvigilatorBase(ABC):

This returns an empty prompt because there is no memory the agent needs to have at q0.

- >>> InvigilatorBase.example().create_memory_prompt("q0")
- Prompt(text=\"""\""")
-
- >>> i = InvigilatorBase.example()
- >>> i.current_answers = {"q0": "Prior answer"}
- >>> i.memory_plan.add_single_memory("q1", "q0")
- >>> i.create_memory_prompt("q1")
- Prompt(text=\"""
- Before the question you are now answering, you already answered the following question(s):
- ...
"""

def __init__(
@@ -72,6 +64,11 @@ def __init__(
None # placeholder for the raw response from the model
)

+     @property
+     def prompt_constructor(self) -> PromptConstructor:
+         """Return the prompt constructor."""
+         return PromptConstructor(self)

def to_dict(self):
attributes = [
"agent",
@@ -207,22 +204,6 @@ async def main():

return main()

-     def create_memory_prompt(self, question_name: str) -> Prompt:
-         """Create a memory for the agent.
-
-         The returns a memory prompt for the agent.
-
-         >>> i = InvigilatorBase.example()
-         >>> i.current_answers = {"q0": "Prior answer"}
-         >>> i.memory_plan.add_single_memory("q1", "q0")
-         >>> p = i.create_memory_prompt("q1")
-         >>> p.text.strip().replace("\\n", " ").replace("\\t", " ")
-         'Before the question you are now answering, you already answered the following question(s): Question: Do you like school? Answer: Prior answer'
-         """
-         return self.memory_plan.get_memory_prompt_fragment(
-             question_name, self.current_answers
-         )

@classmethod
def example(
cls, throw_an_exception=False, question=None, scenario=None, survey=None
@@ -285,9 +266,9 @@ def example(

memory_plan = MemoryPlan(survey=survey)
current_answers = None
- from edsl.agents.PromptConstructionMixin import PromptConstructorMixin
+ from edsl.agents.PromptConstructor import PromptConstructor

- class InvigilatorExample(PromptConstructorMixin, InvigilatorBase):
+ class InvigilatorExample(InvigilatorBase):
"""An example invigilator."""

async def async_answer_question(self):
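Worth noting about the new property: every access builds a fresh PromptConstructor, so prompts cached on one instance do not survive to the next access. A small sketch of that behavior, assuming the example() helper shown in this file:

from edsl.agents.InvigilatorBase import InvigilatorBase

i = InvigilatorBase.example()
pc1 = i.prompt_constructor
pc2 = i.prompt_constructor
# Each property access calls PromptConstructor(self), so the objects are
# distinct and per-instance caches (e.g. _agent_instructions_prompt) are
# not shared between accesses.
assert pc1 is not pc2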
edsl/agents/PromptConstructionMixin.py → edsl/agents/PromptConstructor.py
@@ -137,7 +137,7 @@ def get_prompts(self, **kwargs) -> Dict[str, Prompt]:
}


- class PromptConstructorMixin:
+ class PromptConstructor:
    """Constructs prompts for the LLM call.

    The pieces of a prompt are:
@@ -149,14 +149,25 @@ class PromptConstructorMixin:
    An instance is created for each Invigilator via its prompt_constructor property.
    """

-     prompt_plan = PromptPlan()
+     def __init__(self, invigilator):
+         self.invigilator = invigilator
+         self.agent = invigilator.agent
+         self.question = invigilator.question
+         self.scenario = invigilator.scenario
+         self.survey = invigilator.survey
+         self.model = invigilator.model
+         self.current_answers = invigilator.current_answers
+         self.memory_plan = invigilator.memory_plan
+         self.prompt_plan = PromptPlan()  # default prompt plan

@property
def agent_instructions_prompt(self) -> Prompt:
"""
>>> from edsl.agents.InvigilatorBase import InvigilatorBase
>>> i = InvigilatorBase.example()
- >>> i.agent_instructions_prompt
+ >>> i.prompt_constructor.agent_instructions_prompt
Prompt(text=\"""You are answering questions as if you were a human. Do not break character.\""")
"""
if not hasattr(self, "_agent_instructions_prompt"):
@@ -176,7 +187,7 @@ def agent_persona_prompt(self) -> Prompt:
"""
>>> from edsl.agents.InvigilatorBase import InvigilatorBase
>>> i = InvigilatorBase.example()
- >>> i.agent_persona_prompt
+ >>> i.prompt_constructor.agent_persona_prompt
Prompt(text=\"""You are an agent with the following persona:
{'age': 22, 'hair': 'brown', 'height': 5.5}\""")

@@ -231,7 +242,7 @@ def question_instructions_prompt(self) -> Prompt:
"""
>>> from edsl.agents.InvigilatorBase import InvigilatorBase
>>> i = InvigilatorBase.example()
- >>> i.question_instructions_prompt
+ >>> i.prompt_constructor.question_instructions_prompt
Prompt(text=\"""...
...
"""
@@ -329,6 +340,23 @@ def prior_question_memory_prompt(self) -> Prompt:
self._prior_question_memory_prompt = memory_prompt
return self._prior_question_memory_prompt

+     def create_memory_prompt(self, question_name: str) -> Prompt:
+         """Create a memory prompt for the agent.
+
+         This returns a memory prompt recapping the agent's answers to prior questions.
+
+         >>> from edsl.agents.InvigilatorBase import InvigilatorBase
+         >>> i = InvigilatorBase.example()
+         >>> i.current_answers = {"q0": "Prior answer"}
+         >>> i.memory_plan.add_single_memory("q1", "q0")
+         >>> p = i.prompt_constructor.create_memory_prompt("q1")
+         >>> p.text.strip().replace("\\n", " ").replace("\\t", " ")
+         'Before the question you are now answering, you already answered the following question(s): Question: Do you like school? Answer: Prior answer'
+         """
+         return self.memory_plan.get_memory_prompt_fragment(
+             question_name, self.current_answers
+         )

def construct_system_prompt(self) -> Prompt:
"""Construct the system prompt for the LLM call."""
import warnings
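The shape of this refactor — replacing a mixin with a composed helper that keeps a back-reference to its owner — fits in a few lines. A self-contained sketch with hypothetical names (Owner and Helper are illustrative, not EDSL classes):

class Helper:
    """Works on behalf of an Owner, mirroring PromptConstructor(invigilator)."""

    def __init__(self, owner: "Owner") -> None:
        self.owner = owner  # back-reference to the owning object

    def build(self) -> str:
        return f"built for {self.owner.name}"


class Owner:
    """Exposes its helper through a property, mirroring InvigilatorBase.prompt_constructor."""

    def __init__(self, name: str) -> None:
        self.name = name

    @property
    def helper(self) -> Helper:
        return Helper(self)  # a fresh Helper on every access


print(Owner("q1").helper.build())  # prints: built for q1

Compared with a mixin, the composed helper can be constructed, tested, and replaced on its own, and the owner's class hierarchy stays flat.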
21 changes: 8 additions & 13 deletions edsl/inference_services/TestService.py
@@ -7,6 +7,7 @@
from edsl.utilities.utilities import fix_partial_correct_response

from edsl.enums import InferenceServiceType
+ import random


class TestService(InferenceServiceABC):
@@ -60,22 +61,16 @@ async def async_execute_model_call(
await asyncio.sleep(0.1)
# return {"message": """{"answer": "Hello, world"}"""}
if hasattr(self, "throw_exception") and self.throw_exception:
-     raise Exception("This is a test error")
+     if hasattr(self, "exception_probability"):
+         p = self.exception_probability
+     else:
+         p = 1
+
+     if random.random() < p:
+         raise Exception("This is a test error")
return {
"message": [{"text": f"{self._canned_response}"}],
"usage": {"prompt_tokens": 1, "completion_tokens": 1},
}

return TestServiceLanguageModel

- # _inference_service_ = "openai"
- # _env_key_name_ = "OPENAI_API_KEY"
- # _base_url_ = None
-
- # _sync_client_ = openai.OpenAI
- # _async_client_ = openai.AsyncOpenAI
-
- # _sync_client_instance = None
- # _async_client_instance = None
-
- # key_sequence = ["choices", 0, "message", "content"]
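This hook lets the test model fail only some of the time instead of always, which is what retry logic needs to be exercised against. A minimal standalone sketch of the same logic (maybe_fail is a hypothetical free function, not part of EDSL):

import random
from typing import Optional

def maybe_fail(throw_exception: bool, exception_probability: Optional[float] = None) -> dict:
    """Raise with probability exception_probability when throw_exception is set.

    Mirrors the TestService logic above: an unset probability defaults to 1,
    so the old always-raise behavior is preserved.
    """
    if throw_exception:
        p = exception_probability if exception_probability is not None else 1
        if random.random() < p:
            raise Exception("This is a test error")
    return {
        "message": [{"text": "Hello, world"}],
        "usage": {"prompt_tokens": 1, "completion_tokens": 1},
    }

# A caller with retries usually succeeds within a few attempts when p < 1.
for attempt in range(5):
    try:
        print(maybe_fail(True, exception_probability=0.5))
        break
    except Exception:
        print(f"attempt {attempt} failed; retrying")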