diff --git a/council/controllers/llm_controller.py b/council/controllers/llm_controller.py
index ff2c70cd..c0137852 100644
--- a/council/controllers/llm_controller.py
+++ b/council/controllers/llm_controller.py
@@ -45,7 +45,7 @@ def __str__(self):
     @llm_class_validator
     def validate(self):
         if self._score < 0 or self._score > 10:
-            raise LLMParsingException(f"Score {self._score} is invalid, value must be between 0 and 10.")
+            raise LLMParsingException(f"Specialist's score `{self._score}` is invalid, value must be between 0 and 10.")
 
 
 class LLMController(ControllerBase):
diff --git a/council/evaluators/llm_evaluator.py b/council/evaluators/llm_evaluator.py
index 856509e9..c9986070 100644
--- a/council/evaluators/llm_evaluator.py
+++ b/council/evaluators/llm_evaluator.py
@@ -9,7 +9,7 @@
 from council.contexts import AgentContext, ChatMessage, ScoredChatMessage, ContextBase
 from council.evaluators import EvaluatorBase, EvaluatorException
 from council.llm import LLMBase, MonitoredLLM, llm_property, LLMAnswer, LLMMessage
-from council.llm.llm_answer import LLMParsingException
+from council.llm.llm_answer import LLMParsingException, llm_class_validator
 from council.utils import Option
 
 
@@ -37,6 +37,11 @@ def justification(self) -> str:
     def __str__(self):
         return f"Message `{self._index}` graded `{self._grade}` with the justification: `{self._justification}`"
 
+    @llm_class_validator
+    def validate(self):
+        if self._grade < 0.0 or self._grade > 10.0:
+            raise LLMParsingException(f"Grade `{self._grade}` is invalid, value must be between 0.0 and 10.0")
+
 
 class LLMEvaluator(EvaluatorBase):
     """Evaluator using an `LLM` to evaluate chain responses."""
diff --git a/council/llm/llm_answer.py b/council/llm/llm_answer.py
index 90075e6b..160d28bd 100644
--- a/council/llm/llm_answer.py
+++ b/council/llm/llm_answer.py
@@ -39,7 +39,7 @@ def rank(self) -> int:
         return self._rank
 
     def __str__(self):
-        return f"{self._name} {{{self._description}, expected response type `{self._type}`}}"
+        return f"{self._name}: {{{self._description}, expected response type `{self._type.__name__}`}}"
 
     def can_parse(self, value: Any) -> bool:
         try:
diff --git a/council/scorers/llm_similarity_scorer.py b/council/scorers/llm_similarity_scorer.py
index 72ae457b..257c5cfc 100644
--- a/council/scorers/llm_similarity_scorer.py
+++ b/council/scorers/llm_similarity_scorer.py
@@ -4,7 +4,7 @@
 from .scorer_base import ScorerBase
 from council.contexts import ChatMessage, ScorerContext, ContextBase
 from council.llm import LLMBase, LLMMessage, MonitoredLLM, llm_property, LLMAnswer
-from ..llm.llm_answer import LLMParsingException
+from ..llm.llm_answer import LLMParsingException, llm_class_validator
 from ..utils import Option
 
 
@@ -26,6 +26,11 @@ def justification(self) -> str:
     def __str__(self):
         return f"Similarity score is {self.score} with the justification: {self._justification}"
 
+    @llm_class_validator
+    def validate(self):
+        if self._score < 0 or self._score > 100:
+            raise LLMParsingException(f"Similarity Score `{self._score}` is invalid, value must be between 0 and 100.")
+
 
 class LLMSimilarityScorer(ScorerBase):
     """
@@ -100,7 +105,7 @@ def _build_system_message(self) -> LLMMessage:
             "1. Compare the {expected} message and the {actual} message.",
             "2. Score 0 (2 messages are unrelated) to 100 (the 2 messages have the same content).",
             "3. Your score must be fair.",
-            "\n#FORMATTING",
+            "\n# FORMATTING",
             self._llm_answer.to_prompt(),
         ]
         return LLMMessage.system_message("\n".join(system_prompt))
diff --git a/docs/source/getting_started/first_example.ipynb b/docs/source/getting_started/first_example.ipynb
index 67e899b4..9da16e57 100644
--- a/docs/source/getting_started/first_example.ipynb
+++ b/docs/source/getting_started/first_example.ipynb
@@ -49,7 +49,7 @@
     "import os\n",
     "\n",
     "dotenv.load_dotenv()\n",
-    "print(os.getenv(\"OPENAI_API_KEY\", None) is not None )\n",
+    "print(os.getenv(\"OPENAI_API_KEY\", None) is not None)\n",
     "\n",
     "openai_llm = OpenAILLM.from_env()\n"
   ],
@@ -73,7 +73,7 @@
   "source": [
    "prompt = \"You are responding to every prompt with a short poem titled hello world\"\n",
    "hw_skill = LLMSkill(llm=openai_llm, system_prompt=prompt)\n",
-   "hw_chain = Chain(name=\"Hello World\", description=\"Answers with a poem about titled Hello World\", runners=[hw_skill])\n"
+   "hw_chain = Chain(name=\"Hello World\", description=\"Answers with a poem titled Hello World\", runners=[hw_skill])\n"
   ],
   "metadata": {
    "collapsed": false
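
For context, here is a minimal sketch of the validation pattern these hunks introduce. `ExampleScore` is a hypothetical class, not part of the library; it only mirrors the `llm_property`/`llm_class_validator` usage visible above, and it assumes the library invokes the decorated `validate` method while parsing an LLM response, surfacing out-of-range values as `LLMParsingException`:

```python
# Hypothetical example mirroring the pattern added in this PR; `ExampleScore`
# is not part of the council library.
from council.llm import llm_property
from council.llm.llm_answer import LLMParsingException, llm_class_validator


class ExampleScore:
    """Answer schema for a 0-100 score with a short justification."""

    def __init__(self, score: int, justification: str):
        self._score = score
        self._justification = justification

    @llm_property
    def score(self) -> int:
        """Score between 0 and 100."""
        return self._score

    @llm_property
    def justification(self) -> str:
        """Short justification for the score."""
        return self._justification

    def __str__(self):
        return f"Score is {self.score}: {self._justification}"

    @llm_class_validator
    def validate(self):
        # Assumption: the library calls this after parsing an LLM response,
        # so out-of-range values are reported as LLMParsingException rather
        # than propagating silently.
        if self._score < 0 or self._score > 100:
            raise LLMParsingException(f"Score `{self._score}` is invalid, value must be between 0 and 100.")


print(ExampleScore(score=42, justification="partial match"))
```

The same bounds-check shape appears in all three validators added above; only the valid range and the exception message differ per class.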