From eb9d11499a60205da9030d207180cdb5cdeee640 Mon Sep 17 00:00:00 2001
From: Dmytro Nikolaiev
Date: Fri, 11 Oct 2024 11:10:13 -0400
Subject: [PATCH 1/4] Implement LLMFileLoggingMiddleware

---
 council/llm/llm_middleware.py | 35 +++++++++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)

diff --git a/council/llm/llm_middleware.py b/council/llm/llm_middleware.py
index 0693cd28..e8c97c42 100644
--- a/council/llm/llm_middleware.py
+++ b/council/llm/llm_middleware.py
@@ -120,6 +120,41 @@ def __call__(self, llm: LLMBase, execute: ExecuteLLMRequest, request: LLMRequest
         return response
 
 
+class LLMFileLoggingMiddleware:
+    """Middleware for logging LLM requests and responses into a file."""
+
+    def __init__(self, log_file: str, component_name: str) -> None:
+        """Initialize the middleware with the path to the log_file."""
+
+        self.log_file = log_file
+        self.component_name = component_name
+
+    def __call__(self, llm: LLMBase, execute: ExecuteLLMRequest, request: LLMRequest) -> LLMResponse:
+        self._log_llm_request(request)
+        response = execute(request)
+        self._log_llm_response(response)
+        return response
+
+    def _log_llm_request(self, request: LLMRequest) -> None:
+        messages_str = "\n\n".join(message.format() for message in request.messages)
+        self._log(f"LLM input for {self.component_name}:\n{messages_str}")
+
+    def _log_llm_response(self, response: LLMResponse) -> None:
+        if response.result is None:
+            self._log(f"LLM output for {self.component_name} is not available")
+            return
+        self._log(
+            f"LLM output for {self.component_name} Duration: {response.duration:.2f} Output:\n"
+            f"{response.result.first_choice}"
+        )
+
+    def _log(self, content: str) -> None:
+        """Append `content` to a current log file"""
+
+        with open(self.log_file, "a", encoding="utf-8") as file:
+            file.write(f"\n{content}")
+
+
 class LLMRetryMiddleware:
     """
     Middleware for implementing retry logic for LLM requests.

From ae19d6e4544ff4ab5d38d7bfb4d5e058f6b68ef7 Mon Sep 17 00:00:00 2001
From: Dmytro Nikolaiev
Date: Fri, 11 Oct 2024 11:15:01 -0400
Subject: [PATCH 2/4] Add LLMFileLoggingMiddleware to __init__

---
 council/llm/__init__.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/council/llm/__init__.py b/council/llm/__init__.py
index c19d55f8..955582e7 100644
--- a/council/llm/__init__.py
+++ b/council/llm/__init__.py
@@ -17,6 +17,7 @@
     LLMMiddlewareChain,
     LLMRetryMiddleware,
     LLMLoggingMiddleware,
+    LLMFileLoggingMiddleware,
     ExecuteLLMRequest,
 )
 from .llm_function import LLMFunction, LLMFunctionError, FunctionOutOfRetryError

From 011310554ce95e49787f3cbdab48b2be6324a65f Mon Sep 17 00:00:00 2001
From: Dmytro Nikolaiev
Date: Fri, 11 Oct 2024 11:15:10 -0400
Subject: [PATCH 3/4] Update docs

---
 docs/source/reference/llm/llm_middleware.rst | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/docs/source/reference/llm/llm_middleware.rst b/docs/source/reference/llm/llm_middleware.rst
index 62c7e499..9db016d9 100644
--- a/docs/source/reference/llm/llm_middleware.rst
+++ b/docs/source/reference/llm/llm_middleware.rst
@@ -13,6 +13,11 @@ LLMLoggingMiddleware
 
 .. autoclass:: council.llm.LLMLoggingMiddleware
 
+LLMFileLoggingMiddleware
+------------------------
+
+.. autoclass:: council.llm.LLMFileLoggingMiddleware
+
 LLMRetryMiddleware
 ------------------
 

From 5832f8ad1cf835c36ac8dfcf4eba6a8e2520e34a Mon Sep 17 00:00:00 2001
From: Dmytro Nikolaiev
Date: Fri, 11 Oct 2024 12:40:40 -0400
Subject: [PATCH 4/4] Add lock for multi-threading

---
 council/llm/llm_middleware.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/council/llm/llm_middleware.py b/council/llm/llm_middleware.py
index e8c97c42..b33c80af 100644
--- a/council/llm/llm_middleware.py
+++ b/council/llm/llm_middleware.py
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 import time
+from threading import Lock
 from typing import Any, Callable, List, Optional, Protocol, Sequence
 
 from council.contexts import LLMContext
@@ -128,6 +129,7 @@ def __init__(self, log_file: str, component_name: str) -> None:
 
         self.log_file = log_file
         self.component_name = component_name
+        self._lock = Lock()
 
     def __call__(self, llm: LLMBase, execute: ExecuteLLMRequest, request: LLMRequest) -> LLMResponse:
         self._log_llm_request(request)
@@ -151,8 +153,9 @@ def _log_llm_response(self, response: LLMResponse) -> None:
     def _log(self, content: str) -> None:
         """Append `content` to a current log file"""
 
-        with open(self.log_file, "a", encoding="utf-8") as file:
-            file.write(f"\n{content}")
+        with self._lock:  # ensure each write is done atomically in case of multi-threading
+            with open(self.log_file, "a", encoding="utf-8") as file:
+                file.write(f"\n{content}")
 
 
 class LLMRetryMiddleware:
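
Usage note (illustrative, not part of the patches): the sketch below shows one way the new middleware could be wired in front of an LLM. Only the LLMFileLoggingMiddleware constructor arguments (log_file, component_name) and the exported names come from the diffs above; the LLMMiddlewareChain constructor and execute call, plus the get_llm/build_request placeholders, are assumptions about the surrounding council API rather than anything confirmed by this patch series.

    from council.llm import LLMFileLoggingMiddleware, LLMMiddlewareChain

    # get_llm() and build_request() are hypothetical placeholders for however the
    # application obtains its LLMBase instance and builds an LLMRequest.
    llm = get_llm()
    request = build_request("Summarize the release notes.")

    # Constructor arguments taken from PATCH 1/4: a log-file path and a component
    # name used to prefix every logged request/response.
    file_logger = LLMFileLoggingMiddleware(log_file="llm_calls.log", component_name="summarizer")

    # Assumption: LLMMiddlewareChain wraps the LLM with a list of middlewares and
    # exposes an execute(request) entry point; adapt if the actual chain API differs.
    chain = LLMMiddlewareChain(llm, middlewares=[file_logger])
    response = chain.execute(request)
    print(response.result.first_choice if response.result else "no result")

Because PATCH 4/4 guards every file write with a threading.Lock, a single LLMFileLoggingMiddleware instance can be shared by code that issues requests from multiple threads without interleaving partially written log entries.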