Feature refactor llm function package #200

Merged 16 commits on Dec 19, 2024
2 changes: 1 addition & 1 deletion council/controllers/llm_controller.py
@@ -4,7 +4,7 @@
 from council.contexts import AgentContext, ChatMessage, ContextBase
 from council.controllers import ControllerBase, ControllerException
 from council.llm import LLMBase, LLMMessage, MonitoredLLM
-from council.llm.llm_answer import LLMAnswer, LLMParsingException, llm_class_validator, llm_property
+from council.llm.base.llm_answer import LLMAnswer, LLMParsingException, llm_class_validator, llm_property
 from council.utils import Option
 from typing_extensions import TypeGuard
2 changes: 1 addition & 1 deletion council/evaluators/llm_evaluator.py
@@ -9,7 +9,7 @@
 from council.contexts import AgentContext, ChatMessage, ContextBase, ScoredChatMessage
 from council.evaluators import EvaluatorBase, EvaluatorException
 from council.llm import LLMAnswer, LLMBase, LLMMessage, MonitoredLLM, llm_property
-from council.llm.llm_answer import LLMParsingException, llm_class_validator
+from council.llm.base.llm_answer import LLMParsingException, llm_class_validator
 from council.utils import Option
 
 
2 changes: 1 addition & 1 deletion council/filters/llm_filter.py
@@ -9,7 +9,7 @@
 from council.contexts import AgentContext, ContextBase, ScoredChatMessage
 from council.filters import FilterBase, FilterException
 from council.llm import LLMAnswer, LLMBase, LLMMessage, MonitoredLLM, llm_property
-from council.llm.llm_answer import LLMParsingException
+from council.llm.base.llm_answer import LLMParsingException
 from council.utils import Option
 
 
147 changes: 60 additions & 87 deletions council/llm/__init__.py
@@ -1,99 +1,72 @@
"""This package provides clients to use various LLMs."""

from typing import Optional, Type

from ..utils import read_env_str

from .llm_config_object import LLMProvider, LLMConfigObject, LLMConfigSpec, LLMProviders
from .llm_answer import llm_property, LLMAnswer, LLMProperty, LLMParsingException
from .llm_exception import LLMException, LLMCallException, LLMCallTimeoutException, LLMTokenLimitException
from .llm_message import LLMMessageRole, LLMMessage, LLMMessageTokenCounterBase
from .llm_base import LLMBase, LLMResult, LLMConfigurationBase
from .llm_cost import (
LLMCostCard,
LLMConsumptionCalculatorBase,
from .base import (
AnthropicLLM,
AnthropicLLMConfiguration,
AzureChatGPTConfiguration,
AzureLLM,
DefaultLLMConsumptionCalculator,
DefaultLLMConsumptionCalculatorHelper,
TokenKind,
LLMCostManagerSpec,
GeminiLLM,
GeminiLLMConfiguration,
GroqLLM,
GroqLLMConfiguration,
LLMAnswer,
LLMBase,
LLMCacheControlData,
LLMCallException,
LLMCallTimeoutException,
LLMConfigObject,
LLMConfigSpec,
LLMConfigurationBase,
LLMConsumptionCalculatorBase,
LLMCostCard,
LLMCostManagerObject,
LLMException,
LLMFallback,
LLMMessage,
LLMMessageData,
LLMMessageRole,
LLMMessageTokenCounterBase,
LLMOutOfRetriesException,
LLMParsingException,
LLMProperty,
LLMProvider,
LLMProviders,
LLMResult,
LLMTokenLimitException,
MonitoredLLM,
OllamaLLM,
OllamaLLMConfiguration,
OpenAIChatGPTConfiguration,
OpenAILLM,
TokenKind,
get_default_llm,
get_llm_from_config,
get_llm_from_config_obj,
llm_property,
)
from .llm_fallback import LLMFallback
from .llm_middleware import (
LLMRequest,
LLMResponse,
from .llm_function import (
BaseModelResponseParser,
CodeBlocksResponseParser,
EchoResponseParser,
ExecuteLLMRequest,
FunctionOutOfRetryError,
JSONBlockResponseParser,
JSONResponseParser,
LLMCachingMiddleware,
LLMFileLoggingMiddleware,
LLMFunction,
LLMFunctionError,
LLMFunctionResponse,
LLMFunctionWithPrompt,
LLMLoggingMiddleware,
LLMLoggingStrategy,
LLMMiddleware,
LLMMiddlewareChain,
LLMRequest,
LLMResponse,
LLMRetryMiddleware,
LLMLoggingStrategy,
LLMLoggingMiddleware,
LLMFileLoggingMiddleware,
LLMCachingMiddleware,
ExecuteLLMRequest,
)
from .llm_response_parser import (
EchoResponseParser,
StringResponseParser,
CodeBlocksResponseParser,
JSONBlockResponseParser,
JSONResponseParser,
YAMLBlockResponseParser,
YAMLResponseParser,
)
from .llm_function import LLMFunction, LLMFunctionResponse, LLMFunctionError, FunctionOutOfRetryError
from .llm_function_with_prompt import LLMFunctionWithPrompt
from .monitored_llm import MonitoredLLM

from .providers import (
_build_llm,
_PROVIDER_TO_LLM,
AzureLLM,
AzureChatGPTConfiguration,
OpenAILLM,
OpenAIChatGPTConfiguration,
AnthropicLLM,
AnthropicLLMConfiguration,
GeminiLLM,
GeminiLLMConfiguration,
GroqLLM,
GroqLLMConfiguration,
OllamaLLM,
OllamaLLMConfiguration,
)


def get_default_llm(max_retries: Optional[int] = None) -> LLMBase:
"""Get default LLM based on `COUNCIL_DEFAULT_LLM_PROVIDER` env variable."""
provider_str = read_env_str("COUNCIL_DEFAULT_LLM_PROVIDER", default=LLMProviders.OpenAI).unwrap()
provider_str = provider_str.lower() + "spec"

llm_class: Optional[Type[LLMBase]] = next(
(llm_class for provider_enum, llm_class in _PROVIDER_TO_LLM.items() if provider_str == provider_enum.lower()),
None,
)

if llm_class is None:
raise ValueError(f"Provider {provider_str} not supported by Council.")

llm = llm_class.from_env()

if max_retries is not None and max_retries > 0:
return LLMFallback(llm=llm, fallback=llm, retry_before_fallback=max_retries - 1)

return llm


def get_llm_from_config(filename: str) -> LLMBase:
"""Get LLM from a yaml LLMConfigObject file."""
llm_config = LLMConfigObject.from_yaml(filename)
return get_llm_from_config_obj(llm_config)


def get_llm_from_config_obj(llm_config: LLMConfigObject):
llm = _build_llm(llm_config)
fallback_provider = llm_config.spec.fallback_provider
if fallback_provider is not None:
llm_config.spec.provider = fallback_provider
llm_with_fallback = _build_llm(llm_config)
return LLMFallback(llm, llm_with_fallback)
return llm
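
Taken together, this hunk keeps the public surface of `council.llm` intact while moving the modules: everything is now re-exported from the `base` and `llm_function` subpackages, and the factory helpers move with them. A minimal sketch of the effect on downstream code, using only names from the import lists in this diff:

```python
# Top-level imports are unaffected by the refactor: the names are re-exported.
from council.llm import LLMFunction, LLMParsingException, get_default_llm

# Deep imports must be updated to go through the new `base` subpackage,
# exactly as llm_controller.py, llm_evaluator.py, and llm_filter.py do above.
from council.llm.base.llm_answer import LLMAnswer, llm_property
```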
77 changes: 77 additions & 0 deletions council/llm/base/__init__.py
@@ -0,0 +1,77 @@
+from typing import Optional, Type
+
+from .llm_config_object import LLMProvider, LLMConfigObject, LLMConfigSpec, LLMProviders
+from .llm_answer import llm_property, LLMAnswer, LLMProperty, LLMParsingException
+from .llm_exception import (
+    LLMException,
+    LLMCallException,
+    LLMCallTimeoutException,
+    LLMTokenLimitException,
+    LLMOutOfRetriesException,
+)
+from .llm_message import LLMMessageRole, LLMMessage, LLMMessageData, LLMCacheControlData, LLMMessageTokenCounterBase
+from .llm_base import LLMBase, LLMResult, LLMConfigurationBase, T_Configuration
+from .llm_cost import (
+    LLMCostCard,
+    LLMCostManagerObject,
+    TokenKind,
+    LLMConsumptionCalculatorBase,
+    DefaultLLMConsumptionCalculator,
+)
+from .llm_fallback import LLMFallback
+from .monitored_llm import MonitoredLLM
+
+from .providers import (
+    _build_llm,
+    _PROVIDER_TO_LLM,
+    AzureLLM,
+    AzureChatGPTConfiguration,
+    OpenAILLM,
+    OpenAIChatGPTConfiguration,
+    AnthropicLLM,
+    AnthropicLLMConfiguration,
+    GeminiLLM,
+    GeminiLLMConfiguration,
+    GroqLLM,
+    GroqLLMConfiguration,
+    OllamaLLM,
+    OllamaLLMConfiguration,
+)
+from ...utils import read_env_str
+
+
+def get_default_llm(max_retries: Optional[int] = None) -> LLMBase:
+    """Get default LLM based on `COUNCIL_DEFAULT_LLM_PROVIDER` env variable."""
+    provider_str = read_env_str("COUNCIL_DEFAULT_LLM_PROVIDER", default=LLMProviders.OpenAI).unwrap()
+    provider_str = provider_str.lower() + "spec"
+
+    llm_class: Optional[Type[LLMBase]] = next(
+        (llm_class for provider_enum, llm_class in _PROVIDER_TO_LLM.items() if provider_str == provider_enum.lower()),
+        None,
+    )
+
+    if llm_class is None:
+        raise ValueError(f"Provider {provider_str} not supported by Council.")
+
+    llm = llm_class.from_env()
+
+    if max_retries is not None and max_retries > 0:
+        return LLMFallback(llm=llm, fallback=llm, retry_before_fallback=max_retries - 1)
+
+    return llm
+
+
+def get_llm_from_config(filename: str) -> LLMBase:
+    """Get LLM from a yaml LLMConfigObject file."""
+    llm_config = LLMConfigObject.from_yaml(filename)
+    return get_llm_from_config_obj(llm_config)
+
+
+def get_llm_from_config_obj(llm_config: LLMConfigObject):
+    llm = _build_llm(llm_config)
+    fallback_provider = llm_config.spec.fallback_provider
+    if fallback_provider is not None:
+        llm_config.spec.provider = fallback_provider
+        llm_with_fallback = _build_llm(llm_config)
+        return LLMFallback(llm, llm_with_fallback)
+    return llm
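
Since `get_default_llm` resolves its provider from the `COUNCIL_DEFAULT_LLM_PROVIDER` environment variable and `get_llm_from_config` builds from a YAML `LLMConfigObject` file, wiring them up might look like the sketch below; the env-var value, credential variable, and file name are illustrative assumptions, not part of this PR:

```python
import os

from council.llm import get_default_llm, get_llm_from_config

# Provider resolution is case-insensitive: the value is lowercased, suffixed
# with "spec", and matched against the LLMProviders enum; OpenAI is the
# default when the variable is unset.
os.environ["COUNCIL_DEFAULT_LLM_PROVIDER"] = "OpenAI"
os.environ["OPENAI_API_KEY"] = "sk-..."  # assumed credential variable

# With max_retries=3, the result is wrapped in LLMFallback with
# retry_before_fallback=2 and the same LLM as its own fallback.
llm = get_default_llm(max_retries=3)

# Builds from a YAML LLMConfigObject spec; if the spec declares a
# fallback_provider, get_llm_from_config_obj returns an LLMFallback over
# the primary and fallback providers. The file name here is hypothetical.
llm_from_file = get_llm_from_config("llm-config.yaml")
```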
File renamed without changes.
2 changes: 1 addition & 1 deletion council/llm/llm_base.py → council/llm/base/llm_base.py
@@ -4,7 +4,7 @@
 from council.contexts import Consumption, LLMContext, Monitorable
 from typing_extensions import Self
 
-from . import LLMConfigObject, LLMConfigSpec
+from .llm_config_object import LLMConfigObject, LLMConfigSpec
 from .llm_message import LLMMessage, LLMMessageTokenCounterBase
 
 _DEFAULT_TIMEOUT: Final[int] = 30
File renamed without changes.
File renamed without changes.
File renamed without changes.
15 changes: 5 additions & 10 deletions council/llm/llm_fallback.py → council/llm/base/llm_fallback.py
@@ -4,16 +4,11 @@
 from typing import Any, Sequence
 
 from council.contexts import LLMContext
-from council.llm import (
-    LLMBase,
-    LLMCallException,
-    LLMConfigSpec,
-    LLMConfigurationBase,
-    LLMException,
-    LLMMessage,
-    LLMResult,
-)
-from council.llm.llm_base import T_Configuration
+
+from .llm_base import LLMBase, LLMConfigurationBase, LLMResult, T_Configuration
+from .llm_config_object import LLMConfigSpec
+from .llm_exception import LLMCallException, LLMException
+from .llm_message import LLMMessage
 
 
 class LLMFallbackConfiguration(LLMConfigurationBase):
File renamed without changes.
@@ -1,7 +1,9 @@
 from typing import Any, Optional, Sequence
 
 from council.contexts import Budget, ContextBase, LLMContext, Monitored
-from council.llm import LLMBase, LLMMessage, LLMResult
+
+from .llm_base import LLMBase, LLMResult
+from .llm_message import LLMMessage
 
 
 class MonitoredLLM(Monitored[LLMBase]):
@@ -5,7 +5,8 @@
 from typing import Any, Dict, List, Optional, Sequence
 
 from anthropic.types import Completion
-from council.llm import LLMMessage
+
+from ...llm_message import LLMMessage
 
 
 class Usage:
@@ -2,8 +2,8 @@
 
 from anthropic import Anthropic
 from anthropic._types import NOT_GIVEN
-from council.llm import LLMMessage, LLMMessageRole
 
+from ...llm_message import LLMMessage, LLMMessageRole
 from .anthropic import AnthropicAPIClientResult, AnthropicAPIClientWrapper
 from .anthropic_llm_configuration import AnthropicLLMConfiguration
 
@@ -4,9 +4,11 @@
 
 from anthropic import Anthropic, APIStatusError, APITimeoutError
 from council.contexts import Consumption, LLMContext
-from council.llm import LLMBase, LLMCallException, LLMCallTimeoutException, LLMMessage, LLMResult
 from council.utils.utils import DurationManager
 
+from ...llm_base import LLMBase, LLMResult
+from ...llm_exception import LLMCallException, LLMCallTimeoutException
+from ...llm_message import LLMMessage
 from .anthropic import AnthropicAPIClientWrapper, Usage
 from .anthropic_completion_llm import AnthropicCompletionLLM
 from .anthropic_llm_configuration import AnthropicLLMConfiguration
@@ -2,7 +2,6 @@
 
 from typing import Any, Final, Optional
 
-from council.llm import LLMConfigSpec, LLMConfigurationBase, LLMProviders
 from council.utils import (
     Parameter,
     greater_than_validator,
@@ -12,6 +11,9 @@
     zero_to_one_validator,
 )
 
+from ...llm_base import LLMConfigSpec, LLMConfigurationBase
+from ...llm_config_object import LLMProviders
+
 _env_var_prefix: Final[str] = "ANTHROPIC_"
 
 
@@ -4,8 +4,8 @@
 from typing import Final, List, Mapping, Optional
 
 from council.contexts import Consumption
-from council.llm import LLMConsumptionCalculatorBase, LLMCostCard, LLMCostManagerObject, TokenKind
 
+from ...llm_cost import LLMConsumptionCalculatorBase, LLMCostCard, LLMCostManagerObject, TokenKind
 from .anthropic import Usage
 
 ANTHROPIC_COSTS_FILENAME: Final[str] = os.path.join(
@@ -5,9 +5,8 @@
 from anthropic import Anthropic
 from anthropic._types import NOT_GIVEN
 from anthropic.types import MessageParam, TextBlock
-from council.llm import LLMMessage, LLMMessageRole
-from council.llm.llm_message import LLMCacheControlData
 
+from ...llm_message import LLMCacheControlData, LLMMessage, LLMMessageRole
 from .anthropic import AnthropicAPIClientResult, AnthropicAPIClientWrapper, Usage
 from .anthropic_llm_configuration import AnthropicLLMConfiguration
 
@@ -4,12 +4,13 @@
 
 import google.generativeai as genai  # type: ignore
 from council.contexts import Consumption, LLMContext
-from council.llm import LLMBase, LLMMessage, LLMMessageRole, LLMResult
 from council.utils.utils import DurationManager
 from google.ai.generativelanguage import FileData
 from google.ai.generativelanguage_v1 import HarmCategory  # type: ignore
 from google.generativeai.types import GenerateContentResponse, HarmBlockThreshold  # type: ignore
 
+from ...llm_base import LLMBase, LLMResult
+from ...llm_message import LLMMessage, LLMMessageRole
 from .gemini_llm_configuration import GeminiLLMConfiguration
 from .gemini_llm_cost import GeminiConsumptionCalculator
 