diff --git a/cookbook/azure/README.md b/cookbook/azure/README.md
index 52fb01ade..da100b63b 100644
--- a/cookbook/azure/README.md
+++ b/cookbook/azure/README.md
@@ -15,7 +15,7 @@
 MARVIN_AZURE_OPENAI_API_KEY=
 MARVIN_AZURE_OPENAI_ENDPOINT="https://.openai.azure.com/"
 MARVIN_AZURE_OPENAI_API_VERSION=2023-12-01-preview # or latest
-MARVIN_CHAT_COMPLETION_MODEL=
+MARVIN_CHAT_COMPLETIONS_MODEL=
 ```
 
 Note that the chat completion model must be your Azure OpenAI deployment name.
diff --git a/cookbook/azure/usage.py b/cookbook/azure/usage.py
index 4a622e9bb..31fd1de0c 100644
--- a/cookbook/azure/usage.py
+++ b/cookbook/azure/usage.py
@@ -10,7 +10,7 @@
 MARVIN_AZURE_OPENAI_API_VERSION=2023-12-01-preview # or latest
 
 Note that you MUST set the LLM model name to be your Azure OpenAI
 deployment name, e.g.
-MARVIN_CHAT_COMPLETION_MODEL=
+MARVIN_CHAT_COMPLETIONS_MODEL=
 ```
 """
diff --git a/docs/docs/configuration/settings.md b/docs/docs/configuration/settings.md
index b5515f066..d940add49 100644
--- a/docs/docs/configuration/settings.md
+++ b/docs/docs/configuration/settings.md
@@ -24,7 +24,7 @@ A runtime settings object is accessible via `marvin.settings` and can be used to
 
     ```python
     import marvin
 
-    marvin.settings.openai_chat_completions_model = 'gpt-4'
+    marvin.settings.openai.chat.completions.model = 'gpt-4'
     ```
 
@@ -38,7 +38,7 @@
 MARVIN_AZURE_OPENAI_API_KEY=
 MARVIN_AZURE_OPENAI_ENDPOINT="https://.openai.azure.com/"
 MARVIN_AZURE_OPENAI_API_VERSION=2023-12-01-preview # or latest
-MARVIN_CHAT_COMPLETION_MODEL=
+MARVIN_CHAT_COMPLETIONS_MODEL=
 ```
 
 Note that the chat completion model must be your Azure OpenAI deployment name.
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
index 4b4e173ec..995ca9ac8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -98,8 +98,8 @@ filterwarnings = [
 env = [
     "MARVIN_TEST_MODE=1",
     # use 3.5 for tests by default
-    'D:MARVIN_CHAT_COMPLETION_MODEL=gpt-3.5-turbo',
-    'D:MARVIN_CHAT_COMPLETION_TEMPERATURE=0.0',
+    'D:MARVIN_CHAT_COMPLETIONS_MODEL=gpt-3.5-turbo',
+    'D:MARVIN_CHAT_COMPLETIONS_TEMPERATURE=0.0',
     'D:MARVIN_LOG_VERBOSE=1',
     'D:MARVIN_LOG_LEVEL=DEBUG',
     'PYTEST_TIMEOUT=20',
diff --git a/src/marvin/settings.py b/src/marvin/settings.py
index 8574de4fb..7074b5642 100644
--- a/src/marvin/settings.py
+++ b/src/marvin/settings.py
@@ -33,7 +33,7 @@ def __setattr__(self, name: str, value: Any) -> None:
 
 
 class ChatCompletionSettings(MarvinSettings):
-    model_config = SettingsConfigDict(env_prefix="marvin_chat_completion_")
+    model_config = SettingsConfigDict(env_prefix="marvin_chat_completions_")
     model: str = Field(
         description="The default chat model to use.", default="gpt-4-1106-preview"
     )
diff --git a/src/marvin/utilities/openai.py b/src/marvin/utilities/openai.py
index 90623c790..f4189dee9 100644
--- a/src/marvin/utilities/openai.py
+++ b/src/marvin/utilities/openai.py
@@ -75,7 +75,7 @@ def get_openai_client(
             - MARVIN_AZURE_OPENAI_ENDPOINT
 
             In addition, you must set the LLM model name to your Azure OpenAI deployment name, e.g.
-            - MARVIN_CHAT_COMPLETION_MODEL =
+            - MARVIN_CHAT_COMPLETIONS_MODEL =
             """
         )
     )