From 2577acf6ddbd7f49437aca86d026ec32e13318ed Mon Sep 17 00:00:00 2001
From: Nicola
Date: Fri, 18 Aug 2023 12:18:15 +0200
Subject: [PATCH] restored type check to string

---
 core/cat/factory/custom_llm.py |  2 +-
 core/cat/factory/llm.py        | 15 ++++++++++-----
 2 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/core/cat/factory/custom_llm.py b/core/cat/factory/custom_llm.py
index d3b07c6d..73b99d1a 100644
--- a/core/cat/factory/custom_llm.py
+++ b/core/cat/factory/custom_llm.py
@@ -50,7 +50,7 @@ def _call(
         }

         try:
-            response_json = requests.post(self.url, json=request_body, headers=headers).json()
+            response_json = requests.post(self.url, json=request_body).json()
         except Exception as exc:
             raise ValueError("Custom LLM endpoint error "
                              "during http POST request") from exc

diff --git a/core/cat/factory/llm.py b/core/cat/factory/llm.py
index 6a0b2564..92a1d715 100644
--- a/core/cat/factory/llm.py
+++ b/core/cat/factory/llm.py
@@ -36,7 +36,6 @@ class Config:


 class LLMCustomConfig(LLMSettings):
-
     url: str
     auth_key: str = "optional_auth_key"
     options: str = "{}"
@@ -45,16 +44,20 @@ class LLMCustomConfig(LLMSettings):
     # instantiate Custom LLM from configuration
     @classmethod
     def get_llm_from_config(cls, config):
+        options = config["options"]
         # options are inserted as a string in the admin
-        if isinstance(config["options"], dict):
-            config["options"] = json.loads(config["options"])
+        if isinstance(options, str):
+            if options != "":
+                config["options"] = json.loads(options)
+            else:
+                config["options"] = {}
         return cls._pyclass(**config)

     class Config:
         schema_extra = {
             "humanReadableName": "Custom LLM",
-            "description": 
+            "description":
                 "LLM on a custom endpoint. "
                 "See docs for examples.",
         }

@@ -80,7 +83,7 @@ class LLMOpenAIConfig(LLMSettings):
     class Config:
         schema_extra = {
             "humanReadableName": "OpenAI GPT-3",
-            "description": 
+            "description":
                 "OpenAI GPT-3. More expensive but "
                 "also more flexible than ChatGPT.",
         }
@@ -138,6 +141,7 @@ class Config:
             "description": "Configuration for Cohere language model",
         }

+
 # https://python.langchain.com/en/latest/modules/models/llms/integrations/huggingface_textgen_inference.html
 class LLMHuggingFaceTextGenInferenceConfig(LLMSettings):
     inference_server_url: str
@@ -155,6 +159,7 @@ class Config:
             "description": "Configuration for HuggingFace TextGen Inference",
         }

+
 class LLMHuggingFaceHubConfig(LLMSettings):
     # model_kwargs = {
     #     "generation_config": {
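
Note for reviewers (not part of the patch): below is a minimal, self-contained sketch of the parsing behavior this patch restores in LLMCustomConfig.get_llm_from_config. The helper name parse_options and the sample values are illustrative assumptions, not code from this repository; the point is that the admin UI submits "options" as a JSON string, so the check must be against str, with "" mapping to an empty dict.

    import json

    # Stand-in for the patched classmethod body: parse non-empty JSON
    # strings, map the empty string to {}, and leave non-string values
    # untouched.
    def parse_options(config: dict) -> dict:
        options = config["options"]
        if isinstance(options, str):
            config["options"] = json.loads(options) if options != "" else {}
        return config

    print(parse_options({"options": '{"temperature": 0.2}'}))
    # -> {'options': {'temperature': 0.2}}
    print(parse_options({"options": ""}))
    # -> {'options': {}}
    print(parse_options({"options": {"already": "parsed"}}))
    # -> passes through unchanged

The previous isinstance(config["options"], dict) check could never succeed together with json.loads, since json.loads expects a string, not a dict; restoring the string check makes the branch reachable again.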