restored type check to string
nicola-corbellini committed Aug 18, 2023
1 parent 934584d commit 2577acf
Showing 2 changed files with 11 additions and 6 deletions.
2 changes: 1 addition & 1 deletion core/cat/factory/custom_llm.py
@@ -50,7 +50,7 @@ def _call(
         }
 
         try:
-            response_json = requests.post(self.url, json=request_body, headers=headers).json()
+            response_json = requests.post(self.url, json=request_body).json()
         except Exception as exc:
             raise ValueError("Custom LLM endpoint error "
                              "during http POST request") from exc
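For reference, the POST-and-wrap pattern shown in the hunk above can be exercised on its own with the minimal sketch below; the function name, sample URL, and request body are placeholders for illustration and are not part of this commit.

import requests


def call_custom_llm(url: str, request_body: dict) -> dict:
    # Same pattern as the hunk above: POST the body as JSON and surface any
    # transport or JSON-decoding failure as a ValueError chained to its cause.
    try:
        response_json = requests.post(url, json=request_body).json()
    except Exception as exc:
        raise ValueError("Custom LLM endpoint error "
                         "during http POST request") from exc
    return response_json


# Hypothetical usage; the real request schema is built elsewhere in custom_llm.py.
# call_custom_llm("http://localhost:5000/generate", {"text": "Hello"})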
15 changes: 10 additions & 5 deletions core/cat/factory/llm.py
@@ -36,7 +36,6 @@ class Config:
 
 
 class LLMCustomConfig(LLMSettings):
-
     url: str
     auth_key: str = "optional_auth_key"
     options: str = "{}"
@@ -45,16 +44,20 @@ class LLMCustomConfig(LLMSettings):
     # instantiate Custom LLM from configuration
     @classmethod
     def get_llm_from_config(cls, config):
+        options = config["options"]
         # options are inserted as a string in the admin
-        if isinstance(config["options"], dict):
-            config["options"] = json.loads(config["options"])
+        if isinstance(options, str):
+            if options != "":
+                config["options"] = json.loads(options)
+            else:
+                config["options"] = {}
 
         return cls._pyclass(**config)
 
     class Config:
         schema_extra = {
             "humanReadableName": "Custom LLM",
-            "description":
+            "description":
                 "LLM on a custom endpoint. "
                 "See docs for examples.",
         }
Expand All @@ -80,7 +83,7 @@ class LLMOpenAIConfig(LLMSettings):
class Config:
schema_extra = {
"humanReadableName": "OpenAI GPT-3",
"description":
"description":
"OpenAI GPT-3. More expensive but "
"also more flexible than ChatGPT.",
}
@@ -138,6 +141,7 @@ class Config:
             "description": "Configuration for Cohere language model",
         }
 
+
 # https://python.langchain.com/en/latest/modules/models/llms/integrations/huggingface_textgen_inference.html
 class LLMHuggingFaceTextGenInferenceConfig(LLMSettings):
     inference_server_url: str
@@ -155,6 +159,7 @@ class Config:
             "description": "Configuration for HuggingFace TextGen Inference",
         }
 
+
 class LLMHuggingFaceHubConfig(LLMSettings):
     # model_kwargs = {
     #     "generation_config": {
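For context on the commit message, the string type check restored in get_llm_from_config can be tried in isolation with the rough sketch below; the helper name and the sample option strings are illustrative assumptions, not code from the repository.

import json


def parse_options(options):
    # Mirrors the restored behaviour: the admin form submits `options` as a
    # string, so a non-empty string is decoded with json.loads, an empty
    # string falls back to an empty dict, and non-string values pass through.
    if isinstance(options, str):
        if options != "":
            return json.loads(options)
        return {}
    return options


# Illustrative inputs (not taken from the commit):
# parse_options('{"temperature": 0.2}')  ->  {'temperature': 0.2}
# parse_options("")                      ->  {}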
