Add support for Llama2, Palm, Cohere, Anthropic, Replicate, Azure Models [100+ LLMs] - using LiteLLM #200

Open · wants to merge 2 commits into base: master

10 changes: 4 additions & 6 deletions agent/llm_utils.py
@@ -6,7 +6,7 @@
 import time

 import openai
-from langchain.adapters import openai as lc_openai
+import litellm
 from colorama import Fore, Style
 from openai.error import APIError, RateLimitError
@@ -15,7 +15,7 @@

 CFG = Config()

-openai.api_key = CFG.openai_api_key
+litellm.api_key = CFG.openai_api_key

 from typing import Optional
 import logging
@@ -62,12 +62,11 @@ def send_chat_completion_request(
     messages, model, temperature, max_tokens, stream, websocket
 ):
     if not stream:
-        result = lc_openai.ChatCompletion.create(
+        result = litellm.completion(
             model=model,  # Change model here to use different models
             messages=messages,
             temperature=temperature,
             max_tokens=max_tokens,
-            provider=CFG.llm_provider,  # Change provider here to use a different API
         )
         return result["choices"][0]["message"]["content"]
     else:
@@ -79,12 +78,11 @@ async def stream_response(model, messages, temperature, max_tokens, websocket):
     response = ""
     print(f"streaming response...")

-    for chunk in lc_openai.ChatCompletion.create(
+    for chunk in litellm.completion(
         model=model,
         messages=messages,
         temperature=temperature,
         max_tokens=max_tokens,
-        provider=CFG.llm_provider,
         stream=True,
     ):
         content = chunk["choices"][0].get("delta", {}).get("content")
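With this hunk, llm_utils calls LiteLLM directly instead of going through LangChain's OpenAI adapter. litellm.completion() keeps the openai.ChatCompletion.create() argument and response shape but routes on the model string alone, which is why the provider kwarg can be dropped. A minimal sketch of both code paths against non-OpenAI backends (the model names are illustrative and assume the matching provider API keys are set):

    import litellm

    messages = [
        {"role": "system", "content": "You are a research assistant."},
        {"role": "user", "content": "Summarize the benefits of unified LLM APIs."},
    ]

    # Non-streaming call: same shape as openai.ChatCompletion.create,
    # but the model string alone selects the provider.
    result = litellm.completion(
        model="claude-2",  # illustrative Anthropic model; assumes ANTHROPIC_API_KEY is set
        messages=messages,
        temperature=0.7,
        max_tokens=256,
    )
    print(result["choices"][0]["message"]["content"])

    # Streaming call: chunks keep the OpenAI delta format, so the
    # parsing already done in stream_response() works unchanged.
    for chunk in litellm.completion(
        model="command-nightly",  # illustrative Cohere model; assumes COHERE_API_KEY is set
        messages=messages,
        temperature=0.7,
        max_tokens=256,
        stream=True,
    ):
        content = chunk["choices"][0].get("delta", {}).get("content")
        if content:
            print(content, end="", flush=True)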
4 changes: 2 additions & 2 deletions permchain_example/editor_actors/editor.py
@@ -1,4 +1,4 @@
-from langchain.chat_models import ChatOpenAI
+from langchain.chat_models import ChatLiteLLM
 from langchain.prompts import SystemMessagePromptTemplate
 from config import Config

@@ -18,7 +18,7 @@

 class EditorActor:
     def __init__(self):
-        self.model = ChatOpenAI(model=CFG.smart_llm_model)
+        self.model = ChatLiteLLM(model=CFG.smart_llm_model)
         self.prompt = SystemMessagePromptTemplate.from_template(EDIT_TEMPLATE) + "Draft:\n\n{draft}"
         self.functions = [
             {
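ChatLiteLLM ships with LangChain (presumably the reason requirements.txt bumps langchain to 0.0.297) and exposes the same chat-model interface as ChatOpenAI, so each actor only changes its import and constructor. A rough sketch of the swap in isolation, with illustrative model strings:

    from langchain.chat_models import ChatLiteLLM
    from langchain.schema import HumanMessage

    # Same call surface as ChatOpenAI; the model string picks the provider.
    model = ChatLiteLLM(model="gpt-3.5-turbo")  # OpenAI, the previous default
    # model = ChatLiteLLM(model="claude-2")     # illustrative: an Anthropic model

    reply = model([HumanMessage(content="Tighten this draft: ...")])
    print(reply.content)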
4 changes: 2 additions & 2 deletions permchain_example/reviser_actors/reviser.py
@@ -1,4 +1,4 @@
-from langchain.chat_models import ChatOpenAI, ChatAnthropic
+from langchain.chat_models import ChatLiteLLM, ChatAnthropic
 from langchain.schema.output_parser import StrOutputParser
 from langchain.prompts import SystemMessagePromptTemplate
 from config import Config
@@ -7,7 +7,7 @@

 class ReviserActor:
     def __init__(self):
-        self.model = ChatOpenAI(model=CFG.smart_llm_model)
+        self.model = ChatLiteLLM(model=CFG.smart_llm_model)
         self.prompt = SystemMessagePromptTemplate.from_template(
             "You are an expert writer. "
             "You have been tasked by your editor with revising the following draft, which was written by a non-expert. "
6 changes: 3 additions & 3 deletions permchain_example/search_actors/gpt_researcher.py
@@ -3,7 +3,7 @@
 from actions.web_scrape import scrape_text_with_selenium
 from actions.web_search import web_search

-from langchain.chat_models import ChatOpenAI
+from langchain.chat_models import ChatLiteLLM
 from langchain.prompts import ChatPromptTemplate
 from langchain.schema.output_parser import StrOutputParser
 from langchain.schema.runnable import RunnableMap, RunnableLambda
@@ -43,8 +43,8 @@
     ]
 ) | scrape_and_summarize.map() | (lambda x: "\n".join(x))

-search_query = SEARCH_PROMPT | ChatOpenAI(model=CFG.smart_llm_model) | StrOutputParser() | json.loads
-choose_agent = CHOOSE_AGENT_PROMPT | ChatOpenAI(model=CFG.smart_llm_model) | StrOutputParser() | json.loads
+search_query = SEARCH_PROMPT | ChatLiteLLM(model=CFG.smart_llm_model) | StrOutputParser() | json.loads
+choose_agent = CHOOSE_AGENT_PROMPT | ChatLiteLLM(model=CFG.smart_llm_model) | StrOutputParser() | json.loads

 get_search_queries = {
     "question": lambda x: x,
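Since ChatLiteLLM is a drop-in runnable, the LCEL pipes above keep working; only the model string inside the chain decides which provider serves it. A hedged sketch of the same pattern with a stand-in prompt (SEARCH_PROMPT itself lives elsewhere in this file, and the json.loads step assumes the model returns parseable JSON):

    import json

    from langchain.chat_models import ChatLiteLLM
    from langchain.prompts import ChatPromptTemplate
    from langchain.schema.output_parser import StrOutputParser

    # Stand-in for SEARCH_PROMPT: asks the model for machine-readable output.
    prompt = ChatPromptTemplate.from_messages(
        [("user", "Return a JSON array of 3 web search queries about: {question}")]
    )

    # Identical pipe shape to search_query above; retarget by changing the model.
    chain = prompt | ChatLiteLLM(model="gpt-3.5-turbo") | StrOutputParser() | json.loads

    print(chain.invoke({"question": "unified LLM APIs"}))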
4 changes: 2 additions & 2 deletions permchain_example/writer_actors/writer.py
@@ -1,5 +1,5 @@
 from langchain.prompts import ChatPromptTemplate
-from langchain.chat_models import ChatOpenAI
+from langchain.chat_models import ChatLiteLLM
 from langchain.schema.output_parser import StrOutputParser
 from agent.prompts import generate_report_prompt, generate_agent_role_prompt
 from config import Config
@@ -8,7 +8,7 @@

 class WriterActor:
     def __init__(self):
-        self.model = ChatOpenAI(model=CFG.smart_llm_model)
+        self.model = ChatLiteLLM(model=CFG.smart_llm_model)
         self.prompt = ChatPromptTemplate.from_messages([
             ("system", generate_agent_role_prompt(agent="Default Agent")),
             ("user", generate_report_prompt(question="{query}", research_summary="{results}"))
3 changes: 2 additions & 1 deletion requirements.txt
@@ -16,6 +16,7 @@ pydantic
 fastapi
 python-multipart
 markdown
-langchain==0.0.275
+langchain==0.0.297
+litellm
 tavily-python
 permchain
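Beyond the litellm.api_key assignment in llm_utils.py, LiteLLM resolves most provider credentials from environment variables, so pointing the researcher at another backend is a configuration change rather than a code change. A sketch using the variable names from LiteLLM's documented conventions (set only the providers you actually call):

    import os

    # Assumed LiteLLM conventions; unused providers need no key.
    os.environ["OPENAI_API_KEY"] = "sk-..."         # OpenAI (current default)
    os.environ["ANTHROPIC_API_KEY"] = "sk-ant-..."  # Anthropic, e.g. claude-2
    os.environ["COHERE_API_KEY"] = "..."            # Cohere, e.g. command-nightly
    os.environ["REPLICATE_API_KEY"] = "..."         # Replicate, e.g. Llama 2 hosts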