util.py
import os

from dotenv import load_dotenv, find_dotenv
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage

# Load environment variables from a .env file, if one is present.
load_dotenv(find_dotenv())
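
# Illustrative .env contents (variable names are taken from the lookups below;
# the values are assumptions for a local OpenAI-compatible setup):
#
#   LLM_TYPE=local                             # anything else routes to OpenAI
#   OPENAI_API_BASE=http://localhost:8080/v1   # local OpenAI-compatible server
#   MODEL_NAME=ggml-gpt4all-j
#   OPENAI_API_KEY=sk-...                      # only needed for the OpenAI path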

def get_chat_response(system_prompt, user_prompt):
    """Dispatch to the local LLM or the OpenAI API, depending on LLM_TYPE."""
    # By default, use the local LLM.
    llm_type = os.environ.get("LLM_TYPE", "local")
    if llm_type == "local":
        return get_local_llm_response(system_prompt, user_prompt)
    return get_openai_response(system_prompt, user_prompt)

def get_local_llm_response(system_prompt, user_prompt, model="ggml-gpt4all-j", temperature=0.9):
    """Send a plain-text completion request to a local OpenAI-compatible endpoint."""
    base_path = os.environ.get("OPENAI_API_BASE", "http://localhost:8080/v1")
    model_name = os.environ.get("MODEL_NAME", model)
    # openai_api_key is a placeholder: the client requires a value, but a local
    # endpoint does not use it.
    llm = OpenAI(temperature=temperature, openai_api_base=base_path, model_name=model_name, openai_api_key="null")
    # Completion models take a single string, so join the two prompts.
    text = system_prompt + "\n\n" + user_prompt + "\n\n"
    response = llm(text)
    print(response)
    return response

def get_openai_response(system_prompt, user_prompt, model="gpt-3.5-turbo", temperature=0):
    """Send a system/user message pair to the OpenAI chat API."""
    chat = ChatOpenAI(model_name=model, temperature=temperature)
    messages = [
        SystemMessage(content=system_prompt),
        HumanMessage(content=user_prompt),
    ]
    response = chat(messages)
    print(response)
    return response.content
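

# A minimal usage sketch (hypothetical prompts; assumes either a reachable
# server at OPENAI_API_BASE when LLM_TYPE=local, or a valid OPENAI_API_KEY
# in the environment otherwise):
if __name__ == "__main__":
    answer = get_chat_response(
        "You are a helpful assistant.",
        "Explain what this utility module does in one sentence.",
    )
    print(answer)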