Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

add config for loading openai model. #91

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ To simplify the process of running the demos, we have incorporated scripts that

1. Navigate into the root directory.
2. Create an env file. You can use the env.example file as a template. (The OpenAI API key is optional and can be provided from the UI instead)
3. run `docker-compose up` to build the images.
3. run `docker compose up` to build the images.

This will start the backend and frontend servers, and you can access the demos at the following URLs:

Expand Down Expand Up @@ -59,7 +59,7 @@ The news articles are linked to the mentioned entity, while the actual text is s
To run the project on your own database, follow these two steps:

1. Set appropriate database credentials in `.env` file
2. Remove or set appropriate Cypher examples in `api/fewshot_examples.py` file
2. Remove or set appropriate Cypher examples in `api/src/fewshot_examples.py` file

## Contributing

Expand Down
3 changes: 3 additions & 0 deletions api/config.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
{
"model_name": "gpt-4o"
}
3 changes: 2 additions & 1 deletion api/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -7,4 +7,5 @@ retry==0.9.2
tiktoken==0.4.0
python-dotenv==1.0.0
websockets===11.0.3
gunicorn===20.1.0
gunicorn===20.1.0
jsonschema==3.0
2 changes: 1 addition & 1 deletion api/src/llm/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ class OpenAIChat(BaseLLM):
def __init__(
self,
openai_api_key: str,
model_name: str = "gpt-3.5-turbo",
model_name: str = "gpt-4o",
max_tokens: int = 1000,
temperature: float = 0.0,
) -> None:
Expand Down
20 changes: 15 additions & 5 deletions api/src/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
from fewshot_examples import get_fewshot_examples
from llm.openai import OpenAIChat
from pydantic import BaseModel
import json


class Payload(BaseModel):
Expand All @@ -37,6 +38,11 @@ class questionProposalPayload(BaseModel):
api_key: Optional[str]


def load_config(filename):
    """Load a JSON configuration file and return its contents as a dict.

    Args:
        filename: Path to the JSON config file (e.g. "../config.json").
            NOTE(review): callers pass a path relative to the process CWD,
            not to this module — confirm the working directory at runtime.

    Returns:
        The parsed JSON document (a dict for this project's config.json).

    Raises:
        FileNotFoundError: If the file does not exist.
        json.JSONDecodeError: If the file is not valid JSON.
    """
    # JSON is UTF-8 by specification; pin the encoding rather than relying
    # on the platform-dependent default of open().
    with open(filename, "r", encoding="utf-8") as f:
        return json.load(f)


# Maximum number of records used in the context
HARD_LIMIT_CONTEXT_RECORDS = 10

Expand Down Expand Up @@ -76,12 +82,14 @@ async def questionProposalsForCurrentDb(payload: questionProposalPayload):
detail="Please set OPENAI_API_KEY environment variable or send it as api_key in the request body",
)
api_key = openai_api_key if openai_api_key else payload.api_key

config = load_config("../config.json")

questionProposalGenerator = QuestionProposalGenerator(
database=neo4j_connection,
llm=OpenAIChat(
openai_api_key=api_key,
model_name="gpt-3.5-turbo-0613",
model_name=config.get("model_name", "gpt-4o"),
max_tokens=512,
temperature=0.8,
),
Expand Down Expand Up @@ -130,12 +138,12 @@ async def onToken(token):

default_llm = OpenAIChat(
openai_api_key=api_key,
model_name=data.get("model_name", "gpt-3.5-turbo-0613"),
model_name=data.get("model_name", "gpt-4o"),
)
summarize_results = SummarizeCypherResult(
llm=OpenAIChat(
openai_api_key=api_key,
model_name="gpt-3.5-turbo-0613",
model_name=data.get("model_name", "gpt-4o"),
max_tokens=128,
)
)
Expand Down Expand Up @@ -205,8 +213,9 @@ async def root(payload: ImportPayload):
try:
result = ""

config = load_config("../config.json")
llm = OpenAIChat(
openai_api_key=api_key, model_name="gpt-3.5-turbo-16k", max_tokens=4000
openai_api_key=api_key, model_name=config.get("model_name","gpt-4o"), max_tokens=4000
)

if not payload.neo4j_schema:
Expand Down Expand Up @@ -245,10 +254,11 @@ async def companyInformation(payload: companyReportPayload):
detail="Please set OPENAI_API_KEY environment variable or send it as api_key in the request body",
)
api_key = openai_api_key if openai_api_key else payload.api_key
config = load_config("../config.json")

llm = OpenAIChat(
openai_api_key=api_key,
model_name="gpt-3.5-turbo-16k-0613",
model_name=config.get("model_name", "gpt-4o"),
max_tokens=512,
)
print("Running company report for " + payload.company)
Expand Down
44 changes: 22 additions & 22 deletions ui/src/chat-with-kg/App.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -15,23 +15,23 @@ const SEND_REQUESTS = true;
const chatMessageObjects: ChatMessageObject[] = SEND_REQUESTS
? []
: [
{
id: 0,
type: "input",
sender: "self",
message:
"This is the first message which has decently long text and would denote something typed by the user",
complete: true,
},
{
id: 1,
type: "text",
sender: "bot",
message:
"And here is another message which would denote a response from the server, which for now will only be text",
complete: true,
},
];
{
id: 0,
type: "input",
sender: "self",
message:
"This is the first message which has decently long text and would denote something typed by the user",
complete: true,
},
{
id: 1,
type: "text",
sender: "bot",
message:
"And here is another message which would denote a response from the server, which for now will only be text",
complete: true,
},
];

const URI =
import.meta.env.VITE_KG_CHAT_BACKEND_ENDPOINT ??
Expand Down Expand Up @@ -73,7 +73,7 @@ function App() {
const [modalIsOpen, setModalIsOpen] = useState(false);
const [apiKey, setApiKey] = useState(loadKeyFromStorage() || "");
const [sampleQuestions, setSampleQuestions] = useState<string[]>([]);
const [text2cypherModel, setText2cypherModel] = useState<string>("gpt-3.5-turbo-0613");
const [text2cypherModel, setText2cypherModel] = useState<string>("gpt-4o");

const showContent = serverAvailable && !needsApiKeyLoading;

Expand Down Expand Up @@ -268,12 +268,12 @@ function App() {
<button onClick={openModal}>API Key</button>
</div>
)}
<div className="flex justify-end mr-4">
<div className="flex justify-end mr-4">
<select value={text2cypherModel} onChange={handleModelChange}>
<option value="gpt-3.5-turbo-0613">gpt-3.5-turbo</option>
<option value="gpt-4">gpt-4</option>
<option value="gpt-4o">gpt-4o</option>
<option value="gpt-4">gpt-4</option>
</select>
</div>
</div>
<div className="p-6 mx-auto mt-20 rounded-lg bg-palette-neutral-bg-weak min-h-[6rem] min-w-[18rem] max-w-4xl ">
{!serverAvailable && (
<div>Server is unavailable, please reload the page to try again.</div>
Expand Down