ui.py
import gradio as gr
import os
from llama_index.llms import OpenAI
from llama_index import ServiceContext, StorageContext, load_index_from_storage
# Access the OpenAI API key
openai_api_key = os.getenv("OPENAI_API_KEY")

# Define the LLM and put it in a service context so queries actually use GPT-4
llm_gpt4 = OpenAI(model="gpt-4", api_key=openai_api_key)
service_context = ServiceContext.from_defaults(llm=llm_gpt4)

# Load vector_index from the serialized storage directory
PERSIST_DIR = "./storage"
storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
vector_index = load_index_from_storage(storage_context, service_context=service_context)
# Basic UI: gr.ChatInterface calls fn(message, history)
def interface_func(message, history):
    print("This is the input text:", message)
    # Build a QueryEngine and query the index
    query_engine = vector_index.as_query_engine()
    # TODO: add a reranking step (e.g. ColBERT) over the retrieved nodes
    # (see the commented sketch below this function)
    response = query_engine.query(message)
    print(response)
    return str(response)
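
# Sketch for the reranking TODO above (not wired in): one option with this
# version of llama_index is a cross-encoder node postprocessor. The model
# name, top_n, and similarity_top_k below are illustrative choices, not part
# of the original script.
#
# from llama_index.postprocessor import SentenceTransformerRerank
#
# reranker = SentenceTransformerRerank(
#     model="cross-encoder/ms-marco-MiniLM-L-6-v2",  # example cross-encoder
#     top_n=3,                                       # keep the 3 best nodes
# )
# query_engine = vector_index.as_query_engine(
#     similarity_top_k=10,              # retrieve more candidates first
#     node_postprocessors=[reranker],   # then rerank them before synthesis
# )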
iface = gr.ChatInterface(
    fn=interface_func,
    title="The Supreme Modron's All Knowing Repository of Knowledge",
    description="Ask any question about Faerun or about 5th edition rules.",
)
iface.launch(share=False, inbrowser=True)