Skip to content

Commit

Permalink
feat: more log for ui (#34)
Browse files Browse the repository at this point in the history
Co-authored-by: leoguillaume <[email protected]>
  • Loading branch information
leoguillaume and leoguillaumegouv authored Oct 11, 2024
1 parent 34d2f09 commit c9f21a4
Showing 1 changed file with 29 additions and 23 deletions.
52 changes: 29 additions & 23 deletions ui/chat.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,6 @@
import logging
import traceback

from openai import OpenAI
import requests
import streamlit as st
Expand All @@ -13,8 +16,9 @@
# Fetch the models and collections available to the current user; if the API
# is unreachable, show an error, log the full traceback for operators, and
# stop this Streamlit run so the rest of the page does not render on
# missing data.
try:
    language_models, embeddings_models = get_models(api_key=API_KEY)
    collections = get_collections(api_key=API_KEY)
except Exception:
    st.error("Failed to fetch user data.")
    # logging.exception logs the message plus the active traceback —
    # idiomatic replacement for logging.error(traceback.format_exc()).
    logging.exception("Failed to fetch user data.")
    st.stop()

openai_client = OpenAI(base_url=BASE_URL, api_key=API_KEY)
Expand Down Expand Up @@ -71,30 +75,32 @@
st.markdown(prompt)

with st.chat_message("assistant"):
    # Initialize up front so the "Sources" widget below cannot hit a
    # NameError when an exception fires before the RAG branch assigns it.
    sources = []
    try:
        if rag:
            # Retrieval-augmented generation: fetch the top-k chunks matching
            # the prompt from the selected collections, then rewrite the
            # prompt so the model answers from those documents.
            data = {
                "collections": params["rag"]["collections"],
                "model": params["rag"]["embeddings_model"],
                "k": params["rag"]["k"],
                "prompt": prompt,
            }
            response = requests.post(
                f"{BASE_URL}/search",
                json=data,
                headers={"Authorization": f"Bearer {API_KEY}"},
            )
            # raise_for_status() instead of `assert`: asserts are stripped
            # under `python -O`, and the HTTPError is still caught below.
            response.raise_for_status()
            results = response.json()["data"]  # parse the body once, reuse

            prompt_template = "Réponds à la question suivante en te basant sur les documents ci-dessous : {prompt}\n\nDocuments :\n{chunks}"
            chunks = "\n".join(result["chunk"]["content"] for result in results)
            sources = list(set(result["chunk"]["metadata"]["document_name"] for result in results))

            prompt = prompt_template.format(prompt=prompt, chunks=chunks)
            # Replace the last (raw) user message with the RAG-augmented one.
            messages = st.session_state.messages[:-1] + [{"role": "user", "content": prompt}]
        else:
            messages = st.session_state.messages

        # Stream completion tokens into the chat as they arrive.
        stream = openai_client.chat.completions.create(stream=True, messages=messages, **params["sampling_params"])
        response = st.write_stream(stream)
    except Exception:
        st.error("Failed to generate a response.")
        # Full traceback goes to the log; the UI only shows a short message.
        logging.exception("Failed to generate a response.")
    if sources:
        st.multiselect(options=sources, label="Sources", key="sources_tmp", default=sources)

Expand Down

0 comments on commit c9f21a4

Please sign in to comment.