
Commit

allow chat input
jiatastic committed Jul 18, 2023
1 parent e8957b1 commit de43022
Showing 8 changed files with 148 additions and 145 deletions.
81 changes: 14 additions & 67 deletions Homepage.py
@@ -1,76 +1,19 @@
import streamlit as st
from streamlit_option_menu import option_menu
from app_utils import switch_page
from initialization import initialize_session_state, embedding, resume_reader
from prompts.prompts import templates
from typing import Literal
from dataclasses import dataclass
import streamlit as st
from speech_recognition.openai_whisper import save_wav_file, transcribe
from langchain.callbacks import get_openai_callback
from aws.synthesize_speech import synthesize_speech
from IPython.display import Audio
from dataclasses import dataclass
import base64
from typing import Literal
from audio_recorder_streamlit import audio_recorder
from initialization import initialize_session_state
@dataclass
class Message:
"""Class for keeping track of interview history."""
origin: Literal["human", "ai"]
message: str
def autoplay_audio(file_path: str):
def update_audio():
global global_audio_md
with open(file_path, "rb") as f:
data = f.read()
b64 = base64.b64encode(data).decode()
global_audio_md = f"""
<audio controls autoplay="true">
<source src="data:audio/mp3;base64,{b64}" type="audio/mp3">
</audio>
"""
def update_markdown(audio_md):
st.markdown(audio_md, unsafe_allow_html=True)
update_audio()
update_markdown(global_audio_md)

def answer_call_back():
with get_openai_callback() as cb:
# user input
human_answer = st.session_state.answer
# transcribe audio
save_wav_file("temp/audio.wav", human_answer)
try:
input = transcribe("temp/audio.wav")
# save human_answer to history
st.session_state.history.append(
Message("human", input)
)
# OpenAI answer and save to history
llm_answer = st.session_state.screen.run(input)
# speech synthesis and speak out
audio_file_path = synthesize_speech(llm_answer)
# create audio widget with autoplay
audio_widget = Audio(audio_file_path, autoplay=True)
# save audio data to history
st.session_state.history.append(
Message("ai", llm_answer)
)
st.session_state.token_count += cb.total_tokens
return audio_widget
except:
st.session_state.history.append(Message("ai", "Sorry, I didn't get that. Please try again."))
from PIL import Image

# ————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————
st.set_page_config(page_title = "🤖 AI Interviewer", layout = "centered")

home_title = "🤖 AI Interviewer"
im = Image.open("icon.png")
st.set_page_config(page_title = "AI Interviewer", layout = "centered",page_icon=im)

home_title = "AI Interviewer"
home_introduction = "Welcome to AI Interviewer, empowering your interview preparation with generative AI."

with st.sidebar:
st.markdown('🤖 AI Interviewer - V0.1.2')
st.markdown('AI Interviewer - V0.1.2')
st.markdown("""
#### Let's contact:
[Haoxiang Jia](https://www.linkedin.com/in/haoxiang-jia/)
@@ -94,8 +37,9 @@ def answer_call_back():
"<style>#MainMenu{visibility:hidden;}</style>",
unsafe_allow_html=True
)

st.image(im, width=100)
st.markdown(f"""# {home_title} <span style=color:#2E9BF5><font size=5>Beta</font></span>""",unsafe_allow_html=True)

st.markdown("""\n""")
#st.markdown("#### Greetings")
st.markdown("Welcome to AI Interviewer! 👏AI Interviewer is a generative AI powered tool that provides you with realistic interview experience. "
@@ -116,7 +60,8 @@ def answer_call_back():
st.info("""
📚In this session, the AI Interviewer will assess your technical skills as they relate to the job description.
- Press the microphone to start answering.
- Each Interview will take 10 to 15 mins.
- Each Interview will take 10 to 15 mins.
- To start a new session, just refresh the page.
- Start introduce yourself and enjoy! """)
if st.button("Start Interview!"):
switch_page("Professional Screen")
@@ -126,7 +71,8 @@ def answer_call_back():
st.info("""
📚In this session, the AI Interviewer will review your resume and discuss your past experiences.
- Press the microphone to start answering.
- Each Interview will take 10 to 15 mins.
- Each Interview will take 10 to 15 mins.
- To start a new session, just refresh the page.
- Start introduce yourself and enjoy! """
)
if st.button("Start Interview!"):
@@ -137,7 +83,8 @@ def answer_call_back():
st.info("""
📚In this session, the AI Interviewer will assess your soft skills as they relate to the job description.
- Press the microphone to start answering.
- Each Interview will take 10 to 15 mins.
- Each Interview will take 10 to 15 mins.
- To start a new session, just refresh the page.
- Start introduce yourself and enjoy!
""")
if st.button("Start Interview!"):
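For reference, the autoplay_audio helper removed from Homepage.py above plays synthesized speech by inlining the MP3 as a base64 data URI inside an HTML audio element. A minimal, self-contained sketch of that pattern (the file path is a placeholder):

```python
import base64
import streamlit as st

def autoplay_audio(file_path: str) -> None:
    """Embed an audio file as a base64 data URI so the browser autoplays it."""
    with open(file_path, "rb") as f:
        b64 = base64.b64encode(f.read()).decode()
    st.markdown(
        f"""
        <audio controls autoplay="true">
            <source src="data:audio/mp3;base64,{b64}" type="audio/mp3">
        </audio>
        """,
        unsafe_allow_html=True,
    )

# usage (placeholder path):
# autoplay_audio("temp/answer.mp3")
```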
Binary file added icon.png
Binary file removed images/icon.png
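The save_wav_file and transcribe helpers imported from speech_recognition.openai_whisper are not shown in this commit. Assuming they simply write the recorder's WAV bytes to disk and call OpenAI's Whisper endpoint, a plausible sketch would be (the function bodies are assumptions, not the repository's actual code):

```python
import openai  # assumes the pre-1.0 openai SDK that was current in mid-2023

def save_wav_file(path: str, audio_bytes: bytes) -> None:
    # audio_recorder_streamlit returns complete WAV bytes, so writing them
    # out verbatim is enough (assumption about the helper's behaviour)
    with open(path, "wb") as f:
        f.write(audio_bytes)

def transcribe(path: str) -> str:
    # send the recorded answer to the Whisper endpoint and return plain text
    with open(path, "rb") as audio_file:
        result = openai.Audio.transcribe("whisper-1", audio_file)
    return result["text"]
```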
58 changes: 41 additions & 17 deletions pages/Behavioral Screen.py
@@ -65,14 +65,15 @@ def initialize_session_state():
st.session_state.bq_docserch = save_vector(jd)
if "bq_retriever" not in st.session_state:
st.session_state.bq_retriever = st.session_state.bq_docserch.as_retriever(search_type="similarity")

if "bq_chain_type_kwargs" not in st.session_state:
Behavioral_Prompt = PromptTemplate(input_variables=["context", "question"],
template=templates.behavioral_template)
st.session_state.bq_chain_type_kwargs = {"prompt": Behavioral_Prompt}
# interview history
if "history" not in st.session_state:
st.session_state.history = []
st.session_state.history.append(Message("ai", "Hello there! I am your interviewer today. I will access your soft skills through a series of questions. Let's get started! Please start by saying hello or introducing yourself."))

# token count
if "token_count" not in st.session_state:
st.session_state.token_count = 0
Expand All @@ -95,7 +96,7 @@ def initialize_session_state():
PROMPT = PromptTemplate(
input_variables=["history", "input"],
template="""I want you to act as an interviewer strictly following the guideline in the current conversation.
Candidate has no idea what the guideline is.
Ask me questions and wait for my answers. Do not write explanations.
Ask question like a real person, only one question at a time.
Do not ask the same question.
@@ -128,28 +129,47 @@ def answer_call_back():
# user input
human_answer = st.session_state.answer
# transcribe audio
save_wav_file("temp/audio.wav", human_answer)
try:
input = transcribe("temp/audio.wav")
# save human_answer to history
if voice:
save_wav_file("temp/audio.wav", human_answer)
try:
input = transcribe("temp/audio.wav")
# save human_answer to history
st.session_state.history.append(
Message("human", input)
)
# OpenAI answer and save to history
llm_answer = st.session_state.conversation.run(input)
# speech synthesis and speak out
audio_file_path = synthesize_speech(llm_answer)
# create audio widget with autoplay
audio_widget = Audio(audio_file_path, autoplay=True)
# save audio data to history
st.session_state.history.append(
Message("ai", llm_answer)
)
st.session_state.token_count += cb.total_tokens
return audio_widget
except:
st.session_state.history.append(Message("ai", "Sorry, I didn't get that. Please try again."))
else:
input = human_answer
st.session_state.history.append(
Message("human", input)
)
# OpenAI answer and save to history
llm_answer = st.session_state.conversation.run(input)
# OpenAI answer and save to history
llm_answer = st.session_state.conversation.run(input)
# speech synthesis and speak out
audio_file_path = synthesize_speech(llm_answer)
# create audio widget with autoplay
audio_widget = Audio(audio_file_path, autoplay=True)
# save audio data to history
st.session_state.history.append(
Message("ai", llm_answer)
)
st.session_state.token_count += cb.total_tokens
return audio_widget
except:
st.session_state.history.append(Message("ai", "Sorry, I didn't get that. Please try again."))


### ————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————
if jd:
@@ -168,23 +188,27 @@ def answer_call_back():
# keep interview
else:
with answer_placeholder:
answer = audio_recorder(pause_threshold=2.5, sample_rate=44100)
voice: bool = st.checkbox("I would like to speak with AI Interviewer!")
if voice:
answer = audio_recorder(pause_threshold=2.5, sample_rate=44100)
else:
answer = st.chat_input("Your answer")
if answer:
st.session_state['answer'] = answer
audio = answer_call_back()
else:
st.write("Please speak into the microphone to answer the question.")

with chat_placeholder:
auto_play = st.checkbox("Let AI interviewer speak!")
if auto_play:
try:
st.write(audio)
except:
pass
for answer in st.session_state.history:
if answer:
if answer.origin == 'ai':
with st.chat_message("assistant"):
st.write(answer.message)
try:
st.write(audio)
except:
pass
else:
with st.chat_message("user"):
st.write(answer.message)
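The change this commit makes to both interview screens is the same: a checkbox chooses between the microphone recorder and a plain chat box, and both paths feed the same answer callback. Stripped of the interview-specific logic, the pattern looks roughly like this (handle_answer stands in for the repository's answer_call_back):

```python
import streamlit as st
from audio_recorder_streamlit import audio_recorder

if "history" not in st.session_state:
    st.session_state.history = []

def handle_answer() -> None:
    # stand-in for answer_call_back(): transcribe if the input was audio,
    # run the LLM, synthesize speech, and append both turns to the history
    st.session_state.history.append(st.session_state["answer"])

voice = st.checkbox("I would like to speak with AI Interviewer!")
if voice:
    # raw WAV bytes, returned once the speaker pauses for 2.5 seconds
    answer = audio_recorder(pause_threshold=2.5, sample_rate=44100)
else:
    # plain text answer, submitted with Enter
    answer = st.chat_input("Your answer")

if answer:
    st.session_state["answer"] = answer
    handle_answer()
```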
70 changes: 47 additions & 23 deletions pages/Professional Screen.py
@@ -32,16 +32,12 @@ def load_lottiefile(filepath: str):
### ——————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————
@dataclass
class Message:

"""class for keeping track of interview history."""

origin: Literal["human", "ai"]
message: str

def save_vector(text):

"""embeddings"""

text_splitter = NLTKTextSplitter()
texts = text_splitter.split_text(text)
# Create emebeddings
@@ -64,13 +60,16 @@ def initialize_session_state():
# interview history
if "jd_history" not in st.session_state:
st.session_state.jd_history = []
st.session_state.jd_history.append(Message("ai",
"Hello, Welcome to the interview. I am your interviewer today. I will ask you professional questions regarding the job description you submitted."
"Please start by introducting a little bit about yourself."))
# token count
if "token_count" not in st.session_state:
st.session_state.token_count = 0
if "jd_guideline" not in st.session_state:
llm = ChatOpenAI(
model_name = "gpt-3.5-turbo",
temperature = 0.6,)
temperature = 0.8,)
st.session_state.jd_guideline = RetrievalQA.from_chain_type(
llm=llm,
chain_type_kwargs=st.session_state.jd_chain_type_kwargs, chain_type='stuff',
@@ -82,11 +81,11 @@ def initialize_session_state():
temperature=0.8, )
PROMPT = PromptTemplate(
input_variables=["history", "input"],
template="""I want you to act as an interviewer strictly following the guideline in the current conversation.
template="""I want you to act as a human interviewer strictly following the guideline in the current conversation.
Ask me questions and wait for my answers like a real person.
Ask me questions and wait for my answers.
Do not write explanations.
Ask question like a real person, only one question at a time.
only one question at a time.
Do not ask the same question.
Do not repeat the question.
Do ask follow-up questions if necessary.
@@ -106,7 +105,7 @@ def initialize_session_state():
if 'jd_feedback' not in st.session_state:
llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
temperature=0.5, )
temperature=0.8, )
st.session_state.jd_feedback = ConversationChain(
prompt=PromptTemplate(input_variables=["history", "input"], template=templates.feedback_template),
llm=llm,
@@ -118,11 +117,32 @@ def answer_call_back():
with get_openai_callback() as cb:
# user input
human_answer = st.session_state.answer
# transcribe audio
save_wav_file("temp/audio.wav", human_answer)
try:
input = transcribe("temp/audio.wav")
# save human_answer to history
if voice:
# transcribe audio
save_wav_file("temp/audio.wav", human_answer)
try:
input = transcribe("temp/audio.wav")
# save human_answer to history
st.session_state.jd_history.append(
Message("human", input)
)
# OpenAI answer and save to history
llm_answer = st.session_state.jd_screen.run(input)
# speech synthesis
audio_file_path = synthesize_speech(llm_answer)
st.session_state.audio_file_path = audio_file_path
# create audio widget with autoplay
audio_widget = Audio(audio_file_path, autoplay=True)
# save audio data to history
st.session_state.jd_history.append(
Message("ai", llm_answer)
)
st.session_state.token_count += cb.total_tokens
return audio_widget
except:
st.session_state.jd_history.append(Message("ai", "Sorry, I didn't get that. Please try again."))
else:
input = human_answer
st.session_state.jd_history.append(
Message("human", input)
)
@@ -139,8 +159,6 @@ def answer_call_back():
)
st.session_state.token_count += cb.total_tokens
return audio_widget
except:
st.session_state.jd_history.append(Message("ai", "Sorry, I didn't get that. Please try again."))

### ——————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————
# sumitted job description
@@ -158,25 +176,31 @@ def answer_call_back():
st.stop()
else:
with answer_placeholder:
answer = audio_recorder(pause_threshold = 2.5, sample_rate = 44100)
voice: bool = st.checkbox("I would like to speak with AI Interviewer")
if voice:
answer = audio_recorder(pause_threshold = 2.5, sample_rate = 44100)
else:
answer = st.chat_input("Your answer")
if answer:
st.session_state['answer'] = answer
audio = answer_call_back()
else:
st.write("Please speak into the microphone to answer the question.")

with chat_placeholder:
auto_play = st.checkbox("Let AI interviewer speak!")
if auto_play:
try:
st.write(audio)
except:
pass
for answer in st.session_state.jd_history:
#if answer:
if answer.origin == 'ai':
with st.chat_message("assistant"):
st.write(answer.message)
try:
st.write(audio)
except:
pass
else:
with st.chat_message("user"):
st.write(answer.message)

credit_card_placeholder.caption(f"""
Used {st.session_state.token_count} tokens \n
Progress: {int(len(st.session_state.jd_history) / 30 * 100)}% completed.""")
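Both screens render the transcript the same way: each Message stored in session state is replayed through st.chat_message, with the AI's audio widget written next to its latest turn. A minimal sketch of that rendering loop (Message is copied from the diff; the greeting text is a placeholder and the audio widget is omitted):

```python
from dataclasses import dataclass
from typing import Literal
import streamlit as st

@dataclass
class Message:
    """Class for keeping track of interview history."""
    origin: Literal["human", "ai"]
    message: str

if "history" not in st.session_state:
    # placeholder greeting; the real screens seed a longer introduction
    st.session_state.history = [Message("ai", "Hello, I am your interviewer today.")]

for turn in st.session_state.history:
    role = "assistant" if turn.origin == "ai" else "user"
    with st.chat_message(role):
        st.write(turn.message)
```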
