Skip to content

Commit

Permalink
[FIX] Updated code for incompatibility with the installed openai package version
Browse files Browse the repository at this point in the history
  • Loading branch information
pathfindermilan committed Oct 14, 2024
1 parent 5d75cd3 commit 34283b7
Show file tree
Hide file tree
Showing 6 changed files with 184 additions and 35 deletions.
109 changes: 90 additions & 19 deletions backend/console/ai/generate_text.py
Original file line number Diff line number Diff line change
@@ -1,29 +1,100 @@
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.chains import LLMChain
import openai
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

class AIInterviewer:
    """Drives an automated interview using an agent's configured LLM.

    Reads the prompt, custom knowledge and model name from the agent's
    related ``behaviour`` and ``knowledge`` records, generates follow-up
    questions and ideal answers via the OpenAI chat API (openai>=1.x
    module-level interface), and scores human answers by TF-IDF cosine
    similarity against the ideal answer.
    """

    def __init__(self, agent):
        # NOTE(review): assumes `agent` exposes .behaviour and .knowledge
        # related objects (Django model relations) — confirm against models.py.
        self.agent_prompt = agent.behaviour.agent_prompt
        self.custom_knowledge = agent.knowledge.custom_knowledge
        self.model_name = agent.knowledge.agent_llm

    def _chat(self, system_content, prompt, max_tokens, temperature):
        """Send one system+user exchange to the OpenAI chat API.

        Shared helper so both generation methods use identical call
        scaffolding. Returns the assistant reply with surrounding
        whitespace stripped.
        """
        response = openai.chat.completions.create(
            model=self.model_name,
            messages=[
                {"role": "system", "content": system_content},
                {"role": "user", "content": prompt},
            ],
            max_tokens=max_tokens,
            temperature=temperature,
        )
        return response.choices[0].message.content.strip()

    def generate_next_question(self, text, last_question, n_questions, current_score):
        """Generate the next interview question from the running context.

        Args:
            text: the human's latest answer.
            last_question: the question that answer responded to.
            n_questions: how many questions have been asked so far.
            current_score: the candidate's running score.

        Returns:
            The next question as a plain string (no greeting).
        """
        prompt = f"""
{self.agent_prompt}
Custom Knowledge: {self.custom_knowledge}
Last Question: {last_question}
Human's Answer: {text}
Number of Questions Asked: {n_questions}
Current Score: {current_score}
Based on the information above, generate the next interview question. The question should be relevant to the previous question and answer, and appropriate for the current stage of the interview (considering the number of questions asked and the current score).
DO NOT INCLUDE ANY GREETING TO THE USER, be professional.
Next Question:
"""
        return self._chat(
            "You are a helpful assistant that generates interview questions.",
            prompt,
            max_tokens=100,
            temperature=0.6,
        )

    def generate_ideal_answer(self, question, user_skills):
        """Generate a reference ("ideal") answer for *question*.

        The answer is conditioned on the agent's custom knowledge and the
        candidate's declared skills, and is later used as the similarity
        baseline when scoring the human's real answer.
        """
        prompt = f"""
Custom Knowledge: {self.custom_knowledge}
User Skills: {user_skills}
Question: {question}
Based on the custom knowledge and the user's skills, generate an ideal answer to the given question. The answer should be comprehensive and demonstrate the expected knowledge for someone with the specified skills.
Ideal Answer:
"""
        return self._chat(
            "You are a helpful assistant that generates ideal answers based on user skills.",
            prompt,
            max_tokens=200,
            temperature=0.5,
        )

    @staticmethod
    def calculate_answer_similarity(ideal_answer, human_answer):
        """Return the TF-IDF cosine similarity of the two answers in [0, 1]."""
        vectorizer = TfidfVectorizer().fit_transform([ideal_answer, human_answer])
        cosine_sim = cosine_similarity(vectorizer[0:1], vectorizer[1:2])
        return cosine_sim[0][0]

def ai_interviewer(text, session):
    """Advance one turn of the interview for *session* given the human's *text*.

    Scores the answer against an LLM-generated ideal answer, updates the
    session's running score and question count, decides whether the
    interview is finished, and persists the session.

    Args:
        text: the human's answer to ``session.last_question``.
        session: interview session model instance (Django); provides
            ``order.agent``, ``applicant.skills``, ``n_questions``,
            ``score``, ``final``, ``last_question``/``last_answer``.

    Returns:
        Tuple ``(score, next_question)`` where ``next_question`` is ``None``
        once the interview is marked final.

    NOTE(review): the previous revision contained leftover dead code
    (dict-style ``agent['behaviour']`` access on a model instance, an
    unused ChatOpenAI/prompt-template pair, and an unreachable early
    ``return``); this version keeps only the live logic.
    """
    interviewer = AIInterviewer(session.order.agent)

    # Score the human answer by similarity to an LLM-generated reference answer.
    ideal_answer = interviewer.generate_ideal_answer(session.last_question, session.applicant.skills)
    answer_similarity = AIInterviewer.calculate_answer_similarity(ideal_answer, text)
    question_score = answer_similarity * 100

    # Running average of per-question scores, with a flat +10 bonus per
    # answer, clamped to 100.
    if session.n_questions == 0:
        session.score = min(question_score + 10, 100)
    else:
        session.score = min(
            (session.score * session.n_questions + question_score + 10) / (session.n_questions + 1),
            100,
        )

    session.n_questions += 1

    # Stop after 10 questions, or after 5 questions if the candidate is
    # scoring below 50.
    if session.n_questions >= 10 or (session.n_questions >= 5 and session.score < 50):
        session.final = 1
        next_question = None
    else:
        next_question = interviewer.generate_next_question(
            text=text,
            last_question=session.last_question,
            n_questions=session.n_questions,
            current_score=session.score,
        )

    session.last_answer = text
    session.last_question = next_question
    session.save()

    return session.score, next_question
2 changes: 1 addition & 1 deletion backend/console/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,7 @@ class Identity(models.Model):
class Behaviour(models.Model):
agent_greeting = models.CharField(
max_length=250,
default = "Hello! I'm Intervuo, AI interviewer. Please describe your experience and skills related to the position you're applying for"
default = "Hello! I am your AI interviewer. Please describe your experience and skills related to the position you're applying for"
)
agent_prompt = models.TextField(
default='''
Expand Down
17 changes: 10 additions & 7 deletions backend/console/serializers.py
Original file line number Diff line number Diff line change
Expand Up @@ -389,13 +389,16 @@ def get_knowledge(self, order):
if knowledge_field is None:
return None

item = knowledge_field.knowledgefileitem
files = [
{
'file_url': item.file_item.url if item.file_item else None,
'status': item.status_url
}
]
try:
item = knowledge_field.knowledgefileitem
files = [
{
'file_url': item.file_item.url if item.file_item else None,
'status': item.status_url
}
]
except:
files = []

return {
'agent_llm': knowledge_field.agent_llm if knowledge_field.agent_llm else None,
Expand Down
16 changes: 8 additions & 8 deletions backend/console/views.py
Original file line number Diff line number Diff line change
Expand Up @@ -191,11 +191,11 @@ def interview_session_create(request, agent_id):
session.save()
else:
if session.n_questions == 0 and not session.last_question:
greeting = f"Hi, I am {order.agent.identity.agent_name} and we started a interview previously with you but you didn't specify your skill yet. Can you do that now, please?"
greeting = f"Hеllo again, I am {order.agent.identity.agent_name}, your ai interviewer and we started a interview previously. Please describe your experience and skills related to the position you're applying for"
elif session.n_questions == 0 and not session.last_answer:
greeting = f"Hi, I am {order.agent.identity.agent_name} and we started a session previously. Tell me when you're ready to start with the questions?"
greeting = f"Hi, I am {order.agent.identity.agent_name} and we started a session. Tell me when you're ready to start with the questions?"
elif session.n_questions != 0:
greeting = f"Hi, I am {order.agent.identity.agent_name} and we started a interview process previously. Tell me when you're ready to to continue with the questions?"
greeting = f"Hi, I am {order.agent.identity.agent_name} and we started a interview process. Tell me when you're ready to to continue with the questions?"
session.last_question = greeting
session.save()

Expand Down Expand Up @@ -342,12 +342,9 @@ def interview_session_flow(request, agent_id):
else:
ai_text = response
else:
session.last_answer = human_text
session.ready = True
session.n_questions = session.n_questions + 1
session.save()
ai_text = ai_interviewer(text = text, session = session)

score, ai_text = ai_interviewer(text = human_text, session = session)
elif session.n_questions != 0 and session.ready == False:
llm = ChatOpenAI(openai_api_key=os.getenv('OPENAI_API_KEY'), model="gpt-4o-mini", temperature=0)
previous_context_prompt = PromptTemplate(
Expand All @@ -364,7 +361,10 @@ def interview_session_flow(request, agent_id):
else:
ai_text = response
else:
ai_text = "This is not finished yet"
score, ai_text = ai_interviewer(text = human_text, session = session)

if session.final:
ai_text = f"Thanks for your time, we finished with the interview! Your score is {int(session.score)} percents. Have a good day."

return Response({
"ai_text": ai_text,
Expand Down
2 changes: 2 additions & 0 deletions backend/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -22,4 +22,6 @@ dependencies = [
"langchain>=0.3.3",
"langchain-community>=0.3.2",
"langchain-openai>=0.2.2",
"openai>=1.51.2",
"scikit-learn>=1.5.2",
]
Loading

0 comments on commit 34283b7

Please sign in to comment.