From 34283b71a3e1bf9a604c3dec53e260ca274719b6 Mon Sep 17 00:00:00 2001 From: "pathfinder.milan" Date: Mon, 14 Oct 2024 05:41:33 +0200 Subject: [PATCH] [FIX] Fixed code because of bad openai version --- backend/console/ai/generate_text.py | 109 +++++++++++++++++++++++----- backend/console/models.py | 2 +- backend/console/serializers.py | 17 +++-- backend/console/views.py | 16 ++-- backend/pyproject.toml | 2 + backend/uv.lock | 73 +++++++++++++++++++ 6 files changed, 184 insertions(+), 35 deletions(-) diff --git a/backend/console/ai/generate_text.py b/backend/console/ai/generate_text.py index 8fc239f..2801558 100644 --- a/backend/console/ai/generate_text.py +++ b/backend/console/ai/generate_text.py @@ -1,29 +1,100 @@ -from langchain.chat_models import ChatOpenAI -from langchain.prompts import ChatPromptTemplate -from langchain.chains import LLMChain +import openai +from sklearn.feature_extraction.text import TfidfVectorizer +from sklearn.metrics.pairwise import cosine_similarity +class AIInterviewer: + def __init__(self, agent): + self.agent_prompt = agent.behaviour.agent_prompt + self.custom_knowledge = agent.knowledge.custom_knowledge + self.model_name = agent.knowledge.agent_llm + + def generate_next_question(self, text, last_question, n_questions, current_score): + prompt = f""" + {self.agent_prompt} + Custom Knowledge: {self.custom_knowledge} + Last Question: {last_question} + Human's Answer: {text} + Number of Questions Asked: {n_questions} + Current Score: {current_score} + Based on the information above, generate the next interview question. The question should be relevant to the previous question and answer, and appropriate for the current stage of the interview (considering the number of questions asked and the current score). + DO NOT INCLUDE ANY GREETING TO THE USER, be professional. 
+ Next Question: + """ + + response = openai.chat.completions.create( + model=self.model_name, + messages=[ + {"role": "system", "content": "You are a helpful assistant that generates interview questions."}, + {"role": "user", "content": prompt} + ], + max_tokens=100, + temperature=0.6, + ) + + return response.choices[0].message.content.strip() + + + def generate_ideal_answer(self, question, user_skills): + prompt = f""" + Custom Knowledge: {self.custom_knowledge} + User Skills: {user_skills} + Question: {question} + Based on the custom knowledge and the user's skills, generate an ideal answer to the given question. The answer should be comprehensive and demonstrate the expected knowledge for someone with the specified skills. + Ideal Answer: + """ + + response = openai.chat.completions.create( + model=self.model_name, + messages=[ + {"role": "system", "content": "You are a helpful assistant that generates ideal answers based on user skills."}, + {"role": "user", "content": prompt} + ], + max_tokens=200, + temperature=0.5, + ) + + return response.choices[0].message.content.strip() + + + @staticmethod + def calculate_answer_similarity(ideal_answer, human_answer): + vectorizer = TfidfVectorizer().fit_transform([ideal_answer, human_answer]) + cosine_sim = cosine_similarity(vectorizer[0:1], vectorizer[1:2]) + return cosine_sim[0][0] def ai_interviewer(text, session): + interviewer = AIInterviewer(session.order.agent) + + ideal_answer = interviewer.generate_ideal_answer(session.last_question, session.applicant.skills) + answer_similarity = AIInterviewer.calculate_answer_similarity(ideal_answer, text) - agent = session.order.agent + question_score = answer_similarity * 100 - agent_greeting = agent['behaviour']['agent_greeting'] - agent_prompt = agent['behaviour']['agent_prompt'] - custom_knowledge = agent['knowledge']['custom_knowledge'] - model_name = agent['knowledge']['agent_llm'] + if session.n_questions == 0: + session.score = question_score + 10 + if session.score 
> 100: + session.score = 100 + else: + session.score = (session.score * session.n_questions + question_score + 10) / (session.n_questions + 1) + if session.score > 100: + session.score = 100 - # Create ChatOpenAI instance - llm = ChatOpenAI(model_name=model_name, temperature=0.4) + session.n_questions += 1 - interview_template = ChatPromptTemplate.from_template( - f"{agent_prompt}\n\n" - f"Custom Knowledge: {custom_knowledge}\n\n" - "Interview progress: {progress}\n" - "User's previous message: {user_message}\n\n" - "Based on the interview progress, provide the appropriate response or question. " - "If this is the final response, include an evaluation of the candidate." - ) + if session.n_questions >= 10 or (session.n_questions >= 5 and session.score < 50): + session.final = 1 + next_question = None + else: + next_question = interviewer.generate_next_question( + text=text, + last_question=session.last_question, + n_questions=session.n_questions, + current_score=session.score + ) + session.last_answer = text + session.last_question = next_question + session.save() - return "AI default ", 0, 1, "100%" + return session.score, next_question diff --git a/backend/console/models.py b/backend/console/models.py index 8a35ace..f265272 100644 --- a/backend/console/models.py +++ b/backend/console/models.py @@ -120,7 +120,7 @@ class Identity(models.Model): class Behaviour(models.Model): agent_greeting = models.CharField( max_length=250, - default = "Hello! I'm Intervuo, AI interviewer. Please describe your experience and skills related to the position you're applying for" + default = "Hello! I am your AI interviewer. 
Please describe your experience and skills related to the position you're applying for" ) agent_prompt = models.TextField( default=''' diff --git a/backend/console/serializers.py b/backend/console/serializers.py index 7baf91e..41ec5fc 100644 --- a/backend/console/serializers.py +++ b/backend/console/serializers.py @@ -389,13 +389,16 @@ def get_knowledge(self, order): if knowledge_field is None: return None - item = knowledge_field.knowledgefileitem - files = [ - { - 'file_url': item.file_item.url if item.file_item else None, - 'status': item.status_url - } - ] + try: + item = knowledge_field.knowledgefileitem + files = [ + { + 'file_url': item.file_item.url if item.file_item else None, + 'status': item.status_url + } + ] + except: + files = [] return { 'agent_llm': knowledge_field.agent_llm if knowledge_field.agent_llm else None, diff --git a/backend/console/views.py b/backend/console/views.py index eaf3ec3..0ca4abc 100644 --- a/backend/console/views.py +++ b/backend/console/views.py @@ -191,11 +191,11 @@ def interview_session_create(request, agent_id): session.save() else: if session.n_questions == 0 and not session.last_question: - greeting = f"Hi, I am {order.agent.identity.agent_name} and we started a interview previously with you but you didn't specify your skill yet. Can you do that now, please?" + greeting = f"Hello again, I am {order.agent.identity.agent_name}, your AI interviewer and we started an interview previously. Please describe your experience and skills related to the position you're applying for" elif session.n_questions == 0 and not session.last_answer: - greeting = f"Hi, I am {order.agent.identity.agent_name} and we started a session previously. Tell me when you're ready to start with the questions?" + greeting = f"Hi, I am {order.agent.identity.agent_name} and we started a session. Tell me when you're ready to start with the questions?" 
elif session.n_questions != 0: - greeting = f"Hi, I am {order.agent.identity.agent_name} and we started a interview process previously. Tell me when you're ready to to continue with the questions?" + greeting = f"Hi, I am {order.agent.identity.agent_name} and we started an interview process. Tell me when you're ready to continue with the questions?" session.last_question = greeting session.save() @@ -342,12 +342,9 @@ def interview_session_flow(request, agent_id): else: ai_text = response else: - session.last_answer = human_text session.ready = True - session.n_questions = session.n_questions + 1 session.save() - ai_text = ai_interviewer(text = text, session = session) - + score, ai_text = ai_interviewer(text = human_text, session = session) elif session.n_questions != 0 and session.ready == False: llm = ChatOpenAI(openai_api_key=os.getenv('OPENAI_API_KEY'), model="gpt-4o-mini", temperature=0) previous_context_prompt = PromptTemplate( @@ -364,7 +361,10 @@ def interview_session_flow(request, agent_id): else: ai_text = response else: - ai_text = "This is not finished yet" + score, ai_text = ai_interviewer(text = human_text, session = session) + + if session.final: + ai_text = f"Thanks for your time, we finished with the interview! Your score is {int(session.score)} percent. Have a good day." 
return Response({ "ai_text": ai_text, diff --git a/backend/pyproject.toml b/backend/pyproject.toml index 568bdb0..0ede878 100644 --- a/backend/pyproject.toml +++ b/backend/pyproject.toml @@ -22,4 +22,6 @@ dependencies = [ "langchain>=0.3.3", "langchain-community>=0.3.2", "langchain-openai>=0.2.2", + "openai>=1.51.2", + "scikit-learn>=1.5.2", ] diff --git a/backend/uv.lock b/backend/uv.lock index 93635ac..d3b2075 100644 --- a/backend/uv.lock +++ b/backend/uv.lock @@ -636,9 +636,11 @@ dependencies = [ { name = "langchain-community" }, { name = "langchain-openai" }, { name = "mysqlclient" }, + { name = "openai" }, { name = "pillow" }, { name = "python-dotenv" }, { name = "redis" }, + { name = "scikit-learn" }, ] [package.metadata] @@ -657,9 +659,11 @@ requires-dist = [ { name = "langchain-community", specifier = ">=0.3.2" }, { name = "langchain-openai", specifier = ">=0.2.2" }, { name = "mysqlclient", specifier = ">=2.2.4" }, + { name = "openai", specifier = ">=1.51.2" }, { name = "pillow", specifier = ">=10.4.0" }, { name = "python-dotenv", specifier = ">=1.0.1" }, { name = "redis", specifier = ">=5.1.1" }, + { name = "scikit-learn", specifier = ">=1.5.2" }, ] [[package]] @@ -703,6 +707,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/7a/fc/8709ee90837e94790d8b50db51c7b8a70e86e41b2c81e824c20b0ecfeba7/jiter-0.6.1-cp313-none-win_amd64.whl", hash = "sha256:be7503dd6f4bf02c2a9bacb5cc9335bc59132e7eee9d3e931b13d76fd80d7fda", size = 198919 }, ] +[[package]] +name = "joblib" +version = "1.4.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/64/33/60135848598c076ce4b231e1b1895170f45fbcaeaa2c9d5e38b04db70c35/joblib-1.4.2.tar.gz", hash = "sha256:2382c5816b2636fbd20a09e0f4e9dad4736765fdfb7dca582943b9c1366b3f0e", size = 2116621 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/91/29/df4b9b42f2be0b623cbd5e2140cafcaa2bef0759a00b7b70104dcfe2fb51/joblib-1.4.2-py3-none-any.whl", hash = 
"sha256:06d478d5674cbc267e7496a410ee875abd68e4340feff4490bcb7afb88060ae6", size = 301817 }, +] + [[package]] name = "jsonpatch" version = "1.33" @@ -1314,6 +1327,57 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/3f/51/d4db610ef29373b879047326cbf6fa98b6c1969d6f6dc423279de2b1be2c/requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06", size = 54481 }, ] +[[package]] +name = "scikit-learn" +version = "1.5.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "joblib" }, + { name = "numpy" }, + { name = "scipy" }, + { name = "threadpoolctl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/37/59/44985a2bdc95c74e34fef3d10cb5d93ce13b0e2a7baefffe1b53853b502d/scikit_learn-1.5.2.tar.gz", hash = "sha256:b4237ed7b3fdd0a4882792e68ef2545d5baa50aca3bb45aa7df468138ad8f94d", size = 7001680 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a4/db/b485c1ac54ff3bd9e7e6b39d3cc6609c4c76a65f52ab0a7b22b6c3ab0e9d/scikit_learn-1.5.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f932a02c3f4956dfb981391ab24bda1dbd90fe3d628e4b42caef3e041c67707a", size = 12110344 }, + { url = "https://files.pythonhosted.org/packages/54/1a/7deb52fa23aebb855431ad659b3c6a2e1709ece582cb3a63d66905e735fe/scikit_learn-1.5.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:3b923d119d65b7bd555c73be5423bf06c0105678ce7e1f558cb4b40b0a5502b1", size = 11033502 }, + { url = "https://files.pythonhosted.org/packages/a1/32/4a7a205b14c11225609b75b28402c196e4396ac754dab6a81971b811781c/scikit_learn-1.5.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f60021ec1574e56632be2a36b946f8143bf4e5e6af4a06d85281adc22938e0dd", size = 12085794 }, + { url = "https://files.pythonhosted.org/packages/c6/29/044048c5e911373827c0e1d3051321b9183b2a4f8d4e2f11c08fcff83f13/scikit_learn-1.5.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:394397841449853c2290a32050382edaec3da89e35b3e03d6cc966aebc6a8ae6", size = 12945797 }, + { url = "https://files.pythonhosted.org/packages/aa/ce/c0b912f2f31aeb1b756a6ba56bcd84dd1f8a148470526a48515a3f4d48cd/scikit_learn-1.5.2-cp312-cp312-win_amd64.whl", hash = "sha256:57cc1786cfd6bd118220a92ede80270132aa353647684efa385a74244a41e3b1", size = 10985467 }, + { url = "https://files.pythonhosted.org/packages/a4/50/8891028437858cc510e13578fe7046574a60c2aaaa92b02d64aac5b1b412/scikit_learn-1.5.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9a702e2de732bbb20d3bad29ebd77fc05a6b427dc49964300340e4c9328b3f5", size = 12025584 }, + { url = "https://files.pythonhosted.org/packages/d2/79/17feef8a1c14149436083bec0e61d7befb4812e272d5b20f9d79ea3e9ab1/scikit_learn-1.5.2-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:b0768ad641981f5d3a198430a1d31c3e044ed2e8a6f22166b4d546a5116d7908", size = 10959795 }, + { url = "https://files.pythonhosted.org/packages/b1/c8/f08313f9e2e656bd0905930ae8bf99a573ea21c34666a813b749c338202f/scikit_learn-1.5.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:178ddd0a5cb0044464fc1bfc4cca5b1833bfc7bb022d70b05db8530da4bb3dd3", size = 12077302 }, + { url = "https://files.pythonhosted.org/packages/a7/48/fbfb4dc72bed0fe31fe045fb30e924909ad03f717c36694351612973b1a9/scikit_learn-1.5.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7284ade780084d94505632241bf78c44ab3b6f1e8ccab3d2af58e0e950f9c12", size = 13002811 }, + { url = "https://files.pythonhosted.org/packages/a5/e7/0c869f9e60d225a77af90d2aefa7a4a4c0e745b149325d1450f0f0ce5399/scikit_learn-1.5.2-cp313-cp313-win_amd64.whl", hash = "sha256:b7b0f9a0b1040830d38c39b91b3a44e1b643f4b36e36567b80b7c6bd2202a27f", size = 10951354 }, +] + +[[package]] +name = "scipy" +version = "1.14.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/62/11/4d44a1f274e002784e4dbdb81e0ea96d2de2d1045b2132d5af62cc31fd28/scipy-1.14.1.tar.gz", hash = "sha256:5a275584e726026a5699459aa72f828a610821006228e841b94275c4a7c08417", size = 58620554 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c0/04/2bdacc8ac6387b15db6faa40295f8bd25eccf33f1f13e68a72dc3c60a99e/scipy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:631f07b3734d34aced009aaf6fedfd0eb3498a97e581c3b1e5f14a04164a456d", size = 39128781 }, + { url = "https://files.pythonhosted.org/packages/c8/53/35b4d41f5fd42f5781dbd0dd6c05d35ba8aa75c84ecddc7d44756cd8da2e/scipy-1.14.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:af29a935803cc707ab2ed7791c44288a682f9c8107bc00f0eccc4f92c08d6e07", size = 29939542 }, + { url = "https://files.pythonhosted.org/packages/66/67/6ef192e0e4d77b20cc33a01e743b00bc9e68fb83b88e06e636d2619a8767/scipy-1.14.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:2843f2d527d9eebec9a43e6b406fb7266f3af25a751aa91d62ff416f54170bc5", size = 23148375 }, + { url = "https://files.pythonhosted.org/packages/f6/32/3a6dedd51d68eb7b8e7dc7947d5d841bcb699f1bf4463639554986f4d782/scipy-1.14.1-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:eb58ca0abd96911932f688528977858681a59d61a7ce908ffd355957f7025cfc", size = 25578573 }, + { url = "https://files.pythonhosted.org/packages/f0/5a/efa92a58dc3a2898705f1dc9dbaf390ca7d4fba26d6ab8cfffb0c72f656f/scipy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:30ac8812c1d2aab7131a79ba62933a2a76f582d5dbbc695192453dae67ad6310", size = 35319299 }, + { url = "https://files.pythonhosted.org/packages/8e/ee/8a26858ca517e9c64f84b4c7734b89bda8e63bec85c3d2f432d225bb1886/scipy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f9ea80f2e65bdaa0b7627fb00cbeb2daf163caa015e59b7516395fe3bd1e066", size = 40849331 }, + { url = 
"https://files.pythonhosted.org/packages/a5/cd/06f72bc9187840f1c99e1a8750aad4216fc7dfdd7df46e6280add14b4822/scipy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:edaf02b82cd7639db00dbff629995ef185c8df4c3ffa71a5562a595765a06ce1", size = 42544049 }, + { url = "https://files.pythonhosted.org/packages/aa/7d/43ab67228ef98c6b5dd42ab386eae2d7877036970a0d7e3dd3eb47a0d530/scipy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:2ff38e22128e6c03ff73b6bb0f85f897d2362f8c052e3b8ad00532198fbdae3f", size = 44521212 }, + { url = "https://files.pythonhosted.org/packages/50/ef/ac98346db016ff18a6ad7626a35808f37074d25796fd0234c2bb0ed1e054/scipy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1729560c906963fc8389f6aac023739ff3983e727b1a4d87696b7bf108316a79", size = 39091068 }, + { url = "https://files.pythonhosted.org/packages/b9/cc/70948fe9f393b911b4251e96b55bbdeaa8cca41f37c26fd1df0232933b9e/scipy-1.14.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:4079b90df244709e675cdc8b93bfd8a395d59af40b72e339c2287c91860deb8e", size = 29875417 }, + { url = "https://files.pythonhosted.org/packages/3b/2e/35f549b7d231c1c9f9639f9ef49b815d816bf54dd050da5da1c11517a218/scipy-1.14.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e0cf28db0f24a38b2a0ca33a85a54852586e43cf6fd876365c86e0657cfe7d73", size = 23084508 }, + { url = "https://files.pythonhosted.org/packages/3f/d6/b028e3f3e59fae61fb8c0f450db732c43dd1d836223a589a8be9f6377203/scipy-1.14.1-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:0c2f95de3b04e26f5f3ad5bb05e74ba7f68b837133a4492414b3afd79dfe540e", size = 25503364 }, + { url = "https://files.pythonhosted.org/packages/a7/2f/6c142b352ac15967744d62b165537a965e95d557085db4beab2a11f7943b/scipy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b99722ea48b7ea25e8e015e8341ae74624f72e5f21fc2abd45f3a93266de4c5d", size = 35292639 }, + { url = 
"https://files.pythonhosted.org/packages/56/46/2449e6e51e0d7c3575f289f6acb7f828938eaab8874dbccfeb0cd2b71a27/scipy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5149e3fd2d686e42144a093b206aef01932a0059c2a33ddfa67f5f035bdfe13e", size = 40798288 }, + { url = "https://files.pythonhosted.org/packages/32/cd/9d86f7ed7f4497c9fd3e39f8918dd93d9f647ba80d7e34e4946c0c2d1a7c/scipy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e4f5a7c49323533f9103d4dacf4e4f07078f360743dec7f7596949149efeec06", size = 42524647 }, + { url = "https://files.pythonhosted.org/packages/f5/1b/6ee032251bf4cdb0cc50059374e86a9f076308c1512b61c4e003e241efb7/scipy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:baff393942b550823bfce952bb62270ee17504d02a1801d7fd0719534dfb9c84", size = 44469524 }, +] + [[package]] name = "screen" version = "1.0.1" @@ -1408,6 +1472,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d2/3f/8ba87d9e287b9d385a02a7114ddcef61b26f86411e121c9003eb509a1773/tenacity-8.5.0-py3-none-any.whl", hash = "sha256:b594c2a5945830c267ce6b79a166228323ed52718f30302c1359836112346687", size = 28165 }, ] +[[package]] +name = "threadpoolctl" +version = "3.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bd/55/b5148dcbf72f5cde221f8bfe3b6a540da7aa1842f6b491ad979a6c8b84af/threadpoolctl-3.5.0.tar.gz", hash = "sha256:082433502dd922bf738de0d8bcc4fdcbf0979ff44c42bd40f5af8a282f6fa107", size = 41936 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4b/2c/ffbf7a134b9ab11a67b0cf0726453cedd9c5043a4fe7a35d1cefa9a1bcfb/threadpoolctl-3.5.0-py3-none-any.whl", hash = "sha256:56c1e26c150397e58c4926da8eeee87533b1e32bef131bd4bf6a2f45f3185467", size = 18414 }, +] + [[package]] name = "tiktoken" version = "0.8.0"