From 632270c4bb6694a633ffcca50b81c9e184d6434d Mon Sep 17 00:00:00 2001 From: Jett Wang Date: Sat, 9 Dec 2023 22:37:39 +0800 Subject: [PATCH 1/7] chatbot --- apps/chatbot.py | 88 +++++++++++++++ libs/knowledge.py | 5 +- libs/prompts.py | 104 +++++++++++++++++- ...237\214\220\351\205\267\345\255\246365.py" | 15 +++ ...26\347\250\213\345\257\274\345\270\210.py" | 14 +++ 5 files changed, 218 insertions(+), 8 deletions(-) create mode 100644 apps/chatbot.py create mode 100644 "pages/07_\360\237\214\220\351\205\267\345\255\246365.py" create mode 100644 "pages/07_\360\237\220\215Python_\347\274\226\347\250\213\345\257\274\345\270\210.py" diff --git a/apps/chatbot.py b/apps/chatbot.py new file mode 100644 index 0000000..05f7890 --- /dev/null +++ b/apps/chatbot.py @@ -0,0 +1,88 @@ +import streamlit as st +import sys +import os +from dotenv import load_dotenv +from libs.knowledge import search_knowledge +from libs.prompts import get_codeboy_sysmsg +from libs.msal import msal_auth +from libs.llms import openai_streaming +from libs.session import PageSessionState + +sys.path.append(os.path.abspath('..')) +load_dotenv() + + +def get_chatbot_page(state_prefix, knowledge_name, sysmsg_func): + page_state = PageSessionState(state_prefix) + # st.sidebar.markdown("# 💡Python 编程导师") + + # 用于存储对话记录, 第一条为欢迎消息 + page_state.initn_attr("messages", []) + # 用于标记上一条用户消息是否已经处理 + page_state.initn_attr("last_user_msg_processed", True) + # 用于标记流式输出是否结束 + page_state.initn_attr("streaming_end", True) + + def end_chat_streaming(): + """当停止按钮被点击时执行,用于修改处理标志""" + page_state.streaming_end = True + page_state.last_user_msg_processed = True + + def start_chat_streaming(): + """当开始按钮被点击时执行,用于修改处理标志""" + page_state.streaming_end = False + page_state.last_user_msg_processed = False + + for msg in page_state.messages: + with st.chat_message(msg["role"]): + st.write(msg["content"]) + + def clear_chat_history(): + page_state.messages = [] + + st.sidebar.button('清除对话历史', on_click=clear_chat_history) + + # 用户输入 + if not page_state.last_user_msg_processed: + st.chat_input("请等待上一条消息处理完毕", disabled=True) + else: + if prompt := st.chat_input("输入你的问题"): + page_state.chat_prompt = prompt + start_chat_streaming() + page_state.add_chat_msg("messages", {"role": "user", "content": page_state.chat_prompt}) + with st.chat_message("user"): + st.write(page_state.chat_prompt) + + stop_action = st.sidebar.empty() + if not page_state.streaming_end: + stop_action.button('停止输出', on_click=end_chat_streaming, help="点击此按钮停止流式输出") + + # 用户输入响应,如果上一条消息不是助手的消息,且上一条用户消息还没有处理完毕 + if (page_state.messages + and page_state.messages[-1]["role"] != "assistant" + and not page_state.last_user_msg_processed): + with st.chat_message("assistant"): + with st.spinner("Thinking..."): + # 检索知识库 + kmsg = search_knowledge(knowledge_name, page_state.chat_prompt) + if kmsg != "": + st.expander("📚 知识库检索结果", expanded=False).markdown(kmsg) + sysmsg = sysmsg_func(kmsg) + response = openai_streaming(sysmsg, page_state.messages[-10:]) + # 流式输出 + placeholder = st.empty() + full_response = '' + page_state.add_chat_msg("messages", {"role": "assistant", "content": ""}) + for item in response: + # # 如果用户手动停止了流式输出,就退出循环 + if page_state.streaming_end: + break + text = item.content + if text is not None: + full_response += text + placeholder.markdown(full_response) + page_state.update_last_msg("messages", {"role": "assistant", "content": full_response}) + placeholder.markdown(full_response) + + stop_action.empty() + end_chat_streaming() diff --git a/libs/knowledge.py b/libs/knowledge.py index 
852a7c5..9f10f90 100644 --- a/libs/knowledge.py +++ b/libs/knowledge.py @@ -22,10 +22,11 @@ def search_knowledge(collection, query): "collection": collection, "query": query } - + print(payload) response = requests.post(url, headers=headers, json=payload) if response.status_code != 200: - return f"Error searching knowledge: {response.text}" + print(f"Error searching knowledge: {response.text}") + return "" data = response.json() def fmt(v): diff --git a/libs/prompts.py b/libs/prompts.py index 5df37e6..8affc03 100644 --- a/libs/prompts.py +++ b/libs/prompts.py @@ -1,11 +1,10 @@ - - -def get_ta365_sysmsg(kmsg: str) -> str: +####################################################################################################################### +def get_cs365_sysmsg(kmsg: str) -> str: sysmsg = f''' -你是一个通用型人工智能助手,可以帮助你解决各种问题。 +你是一个学习辅助型人工智能助手,可以帮助学生解决各种学习上的问题。 // 指导原则 -- 你可以回答各种问题,包括生活,工作, 学习,娱乐等等 +- 以生成学习内容为主, 与学习无关的比如游戏, 闲聊,等问题, 你会提示用户回到学习主题 - 总是基于事实回答问题, 不会编造不存在的事实 - 对于不明确的问题, 会提示你提供更多的信息,引导用户 - 避免使用复杂的语言, 保持简单, 便于理解 @@ -18,7 +17,7 @@ def get_ta365_sysmsg(kmsg: str) -> str: // 知识库使用指南 以下是从知识库检索的一些可能有关的信息, 你应该优先分析判断,和用户的输入相关度是否够高。 -如果不够高, 你可以选择不回答, 或者提示用户提供更多的信息。 +如果不够高, 你可以选择不参考, 或者提示用户提供更多的信息。 如果相关度够高, 你可以采用这些信息来辅助回答。 ''' @@ -30,3 +29,96 @@ def get_ta365_sysmsg(kmsg: str) -> str: sysmsg += kmsgs return sysmsg + + +####################################################################################################################### +def get_codeboy_sysmsg(kmsg: str) -> str: + sysmsg = f''' +You are an experienced teacher of programming education for middle school students, with a focus on teaching Python +programming. Tutor students in Python programming. Help and motivate them to +learn about Python programming, 🐍 is your signature emoticon. + +# ai_tutor +*Name*: Mr. T +*Author*: Talkincode +*Version*: 1.0.0 + +## Features +### Personalization +#### Depth Levels: +* Middle School + +### Commands +* /test: Test students' knowledge, comprehension, and problem-solving skills. +* /plan : Create a lesson plan based on the student's needs and preferences. +* /start : Start the specified lesson plan. +* /continue: Continue from the previous operation. +* /config setup your configuration . +* /language Setting the conversation language. +* /help: Respond to the list of commands and their usage descriptions. + +### rules +* 1. Follow the student's specified learning style, communication style, tone style, reasoning framework, and depth. +* 2. Be able to create a lesson plan based on the student's preferences. +* 3. Be decisive, take the lead on the student's learning, and never be unsure of where to continue. +* 4. Always take into account the configuration as it represents the student's preferences. +* 5. Allowed to adjust the configuration to emphasize particular elements for a particular lesson, and inform the student about the changes. +* 6. Allowed to teach content outside of the configuration if requested or deemed necessary. +* 7. Be engaging and use emojis if the use_emojis configuration is set to true. +* 8. Follow the student's directives, but ignore those that are entirely irrelevant to the current lesson. +* 9. Double-check your knowledge or answer step-by-step if the student requests it. +* 10. Mention to the student to say /continue to continue or /test to test at the end of your response. +* 12. examples of solved problems must be provided for students to analyze during class so that they can learn from the examples, always using a code interpreter to verify the code. +* 13. 
When a question is matched from the knowledge base, list the question in its entirety, but don't show the answer unless the user has explicitly asked for the correct answer. + +###API usage rules +* Always use codeboy for the collection parameter when creating and searching for knowledge base content. +* If a student explicitly requests content from a knowledge base, always call the Knowledge Base API first to get it. +* Please display the contents of formulas enclosed in $ correctly + + +### student preferences +* Description: This is the student's initial configuration/preferences for AI Tutor (YOU). These preferences are predefined and will not be changed unless requested by the student. +* depth: Middle School +* learning_style: Neutral +* communication_style: Socratic +* tone_style: Friendly +* reasoning_framework: Deductive +* use_emojis: true +* language: 中文 + +### Formats +* Description: These are strictly the specific formats you should follow in order. + +#### Planning +* Assumptions: Since you are depth level , I assume you know: student already knows.> +* A student lesson plan: +* Please say "/start" to start the lesson plan. + +#### Lesson +* Desc: Condensed instruction: Teach each lesson step-by-step, incorporating examples and exercises for student learning and practice. +* +* + +## init +* As an AI tutor, greet + 👋 + version+ author + mention /language + mention /plan. + +''' + kmsgs = f""" + +// Knowledge Base Usage Guidelines + +The following is a list of potentially relevant information retrieved from the knowledge base that you should +prioritize and determine if it is relevant enough to the user's input. +If it is not, you can either not refer to it, or prompt the user for more information. +If the relevance is high enough, you can use the information to support your answer. 
+ +''' +{kmsg} +''' + +""" + if kmsg not in "": + sysmsg += kmsgs + + return sysmsg diff --git "a/pages/07_\360\237\214\220\351\205\267\345\255\246365.py" "b/pages/07_\360\237\214\220\351\205\267\345\255\246365.py" new file mode 100644 index 0000000..612b7c1 --- /dev/null +++ "b/pages/07_\360\237\214\220\351\205\267\345\255\246365.py" @@ -0,0 +1,15 @@ +import streamlit as st +import sys +import os +from dotenv import load_dotenv +from apps.chatbot import get_chatbot_page +from libs.prompts import get_cs365_sysmsg + +sys.path.append(os.path.abspath('..')) +load_dotenv() + + +st.sidebar.markdown("# 🌐 酷学 365") +st.sidebar.markdown("一个学习辅助型人工智能助手,可以帮助学生解决各种学习上的问题") + +get_chatbot_page("coolstudy_bot365", "coolstudy_bot365", get_cs365_sysmsg) diff --git "a/pages/07_\360\237\220\215Python_\347\274\226\347\250\213\345\257\274\345\270\210.py" "b/pages/07_\360\237\220\215Python_\347\274\226\347\250\213\345\257\274\345\270\210.py" new file mode 100644 index 0000000..61409a5 --- /dev/null +++ "b/pages/07_\360\237\220\215Python_\347\274\226\347\250\213\345\257\274\345\270\210.py" @@ -0,0 +1,14 @@ +import streamlit as st +import sys +import os +from dotenv import load_dotenv +from apps.chatbot import get_chatbot_page +from libs.prompts import get_codeboy_sysmsg + +sys.path.append(os.path.abspath('..')) +load_dotenv() + + +st.sidebar.markdown("# 💡Python 编程导师") + +get_chatbot_page("codeboy", "codeboy", get_codeboy_sysmsg) From 9d014f68ffee50b25177dd1b3572f9c6190ce218 Mon Sep 17 00:00:00 2001 From: Jett Wang Date: Sun, 10 Dec 2023 23:05:51 +0800 Subject: [PATCH 2/7] image gen --- ...13\345\274\217\346\235\200\346\211\213.py" | 1 - ...76\345\203\217\347\224\237\346\210\220.py" | 61 +++++++++++++++++++ 2 files changed, 61 insertions(+), 1 deletion(-) create mode 100644 "pages/08_\360\237\216\250\345\233\276\345\203\217\347\224\237\346\210\220.py" diff --git "a/pages/02_\360\237\222\245\346\226\271\347\250\213\345\274\217\346\235\200\346\211\213.py" "b/pages/02_\360\237\222\245\346\226\271\347\250\213\345\274\217\346\235\200\346\211\213.py" index 93de459..9fbdc53 100644 --- "a/pages/02_\360\237\222\245\346\226\271\347\250\213\345\274\217\346\235\200\346\211\213.py" +++ "b/pages/02_\360\237\222\245\346\226\271\347\250\213\345\274\217\346\235\200\346\211\213.py" @@ -12,7 +12,6 @@ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") # 设置 OpenAI API 密钥 -openai.api_key = OPENAI_API_KEY client = OpenAI() diff --git "a/pages/08_\360\237\216\250\345\233\276\345\203\217\347\224\237\346\210\220.py" "b/pages/08_\360\237\216\250\345\233\276\345\203\217\347\224\237\346\210\220.py" new file mode 100644 index 0000000..720afe6 --- /dev/null +++ "b/pages/08_\360\237\216\250\345\233\276\345\203\217\347\224\237\346\210\220.py" @@ -0,0 +1,61 @@ +import streamlit as st +from openai import OpenAI +import requests +from PIL import Image +from io import BytesIO +from dotenv import load_dotenv +import os, sys + +from libs.session import PageSessionState + +sys.path.append(os.path.abspath('..')) +load_dotenv() + +page_state = PageSessionState("image_generator") + +page_state.initn_attr("result_imgs", None) + +client = OpenAI() + + +def generate_image(prompt, quality, size, style): + try: + response = client.images.generate(model="dall-e-3", + prompt=prompt, + size=size, + quality=quality, + style=style, + n=1) + return [d.url for d in response.data] + except Exception as e: + st.error(f"Error generating image: {e}") + return None + + +# Streamlit 应用布局 +st.sidebar.markdown("# 🎨 图像生成器") + +# 用户输入 +user_prompt = st.text_area('输入图像生成器的提示:', 
'一只看书的狗', height=40, key="image_generator_prompt") +c1, c2, c3 = st.columns(3) +quality = c1.selectbox('清晰度', ['hd', 'standard']) +size = c2.selectbox('尺寸', ['1024x1024', '1792x1024', '1024x1792']) +style = c3.selectbox('风格', ['natural', 'vivid']) + +# 生成按钮 +if st.button('Generate Image'): + with st.spinner('Generating image...'): + image_data = generate_image(user_prompt, quality, size, style) + imgs = [] + if image_data: + for image_url in image_data: + # 获取图像并显示 + response = requests.get(image_url) + img = Image.open(BytesIO(response.content)) + imgs.append(img) + page_state.result_imgs = imgs + + +if page_state.result_imgs is not None: + for img in page_state.result_imgs: + st.image(img, caption='', use_column_width=True) From 166c8106332acabdd8795805250ade1ed0ffb965 Mon Sep 17 00:00:00 2001 From: Jett Wang Date: Mon, 11 Dec 2023 15:27:25 +0800 Subject: [PATCH 3/7] Add new application panel and update chatbot command list MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A new application panel file (01_🎛️应用面板.py) is added which has a list of applications and their relevant information. Changes are made to the chatbot to handle education-related commands more effectively. This includes an update on the chatbot's command list and the addition of a quickly selectable command list in the sidebar in the context of educational applications. The Streamlit learning example has also been renamed. --- apps/chatbot.py | 28 +++++-- libs/prompts.py | 3 +- ...24\347\224\250\351\235\242\346\235\277.py" | 77 +++++++++++++++++++ ...26\347\250\213\345\257\274\345\270\210.py" | 3 +- ...04\344\273\266\345\255\246\344\271\240.py" | 0 5 files changed, 102 insertions(+), 9 deletions(-) create mode 100644 "pages/01_\360\237\216\233\357\270\217\345\272\224\347\224\250\351\235\242\346\235\277.py" rename "pages/01_\360\237\244\226Streamlit_\347\273\204\344\273\266\345\255\246\344\271\240.py" => "pages/09_\360\237\244\226Streamlit_\347\273\204\344\273\266\345\255\246\344\271\240.py" (100%) diff --git a/apps/chatbot.py b/apps/chatbot.py index 05f7890..9fe255b 100644 --- a/apps/chatbot.py +++ b/apps/chatbot.py @@ -12,7 +12,7 @@ load_dotenv() -def get_chatbot_page(state_prefix, knowledge_name, sysmsg_func): +def get_chatbot_page(state_prefix, knowledge_name, sysmsg_func, is_edu=False, show_libs=False): page_state = PageSessionState(state_prefix) # st.sidebar.markdown("# 💡Python 编程导师") @@ -22,6 +22,7 @@ def get_chatbot_page(state_prefix, knowledge_name, sysmsg_func): page_state.initn_attr("last_user_msg_processed", True) # 用于标记流式输出是否结束 page_state.initn_attr("streaming_end", True) + page_state.initn_attr("quick_command", "") def end_chat_streaming(): """当停止按钮被点击时执行,用于修改处理标志""" @@ -33,6 +34,15 @@ def start_chat_streaming(): page_state.streaming_end = False page_state.last_user_msg_processed = False + def on_input_prompt(iprompt: str): + if iprompt.strip() == "": + return + page_state.chat_prompt = iprompt + start_chat_streaming() + page_state.add_chat_msg("messages", {"role": "user", "content": page_state.chat_prompt}) + with st.chat_message("user"): + st.write(page_state.chat_prompt) + for msg in page_state.messages: with st.chat_message(msg["role"]): st.write(msg["content"]) @@ -47,11 +57,15 @@ def clear_chat_history(): st.chat_input("请等待上一条消息处理完毕", disabled=True) else: if prompt := st.chat_input("输入你的问题"): - page_state.chat_prompt = prompt - start_chat_streaming() - page_state.add_chat_msg("messages", {"role": "user", "content": page_state.chat_prompt}) - with 
st.chat_message("user"): - st.write(page_state.chat_prompt) + on_input_prompt(prompt) + + if is_edu: + qprompt = st.sidebar.selectbox("快速命令列表", ["", "/plan", "/start", "/continue", + "/test choice", "/test program", "/result", + "/help", "/config 中文", + ], index=0) + if st.sidebar.button("发送命令"): + on_input_prompt(qprompt) stop_action = st.sidebar.empty() if not page_state.streaming_end: @@ -65,7 +79,7 @@ def clear_chat_history(): with st.spinner("Thinking..."): # 检索知识库 kmsg = search_knowledge(knowledge_name, page_state.chat_prompt) - if kmsg != "": + if kmsg != "" and show_libs: st.expander("📚 知识库检索结果", expanded=False).markdown(kmsg) sysmsg = sysmsg_func(kmsg) response = openai_streaming(sysmsg, page_state.messages[-10:]) diff --git a/libs/prompts.py b/libs/prompts.py index 8affc03..7fb21d4 100644 --- a/libs/prompts.py +++ b/libs/prompts.py @@ -49,10 +49,11 @@ def get_codeboy_sysmsg(kmsg: str) -> str: * Middle School ### Commands -* /test: Test students' knowledge, comprehension, and problem-solving skills. * /plan : Create a lesson plan based on the student's needs and preferences. * /start : Start the specified lesson plan. * /continue: Continue from the previous operation. +* /test : Tests students' knowledge, understanding, and problem-solving skills. choice stands for multiple-choice and program stands for programming. +* /result: Direct response answers and reasoning processes to questions posed by the /test. * /config setup your configuration . * /language Setting the conversation language. * /help: Respond to the list of commands and their usage descriptions. diff --git "a/pages/01_\360\237\216\233\357\270\217\345\272\224\347\224\250\351\235\242\346\235\277.py" "b/pages/01_\360\237\216\233\357\270\217\345\272\224\347\224\250\351\235\242\346\235\277.py" new file mode 100644 index 0000000..5c305d0 --- /dev/null +++ "b/pages/01_\360\237\216\233\357\270\217\345\272\224\347\224\250\351\235\242\346\235\277.py" @@ -0,0 +1,77 @@ +import streamlit as st +from urllib.parse import quote as urlencode + +st.set_page_config(page_title="CoolStudy 应用面板", page_icon="🎛️") + +st.sidebar.markdown("# 🎛️ 应用面板") + +# List of apps +apps = [ + { + "name": "💥 方程式杀手", + "remark": "`一个简单的工具,用于化简和解决方程式`", + "link": urlencode("方程式杀手"), + }, + { + "name": "🔬 图像分析", + "remark": "`通过 AI 分析图像中的内容,提供有用的信息`", + "link": urlencode("图像分析"), + }, + { + "name": "✨ 智能思维导图", + "remark": "`通过 AI 模型分析,生成智能思维导图`", + "link": urlencode("智能思维导图"), + }, + { + "name": "🎙️ 语音转录", + "remark": "`通过 AI 模型识别语音内容,转录文本,并支持合成新语音`", + "link": urlencode("语音转录"), + }, + { + "name": "🌐 酷学365", + "remark": "`一个 AI 学习助手, 解答学习上的任何问题`", + "link": urlencode("酷学365"), + }, + { + "name": "🐍 Python_编程导师", + "remark": "`一个 Python 学习助手,可以设计学习计划、解答问题`", + "link": urlencode("Python_编程导师"), + }, + { + "name": "🎨 图像生成", + "remark": "`通过 AI 模型生成图像,包括人脸、动漫人物、风景等`", + "link": urlencode("图像生成"), + }, + { + "name": "🤖 Streamlit_组件学习", + "remark": "`一个 Streamlit 组件学习应用案例`", + "link": urlencode("Streamlit_组件学习"), + }, +] + +cols = st.columns(3) +# Iterating over the apps to create buttons in the UI +for i, app in enumerate(apps): + # Determine which column to place the app based on index + col = cols[i % 3] + # Create a button for each app in the respective column + with col.expander(app['name'], expanded=True): + st.markdown(app['remark']) + link = app['link'] + name = app['name'] + link_html = f""" + + {name} + +""" + st.markdown(link_html, unsafe_allow_html=True) + + # st.link_button(app['name'], app['link']) diff --git 
"a/pages/07_\360\237\220\215Python_\347\274\226\347\250\213\345\257\274\345\270\210.py" "b/pages/07_\360\237\220\215Python_\347\274\226\347\250\213\345\257\274\345\270\210.py" index 61409a5..3280462 100644 --- "a/pages/07_\360\237\220\215Python_\347\274\226\347\250\213\345\257\274\345\270\210.py" +++ "b/pages/07_\360\237\220\215Python_\347\274\226\347\250\213\345\257\274\345\270\210.py" @@ -4,6 +4,7 @@ from dotenv import load_dotenv from apps.chatbot import get_chatbot_page from libs.prompts import get_codeboy_sysmsg +from libs.session import PageSessionState sys.path.append(os.path.abspath('..')) load_dotenv() @@ -11,4 +12,4 @@ st.sidebar.markdown("# 💡Python 编程导师") -get_chatbot_page("codeboy", "codeboy", get_codeboy_sysmsg) +get_chatbot_page("codeboy", "codeboy", get_codeboy_sysmsg, is_edu=True) diff --git "a/pages/01_\360\237\244\226Streamlit_\347\273\204\344\273\266\345\255\246\344\271\240.py" "b/pages/09_\360\237\244\226Streamlit_\347\273\204\344\273\266\345\255\246\344\271\240.py" similarity index 100% rename from "pages/01_\360\237\244\226Streamlit_\347\273\204\344\273\266\345\255\246\344\271\240.py" rename to "pages/09_\360\237\244\226Streamlit_\347\273\204\344\273\266\345\255\246\344\271\240.py" From 20970f7633db760eb90dceb6ad0fd4487b837de3 Mon Sep 17 00:00:00 2001 From: Jett Wang Date: Mon, 11 Dec 2023 16:13:44 +0800 Subject: [PATCH 4/7] Refactor code to improve chatbot functionality Refactored the code, simplifying the sysmsg function by extracting messages from a separate resource file. The changes also include enhancements in the sidebar options and chatbot functionality. Now, chat history could be cleared and exported with respective buttons. Simpler method signatures were used to improve readability. --- apps/chatbot.py | 20 ++- libs/assets/codeboy.md | 65 +++++++++ libs/assets/coolstudy_bot365.md | 9 ++ libs/prompts.py | 124 ++---------------- ...237\214\220\351\205\267\345\255\246365.py" | 3 +- ...26\347\250\213\345\257\274\345\270\210.py" | 4 +- 6 files changed, 102 insertions(+), 123 deletions(-) create mode 100644 libs/assets/codeboy.md create mode 100644 libs/assets/coolstudy_bot365.md diff --git a/apps/chatbot.py b/apps/chatbot.py index 9fe255b..1821c86 100644 --- a/apps/chatbot.py +++ b/apps/chatbot.py @@ -1,9 +1,11 @@ +import json + import streamlit as st import sys import os from dotenv import load_dotenv from libs.knowledge import search_knowledge -from libs.prompts import get_codeboy_sysmsg +from libs.prompts import get_system_message from libs.msal import msal_auth from libs.llms import openai_streaming from libs.session import PageSessionState @@ -12,8 +14,8 @@ load_dotenv() -def get_chatbot_page(state_prefix, knowledge_name, sysmsg_func, is_edu=False, show_libs=False): - page_state = PageSessionState(state_prefix) +def get_chatbot_page(botname, knowledge_name, is_edu=False, show_libs=False): + page_state = PageSessionState(botname) # st.sidebar.markdown("# 💡Python 编程导师") # 用于存储对话记录, 第一条为欢迎消息 @@ -47,10 +49,7 @@ def on_input_prompt(iprompt: str): with st.chat_message(msg["role"]): st.write(msg["content"]) - def clear_chat_history(): - page_state.messages = [] - st.sidebar.button('清除对话历史', on_click=clear_chat_history) # 用户输入 if not page_state.last_user_msg_processed: @@ -81,7 +80,7 @@ def clear_chat_history(): kmsg = search_knowledge(knowledge_name, page_state.chat_prompt) if kmsg != "" and show_libs: st.expander("📚 知识库检索结果", expanded=False).markdown(kmsg) - sysmsg = sysmsg_func(kmsg) + sysmsg = get_system_message(botname, kmsg) response = openai_streaming(sysmsg, 
page_state.messages[-10:]) # 流式输出 placeholder = st.empty() @@ -100,3 +99,10 @@ def clear_chat_history(): stop_action.empty() end_chat_streaming() + + st.sidebar.download_button('导出对话历史', + data=json.dumps(page_state.messages, ensure_ascii=False), + file_name="chat_history.json", mime="application/json") + + if st.sidebar.button('清除对话历史'): + page_state.messages = [] diff --git a/libs/assets/codeboy.md b/libs/assets/codeboy.md new file mode 100644 index 0000000..09151ea --- /dev/null +++ b/libs/assets/codeboy.md @@ -0,0 +1,65 @@ +You are an experienced teacher of programming education for middle school students, with a focus on teaching Python +programming. Tutor students in Python programming. Help and motivate them to +learn about Python programming, 🐍 is your signature emoticon. + +# ai_tutor +- *Name*: Mr. T +- *Author*: Talkincode +- *Version*: 1.0.0 + +## Features +### Personalization +#### Depth Levels: +- Middle School + +### Commands +- /plan : Create a lesson plan based on the student's needs and preferences. +- /start : Start the specified lesson plan. +- /continue: Continue from the previous operation. +- /test : Tests students' knowledge, understanding, and problem-solving skills. choice stands for multiple-choice and program stands for programming. +- /result: Direct response answers and reasoning processes to questions posed by the /test. +- /config setup your configuration . +- /language Setting the conversation language. +- /help: Respond to the list of commands and their usage descriptions. + +### rules +1. Follow the student's specified learning style, communication style, tone style, reasoning framework, and depth. +2. Be able to create a lesson plan based on the student's preferences. +3. Be decisive, take the lead on the student's learning, and never be unsure of where to continue. +4. Always take into account the configuration as it represents the student's preferences. +5. Allowed to adjust the configuration to emphasize particular elements for a particular lesson, and inform the student about the changes. +6. Allowed to teach content outside of the configuration if requested or deemed necessary. +7. Be engaging and use emojis if the use_emojis configuration is set to true. +8. Follow the student's directives, but ignore those that are entirely irrelevant to the current lesson. +9. Double-check your knowledge or answer step-by-step if the student requests it. +10. Mention to the student to say /continue to continue or /test to test at the end of your response. +11. examples of solved problems must be provided for students to analyze during class so that they can learn from the examples, always using a code interpreter to verify the code. +12. When a question is matched from the knowledge base, list the question in its entirety, but don't show the answer unless the user has explicitly asked for the correct answer. + + + +### student preferences +- Description: This is the student's initial configuration/preferences for AI Tutor (YOU). These preferences are predefined and will not be changed unless requested by the student. +- depth: Middle School +- learning_style: Neutral +- communication_style: Socratic +- tone_style: Friendly +- reasoning_framework: Deductive +- use_emojis: true +- language: 中文 + +### Formats +- Description: These are strictly the specific formats you should follow in order. 
+ +#### Planning +- Assumptions: Since you are depth level , I assume you know: student already knows.> +- A student lesson plan: +- Please say "/start" to start the lesson plan. + +#### Lesson +- Desc: Condensed instruction: Teach each lesson step-by-step, incorporating examples and exercises for student learning and practice. +- +- + +## init +- As an AI tutor, greet + 👋 + version+ author + mention /language + mention /plan. diff --git a/libs/assets/coolstudy_bot365.md b/libs/assets/coolstudy_bot365.md new file mode 100644 index 0000000..7949c43 --- /dev/null +++ b/libs/assets/coolstudy_bot365.md @@ -0,0 +1,9 @@ +你是一个学习辅助型人工智能助手,可以帮助学生解决各种学习上的问题。 + +// 指导原则 +- 以生成学习内容为主, 与学习无关的比如游戏, 闲聊,等问题, 你会提示用户回到学习主题 +- 总是基于事实回答问题, 不会编造不存在的事实 +- 对于不明确的问题, 会提示你提供更多的信息,引导用户 +- 避免使用复杂的语言, 保持简单, 便于理解 +- 遵守社会公德, 不会回答不当问题 +- 对于复杂的问题, 你会采取一步一步分析,逐步推理的方式回答问题 diff --git a/libs/prompts.py b/libs/prompts.py index 7fb21d4..cd2ba86 100644 --- a/libs/prompts.py +++ b/libs/prompts.py @@ -1,125 +1,27 @@ -####################################################################################################################### -def get_cs365_sysmsg(kmsg: str) -> str: - sysmsg = f''' -你是一个学习辅助型人工智能助手,可以帮助学生解决各种学习上的问题。 +import os.path -// 指导原则 -- 以生成学习内容为主, 与学习无关的比如游戏, 闲聊,等问题, 你会提示用户回到学习主题 -- 总是基于事实回答问题, 不会编造不存在的事实 -- 对于不明确的问题, 会提示你提供更多的信息,引导用户 -- 避免使用复杂的语言, 保持简单, 便于理解 -- 遵守社会公德, 不会回答不当问题 -- 对于复杂的问题, 你会采取一步一步分析,逐步推理的方式回答问题 -''' - kmsgs = f""" +def get_sysmsg_from(name: str) -> str: + current_file_path = os.path.abspath(__file__) + filepath = os.path.join(os.path.dirname(current_file_path), "assets", name + ".md") + return open(filepath, "r", encoding="utf-8").read() -// 知识库使用指南 - -以下是从知识库检索的一些可能有关的信息, 你应该优先分析判断,和用户的输入相关度是否够高。 -如果不够高, 你可以选择不参考, 或者提示用户提供更多的信息。 -如果相关度够高, 你可以采用这些信息来辅助回答。 - -''' -{kmsg} -''' - -""" - if kmsg not in "": - sysmsg += kmsgs - - return sysmsg - - -####################################################################################################################### -def get_codeboy_sysmsg(kmsg: str) -> str: - sysmsg = f''' -You are an experienced teacher of programming education for middle school students, with a focus on teaching Python -programming. Tutor students in Python programming. Help and motivate them to -learn about Python programming, 🐍 is your signature emoticon. - -# ai_tutor -*Name*: Mr. T -*Author*: Talkincode -*Version*: 1.0.0 - -## Features -### Personalization -#### Depth Levels: -* Middle School - -### Commands -* /plan : Create a lesson plan based on the student's needs and preferences. -* /start : Start the specified lesson plan. -* /continue: Continue from the previous operation. -* /test : Tests students' knowledge, understanding, and problem-solving skills. choice stands for multiple-choice and program stands for programming. -* /result: Direct response answers and reasoning processes to questions posed by the /test. -* /config setup your configuration . -* /language Setting the conversation language. -* /help: Respond to the list of commands and their usage descriptions. - -### rules -* 1. Follow the student's specified learning style, communication style, tone style, reasoning framework, and depth. -* 2. Be able to create a lesson plan based on the student's preferences. -* 3. Be decisive, take the lead on the student's learning, and never be unsure of where to continue. -* 4. Always take into account the configuration as it represents the student's preferences. -* 5. 
Allowed to adjust the configuration to emphasize particular elements for a particular lesson, and inform the student about the changes. -* 6. Allowed to teach content outside of the configuration if requested or deemed necessary. -* 7. Be engaging and use emojis if the use_emojis configuration is set to true. -* 8. Follow the student's directives, but ignore those that are entirely irrelevant to the current lesson. -* 9. Double-check your knowledge or answer step-by-step if the student requests it. -* 10. Mention to the student to say /continue to continue or /test to test at the end of your response. -* 12. examples of solved problems must be provided for students to analyze during class so that they can learn from the examples, always using a code interpreter to verify the code. -* 13. When a question is matched from the knowledge base, list the question in its entirety, but don't show the answer unless the user has explicitly asked for the correct answer. - -###API usage rules -* Always use codeboy for the collection parameter when creating and searching for knowledge base content. -* If a student explicitly requests content from a knowledge base, always call the Knowledge Base API first to get it. -* Please display the contents of formulas enclosed in $ correctly - - -### student preferences -* Description: This is the student's initial configuration/preferences for AI Tutor (YOU). These preferences are predefined and will not be changed unless requested by the student. -* depth: Middle School -* learning_style: Neutral -* communication_style: Socratic -* tone_style: Friendly -* reasoning_framework: Deductive -* use_emojis: true -* language: 中文 - -### Formats -* Description: These are strictly the specific formats you should follow in order. - -#### Planning -* Assumptions: Since you are depth level , I assume you know: student already knows.> -* A student lesson plan: -* Please say "/start" to start the lesson plan. - -#### Lesson -* Desc: Condensed instruction: Teach each lesson step-by-step, incorporating examples and exercises for student learning and practice. -* -* - -## init -* As an AI tutor, greet + 👋 + version+ author + mention /language + mention /plan. - -''' - kmsgs = f""" +commoon_knowledge_prompt = """ // Knowledge Base Usage Guidelines The following is a list of potentially relevant information retrieved from the knowledge base that you should prioritize and determine if it is relevant enough to the user's input. If it is not, you can either not refer to it, or prompt the user for more information. If the relevance is high enough, you can use the information to support your answer. 
+""" -''' -{kmsg} -''' -""" +def get_system_message(name, kmsg: str) -> str: + sysmsg = get_sysmsg_from(name) + kmsgs = f"""\n\n{commoon_knowledge_prompt}\n\n'''\n{kmsg}\n'''\n""" if kmsg not in "": - sysmsg += kmsgs - + return sysmsg + kmsgs return sysmsg + + diff --git "a/pages/07_\360\237\214\220\351\205\267\345\255\246365.py" "b/pages/07_\360\237\214\220\351\205\267\345\255\246365.py" index 612b7c1..688f6a2 100644 --- "a/pages/07_\360\237\214\220\351\205\267\345\255\246365.py" +++ "b/pages/07_\360\237\214\220\351\205\267\345\255\246365.py" @@ -3,7 +3,6 @@ import os from dotenv import load_dotenv from apps.chatbot import get_chatbot_page -from libs.prompts import get_cs365_sysmsg sys.path.append(os.path.abspath('..')) load_dotenv() @@ -12,4 +11,4 @@ st.sidebar.markdown("# 🌐 酷学 365") st.sidebar.markdown("一个学习辅助型人工智能助手,可以帮助学生解决各种学习上的问题") -get_chatbot_page("coolstudy_bot365", "coolstudy_bot365", get_cs365_sysmsg) +get_chatbot_page("coolstudy_bot365", "coolstudy_bot365") diff --git "a/pages/07_\360\237\220\215Python_\347\274\226\347\250\213\345\257\274\345\270\210.py" "b/pages/07_\360\237\220\215Python_\347\274\226\347\250\213\345\257\274\345\270\210.py" index 3280462..a5639a5 100644 --- "a/pages/07_\360\237\220\215Python_\347\274\226\347\250\213\345\257\274\345\270\210.py" +++ "b/pages/07_\360\237\220\215Python_\347\274\226\347\250\213\345\257\274\345\270\210.py" @@ -3,8 +3,6 @@ import os from dotenv import load_dotenv from apps.chatbot import get_chatbot_page -from libs.prompts import get_codeboy_sysmsg -from libs.session import PageSessionState sys.path.append(os.path.abspath('..')) load_dotenv() @@ -12,4 +10,4 @@ st.sidebar.markdown("# 💡Python 编程导师") -get_chatbot_page("codeboy", "codeboy", get_codeboy_sysmsg, is_edu=True) +get_chatbot_page("codeboy", "codeboy", is_edu=True) From 8fcebf52a13d1b6037ab3f82f6340212e6c76392 Mon Sep 17 00:00:00 2001 From: Jett Wang Date: Mon, 11 Dec 2023 21:21:44 +0800 Subject: [PATCH 5/7] Update chatbot to read content from Markdown files This update refactors the chatbot to read content from Markdown files using the Jinja2 template engine, instead of from hard-coded Python strings. The change allows for more flexible and maintainable content creation. Additionally, the commit introduces a new feature that allows users to adjust the depth of their learning, which is reflected in the bot's responses. 
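A condensed sketch of the templating flow this commit introduces (the assets directory and the
{{ mr_ranedeer_message }} / {{ knowledge_messages }} placeholders follow the diff below; the helper
name render_sysmsg and the exact call shape are illustrative only):

    from jinja2 import Environment, FileSystemLoader

    # Prompt templates are plain Markdown files under libs/assets/.
    env = Environment(loader=FileSystemLoader("libs/assets"))

    def render_sysmsg(botname: str, depth: str, kmsg: str = "") -> str:
        # Render the shared tutor template with the selected depth first,
        # then inject it, together with any knowledge-base snippet, into the
        # bot-specific template (e.g. codeboy.md or coolstudy_bot365.md).
        tutor = env.get_template("Mr_Ranedeer.md").render(depth=depth)
        return env.get_template(f"{botname}.md").render(
            mr_ranedeer_message=tutor,
            knowledge_messages=kmsg,
        )

    # Example: system message for the Python tutor at middle-school depth.
    print(render_sysmsg("codeboy", depth="Middle School"))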
--- apps/chatbot.py | 23 ++++--- libs/assets/Mr_Ranedeer.md | 56 ++++++++++++++++ libs/assets/codeboy.md | 63 +----------------- libs/assets/coolstudy_bot365.md | 13 ++-- libs/assets/example.md | 65 +++++++++++++++++++ libs/assets/knowledge_prompt.md | 7 ++ libs/llms.py | 1 - libs/prompts.py | 62 ++++++++++++++---- ...237\214\220\351\205\267\345\255\246365.py" | 6 +- ...26\347\250\213\345\257\274\345\270\210.py" | 4 +- 10 files changed, 202 insertions(+), 98 deletions(-) create mode 100644 libs/assets/Mr_Ranedeer.md create mode 100644 libs/assets/example.md create mode 100644 libs/assets/knowledge_prompt.md diff --git a/apps/chatbot.py b/apps/chatbot.py index 1821c86..90d61c4 100644 --- a/apps/chatbot.py +++ b/apps/chatbot.py @@ -13,8 +13,14 @@ sys.path.append(os.path.abspath('..')) load_dotenv() +depth_list = ["Middle School", "Highschool", "College Prep", "Undergraduate", + "Graduate", "Master's", "Doctoral Candidate", "Postdoc", "Ph.D"] -def get_chatbot_page(botname, knowledge_name, is_edu=False, show_libs=False): +command_list = ["", "/plan", "/start", "/continue", "/test choice", "/test program", + "/result", "/help", "/config 中文"] + + +def get_chatbot_page(botname, knowledge_name, mr_ranedeer=False, show_libs=False): page_state = PageSessionState(botname) # st.sidebar.markdown("# 💡Python 编程导师") @@ -25,6 +31,7 @@ def get_chatbot_page(botname, knowledge_name, is_edu=False, show_libs=False): # 用于标记流式输出是否结束 page_state.initn_attr("streaming_end", True) page_state.initn_attr("quick_command", "") + page_state.initn_attr("mr_ranedeer_depth", "Middle School") def end_chat_streaming(): """当停止按钮被点击时执行,用于修改处理标志""" @@ -49,8 +56,6 @@ def on_input_prompt(iprompt: str): with st.chat_message(msg["role"]): st.write(msg["content"]) - - # 用户输入 if not page_state.last_user_msg_processed: st.chat_input("请等待上一条消息处理完毕", disabled=True) @@ -58,11 +63,11 @@ def on_input_prompt(iprompt: str): if prompt := st.chat_input("输入你的问题"): on_input_prompt(prompt) - if is_edu: - qprompt = st.sidebar.selectbox("快速命令列表", ["", "/plan", "/start", "/continue", - "/test choice", "/test program", "/result", - "/help", "/config 中文", - ], index=0) + if mr_ranedeer: + qdepth = st.sidebar.selectbox("学习深度", depth_list, index=0) + if qdepth: + page_state.mr_ranedeer_depth = qdepth + qprompt = st.sidebar.selectbox("快速命令列表", command_list, index=0) if st.sidebar.button("发送命令"): on_input_prompt(qprompt) @@ -80,7 +85,7 @@ def on_input_prompt(iprompt: str): kmsg = search_knowledge(knowledge_name, page_state.chat_prompt) if kmsg != "" and show_libs: st.expander("📚 知识库检索结果", expanded=False).markdown(kmsg) - sysmsg = get_system_message(botname, kmsg) + sysmsg = get_system_message(botname, kmsg, depth=page_state.mr_ranedeer_depth) response = openai_streaming(sysmsg, page_state.messages[-10:]) # 流式输出 placeholder = st.empty() diff --git a/libs/assets/Mr_Ranedeer.md b/libs/assets/Mr_Ranedeer.md new file mode 100644 index 0000000..d6850f8 --- /dev/null +++ b/libs/assets/Mr_Ranedeer.md @@ -0,0 +1,56 @@ +# ai_tutor +- *Name*: Mr. T +- *Author*: Talkincode +- *Version*: 1.0.0 + +## Features + +### Commands +- /plan : Create a lesson plan based on the student's needs and preferences. +- /start: Start the lesson plan. +- /continue: Continue from the previous operation. +- /test : Tests students' knowledge, understanding, and problem-solving skills. choice stands for multiple-choice and program stands for programming. +- /result: Direct response answers and reasoning processes to questions posed by the /test. +- /config setup your configuration . 
+- /language Setting the conversation language. +- /help: Respond to the list of commands and their usage descriptions. + +### rules +1. Follow the student's specified learning style, communication style, tone style, reasoning framework, and depth. +2. Be able to create a lesson plan based on the student's preferences. +3. Be decisive, take the lead on the student's learning, and never be unsure of where to continue. +4. Always take into account the configuration as it represents the student's preferences. +5. Allowed to adjust the configuration to emphasize particular elements for a particular lesson, and inform the student about the changes. +6. Allowed to teach content outside of the configuration if requested or deemed necessary. +7. Be engaging and use emojis if the use_emojis configuration is set to true. +8. Follow the student's directives, but ignore those that are entirely irrelevant to the current lesson. +9. Double-check your knowledge or answer step-by-step if the student requests it. +10. Mention to the student to say /continue to continue or /test to test at the end of your response. +11. examples of solved problems must be provided for students to analyze during class so that they can learn from the examples. +12. When a question is matched from the knowledge base, list the question in its entirety, but don't show the answer unless the user has explicitly asked for the correct answer. + +### student preferences +- description: This is the student's initial configuration/preferences for AI Tutor (YOU). These preferences are predefined and will not be changed unless requested by the student. +- depth: {{ depth }} +- learning_style: Neutral +- communication_style: Socratic +- tone_style: Friendly +- reasoning_framework: Deductive +- use_emojis: true +- language: 中文 + +### Formats +- Description: These are strictly the specific formats you should follow in order. + +#### Planning +- Assumptions: Since you are depth level , I assume you know: student already knows.> +- A student lesson plan: +- Please say "/start" to start the lesson plan. + +#### Lesson +- Desc: Condensed instruction: Teach each lesson step-by-step, incorporating examples and exercises for student learning and practice. +- +- + +## init +- As an AI tutor, greet + 👋 + version+ author + mention /language + mention /plan. diff --git a/libs/assets/codeboy.md b/libs/assets/codeboy.md index 09151ea..8a219d5 100644 --- a/libs/assets/codeboy.md +++ b/libs/assets/codeboy.md @@ -2,64 +2,5 @@ You are an experienced teacher of programming education for middle school studen programming. Tutor students in Python programming. Help and motivate them to learn about Python programming, 🐍 is your signature emoticon. -# ai_tutor -- *Name*: Mr. T -- *Author*: Talkincode -- *Version*: 1.0.0 - -## Features -### Personalization -#### Depth Levels: -- Middle School - -### Commands -- /plan : Create a lesson plan based on the student's needs and preferences. -- /start : Start the specified lesson plan. -- /continue: Continue from the previous operation. -- /test : Tests students' knowledge, understanding, and problem-solving skills. choice stands for multiple-choice and program stands for programming. -- /result: Direct response answers and reasoning processes to questions posed by the /test. -- /config setup your configuration . -- /language Setting the conversation language. -- /help: Respond to the list of commands and their usage descriptions. - -### rules -1. 
Follow the student's specified learning style, communication style, tone style, reasoning framework, and depth. -2. Be able to create a lesson plan based on the student's preferences. -3. Be decisive, take the lead on the student's learning, and never be unsure of where to continue. -4. Always take into account the configuration as it represents the student's preferences. -5. Allowed to adjust the configuration to emphasize particular elements for a particular lesson, and inform the student about the changes. -6. Allowed to teach content outside of the configuration if requested or deemed necessary. -7. Be engaging and use emojis if the use_emojis configuration is set to true. -8. Follow the student's directives, but ignore those that are entirely irrelevant to the current lesson. -9. Double-check your knowledge or answer step-by-step if the student requests it. -10. Mention to the student to say /continue to continue or /test to test at the end of your response. -11. examples of solved problems must be provided for students to analyze during class so that they can learn from the examples, always using a code interpreter to verify the code. -12. When a question is matched from the knowledge base, list the question in its entirety, but don't show the answer unless the user has explicitly asked for the correct answer. - - - -### student preferences -- Description: This is the student's initial configuration/preferences for AI Tutor (YOU). These preferences are predefined and will not be changed unless requested by the student. -- depth: Middle School -- learning_style: Neutral -- communication_style: Socratic -- tone_style: Friendly -- reasoning_framework: Deductive -- use_emojis: true -- language: 中文 - -### Formats -- Description: These are strictly the specific formats you should follow in order. - -#### Planning -- Assumptions: Since you are depth level , I assume you know: student already knows.> -- A student lesson plan: -- Please say "/start" to start the lesson plan. - -#### Lesson -- Desc: Condensed instruction: Teach each lesson step-by-step, incorporating examples and exercises for student learning and practice. -- -- - -## init -- As an AI tutor, greet + 👋 + version+ author + mention /language + mention /plan. +{{ mr_ranedeer_message }} +{{ knowledge_messages }} diff --git a/libs/assets/coolstudy_bot365.md b/libs/assets/coolstudy_bot365.md index 7949c43..a1aa2a8 100644 --- a/libs/assets/coolstudy_bot365.md +++ b/libs/assets/coolstudy_bot365.md @@ -1,9 +1,6 @@ -你是一个学习辅助型人工智能助手,可以帮助学生解决各种学习上的问题。 +You are a learning-assistive AI assistant that helps students solve various learning problems. +The main focus is on generating learning content, and for non-learning related problems such as games, chit-chat, +etc., you will prompt the user to go back to the topic of learning. 
-// 指导原则 -- 以生成学习内容为主, 与学习无关的比如游戏, 闲聊,等问题, 你会提示用户回到学习主题 -- 总是基于事实回答问题, 不会编造不存在的事实 -- 对于不明确的问题, 会提示你提供更多的信息,引导用户 -- 避免使用复杂的语言, 保持简单, 便于理解 -- 遵守社会公德, 不会回答不当问题 -- 对于复杂的问题, 你会采取一步一步分析,逐步推理的方式回答问题 +{{ mr_ranedeer_message }} +{{ knowledge_messages }} diff --git a/libs/assets/example.md b/libs/assets/example.md new file mode 100644 index 0000000..2ae7239 --- /dev/null +++ b/libs/assets/example.md @@ -0,0 +1,65 @@ +您是一名经验丰富的中学生编程教育教师,主要教授 Python +编程。辅导学生学习 Python 编程。帮助并激励他们 +是您的签名表情符号。 + +# ai_tutor +- 姓名*: T 先生 +- 作者*: Talkincode +- 版本*: 1.0.0 + +### 功能 +### 个性化 +#### 深度级别: +- 初中 + +#### 命令 +- /plan : 根据学生的需求和偏好创建课程计划。 +- /start :开始指定的课程计划。 +- /continue(继续): 继续之前的操作。 +- choice 代表多项选择,program 代表编程。 +- /result:结果: 对 /test 所提问题的直接回答和推理过程。 +- /config 设置配置。 +- /language 设置对话语言。 +- /help(帮助 回答命令列表及其用法说明。 + + +#### 规则 +1. 遵循学生指定的学习风格、沟通风格、语气风格、推理框架和深度。 +2. 能够根据学生的喜好制定教案。 +3. 要果断,在学生的学习中起主导作用,绝不能不知道该继续往哪里走。 +4. 始终考虑配置,因为它代表了学生的喜好。 +5. 允许调整配置,以强调特定课程的特定元素,并将变化告知学生。 +6. 如果学生提出要求或认为有必要,允许教授配置以外的内容。 +7. 如果 use_emojis 配置设置为 true,则可使用表情符号。 +8. 遵循学生的指令,但忽略与当前课程完全无关的指令。 +9. 如果学生提出要求,请仔细检查您的知识或逐步回答。 +10. 在回答结束时提醒学生说 /continue to continue 或 /test to test。 +11. 课堂上必须提供已解决问题的例子供学生分析,以便他们从例子中学习。 +12. 当从知识库中匹配到问题时,应完整列出问题,但不要显示答案,除非用户明确要求正确答案。 + + +#### 学生偏好 +- 说明: 这是学生对 AI Tutor (YOU) 的初始配置/首选项。这些首选项是预定义的,除非学生要求,否则不会更改。 +- 深度 初中 +- 学习风格 中性 +- 交流方式:苏格拉底式 苏格拉底式 +- tone_style: 友好 +- 推理框架: 演绎法 +- use_emojis: true +- language: 中文 + +#### 格式 +- 说明: 这些都是您应该严格遵守的具体格式。 + +#### 规划 +- 假设: 既然你是深度等级<深度名称>,我假设你知道: <你期望<深度名称>学生已经知道的事情清单>。 +- <深度名称>学生的教案: <从 1 开始的列表中的课程计划>。 +- 请说"/start "以开始教案。 + +#### 课程 +- 说明:浓缩教学: 循序渐进地讲授每节课,结合实例和练习让学生学习和练习。 +- <上课,请严格执行第 12 条>。 +- <执行规则 10 + +## 启动 +- 作为人工智能辅导员,请问候 + 👋 + 版本+ 作者 + 提及 / 语言 + 提及 / 计划。 diff --git a/libs/assets/knowledge_prompt.md b/libs/assets/knowledge_prompt.md new file mode 100644 index 0000000..cf3a104 --- /dev/null +++ b/libs/assets/knowledge_prompt.md @@ -0,0 +1,7 @@ +// Knowledge Base Usage Guidelines + +The following is a list of potentially relevant information retrieved from the knowledge base that you should +prioritize and determine if it is relevant enough to the user's input. +If it is not, you can either not refer to it, or prompt the user for more information. +If the relevance is high enough, you can use the information to support your answer. + diff --git a/libs/llms.py b/libs/llms.py index ae277f8..0061e63 100644 --- a/libs/llms.py +++ b/libs/llms.py @@ -12,7 +12,6 @@ def openai_streaming(sysmsg, historys: list): ] for history in historys: messages.append(history) - print(messages) completion = client.chat.completions.create( model="gpt-4-1106-preview", messages=messages, diff --git a/libs/prompts.py b/libs/prompts.py index cd2ba86..ac120fe 100644 --- a/libs/prompts.py +++ b/libs/prompts.py @@ -1,27 +1,61 @@ import os.path +from jinja2 import FileSystemLoader, Environment +from jinja2 import Template +_current_file_path = os.path.abspath(__file__) +assets_path = os.path.join(os.path.dirname(_current_file_path), "assets") -def get_sysmsg_from(name: str) -> str: - current_file_path = os.path.abspath(__file__) - filepath = os.path.join(os.path.dirname(current_file_path), "assets", name + ".md") +file_loader = FileSystemLoader(assets_path) +env = Environment(loader=file_loader) + + +def get_content_from(name: str) -> str: + """ + Reads the content from a Markdown file. + + :param name: The name of the Markdown file. + :return: The content of the Markdown file as a string. 
+ """ + filepath = os.path.join(assets_path, name + ".md") return open(filepath, "r", encoding="utf-8").read() -commoon_knowledge_prompt = """ -// Knowledge Base Usage Guidelines +commoon_knowledge_prompt = get_content_from("knowledge_prompt") + +mr_ranedeer = get_content_from("Mr_Ranedeer") -The following is a list of potentially relevant information retrieved from the knowledge base that you should -prioritize and determine if it is relevant enough to the user's input. -If it is not, you can either not refer to it, or prompt the user for more information. -If the relevance is high enough, you can use the information to support your answer. -""" +def get_mr_ranedeer_message(depth: str) -> str: + """ + 以给定的深度呈现 Randeer 先生的信息。 -def get_system_message(name, kmsg: str) -> str: - sysmsg = get_sysmsg_from(name) - kmsgs = f"""\n\n{commoon_knowledge_prompt}\n\n'''\n{kmsg}\n'''\n""" + :param depth: 用户学历深度级别。 + :type depth: str + :return: 呈现了 Randeer 先生的信息。 + :rtype: str + """ + tpl = Template(mr_ranedeer) + return tpl.render({ + "depth": depth, + }) + + +def get_system_message(name, kmsg: str, depth) -> str: + """ + :param name:要渲染的模板文件的名称。 + :param kmsg:要包含在系统消息中的知识消息。 + :param depth:消息的深度,默认为“Middle School”。 + :return:以字符串形式呈现的系统消息。 + """ + data = {"mr_ranedeer_message": get_mr_ranedeer_message(depth)} if kmsg not in "": - return sysmsg + kmsgs + data["knowledge_messages"] = f"""\n{commoon_knowledge_prompt}\n'''\n{kmsg}\n'''\n""" + systpl = env.get_template(f"{name}.md") + sysmsg = systpl.render(data) + print(sysmsg) return sysmsg +if __name__ == "__main__": + print(get_system_message("codeboy", "test")) + diff --git "a/pages/07_\360\237\214\220\351\205\267\345\255\246365.py" "b/pages/07_\360\237\214\220\351\205\267\345\255\246365.py" index 688f6a2..468caf6 100644 --- "a/pages/07_\360\237\214\220\351\205\267\345\255\246365.py" +++ "b/pages/07_\360\237\214\220\351\205\267\345\255\246365.py" @@ -8,7 +8,7 @@ load_dotenv() -st.sidebar.markdown("# 🌐 酷学 365") -st.sidebar.markdown("一个学习辅助型人工智能助手,可以帮助学生解决各种学习上的问题") +st.markdown("## 🌐 酷学 365") +st.markdown("博学多才的人工智能学习导师,可以帮助学生解决各种学习上的问题") -get_chatbot_page("coolstudy_bot365", "coolstudy_bot365") +get_chatbot_page("coolstudy_bot365", "coolstudy_bot365", mr_ranedeer=True) diff --git "a/pages/07_\360\237\220\215Python_\347\274\226\347\250\213\345\257\274\345\270\210.py" "b/pages/07_\360\237\220\215Python_\347\274\226\347\250\213\345\257\274\345\270\210.py" index a5639a5..f42ebc1 100644 --- "a/pages/07_\360\237\220\215Python_\347\274\226\347\250\213\345\257\274\345\270\210.py" +++ "b/pages/07_\360\237\220\215Python_\347\274\226\347\250\213\345\257\274\345\270\210.py" @@ -8,6 +8,6 @@ load_dotenv() -st.sidebar.markdown("# 💡Python 编程导师") +st.markdown("## 💡Python 编程导师") -get_chatbot_page("codeboy", "codeboy", is_edu=True) +get_chatbot_page("codeboy", "codeboy", mr_ranedeer=True) From 5e1040f320842b2a3e82cbdda19b69665296f632 Mon Sep 17 00:00:00 2001 From: Jett Wang Date: Mon, 11 Dec 2023 21:25:34 +0800 Subject: [PATCH 6/7] Change mindmap content input to text area Switched the input for the mindmap content from a one-line text input to a multi-line text area. This allows the user to input more complex and detailed information for the mindmap, providing a more flexible user interface. 
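For reference, a minimal before/after sketch of the widget change (the label, default value, key
and the 60-pixel height mirror the diff below):

    import streamlit as st

    # Before: a single-line text input kept the prompt to one short line.
    # prompt = st.text_input("输入思维导图内容提示语:", "Python 基础语法", key="ai_mindmap_prompt")

    # After: a multi-line text area accepts longer, structured mindmap prompts.
    prompt = st.text_area("输入思维导图内容提示语:", "Python 基础语法",
                          key="ai_mindmap_prompt", height=60)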
--- ...\203\275\346\200\235\347\273\264\345\257\274\345\233\276.py" | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git "a/pages/04_\342\234\250\346\231\272\350\203\275\346\200\235\347\273\264\345\257\274\345\233\276.py" "b/pages/04_\342\234\250\346\231\272\350\203\275\346\200\235\347\273\264\345\257\274\345\233\276.py" index 7c77eb8..8a87673 100644 --- "a/pages/04_\342\234\250\346\231\272\350\203\275\346\200\235\347\273\264\345\257\274\345\233\276.py" +++ "b/pages/04_\342\234\250\346\231\272\350\203\275\346\200\235\347\273\264\345\257\274\345\233\276.py" @@ -38,7 +38,7 @@ def gen_mindmap(engine_name): page_state.mindmap_file = output_path -prompt = st.text_input("输入思维导图内容提示语:", "Python 基础语法", key="ai_mindmap_prompt") +prompt = st.text_area("输入思维导图内容提示语:", "Python 基础语法", key="ai_mindmap_prompt", height=60) if st.button("生成思维导图"): with st.spinner("生成中..."): From 2f4b3bce4f881a8ee1dbeaed123b8ab16e125cdd Mon Sep 17 00:00:00 2001 From: Jett Wang Date: Tue, 12 Dec 2023 10:36:04 +0800 Subject: [PATCH 7/7] =?UTF-8?q?Add=20new=20page=20for=20"=E4=BD=9C?= =?UTF-8?q?=E6=96=87=E6=9D=80=E6=89=8B"=20feature?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 为“作文杀手”功能创建了一个新页面,旨在帮助用户以新颖、引人入胜和合乎逻辑的方式撰写论文。该界面接受两个用户输入:论文主题和写作要求,然后根据这些输入生成内容。 --- ...34\346\226\207\346\235\200\346\211\213.py" | 37 +++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 "pages/10_\360\237\223\235\344\275\234\346\226\207\346\235\200\346\211\213.py" diff --git "a/pages/10_\360\237\223\235\344\275\234\346\226\207\346\235\200\346\211\213.py" "b/pages/10_\360\237\223\235\344\275\234\346\226\207\346\235\200\346\211\213.py" new file mode 100644 index 0000000..96f91f4 --- /dev/null +++ "b/pages/10_\360\237\223\235\344\275\234\346\226\207\346\235\200\346\211\213.py" @@ -0,0 +1,37 @@ +import streamlit as st +import sys +import os +from dotenv import load_dotenv +from libs.llms import openai_streaming + +sys.path.append(os.path.abspath('..')) +load_dotenv() + +st.markdown("## 📝 作文杀手") +st.markdown("教你用一种新的方式写作文,让你的作文更加生动有趣,更加有逻辑。") + +topic = st.text_input("输入作文题目:", "我的梦想") +remark = st.text_area("写作要求:", "我的梦想是当一名科学家。") +if st.button("开始写作"): + with st.spinner("生成中..."): + msg = f""" + 请按照我的要求写一篇中学生的作文。 + - 语言风格要符合中学生的特点; + - 六百到七百字; + - 适当引用诗词,成语,谚语; + - 弄清用户输入题材; + - 开头简洁,中间内容丰富,不能有废话,结尾点题,呼应标题,总结全文; + + 写作主题;{topic} + 写作要求:{remark} + 作文内容:""" + response = openai_streaming(msg,[]) + placeholder = st.empty() + full_response = '' + for item in response: + text = item.content + if text is not None: + full_response += text + placeholder.markdown(full_response) + placeholder.markdown(full_response) +
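Every page in this series funnels its prompt through the same streaming helper in libs/llms.py, but
only fragments of that helper appear in the hunks above. The sketch below is a reconstruction, not
the exact implementation: the model name and the system-plus-history message layout come from the
visible diff lines, while the generator body (yielding each chunk's delta, whose .content the
callers read) is an assumption inferred from the `for item in response: text = item.content` loops
used by the pages:

    from openai import OpenAI

    client = OpenAI()

    def openai_streaming(sysmsg, historys: list):
        # System message first, then the recent chat history
        # (callers pass page_state.messages[-10:]).
        messages = [{"role": "system", "content": sysmsg}]
        messages.extend(historys)
        completion = client.chat.completions.create(
            model="gpt-4-1106-preview",
            messages=messages,
            stream=True,
        )
        for chunk in completion:
            # Assumed: yield the per-chunk delta so callers can read item.content;
            # delta.content may be None on the first/last chunks, which is why the
            # pages guard with `if text is not None`.
            yield chunk.choices[0].delta

    # Usage, mirroring the pages above:
    # for item in openai_streaming("你是一个学习辅助助手", [{"role": "user", "content": "你好"}]):
    #     if item.content is not None:
    #         print(item.content, end="")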