Merge pull request #82 from mit-submit/main
Initial Release for 801 Class Project
julius-heitkoetter authored Sep 29, 2023
2 parents 62fd447 + eca4127 commit efb0c92
Showing 20 changed files with 380 additions and 30 deletions.
70 changes: 70 additions & 0 deletions .github/workflows/prod-801-ci-cd.yaml
@@ -0,0 +1,70 @@
name: Deploy A2rchi Prod for 8.01
run-name: ${{ github.actor }} deploys A2rchi for 8.01 to prod
on:
  push:
    branches:
      - release-8.01
jobs:
  deploy-prod-system:
    runs-on: ubuntu-latest
    env:
      SSH_AUTH_SOCK: /tmp/ssh_agent.sock
    steps:
      # boilerplate message and pull repository to CI runner
      - run: echo "🎉 The job was automatically triggered by a ${{ github.event_name }} event."
      - uses: actions/checkout@v3
      - run: echo "The ${{ github.repository }} repository has been cloned to the runner."

      # setup SSH
      - name: Setup SSH
        run: |
          mkdir -p /home/runner/.ssh/
          echo "${{ secrets.SSH_PRIVATE_KEY_MDRUSSO }}" > /home/runner/.ssh/id_rsa_submit
          chmod 600 /home/runner/.ssh/id_rsa_submit
          echo "${{ secrets.SSH_SUBMIT_KNOWN_HOSTS }}" > ~/.ssh/known_hosts
          cp ${{ github.workspace }}/deploy/ssh_config /home/runner/.ssh/config
          ssh-agent -a $SSH_AUTH_SOCK > /dev/null
          ssh-add /home/runner/.ssh/id_rsa_submit

      # create secrets files for docker-compose
      - name: Create Secrets Files
        run: |
          mkdir -p ${{ github.workspace }}/deploy/prod-801/secrets/
          touch ${{ github.workspace }}/deploy/prod-801/secrets/flask_uploader_app_secret_key.txt
          echo "${{ secrets.PROD_FLASK_UPLOADER_APP_SECRET_KEY }}" >> ${{ github.workspace }}/deploy/prod-801/secrets/flask_uploader_app_secret_key.txt
          chmod 400 ${{ github.workspace }}/deploy/prod-801/secrets/flask_uploader_app_secret_key.txt
          touch ${{ github.workspace }}/deploy/prod-801/secrets/uploader_salt.txt
          echo "${{ secrets.PROD_UPLOADER_SALT }}" >> ${{ github.workspace }}/deploy/prod-801/secrets/uploader_salt.txt
          chmod 400 ${{ github.workspace }}/deploy/prod-801/secrets/uploader_salt.txt
          touch ${{ github.workspace }}/deploy/prod-801/secrets/openai_api_key.txt
          echo "${{ secrets.OPENAI_API_KEY }}" >> ${{ github.workspace }}/deploy/prod-801/secrets/openai_api_key.txt
          chmod 400 ${{ github.workspace }}/deploy/prod-801/secrets/openai_api_key.txt
          touch ${{ github.workspace }}/deploy/prod-801/secrets/hf_token.txt
          echo "${{ secrets.HF_TOKEN }}" >> ${{ github.workspace }}/deploy/prod-801/secrets/hf_token.txt
          chmod 400 ${{ github.workspace }}/deploy/prod-801/secrets/hf_token.txt

      # stop any existing docker compose that's running
      - name: Stop Docker Compose
        run: |
          ssh submit-t3desk 'bash -s' < ${{ github.workspace }}/deploy/prod-801/prod-801-stop.sh

      # copy repository to machine
      - name: Copy Repository
        run: |
          rsync -e ssh -r ${{ github.workspace }}/* --exclude .git/ --delete submit-t3desk:~/A2rchi-prod-801/

      # run deploy script
      - name: Run Deploy Script
        run: |
          ssh submit-t3desk 'bash -s' < ${{ github.workspace }}/deploy/prod-801/prod-801-install.sh

      # clean up secret files
      - name: Remove Secrets from Runner
        run: |
          rm ${{ github.workspace }}/deploy/prod-801/secrets/flask_uploader_app_secret_key.txt
          rm ${{ github.workspace }}/deploy/prod-801/secrets/uploader_salt.txt
          rm ${{ github.workspace }}/deploy/prod-801/secrets/openai_api_key.txt
          rm ${{ github.workspace }}/deploy/prod-801/secrets/hf_token.txt

      # print job status
      - run: echo "🍏 This job's status is ${{ job.status }}."
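The secrets files written above are intended for consumption by docker-compose on the deployment host. As a reference only, here is a minimal Python sketch of how a container-side service might read such a mounted secret; the /run/secrets mount point and the consuming code are assumptions, not part of this commit, and only the file names come from the workflow above.

# Sketch only: read a docker-compose secret file if it is mounted.
import os

def read_secret(name, secrets_dir="/run/secrets"):
    # secrets_dir is the docker-compose default mount point (assumption)
    path = os.path.join(secrets_dir, name)
    if not os.path.isfile(path):
        return None
    with open(path, "r") as f:
        return f.read().strip()

openai_api_key = read_secret("openai_api_key.txt")
if openai_api_key:
    os.environ["OPENAI_API_KEY"] = openai_api_key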
1 change: 1 addition & 0 deletions .gitignore
@@ -10,3 +10,4 @@ venv
 *.egg-info
 *sqlite_db
 .vscode
+801-content/
32 changes: 18 additions & 14 deletions A2rchi/chains/prompts.py
@@ -1,27 +1,31 @@
 # flake8: noqa
 from langchain.prompts.prompt import PromptTemplate
+from A2rchi.utils.config_loader import Config_Loader
 
-condense_history_template = """Given the following conversation between you (the AI named A2rchi), a human user who needs help, and an expert, and a follow up question, rephrase the follow up question to be a standalone question, in its original language.
+config = Config_Loader().config["chains"]["prompts"]
 
-Chat History:
-{chat_history}
-Follow Up Input: {question}
-Standalone question:"""
+def read_prompt(prompt_filepath, is_condense_prompt=False, is_main_prompt=False):
+    with open(prompt_filepath, "r") as f:
+        raw_prompt = f.read()
 
-prompt_template = """You are a conversational chatbot named A2rchi who helps people navigate a computing resource named subMIT. You will be provided context to help you answer their questions.
-Using your linux and computing knowledge, answer the question at the end. Unless otherwise indicated, assume the users are not well versed computing.
-Please do not assume that subMIT machines have anything installed on top of native linux except if the context mentions it.
-If you don't know, say "I don't know", if you need to ask a follow up question, please do.
+    prompt = ""
+    for line in raw_prompt.split("\n"):
+        if len(line.lstrip()) > 0 and line.lstrip()[0:1] != "#":
+            prompt += line + "\n"
 
-Context: {context} Additionally, it is always preferred to use conda, if possible.
+    if is_condense_prompt and ("{chat_history}" not in prompt or "{question}" not in prompt):
+        raise ValueError("""Condensing prompt must contain \"{chat_history}\" and \"{question}\" tags. Instead, found prompt to be:
+""" + prompt)
+    if is_main_prompt and ("{context}" not in prompt or "{question}" not in prompt):
+        raise ValueError("""Main prompt must contain \"{context}\" and \"{question}\" tags. Instead, found prompt to be:
+""" + prompt)
 
-Question: {question}
-Helpful Answer:"""
+    return prompt
 
 QA_PROMPT = PromptTemplate(
-    template=prompt_template, input_variables=["context", "question"]
+    template=read_prompt(config["MAIN_PROMPT"], is_main_prompt=True), input_variables=["context", "question"]
 )
 
 CONDENSE_QUESTION_PROMPT = PromptTemplate(
-    template=condense_history_template, input_variables=["chat_history", "question"]
+    template=read_prompt(config["CONDENSING_PROMPT"], is_condense_prompt=True), input_variables=["chat_history", "question"]
 )
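To illustrate the new behaviour, here is a standalone sketch of the comment-stripping and tag validation that read_prompt performs; the logic is re-implemented locally so it runs without the A2rchi config, and the real function lives in A2rchi/chains/prompts.py.

# Standalone re-implementation of read_prompt's core logic, for illustration only.
def strip_prompt_comments(raw_prompt):
    prompt = ""
    for line in raw_prompt.split("\n"):
        # keep non-empty lines that do not start with "#" (after leading whitespace)
        if len(line.lstrip()) > 0 and line.lstrip()[0:1] != "#":
            prompt += line + "\n"
    return prompt

raw = "# header comment, dropped\nContext: {context}\n\nQuestion: {question}\nHelpful Answer:\n"
template = strip_prompt_comments(raw)
# a main prompt must still contain both tags, mirroring the ValueError check above
assert "{context}" in template and "{question}" in template
print(template)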
2 changes: 1 addition & 1 deletion A2rchi/interfaces/chat_app/app.py
@@ -153,7 +153,7 @@ def __init__(self, app, **configs):
         CORS(self.app)
 
         # add endpoints for flask app
-        self.add_endpoint('/get_chat_response', 'get_chat_response', self.get_chat_response, methods=["POST"])
+        self.add_endpoint('/api/get_chat_response', 'get_chat_response', self.get_chat_response, methods=["POST"])
         self.add_endpoint('/', '', self.index)
         self.add_endpoint('/terms', 'terms', self.terms)
 
2 changes: 1 addition & 1 deletion A2rchi/interfaces/chat_app/static/script.js
@@ -42,7 +42,7 @@ const refreshChat = async () => {
 }
 
 const getChatResponse = async (incomingChatDiv) => {
-    const API_URL = "http://0.0.0.0:7861/get_chat_response";
+    const API_URL = "http://t3desk019.mit.edu:7861/api/get_chat_response";
     const pElement = document.createElement("div");
 
     // Define the properties and data for the API request
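Together with the Flask change above, the chat API is now served under the /api prefix on the t3desk019 host. A quick hypothetical smoke test of the renamed endpoint in Python follows; the JSON payload keys are placeholders for illustration and are not defined by this diff.

# Hypothetical smoke test of the renamed endpoint; the payload keys below are
# placeholders, not the actual request schema used by the chat app.
import requests

resp = requests.post(
    "http://t3desk019.mit.edu:7861/api/get_chat_response",
    json={"conversation": [["User", "How do I log in to subMIT?"]]},  # placeholder payload
    timeout=60,
)
print(resp.status_code)
print(resp.text)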
2 changes: 1 addition & 1 deletion A2rchi/interfaces/chat_app/static/script.js-template
@@ -42,7 +42,7 @@ const refreshChat = async () => {
 }
 
 const getChatResponse = async (incomingChatDiv) => {
-    const API_URL = "http://0.0.0.0:XX-HTTP_PORT-XX/get_chat_response";
+    const API_URL = "http://t3desk019.mit.edu:XX-HTTP_PORT-XX/api/get_chat_response";
     const pElement = document.createElement("div");
 
     // Define the properties and data for the API request
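The -template variant keeps the XX-HTTP_PORT-XX placeholder so the port can be filled in at deploy time. The substitution mechanism itself is not shown in this commit; the following is only a minimal sketch of what such a render step could look like, assuming a plain string replacement and the paths used in this repository.

# Assumed render step: replace the XX-HTTP_PORT-XX placeholder with the
# configured chat_app port. Mechanism and invocation point are illustrative only.
port = 7861  # e.g. interfaces.chat_app.PORT from the config

with open("A2rchi/interfaces/chat_app/static/script.js-template", "r") as f:
    rendered = f.read().replace("XX-HTTP_PORT-XX", str(port))

with open("A2rchi/interfaces/chat_app/static/script.js", "w") as f:
    f.write(rendered)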
4 changes: 2 additions & 2 deletions A2rchi/utils/config_loader.py
@@ -16,9 +16,9 @@ def load_config(self):
         """
         Small function for loading the config.yaml file
         """
-        prod_or_dev = os.getenv("PROD_OR_DEV")
+        env = os.getenv("RUNTIME_ENV")
         try:
-            with open(f"./config/{prod_or_dev}-config.yaml", "r") as f:
+            with open(f"./config/{env}-config.yaml", "r") as f:
                 config = yaml.load(f, Loader=yaml.FullLoader)
 
             # change the model class parameter from a string to an actual class
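With this rename, the RUNTIME_ENV environment variable (rather than PROD_OR_DEV) selects which config/<env>-config.yaml file is loaded, which is what lets the new prod-801 deployment pick up config/prod-801-config.yaml. A standalone sketch of the selection logic follows; the real code lives in Config_Loader.load_config, and this snippet assumes it is run from the repository root.

# Standalone illustration of config selection by RUNTIME_ENV.
import os
import yaml

os.environ.setdefault("RUNTIME_ENV", "prod-801")  # e.g. dev, prod, prod-801
env = os.getenv("RUNTIME_ENV")

with open(f"./config/{env}-config.yaml", "r") as f:
    config = yaml.load(f, Loader=yaml.FullLoader)

print(config["chains"]["prompts"]["MAIN_PROMPT"])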
2 changes: 1 addition & 1 deletion A2rchi/utils/scraper.py
@@ -59,7 +59,7 @@ def collect_urls_from_lists(self):
                 data = f.read()
 
             for line in data.split("\n"):
-                if len(line) > 0 and line[0] != '#':
+                if len(line.lstrip()) > 0 and line.lstrip()[0:1] != "#":
                     urls.append(line)
 
         return urls
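The scraper now tolerates leading whitespace and "#" comment lines in the URL list files, matching the filtering used by read_prompt. A small standalone illustration, with placeholder URLs:

# Illustration of the new, comment-tolerant parsing of an input .list file.
data = (
    "# sources for the 8.01 deployment (comment line, skipped)\n"
    "\n"
    "https://example.mit.edu/page1\n"
    "   # indented comment, also skipped\n"
    "https://example.mit.edu/page2\n"
)

urls = []
for line in data.split("\n"):
    if len(line.lstrip()) > 0 and line.lstrip()[0:1] != "#":
        urls.append(line)

print(urls)  # ['https://example.mit.edu/page1', 'https://example.mit.edu/page2']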
5 changes: 5 additions & 0 deletions config/dev-config.yaml
@@ -32,6 +32,11 @@ chains:
       - User
       - A2rchi
       - Expert
+  prompts:
+    # prompt that serves to condense a history and a question into a single question
+    CONDENSING_PROMPT: config/prompts/condense.prompt
+    # main prompt which takes in a single question and a context.
+    MAIN_PROMPT: config/prompts/submit.prompt
   chain:
     # pick one of the models listed in the model class map below
     MODEL_NAME: OpenAILLM # LlamaLLM
110 changes: 110 additions & 0 deletions config/prod-801-config.yaml
@@ -0,0 +1,110 @@
global:
  TRAINED_ON: "8.01"  # used to create name of the specific version of a2rchi we're using
  DATA_PATH: "/root/data/"
  ACCOUNTS_PATH: "/root/.accounts/"
  LOCAL_VSTORE_PATH: "/root/data/vstore/"
  ACCEPTED_FILES:
    - ".txt"
    - ".html"
    - ".pdf"

interfaces:
  chat_app:
    PORT: 7861
    EXTERNAL_PORT: 7683
    HOST: "0.0.0.0"  # either "0.0.0.0" (for public) or "127.0.0.1" (for internal)
    HOSTNAME: "ppc.mit.edu"  # careful, this is used for the chat service
    template_folder: "/root/A2rchi/A2rchi/interfaces/chat_app/templates"
    static_folder: "/root/A2rchi/A2rchi/interfaces/chat_app/static"
  uploader_app:
    PORT: 5001
    HOST: "0.0.0.0"  # either "0.0.0.0" (for public) or "127.0.0.1" (for internal)
    template_folder: "/root/A2rchi/A2rchi/interfaces/uploader_app/templates"

chains:
  input_lists:
    - empty.list
    - miscellanea.list
  base:
    # roles that A2rchi knows about
    ROLES:
      - User
      - A2rchi
      - Expert
  prompts:
    # prompt that serves to condense a history and a question into a single question
    CONDENSING_PROMPT: config/prompts/condense.prompt
    # main prompt which takes in a single question and a context.
    MAIN_PROMPT: config/prompts/801.prompt
  chain:
    # pick one of the models listed in the model class map below
    MODEL_NAME: OpenAILLM
    # map of all the class models and their keyword arguments
    MODEL_CLASS_MAP:
      OpenAILLM:
        class: OpenAILLM
        kwargs:
          model_name: gpt-4
          temperature: 1
      DumbLLM:
        class: DumbLLM
        kwargs:
          filler: null
      LlamaLLM:
        class: LlamaLLM
        kwargs:
          base_model: "meta-llama/Llama-2-7b-chat-hf"  # the location of the model (ex. meta-llama/Llama-2-70b)
          peft_model: null  # the location of the finetuning of the model; can be null
          enable_salesforce_content_safety: True  # enable safety check with Salesforce safety flan t5
          quantization: True  # enables 8-bit quantization
          max_new_tokens: 4096  # the maximum number of tokens to generate
          seed: null  # seed value for reproducibility
          do_sample: True  # whether or not to use sampling; use greedy decoding otherwise
          min_length: null  # the minimum length of the sequence to be generated (input prompt + min_new_tokens)
          use_cache: True  # [optional] whether the model should use the past key/values attentions (if applicable to the model) to speed up decoding
          top_p: .9  # [optional] if set to float < 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation
          temperature: .6  # [optional] the value used to modulate the next token probabilities
          top_k: 50  # [optional] the number of highest probability vocabulary tokens to keep for top-k filtering
          repetition_penalty: 1.0  # the parameter for repetition penalty; 1.0 means no penalty
          length_penalty: 1  # [optional] exponential penalty to the length that is used with beam-based generation
          max_padding_length: null  # the max padding length to be used with tokenizer padding of the prompts
  chain_update_time: 10  # the amount of time (in seconds) which passes between when the chain updates to the newest version of the vectorstore

utils:
  cleo:
    cleo_update_time: 10
  mailbox:
    IMAP4_PORT: 143
    mailbox_update_time: 10
  data_manager:
    CHUNK_SIZE: 1000
    CHUNK_OVERLAP: 0
    use_HTTP_chromadb_client: True  # recommended: True (use http client for the chromadb vectorstore?)
    # use_HTTP_chromadb_client: False
    vectordb_update_time: 10
    chromadb_host: chromadb-prod-801
    chromadb_port: 8000
    collection_name: "prod_801_collection"  # unique in case vector stores are ever combined
    reset_collection: True  # reset the entire collection each time it is accessed by a new data manager instance
  embeddings:
    # choose one embedding from list below
    EMBEDDING_NAME: OpenAIEmbeddings
    # list of possible embeddings to use in vectorstore
    EMBEDDING_CLASS_MAP:
      OpenAIEmbeddings:
        class: OpenAIEmbeddings
        kwargs:
          model: text-embedding-ada-002
        similarity_score_reference: 0.4
      HuggingFaceEmbeddings:
        class: HuggingFaceEmbeddings
        kwargs:
          model_name: "sentence-transformers/all-mpnet-base-v2"
          model_kwargs:
            device: 'cpu'
          encode_kwargs:
            normalize_embeddings: True
        similarity_score_reference: 0.9
  scraper:
    reset_data: True  # delete websites and sources.yml in data folder
    verify_urls: False  # should be true when possible
    enable_warnings: False  # keeps output clean if verify == False
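The chain section of this config follows a name-plus-class-map pattern: MODEL_NAME selects an entry in MODEL_CLASS_MAP, whose class and kwargs are then used to build the model. A hedged sketch of how that lookup typically resolves is below; the values are copied from the YAML above, while the resolve step itself is an assumption (the actual instantiation happens inside A2rchi's chain code, not shown in this diff).

# Illustration of the MODEL_NAME / MODEL_CLASS_MAP lookup pattern used above.
chain_cfg = {
    "MODEL_NAME": "OpenAILLM",
    "MODEL_CLASS_MAP": {
        "OpenAILLM": {"class": "OpenAILLM", "kwargs": {"model_name": "gpt-4", "temperature": 1}},
        "DumbLLM": {"class": "DumbLLM", "kwargs": {"filler": None}},
    },
}

selected = chain_cfg["MODEL_NAME"]
entry = chain_cfg["MODEL_CLASS_MAP"][selected]
print(f"would instantiate {entry['class']} with kwargs {entry['kwargs']}")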
5 changes: 5 additions & 0 deletions config/prod-config.yaml
@@ -32,6 +32,11 @@ chains:
       - User
       - A2rchi
       - Expert
+  prompts:
+    # prompt that serves to condense a history and a question into a single question
+    CONDENSING_PROMPT: config/prompts/condense.prompt
+    # main prompt which takes in a single question and a context.
+    MAIN_PROMPT: config/prompts/submit.prompt
   chain:
     # pick one of the models listed in the model class map below
     MODEL_NAME: OpenAILLM
16 changes: 16 additions & 0 deletions config/prompts/801.prompt
@@ -0,0 +1,16 @@
# Prompt used to query the LLM with the appropriate context and question.
# This prompt is specific to 8.01 taught at MIT and likely will not perform well for other applications, where it is recommended to write your own prompt and change it in the config.
#
# All final prompts must have the following tags in them, which will be filled with the appropriate information:
# {question}
# {context}
#
You are a conversational chatbot and teaching assistant named A2rchi who helps students taking Classical Mechanics 1 at MIT (also called 8.01). You will be provided context to help you answer their questions.
Using your physics, math, and problem solving knowledge, answer the question at the end. Unless otherwise indicated, assume the users know high school level physics.
Since you are a teaching assistant, please try to give thorough answers to questions with explanations, instead of just giving the answer.
If you don't know, say "I don't know". It is extremely important you only give correct answers. If you need to ask a follow up question, please do.

Context: {context}

Question: {question}
Helpful Answer:
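Once the comment header is stripped by read_prompt, the remaining text becomes the template behind QA_PROMPT. A short sketch of how such a prompt body is wrapped in a LangChain PromptTemplate and filled in follows; the context and question values are invented for illustration.

# Illustration: wrapping a stripped prompt body in a PromptTemplate, as
# A2rchi/chains/prompts.py does for QA_PROMPT. Example values are invented.
from langchain.prompts.prompt import PromptTemplate

template = (
    "You are a conversational chatbot and teaching assistant named A2rchi...\n"
    "\n"
    "Context: {context}\n"
    "\n"
    "Question: {question}\n"
    "Helpful Answer:"
)

qa_prompt = PromptTemplate(template=template, input_variables=["context", "question"])
print(qa_prompt.format(
    context="Newton's second law: F = m * a.",
    question="What force accelerates a 2 kg mass at 3 m/s^2?",
))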
13 changes: 13 additions & 0 deletions config/prompts/condense.prompt
@@ -0,0 +1,13 @@
# Prompt used to condense a chat history and a follow-up question into a standalone question.
# This is a very general prompt for condensing histories, so for base installs it will not need to be modified.
#
# All condensing prompts must have the following tags in them, which will be filled with the appropriate information:
# {chat_history}
# {question}
#
Given the following conversation between you (the AI named A2rchi), a human user who needs help, and an expert, and a follow up question, rephrase the follow up question to be a standalone question, in its original language.

Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:
16 changes: 16 additions & 0 deletions config/prompts/submit.prompt
@@ -0,0 +1,16 @@
# Prompt used to query the LLM with the appropriate context and question.
# This prompt is specific to subMIT and likely will not perform well for other applications, where it is recommended to write your own prompt and change it in the config.
#
# All final prompts must have the following tags in them, which will be filled with the appropriate information:
# {question}
# {context}
#
You are a conversational chatbot named A2rchi who helps people navigate a computing resource named subMIT. You will be provided context to help you answer their questions.
Using your linux and computing knowledge, answer the question at the end. Unless otherwise indicated, assume the users are not well versed in computing.
Please do not assume that subMIT machines have anything installed on top of native linux except if the context mentions it.
If you don't know, say "I don't know". If you need to ask a follow up question, please do.

Context: {context} Additionally, it is always preferred to use conda, if possible.

Question: {question}
Helpful Answer: