From a0a2bac94a9661bc2c04a949a97018fe8d3b4b81 Mon Sep 17 00:00:00 2001 From: Julius Heitkoetter Date: Mon, 25 Sep 2023 10:53:48 -0400 Subject: [PATCH 01/18] adding terms and A2rchi type specification --- A2rchi/bin/service_chat.py | 2 ++ A2rchi/interfaces/chat_app/static/script.js | 4 +++- A2rchi/interfaces/chat_app/static/script.js-template | 4 +++- config/dev-config.yaml | 1 + config/prod-config.yaml | 1 + 5 files changed, 10 insertions(+), 2 deletions(-) diff --git a/A2rchi/bin/service_chat.py b/A2rchi/bin/service_chat.py index cb72512..8c54c28 100644 --- a/A2rchi/bin/service_chat.py +++ b/A2rchi/bin/service_chat.py @@ -11,6 +11,7 @@ os.environ['OPENAI_API_KEY'] = read_secret("OPENAI_API_KEY") os.environ['HUGGING_FACE_HUB_TOKEN'] = read_secret("HUGGING_FACE_HUB_TOKEN") config = Config_Loader().config["interfaces"]["chat_app"] +global_config = Config_Loader().config["global"] print(f"Starting Chat Service with (host, port): ({config['HOST']}, {config['PORT']})") def generate_script(config): @@ -22,6 +23,7 @@ def generate_script(config): template = f.read() filled_template = template.replace('XX-HTTP_PORT-XX', str(config["PORT"])) + filled_template = filled_template.replace('XX-TRAINED_ON-XX', str(global_config["TRAINED_ON"])) script_file = os.path.join(config["static_folder"], "script.js") with open(script_file, "w") as f: diff --git a/A2rchi/interfaces/chat_app/static/script.js b/A2rchi/interfaces/chat_app/static/script.js index 2b7ab16..5cf08a7 100644 --- a/A2rchi/interfaces/chat_app/static/script.js +++ b/A2rchi/interfaces/chat_app/static/script.js @@ -18,7 +18,9 @@ const loadDataFromLocalstorage = () => { const defaultText = `

A2rchi

-

Start a conversation and explore the power of A2rchi.
Your chat history will be displayed here.

+

Start a conversation and explore the power of A2rchi, specially trained on subMIT.
+ Your chat history will be displayed here.

+ By using this website, you agree to the terms and conditions outlined in our Terms and Conditions statement. Please take a moment to review them.

` chatContainer.innerHTML = localStorage.getItem("all-chats") || defaultText; diff --git a/A2rchi/interfaces/chat_app/static/script.js-template b/A2rchi/interfaces/chat_app/static/script.js-template index 75b994c..c1563b2 100644 --- a/A2rchi/interfaces/chat_app/static/script.js-template +++ b/A2rchi/interfaces/chat_app/static/script.js-template @@ -18,7 +18,9 @@ const loadDataFromLocalstorage = () => { const defaultText = `

A2rchi

-

Start a conversation and explore the power of A2rchi.
Your chat history will be displayed here.

+

Start a conversation and explore the power of A2rchi, specially trained on XX-TRAINED_ON-XX.
+ Your chat history will be displayed here.

+ By using this website, you agree to the terms and conditions outlined in our Terms and Conditions statement. Please take a moment to review them.

` chatContainer.innerHTML = localStorage.getItem("all-chats") || defaultText; diff --git a/config/dev-config.yaml b/config/dev-config.yaml index 1ce406d..268858c 100644 --- a/config/dev-config.yaml +++ b/config/dev-config.yaml @@ -1,4 +1,5 @@ global: + TRAINED_ON: "subMIT (development)" #used to create name of the specific version of a2rchi we're using DATA_PATH: "/root/data/" ACCOUNTS_PATH: "/root/.accounts/" LOCAL_VSTORE_PATH: "/root/data/vstore/" diff --git a/config/prod-config.yaml b/config/prod-config.yaml index 7a0b62c..9685a53 100644 --- a/config/prod-config.yaml +++ b/config/prod-config.yaml @@ -1,4 +1,5 @@ global: + TRAINED_ON: "subMIT" #used to create name of the specific version of a2rchi we're using DATA_PATH: "/root/data/" ACCOUNTS_PATH: "/root/.accounts/" LOCAL_VSTORE_PATH: "/root/data/vstore/" From 52ea35ebb278bb074310ed395b8a5401c47426ca Mon Sep 17 00:00:00 2001 From: Julius Heitkoetter Date: Mon, 25 Sep 2023 17:23:08 -0400 Subject: [PATCH 02/18] add endpoint --- A2rchi/interfaces/chat_app/app.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/A2rchi/interfaces/chat_app/app.py b/A2rchi/interfaces/chat_app/app.py index 48a0a86..f07f0a7 100644 --- a/A2rchi/interfaces/chat_app/app.py +++ b/A2rchi/interfaces/chat_app/app.py @@ -159,6 +159,7 @@ def __init__(self, app, **configs): # add endpoints for flask app self.add_endpoint('/get_chat_response', 'get_chat_response', self.get_chat_response, methods=["POST"]) self.add_endpoint('/', '', self.index) + self.add_endpoint('/terms', 'terms', self.terms) def configs(self, **configs): for config, value in configs: @@ -197,3 +198,6 @@ def get_chat_response(self): def index(self): return render_template('index.html') + + def terms(self): + return render_template('terms.html') From 231df548b7d50956bc83969bf3082a00776d1872 Mon Sep 17 00:00:00 2001 From: Julius Heitkoetter Date: Mon, 25 Sep 2023 17:26:58 -0400 Subject: [PATCH 03/18] added html template --- .../interfaces/chat_app/templates/terms.html | 40 
+++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 A2rchi/interfaces/chat_app/templates/terms.html diff --git a/A2rchi/interfaces/chat_app/templates/terms.html b/A2rchi/interfaces/chat_app/templates/terms.html new file mode 100644 index 0000000..ce6a458 --- /dev/null +++ b/A2rchi/interfaces/chat_app/templates/terms.html @@ -0,0 +1,40 @@ + + + +Terms and Conditions + + +

Terms and Conditions

+ +

Welcome to A2rchi. By using this website, you agree to comply with and be bound by the following terms and conditions of use. Please review these terms carefully before using the website. If you do not agree to these terms, you should not use the website. +

+ +

1. Data Usage and Privacy:

+ +

1.1 We may collect and store the questions you ask the chat bot for research and improvement purposes. However, we do not collect any additional metadata, and we will never share this information with any third party.</p>

+ +

2. Usage Restrictions:

+ +

2.1 You agree to use the chat bot provided on the website only for its intended purpose and not for any unauthorized or unlawful activities.

+ +

2.2 You agree not to use the chat bot to generate spam or any automated content that disrupts the service or violates the rights of others.

+ +

3. Termination of Use:

+ +

3.1 We reserve the right to terminate your access to the chat bot and the website at our discretion, without notice, if you violate these terms and conditions.

+ +

4. Changes to Terms and Conditions:

+ +

4.1 We may revise these terms and conditions at any time without notice. By continuing to use the website after such changes, you agree to be bound by the revised terms.

+ +

5. Disclaimer:

+ +

5.1 The chat bot provided on the website is for informational purposes only. We do not guarantee the accuracy, completeness, or reliability of the information provided by the chat bot.

+ +

6. Contact Us:

+ +

6.1 If you have any questions or concerns regarding these terms and conditions, please contact us at a2rchi@mit.edu.

+ +

By using the website, you acknowledge that you have read, understood, and agree to be bound by these terms and conditions. Your continued use of the website constitutes your acceptance of these terms.

+ + From 928670fbc6b037833a7686617049739c59c0528a Mon Sep 17 00:00:00 2001 From: julius-heitkoetter <97237339+julius-heitkoetter@users.noreply.github.com> Date: Mon, 25 Sep 2023 17:40:04 -0400 Subject: [PATCH 04/18] shortening terms and conditions statement on homepage --- A2rchi/interfaces/chat_app/static/script.js-template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/A2rchi/interfaces/chat_app/static/script.js-template b/A2rchi/interfaces/chat_app/static/script.js-template index c1563b2..034b2c6 100644 --- a/A2rchi/interfaces/chat_app/static/script.js-template +++ b/A2rchi/interfaces/chat_app/static/script.js-template @@ -20,7 +20,7 @@ const loadDataFromLocalstorage = () => {

A2rchi

Start a conversation and explore the power of A2rchi, specially trained on XX-TRAINED_ON-XX.
Your chat history will be displayed here.

- By using this website, you agree to the terms and conditions outlined in our Terms and Conditions statement. Please take a moment to review them.

+ By using this website, you agree to the terms and conditions.

` chatContainer.innerHTML = localStorage.getItem("all-chats") || defaultText; From 2231d9b72b9c8fb1b469e6c5787a4f0ba5763308 Mon Sep 17 00:00:00 2001 From: julius-heitkoetter <97237339+julius-heitkoetter@users.noreply.github.com> Date: Mon, 25 Sep 2023 17:41:38 -0400 Subject: [PATCH 05/18] update script.js to match template --- A2rchi/interfaces/chat_app/static/script.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/A2rchi/interfaces/chat_app/static/script.js b/A2rchi/interfaces/chat_app/static/script.js index 5cf08a7..e3e5070 100644 --- a/A2rchi/interfaces/chat_app/static/script.js +++ b/A2rchi/interfaces/chat_app/static/script.js @@ -20,7 +20,7 @@ const loadDataFromLocalstorage = () => {

A2rchi

Start a conversation and explore the power of A2rchi, specially trained on subMIT.
Your chat history will be displayed here.

- By using this website, you agree to the terms and conditions outlined in our Terms and Conditions statement. Please take a moment to review them.

+ By using this website, you agree to the terms and conditions.

` chatContainer.innerHTML = localStorage.getItem("all-chats") || defaultText; From 0b0f800ad741477fbeffdfeefaad527641259cf2 Mon Sep 17 00:00:00 2001 From: Julius Heitkoetter Date: Wed, 27 Sep 2023 07:50:55 -0400 Subject: [PATCH 06/18] fixed commenting logic --- A2rchi/utils/scraper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/A2rchi/utils/scraper.py b/A2rchi/utils/scraper.py index 364023a..9a93cae 100644 --- a/A2rchi/utils/scraper.py +++ b/A2rchi/utils/scraper.py @@ -59,7 +59,7 @@ def collect_urls_from_lists(self): data = f.read() for line in data.split("\n"): - if len(line) > 0 and line[0] != '#': + if len(line.lstrip())>0 and line.lstrip()[0:1] != "#": urls.append(line) return urls From cc6d99f2e6ea40705dcd42aeb532e51202e12d3a Mon Sep 17 00:00:00 2001 From: Julius Heitkoetter Date: Wed, 27 Sep 2023 07:51:25 -0400 Subject: [PATCH 07/18] added logic for pulling prompts from config --- A2rchi/chains/prompts.py | 32 ++++++++++++++++++-------------- config/dev-config.yaml | 3 +++ config/prod-config.yaml | 3 +++ 3 files changed, 24 insertions(+), 14 deletions(-) diff --git a/A2rchi/chains/prompts.py b/A2rchi/chains/prompts.py index e2c26c6..fdd8f1a 100644 --- a/A2rchi/chains/prompts.py +++ b/A2rchi/chains/prompts.py @@ -1,27 +1,31 @@ # flake8: noqa from langchain.prompts.prompt import PromptTemplate +from A2rchi.utils.config_loader import Config_Loader -condense_history_template = """Given the following conversation between you (the AI named A2rchi), a human user who needs help, and an expert, and a follow up question, rephrase the follow up question to be a standalone question, in its original language. 
+config = Config_Loader().config["chains"]["prompts"] -Chat History: -{chat_history} -Follow Up Input: {question} -Standalone question:""" +def read_prompt(prompt_name, is_condense_prompt=False, is_main_prompt=False): + with open(f"config/prompts/{prompt_name}", "r") as f: + raw_prompt = f.read() -prompt_template = """You are a conversational chatbot named A2rchi who helps people navigate a computing resource named subMIT. You will be provided context to help you answer their questions. -Using your linux and computing knowledge, answer the question at the end. Unless otherwise indicated, assume the users are not well versed computing. - Please do not assume that subMIT machines have anything installed on top of native linux except if the context mentions it. -If you don't know, say "I don't know", if you need to ask a follow up question, please do. + prompt = "" + for line in raw_prompt.split("\n"): + if len(line.lstrip())>0 and line.lstrip()[0:1] != "#": + prompt += line + "\n" -Context: {context} Additionally, it is always preferred to use conda, if possible. + if is_condense_prompt and ("{chat_history}" not in prompt or "{question}" not in prompt): + raise ValueError("""Condensing prompt must contain \"{chat_history}\" and \"{question}\" tags. Instead, found prompt to be: + """ + prompt) + if is_main_prompt and ("{context}" not in prompt or "{question}" not in prompt): + raise ValueError("""Condensing prompt must contain \"{context}\" and \"{question}\" tags. 
Instead, found prompt to be: + """ + prompt) -Question: {question} -Helpful Answer:""" + return prompt QA_PROMPT = PromptTemplate( - template=prompt_template, input_variables=["context", "question"] + template=read_prompt(config["MAIN_PROMPT"], is_main_prompt=True), input_variables=["context", "question"] ) CONDENSE_QUESTION_PROMPT = PromptTemplate( - template=condense_history_template, input_variables=["chat_history", "question"] + template=read_prompt(config["CONDENSING_PROMPT"], is_condense_prompt=True), input_variables=["chat_history", "question"] ) diff --git a/config/dev-config.yaml b/config/dev-config.yaml index acd1f7f..c4ce9e6 100644 --- a/config/dev-config.yaml +++ b/config/dev-config.yaml @@ -32,6 +32,9 @@ chains: - User - A2rchi - Expert + prompts: + CONDENSING_PROMPT: condense.prompt #name of the prompt (in config/prompts) that serves to condense a history and a question into a single question + MAIN_PROMPT: submit.prompt #name of the main prompt (in config/prompts) which takes in a single question and a context. chain: # pick one of the models listed in the model class map below MODEL_NAME: OpenAILLM # LlamaLLM diff --git a/config/prod-config.yaml b/config/prod-config.yaml index d919cb2..3344628 100644 --- a/config/prod-config.yaml +++ b/config/prod-config.yaml @@ -32,6 +32,9 @@ chains: - User - A2rchi - Expert + prompts: + CONDENSING_PROMPT: condense.prompt #name of the prompt (in config/prompts) that serves to condense a history and a question into a single question + MAIN_PROMPT: submit.prompt #name of the main prompt (in config/prompts) which takes in a single question and a context. 
chain: # pick one of the models listed in the model class map below MODEL_NAME: OpenAILLM From 8f4acd504434c8a7ea5a520b86cb815a0c37a236 Mon Sep 17 00:00:00 2001 From: Julius Heitkoetter Date: Wed, 27 Sep 2023 07:51:51 -0400 Subject: [PATCH 08/18] added prompts to config --- config/prompts/801.prompt | 16 ++++++++++++++++ config/prompts/condense.prompt | 13 +++++++++++++ config/prompts/submit.prompt | 16 ++++++++++++++++ 3 files changed, 45 insertions(+) create mode 100644 config/prompts/801.prompt create mode 100644 config/prompts/condense.prompt create mode 100644 config/prompts/submit.prompt diff --git a/config/prompts/801.prompt b/config/prompts/801.prompt new file mode 100644 index 0000000..f33be0d --- /dev/null +++ b/config/prompts/801.prompt @@ -0,0 +1,16 @@ +# Prompt used to qurery LLM with appropriate context and question. +# This prompt is specific to 8.01 taught at MIT and likely will not perform well for other applications, where it is recommeneded to write your own prompt and change it in the config +# +# All final promptsd must have the following tags in them, which will be filled with the appropriate information: +# {question} +# {context} +# +You are a conversational chatbot and teaching assisitant named A2rchi who helps students taking Classical Mechanics 1 at MIT (also called 8.01). You will be provided context to help you answer their questions. +Using your physics, math, and problem solving knowledge, answer the question at the end. Unless otherwise indicated, assume the users know high school level physics. +Since you are a teaching assisitant, please try to give throughou answers to questions with explanations, instead of just giving the answer. +If you don't know, say "I don't know". It is extremely important you only give correct answers. If you need to ask a follow up question, please do. 
+ +Context: {context} + +Question: {question} +Helpful Answer: \ No newline at end of file diff --git a/config/prompts/condense.prompt b/config/prompts/condense.prompt new file mode 100644 index 0000000..9552b9d --- /dev/null +++ b/config/prompts/condense.prompt @@ -0,0 +1,13 @@ +# Prompt used to condense a chat history and a follow up question into a stand alone question. +# This is a very general prompt for condensing histories, so for base installs it will not need to be modified +# +# All condensing prompts must have the following tags in them, which will be filled with the appropriate information: +# {chat_history} +# {question} +# +Given the following conversation between you (the AI named A2rchi), a human user who needs help, and an expert, and a follow up question, rephrase the follow up question to be a standalone question, in its original language. + +Chat History: +{chat_history} +Follow Up Input: {question} +Standalone question: \ No newline at end of file diff --git a/config/prompts/submit.prompt b/config/prompts/submit.prompt new file mode 100644 index 0000000..c43ca74 --- /dev/null +++ b/config/prompts/submit.prompt @@ -0,0 +1,16 @@ +# Prompt used to qurery LLM with appropriate context and question. +# This prompt is specific to subMIT and likely will not perform well for other applications, where it is recommeneded to write your own prompt and change it in the config +# +# All final promptsd must have the following tags in them, which will be filled with the appropriate information: +# {question} +# {context} +# +You are a conversational chatbot named A2rchi who helps people navigate a computing resource named subMIT. You will be provided context to help you answer their questions. +Using your linux and computing knowledge, answer the question at the end. Unless otherwise indicated, assume the users are not well versed computing. 
+ Please do not assume that subMIT machines have anything installed on top of native linux except if the context mentions it. +If you don't know, say "I don't know", if you need to ask a follow up question, please do. + +Context: {context} Additionally, it is always preferred to use conda, if possible. + +Question: {question} +Helpful Answer: \ No newline at end of file From 79783d70423d1d1b762b731d8e794e5bcf7ef103 Mon Sep 17 00:00:00 2001 From: Julius Heitkoetter Date: Wed, 27 Sep 2023 08:49:32 -0400 Subject: [PATCH 09/18] adding 8.01 to github workflows --- .github/workflows/prod-801-ci-cd.yaml | 70 +++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) create mode 100644 .github/workflows/prod-801-ci-cd.yaml diff --git a/.github/workflows/prod-801-ci-cd.yaml b/.github/workflows/prod-801-ci-cd.yaml new file mode 100644 index 0000000..b93cccb --- /dev/null +++ b/.github/workflows/prod-801-ci-cd.yaml @@ -0,0 +1,70 @@ +name: Deploy A2rchi Prod for 8.01 +run-name: ${{ github.actor }} deploys A2rchi for 8.01 to prod +on: + push: + branches: + - release-8.01 +jobs: + deploy-prod-system: + runs-on: ubuntu-latest + env: + SSH_AUTH_SOCK: /tmp/ssh_agent.sock + steps: + # boilerplate message and pull repository to CI runner + - run: echo "🎉 The job was automatically triggered by a ${{ github.event_name }} event." + - uses: actions/checkout@v3 + - run: echo "The ${{ github.repository }} repository has been cloned to the runner." 
+ + # setup SSH + - name: Setup SSH + run: | + mkdir -p /home/runner/.ssh/ + echo "${{ secrets.SSH_PRIVATE_KEY_MDRUSSO }}" > /home/runner/.ssh/id_rsa_submit + chmod 600 /home/runner/.ssh/id_rsa_submit + echo "${{ secrets.SSH_SUBMIT_KNOWN_HOSTS }}" > ~/.ssh/known_hosts + cp ${{ github.workspace }}/deploy/ssh_config /home/runner/.ssh/config + ssh-agent -a $SSH_AUTH_SOCK > /dev/null + ssh-add /home/runner/.ssh/id_rsa_submit + + # create secrets files for docker-compose + - name: Create Secrets Files + run: | + mkdir -p ${{ github.workspace }}/deploy/prod-801/secrets/ + touch ${{ github.workspace }}/deploy/prod-801/secrets/flask_uploader_app_secret_key.txt + echo "${{ secrets.PROD_FLASK_UPLOADER_APP_SECRET_KEY }}" >> ${{ github.workspace }}/deploy/prod-801/secrets/flask_uploader_app_secret_key.txt + chmod 400 ${{ github.workspace }}/deploy/prod-801/secrets/flask_uploader_app_secret_key.txt + touch ${{ github.workspace }}/deploy/prod-801/secrets/uploader_salt.txt + echo "${{ secrets.PROD_UPLOADER_SALT }}" >> ${{ github.workspace }}/deploy/prod-801/secrets/uploader_salt.txt + chmod 400 ${{ github.workspace }}/deploy/prod-801/secrets/uploader_salt.txt + touch ${{ github.workspace }}/deploy/prod-801/secrets/openai_api_key.txt + echo "${{ secrets.OPENAI_API_KEY }}" >> ${{ github.workspace }}/deploy/prod-801/secrets/openai_api_key.txt + chmod 400 ${{ github.workspace }}/deploy/prod-801/secrets/openai_api_key.txt + touch ${{ github.workspace }}/deploy/prod-801/secrets/hf_token.txt + echo "${{ secrets.HF_TOKEN }}" >> ${{ github.workspace }}/deploy/prod-801/secrets/hf_token.txt + chmod 400 ${{ github.workspace }}/deploy/prod-801/secrets/hf_token.txt + + # stop any existing docker compose that's running + - name: Stop Docker Compose + run: | + ssh submit-t3desk 'bash -s' < ${{ github.workspace }}/deploy/prod-801/prod-801-stop.sh + + # copy repository to machine + - name: Copy Repository + run: | + rsync -e ssh -r ${{ github.workspace}}/* --exclude .git/ --delete 
submit-t3desk:~/A2rchi-prod-801/ + + # run deploy script + - name: Run Deploy Script + run: | + ssh submit-t3desk 'bash -s' < ${{ github.workspace }}/deploy/prod/prod-801-install.sh + + # clean up secret files + - name: Remove Secrets from Runner + run: | + rm ${{ github.workspace }}/deploy/prod-801/secrets/flask_uploader_app_secret_key.txt + rm ${{ github.workspace }}/deploy/prod-801/secrets/uploader_salt.txt + rm ${{ github.workspace }}/deploy/prod-801/secrets/openai_api_key.txt + rm ${{ github.workspace }}/deploy/prod-801/secrets/hf_token.txt + + # print job status + - run: echo "🍏 This job's status is ${{ job.status }}." \ No newline at end of file From a3fe3d89f5349893c8b508c269b9919cc139b104 Mon Sep 17 00:00:00 2001 From: Julius Heitkoetter Date: Wed, 27 Sep 2023 08:50:11 -0400 Subject: [PATCH 10/18] create config files for 8.01 --- config/prod-801-config.yaml | 109 ++++++++++++++++++++++++++++++++++++ 1 file changed, 109 insertions(+) create mode 100644 config/prod-801-config.yaml diff --git a/config/prod-801-config.yaml b/config/prod-801-config.yaml new file mode 100644 index 0000000..0c80833 --- /dev/null +++ b/config/prod-801-config.yaml @@ -0,0 +1,109 @@ +global: + TRAINED_ON: "8.01" #used to create name of the specific version of a2rchi we're using + DATA_PATH: "/root/data/" + ACCOUNTS_PATH: "/root/.accounts/" + LOCAL_VSTORE_PATH: "/root/data/vstore/" + ACCEPTED_FILES: + -".txt" + -".html" + -".pdf" + +interfaces: + chat_app: + PORT: 7861 + EXTERNAL_PORT: 7683 + HOST: "0.0.0.0" # either "0.0.0.0" (for public) or "127.0.0.1" (for internal) + HOSTNAME: "ppc.mit.edu" # careful, this is used for the chat service + template_folder: "/root/A2rchi/A2rchi/interfaces/chat_app/templates" + static_folder: "/root/A2rchi/A2rchi/interfaces/chat_app/static" + uploader_app: + PORT: 5001 + HOST: "0.0.0.0" # either "0.0.0.0" (for public) or "127.0.0.1" (for internal) + template_folder: "/root/A2rchi/A2rchi/interfaces/uploader_app/templates" + +chains: + input_lists: + - 
empty.list + - submit.list + - miscellanea.list + base: + # roles that A2rchi knows about + ROLES: + - User + - A2rchi + - Expert + prompts: + CONDENSING_PROMPT: condense.prompt #name of the prompt (in config/prompts) that serves to condense a history and a question into a single question + MAIN_PROMPT: submit.prompt #name of the main prompt (in config/prompts) which takes in a single question and a context. + chain: + # pick one of the models listed in the model class map below + MODEL_NAME: OpenAILLM + # map of all the class models and their keyword arguments + MODEL_CLASS_MAP: + OpenAILLM: + class: OpenAILLM + kwargs: + model_name: gpt-4 + temperature: 1 + DumbLLM: + class: DumbLLM + kwargs: + filler: null + LlamaLLM: + class: LlamaLLM + kwargs: + base_model: "meta-llama/Llama-2-7b-chat-hf" #the location of the model (ex. meta-llama/Llama-2-70b) + peft_model: null #the location of the finetuning of the model. Can be none + enable_salesforce_content_safety: True # Enable safety check with Salesforce safety flan t5 + quantization: True #enables 8-bit quantization + max_new_tokens: 4096 #The maximum numbers of tokens to generate + seed: null #seed value for reproducibility + do_sample: True #Whether or not to use sampling ; use greedy decoding otherwise. + min_length: null #The minimum length of the sequence to be generated, input prompt + min_new_tokens + use_cache: True #[optional] Whether or not the model should use the past last key/values attentions Whether or not the model should use the past last key/values attentions (if applicable to the model) to speed up decoding. + top_p: .9 # [optional] If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation. + temperature: .6 # [optional] The value used to modulate the next token probabilities. + top_k: 50 # [optional] The number of highest probability vocabulary tokens to keep for top-k-filtering. 
+ repetition_penalty: 1.0 #The parameter for repetition penalty. 1.0 means no penalty. + length_penalty: 1 #[optional] Exponential penalty to the length that is used with beam-based generation. + max_padding_length: null # the max padding length to be used with tokenizer padding the prompts. + chain_update_time: 10 # the amount of time (in seconds) which passes between when the chain updates to the newest version of the vectorstore +utils: + cleo: + cleo_update_time: 10 + mailbox: + IMAP4_PORT: 143 + mailbox_update_time: 10 + data_manager: + CHUNK_SIZE: 1000 + CHUNK_OVERLAP: 0 + use_HTTP_chromadb_client: True # recommended: True (use http client for the chromadb vectorstore?) + # use_HTTP_chromadb_client: False + vectordb_update_time: 10 + chromadb_host: chromadb-prod-801 + chromadb_port: 8000 + collection_name: "prod_801_collection" #unique in case vector stores are ever combined. + reset_collection: True # reset the entire collection each time it is accessed by a new data manager instance + embeddings: + # choose one embedding from list below + EMBEDDING_NAME: OpenAIEmbeddings + # list of possible embeddings to use in vectorstore + EMBEDDING_CLASS_MAP: + OpenAIEmbeddings: + class: OpenAIEmbeddings + kwargs: + model: text-embedding-ada-002 + similarity_score_reference: 0.4 + HuggingFaceEmbeddings: + class: HuggingFaceEmbeddings + kwargs: + model_name: "sentence-transformers/all-mpnet-base-v2" + model_kwargs: + device: 'cpu' + encode_kwargs: + normalize_embeddings: True + similarity_score_reference: 0.9 + scraper: + reset_data: True # delete websites and sources.yml in data folder + verify_urls: False # should be true when possible + enable_warnings: False # keeps output clean if verify == False From 711a5279e56d2f8e338ab0259c206adef519adb9 Mon Sep 17 00:00:00 2001 From: Julius Heitkoetter Date: Wed, 27 Sep 2023 09:01:50 -0400 Subject: [PATCH 11/18] addeded deploy scripts --- deploy/prod-801/prod-801-compose.yaml | 78 +++++++++++++++++++++++++++ 
deploy/prod-801/prod-801-install.sh | 21 ++++++++ deploy/prod-801/prod-801-stop.sh | 5 ++ 3 files changed, 104 insertions(+) create mode 100644 deploy/prod-801/prod-801-compose.yaml create mode 100644 deploy/prod-801/prod-801-install.sh create mode 100644 deploy/prod-801/prod-801-stop.sh diff --git a/deploy/prod-801/prod-801-compose.yaml b/deploy/prod-801/prod-801-compose.yaml new file mode 100644 index 0000000..72bd2b4 --- /dev/null +++ b/deploy/prod-801/prod-801-compose.yaml @@ -0,0 +1,78 @@ +services: + chat-prod-801: + build: + context: ../.. + dockerfile: deploy/dockerfiles/Dockerfile-chat + depends_on: + chromadb-prod-801: + condition: service_healthy + environment: + PROD_OR_DEV: prod-801 + OPENAI_API_KEY_FILE: /run/secrets/openai_api_key + HUGGING_FACE_HUB_TOKEN_FILE: /run/secrets/hf_token + secrets: + - openai_api_key + - hf_token + volumes: + - a2rchi-prod-801-data:/root/data/ + ports: + - 7683:7861 # host:container + restart: always + + data-manager-prod-801: + build: + context: ../.. + dockerfile: deploy/dockerfiles/Dockerfile-data-manager + depends_on: + chromadb-prod-801: + condition: service_healthy + environment: + PROD_OR_DEV: prod-801 + FLASK_UPLOADER_APP_SECRET_KEY_FILE: /run/secrets/flask_uploader_app_secret_key + UPLOADER_SALT_FILE: /run/secrets/uploader_salt + OPENAI_API_KEY_FILE: /run/secrets/openai_api_key + HUGGING_FACE_HUB_TOKEN_FILE: /run/secrets/hf_token + secrets: + - flask_uploader_app_secret_key + - uploader_salt + - openai_api_key + - hf_token + ports: + - 5004:5001 # host:container + volumes: + - a2rchi-prod-801-data:/root/data/ + restart: always + + chromadb-prod-801: + build: + context: ../.. 
+ dockerfile: deploy/dockerfiles/Dockerfile-chroma + environment: + PROD_OR_DEV: prod-801 + ports: + - 8003:8000 # host:container + volumes: + - a2rchi-prod-801-data:/chroma/chroma/ + restart: always + # healthcheck originates from inside container; so use container port + healthcheck: + test: ["CMD", "curl", "-f", "http://0.0.0.0:8000/api/v1/heartbeat"] + interval: 15s + timeout: 10s + retries: 3 + start_period: 10s + start_interval: 5s + +volumes: + a2rchi-prod-801-data: + external: true + +secrets: + flask_uploader_app_secret_key: + file: secrets/flask_uploader_app_secret_key.txt + uploader_salt: + file: secrets/uploader_salt.txt + openai_api_key: + file: secrets/openai_api_key.txt + hf_token: + file: secrets/hf_token.txt diff --git a/deploy/prod-801/prod-801-install.sh b/deploy/prod-801/prod-801-install.sh new file mode 100644 index 0000000..0e3691e --- /dev/null +++ b/deploy/prod-801/prod-801-install.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# create volume if it doesn't already exist +exists=`docker volume ls | awk '{print $2}' | grep a2rchi-prod-801-data` +if [[ $exists != 'a2rchi-prod-801-data' ]]; then + docker volume create --name a2rchi-prod-801-data +fi + +# start services +echo "Starting docker compose" +cd A2rchi-prod-801/deploy/prod-801/ +docker compose -f prod-801-compose.yaml up -d --build --force-recreate --always-recreate-deps + +# # secrets files are created by CI pipeline and destroyed here +# rm secrets/cleo_*.txt +# rm secrets/imap_*.txt +# rm secrets/sender_*.txt +# rm secrets/flask_uploader_app_secret_key.txt +# rm secrets/uploader_salt.txt +# rm secrets/openai_api_key.txt +# rm secrets/hf_token.txt diff --git a/deploy/prod-801/prod-801-stop.sh b/deploy/prod-801/prod-801-stop.sh new file mode 100644 index 0000000..d367bd5 --- /dev/null +++ b/deploy/prod-801/prod-801-stop.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +echo "Stop running docker compose" +cd A2rchi-prod-801/deploy/prod-801/ +docker compose -f prod-801-compose.yaml down From 
41cf7c92871cc6e64d46390a6daeb68b93b3aa6d Mon Sep 17 00:00:00 2001 From: Julius Heitkoetter Date: Wed, 27 Sep 2023 11:09:56 -0400 Subject: [PATCH 12/18] changing prompt to 8.01 specific prompt --- config/prod-801-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/prod-801-config.yaml b/config/prod-801-config.yaml index 0c80833..d2a4f2c 100644 --- a/config/prod-801-config.yaml +++ b/config/prod-801-config.yaml @@ -34,7 +34,7 @@ chains: - Expert prompts: CONDENSING_PROMPT: condense.prompt #name of the prompt (in config/prompts) that serves to condense a history and a question into a single question - MAIN_PROMPT: submit.prompt #name of the main prompt (in config/prompts) which takes in a single question and a context. + MAIN_PROMPT: 801.prompt #name of the main prompt (in config/prompts) which takes in a single question and a context. chain: # pick one of the models listed in the model class map below MODEL_NAME: OpenAILLM From 060f5d8830e059232032b76c66cdd69f7d6bb531 Mon Sep 17 00:00:00 2001 From: Julius Heitkoetter Date: Wed, 27 Sep 2023 11:16:48 -0400 Subject: [PATCH 13/18] moved api call from url /get_chat_response to /api/get_chat_response --- A2rchi/interfaces/chat_app/app.py | 2 +- A2rchi/interfaces/chat_app/static/script.js | 2 +- A2rchi/interfaces/chat_app/static/script.js-template | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/A2rchi/interfaces/chat_app/app.py b/A2rchi/interfaces/chat_app/app.py index 48e1911..20401bd 100644 --- a/A2rchi/interfaces/chat_app/app.py +++ b/A2rchi/interfaces/chat_app/app.py @@ -153,7 +153,7 @@ def __init__(self, app, **configs): CORS(self.app) # add endpoints for flask app - self.add_endpoint('/get_chat_response', 'get_chat_response', self.get_chat_response, methods=["POST"]) + self.add_endpoint('/api/get_chat_response', 'get_chat_response', self.get_chat_response, methods=["POST"]) self.add_endpoint('/', '', self.index) self.add_endpoint('/terms', 'terms', self.terms) diff 
--git a/A2rchi/interfaces/chat_app/static/script.js b/A2rchi/interfaces/chat_app/static/script.js index e3e5070..e3f7ea1 100644 --- a/A2rchi/interfaces/chat_app/static/script.js +++ b/A2rchi/interfaces/chat_app/static/script.js @@ -42,7 +42,7 @@ const refreshChat = async () => { } const getChatResponse = async (incomingChatDiv) => { - const API_URL = "http://0.0.0.0:7861/get_chat_response"; + const API_URL = "http://0.0.0.0:7861/api/get_chat_response"; const pElement = document.createElement("div"); // Define the properties and data for the API request diff --git a/A2rchi/interfaces/chat_app/static/script.js-template b/A2rchi/interfaces/chat_app/static/script.js-template index 034b2c6..b036fcb 100644 --- a/A2rchi/interfaces/chat_app/static/script.js-template +++ b/A2rchi/interfaces/chat_app/static/script.js-template @@ -42,7 +42,7 @@ const refreshChat = async () => { } const getChatResponse = async (incomingChatDiv) => { - const API_URL = "http://0.0.0.0:XX-HTTP_PORT-XX/get_chat_response"; + const API_URL = "http://0.0.0.0:XX-HTTP_PORT-XX/api/get_chat_response"; const pElement = document.createElement("div"); // Define the properties and data for the API request From 590ffc08fbb04da29129216cb04bda2ee34cb90c Mon Sep 17 00:00:00 2001 From: mdr223 Date: Thu, 28 Sep 2023 20:50:07 -0400 Subject: [PATCH 14/18] updating hostname --- A2rchi/interfaces/chat_app/static/script.js | 2 +- A2rchi/interfaces/chat_app/static/script.js-template | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/A2rchi/interfaces/chat_app/static/script.js b/A2rchi/interfaces/chat_app/static/script.js index e3e5070..ed0b530 100644 --- a/A2rchi/interfaces/chat_app/static/script.js +++ b/A2rchi/interfaces/chat_app/static/script.js @@ -42,7 +42,7 @@ const refreshChat = async () => { } const getChatResponse = async (incomingChatDiv) => { - const API_URL = "http://0.0.0.0:7861/get_chat_response"; + const API_URL = "http://t3desk019.mit.edu:7861/api/get_chat_response"; const pElement = 
document.createElement("div"); // Define the properties and data for the API request diff --git a/A2rchi/interfaces/chat_app/static/script.js-template b/A2rchi/interfaces/chat_app/static/script.js-template index 034b2c6..5e407a1 100644 --- a/A2rchi/interfaces/chat_app/static/script.js-template +++ b/A2rchi/interfaces/chat_app/static/script.js-template @@ -42,7 +42,7 @@ const refreshChat = async () => { } const getChatResponse = async (incomingChatDiv) => { - const API_URL = "http://0.0.0.0:XX-HTTP_PORT-XX/get_chat_response"; + const API_URL = "http://t3desk019.mit.edu:XX-HTTP_PORT-XX/api/get_chat_response"; const pElement = document.createElement("div"); // Define the properties and data for the API request From c41e32145181cca3b731637f7611ccff5abfeb34 Mon Sep 17 00:00:00 2001 From: mdr223 Date: Thu, 28 Sep 2023 21:52:40 -0400 Subject: [PATCH 15/18] making filepaths explicit in config --- A2rchi/chains/prompts.py | 4 ++-- config/dev-config.yaml | 6 ++++-- config/prod-config.yaml | 6 ++++-- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/A2rchi/chains/prompts.py b/A2rchi/chains/prompts.py index fdd8f1a..cfc5ef9 100644 --- a/A2rchi/chains/prompts.py +++ b/A2rchi/chains/prompts.py @@ -4,8 +4,8 @@ config = Config_Loader().config["chains"]["prompts"] -def read_prompt(prompt_name, is_condense_prompt=False, is_main_prompt=False): - with open(f"config/prompts/{prompt_name}", "r") as f: +def read_prompt(prompt_filepath, is_condense_prompt=False, is_main_prompt=False): + with open(prompt_filepath, "r") as f: raw_prompt = f.read() prompt = "" diff --git a/config/dev-config.yaml b/config/dev-config.yaml index c4ce9e6..b426fe5 100644 --- a/config/dev-config.yaml +++ b/config/dev-config.yaml @@ -33,8 +33,10 @@ chains: - A2rchi - Expert prompts: - CONDENSING_PROMPT: condense.prompt #name of the prompt (in config/prompts) that serves to condense a history and a question into a single question - MAIN_PROMPT: submit.prompt #name of the main prompt (in config/prompts) 
which takes in a single question and a context. + # prompt that serves to condense a history and a question into a single question + CONDENSING_PROMPT: config/prompts/condense.prompt + # main prompt which takes in a single question and a context. + MAIN_PROMPT: config/prompts/submit.prompt chain: # pick one of the models listed in the model class map below MODEL_NAME: OpenAILLM # LlamaLLM diff --git a/config/prod-config.yaml b/config/prod-config.yaml index 3344628..5bd5f5b 100644 --- a/config/prod-config.yaml +++ b/config/prod-config.yaml @@ -33,8 +33,10 @@ chains: - A2rchi - Expert prompts: - CONDENSING_PROMPT: condense.prompt #name of the prompt (in config/prompts) that serves to condense a history and a question into a single question - MAIN_PROMPT: submit.prompt #name of the main prompt (in config/prompts) which takes in a single question and a context. + # prompt that serves to condense a history and a question into a single question + CONDENSING_PROMPT: config/prompts/condense.prompt + # main prompt which takes in a single question and a context. 
+ MAIN_PROMPT: config/prompts/submit.prompt chain: # pick one of the models listed in the model class map below MODEL_NAME: OpenAILLM From 77f6095080c1de7beb9d00d8b46bbd1db2cc6494 Mon Sep 17 00:00:00 2001 From: mdr223 Date: Thu, 28 Sep 2023 22:26:27 -0400 Subject: [PATCH 16/18] added 801-content to t3desk019 machine and mount it in docker-compose --- .github/workflows/prod-801-ci-cd.yaml | 2 +- .gitignore | 1 + A2rchi/utils/config_loader.py | 4 ++-- config/prod-801-config.yaml | 7 ++++--- deploy/dev/dev-compose.yaml | 10 +++++----- deploy/dockerfiles/Dockerfile-data-manager | 3 +++ deploy/prod-801/prod-801-compose.yaml | 9 ++++++--- deploy/prod/prod-compose.yaml | 10 +++++----- 8 files changed, 27 insertions(+), 19 deletions(-) diff --git a/.github/workflows/prod-801-ci-cd.yaml b/.github/workflows/prod-801-ci-cd.yaml index b93cccb..9545044 100644 --- a/.github/workflows/prod-801-ci-cd.yaml +++ b/.github/workflows/prod-801-ci-cd.yaml @@ -56,7 +56,7 @@ jobs: # run deploy script - name: Run Deploy Script run: | - ssh submit-t3desk 'bash -s' < ${{ github.workspace }}/deploy/prod/prod-801-install.sh + ssh submit-t3desk 'bash -s' < ${{ github.workspace }}/deploy/prod-801/prod-801-install.sh # clean up secret files - name: Remove Secrets from Runner diff --git a/.gitignore b/.gitignore index 1c11446..bdd9ff6 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,4 @@ venv *.egg-info *sqlite_db .vscode +801-content/ diff --git a/A2rchi/utils/config_loader.py b/A2rchi/utils/config_loader.py index 41a43b5..eb4ec52 100644 --- a/A2rchi/utils/config_loader.py +++ b/A2rchi/utils/config_loader.py @@ -16,9 +16,9 @@ def load_config(self): """ Small function for loading the config.yaml file """ - prod_or_dev = os.getenv("PROD_OR_DEV") + env = os.getenv("RUNTIME_ENV") try: - with open(f"./config/{prod_or_dev}-config.yaml", "r") as f: + with open(f"./config/{env}-config.yaml", "r") as f: config = yaml.load(f, Loader=yaml.FullLoader) # change the model class parameter from a string to 
an actual class diff --git a/config/prod-801-config.yaml b/config/prod-801-config.yaml index d2a4f2c..08cea21 100644 --- a/config/prod-801-config.yaml +++ b/config/prod-801-config.yaml @@ -24,7 +24,6 @@ interfaces: chains: input_lists: - empty.list - - submit.list - miscellanea.list base: # roles that A2rchi knows about @@ -33,8 +32,10 @@ chains: - A2rchi - Expert prompts: - CONDENSING_PROMPT: condense.prompt #name of the prompt (in config/prompts) that serves to condense a history and a question into a single question - MAIN_PROMPT: 801.prompt #name of the main prompt (in config/prompts) which takes in a single question and a context. + # prompt that serves to condense a history and a question into a single question + CONDENSING_PROMPT: config/prompts/condense.prompt + # main prompt which takes in a single question and a context. + MAIN_PROMPT: config/prompts/801.prompt chain: # pick one of the models listed in the model class map below MODEL_NAME: OpenAILLM diff --git a/deploy/dev/dev-compose.yaml b/deploy/dev/dev-compose.yaml index e9a1c38..a6776e5 100644 --- a/deploy/dev/dev-compose.yaml +++ b/deploy/dev/dev-compose.yaml @@ -7,7 +7,7 @@ services: chromadb-dev: condition: service_healthy environment: - PROD_OR_DEV: dev + RUNTIME_ENV: dev CLEO_URL_FILE: /run/secrets/cleo_url CLEO_USER_FILE: /run/secrets/cleo_user CLEO_PW_FILE: /run/secrets/cleo_pw @@ -43,7 +43,7 @@ services: chromadb-dev: condition: service_healthy environment: - PROD_OR_DEV: dev + RUNTIME_ENV: dev OPENAI_API_KEY_FILE: /run/secrets/openai_api_key HUGGING_FACE_HUB_TOKEN_FILE: /run/secrets/hf_token secrets: @@ -63,7 +63,7 @@ services: chromadb-dev: condition: service_healthy environment: - PROD_OR_DEV: dev + RUNTIME_ENV: dev IMAP_USER_FILE: /run/secrets/imap_user IMAP_PW_FILE: /run/secrets/imap_pw CLEO_URL_FILE: /run/secrets/cleo_url @@ -101,7 +101,7 @@ services: chromadb-dev: condition: service_healthy environment: - PROD_OR_DEV: dev + RUNTIME_ENV: dev FLASK_UPLOADER_APP_SECRET_KEY_FILE: 
/run/secrets/flask_uploader_app_secret_key UPLOADER_SALT_FILE: /run/secrets/uploader_salt OPENAI_API_KEY_FILE: /run/secrets/openai_api_key @@ -122,7 +122,7 @@ services: context: ../.. dockerfile: deploy/dockerfiles/Dockerfile-chroma environment: - PROD_OR_DEV: dev + RUNTIME_ENV: dev ports: - 8002:8000 # host:container volumes: diff --git a/deploy/dockerfiles/Dockerfile-data-manager b/deploy/dockerfiles/Dockerfile-data-manager index 8f98e6c..99481d6 100644 --- a/deploy/dockerfiles/Dockerfile-data-manager +++ b/deploy/dockerfiles/Dockerfile-data-manager @@ -9,4 +9,7 @@ COPY config config COPY A2rchi A2rchi RUN pip install --upgrade pip && pip install . +# ensure this directory is present for prod-801 deployment +RUN if [ "$BUILD_ENV" = "prod-801" ] ; then mkdir /root/data/801-content ; fi + CMD ["python", "-u", "A2rchi/bin/service_data_manager.py"] diff --git a/deploy/prod-801/prod-801-compose.yaml b/deploy/prod-801/prod-801-compose.yaml index 72bd2b4..c6b41d6 100644 --- a/deploy/prod-801/prod-801-compose.yaml +++ b/deploy/prod-801/prod-801-compose.yaml @@ -7,7 +7,7 @@ services: chromadb-prod-801: condition: service_healthy environment: - PROD_OR_DEV: prod-801 + RUNTIME_ENV: prod-801 OPENAI_API_KEY_FILE: /run/secrets/openai_api_key HUGGING_FACE_HUB_TOKEN_FILE: /run/secrets/hf_token secrets: @@ -23,11 +23,13 @@ services: build: context: ../.. dockerfile: deploy/dockerfiles/Dockerfile-data-manager + args: + BUILD_ENV: prod-801 depends_on: chromadb-prod-801: condition: service_healthy environment: - PROD_OR_DEV: prod-801 + RUNTIME_ENV: prod-801 FLASK_UPLOADER_APP_SECRET_KEY_FILE: /run/secrets/flask_uploader_app_secret_key UPLOADER_SALT_FILE: /run/secrets/uploader_salt OPENAI_API_KEY_FILE: /run/secrets/openai_api_key @@ -41,6 +43,7 @@ services: - 5004:5001 # host:container volumes: - a2rchi-prod-801-data:/root/data/ + - /home/tier3/a2rchi/801-content/:/root/data/801-content/ restart: always chromadb-prod-801: @@ -48,7 +51,7 @@ services: context: ../.. 
dockerfile: deploy/dockerfiles/Dockerfile-chroma environment: - PROD_OR_DEV: prod-801 + RUNTIME_ENV: prod-801 ports: - 8003:8000 # host:container volumes: diff --git a/deploy/prod/prod-compose.yaml b/deploy/prod/prod-compose.yaml index 4513999..a7d52d7 100644 --- a/deploy/prod/prod-compose.yaml +++ b/deploy/prod/prod-compose.yaml @@ -7,7 +7,7 @@ services: chromadb-prod: condition: service_healthy environment: - PROD_OR_DEV: prod + RUNTIME_ENV: prod CLEO_URL_FILE: /run/secrets/cleo_url CLEO_USER_FILE: /run/secrets/cleo_user CLEO_PW_FILE: /run/secrets/cleo_pw @@ -43,7 +43,7 @@ services: chromadb-prod: condition: service_healthy environment: - PROD_OR_DEV: prod + RUNTIME_ENV: prod OPENAI_API_KEY_FILE: /run/secrets/openai_api_key HUGGING_FACE_HUB_TOKEN_FILE: /run/secrets/hf_token secrets: @@ -63,7 +63,7 @@ services: chromadb-prod: condition: service_healthy environment: - PROD_OR_DEV: prod + RUNTIME_ENV: prod IMAP_USER_FILE: /run/secrets/imap_user IMAP_PW_FILE: /run/secrets/imap_pw CLEO_URL_FILE: /run/secrets/cleo_url @@ -101,7 +101,7 @@ services: chromadb-prod: condition: service_healthy environment: - PROD_OR_DEV: prod + RUNTIME_ENV: prod FLASK_UPLOADER_APP_SECRET_KEY_FILE: /run/secrets/flask_uploader_app_secret_key UPLOADER_SALT_FILE: /run/secrets/uploader_salt OPENAI_API_KEY_FILE: /run/secrets/openai_api_key @@ -122,7 +122,7 @@ services: context: ../.. 
dockerfile: deploy/dockerfiles/Dockerfile-chroma environment: - PROD_OR_DEV: prod + RUNTIME_ENV: prod ports: - 8000:8000 # host:container volumes: From 7934e61a8eedd573a4e6bd74b483ddd56e1ebb8c Mon Sep 17 00:00:00 2001 From: mdr223 Date: Thu, 28 Sep 2023 22:40:59 -0400 Subject: [PATCH 17/18] test first release of 801 --- .github/workflows/prod-801-ci-cd.yaml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/prod-801-ci-cd.yaml b/.github/workflows/prod-801-ci-cd.yaml index 9545044..dfc643a 100644 --- a/.github/workflows/prod-801-ci-cd.yaml +++ b/.github/workflows/prod-801-ci-cd.yaml @@ -3,6 +3,7 @@ run-name: ${{ github.actor }} deploys A2rchi for 8.01 to prod on: push: branches: + - main - release-8.01 jobs: deploy-prod-system: @@ -43,10 +44,10 @@ jobs: echo "${{ secrets.HF_TOKEN }}" >> ${{ github.workspace }}/deploy/prod-801/secrets/hf_token.txt chmod 400 ${{ github.workspace }}/deploy/prod-801/secrets/hf_token.txt - # stop any existing docker compose that's running - - name: Stop Docker Compose - run: | - ssh submit-t3desk 'bash -s' < ${{ github.workspace }}/deploy/prod-801/prod-801-stop.sh + # # stop any existing docker compose that's running + # - name: Stop Docker Compose + # run: | + # ssh submit-t3desk 'bash -s' < ${{ github.workspace }}/deploy/prod-801/prod-801-stop.sh # copy repository to machine - name: Copy Repository From eca41270d81917ebb3040ba4c51284c55f0ac5d8 Mon Sep 17 00:00:00 2001 From: mdr223 Date: Thu, 28 Sep 2023 22:49:49 -0400 Subject: [PATCH 18/18] reverting ci pipeline to be ready for production --- .github/workflows/prod-801-ci-cd.yaml | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/.github/workflows/prod-801-ci-cd.yaml b/.github/workflows/prod-801-ci-cd.yaml index dfc643a..9545044 100644 --- a/.github/workflows/prod-801-ci-cd.yaml +++ b/.github/workflows/prod-801-ci-cd.yaml @@ -3,7 +3,6 @@ run-name: ${{ github.actor }} deploys A2rchi for 8.01 to prod on: push: branches: - 
- main - release-8.01 jobs: deploy-prod-system: @@ -44,10 +43,10 @@ jobs: echo "${{ secrets.HF_TOKEN }}" >> ${{ github.workspace }}/deploy/prod-801/secrets/hf_token.txt chmod 400 ${{ github.workspace }}/deploy/prod-801/secrets/hf_token.txt - # # stop any existing docker compose that's running - # - name: Stop Docker Compose - # run: | - # ssh submit-t3desk 'bash -s' < ${{ github.workspace }}/deploy/prod-801/prod-801-stop.sh + # stop any existing docker compose that's running + - name: Stop Docker Compose + run: | + ssh submit-t3desk 'bash -s' < ${{ github.workspace }}/deploy/prod-801/prod-801-stop.sh # copy repository to machine - name: Copy Repository