From 8744faf0d6595e847e7345230cb0ccd56cdccaff Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Sun, 29 Oct 2023 13:15:05 +0100 Subject: [PATCH 001/109] Initial commit. --- README.md | 0 chat_gpt/__init__.py | 7 +++++++ chat_gpt/__main__.py | 14 +++++++++++++ chat_gpt/argparse_wrapper.py | 40 ++++++++++++++++++++++++++++++++++++ chat_gpt/chat_gpt.py | 17 +++++++++++++++ poetry.toml | 7 +++++++ pyproject.toml | 29 ++++++++++++++++++++++++++ 7 files changed, 114 insertions(+) create mode 100644 README.md create mode 100644 chat_gpt/__init__.py create mode 100644 chat_gpt/__main__.py create mode 100644 chat_gpt/argparse_wrapper.py create mode 100644 chat_gpt/chat_gpt.py create mode 100644 poetry.toml create mode 100644 pyproject.toml diff --git a/README.md b/README.md new file mode 100644 index 0000000..e69de29 diff --git a/chat_gpt/__init__.py b/chat_gpt/__init__.py new file mode 100644 index 0000000..0b3796c --- /dev/null +++ b/chat_gpt/__init__.py @@ -0,0 +1,7 @@ +#!/usr/bin/env python3 +import os + +import openai + +# Initialize the OpenAI API client +openai.api_key = os.environ["OPENAI_API_KEY"] diff --git a/chat_gpt/__main__.py b/chat_gpt/__main__.py new file mode 100644 index 0000000..8452c7e --- /dev/null +++ b/chat_gpt/__main__.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python3 +"""Program's entry point.""" +from .argparse_wrapper import get_parsed_args +from .chat_gpt import simple_chat + + +def main(argv=None): + """Program's main routine.""" + args = get_parsed_args(argv=argv) + simple_chat(args) + + +if __name__ == "__main__": + main() diff --git a/chat_gpt/argparse_wrapper.py b/chat_gpt/argparse_wrapper.py new file mode 100644 index 0000000..f98bbdc --- /dev/null +++ b/chat_gpt/argparse_wrapper.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 +"""Wrappers for argparse functionality.""" +import argparse +import sys + + +def get_parsed_args(argv=None): + """Get parsed command line arguments. + + Args: + argv (list): A list of passed command line args. + + Returns: + argparse.Namespace: Parsed command line arguments. + + """ + if argv is None: + argv = sys.argv[1:] + + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + "intial_ai_instructions", + type=str, + default=( + "You are a helpful assistant that answers correctly and concisely. " + + "You use as few tokens as possible unless otherwise specified." 
+ ), + help="Initial instructions for the AI", + nargs="?", + ) + parser.add_argument( + "--model", + type=str, + default="gpt-3.5-turbo", + choices=["gpt-3.5-turbo", "gpt-4"], + help="OpenAI API engine to use for completion", + ) + return parser.parse_args(argv) diff --git a/chat_gpt/chat_gpt.py b/chat_gpt/chat_gpt.py new file mode 100644 index 0000000..a2c1dfc --- /dev/null +++ b/chat_gpt/chat_gpt.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python3 +import openai + + +def simple_chat(args): + try: + messages = [{"role": "system", "content": args.intial_ai_instructions}] + while True: + messages.append({"role": "user", "content": input("You: ")}) + query_result = openai.ChatCompletion.create( + messages=messages, model=args.model + ) + response_msg = query_result["choices"][0]["message"] + messages.append(response_msg) + print(f"AI: {response_msg['content']}\n") + except KeyboardInterrupt: + print("Exiting.") diff --git a/poetry.toml b/poetry.toml new file mode 100644 index 0000000..c880064 --- /dev/null +++ b/poetry.toml @@ -0,0 +1,7 @@ +[virtualenvs] + create = true + in-project = true + prefer-active-python = true + +[virtualenvs.options] + system-site-packages = false diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..1813d5b --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,29 @@ +[tool.poetry] + authors = ["Paulo V C Medeiros "] + description = "A simple package to test OpenAI API capabilities." + license = "MIT" + name = "chat_gpt" + readme = "README.md" + version = "0.1.0" + +[tool.poetry.scripts] + chatgpt = "chat_gpt.__main__:main" + +[tool.poetry.dependencies] + flask = "^3.0.0" + flask-bootstrap = "^3.3.7.1" + openai = "^0.28.1" + python = "^3.9" + +[tool.poetry.group.dev.dependencies] + black = "^23.10.1" + flakeheaven = "^3.3.0" + ipython = "^8.16.1" + isort = "^5.12.0" + pydoclint = "^0.3.8" + pytest = "^7.4.3" + ruff = "^0.1.3" + +[build-system] + build-backend = "poetry.core.masonry.api" + requires = ["poetry-core"] From 7e25172a073a2aaaced3297713ac48d8e1677232 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Sun, 29 Oct 2023 17:50:12 +0100 Subject: [PATCH 002/109] Play a bit with using embeddings see --- chat_gpt/__init__.py | 9 ++ chat_gpt/__main__.py | 6 +- chat_gpt/argparse_wrapper.py | 2 +- chat_gpt/chat_gpt.py | 212 +++++++++++++++++++++++++++++++++-- pyproject.toml | 12 +- 5 files changed, 229 insertions(+), 12 deletions(-) diff --git a/chat_gpt/__init__.py b/chat_gpt/__init__.py index 0b3796c..828a97b 100644 --- a/chat_gpt/__init__.py +++ b/chat_gpt/__init__.py @@ -1,7 +1,16 @@ #!/usr/bin/env python3 import os +import tempfile +from pathlib import Path import openai + +class GeneralConstants: + _PACKAGE_TMPDIR = tempfile.TemporaryDirectory() + PACKAGE_TMPDIR = Path(_PACKAGE_TMPDIR.name) + EMBEDDINGS_FILE = PACKAGE_TMPDIR / "embeddings.csv" + + # Initialize the OpenAI API client openai.api_key = os.environ["OPENAI_API_KEY"] diff --git a/chat_gpt/__main__.py b/chat_gpt/__main__.py index 8452c7e..e24222e 100644 --- a/chat_gpt/__main__.py +++ b/chat_gpt/__main__.py @@ -1,13 +1,15 @@ #!/usr/bin/env python3 """Program's entry point.""" from .argparse_wrapper import get_parsed_args -from .chat_gpt import simple_chat +from .chat_gpt import chat_with_context, simple_chat def main(argv=None): """Program's main routine.""" args = get_parsed_args(argv=argv) - simple_chat(args) + # args.intial_ai_instructions += " In your answer, include the total number of tokens used in the question/answer pair." 
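+ # chat_with_context augments each prompt with embedding-based lookups of
+ # earlier messages (see chat_gpt.py); simple_chat resends the full history.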
+ chat_with_context(args) + # simple_chat(args) if __name__ == "__main__": diff --git a/chat_gpt/argparse_wrapper.py b/chat_gpt/argparse_wrapper.py index f98bbdc..e24555f 100644 --- a/chat_gpt/argparse_wrapper.py +++ b/chat_gpt/argparse_wrapper.py @@ -25,7 +25,7 @@ def get_parsed_args(argv=None): type=str, default=( "You are a helpful assistant that answers correctly and concisely. " - + "You use as few tokens as possible unless otherwise specified." + + "Use the fewest tokens possible unless otherwise instructed." ), help="Initial instructions for the AI", nargs="?", diff --git a/chat_gpt/chat_gpt.py b/chat_gpt/chat_gpt.py index a2c1dfc..7d9694c 100644 --- a/chat_gpt/chat_gpt.py +++ b/chat_gpt/chat_gpt.py @@ -1,17 +1,213 @@ #!/usr/bin/env python3 +import ast +import csv +import json +from pathlib import Path + +import numpy as np import openai +import pandas as pd +import tiktoken +from openai.embeddings_utils import distances_from_embeddings + +from . import GeneralConstants + + +def num_tokens_from_string(string: str, model: str) -> int: + """Returns the number of tokens in a text string.""" + encoding = tiktoken.encoding_for_model(model) + return len(encoding.encode(string)) def simple_chat(args): + TOTAL_N_TOKENS = 0 + conversation = [{"role": "system", "content": args.intial_ai_instructions}] try: - messages = [{"role": "system", "content": args.intial_ai_instructions}] - while True: - messages.append({"role": "user", "content": input("You: ")}) - query_result = openai.ChatCompletion.create( - messages=messages, model=args.model - ) + # while True: + # messages.append({"role": "user", "content": input("You: ")}) + for question in Path("questions.txt").read_text().split("\n"): + question = question.strip() + if not question: + continue + + conversation.append({"role": "user", "content": question}) + print(question) + + success = False + while not success: + try: + query_result = openai.ChatCompletion.create( + messages=conversation, + model=args.model, + request_timeout=30, + ) + except ( + openai.error.ServiceUnavailableError, + openai.error.Timeout, + ) as error: + print(f" > {error}. 
Retrying...") + else: + success = True response_msg = query_result["choices"][0]["message"] - messages.append(response_msg) - print(f"AI: {response_msg['content']}\n") + conversation.append(response_msg) + + ai_reply = response_msg["content"] + print(f"AI: {ai_reply}") + + text_for_token_count = "".join(msg["content"] for msg in conversation) + n_tokens = num_tokens_from_string( + string=text_for_token_count, model=args.model + ) + TOTAL_N_TOKENS += n_tokens + print(" > Total tokens used: ", n_tokens) + print() + except KeyboardInterrupt: + print("Exiting.") + print("TOTAL N TOKENS: ", TOTAL_N_TOKENS) + + +def store_message_to_file( + msg_obj: dict, file_path: Path = GeneralConstants.EMBEDDINGS_FILE +): + """Store message and embeddings to file.""" + # Adapted from + response = openai.Embedding.create( + model="text-embedding-ada-002", input=msg_obj["content"] + ) + emb_mess_pair = { + "embedding": json.dumps(response["data"][0]["embedding"]), + "message": json.dumps(msg_obj), + } + + init_file = not file_path.exists() or file_path.stat().st_size == 0 + write_mode = "w" if init_file else "a" + + with open(file_path, write_mode, newline="") as file: + writer = csv.DictWriter(file, fieldnames=emb_mess_pair.keys()) + if init_file: + writer.writeheader() + writer.writerow(emb_mess_pair) + + +def find_context(file_path: Path = GeneralConstants.EMBEDDINGS_FILE, option="both"): + """Lookup context from file.""" + # Adapted from + if not file_path.exists() or file_path.stat().st_size == 0: + return [] + + df = pd.read_csv(file_path) + df["embedding"] = df.embedding.apply(eval).apply(np.array) + + if option == "both": + message_list_embeddings = df["embedding"].values[:-3] + elif option == "assistant": + message_list_embeddings = df.loc[ + df["message"].apply(lambda x: ast.literal_eval(x)["role"] == "assistant"), + "embedding", + ].values[-1] + elif option == "user": + message_list_embeddings = df.loc[ + df["message"].apply(lambda x: ast.literal_eval(x)["role"] == "user"), + "embedding", + ].values[:-2] + else: + return [] # Return an empty list if no context is found + + query_embedding = df["embedding"].values[-1] + distances = distances_from_embeddings( + query_embedding, message_list_embeddings, distance_metric="L1" + ) + mask = (np.array(distances) < 21.6)[np.argsort(distances)] + + message_array = df["message"].iloc[np.argsort(distances)][mask] + message_array = [] if message_array is None else message_array[:4] + + message_objects = [json.loads(message) for message in message_array] + context_for_current_user_query = "" + for msg in message_objects: + context_for_current_user_query += f"{msg['role']}: {msg['content']}\n" + + if not context_for_current_user_query: + return [] + + return [ + { + "role": "system", + "content": f"Your knowledge: {context_for_current_user_query} " + + "+ Previous messages. 
" + + "Only answer next message.", + } + ] + + +def chat_with_context( + args, + context_file_path: Path = GeneralConstants.EMBEDDINGS_FILE, +): + intial_ai_instruct_msg = {"role": "system", "content": args.intial_ai_instructions} + conversation = [] + TOTAL_N_TOKENS = 0 + try: + # while True: + # user_input = {"role": "user", "content": input("You: ")} + for question in Path("questions.txt").read_text().split("\n"): + question = question.strip() + if not question: + continue + user_input = {"role": "user", "content": question} + store_message_to_file(msg_obj=user_input, file_path=context_file_path) + + last_msg_exchange = conversation[-2:] if len(conversation) > 2 else [] + current_context = find_context(file_path=context_file_path, option="both") + conversation = [ + intial_ai_instruct_msg, + *last_msg_exchange, + *current_context, + user_input, + ] + + print(question, end="") + print(f" (conversation length: {len(conversation)})") + + print("AI: ", end="") + full_reply_content = "" + success = False + while not success: + try: + for line in openai.ChatCompletion.create( + model=args.model, + messages=conversation, + request_timeout=30, + stream=True, + ): + reply_content_token = getattr( + line.choices[0].delta, "content", "" + ) + print(reply_content_token, end="") + full_reply_content += reply_content_token + except ( + openai.error.ServiceUnavailableError, + openai.error.Timeout, + ) as error: + print(f" > {error}. Retrying...") + else: + success = True + print() + + reply_msg_obj = {"role": "assistant", "content": full_reply_content} + store_message_to_file(file_path=context_file_path, msg_obj=reply_msg_obj) + conversation.append(reply_msg_obj) + + text_for_token_count = "".join(msg["content"] for msg in conversation) + n_tokens = num_tokens_from_string( + string=text_for_token_count, model=args.model + ) + print(" > Total tokens used: ", n_tokens) + print() + TOTAL_N_TOKENS += n_tokens + except KeyboardInterrupt: print("Exiting.") + print("TOTAL N TOKENS: ", TOTAL_N_TOKENS) diff --git a/pyproject.toml b/pyproject.toml index 1813d5b..2ea0696 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,10 +10,20 @@ chatgpt = "chat_gpt.__main__:main" [tool.poetry.dependencies] + # Python version + python = ">=3.9,<3.13" + # Deps that should have been openapi deps + matplotlib = "^3.8.0" + plotly = "^5.18.0" + scikit-learn = "^1.3.2" + scipy = "^1.11.3" + # Other dependencies flask = "^3.0.0" flask-bootstrap = "^3.3.7.1" + numpy = "^1.26.1" openai = "^0.28.1" - python = "^3.9" + pandas = "^2.1.2" + tiktoken = "^0.5.1" [tool.poetry.group.dev.dependencies] black = "^23.10.1" From a69bd9d895bc28fc8e788fde796e0700ee2e7047 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Sun, 29 Oct 2023 19:20:02 +0100 Subject: [PATCH 003/109] Some refactoring --- chat_gpt/argparse_wrapper.py | 4 +- chat_gpt/chat_gpt.py | 196 +++++++++++++++++------------------ 2 files changed, 96 insertions(+), 104 deletions(-) diff --git a/chat_gpt/argparse_wrapper.py b/chat_gpt/argparse_wrapper.py index e24555f..00e88b0 100644 --- a/chat_gpt/argparse_wrapper.py +++ b/chat_gpt/argparse_wrapper.py @@ -24,8 +24,8 @@ def get_parsed_args(argv=None): "intial_ai_instructions", type=str, default=( - "You are a helpful assistant that answers correctly and concisely. " - + "Use the fewest tokens possible unless otherwise instructed." + "You are a helpful assistant You answer correctly.\n" + + "You answer using the minimum possible number of tokens." 
), help="Initial instructions for the AI", nargs="?", diff --git a/chat_gpt/chat_gpt.py b/chat_gpt/chat_gpt.py index 7d9694c..cbde3af 100644 --- a/chat_gpt/chat_gpt.py +++ b/chat_gpt/chat_gpt.py @@ -13,13 +13,78 @@ from . import GeneralConstants +class BaseChatContext: + def add_user_input(self, conversation, user_input): + user_input_msg_obj = {"role": "user", "content": user_input} + conversation.append(user_input_msg_obj) + return conversation + + def add_chat_reply(self, conversation, chat_reply): + reply_msg_obj = {"role": "assistant", "content": chat_reply} + conversation.append(reply_msg_obj) + return conversation + + +class EmbeddingBasedChatContext(BaseChatContext): + """Chat context.""" + + def __init__(self): + self.context_file_path = GeneralConstants.EMBEDDINGS_FILE + + def add_user_input(self, conversation, user_input): + user_input_msg_obj = {"role": "user", "content": user_input} + store_message_to_file( + msg_obj=user_input_msg_obj, file_path=self.context_file_path + ) + intial_ai_instruct_msg = conversation[0] + last_msg_exchange = conversation[-2:] if len(conversation) > 2 else [] + current_context = find_context(file_path=self.context_file_path, option="both") + conversation = [ + intial_ai_instruct_msg, + *current_context, + *last_msg_exchange, + user_input_msg_obj, + ] + return conversation + + def add_chat_reply(self, conversation, chat_reply): + reply_msg_obj = {"role": "assistant", "content": chat_reply} + conversation.append(reply_msg_obj) + store_message_to_file(file_path=self.context_file_path, msg_obj=reply_msg_obj) + return conversation + + def num_tokens_from_string(string: str, model: str) -> int: """Returns the number of tokens in a text string.""" encoding = tiktoken.encoding_for_model(model) return len(encoding.encode(string)) -def simple_chat(args): +def make_query(conversation: list, model: str): + success = False + print("=========================================") + for line in conversation: + print(line) + print("=========================================") + while not success: + try: + for line in openai.ChatCompletion.create( + model=model, + messages=conversation, + request_timeout=30, + stream=True, + ): + reply_content_token = getattr(line.choices[0].delta, "content", "") + yield reply_content_token + success = True + except ( + openai.error.ServiceUnavailableError, + openai.error.Timeout, + ) as error: + print(f" > {error}. Retrying...") + + +def _base_chat(args, context): TOTAL_N_TOKENS = 0 conversation = [{"role": "system", "content": args.intial_ai_instructions}] try: @@ -29,43 +94,41 @@ def simple_chat(args): question = question.strip() if not question: continue - - conversation.append({"role": "user", "content": question}) print(question) - success = False - while not success: - try: - query_result = openai.ChatCompletion.create( - messages=conversation, - model=args.model, - request_timeout=30, - ) - except ( - openai.error.ServiceUnavailableError, - openai.error.Timeout, - ) as error: - print(f" > {error}. 
Retrying...") - else: - success = True - response_msg = query_result["choices"][0]["message"] - conversation.append(response_msg) - - ai_reply = response_msg["content"] - print(f"AI: {ai_reply}") - - text_for_token_count = "".join(msg["content"] for msg in conversation) - n_tokens = num_tokens_from_string( - string=text_for_token_count, model=args.model + # Add context to the conversation + conversation = context.add_user_input( + conversation=conversation, user_input=question + ) + + print("AI: ", end="") + full_reply_content = "" + for token in make_query(conversation=conversation, model=args.model): + print(token, end="") + full_reply_content += token + print("\n") + + # Update context with the reply + conversation = context.add_chat_reply( + conversation=conversation, chat_reply=full_reply_content + ) + + TOTAL_N_TOKENS += num_tokens_from_string( + string="".join(msg["content"] for msg in conversation), model=args.model ) - TOTAL_N_TOKENS += n_tokens - print(" > Total tokens used: ", n_tokens) - print() except KeyboardInterrupt: print("Exiting.") print("TOTAL N TOKENS: ", TOTAL_N_TOKENS) +def simple_chat(args): + return _base_chat(args, context=BaseChatContext()) + + +def chat_with_context(args): + return _base_chat(args, context=EmbeddingBasedChatContext()) + + def store_message_to_file( msg_obj: dict, file_path: Path = GeneralConstants.EMBEDDINGS_FILE ): @@ -136,78 +199,7 @@ def find_context(file_path: Path = GeneralConstants.EMBEDDINGS_FILE, option="bot { "role": "system", "content": f"Your knowledge: {context_for_current_user_query} " - + "+ Previous messages. " + + "+ Previous messages.\n" + "Only answer next message.", } ] - - -def chat_with_context( - args, - context_file_path: Path = GeneralConstants.EMBEDDINGS_FILE, -): - intial_ai_instruct_msg = {"role": "system", "content": args.intial_ai_instructions} - conversation = [] - TOTAL_N_TOKENS = 0 - try: - # while True: - # user_input = {"role": "user", "content": input("You: ")} - for question in Path("questions.txt").read_text().split("\n"): - question = question.strip() - if not question: - continue - user_input = {"role": "user", "content": question} - store_message_to_file(msg_obj=user_input, file_path=context_file_path) - - last_msg_exchange = conversation[-2:] if len(conversation) > 2 else [] - current_context = find_context(file_path=context_file_path, option="both") - conversation = [ - intial_ai_instruct_msg, - *last_msg_exchange, - *current_context, - user_input, - ] - - print(question, end="") - print(f" (conversation length: {len(conversation)})") - - print("AI: ", end="") - full_reply_content = "" - success = False - while not success: - try: - for line in openai.ChatCompletion.create( - model=args.model, - messages=conversation, - request_timeout=30, - stream=True, - ): - reply_content_token = getattr( - line.choices[0].delta, "content", "" - ) - print(reply_content_token, end="") - full_reply_content += reply_content_token - except ( - openai.error.ServiceUnavailableError, - openai.error.Timeout, - ) as error: - print(f" > {error}. 
Retrying...") - else: - success = True - print() - - reply_msg_obj = {"role": "assistant", "content": full_reply_content} - store_message_to_file(file_path=context_file_path, msg_obj=reply_msg_obj) - conversation.append(reply_msg_obj) - - text_for_token_count = "".join(msg["content"] for msg in conversation) - n_tokens = num_tokens_from_string( - string=text_for_token_count, model=args.model - ) - print(" > Total tokens used: ", n_tokens) - print() - TOTAL_N_TOKENS += n_tokens - - except KeyboardInterrupt: - print("Exiting.") - print("TOTAL N TOKENS: ", TOTAL_N_TOKENS) From cf46dadfe4c89837cf8074003dcb6e44820d476d Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Sun, 29 Oct 2023 20:34:00 +0100 Subject: [PATCH 004/109] Create Chat class --- chat_gpt/__main__.py | 11 ++- chat_gpt/argparse_wrapper.py | 6 +- chat_gpt/chat_gpt.py | 181 ++++++++++++++++++++++------------- 3 files changed, 123 insertions(+), 75 deletions(-) diff --git a/chat_gpt/__main__.py b/chat_gpt/__main__.py index e24222e..7888f1a 100644 --- a/chat_gpt/__main__.py +++ b/chat_gpt/__main__.py @@ -1,15 +1,18 @@ #!/usr/bin/env python3 """Program's entry point.""" from .argparse_wrapper import get_parsed_args -from .chat_gpt import chat_with_context, simple_chat +from .chat_gpt import Chat def main(argv=None): """Program's main routine.""" args = get_parsed_args(argv=argv) - # args.intial_ai_instructions += " In your answer, include the total number of tokens used in the question/answer pair." - chat_with_context(args) - # simple_chat(args) + chat = Chat( + model=args.model, + base_instructions=args.intial_ai_instructions, + send_full_history=args.send_full_history, + ) + chat.start() if __name__ == "__main__": diff --git a/chat_gpt/argparse_wrapper.py b/chat_gpt/argparse_wrapper.py index 00e88b0..dd335a5 100644 --- a/chat_gpt/argparse_wrapper.py +++ b/chat_gpt/argparse_wrapper.py @@ -23,10 +23,7 @@ def get_parsed_args(argv=None): parser.add_argument( "intial_ai_instructions", type=str, - default=( - "You are a helpful assistant You answer correctly.\n" - + "You answer using the minimum possible number of tokens." 
- ), + default="You answer using the minimum possible number of tokens.", help="Initial instructions for the AI", nargs="?", ) @@ -37,4 +34,5 @@ def get_parsed_args(argv=None): choices=["gpt-3.5-turbo", "gpt-4"], help="OpenAI API engine to use for completion", ) + parser.add_argument("--send-full-history", action="store_true") return parser.parse_args(argv) diff --git a/chat_gpt/chat_gpt.py b/chat_gpt/chat_gpt.py index cbde3af..4aad974 100644 --- a/chat_gpt/chat_gpt.py +++ b/chat_gpt/chat_gpt.py @@ -14,13 +14,24 @@ class BaseChatContext: + def __init__(self, parent_chat): + self.parent_chat = parent_chat + def add_user_input(self, conversation, user_input): - user_input_msg_obj = {"role": "user", "content": user_input} + user_input_msg_obj = { + "role": "user", + "name": self.parent_chat.username, + "content": user_input, + } conversation.append(user_input_msg_obj) return conversation def add_chat_reply(self, conversation, chat_reply): - reply_msg_obj = {"role": "assistant", "content": chat_reply} + reply_msg_obj = { + "role": "assistant", + "name": self.parent_chat.assistant_name, + "content": chat_reply, + } conversation.append(reply_msg_obj) return conversation @@ -28,44 +39,118 @@ def add_chat_reply(self, conversation, chat_reply): class EmbeddingBasedChatContext(BaseChatContext): """Chat context.""" - def __init__(self): + def __init__(self, parent_chat): + self.parent_chat = parent_chat self.context_file_path = GeneralConstants.EMBEDDINGS_FILE def add_user_input(self, conversation, user_input): - user_input_msg_obj = {"role": "user", "content": user_input} + user_input_msg_obj = { + "role": "user", + "name": self.parent_chat.username, + "content": user_input, + } store_message_to_file( msg_obj=user_input_msg_obj, file_path=self.context_file_path ) intial_ai_instruct_msg = conversation[0] last_msg_exchange = conversation[-2:] if len(conversation) > 2 else [] - current_context = find_context(file_path=self.context_file_path, option="both") + current_context = find_context( + file_path=self.context_file_path, + parent_chat=self.parent_chat, + option="both", + ) conversation = [ intial_ai_instruct_msg, - *current_context, *last_msg_exchange, + *current_context, user_input_msg_obj, ] return conversation def add_chat_reply(self, conversation, chat_reply): - reply_msg_obj = {"role": "assistant", "content": chat_reply} + reply_msg_obj = { + "role": "assistant", + "name": self.parent_chat.assistant_name, + "content": chat_reply, + } conversation.append(reply_msg_obj) store_message_to_file(file_path=self.context_file_path, msg_obj=reply_msg_obj) return conversation -def num_tokens_from_string(string: str, model: str) -> int: - """Returns the number of tokens in a text string.""" - encoding = tiktoken.encoding_for_model(model) - return len(encoding.encode(string)) - +class Chat: + def __init__( + self, model: str, base_instructions: str, send_full_history: bool = False + ): + self.model = model + self.base_instructions = base_instructions + self.send_full_history = send_full_history + self.username = "chat_user" + self.assistant_name = f"chat_{model.replace('.', '_')}" + self.system_name = "chat_manager" + + def start(self): + if self.send_full_history: + context = BaseChatContext(parent_chat=self) + else: + context = EmbeddingBasedChatContext(parent_chat=self) + + TOTAL_N_TOKENS = 0 + initial_ai_instructions = ( + f"You are {self.assistant_name}," + + f" a helpful assistant to {self.username}.\n" + + "You answer correctly.\n" + + f"{self.base_instructions.strip()}.\n" + + f" Follow ALL 
directives by {self.system_name}." + ) -def make_query(conversation: list, model: str): + conversation = [ + { + "role": "system", + "name": self.system_name, + "content": initial_ai_instructions.strip(), + } + ] + try: + # while True: + # messages.append({"role": "user", "content": input("You: ")}) + for question in Path("questions.txt").read_text().split("\n"): + question = question.strip() + if not question: + continue + print(question) + + # Add context to the conversation + conversation = context.add_user_input( + conversation=conversation, user_input=question + ) + + print("AI: ", end="") + full_reply_content = "" + for token in communicate_with_model( + conversation=conversation, model=self.model + ): + print(token, end="") + full_reply_content += token + full_reply_content = full_reply_content.strip() + print("\n") + + # Update context with the reply + conversation = context.add_chat_reply( + conversation=conversation, chat_reply=full_reply_content + ) + + TOTAL_N_TOKENS += _num_tokens_from_string( + string="".join(msg["content"] for msg in conversation), + model=self.model, + ) + except KeyboardInterrupt: + print("Exiting.") + print("TOTAL N TOKENS: ", TOTAL_N_TOKENS) + + +def communicate_with_model(conversation: list, model: str): success = False - print("=========================================") - for line in conversation: - print(line) - print("=========================================") while not success: try: for line in openai.ChatCompletion.create( @@ -84,51 +169,6 @@ def make_query(conversation: list, model: str): print(f" > {error}. Retrying...") -def _base_chat(args, context): - TOTAL_N_TOKENS = 0 - conversation = [{"role": "system", "content": args.intial_ai_instructions}] - try: - # while True: - # messages.append({"role": "user", "content": input("You: ")}) - for question in Path("questions.txt").read_text().split("\n"): - question = question.strip() - if not question: - continue - print(question) - - # Add context to the conversation - conversation = context.add_user_input( - conversation=conversation, user_input=question - ) - - print("AI: ", end="") - full_reply_content = "" - for token in make_query(conversation=conversation, model=args.model): - print(token, end="") - full_reply_content += token - print("\n") - - # Update context with the reply - conversation = context.add_chat_reply( - conversation=conversation, chat_reply=full_reply_content - ) - - TOTAL_N_TOKENS += num_tokens_from_string( - string="".join(msg["content"] for msg in conversation), model=args.model - ) - except KeyboardInterrupt: - print("Exiting.") - print("TOTAL N TOKENS: ", TOTAL_N_TOKENS) - - -def simple_chat(args): - return _base_chat(args, context=BaseChatContext()) - - -def chat_with_context(args): - return _base_chat(args, context=EmbeddingBasedChatContext()) - - def store_message_to_file( msg_obj: dict, file_path: Path = GeneralConstants.EMBEDDINGS_FILE ): @@ -153,7 +193,7 @@ def store_message_to_file( writer.writerow(emb_mess_pair) -def find_context(file_path: Path = GeneralConstants.EMBEDDINGS_FILE, option="both"): +def find_context(file_path: Path, parent_chat: Chat, option="both"): """Lookup context from file.""" # Adapted from @@ -190,7 +230,7 @@ def find_context(file_path: Path = GeneralConstants.EMBEDDINGS_FILE, option="bot message_objects = [json.loads(message) for message in message_array] context_for_current_user_query = "" for msg in message_objects: - context_for_current_user_query += f"{msg['role']}: {msg['content']}\n" + context_for_current_user_query += f"{msg['name']}: 
{msg['content']}\n" if not context_for_current_user_query: return [] @@ -198,8 +238,15 @@ def find_context(file_path: Path = GeneralConstants.EMBEDDINGS_FILE, option="bot return [ { "role": "system", - "content": f"Your knowledge: {context_for_current_user_query} " - + "+ Previous messages.\n" - + "Only answer next message.", + "name": parent_chat.system_name, + "content": f"{parent_chat.assistant_name}'s knowledge: " + + f"{context_for_current_user_query} + Previous messages.\n" + + f"Answer {parent_chat.username}'s last message.", } ] + + +def _num_tokens_from_string(string: str, model: str) -> int: + """Returns the number of tokens in a text string.""" + encoding = tiktoken.encoding_for_model(model) + return len(encoding.encode(string)) From 3a1a8da1519d45304127b2fc3c72bd1cec0fcb0b Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Sun, 29 Oct 2023 20:48:32 +0100 Subject: [PATCH 005/109] Restore getting prompts from user --- chat_gpt/chat_gpt.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/chat_gpt/chat_gpt.py b/chat_gpt/chat_gpt.py index 4aad974..e7e8182 100644 --- a/chat_gpt/chat_gpt.py +++ b/chat_gpt/chat_gpt.py @@ -112,13 +112,10 @@ def start(self): } ] try: - # while True: - # messages.append({"role": "user", "content": input("You: ")}) - for question in Path("questions.txt").read_text().split("\n"): - question = question.strip() + while True: + question = input("You: ").strip() if not question: continue - print(question) # Add context to the conversation conversation = context.add_user_input( @@ -241,7 +238,7 @@ def find_context(file_path: Path, parent_chat: Chat, option="both"): "name": parent_chat.system_name, "content": f"{parent_chat.assistant_name}'s knowledge: " + f"{context_for_current_user_query} + Previous messages.\n" - + f"Answer {parent_chat.username}'s last message.", + + "Only answer last message.", } ] From 079c6cdc1c67a90d5b7f92bc90f8a317eb848962 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Sun, 29 Oct 2023 20:50:24 +0100 Subject: [PATCH 006/109] Exit gracefully upon EOFError --- chat_gpt/chat_gpt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chat_gpt/chat_gpt.py b/chat_gpt/chat_gpt.py index e7e8182..3ee88e5 100644 --- a/chat_gpt/chat_gpt.py +++ b/chat_gpt/chat_gpt.py @@ -141,7 +141,7 @@ def start(self): string="".join(msg["content"] for msg in conversation), model=self.model, ) - except KeyboardInterrupt: + except (KeyboardInterrupt, EOFError): print("Exiting.") print("TOTAL N TOKENS: ", TOTAL_N_TOKENS) From ff9aaace635f026fe31134042da4ee611729ccba Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Sun, 29 Oct 2023 23:53:24 +0100 Subject: [PATCH 007/109] Some more refactoring --- chat_gpt/chat_gpt.py | 45 ++++++++++++++++++++++---------------------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/chat_gpt/chat_gpt.py b/chat_gpt/chat_gpt.py index 3ee88e5..7ff86c3 100644 --- a/chat_gpt/chat_gpt.py +++ b/chat_gpt/chat_gpt.py @@ -14,10 +14,10 @@ class BaseChatContext: - def __init__(self, parent_chat): + def __init__(self, parent_chat: "Chat"): self.parent_chat = parent_chat - def add_user_input(self, conversation, user_input): + def add_user_input(self, conversation: list, user_input: str): user_input_msg_obj = { "role": "user", "name": self.parent_chat.username, @@ -26,7 +26,7 @@ def add_user_input(self, conversation, user_input): conversation.append(user_input_msg_obj) return conversation - def add_chat_reply(self, conversation, chat_reply): + def add_chat_reply(self, 
conversation: list, chat_reply: str): reply_msg_obj = { "role": "assistant", "name": self.parent_chat.assistant_name, @@ -39,22 +39,22 @@ def add_chat_reply(self, conversation, chat_reply): class EmbeddingBasedChatContext(BaseChatContext): """Chat context.""" - def __init__(self, parent_chat): + def __init__(self, parent_chat: "Chat"): self.parent_chat = parent_chat self.context_file_path = GeneralConstants.EMBEDDINGS_FILE - def add_user_input(self, conversation, user_input): + def add_user_input(self, conversation: list, user_input: str): user_input_msg_obj = { "role": "user", "name": self.parent_chat.username, "content": user_input, } - store_message_to_file( + _store_message_to_file( msg_obj=user_input_msg_obj, file_path=self.context_file_path ) intial_ai_instruct_msg = conversation[0] last_msg_exchange = conversation[-2:] if len(conversation) > 2 else [] - current_context = find_context( + current_context = _find_context( file_path=self.context_file_path, parent_chat=self.parent_chat, option="both", @@ -67,14 +67,14 @@ def add_user_input(self, conversation, user_input): ] return conversation - def add_chat_reply(self, conversation, chat_reply): + def add_chat_reply(self, conversation: list, chat_reply: str): reply_msg_obj = { "role": "assistant", "name": self.parent_chat.assistant_name, "content": chat_reply, } conversation.append(reply_msg_obj) - store_message_to_file(file_path=self.context_file_path, msg_obj=reply_msg_obj) + _store_message_to_file(file_path=self.context_file_path, msg_obj=reply_msg_obj) return conversation @@ -83,7 +83,7 @@ def __init__( self, model: str, base_instructions: str, send_full_history: bool = False ): self.model = model - self.base_instructions = base_instructions + self.base_instructions = base_instructions.strip(" .") self.send_full_history = send_full_history self.username = "chat_user" self.assistant_name = f"chat_{model.replace('.', '_')}" @@ -96,12 +96,12 @@ def start(self): context = EmbeddingBasedChatContext(parent_chat=self) TOTAL_N_TOKENS = 0 - initial_ai_instructions = ( - f"You are {self.assistant_name}," - + f" a helpful assistant to {self.username}.\n" - + "You answer correctly.\n" - + f"{self.base_instructions.strip()}.\n" - + f" Follow ALL directives by {self.system_name}." + initial_ai_instructions = " ".join( + [ + f"You are {self.assistant_name}, a helpful assistant to {self.username}.", + f"You answer correctly. {self.base_instructions}." + f"Follow all directives given by {self.system_name}.", + ] ) conversation = [ @@ -124,7 +124,7 @@ def start(self): print("AI: ", end="") full_reply_content = "" - for token in communicate_with_model( + for token in _communicate_with_model( conversation=conversation, model=self.model ): print(token, end="") @@ -142,11 +142,11 @@ def start(self): model=self.model, ) except (KeyboardInterrupt, EOFError): - print("Exiting.") - print("TOTAL N TOKENS: ", TOTAL_N_TOKENS) + print("Exiting chat. ", end="") + print(f"You have used {TOTAL_N_TOKENS} tokens.") -def communicate_with_model(conversation: list, model: str): +def _communicate_with_model(conversation: list, model: str): success = False while not success: try: @@ -155,6 +155,7 @@ def communicate_with_model(conversation: list, model: str): messages=conversation, request_timeout=30, stream=True, + temperature=0.8, ): reply_content_token = getattr(line.choices[0].delta, "content", "") yield reply_content_token @@ -166,7 +167,7 @@ def communicate_with_model(conversation: list, model: str): print(f" > {error}. 
Retrying...") -def store_message_to_file( +def _store_message_to_file( msg_obj: dict, file_path: Path = GeneralConstants.EMBEDDINGS_FILE ): """Store message and embeddings to file.""" @@ -190,7 +191,7 @@ def store_message_to_file( writer.writerow(emb_mess_pair) -def find_context(file_path: Path, parent_chat: Chat, option="both"): +def _find_context(file_path: Path, parent_chat: Chat, option="both"): """Lookup context from file.""" # Adapted from From 1e401e14a3c5ee0ab15c7d534b85ff7c28336d7c Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Mon, 30 Oct 2023 00:47:18 +0100 Subject: [PATCH 008/109] Print an estimate of the costs using the API --- chat_gpt/chat_gpt.py | 31 +++++++++++++++++++++++-------- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/chat_gpt/chat_gpt.py b/chat_gpt/chat_gpt.py index 7ff86c3..69b6b17 100644 --- a/chat_gpt/chat_gpt.py +++ b/chat_gpt/chat_gpt.py @@ -12,6 +12,11 @@ from . import GeneralConstants +PRICING_PER_THOUSAND_TOKENS = { + "gpt-3.5-turbo": {"input": 0.0015, "output": 0.002}, + "gpt-4": {"input": 0.03, "output": 0.06}, +} + class BaseChatContext: def __init__(self, parent_chat: "Chat"): @@ -95,7 +100,7 @@ def start(self): else: context = EmbeddingBasedChatContext(parent_chat=self) - TOTAL_N_TOKENS = 0 + TOTAL_N_TOKENS = {"input": 0, "output": 0} initial_ai_instructions = " ".join( [ f"You are {self.assistant_name}, a helpful assistant to {self.username}.", @@ -122,6 +127,12 @@ def start(self): conversation=conversation, user_input=question ) + # Update number of input tokens + TOTAL_N_TOKENS["input"] += _num_tokens_from_string( + string="".join(msg["content"] for msg in conversation), + model=self.model, + ) + print("AI: ", end="") full_reply_content = "" for token in _communicate_with_model( @@ -129,21 +140,25 @@ def start(self): ): print(token, end="") full_reply_content += token - full_reply_content = full_reply_content.strip() print("\n") + # Update number of output tokens + TOTAL_N_TOKENS["output"] += _num_tokens_from_string( + string=full_reply_content, model=self.model + ) + # Update context with the reply conversation = context.add_chat_reply( - conversation=conversation, chat_reply=full_reply_content + conversation=conversation, chat_reply=full_reply_content.strip() ) - TOTAL_N_TOKENS += _num_tokens_from_string( - string="".join(msg["content"] for msg in conversation), - model=self.model, - ) except (KeyboardInterrupt, EOFError): print("Exiting chat. 
", end="") - print(f"You have used {TOTAL_N_TOKENS} tokens.") + finally: + model_costs = PRICING_PER_THOUSAND_TOKENS[self.model] + costs = {k: model_costs[k] * v / 1000.0 for k, v in TOTAL_N_TOKENS.items()} + print(f"You have used {sum(TOTAL_N_TOKENS.values())} tokens.") + print(f"The estimated cost is ${sum(costs.values()):.3f}.") def _communicate_with_model(conversation: list, model: str): From 41441bb9d0b599b7526362d3eb734fbd43c9dae9 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Mon, 30 Oct 2023 11:22:06 +0100 Subject: [PATCH 009/109] Refactoring --- chat_gpt/__main__.py | 2 +- chat_gpt/argparse_wrapper.py | 2 +- chat_gpt/chat_gpt.py | 75 ++++++++++++++++++++---------------- pyproject.toml | 17 ++++++-- 4 files changed, 58 insertions(+), 38 deletions(-) diff --git a/chat_gpt/__main__.py b/chat_gpt/__main__.py index 7888f1a..2c504cf 100644 --- a/chat_gpt/__main__.py +++ b/chat_gpt/__main__.py @@ -9,7 +9,7 @@ def main(argv=None): args = get_parsed_args(argv=argv) chat = Chat( model=args.model, - base_instructions=args.intial_ai_instructions, + base_instructions=args.initial_ai_instructions, send_full_history=args.send_full_history, ) chat.start() diff --git a/chat_gpt/argparse_wrapper.py b/chat_gpt/argparse_wrapper.py index dd335a5..a4a5ca6 100644 --- a/chat_gpt/argparse_wrapper.py +++ b/chat_gpt/argparse_wrapper.py @@ -21,7 +21,7 @@ def get_parsed_args(argv=None): formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument( - "intial_ai_instructions", + "initial_ai_instructions", type=str, default="You answer using the minimum possible number of tokens.", help="Initial instructions for the AI", diff --git a/chat_gpt/chat_gpt.py b/chat_gpt/chat_gpt.py index 69b6b17..e3072ac 100644 --- a/chat_gpt/chat_gpt.py +++ b/chat_gpt/chat_gpt.py @@ -88,80 +88,89 @@ def __init__( self, model: str, base_instructions: str, send_full_history: bool = False ): self.model = model - self.base_instructions = base_instructions.strip(" .") - self.send_full_history = send_full_history self.username = "chat_user" self.assistant_name = f"chat_{model.replace('.', '_')}" self.system_name = "chat_manager" - def start(self): - if self.send_full_history: - context = BaseChatContext(parent_chat=self) - else: - context = EmbeddingBasedChatContext(parent_chat=self) - - TOTAL_N_TOKENS = {"input": 0, "output": 0} - initial_ai_instructions = " ".join( + self.ground_ai_instructions = " ".join( [ - f"You are {self.assistant_name}, a helpful assistant to {self.username}.", - f"You answer correctly. {self.base_instructions}." - f"Follow all directives given by {self.system_name}.", + instruction.strip() + for instruction in [ + f"Your name is {self.assistant_name}", + f"You are a helpful assistant to {self.username}.", + "You answer correctly. 
You do not lie.", + f"{base_instructions.strip(' .')}.", + f"You follow all directives by {self.system_name}.", + ] + if instruction.strip() ] ) + self.token_usage = {"input": 0, "output": 0} + self.token_price = { + k: v / 1000.0 for k, v in PRICING_PER_THOUSAND_TOKENS[self.model].items() + } + + if send_full_history: + self.context = BaseChatContext(parent_chat=self) + else: + self.context = EmbeddingBasedChatContext(parent_chat=self) + + def get_n_tokens(self, string: str) -> int: + return _num_tokens_from_string(string=string, model=self.model) + + def start(self): conversation = [ { "role": "system", "name": self.system_name, - "content": initial_ai_instructions.strip(), + "content": self.ground_ai_instructions, } ] try: while True: - question = input("You: ").strip() + question = input(f"{self.username}: ").strip() if not question: continue # Add context to the conversation - conversation = context.add_user_input( + conversation = self.context.add_user_input( conversation=conversation, user_input=question ) # Update number of input tokens - TOTAL_N_TOKENS["input"] += _num_tokens_from_string( - string="".join(msg["content"] for msg in conversation), - model=self.model, + self.token_usage["input"] += sum( + self.get_n_tokens(string=msg["content"]) for msg in conversation ) - print("AI: ", end="") + print(f"{self.assistant_name}: ", end="") full_reply_content = "" - for token in _communicate_with_model( - conversation=conversation, model=self.model - ): + for token in _make_api_call(conversation=conversation, model=self.model): print(token, end="") full_reply_content += token print("\n") # Update number of output tokens - TOTAL_N_TOKENS["output"] += _num_tokens_from_string( - string=full_reply_content, model=self.model - ) + self.token_usage["output"] += self.get_n_tokens(full_reply_content) # Update context with the reply - conversation = context.add_chat_reply( + conversation = self.context.add_chat_reply( conversation=conversation, chat_reply=full_reply_content.strip() ) except (KeyboardInterrupt, EOFError): - print("Exiting chat. 
", end="") + print("Exiting chat.") finally: - model_costs = PRICING_PER_THOUSAND_TOKENS[self.model] - costs = {k: model_costs[k] * v / 1000.0 for k, v in TOTAL_N_TOKENS.items()} - print(f"You have used {sum(TOTAL_N_TOKENS.values())} tokens.") - print(f"The estimated cost is ${sum(costs.values()):.3f}.") + print() + print("Token usage summary:") + for k, v in self.token_usage.items(): + print(f" > {k.capitalize()}: {v}") + print(f" > Total: {sum(self.token_usage.values())}") + costs = {k: v * self.token_price[k] for k, v in self.token_usage.items()} + print(f"Estimated total cost for this chat: ${sum(costs.values()):.3f}.") -def _communicate_with_model(conversation: list, model: str): +def _make_api_call(conversation: list, model: str): success = False while not success: try: diff --git a/pyproject.toml b/pyproject.toml index 2ea0696..030bbf6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,6 +6,10 @@ readme = "README.md" version = "0.1.0" +[build-system] + build-backend = "poetry.core.masonry.api" + requires = ["poetry-core"] + [tool.poetry.scripts] chatgpt = "chat_gpt.__main__:main" @@ -34,6 +38,13 @@ pytest = "^7.4.3" ruff = "^0.1.3" -[build-system] - build-backend = "poetry.core.masonry.api" - requires = ["poetry-core"] + ################## + # Linter configs # + ################## + +[tool.black] + line-length = 90 + +[tool.isort] + line_length = 90 + profile = "black" From 4fcf1358bf0efff2130b0c3e06af4af090e6a53d Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Mon, 30 Oct 2023 12:50:34 +0100 Subject: [PATCH 010/109] Add a database to keep track of usage & costs --- chat_gpt/__init__.py | 1 + chat_gpt/chat_gpt.py | 65 ++++++++++++++++++++++--------- chat_gpt/database.py | 91 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 139 insertions(+), 18 deletions(-) create mode 100644 chat_gpt/database.py diff --git a/chat_gpt/__init__.py b/chat_gpt/__init__.py index 828a97b..50e7422 100644 --- a/chat_gpt/__init__.py +++ b/chat_gpt/__init__.py @@ -10,6 +10,7 @@ class GeneralConstants: _PACKAGE_TMPDIR = tempfile.TemporaryDirectory() PACKAGE_TMPDIR = Path(_PACKAGE_TMPDIR.name) EMBEDDINGS_FILE = PACKAGE_TMPDIR / "embeddings.csv" + TOKEN_USAGE_DATABASE = Path.home() / ".cache" / "chat_gpt" / "token_usage.db" # Initialize the OpenAI API client diff --git a/chat_gpt/chat_gpt.py b/chat_gpt/chat_gpt.py index e3072ac..28834ec 100644 --- a/chat_gpt/chat_gpt.py +++ b/chat_gpt/chat_gpt.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 import ast import csv +import datetime import json from pathlib import Path @@ -11,11 +12,7 @@ from openai.embeddings_utils import distances_from_embeddings from . 
import GeneralConstants - -PRICING_PER_THOUSAND_TOKENS = { - "gpt-3.5-turbo": {"input": 0.0015, "output": 0.002}, - "gpt-4": {"input": 0.03, "output": 0.06}, -} +from .database import TokenUsageDatabase class BaseChatContext: @@ -107,18 +104,16 @@ def __init__( ) self.token_usage = {"input": 0, "output": 0} - self.token_price = { - k: v / 1000.0 for k, v in PRICING_PER_THOUSAND_TOKENS[self.model].items() - } + self.token_usage_db = TokenUsageDatabase( + fpath=GeneralConstants.TOKEN_USAGE_DATABASE, + model=self.model, + ) if send_full_history: self.context = BaseChatContext(parent_chat=self) else: self.context = EmbeddingBasedChatContext(parent_chat=self) - def get_n_tokens(self, string: str) -> int: - return _num_tokens_from_string(string=string, model=self.model) - def start(self): conversation = [ { @@ -161,13 +156,47 @@ def start(self): except (KeyboardInterrupt, EOFError): print("Exiting chat.") finally: - print() - print("Token usage summary:") - for k, v in self.token_usage.items(): - print(f" > {k.capitalize()}: {v}") - print(f" > Total: {sum(self.token_usage.values())}") - costs = {k: v * self.token_price[k] for k, v in self.token_usage.items()} - print(f"Estimated total cost for this chat: ${sum(costs.values()):.3f}.") + self.report_token_usage() + + def get_n_tokens(self, string: str) -> int: + return _num_tokens_from_string(string=string, model=self.model) + + def report_token_usage(self): + print() + print("Token usage summary:") + for k, v in self.token_usage.items(): + print(f" > {k.capitalize()}: {v}") + print(f" > Total: {sum(self.token_usage.values())}") + costs = { + k: v * self.token_usage_db.token_price[k] for k, v in self.token_usage.items() + } + print(f"Estimated total cost for this chat: ${sum(costs.values()):.3f}.") + + # Store token usage to database + self.token_usage_db.create() + self.token_usage_db.insert_data( + n_input_tokens=self.token_usage["input"], + n_output_tokens=self.token_usage["output"], + ) + + accumulated_usage = self.token_usage_db.retrieve_sums() + accumulated_token_usage = { + "input": accumulated_usage["n_input_tokens"], + "output": accumulated_usage["n_output_tokens"], + } + acc_costs = { + "input": accumulated_usage["cost_input_tokens"], + "output": accumulated_usage["cost_output_tokens"], + } + print() + since = datetime.datetime.fromtimestamp( + accumulated_usage["earliest_timestamp"], datetime.timezone.utc + ).isoformat(sep=" ", timespec="seconds") + print(f"Accumulated token usage since {since.replace('+00:00', 'Z')}:") + for k, v in accumulated_token_usage.items(): + print(f" > {k.capitalize()}: {v}") + print(f" > Total: {sum(accumulated_token_usage.values())}") + print(f"Estimated total costs since same date: ${sum(acc_costs.values()):.3f}.") def _make_api_call(conversation: list, model: str): diff --git a/chat_gpt/database.py b/chat_gpt/database.py new file mode 100644 index 0000000..da8a9dc --- /dev/null +++ b/chat_gpt/database.py @@ -0,0 +1,91 @@ +import datetime +import sqlite3 +from pathlib import Path + +PRICING_PER_THOUSAND_TOKENS = { + "gpt-3.5-turbo": {"input": 0.0015, "output": 0.002}, + "gpt-4": {"input": 0.03, "output": 0.06}, +} + + +class TokenUsageDatabase: + def __init__(self, fpath: Path, model: str): + self.fpath = fpath + self.token_price = { + k: v / 1000.0 for k, v in PRICING_PER_THOUSAND_TOKENS[model].items() + } + + # Function to create the database and table + def create(self): + self.fpath.parent.mkdir(parents=True, exist_ok=True) + conn = sqlite3.connect(self.fpath) + cursor = conn.cursor() + + # Create a 
table to store the data with 'timestamp' as the primary key + cursor.execute( + """ + CREATE TABLE IF NOT EXISTS token_costs ( + timestamp REAL PRIMARY KEY, + n_input_tokens INTEGER, + n_output_tokens INTEGER, + cost_input_tokens REAL, + cost_output_tokens REAL + ) + """ + ) + + conn.commit() + conn.close() + + # Function to insert data into the database + def insert_data(self, n_input_tokens, n_output_tokens): + conn = sqlite3.connect(self.fpath) + cursor = conn.cursor() + + # Insert the data into the table + cursor.execute( + """ + INSERT OR REPLACE INTO token_costs + (timestamp, n_input_tokens, n_output_tokens, cost_input_tokens, cost_output_tokens) + VALUES (?, ?, ?, ?, ?) + """, + ( + datetime.datetime.utcnow().timestamp(), + n_input_tokens, + n_output_tokens, + n_input_tokens * self.token_price["input"], + n_output_tokens * self.token_price["output"], + ), + ) + + conn.commit() + conn.close() + + def retrieve_sums(self): + conn = sqlite3.connect(self.fpath) + cursor = conn.cursor() + + # SQL query to calculate the sum of each variable + cursor.execute( + """ + SELECT + MIN(timestamp) AS earliest_timestamp, + SUM(n_input_tokens) AS total_n_input_tokens, + SUM(n_output_tokens) AS total_n_output_tokens, + SUM(cost_input_tokens) AS total_cost_input_tokens, + SUM(cost_output_tokens) AS total_cost_output_tokens + FROM token_costs + """ + ) + + data = cursor.fetchone() + + conn.close() + + return { + "earliest_timestamp": data[0], + "n_input_tokens": data[1], + "n_output_tokens": data[2], + "cost_input_tokens": data[3], + "cost_output_tokens": data[4], + } From 79ef896033fc7f2fdd9c4e24c4e4209fef8222dc Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Mon, 30 Oct 2023 13:44:38 +0100 Subject: [PATCH 011/109] Put chat context code in separate file --- chat_gpt/chat_context.py | 157 +++++++++++++++++++++++++++++++++++++++ chat_gpt/chat_gpt.py | 150 +------------------------------------ 2 files changed, 158 insertions(+), 149 deletions(-) create mode 100644 chat_gpt/chat_context.py diff --git a/chat_gpt/chat_context.py b/chat_gpt/chat_context.py new file mode 100644 index 0000000..ffc2d90 --- /dev/null +++ b/chat_gpt/chat_context.py @@ -0,0 +1,157 @@ +import ast +import csv +import json +from pathlib import Path +from typing import TYPE_CHECKING + +import numpy as np +import openai +import pandas as pd +from openai.embeddings_utils import distances_from_embeddings + +from . 
import GeneralConstants + +if TYPE_CHECKING: + from .chat_gpt import Chat + + +class BaseChatContext: + def __init__(self, parent_chat: "Chat"): + self.parent_chat = parent_chat + + def add_user_input(self, conversation: list, user_input: str): + user_input_msg_obj = { + "role": "user", + "name": self.parent_chat.username, + "content": user_input, + } + conversation.append(user_input_msg_obj) + return conversation + + def add_chat_reply(self, conversation: list, chat_reply: str): + reply_msg_obj = { + "role": "assistant", + "name": self.parent_chat.assistant_name, + "content": chat_reply, + } + conversation.append(reply_msg_obj) + return conversation + + +class EmbeddingBasedChatContext(BaseChatContext): + """Chat context.""" + + def __init__(self, parent_chat: "Chat"): + self.parent_chat = parent_chat + self.context_file_path = GeneralConstants.EMBEDDINGS_FILE + + def add_user_input(self, conversation: list, user_input: str): + user_input_msg_obj = { + "role": "user", + "name": self.parent_chat.username, + "content": user_input, + } + _store_message_to_file( + msg_obj=user_input_msg_obj, file_path=self.context_file_path + ) + intial_ai_instruct_msg = conversation[0] + last_msg_exchange = conversation[-2:] if len(conversation) > 2 else [] + current_context = _find_context( + file_path=self.context_file_path, + parent_chat=self.parent_chat, + option="both", + ) + conversation = [ + intial_ai_instruct_msg, + *last_msg_exchange, + *current_context, + user_input_msg_obj, + ] + return conversation + + def add_chat_reply(self, conversation: list, chat_reply: str): + reply_msg_obj = { + "role": "assistant", + "name": self.parent_chat.assistant_name, + "content": chat_reply, + } + conversation.append(reply_msg_obj) + _store_message_to_file(file_path=self.context_file_path, msg_obj=reply_msg_obj) + return conversation + + +def _store_message_to_file( + msg_obj: dict, file_path: Path = GeneralConstants.EMBEDDINGS_FILE +): + """Store message and embeddings to file.""" + # Adapted from + response = openai.Embedding.create( + model="text-embedding-ada-002", input=msg_obj["content"] + ) + emb_mess_pair = { + "embedding": json.dumps(response["data"][0]["embedding"]), + "message": json.dumps(msg_obj), + } + + init_file = not file_path.exists() or file_path.stat().st_size == 0 + write_mode = "w" if init_file else "a" + + with open(file_path, write_mode, newline="") as file: + writer = csv.DictWriter(file, fieldnames=emb_mess_pair.keys()) + if init_file: + writer.writeheader() + writer.writerow(emb_mess_pair) + + +def _find_context(file_path: Path, parent_chat: "Chat", option="both"): + """Lookup context from file.""" + # Adapted from + if not file_path.exists() or file_path.stat().st_size == 0: + return [] + + df = pd.read_csv(file_path) + df["embedding"] = df.embedding.apply(eval).apply(np.array) + + if option == "both": + message_list_embeddings = df["embedding"].values[:-3] + elif option == "assistant": + message_list_embeddings = df.loc[ + df["message"].apply(lambda x: ast.literal_eval(x)["role"] == "assistant"), + "embedding", + ].values[-1] + elif option == "user": + message_list_embeddings = df.loc[ + df["message"].apply(lambda x: ast.literal_eval(x)["role"] == "user"), + "embedding", + ].values[:-2] + else: + return [] # Return an empty list if no context is found + + query_embedding = df["embedding"].values[-1] + distances = distances_from_embeddings( + query_embedding, message_list_embeddings, distance_metric="L1" + ) + mask = (np.array(distances) < 21.6)[np.argsort(distances)] + + message_array 
= df["message"].iloc[np.argsort(distances)][mask] + message_array = [] if message_array is None else message_array[:4] + + message_objects = [json.loads(message) for message in message_array] + context_for_current_user_query = "" + for msg in message_objects: + context_for_current_user_query += f"{msg['name']}: {msg['content']}\n" + + if not context_for_current_user_query: + return [] + + return [ + { + "role": "system", + "name": parent_chat.system_name, + "content": f"{parent_chat.assistant_name}'s knowledge: " + + f"{context_for_current_user_query} + Previous messages.\n" + + "Only answer last message.", + } + ] diff --git a/chat_gpt/chat_gpt.py b/chat_gpt/chat_gpt.py index 28834ec..3a9d004 100644 --- a/chat_gpt/chat_gpt.py +++ b/chat_gpt/chat_gpt.py @@ -1,85 +1,14 @@ #!/usr/bin/env python3 -import ast -import csv import datetime -import json -from pathlib import Path -import numpy as np import openai -import pandas as pd import tiktoken -from openai.embeddings_utils import distances_from_embeddings from . import GeneralConstants +from .chat_context import BaseChatContext, EmbeddingBasedChatContext from .database import TokenUsageDatabase -class BaseChatContext: - def __init__(self, parent_chat: "Chat"): - self.parent_chat = parent_chat - - def add_user_input(self, conversation: list, user_input: str): - user_input_msg_obj = { - "role": "user", - "name": self.parent_chat.username, - "content": user_input, - } - conversation.append(user_input_msg_obj) - return conversation - - def add_chat_reply(self, conversation: list, chat_reply: str): - reply_msg_obj = { - "role": "assistant", - "name": self.parent_chat.assistant_name, - "content": chat_reply, - } - conversation.append(reply_msg_obj) - return conversation - - -class EmbeddingBasedChatContext(BaseChatContext): - """Chat context.""" - - def __init__(self, parent_chat: "Chat"): - self.parent_chat = parent_chat - self.context_file_path = GeneralConstants.EMBEDDINGS_FILE - - def add_user_input(self, conversation: list, user_input: str): - user_input_msg_obj = { - "role": "user", - "name": self.parent_chat.username, - "content": user_input, - } - _store_message_to_file( - msg_obj=user_input_msg_obj, file_path=self.context_file_path - ) - intial_ai_instruct_msg = conversation[0] - last_msg_exchange = conversation[-2:] if len(conversation) > 2 else [] - current_context = _find_context( - file_path=self.context_file_path, - parent_chat=self.parent_chat, - option="both", - ) - conversation = [ - intial_ai_instruct_msg, - *last_msg_exchange, - *current_context, - user_input_msg_obj, - ] - return conversation - - def add_chat_reply(self, conversation: list, chat_reply: str): - reply_msg_obj = { - "role": "assistant", - "name": self.parent_chat.assistant_name, - "content": chat_reply, - } - conversation.append(reply_msg_obj) - _store_message_to_file(file_path=self.context_file_path, msg_obj=reply_msg_obj) - return conversation - - class Chat: def __init__( self, model: str, base_instructions: str, send_full_history: bool = False @@ -220,83 +149,6 @@ def _make_api_call(conversation: list, model: str): print(f" > {error}. 
Retrying...") -def _store_message_to_file( - msg_obj: dict, file_path: Path = GeneralConstants.EMBEDDINGS_FILE -): - """Store message and embeddings to file.""" - # Adapted from - response = openai.Embedding.create( - model="text-embedding-ada-002", input=msg_obj["content"] - ) - emb_mess_pair = { - "embedding": json.dumps(response["data"][0]["embedding"]), - "message": json.dumps(msg_obj), - } - - init_file = not file_path.exists() or file_path.stat().st_size == 0 - write_mode = "w" if init_file else "a" - - with open(file_path, write_mode, newline="") as file: - writer = csv.DictWriter(file, fieldnames=emb_mess_pair.keys()) - if init_file: - writer.writeheader() - writer.writerow(emb_mess_pair) - - -def _find_context(file_path: Path, parent_chat: Chat, option="both"): - """Lookup context from file.""" - # Adapted from - if not file_path.exists() or file_path.stat().st_size == 0: - return [] - - df = pd.read_csv(file_path) - df["embedding"] = df.embedding.apply(eval).apply(np.array) - - if option == "both": - message_list_embeddings = df["embedding"].values[:-3] - elif option == "assistant": - message_list_embeddings = df.loc[ - df["message"].apply(lambda x: ast.literal_eval(x)["role"] == "assistant"), - "embedding", - ].values[-1] - elif option == "user": - message_list_embeddings = df.loc[ - df["message"].apply(lambda x: ast.literal_eval(x)["role"] == "user"), - "embedding", - ].values[:-2] - else: - return [] # Return an empty list if no context is found - - query_embedding = df["embedding"].values[-1] - distances = distances_from_embeddings( - query_embedding, message_list_embeddings, distance_metric="L1" - ) - mask = (np.array(distances) < 21.6)[np.argsort(distances)] - - message_array = df["message"].iloc[np.argsort(distances)][mask] - message_array = [] if message_array is None else message_array[:4] - - message_objects = [json.loads(message) for message in message_array] - context_for_current_user_query = "" - for msg in message_objects: - context_for_current_user_query += f"{msg['name']}: {msg['content']}\n" - - if not context_for_current_user_query: - return [] - - return [ - { - "role": "system", - "name": parent_chat.system_name, - "content": f"{parent_chat.assistant_name}'s knowledge: " - + f"{context_for_current_user_query} + Previous messages.\n" - + "Only answer last message.", - } - ] - - def _num_tokens_from_string(string: str, model: str) -> int: """Returns the number of tokens in a text string.""" encoding = tiktoken.encoding_for_model(model) From 4d402fd6b6ffd6732f64970b871de36f1b4c2bfc Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Mon, 30 Oct 2023 15:59:35 +0100 Subject: [PATCH 012/109] Refactoring --- chat_gpt/chat_gpt.py | 69 ++++++++++++++++++++++++-------------------- 1 file changed, 38 insertions(+), 31 deletions(-) diff --git a/chat_gpt/chat_gpt.py b/chat_gpt/chat_gpt.py index 3a9d004..c2e61c6 100644 --- a/chat_gpt/chat_gpt.py +++ b/chat_gpt/chat_gpt.py @@ -39,53 +39,60 @@ def __init__( ) if send_full_history: - self.context = BaseChatContext(parent_chat=self) + self.context_handler = BaseChatContext(parent_chat=self) else: - self.context = EmbeddingBasedChatContext(parent_chat=self) + self.context_handler = EmbeddingBasedChatContext(parent_chat=self) - def start(self): - conversation = [ + self.query_context = [ { "role": "system", "name": self.system_name, "content": self.ground_ai_instructions, } ] - try: - while True: - question = input(f"{self.username}: ").strip() - if not question: - continue - # Add context to the conversation - conversation = 
self.context.add_user_input( - conversation=conversation, user_input=question - ) + def __del__(self): + self.report_token_usage() + + def yield_response(self, question: str): + question = question.strip() - # Update number of input tokens - self.token_usage["input"] += sum( - self.get_n_tokens(string=msg["content"]) for msg in conversation - ) + # Add context to the conversation + self.query_context = self.context_handler.add_user_input( + conversation=self.query_context, user_input=question + ) - print(f"{self.assistant_name}: ", end="") - full_reply_content = "" - for token in _make_api_call(conversation=conversation, model=self.model): - print(token, end="") - full_reply_content += token - print("\n") + # Update number of input tokens + self.token_usage["input"] += sum( + self.get_n_tokens(string=msg["content"]) for msg in self.query_context + ) - # Update number of output tokens - self.token_usage["output"] += self.get_n_tokens(full_reply_content) + full_reply_content = "" + for chunk in _make_api_call(conversation=self.query_context, model=self.model): + full_reply_content += chunk + yield chunk - # Update context with the reply - conversation = self.context.add_chat_reply( - conversation=conversation, chat_reply=full_reply_content.strip() - ) + # Update number of output tokens + self.token_usage["output"] += self.get_n_tokens(full_reply_content) + # Update context with the reply + self.query_context = self.context_handler.add_chat_reply( + conversation=self.query_context, chat_reply=full_reply_content.strip() + ) + + def start(self): + try: + while True: + question = input(f"{self.username}: ").strip() + if not question: + continue + print(f"{self.assistant_name}: ", end="", flush=True) + for chunk in self.yield_response(question=question): + print(chunk, end="", flush=True) + print() + print() except (KeyboardInterrupt, EOFError): print("Exiting chat.") - finally: - self.report_token_usage() def get_n_tokens(self, string: str) -> int: return _num_tokens_from_string(string=string, model=self.model) From edb982de9c725d6ca2f17b51cdc383a0fb305292 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Mon, 30 Oct 2023 15:59:48 +0100 Subject: [PATCH 013/109] Add streamlit app --- chat_gpt/app/__init__.py | 0 chat_gpt/app/app.py | 47 ++++++++++++++++++++++++++++++++++++++++ pyproject.toml | 6 ++--- 3 files changed, 50 insertions(+), 3 deletions(-) create mode 100644 chat_gpt/app/__init__.py create mode 100644 chat_gpt/app/app.py diff --git a/chat_gpt/app/__init__.py b/chat_gpt/app/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/chat_gpt/app/app.py b/chat_gpt/app/app.py new file mode 100644 index 0000000..267279d --- /dev/null +++ b/chat_gpt/app/app.py @@ -0,0 +1,47 @@ +# Adapted from +# +import streamlit as st + +from chat_gpt.chat_gpt import Chat + +# Initialize chat. Kepp it throughout the session. 
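(Streamlit reruns this entire script from the top on every user interaction, so any object that must survive across reruns, such as the Chat instance this file builds, has to be parked in st.session_state. A minimal standalone sketch of that keep-alive pattern, assuming nothing beyond streamlit itself; the "counter" key is invented for the illustration:

import streamlit as st

if "counter" not in st.session_state:
    st.session_state["counter"] = 0  # executed only on the first run of a browser session

st.session_state["counter"] += 1     # survives every rerun triggered by user input
st.write(f"Script reruns this session: {st.session_state['counter']}")

The try/except KeyError lookup that follows expresses the same create-once-then-reuse rule.)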
+try: + session_chat = st.session_state["chat"] +except KeyError: + session_chat = Chat( + model="gpt-3.5-turbo", + base_instructions="You answer using the minimum possible number of tokens.", + ) + st.session_state["chat"] = session_chat + +st.title(f"Chat with {session_chat.assistant_name}") + +# Initialize chat history +if "messages" not in st.session_state: + st.session_state.messages = [] + +# Display chat messages from history on app rerun +for message in st.session_state.messages: + with st.chat_message(message["role"]): + st.markdown(message["content"]) + +# Accept user input +if prompt := st.chat_input("Send a message"): + # Add user message to chat history + st.session_state.messages.append({"role": "user", "content": prompt}) + # Display user message in chat message container + with st.chat_message("user"): + st.markdown(prompt) + + # Display assistant response in chat message container + with st.chat_message("assistant"): + message_placeholder = st.empty() + message_placeholder.markdown("▌") # Use blinking cursor to indicate activity + full_response = "" + # Stream assistant response + for chunk in session_chat.yield_response(prompt): + full_response += chunk + message_placeholder.markdown(full_response + "▌") + message_placeholder.markdown(full_response) + # Add assistant response to chat history + st.session_state.messages.append({"role": "assistant", "content": full_response}) diff --git a/pyproject.toml b/pyproject.toml index 030bbf6..3c942b1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,18 +15,18 @@ [tool.poetry.dependencies] # Python version - python = ">=3.9,<3.13" + python = ">=3.9,<3.9.7 || >3.9.7,<3.13" # Deps that should have been openapi deps matplotlib = "^3.8.0" plotly = "^5.18.0" scikit-learn = "^1.3.2" scipy = "^1.11.3" # Other dependencies - flask = "^3.0.0" - flask-bootstrap = "^3.3.7.1" numpy = "^1.26.1" openai = "^0.28.1" pandas = "^2.1.2" + streamlit = "^1.28.0" + streamlit-chat = "^0.1.1" tiktoken = "^0.5.1" [tool.poetry.group.dev.dependencies] From 81df3fc8cd742164300ff346d4c209877f006e57 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Mon, 30 Oct 2023 16:04:22 +0100 Subject: [PATCH 014/109] Add gitignore Using template --- .gitignore | 163 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 163 insertions(+) create mode 100644 .gitignore diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..0a53700 --- /dev/null +++ b/.gitignore @@ -0,0 +1,163 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +.idea/ + +# Vim +*.swp From 8850fdb1daafc32e0b49e0e3f01836a38acdc676 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Mon, 30 Oct 2023 16:59:59 +0100 Subject: [PATCH 015/109] Add model info to cost-tracking database --- chat_gpt/app/app.py | 47 ------------------------------------ chat_gpt/chat_gpt.py | 25 +++++++++++++++++++ chat_gpt/database.py | 57 +++++++++++++++++++++++++++++++------------- 3 files changed, 65 insertions(+), 64 deletions(-) delete mode 100644 chat_gpt/app/app.py diff --git a/chat_gpt/app/app.py b/chat_gpt/app/app.py deleted file mode 100644 index 267279d..0000000 --- a/chat_gpt/app/app.py +++ /dev/null @@ -1,47 +0,0 @@ -# Adapted from -# -import streamlit as st - -from chat_gpt.chat_gpt import Chat - -# Initialize chat. Kepp it throughout the session. 
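(The core of this patch is that every row written to the usage database now records the model that produced it, so per-model totals fall out of a single GROUP BY. A minimal sketch of that aggregation, using an in-memory SQLite database and invented token counts rather than the package's real schema:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE token_costs"
    " (timestamp REAL, model TEXT, n_input_tokens INTEGER, n_output_tokens INTEGER)"
)
conn.executemany(
    "INSERT INTO token_costs VALUES (?, ?, ?, ?)",
    [
        (1.0, "gpt-3.5-turbo", 120, 80),  # figures are made up for the example
        (2.0, "gpt-4", 90, 40),
        (3.0, "gpt-4", 30, 10),
    ],
)
rows = conn.execute(
    "SELECT model, MIN(timestamp), SUM(n_input_tokens), SUM(n_output_tokens)"
    " FROM token_costs GROUP BY model"
).fetchall()
print(rows)  # per-model totals, e.g. [('gpt-3.5-turbo', 1.0, 120, 80), ('gpt-4', 2.0, 120, 50)]

The retrieve_sums_by_model method introduced below follows this shape, with cost columns added on top.)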
-try: - session_chat = st.session_state["chat"] -except KeyError: - session_chat = Chat( - model="gpt-3.5-turbo", - base_instructions="You answer using the minimum possible number of tokens.", - ) - st.session_state["chat"] = session_chat - -st.title(f"Chat with {session_chat.assistant_name}") - -# Initialize chat history -if "messages" not in st.session_state: - st.session_state.messages = [] - -# Display chat messages from history on app rerun -for message in st.session_state.messages: - with st.chat_message(message["role"]): - st.markdown(message["content"]) - -# Accept user input -if prompt := st.chat_input("Send a message"): - # Add user message to chat history - st.session_state.messages.append({"role": "user", "content": prompt}) - # Display user message in chat message container - with st.chat_message("user"): - st.markdown(prompt) - - # Display assistant response in chat message container - with st.chat_message("assistant"): - message_placeholder = st.empty() - message_placeholder.markdown("▌") # Use blinking cursor to indicate activity - full_response = "" - # Stream assistant response - for chunk in session_chat.yield_response(prompt): - full_response += chunk - message_placeholder.markdown(full_response + "▌") - message_placeholder.markdown(full_response) - # Add assistant response to chat history - st.session_state.messages.append({"role": "assistant", "content": full_response}) diff --git a/chat_gpt/chat_gpt.py b/chat_gpt/chat_gpt.py index c2e61c6..00facb1 100644 --- a/chat_gpt/chat_gpt.py +++ b/chat_gpt/chat_gpt.py @@ -115,6 +115,31 @@ def report_token_usage(self): n_output_tokens=self.token_usage["output"], ) + for ( + model, + accumulated_usage, + ) in self.token_usage_db.retrieve_sums_by_model().items(): + accumulated_token_usage = { + "input": accumulated_usage["n_input_tokens"], + "output": accumulated_usage["n_output_tokens"], + } + acc_costs = { + "input": accumulated_usage["cost_input_tokens"], + "output": accumulated_usage["cost_output_tokens"], + } + print() + print(f"Model: {model}") + since = datetime.datetime.fromtimestamp( + accumulated_usage["earliest_timestamp"], datetime.timezone.utc + ).isoformat(sep=" ", timespec="seconds") + print(f"Accumulated token usage since {since.replace('+00:00', 'Z')}:") + for k, v in accumulated_token_usage.items(): + print(f" > {k.capitalize()}: {v}") + print(f" > Total: {sum(accumulated_token_usage.values())}") + print( + f"Estimated total costs since same date: ${sum(acc_costs.values()):.3f}." 
+ ) + accumulated_usage = self.token_usage_db.retrieve_sums() accumulated_token_usage = { "input": accumulated_usage["n_input_tokens"], diff --git a/chat_gpt/database.py b/chat_gpt/database.py index da8a9dc..73f5aff 100644 --- a/chat_gpt/database.py +++ b/chat_gpt/database.py @@ -1,5 +1,6 @@ import datetime import sqlite3 +from collections import defaultdict from pathlib import Path PRICING_PER_THOUSAND_TOKENS = { @@ -11,9 +12,9 @@ class TokenUsageDatabase: def __init__(self, fpath: Path, model: str): self.fpath = fpath - self.token_price = { - k: v / 1000.0 for k, v in PRICING_PER_THOUSAND_TOKENS[model].items() - } + self.model = model.strip() + pricing = PRICING_PER_THOUSAND_TOKENS[self.model] + self.token_price = {k: v / 1000.0 for k, v in pricing.items()} # Function to create the database and table def create(self): @@ -26,6 +27,7 @@ def create(self): """ CREATE TABLE IF NOT EXISTS token_costs ( timestamp REAL PRIMARY KEY, + model TEXT, n_input_tokens INTEGER, n_output_tokens INTEGER, cost_input_tokens REAL, @@ -45,12 +47,19 @@ def insert_data(self, n_input_tokens, n_output_tokens): # Insert the data into the table cursor.execute( """ - INSERT OR REPLACE INTO token_costs - (timestamp, n_input_tokens, n_output_tokens, cost_input_tokens, cost_output_tokens) - VALUES (?, ?, ?, ?, ?) + INSERT OR REPLACE INTO token_costs ( + timestamp, + model, + n_input_tokens, + n_output_tokens, + cost_input_tokens, + cost_output_tokens + ) + VALUES (?, ?, ?, ?, ?, ?) """, ( datetime.datetime.utcnow().timestamp(), + self.model, n_input_tokens, n_output_tokens, n_input_tokens * self.token_price["input"], @@ -61,31 +70,45 @@ def insert_data(self, n_input_tokens, n_output_tokens): conn.commit() conn.close() - def retrieve_sums(self): + def retrieve_sums_by_model(self): conn = sqlite3.connect(self.fpath) cursor = conn.cursor() - # SQL query to calculate the sum of each variable cursor.execute( """ SELECT + model, MIN(timestamp) AS earliest_timestamp, SUM(n_input_tokens) AS total_n_input_tokens, SUM(n_output_tokens) AS total_n_output_tokens, SUM(cost_input_tokens) AS total_cost_input_tokens, SUM(cost_output_tokens) AS total_cost_output_tokens FROM token_costs - """ + GROUP BY model + """ ) - data = cursor.fetchone() + data = cursor.fetchall() conn.close() - return { - "earliest_timestamp": data[0], - "n_input_tokens": data[1], - "n_output_tokens": data[2], - "cost_input_tokens": data[3], - "cost_output_tokens": data[4], - } + sums_by_model = {} + for row in data: + model_name = row[0] + sums = { + "earliest_timestamp": row[1], + "n_input_tokens": row[2], + "n_output_tokens": row[3], + "cost_input_tokens": row[4], + "cost_output_tokens": row[5], + } + sums_by_model[model_name] = sums + + return sums_by_model + + def retrieve_sums(self): + sums = defaultdict(int) + for sums_by_model in self.retrieve_sums_by_model().values(): + for k, v in sums_by_model.items(): + sums[k] += v + return sums From dab7beab085cd82b579428104d7d2154dcc7b00d Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Mon, 30 Oct 2023 17:35:01 +0100 Subject: [PATCH 016/109] Refactor token-related code --- chat_gpt/chat_gpt.py | 85 ++++------------------------- chat_gpt/{database.py => tokens.py} | 58 +++++++++++++++++++- 2 files changed, 67 insertions(+), 76 deletions(-) rename chat_gpt/{database.py => tokens.py} (59%) diff --git a/chat_gpt/chat_gpt.py b/chat_gpt/chat_gpt.py index 00facb1..2b0d22e 100644 --- a/chat_gpt/chat_gpt.py +++ b/chat_gpt/chat_gpt.py @@ -1,12 +1,9 @@ #!/usr/bin/env python3 -import datetime - import openai -import 
tiktoken from . import GeneralConstants from .chat_context import BaseChatContext, EmbeddingBasedChatContext -from .database import TokenUsageDatabase +from .tokens import TokenUsageDatabase class Chat: @@ -52,6 +49,11 @@ def __init__( ] def __del__(self): + # Store token usage to database + self.token_usage_db.insert_data( + n_input_tokens=self.token_usage["input"], + n_output_tokens=self.token_usage["output"], + ) self.report_token_usage() def yield_response(self, question: str): @@ -64,7 +66,8 @@ def yield_response(self, question: str): # Update number of input tokens self.token_usage["input"] += sum( - self.get_n_tokens(string=msg["content"]) for msg in self.query_context + self.token_usage_db.get_n_tokens(string=msg["content"]) + for msg in self.query_context ) full_reply_content = "" @@ -73,7 +76,7 @@ def yield_response(self, question: str): yield chunk # Update number of output tokens - self.token_usage["output"] += self.get_n_tokens(full_reply_content) + self.token_usage["output"] += self.token_usage_db.get_n_tokens(full_reply_content) # Update context with the reply self.query_context = self.context_handler.add_chat_reply( @@ -94,70 +97,8 @@ def start(self): except (KeyboardInterrupt, EOFError): print("Exiting chat.") - def get_n_tokens(self, string: str) -> int: - return _num_tokens_from_string(string=string, model=self.model) - def report_token_usage(self): - print() - print("Token usage summary:") - for k, v in self.token_usage.items(): - print(f" > {k.capitalize()}: {v}") - print(f" > Total: {sum(self.token_usage.values())}") - costs = { - k: v * self.token_usage_db.token_price[k] for k, v in self.token_usage.items() - } - print(f"Estimated total cost for this chat: ${sum(costs.values()):.3f}.") - - # Store token usage to database - self.token_usage_db.create() - self.token_usage_db.insert_data( - n_input_tokens=self.token_usage["input"], - n_output_tokens=self.token_usage["output"], - ) - - for ( - model, - accumulated_usage, - ) in self.token_usage_db.retrieve_sums_by_model().items(): - accumulated_token_usage = { - "input": accumulated_usage["n_input_tokens"], - "output": accumulated_usage["n_output_tokens"], - } - acc_costs = { - "input": accumulated_usage["cost_input_tokens"], - "output": accumulated_usage["cost_output_tokens"], - } - print() - print(f"Model: {model}") - since = datetime.datetime.fromtimestamp( - accumulated_usage["earliest_timestamp"], datetime.timezone.utc - ).isoformat(sep=" ", timespec="seconds") - print(f"Accumulated token usage since {since.replace('+00:00', 'Z')}:") - for k, v in accumulated_token_usage.items(): - print(f" > {k.capitalize()}: {v}") - print(f" > Total: {sum(accumulated_token_usage.values())}") - print( - f"Estimated total costs since same date: ${sum(acc_costs.values()):.3f}." 
- ) - - accumulated_usage = self.token_usage_db.retrieve_sums() - accumulated_token_usage = { - "input": accumulated_usage["n_input_tokens"], - "output": accumulated_usage["n_output_tokens"], - } - acc_costs = { - "input": accumulated_usage["cost_input_tokens"], - "output": accumulated_usage["cost_output_tokens"], - } - print() - since = datetime.datetime.fromtimestamp( - accumulated_usage["earliest_timestamp"], datetime.timezone.utc - ).isoformat(sep=" ", timespec="seconds") - print(f"Accumulated token usage since {since.replace('+00:00', 'Z')}:") - for k, v in accumulated_token_usage.items(): - print(f" > {k.capitalize()}: {v}") - print(f" > Total: {sum(accumulated_token_usage.values())}") - print(f"Estimated total costs since same date: ${sum(acc_costs.values()):.3f}.") + self.token_usage_db.print_usage_costs(self.token_usage) def _make_api_call(conversation: list, model: str): @@ -179,9 +120,3 @@ def _make_api_call(conversation: list, model: str): openai.error.Timeout, ) as error: print(f" > {error}. Retrying...") - - -def _num_tokens_from_string(string: str, model: str) -> int: - """Returns the number of tokens in a text string.""" - encoding = tiktoken.encoding_for_model(model) - return len(encoding.encode(string)) diff --git a/chat_gpt/database.py b/chat_gpt/tokens.py similarity index 59% rename from chat_gpt/database.py rename to chat_gpt/tokens.py index 73f5aff..4b98981 100644 --- a/chat_gpt/database.py +++ b/chat_gpt/tokens.py @@ -3,6 +3,8 @@ from collections import defaultdict from pathlib import Path +import tiktoken + PRICING_PER_THOUSAND_TOKENS = { "gpt-3.5-turbo": {"input": 0.0015, "output": 0.002}, "gpt-4": {"input": 0.03, "output": 0.06}, @@ -15,8 +17,11 @@ def __init__(self, fpath: Path, model: str): self.model = model.strip() pricing = PRICING_PER_THOUSAND_TOKENS[self.model] self.token_price = {k: v / 1000.0 for k, v in pricing.items()} + self.create() + + def get_n_tokens(self, string: str) -> int: + return _num_tokens_from_string(string=string, model=self.model) - # Function to create the database and table def create(self): self.fpath.parent.mkdir(parents=True, exist_ok=True) conn = sqlite3.connect(self.fpath) @@ -112,3 +117,54 @@ def retrieve_sums(self): for k, v in sums_by_model.items(): sums[k] += v return sums + + def print_usage_costs(self, token_usage: dict): + print() + print("=======================================================") + print("Summary of OpenAI API token usage and associated costs:") + print("=======================================================") + + for model, accumulated_usage in self.retrieve_sums_by_model().items(): + _print_accumulated_token_usage( + accumulated_usage=accumulated_usage, model=model + ) + + _print_accumulated_token_usage(accumulated_usage=self.retrieve_sums()) + + print() + print("Token usage summary for this chat:") + for k, v in token_usage.items(): + print(f" > {k.capitalize()}: {v}") + print(f" > Total: {sum(token_usage.values())}") + costs = {k: v * self.token_price[k] for k, v in token_usage.items()} + print(f"Estimated total cost for this chat: ${sum(costs.values()):.3f}.") + + +def _num_tokens_from_string(string: str, model: str) -> int: + """Returns the number of tokens in a text string.""" + encoding = tiktoken.encoding_for_model(model) + return len(encoding.encode(string)) + + +def _print_accumulated_token_usage(accumulated_usage: dict, model: str = None): + print() + if model is not None: + print(f"Model: {model}") + + since = datetime.datetime.fromtimestamp( + accumulated_usage["earliest_timestamp"], 
datetime.timezone.utc + ).isoformat(sep=" ", timespec="seconds") + print(f"Accumulated token usage since {since.replace('+00:00', 'Z')}:") + + accumulated_token_usage = { + "input": accumulated_usage["n_input_tokens"], + "output": accumulated_usage["n_output_tokens"], + } + acc_costs = { + "input": accumulated_usage["cost_input_tokens"], + "output": accumulated_usage["cost_output_tokens"], + } + for k, v in accumulated_token_usage.items(): + print(f" > {k.capitalize()}: {v}") + print(f" > Total: {sum(accumulated_token_usage.values())}") + print(f"Estimated total costs since same date: ${sum(acc_costs.values()):.3f}.") From c174e29de02e382a204484b3e83e0ef75185e315 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Mon, 30 Oct 2023 17:41:33 +0100 Subject: [PATCH 017/109] rename chat_gpt mod to just chat --- chat_gpt/__main__.py | 2 +- chat_gpt/app/app | 48 +++++++++++++++++++++++++++++++ chat_gpt/{chat_gpt.py => chat.py} | 0 chat_gpt/chat_context.py | 2 +- 4 files changed, 50 insertions(+), 2 deletions(-) create mode 100755 chat_gpt/app/app rename chat_gpt/{chat_gpt.py => chat.py} (100%) diff --git a/chat_gpt/__main__.py b/chat_gpt/__main__.py index 2c504cf..a38cb46 100644 --- a/chat_gpt/__main__.py +++ b/chat_gpt/__main__.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 """Program's entry point.""" from .argparse_wrapper import get_parsed_args -from .chat_gpt import Chat +from .chat import Chat def main(argv=None): diff --git a/chat_gpt/app/app b/chat_gpt/app/app new file mode 100755 index 0000000..ec11396 --- /dev/null +++ b/chat_gpt/app/app @@ -0,0 +1,48 @@ +#!/usr/bin/env -S streamlit run +# Adapted from +# +import streamlit as st + +from chat_gpt.chat import Chat + +# Initialize chat. Kepp it throughout the session. +try: + session_chat = st.session_state["chat"] +except KeyError: + session_chat = Chat( + model="gpt-3.5-turbo", + base_instructions="You answer using the minimum possible number of tokens.", + ) + st.session_state["chat"] = session_chat + +st.title(f"Chat with {session_chat.assistant_name}") + +# Initialize chat history +if "messages" not in st.session_state: + st.session_state.messages = [] + +# Display chat messages from history on app rerun +for message in st.session_state.messages: + with st.chat_message(message["role"]): + st.markdown(message["content"]) + +# Accept user input +if prompt := st.chat_input("Send a message"): + # Add user message to chat history + st.session_state.messages.append({"role": "user", "content": prompt}) + # Display user message in chat message container + with st.chat_message("user"): + st.markdown(prompt) + + # Display assistant response in chat message container + with st.chat_message("assistant"): + message_placeholder = st.empty() + message_placeholder.markdown("▌") # Use blinking cursor to indicate activity + full_response = "" + # Stream assistant response + for chunk in session_chat.yield_response(prompt): + full_response += chunk + message_placeholder.markdown(full_response + "▌") + message_placeholder.markdown(full_response) + # Add assistant response to chat history + st.session_state.messages.append({"role": "assistant", "content": full_response}) diff --git a/chat_gpt/chat_gpt.py b/chat_gpt/chat.py similarity index 100% rename from chat_gpt/chat_gpt.py rename to chat_gpt/chat.py diff --git a/chat_gpt/chat_context.py b/chat_gpt/chat_context.py index ffc2d90..8d9a706 100644 --- a/chat_gpt/chat_context.py +++ b/chat_gpt/chat_context.py @@ -12,7 +12,7 @@ from . 
import GeneralConstants if TYPE_CHECKING: - from .chat_gpt import Chat + from .chat import Chat class BaseChatContext: From dcb2a63eca7c2f00cbe31283a5771aef9d78fc8f Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Mon, 30 Oct 2023 19:28:46 +0100 Subject: [PATCH 018/109] Add terminal/browser cdms. --- chat_gpt/__init__.py | 13 +++++++++- chat_gpt/__main__.py | 8 +----- chat_gpt/app/{app => app.py} | 13 +++++++--- chat_gpt/argparse_wrapper.py | 45 ++++++++++++++++++++++++++++----- chat_gpt/command_definitions.py | 33 ++++++++++++++++++++++++ 5 files changed, 94 insertions(+), 18 deletions(-) rename chat_gpt/app/{app => app.py} (85%) mode change 100755 => 100644 create mode 100644 chat_gpt/command_definitions.py diff --git a/chat_gpt/__init__.py b/chat_gpt/__init__.py index 50e7422..aa26a51 100644 --- a/chat_gpt/__init__.py +++ b/chat_gpt/__init__.py @@ -1,16 +1,27 @@ #!/usr/bin/env python3 import os import tempfile +import uuid +from importlib.metadata import version from pathlib import Path import openai class GeneralConstants: + PACKAGE_NAME = __name__ + VERSION = version(__name__) + PACKAGE_DIRECTORY = Path(__file__).parent + RUN_ID = uuid.uuid4().hex + PACKAGE_CACHE_DIRECTORY = Path.home() / ".cache" / PACKAGE_NAME _PACKAGE_TMPDIR = tempfile.TemporaryDirectory() PACKAGE_TMPDIR = Path(_PACKAGE_TMPDIR.name) EMBEDDINGS_FILE = PACKAGE_TMPDIR / "embeddings.csv" - TOKEN_USAGE_DATABASE = Path.home() / ".cache" / "chat_gpt" / "token_usage.db" + PARSED_ARGS_FILE = PACKAGE_TMPDIR / f"parsed_args_{RUN_ID}.pkl" + TOKEN_USAGE_DATABASE = PACKAGE_CACHE_DIRECTORY / "token_usage.db" + + PACKAGE_TMPDIR.mkdir(parents=True, exist_ok=True) + PACKAGE_CACHE_DIRECTORY.mkdir(parents=True, exist_ok=True) # Initialize the OpenAI API client diff --git a/chat_gpt/__main__.py b/chat_gpt/__main__.py index a38cb46..b90a2ef 100644 --- a/chat_gpt/__main__.py +++ b/chat_gpt/__main__.py @@ -1,18 +1,12 @@ #!/usr/bin/env python3 """Program's entry point.""" from .argparse_wrapper import get_parsed_args -from .chat import Chat def main(argv=None): """Program's main routine.""" args = get_parsed_args(argv=argv) - chat = Chat( - model=args.model, - base_instructions=args.initial_ai_instructions, - send_full_history=args.send_full_history, - ) - chat.start() + args.run_command(args=args) if __name__ == "__main__": diff --git a/chat_gpt/app/app b/chat_gpt/app/app.py old mode 100755 new mode 100644 similarity index 85% rename from chat_gpt/app/app rename to chat_gpt/app/app.py index ec11396..c2273bc --- a/chat_gpt/app/app +++ b/chat_gpt/app/app.py @@ -1,6 +1,9 @@ -#!/usr/bin/env -S streamlit run +#!/usr/bin/env python3 # Adapted from # +import pickle +import sys + import streamlit as st from chat_gpt.chat import Chat @@ -9,12 +12,16 @@ try: session_chat = st.session_state["chat"] except KeyError: + parsed_args_file = sys.argv[-1] + with open(parsed_args_file, "rb") as parsed_args_file: + args = pickle.load(parsed_args_file) session_chat = Chat( - model="gpt-3.5-turbo", - base_instructions="You answer using the minimum possible number of tokens.", + model=args.model, + base_instructions=args.initial_ai_instructions, ) st.session_state["chat"] = session_chat + st.title(f"Chat with {session_chat.assistant_name}") # Initialize chat history diff --git a/chat_gpt/argparse_wrapper.py b/chat_gpt/argparse_wrapper.py index a4a5ca6..989107a 100644 --- a/chat_gpt/argparse_wrapper.py +++ b/chat_gpt/argparse_wrapper.py @@ -3,6 +3,8 @@ import argparse import sys +from .command_definitions import run_on_browser, run_on_terminal + def 
get_parsed_args(argv=None): """Get parsed command line arguments. @@ -16,23 +18,52 @@ def get_parsed_args(argv=None): """ if argv is None: argv = sys.argv[1:] + if not argv: + argv = ["browser"] - parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - parser.add_argument( + common_parser = argparse.ArgumentParser(add_help=False) + common_parser.add_argument( "initial_ai_instructions", type=str, default="You answer using the minimum possible number of tokens.", help="Initial instructions for the AI", nargs="?", ) - parser.add_argument( + common_parser.add_argument( "--model", type=str, default="gpt-3.5-turbo", choices=["gpt-3.5-turbo", "gpt-4"], help="OpenAI API engine to use for completion", ) - parser.add_argument("--send-full-history", action="store_true") - return parser.parse_args(argv) + common_parser.add_argument("--send-full-history", action="store_true") + + main_parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + # Configure the main parser to handle the commands + subparsers = main_parser.add_subparsers( + title="commands", + dest="command", + required=True, + description=( + "Valid commands (note that commands also accept their " + + "own arguments, in particular [-h]):" + ), + help="command description", + ) + + parser_browser = subparsers.add_parser( + "browser", parents=[common_parser], help="Run the chat on the browser." + ) + parser_browser.set_defaults(run_command=run_on_browser) + + parser_terminal = subparsers.add_parser( + "terminal", + parents=[common_parser], + help="Run the chat on the terminal.", + ) + parser_terminal.set_defaults(run_command=run_on_terminal) + + return main_parser.parse_args(argv) diff --git a/chat_gpt/command_definitions.py b/chat_gpt/command_definitions.py new file mode 100644 index 0000000..1fdfb0b --- /dev/null +++ b/chat_gpt/command_definitions.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python3 +import contextlib +import pickle +from subprocess import run + +from . 
import GeneralConstants +from .chat import Chat + + +def run_on_terminal(args): + """Program's main routine.""" + chat = Chat( + model=args.model, + base_instructions=args.initial_ai_instructions, + send_full_history=args.send_full_history, + ) + chat.start() + + +def run_on_browser(args): + with open(GeneralConstants.PARSED_ARGS_FILE, "wb") as parsed_args_file: + pickle.dump(args, parsed_args_file) + app_path = GeneralConstants.PACKAGE_DIRECTORY / "app" / "app.py" + with contextlib.suppress(KeyboardInterrupt): + run( + [ + "streamlit", + "run", + app_path.as_posix(), + "--", + GeneralConstants.PARSED_ARGS_FILE.as_posix(), + ] + ) From 5db150018cbf1e5f20c4cd8d259f296f9f95da53 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Mon, 30 Oct 2023 19:36:46 +0100 Subject: [PATCH 019/109] Use dark theme for app --- chat_gpt/command_definitions.py | 1 + 1 file changed, 1 insertion(+) diff --git a/chat_gpt/command_definitions.py b/chat_gpt/command_definitions.py index 1fdfb0b..db17dbc 100644 --- a/chat_gpt/command_definitions.py +++ b/chat_gpt/command_definitions.py @@ -27,6 +27,7 @@ def run_on_browser(args): "streamlit", "run", app_path.as_posix(), + "--theme.base=dark", "--", GeneralConstants.PARSED_ARGS_FILE.as_posix(), ] From ac850187aed97ba4da563f8da779d726d76b6a89 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Mon, 30 Oct 2023 21:44:11 +0100 Subject: [PATCH 020/109] Add `show-costs` command --- chat_gpt/app/app.py | 5 +---- chat_gpt/argparse_wrapper.py | 10 +++++++++- chat_gpt/chat.py | 20 ++++++++++++++++++-- chat_gpt/command_definitions.py | 15 ++++++++------- chat_gpt/tokens.py | 16 ++++++++++------ 5 files changed, 46 insertions(+), 20 deletions(-) diff --git a/chat_gpt/app/app.py b/chat_gpt/app/app.py index c2273bc..a24823e 100644 --- a/chat_gpt/app/app.py +++ b/chat_gpt/app/app.py @@ -15,10 +15,7 @@ parsed_args_file = sys.argv[-1] with open(parsed_args_file, "rb") as parsed_args_file: args = pickle.load(parsed_args_file) - session_chat = Chat( - model=args.model, - base_instructions=args.initial_ai_instructions, - ) + session_chat = Chat.from_cli_args(cli_args=args) st.session_state["chat"] = session_chat diff --git a/chat_gpt/argparse_wrapper.py b/chat_gpt/argparse_wrapper.py index 989107a..c5069dc 100644 --- a/chat_gpt/argparse_wrapper.py +++ b/chat_gpt/argparse_wrapper.py @@ -3,7 +3,7 @@ import argparse import sys -from .command_definitions import run_on_browser, run_on_terminal +from .command_definitions import run_on_browser, run_on_terminal, show_accumulated_costs def get_parsed_args(argv=None): @@ -37,6 +37,7 @@ def get_parsed_args(argv=None): help="OpenAI API engine to use for completion", ) common_parser.add_argument("--send-full-history", action="store_true") + common_parser.add_argument("--skip-reporting-costs", action="store_true") main_parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter @@ -66,4 +67,11 @@ def get_parsed_args(argv=None): ) parser_terminal.set_defaults(run_command=run_on_terminal) + parser_show_costs = subparsers.add_parser( + "show-costs", + parents=[common_parser], + help="Show the number of tokens used for each message.", + ) + parser_show_costs.set_defaults(run_command=show_accumulated_costs) + return main_parser.parse_args(argv) diff --git a/chat_gpt/chat.py b/chat_gpt/chat.py index 2b0d22e..af7d11a 100644 --- a/chat_gpt/chat.py +++ b/chat_gpt/chat.py @@ -8,7 +8,11 @@ class Chat: def __init__( - self, model: str, base_instructions: str, send_full_history: bool = False + self, + model: str, + 
base_instructions: str, + send_full_history: bool = False, + report_estimated_costs_when_done: bool = True, ): self.model = model self.username = "chat_user" @@ -40,6 +44,8 @@ def __init__( else: self.context_handler = EmbeddingBasedChatContext(parent_chat=self) + self.report_estimated_costs_when_done = report_estimated_costs_when_done + self.query_context = [ { "role": "system", @@ -54,7 +60,17 @@ def __del__(self): n_input_tokens=self.token_usage["input"], n_output_tokens=self.token_usage["output"], ) - self.report_token_usage() + if self.report_estimated_costs_when_done: + self.report_token_usage() + + @classmethod + def from_cli_args(cls, cli_args): + return cls( + model=cli_args.model, + base_instructions=cli_args.initial_ai_instructions, + send_full_history=cli_args.send_full_history, + report_estimated_costs_when_done=not cli_args.skip_reporting_costs, + ) def yield_response(self, question: str): question = question.strip() diff --git a/chat_gpt/command_definitions.py b/chat_gpt/command_definitions.py index db17dbc..71ee2da 100644 --- a/chat_gpt/command_definitions.py +++ b/chat_gpt/command_definitions.py @@ -7,17 +7,18 @@ from .chat import Chat +def show_accumulated_costs(args): + """Show the accumulated costs of the chat and exit.""" + Chat.from_cli_args(cli_args=args) + + def run_on_terminal(args): - """Program's main routine.""" - chat = Chat( - model=args.model, - base_instructions=args.initial_ai_instructions, - send_full_history=args.send_full_history, - ) - chat.start() + """Run the chat on the terminal.""" + Chat.from_cli_args(cli_args=args).start() def run_on_browser(args): + """Run the chat on the browser.""" with open(GeneralConstants.PARSED_ARGS_FILE, "wb") as parsed_args_file: pickle.dump(args, parsed_args_file) app_path = GeneralConstants.PACKAGE_DIRECTORY / "app" / "app.py" diff --git a/chat_gpt/tokens.py b/chat_gpt/tokens.py index 4b98981..0a6058d 100644 --- a/chat_gpt/tokens.py +++ b/chat_gpt/tokens.py @@ -132,12 +132,16 @@ def print_usage_costs(self, token_usage: dict): _print_accumulated_token_usage(accumulated_usage=self.retrieve_sums()) print() - print("Token usage summary for this chat:") - for k, v in token_usage.items(): - print(f" > {k.capitalize()}: {v}") - print(f" > Total: {sum(token_usage.values())}") - costs = {k: v * self.token_price[k] for k, v in token_usage.items()} - print(f"Estimated total cost for this chat: ${sum(costs.values()):.3f}.") + total_tokens = sum(token_usage.values()) + if total_tokens: + print("Token usage summary for this chat:") + for k, v in token_usage.items(): + print(f" > {k.capitalize()}: {v}") + print(f" > Total: {total_tokens}") + costs = {k: v * self.token_price[k] for k, v in token_usage.items()} + print(f"Estimated total cost for this chat: ${sum(costs.values()):.3f}.") + else: + print("> No tokens were exchanged in this interaction.") def _num_tokens_from_string(string: str, model: str) -> int: From 0b9c7f2c409af1e29357736cacc82d93020f3b64 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Tue, 31 Oct 2023 00:56:13 +0100 Subject: [PATCH 021/109] Cost reports are now tabular --- chat_gpt/chat.py | 4 +- chat_gpt/command_definitions.py | 3 +- chat_gpt/tokens.py | 129 +++++++++++++++++++++----------- 3 files changed, 88 insertions(+), 48 deletions(-) diff --git a/chat_gpt/chat.py b/chat_gpt/chat.py index af7d11a..5de45c8 100644 --- a/chat_gpt/chat.py +++ b/chat_gpt/chat.py @@ -113,8 +113,8 @@ def start(self): except (KeyboardInterrupt, EOFError): print("Exiting chat.") - def report_token_usage(self): - 
self.token_usage_db.print_usage_costs(self.token_usage) + def report_token_usage(self, current_chat: bool = True): + self.token_usage_db.print_usage_costs(self.token_usage, current_chat=current_chat) def _make_api_call(conversation: list, model: str): diff --git a/chat_gpt/command_definitions.py b/chat_gpt/command_definitions.py index 71ee2da..b729210 100644 --- a/chat_gpt/command_definitions.py +++ b/chat_gpt/command_definitions.py @@ -9,7 +9,8 @@ def show_accumulated_costs(args): """Show the accumulated costs of the chat and exit.""" - Chat.from_cli_args(cli_args=args) + args.skip_reporting_costs = True + Chat.from_cli_args(cli_args=args).report_token_usage(current_chat=False) def run_on_terminal(args): diff --git a/chat_gpt/tokens.py b/chat_gpt/tokens.py index 0a6058d..d279796 100644 --- a/chat_gpt/tokens.py +++ b/chat_gpt/tokens.py @@ -3,6 +3,7 @@ from collections import defaultdict from pathlib import Path +import pandas as pd import tiktoken PRICING_PER_THOUSAND_TOKENS = { @@ -118,30 +119,73 @@ def retrieve_sums(self): sums[k] += v return sums - def print_usage_costs(self, token_usage: dict): - print() - print("=======================================================") - print("Summary of OpenAI API token usage and associated costs:") - print("=======================================================") - - for model, accumulated_usage in self.retrieve_sums_by_model().items(): - _print_accumulated_token_usage( - accumulated_usage=accumulated_usage, model=model - ) - - _print_accumulated_token_usage(accumulated_usage=self.retrieve_sums()) + def get_usage_balance_dataframe(self): + sums_by_model = self.retrieve_sums_by_model() + df_rows = [] + for model, accumulated_usage in sums_by_model.items(): + accumulated_tokens_usage = { + "input": accumulated_usage["n_input_tokens"], + "output": accumulated_usage["n_output_tokens"], + } + accumlated_costs = { + "input": accumulated_usage["cost_input_tokens"], + "output": accumulated_usage["cost_output_tokens"], + } + first_used = datetime.datetime.fromtimestamp( + accumulated_usage["earliest_timestamp"], datetime.timezone.utc + ).isoformat(sep=" ", timespec="seconds") + df_row = { + "Model": model, + "First Registered Use": first_used.replace("+00:00", "Z"), + "Tokens: Input": accumulated_tokens_usage["input"], + "Tokens: Output": accumulated_tokens_usage["output"], + "Tokens: Total": sum(accumulated_tokens_usage.values()), + "Cost ($): Input": accumlated_costs["input"], + "Cost ($): Output": accumlated_costs["output"], + "Cost ($): Total": sum(accumlated_costs.values()), + } + df_rows.append(df_row) + + df = _group_columns_by_prefix(pd.DataFrame(df_rows)) + df = _add_totals_row(df) + + return df + + def get_current_chat_usage_dataframe(self, token_usage: dict): + costs = {k: v * self.token_price[k] for k, v in token_usage.items()} + df_row = { + "Model": self.model, + "Tokens: Input": token_usage["input"], + "Tokens: Output": token_usage["output"], + "Tokens: Total": sum(token_usage.values()), + "Cost ($): Input": costs["input"], + "Cost ($): Output": costs["output"], + "Cost ($): Total": sum(costs.values()), + } + df = pd.DataFrame([df_row]) + df = _group_columns_by_prefix(df.set_index("Model")) + + return df + + def print_usage_costs(self, token_usage: dict, current_chat: bool = True): + header_start = "Estimated token usage and associated costs" + header2dataframe = { + f"{header_start}: Accumulated": self.get_usage_balance_dataframe(), + f"{header_start}: Current Chat": self.get_current_chat_usage_dataframe( + token_usage + ), + } - print() - 
total_tokens = sum(token_usage.values()) - if total_tokens: - print("Token usage summary for this chat:") - for k, v in token_usage.items(): - print(f" > {k.capitalize()}: {v}") - print(f" > Total: {total_tokens}") - costs = {k: v * self.token_price[k] for k, v in token_usage.items()} - print(f"Estimated total cost for this chat: ${sum(costs.values()):.3f}.") - else: - print("> No tokens were exchanged in this interaction.") + for header, df in header2dataframe.items(): + if "Current" in header and not current_chat: + continue + underline = "-" * len(header) + print() + print(underline) + print(header) + print(underline) + print(df) + print() def _num_tokens_from_string(string: str, model: str) -> int: @@ -150,25 +194,20 @@ def _num_tokens_from_string(string: str, model: str) -> int: return len(encoding.encode(string)) -def _print_accumulated_token_usage(accumulated_usage: dict, model: str = None): - print() - if model is not None: - print(f"Model: {model}") - - since = datetime.datetime.fromtimestamp( - accumulated_usage["earliest_timestamp"], datetime.timezone.utc - ).isoformat(sep=" ", timespec="seconds") - print(f"Accumulated token usage since {since.replace('+00:00', 'Z')}:") - - accumulated_token_usage = { - "input": accumulated_usage["n_input_tokens"], - "output": accumulated_usage["n_output_tokens"], - } - acc_costs = { - "input": accumulated_usage["cost_input_tokens"], - "output": accumulated_usage["cost_output_tokens"], - } - for k, v in accumulated_token_usage.items(): - print(f" > {k.capitalize()}: {v}") - print(f" > Total: {sum(accumulated_token_usage.values())}") - print(f"Estimated total costs since same date: ${sum(acc_costs.values()):.3f}.") +def _group_columns_by_prefix(df): + df = df.copy() + col_tuples_for_multiindex = df.columns.str.split(": ", expand=True).values + df.columns = pd.MultiIndex.from_tuples( + [("", x[0]) if pd.isnull(x[1]) else x for x in col_tuples_for_multiindex] + ) + return df + + +def _add_totals_row(df): + df = df.copy() + dtypes = df.dtypes + df.loc["Total"] = df.sum(numeric_only=True) + for col in df.columns: + df[col] = df[col].astype(dtypes[col]) + df = df.fillna("") + return df From f7bd26b7d0e7547b1f6b8841881bfd3496f15aa1 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Tue, 31 Oct 2023 01:07:43 +0100 Subject: [PATCH 022/109] Set title in web app --- chat_gpt/app/app.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/chat_gpt/app/app.py b/chat_gpt/app/app.py index a24823e..fc2a263 100644 --- a/chat_gpt/app/app.py +++ b/chat_gpt/app/app.py @@ -19,12 +19,16 @@ st.session_state["chat"] = session_chat -st.title(f"Chat with {session_chat.assistant_name}") +page_title = f"Chat with {session_chat.model}" +# Set the title that is shown in the browser's tab +st.set_page_config(page_title=page_title) +# Set page title +st.title(page_title) + # Initialize chat history if "messages" not in st.session_state: st.session_state.messages = [] - # Display chat messages from history on app rerun for message in st.session_state.messages: with st.chat_message(message["role"]): From 536e5b848bb5efbbd1878b8e1cb2efdfcd5743a4 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Tue, 31 Oct 2023 01:15:25 +0100 Subject: [PATCH 023/109] Minor refactoring in app --- chat_gpt/app/app.py | 96 +++++++++++++++++++++++---------------------- 1 file changed, 50 insertions(+), 46 deletions(-) diff --git a/chat_gpt/app/app.py b/chat_gpt/app/app.py index fc2a263..1adfeac 100644 --- a/chat_gpt/app/app.py +++ b/chat_gpt/app/app.py @@ -8,49 +8,53 
@@ from chat_gpt.chat import Chat -# Initialize chat. Kepp it throughout the session. -try: - session_chat = st.session_state["chat"] -except KeyError: - parsed_args_file = sys.argv[-1] - with open(parsed_args_file, "rb") as parsed_args_file: - args = pickle.load(parsed_args_file) - session_chat = Chat.from_cli_args(cli_args=args) - st.session_state["chat"] = session_chat - - -page_title = f"Chat with {session_chat.model}" -# Set the title that is shown in the browser's tab -st.set_page_config(page_title=page_title) -# Set page title -st.title(page_title) - - -# Initialize chat history -if "messages" not in st.session_state: - st.session_state.messages = [] -# Display chat messages from history on app rerun -for message in st.session_state.messages: - with st.chat_message(message["role"]): - st.markdown(message["content"]) - -# Accept user input -if prompt := st.chat_input("Send a message"): - # Add user message to chat history - st.session_state.messages.append({"role": "user", "content": prompt}) - # Display user message in chat message container - with st.chat_message("user"): - st.markdown(prompt) - - # Display assistant response in chat message container - with st.chat_message("assistant"): - message_placeholder = st.empty() - message_placeholder.markdown("▌") # Use blinking cursor to indicate activity - full_response = "" - # Stream assistant response - for chunk in session_chat.yield_response(prompt): - full_response += chunk - message_placeholder.markdown(full_response + "▌") - message_placeholder.markdown(full_response) - # Add assistant response to chat history - st.session_state.messages.append({"role": "assistant", "content": full_response}) + +def run(): + # Initialize chat. Kepp it throughout the session. + try: + session_chat = st.session_state["chat"] + except KeyError: + parsed_args_file = sys.argv[-1] + with open(parsed_args_file, "rb") as parsed_args_file: + args = pickle.load(parsed_args_file) + session_chat = Chat.from_cli_args(cli_args=args) + st.session_state["chat"] = session_chat + + page_title = f"Chat with {session_chat.model}" + # Set the title that is shown in the browser's tab + st.set_page_config(page_title=page_title) + # Set page title + st.title(page_title) + + # Initialize chat history + if "messages" not in st.session_state: + st.session_state.messages = [] + # Display chat messages from history on app rerun + for message in st.session_state.messages: + with st.chat_message(message["role"]): + st.markdown(message["content"]) + + # Accept user input + if prompt := st.chat_input("Send a message"): + # Add user message to chat history + st.session_state.messages.append({"role": "user", "content": prompt}) + # Display user message in chat message container + with st.chat_message("user"): + st.markdown(prompt) + + # Display assistant response in chat message container + with st.chat_message("assistant"): + message_placeholder = st.empty() + message_placeholder.markdown("▌") # Use blinking cursor to indicate activity + full_response = "" + # Stream assistant response + for chunk in session_chat.yield_response(prompt): + full_response += chunk + message_placeholder.markdown(full_response + "▌") + message_placeholder.markdown(full_response) + # Add assistant response to chat history + st.session_state.messages.append({"role": "assistant", "content": full_response}) + + +if __name__ == "__main__": + run() From 45fd8d62bb0bb5731a995b3a9aeebd9276a782b4 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Tue, 31 Oct 2023 10:12:21 +0100 Subject: [PATCH 024/109] 
Consider embedding model when evaluating usage --- chat_gpt/argparse_wrapper.py | 10 ++++-- chat_gpt/chat.py | 60 ++++++++++++++++++++----------- chat_gpt/chat_context.py | 30 ++++++++++++---- chat_gpt/tokens.py | 70 ++++++++++++++++++++++-------------- 4 files changed, 116 insertions(+), 54 deletions(-) diff --git a/chat_gpt/argparse_wrapper.py b/chat_gpt/argparse_wrapper.py index c5069dc..7769870 100644 --- a/chat_gpt/argparse_wrapper.py +++ b/chat_gpt/argparse_wrapper.py @@ -31,12 +31,18 @@ def get_parsed_args(argv=None): ) common_parser.add_argument( "--model", - type=str, + type=lambda x: str(x).lower(), default="gpt-3.5-turbo", choices=["gpt-3.5-turbo", "gpt-4"], help="OpenAI API engine to use for completion", ) - common_parser.add_argument("--send-full-history", action="store_true") + common_parser.add_argument( + "--embedding-model", + type=lambda x: None if str(x).lower() == "none" else str(x).lower(), + default="text-embedding-ada-002", + choices=["text-embedding-ada-002", None], + help="OpenAI API engine to use for embedding", + ) common_parser.add_argument("--skip-reporting-costs", action="store_true") main_parser = argparse.ArgumentParser( diff --git a/chat_gpt/chat.py b/chat_gpt/chat.py index 5de45c8..d5c631e 100644 --- a/chat_gpt/chat.py +++ b/chat_gpt/chat.py @@ -1,9 +1,11 @@ #!/usr/bin/env python3 +from collections import defaultdict + import openai from . import GeneralConstants from .chat_context import BaseChatContext, EmbeddingBasedChatContext -from .tokens import TokenUsageDatabase +from .tokens import TokenUsageDatabase, get_n_tokens class Chat: @@ -11,10 +13,11 @@ def __init__( self, model: str, base_instructions: str, - send_full_history: bool = False, + embedding_model: str = "text-embedding-ada-002", report_estimated_costs_when_done: bool = True, ): self.model = model + self.embedding_model = embedding_model self.username = "chat_user" self.assistant_name = f"chat_{model.replace('.', '_')}" self.system_name = "chat_manager" @@ -33,16 +36,15 @@ def __init__( ] ) - self.token_usage = {"input": 0, "output": 0} + self.token_usage = defaultdict(lambda: {"input": 0, "output": 0}) self.token_usage_db = TokenUsageDatabase( - fpath=GeneralConstants.TOKEN_USAGE_DATABASE, - model=self.model, + fpath=GeneralConstants.TOKEN_USAGE_DATABASE ) - if send_full_history: - self.context_handler = BaseChatContext(parent_chat=self) - else: + if self.embedding_model == "text-embedding-ada-002": self.context_handler = EmbeddingBasedChatContext(parent_chat=self) + else: + self.context_handler = BaseChatContext(parent_chat=self) self.report_estimated_costs_when_done = report_estimated_costs_when_done @@ -56,10 +58,12 @@ def __init__( def __del__(self): # Store token usage to database - self.token_usage_db.insert_data( - n_input_tokens=self.token_usage["input"], - n_output_tokens=self.token_usage["output"], - ) + for model in [self.model, self.embedding_model]: + self.token_usage_db.insert_data( + model=model, + n_input_tokens=self.token_usage[model]["input"], + n_output_tokens=self.token_usage[model]["output"], + ) if self.report_estimated_costs_when_done: self.report_token_usage() @@ -67,8 +71,8 @@ def __del__(self): def from_cli_args(cls, cli_args): return cls( model=cli_args.model, + embedding_model=cli_args.embedding_model, base_instructions=cli_args.initial_ai_instructions, - send_full_history=cli_args.send_full_history, report_estimated_costs_when_done=not cli_args.skip_reporting_costs, ) @@ -76,13 +80,20 @@ def yield_response(self, question: str): question = question.strip() # Add 
context to the conversation - self.query_context = self.context_handler.add_user_input( + context_handler_response = self.context_handler.add_user_input( conversation=self.query_context, user_input=question ) + self.query_context = context_handler_response["conversation"] + + # Update number of tokens used in context handler + for direction in ["input", "output"]: + self.token_usage[self.embedding_model][direction] += context_handler_response[ + "tokens_usage" + ][direction] - # Update number of input tokens - self.token_usage["input"] += sum( - self.token_usage_db.get_n_tokens(string=msg["content"]) + # Update number of input tokens used in the chat model + self.token_usage[self.model]["input"] += sum( + get_n_tokens(string=msg["content"], model=self.model) for msg in self.query_context ) @@ -91,13 +102,22 @@ def yield_response(self, question: str): full_reply_content += chunk yield chunk - # Update number of output tokens - self.token_usage["output"] += self.token_usage_db.get_n_tokens(full_reply_content) + # Update number of tokens output from the chat + self.token_usage[self.model]["output"] += get_n_tokens( + string=full_reply_content, model=self.model + ) # Update context with the reply - self.query_context = self.context_handler.add_chat_reply( + context_handler_response = self.context_handler.add_chat_reply( conversation=self.query_context, chat_reply=full_reply_content.strip() ) + self.query_context = context_handler_response["conversation"] + + # Update number of tokens used in context handler to store the reply + for direction in ["input", "output"]: + self.token_usage[self.embedding_model][direction] += context_handler_response[ + "tokens_usage" + ][direction] def start(self): try: diff --git a/chat_gpt/chat_context.py b/chat_gpt/chat_context.py index 8d9a706..5813c1a 100644 --- a/chat_gpt/chat_context.py +++ b/chat_gpt/chat_context.py @@ -26,7 +26,9 @@ def add_user_input(self, conversation: list, user_input: str): "content": user_input, } conversation.append(user_input_msg_obj) - return conversation + tokens_usage = {"input": 0, "output": 0} + + return {"conversation": conversation, "tokens_usage": tokens_usage} def add_chat_reply(self, conversation: list, chat_reply: str): reply_msg_obj = { @@ -35,7 +37,9 @@ def add_chat_reply(self, conversation: list, chat_reply: str): "content": chat_reply, } conversation.append(reply_msg_obj) - return conversation + tokens_usage = {"input": 0, "output": 0} + + return {"conversation": conversation, "tokens_usage": tokens_usage} class EmbeddingBasedChatContext(BaseChatContext): @@ -51,9 +55,11 @@ def add_user_input(self, conversation: list, user_input: str): "name": self.parent_chat.username, "content": user_input, } - _store_message_to_file( + + tokens_usage = _store_message_to_file( msg_obj=user_input_msg_obj, file_path=self.context_file_path ) + intial_ai_instruct_msg = conversation[0] last_msg_exchange = conversation[-2:] if len(conversation) > 2 else [] current_context = _find_context( @@ -67,7 +73,8 @@ def add_user_input(self, conversation: list, user_input: str): *current_context, user_input_msg_obj, ] - return conversation + + return {"conversation": conversation, "tokens_usage": tokens_usage} def add_chat_reply(self, conversation: list, chat_reply: str): reply_msg_obj = { @@ -76,8 +83,11 @@ def add_chat_reply(self, conversation: list, chat_reply: str): "content": chat_reply, } conversation.append(reply_msg_obj) - _store_message_to_file(file_path=self.context_file_path, msg_obj=reply_msg_obj) - return conversation + tokens_usage = 
_store_message_to_file( + file_path=self.context_file_path, msg_obj=reply_msg_obj + ) + + return {"conversation": conversation, "tokens_usage": tokens_usage} def _store_message_to_file( @@ -86,9 +96,15 @@ def _store_message_to_file( """Store message and embeddings to file.""" # Adapted from + # See also . response = openai.Embedding.create( model="text-embedding-ada-002", input=msg_obj["content"] ) + + input_tokens = response["usage"]["prompt_tokens"] + output_tokens = response["usage"]["total_tokens"] - input_tokens + tokens_usage = {"input": input_tokens, "output": output_tokens} + emb_mess_pair = { "embedding": json.dumps(response["data"][0]["embedding"]), "message": json.dumps(msg_obj), @@ -103,6 +119,8 @@ def _store_message_to_file( writer.writeheader() writer.writerow(emb_mess_pair) + return tokens_usage + def _find_context(file_path: Path, parent_chat: "Chat", option="both"): """Lookup context from file.""" diff --git a/chat_gpt/tokens.py b/chat_gpt/tokens.py index d279796..1472f8e 100644 --- a/chat_gpt/tokens.py +++ b/chat_gpt/tokens.py @@ -9,19 +9,21 @@ PRICING_PER_THOUSAND_TOKENS = { "gpt-3.5-turbo": {"input": 0.0015, "output": 0.002}, "gpt-4": {"input": 0.03, "output": 0.06}, + "text-embedding-ada-002": {"input": 0.0001, "output": 0.0}, + None: {"input": 0.0, "output": 0.0}, } class TokenUsageDatabase: - def __init__(self, fpath: Path, model: str): + def __init__(self, fpath: Path): self.fpath = fpath - self.model = model.strip() - pricing = PRICING_PER_THOUSAND_TOKENS[self.model] - self.token_price = {k: v / 1000.0 for k, v in pricing.items()} - self.create() + self.token_price = {} + for model, price_per_k_tokens in PRICING_PER_THOUSAND_TOKENS.items(): + self.token_price[model] = { + k: v / 1000.0 for k, v in price_per_k_tokens.items() + } - def get_n_tokens(self, string: str) -> int: - return _num_tokens_from_string(string=string, model=self.model) + self.create() def create(self): self.fpath.parent.mkdir(parents=True, exist_ok=True) @@ -46,7 +48,10 @@ def create(self): conn.close() # Function to insert data into the database - def insert_data(self, n_input_tokens, n_output_tokens): + def insert_data(self, model, n_input_tokens, n_output_tokens): + if model is None: + return + conn = sqlite3.connect(self.fpath) cursor = conn.cursor() @@ -65,11 +70,11 @@ def insert_data(self, n_input_tokens, n_output_tokens): """, ( datetime.datetime.utcnow().timestamp(), - self.model, + model, n_input_tokens, n_output_tokens, - n_input_tokens * self.token_price["input"], - n_output_tokens * self.token_price["output"], + n_input_tokens * self.token_price[model]["input"], + n_output_tokens * self.token_price[model]["output"], ), ) @@ -123,6 +128,9 @@ def get_usage_balance_dataframe(self): sums_by_model = self.retrieve_sums_by_model() df_rows = [] for model, accumulated_usage in sums_by_model.items(): + if model is None: + continue + accumulated_tokens_usage = { "input": accumulated_usage["n_input_tokens"], "output": accumulated_usage["n_output_tokens"], @@ -151,20 +159,27 @@ def get_usage_balance_dataframe(self): return df - def get_current_chat_usage_dataframe(self, token_usage: dict): - costs = {k: v * self.token_price[k] for k, v in token_usage.items()} - df_row = { - "Model": self.model, - "Tokens: Input": token_usage["input"], - "Tokens: Output": token_usage["output"], - "Tokens: Total": sum(token_usage.values()), - "Cost ($): Input": costs["input"], - "Cost ($): Output": costs["output"], - "Cost ($): Total": sum(costs.values()), - } - df = pd.DataFrame([df_row]) - df = 
_group_columns_by_prefix(df.set_index("Model")) + def get_current_chat_usage_dataframe(self, token_usage_per_model: dict): + df_rows = [] + for model, token_usage in token_usage_per_model.items(): + if model is None: + continue + costs = {k: v * self.token_price[model][k] for k, v in token_usage.items()} + df_row = { + "Model": model, + "Tokens: Input": token_usage["input"], + "Tokens: Output": token_usage["output"], + "Tokens: Total": sum(token_usage.values()), + "Cost ($): Input": costs["input"], + "Cost ($): Output": costs["output"], + "Cost ($): Total": sum(costs.values()), + } + df_rows.append(df_row) + df = pd.DataFrame(df_rows) + if df_rows: + df = _group_columns_by_prefix(df.set_index("Model")) + df = _add_totals_row(df) return df def print_usage_costs(self, token_usage: dict, current_chat: bool = True): @@ -184,11 +199,14 @@ def print_usage_costs(self, token_usage: dict, current_chat: bool = True): print(underline) print(header) print(underline) - print(df) + if df.empty: + print("None.") + else: + print(df) print() -def _num_tokens_from_string(string: str, model: str) -> int: +def get_n_tokens(string: str, model: str) -> int: """Returns the number of tokens in a text string.""" encoding = tiktoken.encoding_for_model(model) return len(encoding.encode(string)) From 739c21acaa8ad0b002eb03181256b326ec950019 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Tue, 31 Oct 2023 16:34:30 +0100 Subject: [PATCH 025/109] Start rewrite of context handler --- chat_gpt/app/app.py | 101 ++++++++++++++-------------- chat_gpt/chat.py | 89 +++++++++++++----------- chat_gpt/chat_context.py | 141 +++++++++++++-------------------------- chat_gpt/tokens.py | 30 +++++---- 4 files changed, 166 insertions(+), 195 deletions(-) diff --git a/chat_gpt/app/app.py b/chat_gpt/app/app.py index 1adfeac..cb622c2 100644 --- a/chat_gpt/app/app.py +++ b/chat_gpt/app/app.py @@ -8,53 +8,54 @@ from chat_gpt.chat import Chat - -def run(): - # Initialize chat. Kepp it throughout the session. 
- try: - session_chat = st.session_state["chat"] - except KeyError: - parsed_args_file = sys.argv[-1] - with open(parsed_args_file, "rb") as parsed_args_file: - args = pickle.load(parsed_args_file) - session_chat = Chat.from_cli_args(cli_args=args) - st.session_state["chat"] = session_chat - - page_title = f"Chat with {session_chat.model}" - # Set the title that is shown in the browser's tab - st.set_page_config(page_title=page_title) - # Set page title - st.title(page_title) - - # Initialize chat history - if "messages" not in st.session_state: - st.session_state.messages = [] - # Display chat messages from history on app rerun - for message in st.session_state.messages: - with st.chat_message(message["role"]): - st.markdown(message["content"]) - - # Accept user input - if prompt := st.chat_input("Send a message"): - # Add user message to chat history - st.session_state.messages.append({"role": "user", "content": prompt}) - # Display user message in chat message container - with st.chat_message("user"): - st.markdown(prompt) - - # Display assistant response in chat message container - with st.chat_message("assistant"): - message_placeholder = st.empty() - message_placeholder.markdown("▌") # Use blinking cursor to indicate activity - full_response = "" - # Stream assistant response - for chunk in session_chat.yield_response(prompt): - full_response += chunk - message_placeholder.markdown(full_response + "▌") - message_placeholder.markdown(full_response) - # Add assistant response to chat history - st.session_state.messages.append({"role": "assistant", "content": full_response}) - - -if __name__ == "__main__": - run() +# Initialize chat. Kepp it throughout the session. +try: + session_chat = st.session_state["chat"] +except KeyError: + parsed_args_file = sys.argv[-1] + with open(parsed_args_file, "rb") as parsed_args_file: + args = pickle.load(parsed_args_file) + session_chat = Chat.from_cli_args(cli_args=args) + st.session_state["chat"] = session_chat + +page_title = f"Chat with {session_chat.model}" +# Set the title that is shown in the browser's tab +st.set_page_config(page_title=page_title) +# Set page title +st.title(page_title) + + +# Using "with" notation +with st.sidebar: + add_radio = st.radio( + "Choose a shipping method", ("Standard (5-15 days)", "Express (2-5 days)") + ) + +# Initialize chat history +if "messages" not in st.session_state: + st.session_state.messages = [] +# Display chat messages from history on app rerun +for message in st.session_state.messages: + with st.chat_message(message["role"]): + st.markdown(message["content"]) + +# Accept user input +if prompt := st.chat_input("Send a message"): + # Add user message to chat history + st.session_state.messages.append({"role": "user", "content": prompt}) + # Display user message in chat message container + with st.chat_message("user"): + st.markdown(prompt) + + # Display assistant response in chat message container + with st.chat_message("assistant"): + message_placeholder = st.empty() + message_placeholder.markdown("▌") # Use blinking cursor to indicate activity + full_response = "" + # Stream assistant response + for chunk in session_chat.yield_response(prompt): + full_response += chunk + message_placeholder.markdown(full_response + "▌") + message_placeholder.markdown(full_response) + # Add assistant response to chat history + st.session_state.messages.append({"role": "assistant", "content": full_response}) diff --git a/chat_gpt/chat.py b/chat_gpt/chat.py index d5c631e..ce0f117 100644 --- a/chat_gpt/chat.py +++ 
b/chat_gpt/chat.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -from collections import defaultdict +from collections import defaultdict, deque import openai @@ -26,7 +26,7 @@ def __init__( [ instruction.strip() for instruction in [ - f"Your name is {self.assistant_name}", + f"Your name is {self.assistant_name}.", f"You are a helpful assistant to {self.username}.", "You answer correctly. You do not lie.", f"{base_instructions.strip(' .')}.", @@ -42,19 +42,20 @@ def __init__( ) if self.embedding_model == "text-embedding-ada-002": - self.context_handler = EmbeddingBasedChatContext(parent_chat=self) + self.context_handler = EmbeddingBasedChatContext( + embedding_model=self.embedding_model, parent_chat=self + ) else: self.context_handler = BaseChatContext(parent_chat=self) + self.history = deque(maxlen=2) self.report_estimated_costs_when_done = report_estimated_costs_when_done - self.query_context = [ - { - "role": "system", - "name": self.system_name, - "content": self.ground_ai_instructions, - } - ] + self.base_directive = { + "role": "system", + "name": self.system_name, + "content": self.ground_ai_instructions, + } def __del__(self): # Store token usage to database @@ -79,45 +80,57 @@ def from_cli_args(cls, cli_args): def yield_response(self, question: str): question = question.strip() - # Add context to the conversation - context_handler_response = self.context_handler.add_user_input( - conversation=self.query_context, user_input=question - ) - self.query_context = context_handler_response["conversation"] + prompt_as_msg = {"role": "user", "name": self.username, "content": question} + self.history.append(prompt_as_msg) - # Update number of tokens used in context handler - for direction in ["input", "output"]: - self.token_usage[self.embedding_model][direction] += context_handler_response[ - "tokens_usage" - ][direction] + prompt_embedding_request = self.context_handler.get_embedding(text=question) + prompt_embedding = prompt_embedding_request["embedding"] - # Update number of input tokens used in the chat model - self.token_usage[self.model]["input"] += sum( - get_n_tokens(string=msg["content"], model=self.model) - for msg in self.query_context - ) + context = self.context_handler.get_context(embedding=prompt_embedding) + conversation = [self.base_directive, *context, prompt_as_msg] full_reply_content = "" - for chunk in _make_api_call(conversation=self.query_context, model=self.model): + for chunk in _make_api_call(conversation=conversation, model=self.model): full_reply_content += chunk yield chunk - # Update number of tokens output from the chat - self.token_usage[self.model]["output"] += get_n_tokens( - string=full_reply_content, model=self.model + reply_as_msg = { + "role": "assistant", + "name": self.assistant_name, + "content": full_reply_content.strip(), + } + self.history.append(reply_as_msg) + + reply_embedding_request = self.context_handler.get_embedding( + text=full_reply_content + ) + reply_embedding = reply_embedding_request["embedding"] + + self.context_handler.add_msg_and_embedding( + msg=prompt_as_msg, embedding=prompt_embedding ) - # Update context with the reply - context_handler_response = self.context_handler.add_chat_reply( - conversation=self.query_context, chat_reply=full_reply_content.strip() + self.context_handler.add_msg_and_embedding( + msg=reply_as_msg, embedding=reply_embedding ) - self.query_context = context_handler_response["conversation"] - # Update number of tokens used in context handler to store the reply - for direction in ["input", "output"]: - 
self.token_usage[self.embedding_model][direction] += context_handler_response[ - "tokens_usage" - ][direction] + # Update self.token_usage + # 1: With tokens used in chat input + self.token_usage[self.model]["input"] += sum( + get_n_tokens(string=msg["content"], model=self.model) for msg in conversation + ) + # 2: With tokens used in chat output + self.token_usage[self.model]["output"] += get_n_tokens( + string=full_reply_content, model=self.model + ) + # 3: With tokens used in context handler for prompt + self.token_usage[self.embedding_model]["input"] += sum( + prompt_embedding_request["tokens_usage"].values() + ) + # 4: With tokens used in context handler for reply + self.token_usage[self.embedding_model]["output"] += sum( + reply_embedding_request["tokens_usage"].values() + ) def start(self): try: diff --git a/chat_gpt/chat_context.py b/chat_gpt/chat_context.py index 5813c1a..9083b13 100644 --- a/chat_gpt/chat_context.py +++ b/chat_gpt/chat_context.py @@ -7,7 +7,7 @@ import numpy as np import openai import pandas as pd -from openai.embeddings_utils import distances_from_embeddings +from openai.embeddings_utils import cosine_similarity from . import GeneralConstants @@ -45,70 +45,50 @@ def add_chat_reply(self, conversation: list, chat_reply: str): class EmbeddingBasedChatContext(BaseChatContext): """Chat context.""" - def __init__(self, parent_chat: "Chat"): + def __init__(self, embedding_model: str, parent_chat: "Chat"): + self.embedding_model = embedding_model self.parent_chat = parent_chat self.context_file_path = GeneralConstants.EMBEDDINGS_FILE - def add_user_input(self, conversation: list, user_input: str): - user_input_msg_obj = { - "role": "user", - "name": self.parent_chat.username, - "content": user_input, - } + def get_embedding(self, text: str): + return request_embedding_from_openai(text=text, model=self.embedding_model) - tokens_usage = _store_message_to_file( - msg_obj=user_input_msg_obj, file_path=self.context_file_path + def add_msg_and_embedding(self, msg: dict, embedding): + _store_message_and_embedding( + file_path=self.context_file_path, msg_obj=msg, embedding=embedding ) - intial_ai_instruct_msg = conversation[0] - last_msg_exchange = conversation[-2:] if len(conversation) > 2 else [] - current_context = _find_context( + def get_context(self, embedding): + return _find_context( + embedding=embedding, file_path=self.context_file_path, parent_chat=self.parent_chat, option="both", ) - conversation = [ - intial_ai_instruct_msg, - *last_msg_exchange, - *current_context, - user_input_msg_obj, - ] - return {"conversation": conversation, "tokens_usage": tokens_usage} - def add_chat_reply(self, conversation: list, chat_reply: str): - reply_msg_obj = { - "role": "assistant", - "name": self.parent_chat.assistant_name, - "content": chat_reply, - } - conversation.append(reply_msg_obj) - tokens_usage = _store_message_to_file( - file_path=self.context_file_path, msg_obj=reply_msg_obj - ) +def request_embedding_from_openai(text: str, model: str): + text = text.replace("\n", " ") + embedding_request = openai.Embedding.create(input=[text], model=model) - return {"conversation": conversation, "tokens_usage": tokens_usage} + embedding = embedding_request["data"][0]["embedding"] + input_tokens = embedding_request["usage"]["prompt_tokens"] + output_tokens = embedding_request["usage"]["total_tokens"] - input_tokens + tokens_usage = {"input": input_tokens, "output": output_tokens} + + return {"embedding": embedding, "tokens_usage": tokens_usage} -def _store_message_to_file( - msg_obj: 
dict, file_path: Path = GeneralConstants.EMBEDDINGS_FILE +def _store_message_and_embedding( + msg_obj: dict, embedding, file_path: Path = GeneralConstants.EMBEDDINGS_FILE ): """Store message and embeddings to file.""" # Adapted from # See also . - response = openai.Embedding.create( - model="text-embedding-ada-002", input=msg_obj["content"] - ) - - input_tokens = response["usage"]["prompt_tokens"] - output_tokens = response["usage"]["total_tokens"] - input_tokens - tokens_usage = {"input": input_tokens, "output": output_tokens} - emb_mess_pair = { - "embedding": json.dumps(response["data"][0]["embedding"]), - "message": json.dumps(msg_obj), - } + emb_mess_pair = {"message": json.dumps(msg_obj), "embedding": json.dumps(embedding)} init_file = not file_path.exists() or file_path.stat().st_size == 0 write_mode = "w" if init_file else "a" @@ -119,57 +99,30 @@ def _store_message_to_file( writer.writeheader() writer.writerow(emb_mess_pair) - return tokens_usage - -def _find_context(file_path: Path, parent_chat: "Chat", option="both"): - """Lookup context from file.""" - # Adapted from - if not file_path.exists() or file_path.stat().st_size == 0: +def _find_context(file_path: Path, embedding: str, parent_chat: "Chat", n=4, **kwargs): + try: + df = pd.read_csv(file_path) + except FileNotFoundError: return [] - df = pd.read_csv(file_path) - df["embedding"] = df.embedding.apply(eval).apply(np.array) - - if option == "both": - message_list_embeddings = df["embedding"].values[:-3] - elif option == "assistant": - message_list_embeddings = df.loc[ - df["message"].apply(lambda x: ast.literal_eval(x)["role"] == "assistant"), - "embedding", - ].values[-1] - elif option == "user": - message_list_embeddings = df.loc[ - df["message"].apply(lambda x: ast.literal_eval(x)["role"] == "user"), - "embedding", - ].values[:-2] - else: - return [] # Return an empty list if no context is found - - query_embedding = df["embedding"].values[-1] - distances = distances_from_embeddings( - query_embedding, message_list_embeddings, distance_metric="L1" - ) - mask = (np.array(distances) < 21.6)[np.argsort(distances)] - - message_array = df["message"].iloc[np.argsort(distances)][mask] - message_array = [] if message_array is None else message_array[:4] - - message_objects = [json.loads(message) for message in message_array] - context_for_current_user_query = "" - for msg in message_objects: - context_for_current_user_query += f"{msg['name']}: {msg['content']}\n" - - if not context_for_current_user_query: - return [] - - return [ - { - "role": "system", - "name": parent_chat.system_name, - "content": f"{parent_chat.assistant_name}'s knowledge: " - + f"{context_for_current_user_query} + Previous messages.\n" - + "Only answer last message.", - } - ] + df["embedding"] = df.embedding.apply(ast.literal_eval).apply(np.array) + df["similarity"] = df["embedding"].apply(lambda x: cosine_similarity(x, embedding)) + df = df.sort_values("similarity", ascending=False).head(n) + selected_history = df["message"].apply(ast.literal_eval).values + context_msg_content = "You know previous messages.\n" + context_msg_content += "You also know that the following was said:\n" + for message in selected_history: + context_msg_content += f"{message['name']}: {message['content']}\n" + context_msg_content += "Answer the next message."
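    # The "similarity" column above comes from openai.embeddings_utils.cosine_similarity.
    # Cosine similarity between two embedding vectors a and b is (a . b) / (|a| * |b|);
    # values close to 1 mean the stored message is semantically close to the current
    # prompt. A rough standalone check of the same quantity, using only numpy and two
    # toy 2-d vectors (illustrative values, not real embeddings):
    #
    #   >>> import numpy as np
    #   >>> a, b = np.array([1.0, 0.0]), np.array([1.0, 1.0])
    #   >>> round(float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b))), 3)
    #   0.707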
+ context_msg = { + "role": "system", + "name": parent_chat.system_name, + "content": context_msg_content, + } + + return [context_msg] diff --git a/chat_gpt/tokens.py b/chat_gpt/tokens.py index 1472f8e..9816b6d 100644 --- a/chat_gpt/tokens.py +++ b/chat_gpt/tokens.py @@ -6,7 +6,7 @@ import pandas as pd import tiktoken -PRICING_PER_THOUSAND_TOKENS = { +PRICE_PER_THOUSAND_TOKENS = { "gpt-3.5-turbo": {"input": 0.0015, "output": 0.002}, "gpt-4": {"input": 0.03, "output": 0.06}, "text-embedding-ada-002": {"input": 0.0001, "output": 0.0}, @@ -18,7 +18,7 @@ class TokenUsageDatabase: def __init__(self, fpath: Path): self.fpath = fpath self.token_price = {} - for model, price_per_k_tokens in PRICING_PER_THOUSAND_TOKENS.items(): + for model, price_per_k_tokens in PRICE_PER_THOUSAND_TOKENS.items(): self.token_price[model] = { k: v / 1000.0 for k, v in price_per_k_tokens.items() } @@ -192,18 +192,9 @@ def print_usage_costs(self, token_usage: dict, current_chat: bool = True): } for header, df in header2dataframe.items(): - if "Current" in header and not current_chat: + if "current" in header.lower() and not current_chat: continue - underline = "-" * len(header) - print() - print(underline) - print(header) - print(underline) - if df.empty: - print("None.") - else: - print(df) - print() + _print_df(df=df, header=header) def get_n_tokens(string: str, model: str) -> int: @@ -229,3 +220,16 @@ def _add_totals_row(df): df[col] = df[col].astype(dtypes[col]) df = df.fillna("") return df + + +def _print_df(df: pd.DataFrame, header: str): + underline = "-" * len(header) + print() + print(underline) + print(header) + print(underline) + if df.empty: + print("None.") + else: + print(df) + print() From 1a120ad9f56f27dee26b89c76590ba98347a3117 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Tue, 31 Oct 2023 20:09:02 +0100 Subject: [PATCH 026/109] Improvements to EmbeddingBasedChatContext --- chat_gpt/chat.py | 20 +++++++++----------- chat_gpt/chat_context.py | 39 +++++++++++++++++++++++++++++---------- 2 files changed, 38 insertions(+), 21 deletions(-) diff --git a/chat_gpt/chat.py b/chat_gpt/chat.py index ce0f117..e203ac1 100644 --- a/chat_gpt/chat.py +++ b/chat_gpt/chat.py @@ -83,7 +83,7 @@ def yield_response(self, question: str): prompt_as_msg = {"role": "user", "name": self.username, "content": question} self.history.append(prompt_as_msg) - prompt_embedding_request = self.context_handler.get_embedding(text=question) + prompt_embedding_request = self.context_handler.calculate_embedding(text=question) prompt_embedding = prompt_embedding_request["embedding"] context = self.context_handler.get_context(embedding=prompt_embedding) @@ -101,17 +101,15 @@ def yield_response(self, question: str): } self.history.append(reply_as_msg) - reply_embedding_request = self.context_handler.get_embedding( - text=full_reply_content + this_exchange_text = ( + f"{self.username}: {question}. 
{self.assistant_name}: {full_reply_content}" ) - reply_embedding = reply_embedding_request["embedding"] - - self.context_handler.add_msg_and_embedding( - msg=prompt_as_msg, embedding=prompt_embedding + this_exchange_text_embedding_request = self.context_handler.calculate_embedding( + text=this_exchange_text ) - - self.context_handler.add_msg_and_embedding( - msg=reply_as_msg, embedding=reply_embedding + this_exchange_text_embedding = this_exchange_text_embedding_request["embedding"] + self.context_handler.add_to_history( + text=this_exchange_text, embedding=this_exchange_text_embedding ) # Update self.token_usage @@ -129,7 +127,7 @@ def yield_response(self, question: str): ) # 4: With tokens used in context handler for reply self.token_usage[self.embedding_model]["output"] += sum( - reply_embedding_request["tokens_usage"].values() + this_exchange_text_embedding_request["tokens_usage"].values() ) def start(self): diff --git a/chat_gpt/chat_context.py b/chat_gpt/chat_context.py index 9083b13..ee00d93 100644 --- a/chat_gpt/chat_context.py +++ b/chat_gpt/chat_context.py @@ -1,6 +1,7 @@ import ast import csv import json +import time from pathlib import Path from typing import TYPE_CHECKING @@ -50,12 +51,13 @@ def __init__(self, embedding_model: str, parent_chat: "Chat"): self.parent_chat = parent_chat self.context_file_path = GeneralConstants.EMBEDDINGS_FILE - def get_embedding(self, text: str): + def calculate_embedding(self, text: str): + text = text.lower().replace("\n", " ") return request_embedding_from_openai(text=text, model=self.embedding_model) - def add_msg_and_embedding(self, msg: dict, embedding): + def add_to_history(self, text, embedding): _store_message_and_embedding( - file_path=self.context_file_path, msg_obj=msg, embedding=embedding + file_path=self.context_file_path, msg_obj=text, embedding=embedding ) def get_context(self, embedding): @@ -88,7 +90,11 @@ def _store_message_and_embedding( # use-embeddings-to-retrieve-relevant-context-for-ai-assistant/268538> # See also . 
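    # Each history entry ends up as one CSV row pairing the JSON-serialised message
    # with its embedding vector (and, after this change, a Unix timestamp). A sketch
    # of the resulting file layout, with illustrative values only:
    #
    #   timestamp,message,embedding
    #   1698843600,"{""role"": ""user"", ...}","[0.0023, -0.0091, ...]"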
- emb_mess_pair = {"message": json.dumps(msg_obj), "embedding": json.dumps(embedding)} + emb_mess_pair = { + "timestamp": int(time.time()), + "message": json.dumps(msg_obj), + "embedding": json.dumps(embedding), + } init_file = not file_path.exists() or file_path.stat().st_size == 0 write_mode = "w" if init_file else "a" @@ -100,7 +106,13 @@ def _store_message_and_embedding( writer.writerow(emb_mess_pair) -def _find_context(file_path: Path, embedding: str, parent_chat: "Chat", n=4, **kwargs): +def _find_context( + file_path: Path, + embedding: str, + parent_chat: "Chat", + n_related_entries: int = 4, + n_directly_preceding_exchanges: int = 2, +): try: df = pd.read_csv(file_path) except FileNotFoundError: return [] @@ -110,15 +122,22 @@ def _find_context(file_path: Path, embedding: str, parent_chat: "Chat", n=4, **k df["embedding"] = df.embedding.apply(ast.literal_eval).apply(np.array) df["similarity"] = df["embedding"].apply(lambda x: cosine_similarity(x, embedding)) - df = df.sort_values("similarity", ascending=False).head(n) + # Get the last n messages added to the history + df_last_n_chats = df.tail(n_directly_preceding_exchanges) - selected_history = df["message"].apply(ast.literal_eval).values + df_similar_chats = ( + df.sort_values("similarity", ascending=False) + .head(n_related_entries) + .sort_values("timestamp") + ) + df_context = pd.concat([df_similar_chats, df_last_n_chats]) + selected = df_context["message"].apply(ast.literal_eval).drop_duplicates().tolist() context_msg_content = "You know previous messages.\n" context_msg_content += "You also know that the following was said:\n" - for message in selected_history: + for message in selected: context_msg_content += f"{message}\n" context_msg_content += "Answer the last message."
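    # The selected context is thus the union of the most similar stored rows
    # (re-sorted chronologically) and the most recent exchanges, with duplicates
    # dropped. A toy illustration of the same pandas pattern, assuming a frame
    # with "timestamp" and "similarity" columns:
    #
    #   >>> import pandas as pd
    #   >>> df = pd.DataFrame({"timestamp": [1, 2, 3, 4], "similarity": [0.9, 0.1, 0.8, 0.2]})
    #   >>> top = df.sort_values("similarity", ascending=False).head(2).sort_values("timestamp")
    #   >>> pd.concat([top, df.tail(1)]).drop_duplicates()
    #      timestamp  similarity
    #   0          1         0.9
    #   2          3         0.8
    #   3          4         0.2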
context_msg = { "role": "system", "name": parent_chat.system_name, From 232c192bb2b0c0f8da95620dec29b6e4f03b1cfd Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Tue, 31 Oct 2023 20:09:28 +0100 Subject: [PATCH 027/109] Reduce timeout to 10 seconds --- chat_gpt/chat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chat_gpt/chat.py b/chat_gpt/chat.py index e203ac1..b8118f4 100644 --- a/chat_gpt/chat.py +++ b/chat_gpt/chat.py @@ -155,7 +155,7 @@ def _make_api_call(conversation: list, model: str): for line in openai.ChatCompletion.create( model=model, messages=conversation, - request_timeout=30, + request_timeout=10, stream=True, temperature=0.8, ): From f1b6f8a5811f345da4945b1e7c5017806772d8b9 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Tue, 31 Oct 2023 20:16:39 +0100 Subject: [PATCH 028/109] Fix to extra arg in function --- chat_gpt/chat_context.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/chat_gpt/chat_context.py b/chat_gpt/chat_context.py index ee00d93..847ccd0 100644 --- a/chat_gpt/chat_context.py +++ b/chat_gpt/chat_context.py @@ -65,7 +65,6 @@ def get_context(self, embedding): embedding=embedding, file_path=self.context_file_path, parent_chat=self.parent_chat, - option="both", ) @@ -108,7 +107,7 @@ def _store_message_and_embedding( def _find_context( file_path: Path, - embedding: str, + embedding: list[float], parent_chat: "Chat", n_related_entries: int = 4, n_directly_preceeding_exchanges: int = 2, From e48eabb079bfa7d883d7962b13345a0c8766a699 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Tue, 31 Oct 2023 22:10:05 +0100 Subject: [PATCH 029/109] Refactoring & some fixes --- chat_gpt/argparse_wrapper.py | 2 +- chat_gpt/chat.py | 67 +++++++++++--------------- chat_gpt/chat_context.py | 92 ++++++++++++++++++------------------ 3 files changed, 73 insertions(+), 88 deletions(-) diff --git a/chat_gpt/argparse_wrapper.py b/chat_gpt/argparse_wrapper.py index 7769870..be6aa72 100644 --- a/chat_gpt/argparse_wrapper.py +++ b/chat_gpt/argparse_wrapper.py @@ -37,7 +37,7 @@ def get_parsed_args(argv=None): help="OpenAI API engine to use for completion", ) common_parser.add_argument( - "--embedding-model", + "--context-model", type=lambda x: None if str(x).lower() == "none" else str(x).lower(), default="text-embedding-ada-002", choices=["text-embedding-ada-002", None], diff --git a/chat_gpt/chat.py b/chat_gpt/chat.py index b8118f4..d7d8fa4 100644 --- a/chat_gpt/chat.py +++ b/chat_gpt/chat.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -from collections import defaultdict, deque +from collections import defaultdict import openai @@ -13,11 +13,15 @@ def __init__( self, model: str, base_instructions: str, - embedding_model: str = "text-embedding-ada-002", + context_model: str = "text-embedding-ada-002", report_estimated_costs_when_done: bool = True, ): - self.model = model - self.embedding_model = embedding_model + self.model = model.lower() + + if context_model is not None: + context_model = context_model.lower() + self.context_model = context_model + self.username = "chat_user" self.assistant_name = f"chat_{model.replace('.', '_')}" self.system_name = "chat_manager" @@ -41,13 +45,14 @@ def __init__( fpath=GeneralConstants.TOKEN_USAGE_DATABASE ) - if self.embedding_model == "text-embedding-ada-002": + if self.context_model is None: + self.context_handler = BaseChatContext(parent_chat=self) + elif self.context_model == "text-embedding-ada-002": self.context_handler = EmbeddingBasedChatContext( - embedding_model=self.embedding_model, 
parent_chat=self + embedding_model=self.context_model, parent_chat=self ) else: - self.context_handler = BaseChatContext(parent_chat=self) - self.history = deque(maxlen=2) + raise NotImplementedError(f"Unknown context model: {self.context_model}") self.report_estimated_costs_when_done = report_estimated_costs_when_done @@ -59,7 +64,7 @@ def __init__( def __del__(self): # Store token usage to database - for model in [self.model, self.embedding_model]: + for model in [self.model, self.context_model]: self.token_usage_db.insert_data( model=model, n_input_tokens=self.token_usage[model]["input"], @@ -72,21 +77,17 @@ def __del__(self): def from_cli_args(cls, cli_args): return cls( model=cli_args.model, - embedding_model=cli_args.embedding_model, + context_model=cli_args.context_model, base_instructions=cli_args.initial_ai_instructions, report_estimated_costs_when_done=not cli_args.skip_reporting_costs, ) - def yield_response(self, question: str): - question = question.strip() - - prompt_as_msg = {"role": "user", "name": self.username, "content": question} - self.history.append(prompt_as_msg) + def yield_response(self, prompt: str): + prompt = prompt.strip() + prompt_as_msg = {"role": "user", "name": self.username, "content": prompt} - prompt_embedding_request = self.context_handler.calculate_embedding(text=question) - prompt_embedding = prompt_embedding_request["embedding"] - - context = self.context_handler.get_context(embedding=prompt_embedding) + prompt_context_request = self.context_handler.get_context(text=prompt) + context = prompt_context_request["context_messages"] conversation = [self.base_directive, *context, prompt_as_msg] full_reply_content = "" @@ -94,22 +95,8 @@ def yield_response(self, question: str): full_reply_content += chunk yield chunk - reply_as_msg = { - "role": "assistant", - "name": self.assistant_name, - "content": full_reply_content.strip(), - } - self.history.append(reply_as_msg) - - this_exchange_text = ( - f"{self.username}: {question}. {self.assistant_name}: {full_reply_content}" - ) - this_exchange_text_embedding_request = self.context_handler.calculate_embedding( - text=this_exchange_text - ) - this_exchange_text_embedding = this_exchange_text_embedding_request["embedding"] - self.context_handler.add_to_history( - text=this_exchange_text, embedding=this_exchange_text_embedding + history_entry_registration_tokens_usage = self.context_handler.add_to_history( + text=f"{self.username}: {prompt}. 
{self.assistant_name}: {full_reply_content}" ) # Update self.token_usage @@ -122,12 +109,12 @@ def yield_response(self, question: str): string=full_reply_content, model=self.model ) # 3: With tokens used in context handler for prompt - self.token_usage[self.embedding_model]["input"] += sum( - prompt_embedding_request["tokens_usage"].values() + self.token_usage[self.context_model]["input"] += sum( + prompt_context_request["tokens_usage"].values() ) # 4: With tokens used in context handler for reply - self.token_usage[self.embedding_model]["output"] += sum( - this_exchange_text_embedding_request["tokens_usage"].values() + self.token_usage[self.context_model]["output"] += sum( + history_entry_registration_tokens_usage.values() ) def start(self): @@ -137,7 +124,7 @@ def start(self): if not question: continue print(f"{self.assistant_name}: ", end="", flush=True) - for chunk in self.yield_response(question=question): + for chunk in self.yield_response(prompt=question): print(chunk, end="", flush=True) print() print() diff --git a/chat_gpt/chat_context.py b/chat_gpt/chat_context.py index 847ccd0..24808ab 100644 --- a/chat_gpt/chat_context.py +++ b/chat_gpt/chat_context.py @@ -2,6 +2,7 @@ import csv import json import time +from collections import deque from pathlib import Path from typing import TYPE_CHECKING @@ -19,57 +20,56 @@ class BaseChatContext: def __init__(self, parent_chat: "Chat"): self.parent_chat = parent_chat + self.history = deque(maxlen=50) + self._tokens_usage = {"input": 0, "output": 0} - def add_user_input(self, conversation: list, user_input: str): - user_input_msg_obj = { - "role": "user", - "name": self.parent_chat.username, - "content": user_input, - } - conversation.append(user_input_msg_obj) - tokens_usage = {"input": 0, "output": 0} - - return {"conversation": conversation, "tokens_usage": tokens_usage} - - def add_chat_reply(self, conversation: list, chat_reply: str): - reply_msg_obj = { - "role": "assistant", - "name": self.parent_chat.assistant_name, - "content": chat_reply, - } - conversation.append(reply_msg_obj) - tokens_usage = {"input": 0, "output": 0} + def add_to_history(self, text: str): + self.history.append(text) + return self._tokens_usage - return {"conversation": conversation, "tokens_usage": tokens_usage} + def get_context(self, text: str): + context_msg = _compose_context_msg( + history=self.history, system_name=self.parent_chat.system_name + ) + return {"context_messages": [context_msg], "tokens_usage": self._tokens_usage} class EmbeddingBasedChatContext(BaseChatContext): """Chat context.""" def __init__(self, embedding_model: str, parent_chat: "Chat"): - self.embedding_model = embedding_model self.parent_chat = parent_chat + self.embedding_model = embedding_model self.context_file_path = GeneralConstants.EMBEDDINGS_FILE - def calculate_embedding(self, text: str): - text = text.lower().replace("\n", " ") - return request_embedding_from_openai(text=text, model=self.embedding_model) - - def add_to_history(self, text, embedding): - _store_message_and_embedding( - file_path=self.context_file_path, msg_obj=text, embedding=embedding + def add_to_history(self, text: str): + embedding_request = self.calculate_embedding(text=text) + _store_object_and_embedding( + obj=text, + embedding=embedding_request["embedding"], + file_path=self.context_file_path, ) + return embedding_request["tokens_usage"] - def get_context(self, embedding): - return _find_context( - embedding=embedding, + def get_context(self, text: str): + embedding_request = 
self.calculate_embedding(text=text) + context_messages = _find_context( + embedding=embedding_request["embedding"], file_path=self.context_file_path, parent_chat=self.parent_chat, ) + return { + "context_messages": context_messages, + "tokens_usage": embedding_request["tokens_usage"], + } + + def calculate_embedding(self, text: str): + return request_embedding_from_openai(text=text, model=self.embedding_model) + def request_embedding_from_openai(text: str, model: str): - text = text.replace("\n", " ") + text.lower().replace("\n", " ") embedding_request = openai.Embedding.create(input=[text], model=model) embedding = embedding_request["data"][0]["embedding"] @@ -81,8 +81,8 @@ def request_embedding_from_openai(text: str, model: str): return {"embedding": embedding, "tokens_usage": tokens_usage} -def _store_message_and_embedding( - msg_obj: dict, embedding, file_path: Path = GeneralConstants.EMBEDDINGS_FILE +def _store_object_and_embedding( + obj, embedding, file_path: Path = GeneralConstants.EMBEDDINGS_FILE ): """Store message and embeddings to file.""" # Adapted from Date: Wed, 1 Nov 2023 00:55:40 +0100 Subject: [PATCH 030/109] Minor change to composing context msgs --- chat_gpt/chat_context.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/chat_gpt/chat_context.py b/chat_gpt/chat_context.py index 24808ab..6f96628 100644 --- a/chat_gpt/chat_context.py +++ b/chat_gpt/chat_context.py @@ -106,11 +106,9 @@ def _store_object_and_embedding( def _compose_context_msg(history: list[str], system_name: str): - context_msg_content = "You know that the following was said:\n" - for message in history: - context_msg_content += f"{message}\n" + context_msg_content = "You know that the following was said:\n\n" + context_msg_content += "\x1f\n".join(rf"{message}" for message in history) + "\n\n" context_msg_content += "Answer the last message." - return {"role": "system", "name": system_name, "content": context_msg_content} From 01db41cec7884f025d49e8bae70e065e8fccd1a1 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Wed, 1 Nov 2023 11:17:24 +0100 Subject: [PATCH 031/109] Rename cmd `show-costs` to `accounting` --- chat_gpt/argparse_wrapper.py | 17 ++++++++--------- chat_gpt/command_definitions.py | 2 +- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/chat_gpt/argparse_wrapper.py b/chat_gpt/argparse_wrapper.py index be6aa72..9a508c9 100644 --- a/chat_gpt/argparse_wrapper.py +++ b/chat_gpt/argparse_wrapper.py @@ -3,10 +3,10 @@ import argparse import sys -from .command_definitions import run_on_browser, run_on_terminal, show_accumulated_costs +from .command_definitions import accounting, run_on_browser, run_on_terminal -def get_parsed_args(argv=None): +def get_parsed_args(argv=None, default_command="browser"): """Get parsed command line arguments. Args: @@ -19,7 +19,7 @@ def get_parsed_args(argv=None): if argv is None: argv = sys.argv[1:] if not argv: - argv = ["browser"] + argv = [default_command] common_parser = argparse.ArgumentParser(add_help=False) common_parser.add_argument( @@ -67,17 +67,16 @@ def get_parsed_args(argv=None): parser_browser.set_defaults(run_command=run_on_browser) parser_terminal = subparsers.add_parser( - "terminal", - parents=[common_parser], - help="Run the chat on the terminal.", + "terminal", parents=[common_parser], help="Run the chat on the terminal." 
) parser_terminal.set_defaults(run_command=run_on_terminal) - parser_show_costs = subparsers.add_parser( - "show-costs", + parser_accounting = subparsers.add_parser( + "accounting", + aliases=["acc"], parents=[common_parser], help="Show the number of tokens used for each message.", ) - parser_show_costs.set_defaults(run_command=show_accumulated_costs) + parser_accounting.set_defaults(run_command=accounting) return main_parser.parse_args(argv) diff --git a/chat_gpt/command_definitions.py b/chat_gpt/command_definitions.py index b729210..1cb1eb3 100644 --- a/chat_gpt/command_definitions.py +++ b/chat_gpt/command_definitions.py @@ -7,7 +7,7 @@ from .chat import Chat -def show_accumulated_costs(args): +def accounting(args): """Show the accumulated costs of the chat and exit.""" args.skip_reporting_costs = True Chat.from_cli_args(cli_args=args).report_token_usage(current_chat=False) From 5f086bb03ad36e5bc6f2635cc75d5b6c337d4350 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Wed, 1 Nov 2023 15:57:26 +0100 Subject: [PATCH 032/109] Some refactoring --- chat_gpt/argparse_wrapper.py | 31 +++++++++++-------- chat_gpt/chat.py | 55 ++++++++++++++++++--------------- chat_gpt/command_definitions.py | 9 +++--- chat_gpt/tokens.py | 2 +- 4 files changed, 54 insertions(+), 43 deletions(-) diff --git a/chat_gpt/argparse_wrapper.py b/chat_gpt/argparse_wrapper.py index 9a508c9..42e5e4d 100644 --- a/chat_gpt/argparse_wrapper.py +++ b/chat_gpt/argparse_wrapper.py @@ -3,10 +3,10 @@ import argparse import sys -from .command_definitions import accounting, run_on_browser, run_on_terminal +from .command_definitions import accounting, run_on_terminal, run_on_ui -def get_parsed_args(argv=None, default_command="browser"): +def get_parsed_args(argv=None, default_command="ui"): """Get parsed command line arguments. Args: @@ -21,29 +21,29 @@ def get_parsed_args(argv=None, default_command="browser"): if not argv: argv = [default_command] - common_parser = argparse.ArgumentParser(add_help=False) - common_parser.add_argument( + chat_options_parser = argparse.ArgumentParser(add_help=False) + chat_options_parser.add_argument( "initial_ai_instructions", type=str, default="You answer using the minimum possible number of tokens.", help="Initial instructions for the AI", nargs="?", ) - common_parser.add_argument( + chat_options_parser.add_argument( "--model", type=lambda x: str(x).lower(), default="gpt-3.5-turbo", choices=["gpt-3.5-turbo", "gpt-4"], help="OpenAI API engine to use for completion", ) - common_parser.add_argument( + chat_options_parser.add_argument( "--context-model", type=lambda x: None if str(x).lower() == "none" else str(x).lower(), default="text-embedding-ada-002", choices=["text-embedding-ada-002", None], help="OpenAI API engine to use for embedding", ) - common_parser.add_argument("--skip-reporting-costs", action="store_true") + chat_options_parser.add_argument("--skip-reporting-costs", action="store_true") main_parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter @@ -61,21 +61,26 @@ def get_parsed_args(argv=None, default_command="browser"): help="command description", ) - parser_browser = subparsers.add_parser( - "browser", parents=[common_parser], help="Run the chat on the browser." 
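    # argparse subcommand aliases let one handler be reached under several names,
    # and set_defaults() is what attaches that handler for later dispatch. A minimal
    # standalone sketch of the pattern with a hypothetical "greet" command:
    #
    #   >>> import argparse
    #   >>> parser = argparse.ArgumentParser()
    #   >>> subparsers = parser.add_subparsers()
    #   >>> greet = subparsers.add_parser("greet", aliases=["hi"])
    #   >>> greet.set_defaults(run_command=lambda args: "hello")
    #   >>> parser.parse_args(["hi"]).run_command(None)
    #   'hello'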
+ parser_ui = subparsers.add_parser( + "ui", + aliases=["app"], + parents=[chat_options_parser], + help="Run the chat UI on the browser.", ) - parser_browser.set_defaults(run_command=run_on_browser) + parser_ui.set_defaults(run_command=run_on_ui) parser_terminal = subparsers.add_parser( - "terminal", parents=[common_parser], help="Run the chat on the terminal." + "terminal", + aliases=["."], + parents=[chat_options_parser], + help="Run the chat on the terminal.", ) parser_terminal.set_defaults(run_command=run_on_terminal) parser_accounting = subparsers.add_parser( "accounting", aliases=["acc"], - parents=[common_parser], - help="Show the number of tokens used for each message.", + help="Show the estimated number of used tokens and associated costs, and exit.", ) parser_accounting.set_defaults(run_command=accounting) diff --git a/chat_gpt/chat.py b/chat_gpt/chat.py index d7d8fa4..84459bb 100644 --- a/chat_gpt/chat.py +++ b/chat_gpt/chat.py @@ -11,10 +11,10 @@ class Chat: def __init__( self, - model: str, - base_instructions: str, + model: str = "gpt-3.5-turbo", + base_instructions: str = "", context_model: str = "text-embedding-ada-002", - report_estimated_costs_when_done: bool = True, + report_accounting_when_done: bool = False, ): self.model = model.lower() @@ -54,7 +54,7 @@ def __init__( else: raise NotImplementedError(f"Unknown context model: {self.context_model}") - self.report_estimated_costs_when_done = report_estimated_costs_when_done + self.report_accounting_when_done = report_accounting_when_done self.base_directive = { "role": "system", @@ -70,7 +70,7 @@ def __del__(self): n_input_tokens=self.token_usage[model]["input"], n_output_tokens=self.token_usage[model]["output"], ) - if self.report_estimated_costs_when_done: + if self.report_accounting_when_done: self.report_token_usage() @classmethod @@ -79,40 +79,44 @@ def from_cli_args(cls, cli_args): model=cli_args.model, context_model=cli_args.context_model, base_instructions=cli_args.initial_ai_instructions, - report_estimated_costs_when_done=not cli_args.skip_reporting_costs, + report_accounting_when_done=not cli_args.skip_reporting_costs, ) def yield_response(self, prompt: str): prompt = prompt.strip() prompt_as_msg = {"role": "user", "name": self.username, "content": prompt} + # Get appropriate context for prompt from the context handler prompt_context_request = self.context_handler.get_context(text=prompt) context = prompt_context_request["context_messages"] - conversation = [self.base_directive, *context, prompt_as_msg] + # Update token_usage with tokens used in context handler for prompt + self.token_usage[self.context_model]["input"] += sum( + prompt_context_request["tokens_usage"].values() + ) + contextualised_prompt = [self.base_directive, *context, prompt_as_msg] + # Update token_usage with tokens used in chat input + self.token_usage[self.model]["input"] += sum( + get_n_tokens(string=msg["content"], model=self.model) + for msg in contextualised_prompt + ) + + # Make API request and yield response chunks full_reply_content = "" - for chunk in _make_api_call(conversation=conversation, model=self.model): + for chunk in _make_api_call(conversation=contextualised_prompt, model=self.model): full_reply_content += chunk yield chunk - history_entry_registration_tokens_usage = self.context_handler.add_to_history( - text=f"{self.username}: {prompt}. 
{self.assistant_name}: {full_reply_content}" - ) - - # Update self.token_usage - # 1: With tokens used in chat input - self.token_usage[self.model]["input"] += sum( - get_n_tokens(string=msg["content"], model=self.model) for msg in conversation - ) - # 2: With tokens used in chat output + # Update token_usage ith tokens used in chat output self.token_usage[self.model]["output"] += get_n_tokens( string=full_reply_content, model=self.model ) - # 3: With tokens used in context handler for prompt - self.token_usage[self.context_model]["input"] += sum( - prompt_context_request["tokens_usage"].values() + + # Put current chat exchande in context handler's history + history_entry_registration_tokens_usage = self.context_handler.add_to_history( + text=f"{self.username}: {prompt}. {self.assistant_name}: {full_reply_content}" ) - # 4: With tokens used in context handler for reply + # Update token_usage with tokens used in context handler for reply self.token_usage[self.context_model]["output"] += sum( history_entry_registration_tokens_usage.values() ) @@ -146,11 +150,12 @@ def _make_api_call(conversation: list, model: str): stream=True, temperature=0.8, ): - reply_content_token = getattr(line.choices[0].delta, "content", "") - yield reply_content_token - success = True + reply_chunk = getattr(line.choices[0].delta, "content", "") + yield reply_chunk except ( openai.error.ServiceUnavailableError, openai.error.Timeout, ) as error: print(f" > {error}. Retrying...") + else: + success = True diff --git a/chat_gpt/command_definitions.py b/chat_gpt/command_definitions.py index 1cb1eb3..45b64cd 100644 --- a/chat_gpt/command_definitions.py +++ b/chat_gpt/command_definitions.py @@ -9,8 +9,7 @@ def accounting(args): """Show the accumulated costs of the chat and exit.""" - args.skip_reporting_costs = True - Chat.from_cli_args(cli_args=args).report_token_usage(current_chat=False) + Chat().report_token_usage(current_chat=False) def run_on_terminal(args): @@ -18,12 +17,12 @@ def run_on_terminal(args): Chat.from_cli_args(cli_args=args).start() -def run_on_browser(args): +def run_on_ui(args): """Run the chat on the browser.""" with open(GeneralConstants.PARSED_ARGS_FILE, "wb") as parsed_args_file: pickle.dump(args, parsed_args_file) app_path = GeneralConstants.PACKAGE_DIRECTORY / "app" / "app.py" - with contextlib.suppress(KeyboardInterrupt): + try: run( [ "streamlit", @@ -34,3 +33,5 @@ def run_on_browser(args): GeneralConstants.PARSED_ARGS_FILE.as_posix(), ] ) + except (KeyboardInterrupt, EOFError): + print("Exiting.") diff --git a/chat_gpt/tokens.py b/chat_gpt/tokens.py index 9816b6d..9875d1a 100644 --- a/chat_gpt/tokens.py +++ b/chat_gpt/tokens.py @@ -228,7 +228,7 @@ def _print_df(df: pd.DataFrame, header: str): print(underline) print(header) print(underline) - if df.empty: + if df.empty or df.loc["Total"]["Tokens"]["Total"] == 0: print("None.") else: print(df) From 3c0300c19e51a15644dd0c3bbda25aedf48ed254 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Wed, 1 Nov 2023 17:14:54 +0100 Subject: [PATCH 033/109] Do not share history between sessions --- chat_gpt/__init__.py | 1 - chat_gpt/app/{app.py => pages/template.py} | 0 chat_gpt/chat_context.py | 8 ++++---- 3 files changed, 4 insertions(+), 5 deletions(-) rename chat_gpt/app/{app.py => pages/template.py} (100%) diff --git a/chat_gpt/__init__.py b/chat_gpt/__init__.py index aa26a51..1f9cbef 100644 --- a/chat_gpt/__init__.py +++ b/chat_gpt/__init__.py @@ -16,7 +16,6 @@ class GeneralConstants: PACKAGE_CACHE_DIRECTORY = Path.home() / ".cache" / PACKAGE_NAME 
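    # tempfile.TemporaryDirectory() returns an object whose directory is removed when
    # the object is cleaned up, so keeping the instance on GeneralConstants ties the
    # scratch area's lifetime to the running process; e.g.:
    #
    #   >>> import tempfile, pathlib
    #   >>> tmp = tempfile.TemporaryDirectory()
    #   >>> pathlib.Path(tmp.name).exists()
    #   True
    #   >>> tmp.cleanup()
    #   >>> pathlib.Path(tmp.name).exists()
    #   False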
_PACKAGE_TMPDIR = tempfile.TemporaryDirectory() PACKAGE_TMPDIR = Path(_PACKAGE_TMPDIR.name) - EMBEDDINGS_FILE = PACKAGE_TMPDIR / "embeddings.csv" PARSED_ARGS_FILE = PACKAGE_TMPDIR / f"parsed_args_{RUN_ID}.pkl" TOKEN_USAGE_DATABASE = PACKAGE_CACHE_DIRECTORY / "token_usage.db" diff --git a/chat_gpt/app/app.py b/chat_gpt/app/pages/template.py similarity index 100% rename from chat_gpt/app/app.py rename to chat_gpt/app/pages/template.py diff --git a/chat_gpt/chat_context.py b/chat_gpt/chat_context.py index 6f96628..ca314c7 100644 --- a/chat_gpt/chat_context.py +++ b/chat_gpt/chat_context.py @@ -2,6 +2,7 @@ import csv import json import time +import uuid from collections import deque from pathlib import Path from typing import TYPE_CHECKING @@ -40,7 +41,8 @@ class EmbeddingBasedChatContext(BaseChatContext): def __init__(self, embedding_model: str, parent_chat: "Chat"): self.parent_chat = parent_chat self.embedding_model = embedding_model - self.context_file_path = GeneralConstants.EMBEDDINGS_FILE + embd_file = GeneralConstants.PACKAGE_TMPDIR / f"embeddings_{uuid.uuid4()}.csv" + self.context_file_path = embd_file def add_to_history(self, text: str): embedding_request = self.calculate_embedding(text=text) @@ -81,9 +83,7 @@ def request_embedding_from_openai(text: str, model: str): return {"embedding": embedding, "tokens_usage": tokens_usage} -def _store_object_and_embedding( - obj, embedding, file_path: Path = GeneralConstants.EMBEDDINGS_FILE -): +def _store_object_and_embedding(obj, embedding, file_path: Path): """Store message and embeddings to file.""" # Adapted from From e19a34292316edfc7532221969c79ce9d0368127 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Wed, 1 Nov 2023 17:45:14 +0100 Subject: [PATCH 034/109] Starting to play with multipage apps --- chat_gpt/app/app.py | 22 +++++++++++++++++ chat_gpt/app/pages/template.py | 43 +++++++++++++++++++--------------- pyproject.toml | 2 ++ 3 files changed, 48 insertions(+), 19 deletions(-) create mode 100644 chat_gpt/app/app.py diff --git a/chat_gpt/app/app.py b/chat_gpt/app/app.py new file mode 100644 index 0000000..cc34b36 --- /dev/null +++ b/chat_gpt/app/app.py @@ -0,0 +1,22 @@ +import shutil + +from st_pages import Page, add_page_title, show_pages + +from chat_gpt import GeneralConstants + +# Optional -- adds the title and icon to the current page +add_page_title() + +pkg_root = GeneralConstants.PACKAGE_DIRECTORY / "app" +n_pages = 2 + +pages = [] +for ipage in range(n_pages): + page_path = GeneralConstants.PACKAGE_TMPDIR / f"app_page_{ipage+1}.py" + shutil.copy(src=pkg_root / "pages/template.py", dst=page_path) + pages.append(Page(page_path.as_posix(), f"Chat {ipage+1}", ":books:")) + + +# Specify what pages should be shown in the sidebar, and what their titles +# and icons should be +show_pages(pages) diff --git a/chat_gpt/app/pages/template.py b/chat_gpt/app/pages/template.py index cb622c2..ce39021 100644 --- a/chat_gpt/app/pages/template.py +++ b/chat_gpt/app/pages/template.py @@ -5,44 +5,49 @@ import sys import streamlit as st +from streamlit_javascript import st_javascript from chat_gpt.chat import Chat +this_page_url = st_javascript("await fetch('').then(r => window.parent.location.href)") + + +this_page_state = st.session_state.get(this_page_url, {}) +st.session_state[this_page_url] = this_page_state + +# Set page title +page_title = f"Chat" +if len(this_page_state.get("messages", [])) == 2: + session_chat = this_page_state["chat"] + prompt = "Summarize the following message exchange as a short title:\n" + prompt += 
"\n\x1f".join(message["content"] for message in this_page_state["messages"]) + page_title = "".join(session_chat.yield_response(prompt)) + # st.title(page_title) + st.set_page_config(page_title=page_title) + # Initialize chat. Kepp it throughout the session. try: - session_chat = st.session_state["chat"] + session_chat = this_page_state["chat"] except KeyError: parsed_args_file = sys.argv[-1] with open(parsed_args_file, "rb") as parsed_args_file: args = pickle.load(parsed_args_file) session_chat = Chat.from_cli_args(cli_args=args) - st.session_state["chat"] = session_chat - -page_title = f"Chat with {session_chat.model}" -# Set the title that is shown in the browser's tab -st.set_page_config(page_title=page_title) -# Set page title -st.title(page_title) - + this_page_state["chat"] = session_chat -# Using "with" notation -with st.sidebar: - add_radio = st.radio( - "Choose a shipping method", ("Standard (5-15 days)", "Express (2-5 days)") - ) # Initialize chat history -if "messages" not in st.session_state: - st.session_state.messages = [] +if "messages" not in this_page_state: + this_page_state["messages"] = [] # Display chat messages from history on app rerun -for message in st.session_state.messages: +for message in this_page_state["messages"]: with st.chat_message(message["role"]): st.markdown(message["content"]) # Accept user input if prompt := st.chat_input("Send a message"): # Add user message to chat history - st.session_state.messages.append({"role": "user", "content": prompt}) + this_page_state["messages"].append({"role": "user", "content": prompt}) # Display user message in chat message container with st.chat_message("user"): st.markdown(prompt) @@ -58,4 +63,4 @@ message_placeholder.markdown(full_response + "▌") message_placeholder.markdown(full_response) # Add assistant response to chat history - st.session_state.messages.append({"role": "assistant", "content": full_response}) + this_page_state["messages"].append({"role": "assistant", "content": full_response}) diff --git a/pyproject.toml b/pyproject.toml index 3c942b1..0916601 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,8 +25,10 @@ numpy = "^1.26.1" openai = "^0.28.1" pandas = "^2.1.2" + st-pages = "^0.4.5" streamlit = "^1.28.0" streamlit-chat = "^0.1.1" + streamlit-javascript = "^0.1.5" tiktoken = "^0.5.1" [tool.poetry.group.dev.dependencies] From 08e73096ca6edf62e2801b7277a9e7cabe232ad5 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Thu, 2 Nov 2023 11:40:46 +0100 Subject: [PATCH 035/109] Experimenting with dynamically adding chats Based on the code provided at --- chat_gpt/app/app.py | 41 +++++++++++++-------- chat_gpt/app/multipage.py | 41 +++++++++++++++++++++ chat_gpt/app/page_template.py | 67 ++++++++++++++++++++++++++++++++++ chat_gpt/app/pages/template.py | 66 --------------------------------- 4 files changed, 134 insertions(+), 81 deletions(-) create mode 100644 chat_gpt/app/multipage.py create mode 100644 chat_gpt/app/page_template.py delete mode 100644 chat_gpt/app/pages/template.py diff --git a/chat_gpt/app/app.py b/chat_gpt/app/app.py index cc34b36..14ba6c0 100644 --- a/chat_gpt/app/app.py +++ b/chat_gpt/app/app.py @@ -1,22 +1,33 @@ -import shutil +import copy +import uuid -from st_pages import Page, add_page_title, show_pages +import page_template +import streamlit as st +from multipage import MultiPage -from chat_gpt import GeneralConstants +# Create an instance of the app +app = MultiPage() -# Optional -- adds the title and icon to the current page -add_page_title() +# Title of the main page 
+st.title("Chat GPT UI") -pkg_root = GeneralConstants.PACKAGE_DIRECTORY / "app" -n_pages = 2 +available_chats = st.session_state.get("available_chats", []) -pages = [] -for ipage in range(n_pages): - page_path = GeneralConstants.PACKAGE_TMPDIR / f"app_page_{ipage+1}.py" - shutil.copy(src=pkg_root / "pages/template.py", dst=page_path) - pages.append(Page(page_path.as_posix(), f"Chat {ipage+1}", ":books:")) +with st.sidebar: + if st.button(label="Create New Chat"): + # Add all your applications (pages) here + new_chat = { + "page_id": str(uuid.uuid4()), + "title": f"Chat {len(available_chats) + 1}", + "func": copy.deepcopy(page_template.app), + } + app.add_page(**new_chat) + available_chats.append(new_chat) + st.session_state["available_chats"] = available_chats +for chat in available_chats: + app.add_page(**chat) -# Specify what pages should be shown in the sidebar, and what their titles -# and icons should be -show_pages(pages) + +# The main app +app.run() diff --git a/chat_gpt/app/multipage.py b/chat_gpt/app/multipage.py new file mode 100644 index 0000000..8502961 --- /dev/null +++ b/chat_gpt/app/multipage.py @@ -0,0 +1,41 @@ +""" +This file is the framework for generating multiple Streamlit applications +through an object oriented framework. + +Adapted from: + +""" + +# Import necessary libraries +import streamlit as st + + +# Define the multipage class to manage the multiple apps in our program +class MultiPage: + """Framework for combining multiple streamlit applications.""" + + def __init__(self) -> None: + """Constructor class to generate a list which will store all our applications as an instance variable.""" + self.pages = [] + + def add_page(self, page_id, title, func) -> None: + """Class Method to Add pages to the project + Args: + title ([str]): The title of page which we are adding to the list of apps + + func: Python function to render this page in Streamlit + """ + + self.pages.append({"page_id": page_id, "title": title, "function": func}) + + def run(self): + # Drodown to select the page to run + page = st.sidebar.selectbox( + label="Select Chat", + options=self.pages, + format_func=lambda page: page["title"], + ) + + # run the app function + if page is not None: + page["function"](page_id=page["page_id"]) diff --git a/chat_gpt/app/page_template.py b/chat_gpt/app/page_template.py new file mode 100644 index 0000000..436a813 --- /dev/null +++ b/chat_gpt/app/page_template.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python3 +# Adapted from +# +import pickle +import sys + +import streamlit as st + +from chat_gpt.chat import Chat + + +def app(page_id): + this_page_state = st.session_state.get(page_id, {}) + st.session_state[page_id] = this_page_state + + # Set page title + page_title = f"Chat" + if len(this_page_state.get("messages", [])) == 2: + session_chat = this_page_state["chat"] + prompt = "Summarize the following message exchange as a short title:\n" + prompt += "\n\x1f".join( + message["content"] for message in this_page_state["messages"] + ) + page_title = "".join(session_chat.yield_response(prompt)) + # st.title(page_title) + # st.set_page_config(page_title=page_title) + + # Initialize chat. Kepp it throughout the session. 
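    # The try/except KeyError below is a cache-on-first-use idiom: reuse the Chat
    # stored for this page id, or build it once from the pickled CLI args. It is
    # roughly the eager equivalent of, e.g.,
    #
    #   chat = this_page_state.setdefault("chat", make_chat())
    #
    # (make_chat being a hypothetical factory), except that the except branch only
    # constructs the Chat on a cache miss.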
+ try: + session_chat = this_page_state["chat"] + except KeyError: + parsed_args_file = sys.argv[-1] + with open(parsed_args_file, "rb") as parsed_args_file: + args = pickle.load(parsed_args_file) + session_chat = Chat.from_cli_args(cli_args=args) + this_page_state["chat"] = session_chat + + # Initialize chat history + if "messages" not in this_page_state: + this_page_state["messages"] = [] + # Display chat messages from history on app rerun + for message in this_page_state["messages"]: + with st.chat_message(message["role"]): + st.markdown(message["content"]) + + # Accept user input + if prompt := st.chat_input("Send a message"): + # Add user message to chat history + this_page_state["messages"].append({"role": "user", "content": prompt}) + # Display user message in chat message container + with st.chat_message("user"): + st.markdown(prompt) + + # Display assistant response in chat message container + with st.chat_message("assistant"): + message_placeholder = st.empty() + message_placeholder.markdown("▌") # Use blinking cursor to indicate activity + full_response = "" + # Stream assistant response + for chunk in session_chat.yield_response(prompt): + full_response += chunk + message_placeholder.markdown(full_response + "▌") + message_placeholder.markdown(full_response) + # Add assistant response to chat history + this_page_state["messages"].append( + {"role": "assistant", "content": full_response} + ) diff --git a/chat_gpt/app/pages/template.py b/chat_gpt/app/pages/template.py deleted file mode 100644 index ce39021..0000000 --- a/chat_gpt/app/pages/template.py +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/env python3 -# Adapted from -# -import pickle -import sys - -import streamlit as st -from streamlit_javascript import st_javascript - -from chat_gpt.chat import Chat - -this_page_url = st_javascript("await fetch('').then(r => window.parent.location.href)") - - -this_page_state = st.session_state.get(this_page_url, {}) -st.session_state[this_page_url] = this_page_state - -# Set page title -page_title = f"Chat" -if len(this_page_state.get("messages", [])) == 2: - session_chat = this_page_state["chat"] - prompt = "Summarize the following message exchange as a short title:\n" - prompt += "\n\x1f".join(message["content"] for message in this_page_state["messages"]) - page_title = "".join(session_chat.yield_response(prompt)) - # st.title(page_title) - st.set_page_config(page_title=page_title) - -# Initialize chat. Kepp it throughout the session. 
-try: - session_chat = this_page_state["chat"] -except KeyError: - parsed_args_file = sys.argv[-1] - with open(parsed_args_file, "rb") as parsed_args_file: - args = pickle.load(parsed_args_file) - session_chat = Chat.from_cli_args(cli_args=args) - this_page_state["chat"] = session_chat - - -# Initialize chat history -if "messages" not in this_page_state: - this_page_state["messages"] = [] -# Display chat messages from history on app rerun -for message in this_page_state["messages"]: - with st.chat_message(message["role"]): - st.markdown(message["content"]) - -# Accept user input -if prompt := st.chat_input("Send a message"): - # Add user message to chat history - this_page_state["messages"].append({"role": "user", "content": prompt}) - # Display user message in chat message container - with st.chat_message("user"): - st.markdown(prompt) - - # Display assistant response in chat message container - with st.chat_message("assistant"): - message_placeholder = st.empty() - message_placeholder.markdown("▌") # Use blinking cursor to indicate activity - full_response = "" - # Stream assistant response - for chunk in session_chat.yield_response(prompt): - full_response += chunk - message_placeholder.markdown(full_response + "▌") - message_placeholder.markdown(full_response) - # Add assistant response to chat history - this_page_state["messages"].append({"role": "assistant", "content": full_response}) From e7ed4f3b5cbc462e8915f7ff3b4cc510ebd74c9c Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Thu, 2 Nov 2023 12:02:02 +0100 Subject: [PATCH 036/109] Roughly functioning changing chat titles --- chat_gpt/app/app.py | 6 +++--- chat_gpt/app/multipage.py | 15 ++++++++------- chat_gpt/app/page_template.py | 8 +++++--- 3 files changed, 16 insertions(+), 13 deletions(-) diff --git a/chat_gpt/app/app.py b/chat_gpt/app/app.py index 14ba6c0..0d67ce1 100644 --- a/chat_gpt/app/app.py +++ b/chat_gpt/app/app.py @@ -11,7 +11,7 @@ # Title of the main page st.title("Chat GPT UI") -available_chats = st.session_state.get("available_chats", []) +available_chats = st.session_state.get("available_chats", {}) with st.sidebar: if st.button(label="Create New Chat"): @@ -22,10 +22,10 @@ "func": copy.deepcopy(page_template.app), } app.add_page(**new_chat) - available_chats.append(new_chat) + available_chats[new_chat["page_id"]] = new_chat st.session_state["available_chats"] = available_chats -for chat in available_chats: +for chat in available_chats.values(): app.add_page(**chat) diff --git a/chat_gpt/app/multipage.py b/chat_gpt/app/multipage.py index 8502961..9751518 100644 --- a/chat_gpt/app/multipage.py +++ b/chat_gpt/app/multipage.py @@ -16,7 +16,7 @@ class MultiPage: def __init__(self) -> None: """Constructor class to generate a list which will store all our applications as an instance variable.""" - self.pages = [] + self.pages = {} def add_page(self, page_id, title, func) -> None: """Class Method to Add pages to the project @@ -26,16 +26,17 @@ def add_page(self, page_id, title, func) -> None: func: Python function to render this page in Streamlit """ - self.pages.append({"page_id": page_id, "title": title, "function": func}) + self.pages[page_id] = {"title": title, "function": func} def run(self): # Drodown to select the page to run - page = st.sidebar.selectbox( + id_and_page = st.sidebar.selectbox( label="Select Chat", - options=self.pages, - format_func=lambda page: page["title"], + options=self.pages.items(), + format_func=lambda id_and_page: id_and_page[1]["title"], ) # run the app function - if page is not 
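One detail worth flagging in the patch above: copy.deepcopy(page_template.app) does not
actually produce an independent page function. The copy module treats plain functions as
atomic, so every chat shares the same `app` object — which is why state ends up keyed by
page_id rather than by function. A quick check:

import copy

def app(page_id):
    ...

assert copy.deepcopy(app) is app  # deepcopy returns the very same function object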
From e7ed4f3b5cbc462e8915f7ff3b4cc510ebd74c9c Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros
Date: Thu, 2 Nov 2023 12:02:02 +0100
Subject: [PATCH 036/109] Roughly functioning changing chat titles

---
 chat_gpt/app/app.py           |  6 +++---
 chat_gpt/app/multipage.py     | 15 ++++++++-------
 chat_gpt/app/page_template.py |  8 +++++---
 3 files changed, 16 insertions(+), 13 deletions(-)

diff --git a/chat_gpt/app/app.py b/chat_gpt/app/app.py
index 14ba6c0..0d67ce1 100644
--- a/chat_gpt/app/app.py
+++ b/chat_gpt/app/app.py
@@ -11,7 +11,7 @@
 # Title of the main page
 st.title("Chat GPT UI")
 
-available_chats = st.session_state.get("available_chats", [])
+available_chats = st.session_state.get("available_chats", {})
 
 with st.sidebar:
     if st.button(label="Create New Chat"):
@@ -22,10 +22,10 @@
             "func": copy.deepcopy(page_template.app),
         }
         app.add_page(**new_chat)
-        available_chats.append(new_chat)
+        available_chats[new_chat["page_id"]] = new_chat
         st.session_state["available_chats"] = available_chats
 
-for chat in available_chats:
+for chat in available_chats.values():
     app.add_page(**chat)
 
 
diff --git a/chat_gpt/app/multipage.py b/chat_gpt/app/multipage.py
index 8502961..9751518 100644
--- a/chat_gpt/app/multipage.py
+++ b/chat_gpt/app/multipage.py
@@ -16,7 +16,7 @@ class MultiPage:
 
     def __init__(self) -> None:
         """Constructor class to generate a list which will store all our applications as an instance variable."""
-        self.pages = []
+        self.pages = {}
 
     def add_page(self, page_id, title, func) -> None:
         """Class Method to Add pages to the project
@@ -26,16 +26,17 @@ def add_page(self, page_id, title, func) -> None:
             func: Python function to render this page in Streamlit
         """
 
-        self.pages.append({"page_id": page_id, "title": title, "function": func})
+        self.pages[page_id] = {"title": title, "function": func}
 
     def run(self):
         # Dropdown to select the page to run
-        page = st.sidebar.selectbox(
+        id_and_page = st.sidebar.selectbox(
             label="Select Chat",
-            options=self.pages,
-            format_func=lambda page: page["title"],
+            options=self.pages.items(),
+            format_func=lambda id_and_page: id_and_page[1]["title"],
         )
 
         # run the app function
-        if page is not None:
-            page["function"](page_id=page["page_id"])
+        if id_and_page is not None:
+            page_id, page = id_and_page
+            page["function"](page_id=page_id)
diff --git a/chat_gpt/app/page_template.py b/chat_gpt/app/page_template.py
index 436a813..cf6ee04 100644
--- a/chat_gpt/app/page_template.py
+++ b/chat_gpt/app/page_template.py
@@ -14,15 +14,17 @@ def app(page_id):
     st.session_state[page_id] = this_page_state
 
     # Set page title
-    page_title = f"Chat"
-    if len(this_page_state.get("messages", [])) == 2:
+    page_title = this_page_state.get("page_title")
+    if page_title is None and len(this_page_state.get("messages", [])) == 2:
         session_chat = this_page_state["chat"]
         prompt = "Summarize the following message exchange as a short title:\n"
         prompt += "\n\x1f".join(
             message["content"] for message in this_page_state["messages"]
         )
         page_title = "".join(session_chat.yield_response(prompt))
-        # st.title(page_title)
+        st.title(page_title)
+        this_page_state["page_title"] = page_title
+        st.session_state["available_chats"][page_id]["title"] = page_title
         # st.set_page_config(page_title=page_title)
 
     # Initialize chat. Keep it throughout the session.
From f54ccd6139cae462b161221eda5f656cdfa1da61 Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros
Date: Thu, 2 Nov 2023 12:47:01 +0100
Subject: [PATCH 037/109] Stay on new page when it is created

The previous behaviour was to go back to page 0.
---
 chat_gpt/app/multipage.py | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/chat_gpt/app/multipage.py b/chat_gpt/app/multipage.py
index 9751518..72d8172 100644
--- a/chat_gpt/app/multipage.py
+++ b/chat_gpt/app/multipage.py
@@ -17,6 +17,8 @@ class MultiPage:
     def __init__(self) -> None:
         """Constructor class to generate a list which will store all our applications as an instance variable."""
         self.pages = {}
+        # Keep track of which page we're on, so we remain in it when adding a new page
+        self.selected_chat_index = None
 
     def add_page(self, page_id, title, func) -> None:
         """Class Method to Add pages to the project
@@ -27,16 +29,17 @@ def add_page(self, page_id, title, func) -> None:
         """
 
         self.pages[page_id] = {"title": title, "function": func}
+        # Signal to `run` that we should move to the newly added page
+        self.selected_chat_index = list(self.pages.keys()).index(page_id)
 
     def run(self):
         # Dropdown to select the page to run
-        id_and_page = st.sidebar.selectbox(
+        if id_and_page := st.sidebar.selectbox(
             label="Select Chat",
             options=self.pages.items(),
             format_func=lambda id_and_page: id_and_page[1]["title"],
-        )
-
-        # run the app function
-        if id_and_page is not None:
+            index=self.selected_chat_index,
+        ):
+            # run the app function
             page_id, page = id_and_page
             page["function"](page_id=page_id)
From ae60b02965b33624b9656d8b5f74cd2b412c4c7f Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros
Date: Thu, 2 Nov 2023 12:50:46 +0100
Subject: [PATCH 038/109] Initialise UI with a first chat

---
 chat_gpt/app/app.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/chat_gpt/app/app.py b/chat_gpt/app/app.py
index 0d67ce1..d64ecbc 100644
--- a/chat_gpt/app/app.py
+++ b/chat_gpt/app/app.py
@@ -14,7 +14,7 @@
 available_chats = st.session_state.get("available_chats", {})
 
 with st.sidebar:
-    if st.button(label="Create New Chat"):
+    if st.button(label="Create New Chat") or not available_chats:
         # Add all your applications (pages) here
         new_chat = {
             "page_id": str(uuid.uuid4()),
From 512fe69e9c8e23f0a2e68058a19f35b72e9a6b77 Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros
Date: Thu, 2 Nov 2023 13:27:43 +0100
Subject: [PATCH 039/109] Set page title according to context

---
 chat_gpt/app/app.py           | 10 ++++------
 chat_gpt/app/page_template.py | 30 ++++++++++++++++--------------
 2 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/chat_gpt/app/app.py b/chat_gpt/app/app.py
index d64ecbc..12cba39 100644
--- a/chat_gpt/app/app.py
+++ b/chat_gpt/app/app.py
@@ -5,17 +5,16 @@
 import streamlit as st
 from multipage import MultiPage
 
+st.set_page_config(page_title="ChatGPT UI", page_icon=":speech_balloon:")
+
 # Create an instance of the app
 app = MultiPage()
 
-# Title of the main page
-st.title("Chat GPT UI")
-
 available_chats = st.session_state.get("available_chats", {})
 
 with st.sidebar:
+    # Create a new chat upon init or button press
     if st.button(label="Create New Chat") or not available_chats:
-        # Add all your applications (pages) here
         new_chat = {
             "page_id": str(uuid.uuid4()),
             "title": f"Chat {len(available_chats) + 1}",
@@ -28,6 +27,5 @@
 for chat in available_chats.values():
     app.add_page(**chat)
 
-
-# The main app
+# Run the main app
 app.run()
diff --git a/chat_gpt/app/page_template.py b/chat_gpt/app/page_template.py
index cf6ee04..caf8e0f 100644
--- a/chat_gpt/app/page_template.py
+++ b/chat_gpt/app/page_template.py
@@ -13,20 +13,6 @@ def app(page_id):
     this_page_state = st.session_state.get(page_id, {})
     st.session_state[page_id] = this_page_state
 
-    # Set page title
-    page_title = this_page_state.get("page_title")
-    if page_title is None and len(this_page_state.get("messages", [])) == 2:
-        session_chat = this_page_state["chat"]
-        prompt = "Summarize the following message exchange as a short title:\n"
-        prompt += "\n\x1f".join(
-            message["content"] for message in this_page_state["messages"]
-        )
-        page_title = "".join(session_chat.yield_response(prompt))
-        st.title(page_title)
-        this_page_state["page_title"] = page_title
-        st.session_state["available_chats"][page_id]["title"] = page_title
-        # st.set_page_config(page_title=page_title)
-
     # Initialize chat. Keep it throughout the session.
     try:
         session_chat = this_page_state["chat"]
@@ -37,6 +23,8 @@ def app(page_id):
         session_chat = Chat.from_cli_args(cli_args=args)
         this_page_state["chat"] = session_chat
 
+    st.title(this_page_state.get("page_title", f"Chat with {session_chat.model}"))
+
     # Initialize chat history
     if "messages" not in this_page_state:
         this_page_state["messages"] = []
@@ -67,3 +55,17 @@ def app(page_id):
         this_page_state["messages"].append(
             {"role": "assistant", "content": full_response}
         )
+
+    # Reset title according to conversation initial contents
+    if "page_title" not in this_page_state and len(this_page_state["messages"]) > 1:
+        with st.spinner("Working out conversation topic..."):
+            prompt = (
+                "Summarize the following message exchange in a maximum of 4 words:\n"
+            )
+            prompt += "\n\x1f".join(
+                message["content"] for message in this_page_state["messages"]
+            )
+            page_title = "".join(session_chat.yield_response(prompt))
+            st.title(page_title)
+            st.session_state["available_chats"][page_id]["title"] = page_title
+            this_page_state["page_title"] = page_title
From 572dbef13b099dfde972e7a5f76bbb04b5c580e6 Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros
Date: Thu, 2 Nov 2023 14:06:12 +0100
Subject: [PATCH 040/109] Change pkgname to `gpt-buddy-bot` and exe to `gbb`

---
 .gitignore                                         | 3 +++
 {chat_gpt => gpt_buddy_bot}/__init__.py            | 1 +
 {chat_gpt => gpt_buddy_bot}/__main__.py            | 0
 {chat_gpt => gpt_buddy_bot}/app/__init__.py        | 0
 {chat_gpt => gpt_buddy_bot}/app/app.py             | 4 +++-
 {chat_gpt => gpt_buddy_bot}/app/multipage.py       | 0
 {chat_gpt => gpt_buddy_bot}/app/page_template.py   | 9 +++++++--
 {chat_gpt => gpt_buddy_bot}/argparse_wrapper.py    | 0
 {chat_gpt => gpt_buddy_bot}/chat.py                | 0
 {chat_gpt => gpt_buddy_bot}/chat_context.py        | 0
 {chat_gpt => gpt_buddy_bot}/command_definitions.py | 0
 {chat_gpt => gpt_buddy_bot}/tokens.py              | 0
 pyproject.toml                                     | 6 +++---
 13 files changed, 17 insertions(+), 6 deletions(-)
 rename {chat_gpt => gpt_buddy_bot}/__init__.py (91%)
 rename {chat_gpt => gpt_buddy_bot}/__main__.py (100%)
 rename {chat_gpt => gpt_buddy_bot}/app/__init__.py (100%)
 rename {chat_gpt => gpt_buddy_bot}/app/app.py (85%)
 rename {chat_gpt => gpt_buddy_bot}/app/multipage.py (100%)
 rename {chat_gpt => gpt_buddy_bot}/app/page_template.py (92%)
 rename {chat_gpt => gpt_buddy_bot}/argparse_wrapper.py (100%)
 rename {chat_gpt => gpt_buddy_bot}/chat.py (100%)
 rename {chat_gpt => gpt_buddy_bot}/chat_context.py (100%)
 rename {chat_gpt => gpt_buddy_bot}/command_definitions.py (100%)
 rename {chat_gpt => gpt_buddy_bot}/tokens.py (100%)

diff --git a/.gitignore b/.gitignore
index 0a53700..b8c0e73 100644
--- a/.gitignore
+++ b/.gitignore
@@ -161,3 +161,6 @@ cython_debug/
 
 # Vim
 *.swp
+
+# Temporary files and directories
+tmp/
diff --git a/chat_gpt/__init__.py b/gpt_buddy_bot/__init__.py
similarity index 91%
rename from chat_gpt/__init__.py
rename to gpt_buddy_bot/__init__.py
index 1f9cbef..b6a3ae8 100644
--- a/chat_gpt/__init__.py
+++ b/gpt_buddy_bot/__init__.py
@@ -11,6 +11,7 @@ class GeneralConstants:
 
     PACKAGE_NAME = __name__
     VERSION = version(__name__)
+    APP_NAME = PACKAGE_NAME.title().replace("Gpt", "GPT").replace("_", " ")
     PACKAGE_DIRECTORY = Path(__file__).parent
     RUN_ID = uuid.uuid4().hex
     PACKAGE_CACHE_DIRECTORY = Path.home() / ".cache" / PACKAGE_NAME
diff --git a/chat_gpt/__main__.py b/gpt_buddy_bot/__main__.py
similarity index 100%
rename from chat_gpt/__main__.py
rename to gpt_buddy_bot/__main__.py
diff --git a/chat_gpt/app/__init__.py b/gpt_buddy_bot/app/__init__.py
similarity index 100%
rename from chat_gpt/app/__init__.py
rename to gpt_buddy_bot/app/__init__.py
diff --git a/chat_gpt/app/app.py b/gpt_buddy_bot/app/app.py
similarity index 85%
rename from chat_gpt/app/app.py
rename to gpt_buddy_bot/app/app.py
index 12cba39..d8d48c3 100644
--- a/chat_gpt/app/app.py
+++ b/gpt_buddy_bot/app/app.py
@@ -5,7 +5,9 @@
 import streamlit as st
 from multipage import MultiPage
 
-st.set_page_config(page_title="ChatGPT UI", page_icon=":speech_balloon:")
+from gpt_buddy_bot import GeneralConstants
+
+st.set_page_config(page_title=GeneralConstants.APP_NAME, page_icon=":speech_balloon:")
 
 # Create an instance of the app
 app = MultiPage()
diff --git a/chat_gpt/app/multipage.py b/gpt_buddy_bot/app/multipage.py
similarity index 100%
rename from chat_gpt/app/multipage.py
rename to gpt_buddy_bot/app/multipage.py
diff --git a/chat_gpt/app/page_template.py b/gpt_buddy_bot/app/page_template.py
similarity index 92%
rename from chat_gpt/app/page_template.py
rename to gpt_buddy_bot/app/page_template.py
index caf8e0f..7c438cc 100644
--- a/chat_gpt/app/page_template.py
+++ b/gpt_buddy_bot/app/page_template.py
@@ -6,7 +6,8 @@
 
 import streamlit as st
 
-from chat_gpt.chat import Chat
+from gpt_buddy_bot import GeneralConstants
+from gpt_buddy_bot.chat import Chat
 
 
 def app(page_id):
@@ -23,7 +24,11 @@ def app(page_id):
         session_chat = Chat.from_cli_args(cli_args=args)
         this_page_state["chat"] = session_chat
 
-    st.title(this_page_state.get("page_title", f"Chat with {session_chat.model}"))
+    st.title(
+        this_page_state.get(
+            "page_title", f"{GeneralConstants.APP_NAME} ({session_chat.model})"
+        )
+    )
 
     # Initialize chat history
     if "messages" not in this_page_state:
diff --git a/chat_gpt/argparse_wrapper.py b/gpt_buddy_bot/argparse_wrapper.py
similarity index 100%
rename from chat_gpt/argparse_wrapper.py
rename to gpt_buddy_bot/argparse_wrapper.py
diff --git a/chat_gpt/chat.py b/gpt_buddy_bot/chat.py
similarity index 100%
rename from chat_gpt/chat.py
rename to gpt_buddy_bot/chat.py
diff --git a/chat_gpt/chat_context.py b/gpt_buddy_bot/chat_context.py
similarity index 100%
rename from chat_gpt/chat_context.py
rename to gpt_buddy_bot/chat_context.py
diff --git a/chat_gpt/command_definitions.py b/gpt_buddy_bot/command_definitions.py
similarity index 100%
rename from chat_gpt/command_definitions.py
rename to gpt_buddy_bot/command_definitions.py
diff --git a/chat_gpt/tokens.py b/gpt_buddy_bot/tokens.py
similarity index 100%
rename from chat_gpt/tokens.py
rename to gpt_buddy_bot/tokens.py
diff --git a/pyproject.toml b/pyproject.toml
index 0916601..ef2177d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,8 +1,8 @@
 [tool.poetry]
   authors = ["Paulo V C Medeiros "]
-  description = "A simple package to test OpenAI API capabilities."
+  description = "A simple UI & terminal ChatGPT chatbot that uses OpenAI API."
   license = "MIT"
-  name = "chat_gpt"
+  name = "gpt-buddy-bot"
   readme = "README.md"
   version = "0.1.0"
 
@@ -11,7 +11,7 @@
   requires = ["poetry-core"]
 
 [tool.poetry.scripts]
-  chatgpt = "chat_gpt.__main__:main"
+  gbb = "gpt_buddy_bot.__main__:main"
 
 [tool.poetry.dependencies]
   # Python version
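For reference, the APP_NAME expression introduced in patch 040 evaluates step by step as
follows (a worked example, runnable as-is):

name = "gpt_buddy_bot"
print(name.title())                                          # Gpt_Buddy_Bot
print(name.title().replace("Gpt", "GPT"))                    # GPT_Buddy_Bot
print(name.title().replace("Gpt", "GPT").replace("_", " "))  # GPT Buddy Bot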
page.""" + return self.state.get("page_title", self._initial_title) + + @title.setter + def title(self, value): + """Set the title of the page.""" + st.title(value) + self.state["page_title"] = value + self.state["sidebar_title"] = value + + @property + def sidebar_title(self): + """Get the title of the page in the sidebar.""" + return self.state.get("sidebar_title", self._initial_sidebar_title) + + @abstractmethod + def create(self): + """Create the page.""" + + +class ChatBotPage(AppPage): + @property + def chat_obj(self) -> Chat: + """Return the chat object responsible for the queries in this page.""" + try: + this_page_chat = self.state["chat"] + except KeyError: + parsed_args_file = sys.argv[-1] + with open(parsed_args_file, "rb") as parsed_args_file: + args = pickle.load(parsed_args_file) + this_page_chat = Chat.from_cli_args(cli_args=args) + self.state["chat"] = this_page_chat + return this_page_chat + + @property + def chat_history(self) -> list[dict[str, str]]: + """Return the chat history of the page.""" + if "messages" not in self.state: + self.state["messages"] = [] + return self.state["messages"] + + def render_chat_history(self): + """Render the chat history of the page.""" + for message in self.chat_history: + with st.chat_message(message["role"]): + st.markdown(message["content"]) + + def create(self): + """Create a chatbot page. + + Adapted from: + + + """ + st.title(self.title) + + self.render_chat_history() + + # Accept user input + if prompt := st.chat_input("Send a message"): + self.chat_history.append({"role": "user", "content": prompt}) + # Display user message in chat message container + with st.chat_message("user"): + st.markdown(prompt) + + # Display assistant response in chat message container + with st.chat_message("assistant"): + # Use blinking cursor to indicate activity + message_placeholder = st.empty() + message_placeholder.markdown("▌") + full_response = "" + # Stream assistant response + for chunk in self.chat_obj.yield_response(prompt): + full_response += chunk + message_placeholder.markdown(full_response + "▌") + message_placeholder.markdown(full_response) + self.chat_history.append({"role": "assistant", "content": full_response}) + + # Reset title according to conversation initial contents + if "page_title" not in self.state and len(self.chat_history) > 1: + with st.spinner("Working out conversation topic..."): + prompt = "Summarize the following msg exchange. Use max of 4 words:\n" + prompt += "\n\x1f".join( + message["content"] for message in self.chat_history + ) + self.title = "".join(self.chat_obj.yield_response(prompt)) diff --git a/gpt_buddy_bot/app/multipage.py b/gpt_buddy_bot/app/multipage.py index 72d8172..dbe9220 100644 --- a/gpt_buddy_bot/app/multipage.py +++ b/gpt_buddy_bot/app/multipage.py @@ -1,45 +1,40 @@ -""" -This file is the framework for generating multiple Streamlit applications -through an object oriented framework. - -Adapted from: - -""" - -# Import necessary libraries +"Code for the creation streamlit apps with dynamically created pages." import streamlit as st +from app_page_templates import AppPage + +class MultiPageApp: + """Framework for creating streamlite multipage apps. -# Define the multipage class to manage the multiple apps in our program -class MultiPage: - """Framework for combining multiple streamlit applications.""" + Adapted from: + . 
- def __init__(self) -> None: - """Constructor class to generate a list which will store all our applications as an instance variable.""" - self.pages = {} - # Keep track of which page we're on, so we remain in it when adding a new page - self.selected_chat_index = None + """ - def add_page(self, page_id, title, func) -> None: - """Class Method to Add pages to the project - Args: - title ([str]): The title of page which we are adding to the list of apps + def __init__(self, **kwargs) -> None: + """Initialise streamlit page configs.""" + st.set_page_config(**kwargs) - func: Python function to render this page in Streamlit - """ + @property + def pages(self) -> AppPage: + """Return the pages of the app.""" + if "available_pages" not in st.session_state: + st.session_state["available_pages"] = {} + return st.session_state["available_pages"] - self.pages[page_id] = {"title": title, "function": func} - # Signal to `run` taht we should move to the newly added page - self.selected_chat_index = list(self.pages.keys()).index(page_id) + def add_page(self, page: AppPage) -> None: + """Add a page to the app.""" + self.pages[page.page_id] = page + st.session_state["switch_page"] = True def run(self): - # Drodown to select the page to run - if id_and_page := st.sidebar.selectbox( + """Run the app.""" + # Drodown menu to select the page to run + if page := st.sidebar.selectbox( label="Select Chat", - options=self.pages.items(), - format_func=lambda id_and_page: id_and_page[1]["title"], - index=self.selected_chat_index, + options=self.pages.values(), + format_func=lambda page: page.sidebar_title, + index=len(self.pages) - 1, ): - # run the app function - page_id, page = id_and_page - page["function"](page_id=page_id) + page.create() diff --git a/gpt_buddy_bot/app/page_template.py b/gpt_buddy_bot/app/page_template.py deleted file mode 100644 index 7c438cc..0000000 --- a/gpt_buddy_bot/app/page_template.py +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/env python3 -# Adapted from -# -import pickle -import sys - -import streamlit as st - -from gpt_buddy_bot import GeneralConstants -from gpt_buddy_bot.chat import Chat - - -def app(page_id): - this_page_state = st.session_state.get(page_id, {}) - st.session_state[page_id] = this_page_state - - # Initialize chat. Kepp it throughout the session. 
- try: - session_chat = this_page_state["chat"] - except KeyError: - parsed_args_file = sys.argv[-1] - with open(parsed_args_file, "rb") as parsed_args_file: - args = pickle.load(parsed_args_file) - session_chat = Chat.from_cli_args(cli_args=args) - this_page_state["chat"] = session_chat - - st.title( - this_page_state.get( - "page_title", f"{GeneralConstants.APP_NAME} ({session_chat.model})" - ) - ) - - # Initialize chat history - if "messages" not in this_page_state: - this_page_state["messages"] = [] - # Display chat messages from history on app rerun - for message in this_page_state["messages"]: - with st.chat_message(message["role"]): - st.markdown(message["content"]) - - # Accept user input - if prompt := st.chat_input("Send a message"): - # Add user message to chat history - this_page_state["messages"].append({"role": "user", "content": prompt}) - # Display user message in chat message container - with st.chat_message("user"): - st.markdown(prompt) - - # Display assistant response in chat message container - with st.chat_message("assistant"): - message_placeholder = st.empty() - message_placeholder.markdown("▌") # Use blinking cursor to indicate activity - full_response = "" - # Stream assistant response - for chunk in session_chat.yield_response(prompt): - full_response += chunk - message_placeholder.markdown(full_response + "▌") - message_placeholder.markdown(full_response) - # Add assistant response to chat history - this_page_state["messages"].append( - {"role": "assistant", "content": full_response} - ) - - # Reset title according to conversation initial contents - if "page_title" not in this_page_state and len(this_page_state["messages"]) > 1: - with st.spinner("Working out conversation topic..."): - prompt = ( - "Summarize the following message exchange in a maximum of 4 words:\n" - ) - prompt += "\n\x1f".join( - message["content"] for message in this_page_state["messages"] - ) - page_title = "".join(session_chat.yield_response(prompt)) - st.title(page_title) - st.session_state["available_chats"][page_id]["title"] = page_title - this_page_state["page_title"] = page_title diff --git a/pyproject.toml b/pyproject.toml index ef2177d..74f4c30 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,10 +25,7 @@ numpy = "^1.26.1" openai = "^0.28.1" pandas = "^2.1.2" - st-pages = "^0.4.5" streamlit = "^1.28.0" - streamlit-chat = "^0.1.1" - streamlit-javascript = "^0.1.5" tiktoken = "^0.5.1" [tool.poetry.group.dev.dependencies] From a63ad743c608ccd610c431c7fee21d3575b56727 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Fri, 3 Nov 2023 01:34:32 +0100 Subject: [PATCH 042/109] Implement page selection via buttons in sidebar --- gpt_buddy_bot/app/app.py | 8 ++-- gpt_buddy_bot/app/app_page_templates.py | 14 +++--- gpt_buddy_bot/app/multipage.py | 59 +++++++++++++++++++------ gpt_buddy_bot/chat.py | 29 +++++++++--- gpt_buddy_bot/command_definitions.py | 7 ++- 5 files changed, 85 insertions(+), 32 deletions(-) diff --git a/gpt_buddy_bot/app/app.py b/gpt_buddy_bot/app/app.py index bb0db1a..b35524b 100644 --- a/gpt_buddy_bot/app/app.py +++ b/gpt_buddy_bot/app/app.py @@ -11,9 +11,11 @@ def run_app(): app = MultiPageApp(page_title=GeneralConstants.APP_NAME, page_icon=":speech_balloon:") with st.sidebar: # Create a new chat upon init or button press - if st.button(label="Create New Chat") or not app.pages: - app.add_page(ChatBotPage(sidebar_title=f"Chat {len(app.pages) + 1}")) - app.run() + if st.button(label=":speech_balloon: Create New Chat") or not app.pages: + app.add_page( + 
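To see what patch 041's abstraction buys, here is an editor's sketch (not part of the
series) of a second page type plugged into the same framework. `page_title` is passed
explicitly because the AppPage default consults self.chat_obj, which only ChatBotPage
defines:

import streamlit as st
from app_page_templates import AppPage
from multipage import MultiPageApp


class EchoPage(AppPage):
    def create(self):
        # Minimal page: just echo whatever the user types
        st.title(self.title)
        if msg := st.chat_input("Say something"):
            st.markdown(msg)


app = MultiPageApp(page_title="Demo", page_icon=":books:")
if st.sidebar.button("New echo page") or not app.pages:
    app.add_page(EchoPage(sidebar_title="Echo", page_title="Echo page"))
app.run()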
From a63ad743c608ccd610c431c7fee21d3575b56727 Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros
Date: Fri, 3 Nov 2023 01:34:32 +0100
Subject: [PATCH 042/109] Implement page selection via buttons in sidebar

---
 gpt_buddy_bot/app/app.py                |  8 ++--
 gpt_buddy_bot/app/app_page_templates.py | 14 +++---
 gpt_buddy_bot/app/multipage.py          | 59 +++++++++++++++++++------
 gpt_buddy_bot/chat.py                   | 29 +++++++++---
 gpt_buddy_bot/command_definitions.py    |  7 ++-
 5 files changed, 85 insertions(+), 32 deletions(-)

diff --git a/gpt_buddy_bot/app/app.py b/gpt_buddy_bot/app/app.py
index bb0db1a..b35524b 100644
--- a/gpt_buddy_bot/app/app.py
+++ b/gpt_buddy_bot/app/app.py
@@ -11,9 +11,11 @@ def run_app():
     app = MultiPageApp(page_title=GeneralConstants.APP_NAME, page_icon=":speech_balloon:")
     with st.sidebar:
         # Create a new chat upon init or button press
-        if st.button(label="Create New Chat") or not app.pages:
-            app.add_page(ChatBotPage(sidebar_title=f"Chat {len(app.pages) + 1}"))
-        app.run()
+        if st.button(label=":speech_balloon: Create New Chat") or not app.pages:
+            app.add_page(
+                ChatBotPage(sidebar_title=f"Chat {len(app.pages) + 1}"), selected=True
+            )
+        app.render()
 
 
 if __name__ == "__main__":
diff --git a/gpt_buddy_bot/app/app_page_templates.py b/gpt_buddy_bot/app/app_page_templates.py
index edd168b..e1bc54f 100644
--- a/gpt_buddy_bot/app/app_page_templates.py
+++ b/gpt_buddy_bot/app/app_page_templates.py
@@ -45,7 +45,7 @@ def sidebar_title(self):
         return self.state.get("sidebar_title", self._initial_sidebar_title)
 
     @abstractmethod
-    def create(self):
+    def render(self):
         """Create the page."""
 
 
@@ -76,8 +76,8 @@ def render_chat_history(self):
             with st.chat_message(message["role"]):
                 st.markdown(message["content"])
 
-    def create(self):
-        """Create a chatbot page.
+    def render(self):
+        """Render a chatbot page.
 
         Adapted from:
         
@@ -101,17 +101,17 @@
             message_placeholder.markdown("▌")
             full_response = ""
             # Stream assistant response
-            for chunk in self.chat_obj.yield_response(prompt):
+            for chunk in self.chat_obj.respond_user_prompt(prompt):
                 full_response += chunk
                 message_placeholder.markdown(full_response + "▌")
             message_placeholder.markdown(full_response)
             self.chat_history.append({"role": "assistant", "content": full_response})
 
         # Reset title according to conversation initial contents
-        if "page_title" not in self.state and len(self.chat_history) > 1:
+        if "page_title" not in self.state and len(self.chat_history) > 3:
             with st.spinner("Working out conversation topic..."):
-                prompt = "Summarize the following msg exchange. Use max of 4 words:\n"
+                prompt = "Summarize the following msg exchange in max 4 words:\n"
                 prompt += "\n\x1f".join(
                     message["content"] for message in self.chat_history
                 )
-                self.title = "".join(self.chat_obj.yield_response(prompt))
+                self.title = "".join(self.chat_obj.respond_system_prompt(prompt))
diff --git a/gpt_buddy_bot/app/multipage.py b/gpt_buddy_bot/app/multipage.py
index dbe9220..deaa929 100644
--- a/gpt_buddy_bot/app/multipage.py
+++ b/gpt_buddy_bot/app/multipage.py
@@ -1,4 +1,6 @@
 "Code for the creation of streamlit apps with dynamically created pages."
+from functools import partial
+
 import streamlit as st
 from app_page_templates import AppPage
 
@@ -12,9 +14,10 @@ class MultiPageApp:
 
     """
 
-    def __init__(self, **kwargs) -> None:
+    def __init__(self, sidebar_mode="buttons", **kwargs) -> None:
         """Initialise streamlit page configs."""
         st.set_page_config(**kwargs)
+        self.sidebar_mode = sidebar_mode.lower()
 
     @property
     def pages(self) -> AppPage:
@@ -23,18 +26,46 @@ def pages(self) -> AppPage:
             st.session_state["available_pages"] = {}
         return st.session_state["available_pages"]
 
-    def add_page(self, page: AppPage) -> None:
+    def add_page(self, page: AppPage, selected: bool = False) -> None:
         """Add a page to the app."""
         self.pages[page.page_id] = page
-        st.session_state["switch_page"] = True
-
-    def run(self):
-        """Run the app."""
-        # Dropdown menu to select the page to run
-        if page := st.sidebar.selectbox(
-            label="Select Chat",
-            options=self.pages.values(),
-            format_func=lambda page: page.sidebar_title,
-            index=len(self.pages) - 1,
-        ):
-            page.create()
+        if selected:
+            self.register_selected_page(page)
+
+    def register_selected_page(self, page: AppPage):
+        """Register a page as selected."""
+        st.session_state["selected_page"] = page
+
+    @property
+    def selected_page(self) -> AppPage:
+        """Return the selected page."""
+        if "selected_page" not in st.session_state:
+            return next(iter(self.pages.values()))
+        return st.session_state["selected_page"]
+
+    def handle_ui_page_selection(self):
+        """Control page selection in the UI sidebar."""
+        if self.sidebar_mode == "buttons":
+            with st.sidebar:
+                for page in self.pages.values():
+                    st.button(
+                        label=page.sidebar_title,
+                        on_click=partial(self.register_selected_page, page),
+                    )
+        elif self.sidebar_mode == "dropdown":
+            if page := st.sidebar.selectbox(
+                label="Select Chat",
+                options=self.pages.values(),
+                format_func=lambda page: page.sidebar_title,
+                index=len(self.pages) - 1,
+            ):
+                self.register_selected_page(page)
+        else:
+            raise NotImplementedError(
+                f"Sidebar mode '{self.sidebar_mode}' is not implemented."
+            )
+
+    def render(self):
+        """Render the multipage app with focus on the selected page."""
+        self.handle_ui_page_selection()
+        self.selected_page.render()
diff --git a/gpt_buddy_bot/chat.py b/gpt_buddy_bot/chat.py
index 84459bb..e66bd6e 100644
--- a/gpt_buddy_bot/chat.py
+++ b/gpt_buddy_bot/chat.py
@@ -34,7 +34,7 @@ def __init__(
                     f"You are a helpful assistant to {self.username}.",
                     "You answer correctly. You do not lie.",
                     f"{base_instructions.strip(' .')}.",
-                    f"You follow all directives by {self.system_name}.",
+                    f"You must remember and follow all directives by {self.system_name}.",
                 ]
                 if instruction.strip()
             ]
@@ -82,13 +82,20 @@ def from_cli_args(cls, cli_args):
             report_accounting_when_done=not cli_args.skip_reporting_costs,
         )
 
-    def yield_response(self, prompt: str):
-        prompt = prompt.strip()
-        prompt_as_msg = {"role": "user", "name": self.username, "content": prompt}
+    def respond_user_prompt(self, prompt: str):
+        yield from self._respond_prompt(prompt=prompt, role="user")
+
+    def respond_system_prompt(self, prompt: str):
+        yield from self._respond_prompt(prompt=prompt, role="system")
+
+    def yield_response_from_msg(self, prompt_as_msg: dict):
+        role = prompt_as_msg["role"]
+        prompt = prompt_as_msg["content"]
 
         # Get appropriate context for prompt from the context handler
         prompt_context_request = self.context_handler.get_context(text=prompt)
         context = prompt_context_request["context_messages"]
+
         # Update token_usage with tokens used in context handler for prompt
         self.token_usage[self.context_model]["input"] += sum(
             prompt_context_request["tokens_usage"].values()
@@ -112,10 +119,11 @@
             string=full_reply_content, model=self.model
         )
 
-        # Put current chat exchande in context handler's history
+        # Put current chat exchange in context handler's history
         history_entry_registration_tokens_usage = self.context_handler.add_to_history(
-            text=f"{self.username}: {prompt}. {self.assistant_name}: {full_reply_content}"
+            text=f"{role}: {prompt}. {self.assistant_name}: {full_reply_content}"
         )
+
        # Update token_usage with tokens used in context handler for reply
         self.token_usage[self.context_model]["output"] += sum(
             history_entry_registration_tokens_usage.values()
@@ -128,7 +136,7 @@ def start(self):
             if not question:
                 continue
             print(f"{self.assistant_name}: ", end="", flush=True)
-            for chunk in self.yield_response(prompt=question):
+            for chunk in self.respond_user_prompt(prompt=question):
                 print(chunk, end="", flush=True)
             print()
         print()
@@ -138,6 +146,13 @@ def report_token_usage(self, current_chat: bool = True):
         self.token_usage_db.print_usage_costs(self.token_usage, current_chat=current_chat)
 
+    def _respond_prompt(self, prompt: str, role: str):
+        prompt = prompt.strip()
+        role = role.lower().strip()
+        role2name = {"user": self.username, "system": self.system_name}
+        prompt_as_msg = {"role": role, "name": role2name[role], "content": prompt}
+        yield from self.yield_response_from_msg(prompt_as_msg)
+
 
 def _make_api_call(conversation: list, model: str):
     success = False
diff --git a/gpt_buddy_bot/command_definitions.py b/gpt_buddy_bot/command_definitions.py
index 45b64cd..bf2525e 100644
--- a/gpt_buddy_bot/command_definitions.py
+++ b/gpt_buddy_bot/command_definitions.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-import contextlib
 import pickle
 from subprocess import run
 
@@ -29,6 +28,12 @@ def run_on_ui(args):
         "run",
         app_path.as_posix(),
         "--theme.base=dark",
+        "--runner.fastReruns",
+        "True",
+        "--server.runOnSave",
+        "True",
+        "--browser.gatherUsageStats",
+        "False",
         "--",
         GeneralConstants.PARSED_ARGS_FILE.as_posix(),
     ]
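The functools.partial construction in patch 042 is the standard way to bind per-item
arguments to Streamlit callbacks: partial freezes the page argument at loop time, whereas
a bare lambda would look the loop variable up at click time and make every button select
the last page. Reduced to its essentials (an editor's sketch, names illustrative):

from functools import partial

import streamlit as st


def register_selected_page(page_name):
    st.session_state["selected_page"] = page_name


for name in ("Chat 1", "Chat 2", "Chat 3"):
    # partial(...) captures `name` now; a plain lambda would see only the last value
    st.sidebar.button(name, key=f"select_{name}",
                      on_click=partial(register_selected_page, name))

st.write("Selected:", st.session_state.get("selected_page"))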
From da05a932b4c7557a49f750a4fbfb8ad3c0cad331 Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros
Date: Fri, 3 Nov 2023 02:20:54 +0100
Subject: [PATCH 043/109] Add option to remove chats

---
 gpt_buddy_bot/app/app.py       |  3 ++-
 gpt_buddy_bot/app/multipage.py | 36 ++++++++++++++++++++++++++++++----
 2 files changed, 34 insertions(+), 5 deletions(-)

diff --git a/gpt_buddy_bot/app/app.py b/gpt_buddy_bot/app/app.py
index b35524b..c882157 100644
--- a/gpt_buddy_bot/app/app.py
+++ b/gpt_buddy_bot/app/app.py
@@ -13,7 +13,8 @@ def run_app():
         # Create a new chat upon init or button press
         if st.button(label=":speech_balloon: Create New Chat") or not app.pages:
             app.add_page(
-                ChatBotPage(sidebar_title=f"Chat {len(app.pages) + 1}"), selected=True
+                ChatBotPage(sidebar_title=f"Chat {app.n_created_pages + 1}"),
+                selected=True,
             )
         app.render()
 
diff --git a/gpt_buddy_bot/app/multipage.py b/gpt_buddy_bot/app/multipage.py
index deaa929..0104e3c 100644
--- a/gpt_buddy_bot/app/multipage.py
+++ b/gpt_buddy_bot/app/multipage.py
@@ -19,6 +19,15 @@
         st.set_page_config(**kwargs)
         self.sidebar_mode = sidebar_mode.lower()
 
+    @property
+    def n_created_pages(self):
+        """Return the number of pages created by the app, including deleted ones."""
+        return st.session_state.get("n_created_pages", 0)
+
+    @n_created_pages.setter
+    def n_created_pages(self, value):
+        st.session_state["n_created_pages"] = value
+
     @property
     def pages(self) -> AppPage:
         """Return the pages of the app."""
@@ -29,9 +38,15 @@
     def add_page(self, page: AppPage, selected: bool = False) -> None:
         """Add a page to the app."""
         self.pages[page.page_id] = page
+        self.n_created_pages += 1
         if selected:
             self.register_selected_page(page)
 
+    def remove_page(self, page: AppPage):
+        """Remove a page from the app."""
+        del self.pages[page.page_id]
+        self.register_selected_page(next(iter(self.pages.values())))
+
     def register_selected_page(self, page: AppPage):
         """Register a page as selected."""
         st.session_state["selected_page"] = page
@@ -47,11 +62,24 @@
         """Control page selection in the UI sidebar."""
         if self.sidebar_mode == "buttons":
             with st.sidebar:
+                col1, col2 = st.columns([0.75, 0.25])
                 for page in self.pages.values():
-                    st.button(
-                        label=page.sidebar_title,
-                        on_click=partial(self.register_selected_page, page),
-                    )
+                    with col1:
+                        st.button(
+                            label=page.sidebar_title,
+                            key=f"select_{page.page_id}",
+                            on_click=partial(self.register_selected_page, page),
+                            use_container_width=True,
+                        )
+                    with col2:
+                        st.button(
+                            ":wastebasket:",
+                            key=f"delete_{page.page_id}",
+                            type="primary",
+                            use_container_width=True,
+                            on_click=partial(self.remove_page, page),
+                            help="Delete this chat.",
+                        )
         elif self.sidebar_mode == "dropdown":
             if page := st.sidebar.selectbox(
                 label="Select Chat",
From 4e19b4d119d9d8f42ea535592835bd1238633879 Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros
Date: Fri, 3 Nov 2023 02:22:54 +0100
Subject: [PATCH 044/109] Remove dropdown sidebar mode

The interface with buttons seems better looking
---
 gpt_buddy_bot/app/multipage.py | 54 +++++++++++++---------------------
 1 file changed, 20 insertions(+), 34 deletions(-)

diff --git a/gpt_buddy_bot/app/multipage.py b/gpt_buddy_bot/app/multipage.py
index 0104e3c..b7ea9d5 100644
--- a/gpt_buddy_bot/app/multipage.py
+++ b/gpt_buddy_bot/app/multipage.py
@@ -14,10 +14,9 @@ class MultiPageApp:
 
     """
 
-    def __init__(self, sidebar_mode="buttons", **kwargs) -> None:
+    def __init__(self, **kwargs) -> None:
         """Initialise streamlit page configs."""
         st.set_page_config(**kwargs)
-        self.sidebar_mode = sidebar_mode.lower()
 
     @property
     def n_created_pages(self):
@@ -60,38 +59,25 @@
 
     def handle_ui_page_selection(self):
         """Control page selection in the UI sidebar."""
-        if self.sidebar_mode == "buttons":
-            with st.sidebar:
-                col1, col2 = st.columns([0.75, 0.25])
-                for page in self.pages.values():
-                    with col1:
-                        st.button(
-                            label=page.sidebar_title,
-                            key=f"select_{page.page_id}",
-                            on_click=partial(self.register_selected_page, page),
-                            use_container_width=True,
-                        )
-                    with col2:
-                        st.button(
-                            ":wastebasket:",
-                            key=f"delete_{page.page_id}",
-                            type="primary",
-                            use_container_width=True,
-                            on_click=partial(self.remove_page, page),
-                            help="Delete this chat.",
-                        )
-        elif self.sidebar_mode == "dropdown":
-            if page := st.sidebar.selectbox(
-                label="Select Chat",
-                options=self.pages.values(),
-                format_func=lambda page: page.sidebar_title,
-                index=len(self.pages) - 1,
-            ):
-                self.register_selected_page(page)
-        else:
-            raise NotImplementedError(
-                f"Sidebar mode '{self.sidebar_mode}' is not implemented."
-            )
+        with st.sidebar:
+            col1, col2 = st.columns([0.75, 0.25])
+            for page in self.pages.values():
+                with col1:
+                    st.button(
+                        label=page.sidebar_title,
+                        key=f"select_{page.page_id}",
+                        on_click=partial(self.register_selected_page, page),
+                        use_container_width=True,
+                    )
+                with col2:
+                    st.button(
+                        ":wastebasket:",
+                        key=f"delete_{page.page_id}",
+                        type="primary",
+                        use_container_width=True,
+                        on_click=partial(self.remove_page, page),
+                        help="Delete this chat.",
+                    )
From d7f4c8037aa2242b2eded0559fc243ed57d6c548 Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros
Date: Fri, 3 Nov 2023 02:37:53 +0100
Subject: [PATCH 045/109] Fix alignment of buttons in sidebar columns

---
 gpt_buddy_bot/app/multipage.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/gpt_buddy_bot/app/multipage.py b/gpt_buddy_bot/app/multipage.py
index b7ea9d5..0ba6806 100644
--- a/gpt_buddy_bot/app/multipage.py
+++ b/gpt_buddy_bot/app/multipage.py
@@ -60,8 +60,8 @@ def selected_page(self) -> AppPage:
     def handle_ui_page_selection(self):
         """Control page selection in the UI sidebar."""
         with st.sidebar:
-            col1, col2 = st.columns([0.75, 0.25])
             for page in self.pages.values():
+                col1, col2 = st.columns([0.75, 0.25])
                 with col1:
                     st.button(
                         label=page.sidebar_title,
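The one-line fix in patch 045 deserves a word of explanation: when st.columns is called
once outside the loop, all select buttons stack up in one column and all delete buttons
in the other, so the pairs drift out of alignment as soon as one title wraps. Calling it
per iteration creates a fresh row per page. A standalone sketch of the fixed layout:

import streamlit as st

pages = ["Chat 1", "A chat with a rather long title", "Chat 3"]
with st.sidebar:
    for i, title in enumerate(pages):
        col1, col2 = st.columns([0.75, 0.25])  # one fresh row per page
        col1.button(title, key=f"select_{i}", use_container_width=True)
        col2.button(":wastebasket:", key=f"delete_{i}", use_container_width=True)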
From e84f1bc106bc99d8f0bf1b4304a7fbc8dca7d7b6 Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros
Date: Fri, 3 Nov 2023 02:56:50 +0100
Subject: [PATCH 046/109] Minor refactoring

---
 gpt_buddy_bot/app/app.py                |  5 +----
 gpt_buddy_bot/app/app_page_templates.py | 10 ++++++++--
 2 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/gpt_buddy_bot/app/app.py b/gpt_buddy_bot/app/app.py
index c882157..76fad9e 100644
--- a/gpt_buddy_bot/app/app.py
+++ b/gpt_buddy_bot/app/app.py
@@ -12,10 +12,7 @@ def run_app():
     with st.sidebar:
         # Create a new chat upon init or button press
         if st.button(label=":speech_balloon: Create New Chat") or not app.pages:
-            app.add_page(
-                ChatBotPage(sidebar_title=f"Chat {app.n_created_pages + 1}"),
-                selected=True,
-            )
+            app.add_page(ChatBotPage(), selected=True)
         app.render()
 
 
diff --git a/gpt_buddy_bot/app/app_page_templates.py b/gpt_buddy_bot/app/app_page_templates.py
index e1bc54f..ad64949 100644
--- a/gpt_buddy_bot/app/app_page_templates.py
+++ b/gpt_buddy_bot/app/app_page_templates.py
@@ -13,11 +13,17 @@ class AppPage(ABC):
     """Abstract base class for pages in a streamlit app."""
 
-    def __init__(self, sidebar_title: str = "App Page", page_title: str = ""):
+    def __init__(self, sidebar_title: str = "", page_title: str = ""):
         self.page_id = str(uuid.uuid4())
+
+        if not sidebar_title:
+            n_created_pages = st.session_state.get("n_created_pages", 0)
+            sidebar_title = f"Chat {n_created_pages + 1}"
         self._initial_sidebar_title = sidebar_title
+
         if not page_title:
-            page_title = f"{GeneralConstants.APP_NAME} ({self.chat_obj.model})"
+            page_title = f":speech_balloon: {GeneralConstants.APP_NAME}\n"
+            page_title += f"## {sidebar_title}: {self.chat_obj.model}"
         self._initial_title = page_title
+"""Registration and validation of options.""" +from functools import reduce +from getpass import getuser +from typing import Literal, get_args + +from pydantic import BaseModel, Field, PositiveInt, confloat +from typing_extensions import Annotated + + +class BaseConfigModel(BaseModel): + @classmethod + def get_allowed_values(cls, field: str): + """Return a tuple of allowed values for `field`.""" + annotation = cls._get_field_param(field=field, param="annotation") + if isinstance(annotation, type(Literal[""])): + return get_args(annotation) + + @classmethod + def get_type(cls, field: str): + """Return type of `field`.""" + annotation = cls._get_field_param(field=field, param="annotation") + if isinstance(annotation, type): + return annotation + + @classmethod + def get_default(cls, field: str): + """Return allowed value(s) for `field`.""" + return cls._get_field_param(field=field, param="default") + + @classmethod + def get_description(cls, field: str): + return cls._get_field_param(field=field, param="description") + + @classmethod + def _get_field_param(cls, field: str, param: str): + """Return param `param` of field `field`.""" + return getattr(cls.model_fields[field], param, None) + + def __getitem__(self, item): + """Get items from container. + + The behaviour is similar to a `dict`, except for the fact that + `self["A.B.C.D. ..."]` will behave like `self["A"]["B"]["C"]["D"][...]`. + + Args: + item (str): Item to be retrieved. Use dot-separated keys to retrieve a nested + item in one go. + + Raises: + KeyError: If the item is not found. + + Returns: + Any: Value of the item. + """ + try: + # Try regular getitem first in case "A.B. ... C" is actually a single key + return getattr(self, item) + except AttributeError: + try: + return reduce(getattr, item.split("."), self) + except AttributeError as error: + raise KeyError(item) from error + + +openai_url = "https://platform.openai.com/docs/api-reference/chat/create#chat-create" + + +class ChatOptions(BaseConfigModel): + """Model for the chat's configuration options.""" + + model: Literal["gpt-3.5-turbo", "gpt-4"] = Field( + default="gpt-3.5-turbo", description="OpenAI API engine to use for completion" + ) + context_model: Literal["text-embedding-ada-002", None] = Field( + default="text-embedding-ada-002", + description="OpenAI API engine to use for embedding", + ) + base_ai_instructions: tuple[str, ...] 
= Field( + default=("Answer with the fewest tokens possible.",), + description="Initial instructions for the AI", + ) + max_tokens: Annotated[ + int, Field(gt=0, description=f"See <{openai_url}-max_tokens>") + ] = None + frequency_penalty: Annotated[ + float, Field(ge=-2.0, le=2.0, description=f"See <{openai_url}-frequency_penalty>") + ] = None + presence_penalty: Annotated[ + float, Field(ge=-2.0, le=2.0, description=f"See <{openai_url}-presence_penalty>") + ] = None + temperature: Annotated[ + float, + Field(ge=0.0, le=2.0, description=f"See <{openai_url}-temperature>"), + ] = None + top_p: Annotated[ + float, Field(ge=0.0, le=1.0, description=f"See <{openai_url}-top_p>") + ] = None + user: str = Field(default=getuser(), description="Name of the chat's user") + + +DEFAULT_CHAT_OPTIONS = ChatOptions() diff --git a/pyproject.toml b/pyproject.toml index 74f4c30..8f0c101 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,6 +25,7 @@ numpy = "^1.26.1" openai = "^0.28.1" pandas = "^2.1.2" + pydantic = "^2.4.2" streamlit = "^1.28.0" tiktoken = "^0.5.1" From eec70e4475742abc91cdbe67f57d41f76d561a63 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Fri, 3 Nov 2023 16:02:08 +0100 Subject: [PATCH 048/109] Use ChatConfigs to configure chat throughout code --- gpt_buddy_bot/chat.py | 72 ++++++++++------------------ gpt_buddy_bot/chat_configs.py | 64 +++++++++++++++++++------ gpt_buddy_bot/command_definitions.py | 2 +- 3 files changed, 76 insertions(+), 62 deletions(-) diff --git a/gpt_buddy_bot/chat.py b/gpt_buddy_bot/chat.py index e66bd6e..395d1da 100644 --- a/gpt_buddy_bot/chat.py +++ b/gpt_buddy_bot/chat.py @@ -3,47 +3,19 @@ import openai -from . import GeneralConstants +from .chat_configs import ChatOptions from .chat_context import BaseChatContext, EmbeddingBasedChatContext from .tokens import TokenUsageDatabase, get_n_tokens class Chat: - def __init__( - self, - model: str = "gpt-3.5-turbo", - base_instructions: str = "", - context_model: str = "text-embedding-ada-002", - report_accounting_when_done: bool = False, - ): - self.model = model.lower() - - if context_model is not None: - context_model = context_model.lower() - self.context_model = context_model - - self.username = "chat_user" - self.assistant_name = f"chat_{model.replace('.', '_')}" - self.system_name = "chat_manager" - - self.ground_ai_instructions = " ".join( - [ - instruction.strip() - for instruction in [ - f"Your name is {self.assistant_name}.", - f"You are a helpful assistant to {self.username}.", - "You answer correctly. 
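A quick illustration of how patch 047's introspection helpers behave, given the field
definitions above (an editor's sketch; the printed values follow from the pydantic
metadata, and the import path assumes the package layout in this series):

from gpt_buddy_bot.chat_configs import ChatOptions

print(ChatOptions.get_default("model"))         # gpt-3.5-turbo
print(ChatOptions.get_allowed_values("model"))  # ('gpt-3.5-turbo', 'gpt-4')
print(ChatOptions.get_type("user"))             # <class 'str'>
print(ChatOptions.get_type("max_tokens"))       # None: an Annotated alias, not a class
print(ChatOptions.get_description("context_model"))
# OpenAI API engine to use for embedding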
From eec70e4475742abc91cdbe67f57d41f76d561a63 Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros
Date: Fri, 3 Nov 2023 16:02:08 +0100
Subject: [PATCH 048/109] Use ChatConfigs to configure chat throughout code

---
 gpt_buddy_bot/chat.py                | 72 ++++++++++------------------
 gpt_buddy_bot/chat_configs.py        | 64 +++++++++++++++++++------
 gpt_buddy_bot/command_definitions.py |  2 +-
 3 files changed, 76 insertions(+), 62 deletions(-)

diff --git a/gpt_buddy_bot/chat.py b/gpt_buddy_bot/chat.py
index e66bd6e..395d1da 100644
--- a/gpt_buddy_bot/chat.py
+++ b/gpt_buddy_bot/chat.py
@@ -3,47 +3,19 @@
 
 import openai
 
-from . import GeneralConstants
+from .chat_configs import ChatOptions
 from .chat_context import BaseChatContext, EmbeddingBasedChatContext
 from .tokens import TokenUsageDatabase, get_n_tokens
 
 
 class Chat:
-    def __init__(
-        self,
-        model: str = "gpt-3.5-turbo",
-        base_instructions: str = "",
-        context_model: str = "text-embedding-ada-002",
-        report_accounting_when_done: bool = False,
-    ):
-        self.model = model.lower()
-
-        if context_model is not None:
-            context_model = context_model.lower()
-        self.context_model = context_model
-
-        self.username = "chat_user"
-        self.assistant_name = f"chat_{model.replace('.', '_')}"
-        self.system_name = "chat_manager"
-
-        self.ground_ai_instructions = " ".join(
-            [
-                instruction.strip()
-                for instruction in [
-                    f"Your name is {self.assistant_name}.",
-                    f"You are a helpful assistant to {self.username}.",
-                    "You answer correctly. You do not lie.",
-                    f"{base_instructions.strip(' .')}.",
-                    f"You must remember and follow all directives by {self.system_name}.",
-                ]
-                if instruction.strip()
-            ]
-        )
+    def __init__(self, configs: ChatOptions):
+        self.configs = configs
+        for field in self.configs.model_fields:
+            setattr(self, field, self.configs[field])
 
         self.token_usage = defaultdict(lambda: {"input": 0, "output": 0})
-        self.token_usage_db = TokenUsageDatabase(
-            fpath=GeneralConstants.TOKEN_USAGE_DATABASE
-        )
+        self.token_usage_db = TokenUsageDatabase(fpath=self.token_usage_db_path)
 
         if self.context_model is None:
             self.context_handler = BaseChatContext(parent_chat=self)
@@ -54,13 +26,25 @@ def __init__(
         else:
             raise NotImplementedError(f"Unknown context model: {self.context_model}")
 
-        self.report_accounting_when_done = report_accounting_when_done
+    @property
+    def base_directive(self):
+        msg_content = " ".join(
+            [
+                instruction.strip()
+                for instruction in [
+                    f"Your name is {self.assistant_name}.",
+                    f"You are a helpful assistant to {self.username}.",
+                    "You answer correctly. You do not lie.",
+                    " ".join(
+                        [f"{instruct.strip(' .')}." for instruct in self.ai_instructions]
+                    ),
+                    f"You must remember and follow all directives by {self.system_name}.",
+                ]
+                if instruction.strip()
+            ]
+        )
 
-        self.base_directive = {
-            "role": "system",
-            "name": self.system_name,
-            "content": self.ground_ai_instructions,
-        }
+        return {"role": "system", "name": self.system_name, "content": msg_content}
 
     def __del__(self):
         # Store token usage to database
@@ -82,12 +66,8 @@ def __del__(self):
 
     @classmethod
     def from_cli_args(cls, cli_args):
-        return cls(
-            model=cli_args.model,
-            context_model=cli_args.context_model,
-            base_instructions=cli_args.initial_ai_instructions,
-            report_accounting_when_done=not cli_args.skip_reporting_costs,
-        )
+        configs = ChatOptions.model_validate(vars(cli_args))
+        return cls(configs)
diff --git a/gpt_buddy_bot/chat_configs.py b/gpt_buddy_bot/chat_configs.py
index 9fdc203..ec7f1c8 100644
--- a/gpt_buddy_bot/chat_configs.py
+++ b/gpt_buddy_bot/chat_configs.py
@@ -2,11 +2,14 @@
 """Registration and validation of options."""
 from functools import reduce
 from getpass import getuser
-from typing import Literal, get_args
+from pathlib import Path
+from typing import Any, Literal, Optional, get_args
 
-from pydantic import BaseModel, Field, PositiveInt, confloat
+from pydantic import BaseModel, Field, validator
 from typing_extensions import Annotated
 
+from gpt_buddy_bot import GeneralConstants
+
 
 class BaseConfigModel(BaseModel):
     @classmethod
@@ -72,31 +75,62 @@ class ChatOptions(BaseConfigModel):
     model: Literal["gpt-3.5-turbo", "gpt-4"] = Field(
         default="gpt-3.5-turbo", description="OpenAI API engine to use for completion"
     )
+    username: str = Field(default=getuser(), description="Name of the chat's user")
+    assistant_name: str = Field(
+        default="Based on model", description="Name of the chat's assistant"
+    )
+    system_name: str = Field(
+        default=GeneralConstants.PACKAGE_NAME, description="Name of the chat's system"
+    )
     context_model: Literal["text-embedding-ada-002", None] = Field(
         default="text-embedding-ada-002",
         description="OpenAI API engine to use for embedding",
     )
-    base_ai_instructions: tuple[str, ...] = Field(
+    ai_instructions: tuple[str, ...] = Field(
         default=("Answer with the fewest tokens possible.",),
         description="Initial instructions for the AI",
     )
-    max_tokens: Annotated[
-        int, Field(gt=0, description=f"See <{openai_url}-max_tokens>")
+    max_tokens: Optional[
+        Annotated[int, Field(gt=0, description=f"See <{openai_url}-max_tokens>")]
     ] = None
-    frequency_penalty: Annotated[
-        float, Field(ge=-2.0, le=2.0, description=f"See <{openai_url}-frequency_penalty>")
+    frequency_penalty: Optional[
+        Annotated[
+            float,
+            Field(ge=-2.0, le=2.0, description=f"See <{openai_url}-frequency_penalty>"),
+        ]
     ] = None
-    presence_penalty: Annotated[
-        float, Field(ge=-2.0, le=2.0, description=f"See <{openai_url}-presence_penalty>")
+    presence_penalty: Optional[
+        Annotated[
+            float,
+            Field(ge=-2.0, le=2.0, description=f"See <{openai_url}-presence_penalty>"),
+        ]
     ] = None
-    temperature: Annotated[
-        float,
-        Field(ge=0.0, le=2.0, description=f"See <{openai_url}-temperature>"),
+    temperature: Optional[
+        Annotated[
+            float,
+            Field(ge=0.0, le=2.0, description=f"See <{openai_url}-temperature>"),
+        ]
     ] = None
-    top_p: Annotated[
-        float, Field(ge=0.0, le=1.0, description=f"See <{openai_url}-top_p>")
+    top_p: Optional[
+        Annotated[
+            float,
+            Field(ge=0.0, le=1.0, description=f"See <{openai_url}-top_p>"),
+        ]
     ] = None
-    user: str = Field(default=getuser(), description="Name of the chat's user")
+    token_usage_db_path: Path = Field(
+        default=GeneralConstants.TOKEN_USAGE_DATABASE,
+        description="Path to the token usage database",
+    )
+    report_accounting_when_done: bool = Field(
+        default=False, description="Report estimated costs when done with the chat."
+    )
+
+    @validator("assistant_name", always=True)
+    def get_address(cls, assistant_name: str, values: dict[str, Any]) -> str:
+        assistant_name = assistant_name.lower().strip()
+        if assistant_name == "based on model":
+            return f"chat_{values.get('model', 'assistant').replace('.', '_')}"
+        return assistant_name
 
 
 DEFAULT_CHAT_OPTIONS = ChatOptions()
diff --git a/gpt_buddy_bot/command_definitions.py b/gpt_buddy_bot/command_definitions.py
index bf2525e..6545e97 100644
--- a/gpt_buddy_bot/command_definitions.py
+++ b/gpt_buddy_bot/command_definitions.py
@@ -8,7 +8,7 @@
 
 def accounting(args):
     """Show the accumulated costs of the chat and exit."""
-    Chat().report_token_usage(current_chat=False)
+    Chat.from_cli_args(cli_args=args).report_token_usage(current_chat=False)
 
 
 def run_on_terminal(args):
From 5ada88e1f566df16922ab8713ba6e228c06794cb Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros
Date: Sat, 4 Nov 2023 00:17:03 +0100
Subject: [PATCH 049/109] Add UI tab for chat options

---
 gpt_buddy_bot/app/app.py                | 11 ++--
 gpt_buddy_bot/app/app_page_templates.py |  2 +-
 gpt_buddy_bot/app/multipage.py          | 51 +++++++++++++---
 gpt_buddy_bot/argparse_wrapper.py       | 13 ++---
 gpt_buddy_bot/chat.py                   |  5 +-
 gpt_buddy_bot/chat_configs.py           | 78 ++++++++++++-------------
 6 files changed, 98 insertions(+), 62 deletions(-)

diff --git a/gpt_buddy_bot/app/app.py b/gpt_buddy_bot/app/app.py
index 76fad9e..b1cfb2d 100644
--- a/gpt_buddy_bot/app/app.py
+++ b/gpt_buddy_bot/app/app.py
@@ -10,10 +10,13 @@ def run_app():
     """Create and run an instance of the package's app."""
     app = MultiPageApp(page_title=GeneralConstants.APP_NAME, page_icon=":speech_balloon:")
     with st.sidebar:
+        tab1, tab2 = st.tabs(["Chats", "Settings"])
+        sidebar_tabs = {"chats": tab1, 
"settings": tab2} + with tab1: + # Create a new chat upon init or button press + if st.button(label=":speech_balloon: New Chat") or not app.pages: + app.add_page(ChatBotPage(), selected=True) + app.render(sidebar_tabs=sidebar_tabs) if __name__ == "__main__": diff --git a/gpt_buddy_bot/app/app_page_templates.py b/gpt_buddy_bot/app/app_page_templates.py index ad64949..a62184a 100644 --- a/gpt_buddy_bot/app/app_page_templates.py +++ b/gpt_buddy_bot/app/app_page_templates.py @@ -23,7 +23,7 @@ def __init__(self, sidebar_title: str = "", page_title: str = ""): if not page_title: page_title = f":speech_balloon: {GeneralConstants.APP_NAME}\n" - page_title += f"## {sidebar_title}: {self.chat_obj.model}" + page_title += f"## {self.chat_obj.model}\n### {sidebar_title}" self._initial_title = page_title @property diff --git a/gpt_buddy_bot/app/multipage.py b/gpt_buddy_bot/app/multipage.py index 0ba6806..4c07227 100644 --- a/gpt_buddy_bot/app/multipage.py +++ b/gpt_buddy_bot/app/multipage.py @@ -1,9 +1,11 @@ "Code for the creation streamlit apps with dynamically created pages." -from functools import partial +import contextlib import streamlit as st from app_page_templates import AppPage +from gpt_buddy_bot.chat_configs import ChatOptions + class MultiPageApp: """Framework for creating streamlite multipage apps. @@ -57,16 +59,17 @@ def selected_page(self) -> AppPage: return next(iter(self.pages.values())) return st.session_state["selected_page"] - def handle_ui_page_selection(self): + def handle_ui_page_selection(self, sidebar_tabs: dict): """Control page selection in the UI sidebar.""" - with st.sidebar: + with sidebar_tabs["chats"]: for page in self.pages.values(): - col1, col2 = st.columns([0.75, 0.25]) + col1, col2 = st.columns([0.8, 0.2]) with col1: st.button( label=page.sidebar_title, key=f"select_{page.page_id}", - on_click=partial(self.register_selected_page, page), + on_click=self.register_selected_page, + kwargs={"page": page}, use_container_width=True, ) with col2: @@ -75,11 +78,43 @@ def handle_ui_page_selection(self): key=f"delete_{page.page_id}", type="primary", use_container_width=True, - on_click=partial(self.remove_page, page), + on_click=self.remove_page, + kwargs={"page": page}, help="Delete this chat.", ) + with sidebar_tabs["settings"]: + chat_options = ChatOptions() + for field_name, field in chat_options.model_fields.items(): + title = field_name.replace("_", " ").title() + choices = ChatOptions.get_allowed_values(field=field_name) + field_type = ChatOptions.get_type(field=field_name) + + if choices: + st.selectbox(title, choices, index=0) + elif field_type == str: + st.text_input(title, value=getattr(chat_options, field_name)) + elif field_type in [int, float]: + step = 1 if field_type == int else 0.01 + bounds = [None, None] + for item in field.metadata: + with contextlib.suppress(AttributeError): + bounds[0] = item.gt + step + with contextlib.suppress(AttributeError): + bounds[0] = item.ge + with contextlib.suppress(AttributeError): + bounds[1] = item.lt - step + with contextlib.suppress(AttributeError): + bounds[1] = item.le + st.number_input( + title, + value=getattr(chat_options, field_name), + placeholder="OpenAI Default", + min_value=bounds[0], + max_value=bounds[1], + step=step, + ) - def render(self): + def render(self, sidebar_tabs: dict): """Render the multipage app with focus on the selected page.""" - self.handle_ui_page_selection() + self.handle_ui_page_selection(sidebar_tabs=sidebar_tabs) self.selected_page.render() diff --git a/gpt_buddy_bot/argparse_wrapper.py 
b/gpt_buddy_bot/argparse_wrapper.py index 20e8a21..10f6fe3 100644 --- a/gpt_buddy_bot/argparse_wrapper.py +++ b/gpt_buddy_bot/argparse_wrapper.py @@ -2,7 +2,6 @@ """Wrappers for argparse functionality.""" import argparse import sys -from collections.abc import Sequence from . import GeneralConstants from .chat_configs import ChatOptions @@ -33,22 +32,22 @@ def get_parsed_args(argv=None, default_command="ui"): "choices": ChatOptions.get_allowed_values, "help": ChatOptions.get_description, } - for field_name in ChatOptions.model_fields: + for field_name, field in ChatOptions.model_fields.items(): args_opts = { key: argarse2pydantic[key](field_name) for key in argarse2pydantic if argarse2pydantic[key](field_name) is not None } + args_opts["required"] = field.is_required() if "help" in args_opts: args_opts["help"] = f"{args_opts['help']} (default: %(default)s)" - if "default" in args_opts and isinstance(args_opts["default"], (list, tuple)): - args_opts.pop("type", None) - args_opts["nargs"] = "*" + if "default" in args_opts: + if isinstance(args_opts["default"], (list, tuple)): + args_opts.pop("type", None) + args_opts["nargs"] = "*" chat_options_parser.add_argument(f"--{field_name.replace('_', '-')}", **args_opts) - chat_options_parser.add_argument("--skip-reporting-costs", action="store_true") - main_parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter ) diff --git a/gpt_buddy_bot/chat.py b/gpt_buddy_bot/chat.py index 395d1da..3ec6b8e 100644 --- a/gpt_buddy_bot/chat.py +++ b/gpt_buddy_bot/chat.py @@ -59,7 +59,10 @@ def __del__(self): @classmethod def from_cli_args(cls, cli_args): - configs = ChatOptions.model_validate(vars(cli_args)) + chat_opts = { + k: v for k, v in vars(cli_args).items() if k in ChatOptions.model_fields + } + configs = ChatOptions.model_validate(chat_opts) return cls(configs) def respond_user_prompt(self, prompt: str): diff --git a/gpt_buddy_bot/chat_configs.py b/gpt_buddy_bot/chat_configs.py index ec7f1c8..2e8df66 100644 --- a/gpt_buddy_bot/chat_configs.py +++ b/gpt_buddy_bot/chat_configs.py @@ -1,12 +1,13 @@ #!/usr/bin/env python3 """Registration and validation of options.""" +import types +import typing from functools import reduce from getpass import getuser from pathlib import Path from typing import Any, Literal, Optional, get_args from pydantic import BaseModel, Field, validator -from typing_extensions import Annotated from gpt_buddy_bot import GeneralConstants @@ -22,17 +23,21 @@ def get_allowed_values(cls, field: str): @classmethod def get_type(cls, field: str): """Return type of `field`.""" - annotation = cls._get_field_param(field=field, param="annotation") - if isinstance(annotation, type): - return annotation + type_hint = typing.get_type_hints(cls)[field] + if isinstance(type_hint, type): + return type_hint + type_hint_first_arg = get_args(type_hint)[0] + if isinstance(type_hint_first_arg, type): + return type_hint_first_arg @classmethod def get_default(cls, field: str): """Return allowed value(s) for `field`.""" - return cls._get_field_param(field=field, param="default") + return cls.model_fields[field].get_default() @classmethod def get_description(cls, field: str): + """Return description of `field`.""" return cls._get_field_param(field=field, param="description") @classmethod @@ -66,15 +71,36 @@ def __getitem__(self, item): raise KeyError(item) from error -openai_url = "https://platform.openai.com/docs/api-reference/chat/create#chat-create" +class OpenAiApiCallOptions(BaseConfigModel): + _openai_url = 
"https://platform.openai.com/docs/api-reference/chat/create#chat-create" + + model: Literal["gpt-3.5-turbo", "gpt-4"] = Field( + default="gpt-3.5-turbo", + description=f"OpenAI LLM model to use. See {_openai_url}-model", + ) + max_tokens: Optional[int] = Field( + default=None, gt=0, description=f"See <{_openai_url}-max_tokens>" + ) + presence_penalty: Optional[float] = Field( + default=None, ge=-2.0, le=2.0, description=f"See <{_openai_url}-presence_penalty>" + ) + frequency_penalty: Optional[float] = Field( + default=None, + ge=-2.0, + le=2.0, + description=f"See <{_openai_url}-frequency_penalty>", + ) + temperature: Optional[float] = Field( + default=None, ge=0.0, le=2.0, description=f"See <{_openai_url}-temperature>" + ) + top_p: Optional[float] = Field( + default=None, ge=0.0, le=1.0, description=f"See <{_openai_url}-top_p>" + ) -class ChatOptions(BaseConfigModel): +class ChatOptions(OpenAiApiCallOptions): """Model for the chat's configuration options.""" - model: Literal["gpt-3.5-turbo", "gpt-4"] = Field( - default="gpt-3.5-turbo", description="OpenAI API engine to use for completion" - ) username: str = Field(default=getuser(), description="Name of the chat's user") assistant_name: str = Field( default="Based on model", description="Name of the chat's assistant" @@ -84,39 +110,12 @@ class ChatOptions(BaseConfigModel): ) context_model: Literal["text-embedding-ada-002", None] = Field( default="text-embedding-ada-002", - description="OpenAI API engine to use for embedding", + description="OpenAI API model to use for embedding", ) ai_instructions: tuple[str, ...] = Field( default=("Answer with the fewest tokens possible.",), description="Initial instructions for the AI", ) - max_tokens: Optional[ - Annotated[int, Field(gt=0, description=f"See <{openai_url}-max_tokens>")] - ] = None - frequency_penalty: Optional[ - Annotated[ - float, - Field(ge=-2.0, le=2.0, description=f"See <{openai_url}-frequency_penalty>"), - ] - ] = None - presence_penalty: Optional[ - Annotated[ - float, - Field(ge=-2.0, le=2.0, description=f"See <{openai_url}-presence_penalty>"), - ] - ] = None - temperature: Optional[ - Annotated[ - float, - Field(ge=0.0, le=2.0, description=f"See <{openai_url}-temperature>"), - ] - ] = None - top_p: Optional[ - Annotated[ - float, - Field(defaut=None, ge=0.0, le=1.0, description=f"See <{openai_url}-top_p>"), - ] - ] = None token_usage_db_path: Path = Field( default=GeneralConstants.TOKEN_USAGE_DATABASE, description="Path to the token usage database", @@ -131,6 +130,3 @@ def get_address(cls, assistant_name: str, values: dict[str, Any]) -> str: if assistant_name == "based on model": return f"chat_{values.get('model', 'assistant').replace('.', '_')}" return assistant_name - - -DEFAULT_CHAT_OPTIONS = ChatOptions() From b8f6e9ca2c04e27961044dcf61ef29d37101aaae Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Sat, 4 Nov 2023 01:44:33 +0100 Subject: [PATCH 050/109] Starting to add reactivity to setting tab --- gpt_buddy_bot/app/app_page_templates.py | 3 ++- gpt_buddy_bot/app/multipage.py | 25 +++++++++++++++++++------ gpt_buddy_bot/command_definitions.py | 4 ++-- 3 files changed, 23 insertions(+), 9 deletions(-) diff --git a/gpt_buddy_bot/app/app_page_templates.py b/gpt_buddy_bot/app/app_page_templates.py index a62184a..8ed601c 100644 --- a/gpt_buddy_bot/app/app_page_templates.py +++ b/gpt_buddy_bot/app/app_page_templates.py @@ -15,6 +15,7 @@ class AppPage(ABC): def __init__(self, sidebar_title: str = "", page_title: str = ""): self.page_id = str(uuid.uuid4()) + self.page_number 
= st.session_state.get("n_created_pages", 0) + 1 if not sidebar_title: n_created_pages = st.session_state.get("n_created_pages", 0) @@ -67,7 +68,7 @@ def chat_obj(self) -> Chat: args = pickle.load(parsed_args_file) this_page_chat = Chat.from_cli_args(cli_args=args) self.state["chat"] = this_page_chat - return this_page_chat + return self.state["chat"] @property def chat_history(self) -> list[dict[str, str]]: diff --git a/gpt_buddy_bot/app/multipage.py b/gpt_buddy_bot/app/multipage.py index 4c07227..9c88dd7 100644 --- a/gpt_buddy_bot/app/multipage.py +++ b/gpt_buddy_bot/app/multipage.py @@ -83,16 +83,20 @@ def handle_ui_page_selection(self, sidebar_tabs: dict): help="Delete this chat.", ) with sidebar_tabs["settings"]: - chat_options = ChatOptions() - for field_name, field in chat_options.model_fields.items(): + chat_configs: ChatOptions = self.selected_page.chat_obj.configs + updates_for_chat_configs = {} + for field_name, field in chat_configs.model_fields.items(): title = field_name.replace("_", " ").title() choices = ChatOptions.get_allowed_values(field=field_name) field_type = ChatOptions.get_type(field=field_name) + new_field_value = None if choices: - st.selectbox(title, choices, index=0) + new_field_value = st.selectbox(title, choices, index=0) elif field_type == str: - st.text_input(title, value=getattr(chat_options, field_name)) + new_field_value = st.text_input( + title, value=getattr(chat_configs, field_name) + ) elif field_type in [int, float]: step = 1 if field_type == int else 0.01 bounds = [None, None] @@ -105,15 +109,24 @@ def handle_ui_page_selection(self, sidebar_tabs: dict): bounds[1] = item.lt - step with contextlib.suppress(AttributeError): bounds[1] = item.le - st.number_input( + new_field_value = st.number_input( title, - value=getattr(chat_options, field_name), + value=getattr(chat_configs, field_name), placeholder="OpenAI Default", min_value=bounds[0], max_value=bounds[1], step=step, ) + if new_field_value: + updates_for_chat_configs[field_name] = new_field_value + self.selected_page.chat_obj.configs = chat_configs.model_copy( + update=updates_for_chat_configs + ) + print(self.selected_page.page_number) + print(self.selected_page.chat_obj.configs) + print() + def render(self, sidebar_tabs: dict): """Render the multipage app with focus on the selected page.""" self.handle_ui_page_selection(sidebar_tabs=sidebar_tabs) diff --git a/gpt_buddy_bot/command_definitions.py b/gpt_buddy_bot/command_definitions.py index 6545e97..9d2d957 100644 --- a/gpt_buddy_bot/command_definitions.py +++ b/gpt_buddy_bot/command_definitions.py @@ -28,8 +28,8 @@ def run_on_ui(args): "run", app_path.as_posix(), "--theme.base=dark", - "--runner.fastReruns", - "True", + # "--runner.fastReruns", + # "True", "--server.runOnSave", "True", "--browser.gatherUsageStats", From 6bb13acf44cdfac29cfbfd8906f3d92a80f39f6c Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Sat, 4 Nov 2023 19:06:56 +0100 Subject: [PATCH 051/109] Make the setting tab functional --- gpt_buddy_bot/app/app_page_templates.py | 55 ++++++++++++++++--------- gpt_buddy_bot/app/multipage.py | 43 ++++++++++++------- gpt_buddy_bot/chat.py | 21 +++++++--- gpt_buddy_bot/chat_configs.py | 31 ++++++++------ gpt_buddy_bot/chat_context.py | 31 ++++++++------ gpt_buddy_bot/command_definitions.py | 6 ++- 6 files changed, 120 insertions(+), 67 deletions(-) diff --git a/gpt_buddy_bot/app/app_page_templates.py b/gpt_buddy_bot/app/app_page_templates.py index 8ed601c..ff2a429 100644 --- a/gpt_buddy_bot/app/app_page_templates.py +++ 
b/gpt_buddy_bot/app/app_page_templates.py @@ -1,4 +1,5 @@ """Utilities for creating pages in a streamlit app.""" +import contextlib import pickle import sys import uuid @@ -8,6 +9,7 @@ from gpt_buddy_bot import GeneralConstants from gpt_buddy_bot.chat import Chat +from gpt_buddy_bot.chat_configs import ChatOptions class AppPage(ABC): @@ -16,16 +18,15 @@ class AppPage(ABC): def __init__(self, sidebar_title: str = "", page_title: str = ""): self.page_id = str(uuid.uuid4()) self.page_number = st.session_state.get("n_created_pages", 0) + 1 + self._initial_sidebar_title = ( + sidebar_title if sidebar_title else f"Page {self.page_number}" + ) + self._init_page_title = page_title if page_title else self.default_page_title - if not sidebar_title: - n_created_pages = st.session_state.get("n_created_pages", 0) - sidebar_title = f"Chat {n_created_pages + 1}" - self._initial_sidebar_title = sidebar_title - - if not page_title: - page_title = f":speech_balloon: {GeneralConstants.APP_NAME}\n" - page_title += f"## {self.chat_obj.model}\n### {sidebar_title}" - self._initial_title = page_title + @property + def default_page_title(self): + """Return the default page title.""" + return self.sidebar_title @property def state(self): @@ -37,7 +38,7 @@ def state(self): @property def title(self): """Get the title of the page.""" - return self.state.get("page_title", self._initial_title) + return self.state.get("page_title", self._init_page_title) @title.setter def title(self, value): @@ -57,18 +58,34 @@ def render(self): class ChatBotPage(AppPage): + @property + def default_page_title(self): + """Return the default page title.""" + page_title = f":speech_balloon: {GeneralConstants.APP_NAME}\n" + page_title += f"## {self.chat_obj.model}\n### {self.sidebar_title}" + return page_title + + @property + def chat_configs(self) -> ChatOptions: + """Return the configs used for the page's chat object.""" + if "chat_configs" not in self.state: + chat_options_file_path = sys.argv[-1] + with open(chat_options_file_path, "rb") as chat_configs_file: + self.state["chat_configs"] = pickle.load(chat_configs_file) + return self.state["chat_configs"] + + @chat_configs.setter + def chat_configs(self, value: ChatOptions): + self.state["chat_configs"] = ChatOptions.model_validate(value) + with contextlib.suppress(KeyError): + del self.state["chat_obj"] + @property def chat_obj(self) -> Chat: """Return the chat object responsible for the queries in this page.""" - try: - this_page_chat = self.state["chat"] - except KeyError: - parsed_args_file = sys.argv[-1] - with open(parsed_args_file, "rb") as parsed_args_file: - args = pickle.load(parsed_args_file) - this_page_chat = Chat.from_cli_args(cli_args=args) - self.state["chat"] = this_page_chat - return self.state["chat"] + if "chat_obj" not in self.state: + self.state["chat_obj"] = Chat(self.chat_configs) + return self.state["chat_obj"] @property def chat_history(self) -> list[dict[str, str]]: diff --git a/gpt_buddy_bot/app/multipage.py b/gpt_buddy_bot/app/multipage.py index 9c88dd7..57a2682 100644 --- a/gpt_buddy_bot/app/multipage.py +++ b/gpt_buddy_bot/app/multipage.py @@ -2,10 +2,12 @@ import contextlib import streamlit as st -from app_page_templates import AppPage +from app_page_templates import AppPage, ChatBotPage from gpt_buddy_bot.chat_configs import ChatOptions +UnchangedValue = object() + class MultiPageApp: """Framework for creating streamlite multipage apps. 
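
Note: the settings tab these patches build reads number-input bounds straight off the constraints declared on the pydantic fields (the field.metadata loop with contextlib.suppress just below). A minimal standalone sketch of that idea, assuming pydantic v2; the Opts model and the widget_bounds helper are illustrative stand-ins, not code from this patch series:

    import contextlib

    from pydantic import BaseModel, Field


    class Opts(BaseModel):
        # Illustrative stand-in for ChatOptions.
        temperature: float = Field(default=1.0, ge=0.0, le=2.0)
        max_tokens: int = Field(default=100, gt=0)


    def widget_bounds(model_cls, field_name, step):
        """Extract (min, max) for a number input from the field's metadata."""
        bounds = [None, None]
        for item in model_cls.model_fields[field_name].metadata:
            # Each constraint object (Gt, Ge, Lt, Le) carries only its own
            # attribute; the other lookups raise AttributeError and are skipped.
            with contextlib.suppress(AttributeError):
                bounds[0] = item.gt + step  # exclusive lower bound
            with contextlib.suppress(AttributeError):
                bounds[0] = item.ge  # inclusive lower bound
            with contextlib.suppress(AttributeError):
                bounds[1] = item.lt - step  # exclusive upper bound
            with contextlib.suppress(AttributeError):
                bounds[1] = item.le  # inclusive upper bound
        return tuple(bounds)


    print(widget_bounds(Opts, "temperature", step=0.01))  # -> (0.0, 2.0)
    print(widget_bounds(Opts, "max_tokens", step=1))      # -> (1, None)
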
@@ -53,7 +55,7 @@ def register_selected_page(self, page: AppPage): st.session_state["selected_page"] = page @property - def selected_page(self) -> AppPage: + def selected_page(self) -> ChatBotPage: """Return the selected page.""" if "selected_page" not in st.session_state: return next(iter(self.pages.values())) @@ -83,19 +85,26 @@ def handle_ui_page_selection(self, sidebar_tabs: dict): help="Delete this chat.", ) with sidebar_tabs["settings"]: - chat_configs: ChatOptions = self.selected_page.chat_obj.configs - updates_for_chat_configs = {} - for field_name, field in chat_configs.model_fields.items(): + current_chat_configs = self.selected_page.chat_configs + new_chat_configs = {} + for field_name, field in ChatOptions.model_fields.items(): title = field_name.replace("_", " ").title() choices = ChatOptions.get_allowed_values(field=field_name) field_type = ChatOptions.get_type(field=field_name) - new_field_value = None + element_key = f"{field_name}-pg-{self.selected_page.page_id}-ui-element" + current_field_value = getattr(current_chat_configs, field_name) + new_field_value = UnchangedValue if choices: - new_field_value = st.selectbox(title, choices, index=0) + new_field_value = st.selectbox( + title, + choices, + key=element_key, + index=choices.index(current_field_value), + ) elif field_type == str: new_field_value = st.text_input( - title, value=getattr(chat_configs, field_name) + title, value=current_field_value, key=element_key ) elif field_type in [int, float]: step = 1 if field_type == int else 0.01 @@ -111,21 +120,23 @@ def handle_ui_page_selection(self, sidebar_tabs: dict): bounds[1] = item.le new_field_value = st.number_input( title, - value=getattr(chat_configs, field_name), + value=current_field_value, placeholder="OpenAI Default", min_value=bounds[0], max_value=bounds[1], step=step, + key=element_key, ) - if new_field_value: - updates_for_chat_configs[field_name] = new_field_value - self.selected_page.chat_obj.configs = chat_configs.model_copy( - update=updates_for_chat_configs + if new_field_value is not UnchangedValue: + new_chat_configs[field_name] = new_field_value + + if new_chat_configs: + # Update chat configs. Make sure not to lose the conversation context. + new_chat_configs["context_file_path"] = current_chat_configs.context_file_path + self.selected_page.chat_configs = current_chat_configs.model_copy( + update=new_chat_configs ) - print(self.selected_page.page_number) - print(self.selected_page.chat_obj.configs) - print() def render(self, sidebar_tabs: dict): """Render the multipage app with focus on the selected page.""" diff --git a/gpt_buddy_bot/chat.py b/gpt_buddy_bot/chat.py index 3ec6b8e..c5b0dbc 100644 --- a/gpt_buddy_bot/chat.py +++ b/gpt_buddy_bot/chat.py @@ -1,8 +1,10 @@ #!/usr/bin/env python3 +import uuid from collections import defaultdict import openai +from . 
import GeneralConstants from .chat_configs import ChatOptions from .chat_context import BaseChatContext, EmbeddingBasedChatContext from .tokens import TokenUsageDatabase, get_n_tokens @@ -10,19 +12,26 @@ class Chat: def __init__(self, configs: ChatOptions): + self.id = uuid.uuid4() self.configs = configs for field in self.configs.model_fields: setattr(self, field, self.configs[field]) + if self.assistant_name is None: + self.assistant_name = self.model + self.token_usage = defaultdict(lambda: {"input": 0, "output": 0}) self.token_usage_db = TokenUsageDatabase(fpath=self.token_usage_db_path) + if self.context_file_path is None: + self.context_file_path = ( + GeneralConstants.PACKAGE_TMPDIR / f"embeddings_for_chat_{self.id}.csv" + ) + if self.context_model is None: self.context_handler = BaseChatContext(parent_chat=self) elif self.context_model == "text-embedding-ada-002": - self.context_handler = EmbeddingBasedChatContext( - embedding_model=self.context_model, parent_chat=self - ) + self.context_handler = EmbeddingBasedChatContext(parent_chat=self) else: raise NotImplementedError(f"Unknown context model: {self.context_model}") @@ -60,7 +69,9 @@ def __del__(self): @classmethod def from_cli_args(cls, cli_args): chat_opts = { - k: v for k, v in vars(cli_args).items() if k in ChatOptions.model_fields + k: v + for k, v in vars(cli_args).items() + if k in ChatOptions.model_fields and v is not None } configs = ChatOptions.model_validate(chat_opts) return cls(configs) @@ -154,6 +165,6 @@ def _make_api_call(conversation: list, model: str): openai.error.ServiceUnavailableError, openai.error.Timeout, ) as error: - print(f" > {error}. Retrying...") + print(f"\n > {error}. Retrying...") else: success = True diff --git a/gpt_buddy_bot/chat_configs.py b/gpt_buddy_bot/chat_configs.py index 2e8df66..a5c06f8 100644 --- a/gpt_buddy_bot/chat_configs.py +++ b/gpt_buddy_bot/chat_configs.py @@ -1,13 +1,13 @@ #!/usr/bin/env python3 """Registration and validation of options.""" -import types +import argparse import typing from functools import reduce from getpass import getuser from pathlib import Path -from typing import Any, Literal, Optional, get_args +from typing import Literal, Optional, get_args -from pydantic import BaseModel, Field, validator +from pydantic import BaseModel, Field from gpt_buddy_bot import GeneralConstants @@ -40,6 +40,16 @@ def get_description(cls, field: str): """Return description of `field`.""" return cls._get_field_param(field=field, param="description") + @classmethod + def from_cli_args(cls, cli_args: argparse.Namespace): + """Return an instance of the class from CLI args.""" + relevant_args = { + k: v + for k, v in vars(cli_args).items() + if k in cls.model_fields and v is not None + } + return cls.model_validate(relevant_args) + @classmethod def _get_field_param(cls, field: str, param: str): """Return param `param` of field `field`.""" @@ -102,9 +112,7 @@ class ChatOptions(OpenAiApiCallOptions): """Model for the chat's configuration options.""" username: str = Field(default=getuser(), description="Name of the chat's user") - assistant_name: str = Field( - default="Based on model", description="Name of the chat's assistant" - ) + assistant_name: str = Field(default=None, description="Name of the chat's assistant") system_name: str = Field( default=GeneralConstants.PACKAGE_NAME, description="Name of the chat's system" ) @@ -112,6 +120,10 @@ class ChatOptions(OpenAiApiCallOptions): default="text-embedding-ada-002", description="OpenAI API model to use for embedding", ) + 
context_file_path: Path = Field( + default=None, + description="Path to the file to read/write the chat context from/to.", + ) ai_instructions: tuple[str, ...] = Field( default=("Answer with the fewest tokens possible.",), description="Initial instructions for the AI", @@ -123,10 +135,3 @@ class ChatOptions(OpenAiApiCallOptions): report_accounting_when_done: bool = Field( default=False, description="Report estimated costs when done with the chat." ) - - @validator("assistant_name", always=True) - def get_address(cls, assistant_name: str, values: dict[str, Any]) -> str: - assistant_name = assistant_name.lower().strip() - if assistant_name == "based on model": - return f"chat_{values.get('model', 'assistant').replace('.', '_')}" - return assistant_name diff --git a/gpt_buddy_bot/chat_context.py b/gpt_buddy_bot/chat_context.py index ca314c7..f187598 100644 --- a/gpt_buddy_bot/chat_context.py +++ b/gpt_buddy_bot/chat_context.py @@ -2,7 +2,6 @@ import csv import json import time -import uuid from collections import deque from pathlib import Path from typing import TYPE_CHECKING @@ -12,8 +11,6 @@ import pandas as pd from openai.embeddings_utils import cosine_similarity -from . import GeneralConstants - if TYPE_CHECKING: from .chat import Chat @@ -38,16 +35,22 @@ def get_context(self, text: str): class EmbeddingBasedChatContext(BaseChatContext): """Chat context.""" - def __init__(self, embedding_model: str, parent_chat: "Chat"): + def __init__(self, parent_chat: "Chat"): self.parent_chat = parent_chat - self.embedding_model = embedding_model - embd_file = GeneralConstants.PACKAGE_TMPDIR / f"embeddings_{uuid.uuid4()}.csv" - self.context_file_path = embd_file + + @property + def embedding_model(self): + return self.parent_chat.context_model + + @property + def context_file_path(self): + return self.parent_chat.context_file_path def add_to_history(self, text: str): embedding_request = self.calculate_embedding(text=text) - _store_object_and_embedding( + _store_message_embedding_data( obj=text, + embedding_model=self.embedding_model, embedding=embedding_request["embedding"], file_path=self.context_file_path, ) @@ -83,14 +86,17 @@ def request_embedding_from_openai(text: str, model: str): return {"embedding": embedding, "tokens_usage": tokens_usage} -def _store_object_and_embedding(obj, embedding, file_path: Path): +def _store_message_embedding_data( + obj, embedding_model: str, embedding: np.ndarray, file_path: Path +): """Store message and embeddings to file.""" # Adapted from # See also . 
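
Note: the context handling these patches refine works by ranking stored message embeddings against the embedding of the current prompt, as _find_context does just below with cosine_similarity. A standalone sketch of that ranking step, using made-up 3-D vectors in place of real OpenAI embeddings (most_relevant is an illustrative helper, not part of this codebase):

    import numpy as np


    def cosine_similarity(u, v):
        """Cosine similarity between two 1-D vectors."""
        return float(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v)))


    def most_relevant(history, query_embedding, n=2):
        """Return the n stored messages whose embeddings best match the query."""
        ranked = sorted(
            history,
            key=lambda pair: cosine_similarity(pair[1], query_embedding),
            reverse=True,
        )
        return [message for message, _ in ranked[:n]]


    history = [
        ("We talked about cats.", np.array([0.9, 0.1, 0.0])),
        ("We talked about sales figures.", np.array([0.0, 0.2, 0.9])),
        ("We talked about dog food.", np.array([0.7, 0.4, 0.1])),
    ]
    print(most_relevant(history, np.array([0.8, 0.2, 0.1])))
    # -> ['We talked about cats.', 'We talked about dog food.']
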
-    emb_mess_pair = {
+    embedding_file_entry_data = {
         "timestamp": int(time.time()),
+        "embedding_model": f"{embedding_model}",
         "message": json.dumps(obj),
         "embedding": json.dumps(embedding),
     }
@@ -99,10 +105,10 @@ def _store_object_and_embedding(obj, embedding, file_path: Path):
     write_mode = "w" if init_file else "a"
 
     with open(file_path, write_mode, newline="") as file:
-        writer = csv.DictWriter(file, fieldnames=emb_mess_pair.keys())
+        writer = csv.DictWriter(file, fieldnames=embedding_file_entry_data.keys())
         if init_file:
             writer.writeheader()
-        writer.writerow(emb_mess_pair)
+        writer.writerow(embedding_file_entry_data)
 
 
 def _compose_context_msg(history: list[str], system_name: str):
@@ -124,6 +130,7 @@ def _find_context(
     except FileNotFoundError:
         return []
 
+    df = df.loc[df["embedding_model"] == parent_chat.context_model]
     df["embedding"] = df.embedding.apply(ast.literal_eval).apply(np.array)
     df["similarity"] = df["embedding"].apply(lambda x: cosine_similarity(x, embedding))
 
diff --git a/gpt_buddy_bot/command_definitions.py b/gpt_buddy_bot/command_definitions.py
index 9d2d957..6736faa 100644
--- a/gpt_buddy_bot/command_definitions.py
+++ b/gpt_buddy_bot/command_definitions.py
@@ -4,6 +4,7 @@
 
 from . import GeneralConstants
 from .chat import Chat
+from .chat_configs import ChatOptions
 
 
 def accounting(args):
@@ -18,8 +19,9 @@ def run_on_terminal(args):
 
 def run_on_ui(args):
     """Run the chat on the browser."""
-    with open(GeneralConstants.PARSED_ARGS_FILE, "wb") as parsed_args_file:
-        pickle.dump(args, parsed_args_file)
+    with open(GeneralConstants.PARSED_ARGS_FILE, "wb") as chat_options_file:
+        pickle.dump(ChatOptions.from_cli_args(args), chat_options_file)
+
     app_path = GeneralConstants.PACKAGE_DIRECTORY / "app" / "app.py"
     try:
         run(

From c0f873af40bf1e8e355f129cb8032da72cde01c7 Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros
Date: Sat, 4 Nov 2023 22:20:58 +0100
Subject: [PATCH 052/109] Fix selectbox value memory issue

---
 gpt_buddy_bot/app/app_page_templates.py | 12 ++++++++--
 gpt_buddy_bot/app/multipage.py          | 29 ++++++++++++++-------------
 gpt_buddy_bot/command_definitions.py    |  4 ++--
 3 files changed, 26 insertions(+), 19 deletions(-)

diff --git a/gpt_buddy_bot/app/app_page_templates.py b/gpt_buddy_bot/app/app_page_templates.py
index ff2a429..8c5651b 100644
--- a/gpt_buddy_bot/app/app_page_templates.py
+++ b/gpt_buddy_bot/app/app_page_templates.py
@@ -113,7 +113,9 @@ def render(self):
 
         # Accept user input
         if prompt := st.chat_input("Send a message"):
-            self.chat_history.append({"role": "user", "content": prompt})
+            self.chat_history.append(
+                {"role": "user", "name": self.chat_obj.username, "content": prompt}
+            )
             # Display user message in chat message container
             with st.chat_message("user"):
                 st.markdown(prompt)
@@ -129,7 +131,13 @@ def render(self):
                     full_response += chunk
                     message_placeholder.markdown(full_response + "▌")
                 message_placeholder.markdown(full_response)
-            self.chat_history.append({"role": "assistant", "content": full_response})
+            self.chat_history.append(
+                {
+                    "role": "assistant",
+                    "name": self.chat_obj.assistant_name,
+                    "content": full_response,
+                }
+            )
 
         # Reset title according to conversation initial contents
         if "page_title" not in self.state and len(self.chat_history) > 3:
diff --git a/gpt_buddy_bot/app/multipage.py b/gpt_buddy_bot/app/multipage.py
index 57a2682..f8986f7 100644
--- a/gpt_buddy_bot/app/multipage.py
+++ b/gpt_buddy_bot/app/multipage.py
@@ -6,8 +6,6 @@
 
 from gpt_buddy_bot.chat_configs import ChatOptions
 
-UnchangedValue = object()
-
 
 class MultiPageApp:
     """Framework for 
creating streamlite multipage apps. @@ -93,19 +91,17 @@ def handle_ui_page_selection(self, sidebar_tabs: dict): field_type = ChatOptions.get_type(field=field_name) element_key = f"{field_name}-pg-{self.selected_page.page_id}-ui-element" - current_field_value = getattr(current_chat_configs, field_name) - new_field_value = UnchangedValue + last_field_value = getattr(current_chat_configs, field_name) if choices: - new_field_value = st.selectbox( - title, - choices, - key=element_key, - index=choices.index(current_field_value), + index = ( + 0 + if st.session_state.get("last_rendered_page") + == self.selected_page.page_id + else choices.index(last_field_value) ) + st.selectbox(title, choices, key=element_key, index=index) elif field_type == str: - new_field_value = st.text_input( - title, value=current_field_value, key=element_key - ) + st.text_input(title, value=last_field_value, key=element_key) elif field_type in [int, float]: step = 1 if field_type == int else 0.01 bounds = [None, None] @@ -118,9 +114,10 @@ def handle_ui_page_selection(self, sidebar_tabs: dict): bounds[1] = item.lt - step with contextlib.suppress(AttributeError): bounds[1] = item.le - new_field_value = st.number_input( + + st.number_input( title, - value=current_field_value, + value=last_field_value, placeholder="OpenAI Default", min_value=bounds[0], max_value=bounds[1], @@ -128,7 +125,8 @@ def handle_ui_page_selection(self, sidebar_tabs: dict): key=element_key, ) - if new_field_value is not UnchangedValue: + new_field_value = st.session_state.get(element_key) + if new_field_value != last_field_value: new_chat_configs[field_name] = new_field_value if new_chat_configs: @@ -142,3 +140,4 @@ def render(self, sidebar_tabs: dict): """Render the multipage app with focus on the selected page.""" self.handle_ui_page_selection(sidebar_tabs=sidebar_tabs) self.selected_page.render() + st.session_state["last_rendered_page"] = self.selected_page.page_id diff --git a/gpt_buddy_bot/command_definitions.py b/gpt_buddy_bot/command_definitions.py index 6736faa..a63e00f 100644 --- a/gpt_buddy_bot/command_definitions.py +++ b/gpt_buddy_bot/command_definitions.py @@ -30,8 +30,8 @@ def run_on_ui(args): "run", app_path.as_posix(), "--theme.base=dark", - # "--runner.fastReruns", - # "True", + "--runner.fastReruns", + "True", "--server.runOnSave", "True", "--browser.gatherUsageStats", From 04310b52a7ecc5688f0693d48f6d5a7b408b6765 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Sun, 5 Nov 2023 01:45:27 +0100 Subject: [PATCH 053/109] Allow changing chat params on the fly --- gpt_buddy_bot/app/app_page_templates.py | 62 +++++++++++++------------ gpt_buddy_bot/app/multipage.py | 21 +++++---- gpt_buddy_bot/chat.py | 23 ++++++--- gpt_buddy_bot/chat_configs.py | 7 ++- 4 files changed, 64 insertions(+), 49 deletions(-) diff --git a/gpt_buddy_bot/app/app_page_templates.py b/gpt_buddy_bot/app/app_page_templates.py index 8c5651b..c4efe57 100644 --- a/gpt_buddy_bot/app/app_page_templates.py +++ b/gpt_buddy_bot/app/app_page_templates.py @@ -18,15 +18,11 @@ class AppPage(ABC): def __init__(self, sidebar_title: str = "", page_title: str = ""): self.page_id = str(uuid.uuid4()) self.page_number = st.session_state.get("n_created_pages", 0) + 1 - self._initial_sidebar_title = ( + + self._sidebar_title = ( sidebar_title if sidebar_title else f"Page {self.page_number}" ) - self._init_page_title = page_title if page_title else self.default_page_title - - @property - def default_page_title(self): - """Return the default page title.""" - return self.sidebar_title + 
self._page_title = page_title if page_title else self.sidebar_title @property def state(self): @@ -35,22 +31,26 @@ def state(self): st.session_state[self.page_id] = {} return st.session_state[self.page_id] + @property + def sidebar_title(self): + """Get the title of the page in the sidebar.""" + return self.state.get("sidebar_title", self._sidebar_title) + + @sidebar_title.setter + def sidebar_title(self, value): + """Set the sidebar title for the page.""" + self.state["sidebar_title"] = value + @property def title(self): """Get the title of the page.""" - return self.state.get("page_title", self._init_page_title) + return self.state.get("page_title", self._page_title) @title.setter def title(self, value): """Set the title of the page.""" - st.title(value) self.state["page_title"] = value - self.state["sidebar_title"] = value - - @property - def sidebar_title(self): - """Get the title of the page in the sidebar.""" - return self.state.get("sidebar_title", self._initial_sidebar_title) + st.title(value) @abstractmethod def render(self): @@ -58,12 +58,12 @@ def render(self): class ChatBotPage(AppPage): - @property - def default_page_title(self): - """Return the default page title.""" - page_title = f":speech_balloon: {GeneralConstants.APP_NAME}\n" - page_title += f"## {self.chat_obj.model}\n### {self.sidebar_title}" - return page_title + def __init__(self, sidebar_title: str = "", page_title: str = ""): + super().__init__(sidebar_title, page_title) + self._page_title = f":speech_balloon: {GeneralConstants.APP_NAME}\n" + self._sidebar_title = ( + sidebar_title if sidebar_title else f"Chat {self.page_number}" + ) @property def chat_configs(self) -> ChatOptions: @@ -74,19 +74,18 @@ def chat_configs(self) -> ChatOptions: self.state["chat_configs"] = pickle.load(chat_configs_file) return self.state["chat_configs"] - @chat_configs.setter - def chat_configs(self, value: ChatOptions): - self.state["chat_configs"] = ChatOptions.model_validate(value) - with contextlib.suppress(KeyError): - del self.state["chat_obj"] - @property def chat_obj(self) -> Chat: """Return the chat object responsible for the queries in this page.""" if "chat_obj" not in self.state: - self.state["chat_obj"] = Chat(self.chat_configs) + self.chat_obj = Chat(self.chat_configs) return self.state["chat_obj"] + @chat_obj.setter + def chat_obj(self, value: Chat): + self.state["chat_obj"] = value + self.state["chat_configs"] = value.configs + @property def chat_history(self) -> list[dict[str, str]]: """Return the chat history of the page.""" @@ -108,11 +107,13 @@ def render(self): """ st.title(self.title) - self.render_chat_history() # Accept user input - if prompt := st.chat_input("Send a message"): + placeholder = ( + f"Send a message to {self.chat_obj.assistant_name} ({self.chat_obj.model})" + ) + if prompt := st.chat_input(placeholder=placeholder): self.chat_history.append( {"role": "user", "name": self.chat_obj.username, "content": prompt} ) @@ -147,3 +148,4 @@ def render(self): message["content"] for message in self.chat_history ) self.title = "".join(self.chat_obj.respond_system_prompt(prompt)) + self.sidebar_title = self.title diff --git a/gpt_buddy_bot/app/multipage.py b/gpt_buddy_bot/app/multipage.py index f8986f7..6a2c915 100644 --- a/gpt_buddy_bot/app/multipage.py +++ b/gpt_buddy_bot/app/multipage.py @@ -4,6 +4,7 @@ import streamlit as st from app_page_templates import AppPage, ChatBotPage +from gpt_buddy_bot.chat import Chat from gpt_buddy_bot.chat_configs import ChatOptions @@ -83,8 +84,8 @@ def 
handle_ui_page_selection(self, sidebar_tabs: dict): help="Delete this chat.", ) with sidebar_tabs["settings"]: - current_chat_configs = self.selected_page.chat_configs - new_chat_configs = {} + current_chat_configs = self.selected_page.chat_obj.configs + updates_to_chat_configs = {} for field_name, field in ChatOptions.model_fields.items(): title = field_name.replace("_", " ").title() choices = ChatOptions.get_allowed_values(field=field_name) @@ -124,17 +125,17 @@ def handle_ui_page_selection(self, sidebar_tabs: dict): step=step, key=element_key, ) + else: + continue new_field_value = st.session_state.get(element_key) if new_field_value != last_field_value: - new_chat_configs[field_name] = new_field_value - - if new_chat_configs: - # Update chat configs. Make sure not to lose the conversation context. - new_chat_configs["context_file_path"] = current_chat_configs.context_file_path - self.selected_page.chat_configs = current_chat_configs.model_copy( - update=new_chat_configs - ) + updates_to_chat_configs[field_name] = new_field_value + + if updates_to_chat_configs: + new_chat_configs = current_chat_configs.model_dump() + new_chat_configs.update(updates_to_chat_configs) + self.selected_page.chat_obj = Chat.from_dict(new_chat_configs) def render(self, sidebar_tabs: dict): """Render the multipage app with focus on the selected page.""" diff --git a/gpt_buddy_bot/chat.py b/gpt_buddy_bot/chat.py index c5b0dbc..453b48f 100644 --- a/gpt_buddy_bot/chat.py +++ b/gpt_buddy_bot/chat.py @@ -13,12 +13,10 @@ class Chat: def __init__(self, configs: ChatOptions): self.id = uuid.uuid4() - self.configs = configs - for field in self.configs.model_fields: - setattr(self, field, self.configs[field]) - if self.assistant_name is None: - self.assistant_name = self.model + self._passed_configs = configs + for field in self._passed_configs.model_fields: + setattr(self, field, self._passed_configs[field]) self.token_usage = defaultdict(lambda: {"input": 0, "output": 0}) self.token_usage_db = TokenUsageDatabase(fpath=self.token_usage_db_path) @@ -35,6 +33,14 @@ def __init__(self, configs: ChatOptions): else: raise NotImplementedError(f"Unknown context model: {self.context_model}") + @property + def configs(self): + """Return the chat's configs after initialisation.""" + configs_dict = {} + for field_name in ChatOptions.model_fields: + configs_dict[field_name] = getattr(self, field_name) + return ChatOptions.model_validate(configs_dict) + @property def base_directive(self): msg_content = " ".join( @@ -66,6 +72,10 @@ def __del__(self): if self.report_accounting_when_done: self.report_token_usage() + @classmethod + def from_dict(cls, configs: dict): + return cls(configs=ChatOptions.model_validate(configs)) + @classmethod def from_cli_args(cls, cli_args): chat_opts = { @@ -73,8 +83,7 @@ def from_cli_args(cls, cli_args): for k, v in vars(cli_args).items() if k in ChatOptions.model_fields and v is not None } - configs = ChatOptions.model_validate(chat_opts) - return cls(configs) + return cls.from_dict(chat_opts) def respond_user_prompt(self, prompt: str): yield from self._respond_prompt(prompt=prompt, role="user") diff --git a/gpt_buddy_bot/chat_configs.py b/gpt_buddy_bot/chat_configs.py index a5c06f8..844a285 100644 --- a/gpt_buddy_bot/chat_configs.py +++ b/gpt_buddy_bot/chat_configs.py @@ -112,9 +112,12 @@ class ChatOptions(OpenAiApiCallOptions): """Model for the chat's configuration options.""" username: str = Field(default=getuser(), description="Name of the chat's user") - assistant_name: str = Field(default=None, 
description="Name of the chat's assistant") + assistant_name: str = Field( + default=GeneralConstants.APP_NAME, description="Name of the chat's assistant" + ) system_name: str = Field( - default=GeneralConstants.PACKAGE_NAME, description="Name of the chat's system" + default=f"{GeneralConstants.PACKAGE_NAME}_system", + description="Name of the chat's system", ) context_model: Literal["text-embedding-ada-002", None] = Field( default="text-embedding-ada-002", From e9b45ef19241873da577c94ec849e7a6f71c4bae Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Sun, 5 Nov 2023 02:02:16 +0100 Subject: [PATCH 054/109] Minor appearence fixes --- gpt_buddy_bot/app/app.py | 2 +- gpt_buddy_bot/app/app_page_templates.py | 3 +-- gpt_buddy_bot/app/multipage.py | 4 ++++ 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/gpt_buddy_bot/app/app.py b/gpt_buddy_bot/app/app.py index b1cfb2d..a00f048 100644 --- a/gpt_buddy_bot/app/app.py +++ b/gpt_buddy_bot/app/app.py @@ -14,7 +14,7 @@ def run_app(): sidebar_tabs = {"chats": tab1, "settings": tab2} with tab1: # Create a new chat upon init or button press - if st.button(label=":speech_balloon: New Chat") or not app.pages: + if st.button(label=":heavy_plus_sign: New Chat") or not app.pages: app.add_page(ChatBotPage(), selected=True) app.render(sidebar_tabs=sidebar_tabs) diff --git a/gpt_buddy_bot/app/app_page_templates.py b/gpt_buddy_bot/app/app_page_templates.py index c4efe57..f5abc05 100644 --- a/gpt_buddy_bot/app/app_page_templates.py +++ b/gpt_buddy_bot/app/app_page_templates.py @@ -1,5 +1,4 @@ """Utilities for creating pages in a streamlit app.""" -import contextlib import pickle import sys import uuid @@ -60,7 +59,7 @@ def render(self): class ChatBotPage(AppPage): def __init__(self, sidebar_title: str = "", page_title: str = ""): super().__init__(sidebar_title, page_title) - self._page_title = f":speech_balloon: {GeneralConstants.APP_NAME}\n" + self._page_title = f"{GeneralConstants.APP_NAME} :speech_balloon:" self._sidebar_title = ( sidebar_title if sidebar_title else f"Chat {self.page_number}" ) diff --git a/gpt_buddy_bot/app/multipage.py b/gpt_buddy_bot/app/multipage.py index 6a2c915..81bb746 100644 --- a/gpt_buddy_bot/app/multipage.py +++ b/gpt_buddy_bot/app/multipage.py @@ -84,6 +84,10 @@ def handle_ui_page_selection(self, sidebar_tabs: dict): help="Delete this chat.", ) with sidebar_tabs["settings"]: + caption = f"\u2699\uFE0F Settings for Chat {self.selected_page.page_number}" + if self.selected_page.title != self.selected_page._page_title: + caption += f": {self.selected_page.title}" + st.caption(caption) current_chat_configs = self.selected_page.chat_obj.configs updates_to_chat_configs = {} for field_name, field in ChatOptions.model_fields.items(): From c36fdd5ba343433d59694794957fad04ce0c182b Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Sun, 5 Nov 2023 12:37:10 +0100 Subject: [PATCH 055/109] Some refactoring --- gpt_buddy_bot/app/app.py | 8 ++++-- gpt_buddy_bot/app/app_page_templates.py | 37 +++++++++++++------------ gpt_buddy_bot/app/multipage.py | 29 +++++++++++++------ gpt_buddy_bot/chat.py | 4 +-- gpt_buddy_bot/chat_configs.py | 9 ++++-- 5 files changed, 53 insertions(+), 34 deletions(-) diff --git a/gpt_buddy_bot/app/app.py b/gpt_buddy_bot/app/app.py index a00f048..46afcb3 100644 --- a/gpt_buddy_bot/app/app.py +++ b/gpt_buddy_bot/app/app.py @@ -1,21 +1,23 @@ """Entrypoint for the package's UI.""" import streamlit as st from app_page_templates import ChatBotPage -from multipage import MultiPageApp +from multipage import 
MultipageChatbotApp from gpt_buddy_bot import GeneralConstants def run_app(): """Create and run an instance of the pacage's app.""" - app = MultiPageApp(page_title=GeneralConstants.APP_NAME, page_icon=":speech_balloon:") + app = MultipageChatbotApp( + page_title=GeneralConstants.APP_NAME, page_icon=":speech_balloon:" + ) with st.sidebar: tab1, tab2 = st.tabs(["Chats", "Settings"]) sidebar_tabs = {"chats": tab1, "settings": tab2} with tab1: # Create a new chat upon init or button press if st.button(label=":heavy_plus_sign: New Chat") or not app.pages: - app.add_page(ChatBotPage(), selected=True) + app.add_page() app.render(sidebar_tabs=sidebar_tabs) diff --git a/gpt_buddy_bot/app/app_page_templates.py b/gpt_buddy_bot/app/app_page_templates.py index f5abc05..2c78e66 100644 --- a/gpt_buddy_bot/app/app_page_templates.py +++ b/gpt_buddy_bot/app/app_page_templates.py @@ -58,11 +58,14 @@ def render(self): class ChatBotPage(AppPage): def __init__(self, sidebar_title: str = "", page_title: str = ""): - super().__init__(sidebar_title, page_title) - self._page_title = f"{GeneralConstants.APP_NAME} :speech_balloon:" - self._sidebar_title = ( - sidebar_title if sidebar_title else f"Chat {self.page_number}" + super().__init__(sidebar_title=sidebar_title, page_title=page_title) + chat_title = f"Chat #{self.page_number}" + self._page_title = ( + page_title + if page_title + else f"{GeneralConstants.APP_NAME} :speech_balloon:\n{chat_title}" ) + self._sidebar_title = sidebar_title if sidebar_title else chat_title @property def chat_configs(self) -> ChatOptions: @@ -113,24 +116,24 @@ def render(self): f"Send a message to {self.chat_obj.assistant_name} ({self.chat_obj.model})" ) if prompt := st.chat_input(placeholder=placeholder): - self.chat_history.append( - {"role": "user", "name": self.chat_obj.username, "content": prompt} - ) # Display user message in chat message container with st.chat_message("user"): st.markdown(prompt) - # Display assistant response in chat message container + self.chat_history.append( + {"role": "user", "name": self.chat_obj.username, "content": prompt} + ) + + # Display (stream) assistant response in chat message container with st.chat_message("assistant"): - # Use blinking cursor to indicate activity - message_placeholder = st.empty() - message_placeholder.markdown("▌") - full_response = "" - # Stream assistant response - for chunk in self.chat_obj.respond_user_prompt(prompt): - full_response += chunk - message_placeholder.markdown(full_response + "▌") - message_placeholder.markdown(full_response) + with st.empty(): + st.markdown("▌") + full_response = "" + for chunk in self.chat_obj.respond_user_prompt(prompt): + full_response += chunk + st.markdown(full_response + "▌") + st.markdown(full_response) + self.chat_history.append( { "role": "assistant", diff --git a/gpt_buddy_bot/app/multipage.py b/gpt_buddy_bot/app/multipage.py index 81bb746..b135497 100644 --- a/gpt_buddy_bot/app/multipage.py +++ b/gpt_buddy_bot/app/multipage.py @@ -1,5 +1,6 @@ "Code for the creation streamlit apps with dynamically created pages." import contextlib +from abc import ABC, abstractmethod import streamlit as st from app_page_templates import AppPage, ChatBotPage @@ -8,7 +9,7 @@ from gpt_buddy_bot.chat_configs import ChatOptions -class MultiPageApp: +class AbstractMultipageApp(ABC): """Framework for creating streamlite multipage apps. 
Adapted from: @@ -37,7 +38,7 @@ def pages(self) -> AppPage: st.session_state["available_pages"] = {} return st.session_state["available_pages"] - def add_page(self, page: AppPage, selected: bool = False) -> None: + def add_page(self, page: AppPage, selected: bool = True): """Add a page to the app.""" self.pages[page.page_id] = page self.n_created_pages += 1 @@ -60,6 +61,21 @@ def selected_page(self) -> ChatBotPage: return next(iter(self.pages.values())) return st.session_state["selected_page"] + @abstractmethod + def handle_ui_page_selection(self, **kwargs): + """Control page selection in the UI sidebar.""" + + def render(self, **kwargs): + """Render the multipage app with focus on the selected page.""" + self.handle_ui_page_selection(**kwargs) + self.selected_page.render() + st.session_state["last_rendered_page"] = self.selected_page.page_id + + +class MultipageChatbotApp(AbstractMultipageApp): + def add_page(self, selected: bool = True): + return super().add_page(page=ChatBotPage(), selected=selected) + def handle_ui_page_selection(self, sidebar_tabs: dict): """Control page selection in the UI sidebar.""" with sidebar_tabs["chats"]: @@ -83,8 +99,9 @@ def handle_ui_page_selection(self, sidebar_tabs: dict): kwargs={"page": page}, help="Delete this chat.", ) + with sidebar_tabs["settings"]: - caption = f"\u2699\uFE0F Settings for Chat {self.selected_page.page_number}" + caption = f"\u2699\uFE0F Settings for Chat #{self.selected_page.page_number}" if self.selected_page.title != self.selected_page._page_title: caption += f": {self.selected_page.title}" st.caption(caption) @@ -140,9 +157,3 @@ def handle_ui_page_selection(self, sidebar_tabs: dict): new_chat_configs = current_chat_configs.model_dump() new_chat_configs.update(updates_to_chat_configs) self.selected_page.chat_obj = Chat.from_dict(new_chat_configs) - - def render(self, sidebar_tabs: dict): - """Render the multipage app with focus on the selected page.""" - self.handle_ui_page_selection(sidebar_tabs=sidebar_tabs) - self.selected_page.render() - st.session_state["last_rendered_page"] = self.selected_page.page_id diff --git a/gpt_buddy_bot/chat.py b/gpt_buddy_bot/chat.py index 453b48f..b926d54 100644 --- a/gpt_buddy_bot/chat.py +++ b/gpt_buddy_bot/chat.py @@ -47,9 +47,8 @@ def base_directive(self): [ instruction.strip() for instruction in [ - f"Your name is {self.assistant_name}.", + f"You are {self.assistant_name} (model {self.model}).", f"You are a helpful assistant to {self.username}.", - "You answer correctly. You do not lie.", " ".join( [f"{instruct.strip(' .')}." for instruct in self.ai_instructions] ), @@ -58,7 +57,6 @@ def base_directive(self): if instruction.strip() ] ) - return {"role": "system", "name": self.system_name, "content": msg_content} def __del__(self): diff --git a/gpt_buddy_bot/chat_configs.py b/gpt_buddy_bot/chat_configs.py index 844a285..09c1aff 100644 --- a/gpt_buddy_bot/chat_configs.py +++ b/gpt_buddy_bot/chat_configs.py @@ -1,11 +1,12 @@ #!/usr/bin/env python3 """Registration and validation of options.""" import argparse +import types import typing from functools import reduce from getpass import getuser from pathlib import Path -from typing import Literal, Optional, get_args +from typing import Literal, Optional, get_args, get_origin from pydantic import BaseModel, Field @@ -128,7 +129,11 @@ class ChatOptions(OpenAiApiCallOptions): description="Path to the file to read/write the chat context from/to.", ) ai_instructions: tuple[str, ...] 
= Field( - default=("Answer with the fewest tokens possible.",), + default=( + "You answer correctly.", + "You do not lie.", + "You answer with the fewest tokens possible.", + ), description="Initial instructions for the AI", ) token_usage_db_path: Path = Field( From fc842a1e8cbed211a861548d79d7c10673f74e9c Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Sun, 5 Nov 2023 12:37:23 +0100 Subject: [PATCH 056/109] Allow changing AI persona on the fly from UI --- gpt_buddy_bot/app/multipage.py | 28 +++++++++++++++++++++++----- gpt_buddy_bot/chat_configs.py | 2 ++ 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/gpt_buddy_bot/app/multipage.py b/gpt_buddy_bot/app/multipage.py index b135497..3dc7c37 100644 --- a/gpt_buddy_bot/app/multipage.py +++ b/gpt_buddy_bot/app/multipage.py @@ -107,7 +107,14 @@ def handle_ui_page_selection(self, sidebar_tabs: dict): st.caption(caption) current_chat_configs = self.selected_page.chat_obj.configs updates_to_chat_configs = {} - for field_name, field in ChatOptions.model_fields.items(): + + # Present the user with the model and instructions fields first + field_names = ["model", "ai_instructions"] + field_names += [field_name for field_name in ChatOptions.model_fields] + field_names = list(dict.fromkeys(field_names)) + model_fiedls = {k: ChatOptions.model_fields[k] for k in field_names} + + for field_name, field in model_fiedls.items(): title = field_name.replace("_", " ").title() choices = ChatOptions.get_allowed_values(field=field_name) field_type = ChatOptions.get_type(field=field_name) @@ -121,9 +128,13 @@ def handle_ui_page_selection(self, sidebar_tabs: dict): == self.selected_page.page_id else choices.index(last_field_value) ) - st.selectbox(title, choices, key=element_key, index=index) + new_field_value = st.selectbox( + title, choices, key=element_key, index=index + ) elif field_type == str: - st.text_input(title, value=last_field_value, key=element_key) + new_field_value = st.text_input( + title, value=last_field_value, key=element_key + ) elif field_type in [int, float]: step = 1 if field_type == int else 0.01 bounds = [None, None] @@ -137,7 +148,7 @@ def handle_ui_page_selection(self, sidebar_tabs: dict): with contextlib.suppress(AttributeError): bounds[1] = item.le - st.number_input( + new_field_value = st.number_input( title, value=last_field_value, placeholder="OpenAI Default", @@ -146,10 +157,17 @@ def handle_ui_page_selection(self, sidebar_tabs: dict): step=step, key=element_key, ) + elif field_type in (list, tuple): + new_field_value = st.text_area( + title, + value="\n".join(last_field_value), + key=element_key, + help="Directives that the AI should follow.", + ) + new_field_value = tuple(new_field_value.split("\n")) else: continue - new_field_value = st.session_state.get(element_key) if new_field_value != last_field_value: updates_to_chat_configs[field_name] = new_field_value diff --git a/gpt_buddy_bot/chat_configs.py b/gpt_buddy_bot/chat_configs.py index 09c1aff..61e8e08 100644 --- a/gpt_buddy_bot/chat_configs.py +++ b/gpt_buddy_bot/chat_configs.py @@ -26,6 +26,8 @@ def get_type(cls, field: str): """Return type of `field`.""" type_hint = typing.get_type_hints(cls)[field] if isinstance(type_hint, type): + if isinstance(type_hint, types.GenericAlias): + return get_origin(type_hint) return type_hint type_hint_first_arg = get_args(type_hint)[0] if isinstance(type_hint_first_arg, type): From a89f52825228b9828746d5637d05c23966278690 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Sun, 5 Nov 2023 17:57:48 +0100 Subject: 
From a89f52825228b9828746d5637d05c23966278690 Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros 
Date: Sun, 5 Nov 2023 17:57:48 +0100
Subject: [PATCH 057/109] Some cosmetic changes

---
 gpt_buddy_bot/app/.streamlit/config.toml | 15 +++++++++++++++
 gpt_buddy_bot/app/app_page_templates.py  | 20 ++++++++++++++++++--
 gpt_buddy_bot/command_definitions.py     |  7 -------
 3 files changed, 33 insertions(+), 9 deletions(-)
 create mode 100644 gpt_buddy_bot/app/.streamlit/config.toml

diff --git a/gpt_buddy_bot/app/.streamlit/config.toml b/gpt_buddy_bot/app/.streamlit/config.toml
new file mode 100644
index 0000000..fe8febd
--- /dev/null
+++ b/gpt_buddy_bot/app/.streamlit/config.toml
@@ -0,0 +1,15 @@
+# Streamlit configs.
+# See .
+[browser]
+    gatherUsageStats = false
+
+[runner]
+    fastReruns = true
+
+[server]
+    runOnSave = true
+
+[theme]
+    base = "light"
+    # Colors
+    primaryColor = "#2BB5E8"
diff --git a/gpt_buddy_bot/app/app_page_templates.py b/gpt_buddy_bot/app/app_page_templates.py
index 2c78e66..1a96f1d 100644
--- a/gpt_buddy_bot/app/app_page_templates.py
+++ b/gpt_buddy_bot/app/app_page_templates.py
@@ -59,7 +59,7 @@ def render(self):
 class ChatBotPage(AppPage):
     def __init__(self, sidebar_title: str = "", page_title: str = ""):
         super().__init__(sidebar_title=sidebar_title, page_title=page_title)
-        chat_title = f"Chat #{self.page_number}"
+        chat_title = f"### Chat #{self.page_number}"
         self._page_title = (
             page_title
             if page_title
@@ -109,7 +109,23 @@ def render(self):
 
         """
         st.title(self.title)
-        self.render_chat_history()
+        st.divider()
+
+        if self.chat_history:
+            self.render_chat_history()
+        else:
+            initial_bot_greetings = (
+                f"Hi! I'm {self.chat_obj.assistant_name}. How can I help you today?"
+            )
+            with st.chat_message("assistant"):
+                st.markdown(initial_bot_greetings)
+            self.chat_history.append(
+                {
+                    "role": "assistant",
+                    "name": self.chat_obj.assistant_name,
+                    "content": initial_bot_greetings,
+                }
+            )
 
         # Accept user input
         placeholder = (
diff --git a/gpt_buddy_bot/command_definitions.py b/gpt_buddy_bot/command_definitions.py
index a63e00f..0c2b238 100644
--- a/gpt_buddy_bot/command_definitions.py
+++ b/gpt_buddy_bot/command_definitions.py
@@ -29,13 +29,6 @@ def run_on_ui(args):
         "streamlit",
         "run",
         app_path.as_posix(),
-        "--theme.base=dark",
-        "--runner.fastReruns",
-        "True",
-        "--server.runOnSave",
-        "True",
-        "--browser.gatherUsageStats",
-        "False",
         "--",
         GeneralConstants.PARSED_ARGS_FILE.as_posix(),
     ]

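For reference, the streamlit flags deleted from run_on_ui() map one-to-one onto keys in the new config file: --browser.gatherUsageStats False becomes [browser] gatherUsageStats = false, --runner.fastReruns True becomes [runner] fastReruns = true, --server.runOnSave True becomes [server] runOnSave = true, and --theme.base moves to [theme] base. Assuming Streamlit is launched from the directory that holds .streamlit/ (which is how it discovers a project-local config.toml), the launch command reduces to a sketch like this; "app.py" is a placeholder path, not the repo's real entry point.

#!/usr/bin/env python3
import subprocess

# The options formerly passed as CLI flags now live in
# gpt_buddy_bot/app/.streamlit/config.toml, so only the app path (and any
# "--"-separated args for the app itself) remain on the command line.
subprocess.run(["streamlit", "run", "app.py"], check=True)
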
From 8eb1b75c380f7f8e21c9b09ea72f1bf79ce92a4b Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros 
Date: Sun, 5 Nov 2023 19:13:51 +0100
Subject: [PATCH 058/109] Use avatars freshly generated with DALL-E3

---
 gpt_buddy_bot/app/.streamlit/config.toml    |   2 +-
 gpt_buddy_bot/app/app_page_templates.py     |  19 +++++++++++++++----
 gpt_buddy_bot/app/data/assistant_avatar.png | Bin 0 -> 51452 bytes
 gpt_buddy_bot/app/data/user_avatar.png      | Bin 0 -> 45312 bytes
 gpt_buddy_bot/command_definitions.py        |   3 ++-
 pyproject.toml                              |   1 +
 6 files changed, 19 insertions(+), 6 deletions(-)
 create mode 100644 gpt_buddy_bot/app/data/assistant_avatar.png
 create mode 100644 gpt_buddy_bot/app/data/user_avatar.png

diff --git a/gpt_buddy_bot/app/.streamlit/config.toml b/gpt_buddy_bot/app/.streamlit/config.toml
index fe8febd..94668a3 100644
--- a/gpt_buddy_bot/app/.streamlit/config.toml
+++ b/gpt_buddy_bot/app/.streamlit/config.toml
@@ -10,6 +10,6 @@
     runOnSave = true
 
 [theme]
-    base = "light"
+    base = "dark"
     # Colors
     primaryColor = "#2BB5E8"
diff --git a/gpt_buddy_bot/app/app_page_templates.py b/gpt_buddy_bot/app/app_page_templates.py
index 1a96f1d..00a07fd 100644
--- a/gpt_buddy_bot/app/app_page_templates.py
+++ b/gpt_buddy_bot/app/app_page_templates.py
@@ -3,13 +3,20 @@
 import sys
 import uuid
 from abc import ABC, abstractmethod
+from pathlib import Path
 
 import streamlit as st
+from PIL import Image
 
 from gpt_buddy_bot import GeneralConstants
 from gpt_buddy_bot.chat import Chat
 from gpt_buddy_bot.chat_configs import ChatOptions
 
+_ASSISTANT_AVATAR_FILE_PATH = Path("data/assistant_avatar.png")
+_USER_AVATAR_FILE_PATH = Path("data/user_avatar.png")
+_ASSISTANT_AVATAR_IMAGE = Image.open(_ASSISTANT_AVATAR_FILE_PATH)
+_USER_AVATAR_IMAGE = Image.open(_USER_AVATAR_FILE_PATH)
+
 
 class AppPage(ABC):
     """Abstract base class for pages in a streamlit app."""
@@ -67,6 +74,8 @@ def __init__(self, sidebar_title: str = "", page_title: str = ""):
         )
         self._sidebar_title = sidebar_title if sidebar_title else chat_title
 
+        self.avatars = {"assistant": _ASSISTANT_AVATAR_IMAGE, "user": _USER_AVATAR_IMAGE}
+
     @property
     def chat_configs(self) -> ChatOptions:
         """Return the configs used for the page's chat object."""
@@ -98,7 +107,8 @@ def chat_history(self) -> list[dict[str, str]]:
     def render_chat_history(self):
         """Render the chat history of the page."""
         for message in self.chat_history:
-            with st.chat_message(message["role"]):
+            role = message["role"]
+            with st.chat_message(role, avatar=self.avatars[role]):
                 st.markdown(message["content"])
 
     def render(self):
@@ -117,7 +127,8 @@ def render(self):
             initial_bot_greetings = (
                 f"Hi! I'm {self.chat_obj.assistant_name}. How can I help you today?"
             )
-            with st.chat_message("assistant"):
+
+            with st.chat_message("assistant", avatar=self.avatars["assistant"]):
                 st.markdown(initial_bot_greetings)
             self.chat_history.append(
                 {
@@ -133,7 +144,7 @@ def render(self):
         )
         if prompt := st.chat_input(placeholder=placeholder):
             # Display user message in chat message container
-            with st.chat_message("user"):
+            with st.chat_message("user", avatar=self.avatars["user"]):
                 st.markdown(prompt)
 
             self.chat_history.append(
@@ -141,7 +152,7 @@ def render(self):
             )
 
             # Display (stream) assistant response in chat message container
-            with st.chat_message("assistant"):
+            with st.chat_message("assistant", avatar=self.avatars["assistant"]):
                 with st.empty():
                     st.markdown("▌")
                     full_response = ""
diff --git a/gpt_buddy_bot/app/data/assistant_avatar.png b/gpt_buddy_bot/app/data/assistant_avatar.png
new file mode 100644
index 0000000000000000000000000000000000000000..9c6ee50a7238432563f9141bc7e45139a874a54e
GIT binary patch
literal 51452
[51452-byte base85-encoded PNG stream omitted]

literal 0
HcmV?d00001

diff --git a/gpt_buddy_bot/app/data/user_avatar.png b/gpt_buddy_bot/app/data/user_avatar.png
new file mode 100644
index 0000000000000000000000000000000000000000..6f6270dbe8a64cffd9a23c3d849b1e5ca4ec1e39
GIT binary patch
literal 45312
[45312-byte base85-encoded PNG stream omitted; the patch text breaks off
inside this blob, so the command_definitions.py and pyproject.toml hunks
listed in the diffstat are not shown]
z@4M$6<@i+cuSfwVh*!rjv+`%w^9?5-4@&gAYol~`-~x5O>F?8(`jKS0Ik(XS;k<99)0v-KvOj?R)pZPNgW;H{Gw$v3R}4WSpyf%=J2 zl;--r#RTIF24rk-nCn#Oh$Mj?e^i}rFUaynP3ru zC>b$$WK4ElM_VTDC&~gLr#K25!mRxn&l?IT8R@&Cf$&zI^0ElNRN2);T}#@*pOmqi zEzU%v)n%bnw3YY7?=VW$q=a~jv|E^o?+Z(m)1j3_m|6~+9DCfM`2KJH62AAXU>?48 z%|<-!jKkv7+<6l_2ZJ;h1BUV2ULq0gIVrz}wkhV9$)UCxj5PI(Pqo1*)xip_l+@MV z8%3kNYOH4NTIsug@UY@Pi?C&M6w^Ks=dgLRBlxa?39Q+;Juo)+!@G9pw727+gH{BX znF2+8Q-&T$sy+;29!h&8sG$}piquU8sU>(4VgpdQIKtG8`s4q}!unkz4Pi_AQRGC% zeol2C3+QmPKpQclxHD-26dG@e16_Wff&xTv-+zY@k3Rm;P*TogZ!X~DE&n?nUXPKn z$q0?yfJ_Yzhjf7Eu8t7)S2$_bW7gEq-Ms2UR|)1##wanJAe`X1VxI-hr2ghCrFMsO z;sa2W?9y_+`D_=?ei#KAe5l_U_dfjZ141n=%~*5)I&2;u4Jife$(eK)9C6^jSh{q9 z!}3!?AC{3u+=j8Oy!0)joi1{sRd6$c`Q1`A3euQ)x{nCUoP@6MQ%;Oy6j|c?s5GXv9Bzc91%^nPQ&g3KxUbPbY zE?tC&H*N_yeTD-^n|5I1juGs)xEJG74!%szgaK*}$^X6!IM6DYkKs{e&7uckl7-JHZ=? z`K?-RS+h2-`VWi2xL}&jx6VgyHsIapDU1??;3PcN< z)S;!p;f`_Vj5Z`VbB(U&9jiI?cE*_};0M3@Gg}mL?hSt1mB*hDrH@bW@+**Nqbi9~ z{)3#pHyB-#)VQxCNx?fh)7a2OU|dpk05G500MAUd2z82*xg=!hyFgvFq>-bwdphJC z^4a@}#1qa4t&NU_g&#lJd%wM|126qo-H^OKQZ@dhKm!@|zP-eMPw{bg4d)8Ni)i zabIl;HC?>N16-lEa4r0Xc~qTX_Mx-A8d3fGSl*Td=qwHu#lyD<|IT#$Y&yK z(v4yZs=U!F3+Uv9$%(PE5}8>=80bk7e=jyf#k@J~A!Wgb(qnKSkLH#St53`X z?c=3?m%Hx!zquOseD^YZ>T_=ok~FU+XsHhk^LZ2Gav71(w!LI{o$KUl$&5IN=-cPf zqhCpS#cdBzUm0k|*Hcb9iq?gyoF`&h`G*=RbOFG@hQf-xtNayMW;tyFA} z{|Qjv%^`rxl3#c_#|J<3Rs8yvdvVs02ja!&o{X{4NsK?d0UfIjK+~L_2v4#qhVOqE zxtqj(9p?CC4gY<`&A9rJ4S4dCPsh2>KO43FA*$_C&grnlHYknL$a|EoNxDQyDK;NN znT_!s{UPw?Cr-12e;aoU;+Ff@2JIQvhaxUm?{#fH<&>lRwsuLOm4%??7z}G}D`^s( zDiA?HR+P3H5;LX`@F($!w(|2~5F70SMBe-2ak}68(=Tj}o~XEd?AqHOz}A6r^tCi$ z((ku3IE*njU|y;t)FuDlJuDH}PplzlBdl|9s@Hw>l}oS2*{2*CHrN^;8o~BIU5$=| z_e0j!jIs4wG5*N9(D&Z1PJI6lH{pKAHi%KLru01t zG`~sjmK`vDF|=dyW(Qqvyz5SE8Jh@6b-o>8t47Bwk3aK79Cp}2$cKlR3ILCpNeLB( z3B&;)llsK`B(m0WVA2%ZWeC1dm4&#JfzI^ao17;~JuRvsYb?rMq_P%#UzSqL&771V z_$2*SQW-3veXfh8UC1)_v{>>vOQ9Mf5F2{{8NE>w@VCMDNt`r()u9`i-gF}!IrI+rdG?@^@VqsQrca~RTOz8|fvO=xwD z?5+Jrw)f*rpZ;%r;mt2Z-@G0SIruVg{mro+i#xrotrb1(J^14dcjIH<{xw>tNK;cq z$?m!~H)7PiJ(v;FJp>}Uar7niMmKDYr8P{fhyK@F)(DqS5sZ)q@bvSa5cAJ%PBl4M ztCSwaK6ExNRZVz*LS9oB7=kqt2vR{tNg#|VCGqMqRH@h7)qt2}7O+n?T~HvfOcb5~ zfPnkFLfG1WRT1Wv&au#ekGA7Tuv<>cWJO~KEw5&xpE4NrOMS-8qEde{X6Ry5e3 zuD=uKpK@4u?>#P~8@FTn;IU8@HY9n8TTdr&JvugNcqruMs}1;7HV3TO2luSshCatI zSKad)r4 zLeC@^5m<)oFOsM!yHlliOZ^}tpNSdh8`2uJEKLMnY!6ByFjn0o1dQ1I7i*#D9PET? 
ztuSc;7YfufVu&R2QC>0cLKcK~uI`gcMHey|!=y1NSk4^q%1E7!PvD%hPsJxc@@0o) zCc|$YH~N9N{@#ah@5XJ|Z((29s&U3i_Tf!iF=y5Ourrg&DP$5<`lm1qgDfi$=1Iq| z!tbxTIrP())mXD-C*JU1Kf!)I-PmXG94E1xFg!XIwuRj0`qAO;;A^CeIwo4)+JZxU zm0smBN>BQqx3v*Q=dm1KyD2~iziC=adli3LeJ^%6DA-Fo;Ca|G=!C{oPCXX;9k>iL zLxY7nmAv*frTQ|1Bf(vMDowHwfhPeU`Fr!vbyIxzDDgJZhBWy2QXt zlCJ4h5yoiT#Kd7-U*JS3YN?8P8$fusAlj9zk9j6LXUdEd+4z zWxs<~;*`^l!=;yBi_S{ib=jAM-sN6@bNyYotxLH zkzLx&gJnQL()~m2PZ&Lodkp;ez|h3t0EV}04cohCj(Pl2#Xmbp)dEOeOiSB6KK~`p z3~ornwj8@q4JBVmGjoBW91}?iUMb}`=JXRkX1o)kiCKyUiJ$T)@l6n-SrO96Gg*to z30-ZJwO2ookPCQVC;D&zAkSFq#R8&i z=u{1!dBHh>(X$+)_WVw9e{}=%igQl#4w_J4$7kP-Zrc%z(tIkdZuv({xyHPsg+c72 zub*hDRlN3`)57l5zqtH+}5m!34TKt><>(8NJEp%F9yNBV*B`pX;oVsIqlr38$f})u~7|* zjOr`(1t4xR4S&;3Wg>>aU?|Bz`LzUQD|M)UtV4ajeLU=td2{fxb56nG zOXp$27yX~#OZ&#?#l&SdZKC(1>4$XO#$IKiU1H)Hd_ zkeh!sv^dGXWNsgh+J6a7+;2JBoEjSF@5e$1d78VrP@9?*qdhW7-1ROeFNY{r8njfh zqNuGjGM5OX?x17zLC55N$(%28+TPrXo9|nP8}EHMXvv&>=iZ?2aIfc{cXn716M{U3 zJS5hNhBk}>lt75lZqoX^?Wb}^q$6zt5HHUnT3}y17`j-wKBnzvCEHSUudMcBR4q0L zoR=c`#7fEA2JeNk6gQK67`5a}!ybCivzqIRXXIZG21tzqT1k?{sH!_>d=$@l?s@q7 zSAOUW|M95kY=ke@-S;rAzh^B@TeTE}Za{oe!SH<#p>yGa&>l`)5_~DF(O53=V&(7* zze%i9^Og3F@J&8C5sKw!3>Q7#bPUnrF!sEo4#MUuZwV5<$4$7Wo^S-tJ>f`9%}j^; zcxG+#`8ZDXjZIEt-2FARv|x!}#DEzXHTmM?a+9>Z1=W_O5F7CJ3foe=`!^ILmlxf| zxC5QbaMf4i9U8;ngB#G))Iv;X@srDM44H}?^7oz+pFsDlXPtu+Pd@>*T{|ceOL&_m zd_)?Yp~w_vhG#!GpdBCSAZcz%OE9yOb`Q03xH_-p{`&V7;v=h=od+-X8|oIfGcQCf zXBqUu!JT245uQb>(;H!&9zKp4-GkKEJ)v z-iFc9Y25zj)qze&9Df)(I$JU0V3Oa)#j=#2p2V_~kHEmg>oL84r+1iR*zfvg*xTe- zdD>#i7e@t!+THIU%@}KX0b%C!^0pf5|JJ53V_x z%#t*AiFp*fV|O1FrV``2WMhcULQEb;p9CQ9t(6Lh(z8XCCh#CVXJb@vjmHT=L9*5i ze~YwgUnTmhXsSGgZ?0<$vY(U5ZJAL?)fsLmNrP~HP1I{h`r*lA$yZ7>Ujk=1W_sa8 zPs5KKqtDQqL(dp~fUdgZeq6m~15RDFICx-up`!kK?nj5i>c%9j9qopSsCb)NTVQcr=@nc&^1_IBe-ANwXg`q8gp zmxn#>jCcLp%kZXmy$H3DabkqnV(x?XGie5j4u2aa=ERO&Ve{N-*pV)G8m1ZF`{Q-+ zC5w9fDkBe*yxY^!>L>@PPCn@v_q<`k<~0Q-Qb0)S6sDoI33zC?a(N|Jx-RzLP+$#Y zfP?SFwR<@5NQSPGyuw=|(IK$FssWEo8X8INy(FCtm6M|>#Irj5#IBwq(NhGJW~Je@ zUjr225fYmz|2$|(TAXK568MU;$DMK{&U*am_}yQw4wVAJ`b~fqbGX>u<0BvXDz0=)?l%+g^t<++2k?xizY*W~ z>icojV~;`Zm>?gYh%jbq2CcoFIQ02X!g^;n>~h!D+;4@N367bXdONWE2`6CD(FX-v zBd7FU-}h=yw*hZ3kk8*p8y|ejC-Lzw|I|SV$AGS%e&gT|eBv8;%wvzjai<*}R^M~s zCvG< zeDT+R3*mjA*Wwx7d%64uX=j{vJkEdWlQ7jk;GjSShG{LXXQP-LNr7{~C$x=?&@3Pa zrnI&bWy==-eHOD_8yT&fsaZ}>ECb4t+(8XeVjpMZIX8u$-ASOgw?RWXG=#X7A;wTyC2?wZr2|Vm;AFUe~^bU0lP%U zncpkbxQVfC{dQb)?d|x<4}Xu#uU;JjNj+gZ8Y_+C!f*SpTpD+P6V2R!$-&w+`k{T| z+4R6LhMgd2s`{NjX3*WW0cTBjLAa1Fu zLop46G>L9cDH0!3jkSPHAgt^`t=rKMnJ|3Wb(|QTC9jDy)NmCH$r-RQaZ4Z6DRu!x zFDD`UQXRsSFJqI2;QR7y5SXu+U6REN!Hk@6n~@0+{qfUyU2{vw z_h=3bk<*e$i+OI4Mx5sO)-V2q@4CNzdb@D+F^A%?Ll4CM2QI-HdlAkS8epCpEscCxXnr89~FOm;=qJ5JJTt~LPVPa$&FlgUVcXL56XIKiY?KQKimCMmne1w$hWq>HjF%}- z$M<1+cnl|;eq?Yre&fG?8@4%#3}^$^{MH|?#hK2~JAB1_jQPEeS{$R?etTFz-Q3q3 zB%&p?Ue5Yvb_uTf;Srqiyr}$^Lzcdykg29(q}bE^nEnnMTt(tPp%m@Fpx6B zte{xw9B7L>BiJjcpv2e;k~BI5#Iqy^5{$8h$t2qrhKUs!-cVARpOk)`=*mO)!|Psm z5#IChF9)e!BUsVy80Chwo1LL`E#7+JV=y!}hN_chewF+7Tkpk!6Ap)+PIUocm2J#Y z0&#+5PPEVL9KaDL9DqwN{|3JErJvw4pZg9TaTCBV0BM6Nk!>w4@CKRVsRnMs&w*%y zznLNx+C{|21cux_{A9Z5yfg8Zx4#7YA9WyV1A~YG6fW}+>1d?=4EqTCR;!_Y{MMUp z59?SxU7DSd{J_>-_}tI_N=1p%xQ#p7tz5bgZ+-V0Lhbo#K7$AyYeWf)6d+1t5$!uI zrP-ZW9Qt~f;BUVZkuYrg&G8*sG4%4Z+5KZZu9Lh{wl`yN8Ivkh|&cg!0+%JHhcpmhhCJY(GS4 z>f7s$zX{3`T-M!*C!BpUUiPwQ;K$a2q*xlko(s6y%9?mF2>}p?XK;4#O~~rNEkE$KK7~z z!*TGD8M+>#FjJRpy24&cB@IxP_}T;0=%^A$Lp&-h{&}V{=!-)_4JCJ9NGyy@$=i68 zJ0KE*m{guY1}2#u;KHJbNIXLs`O#z}24^E!6g54+HZzSD#~2^}@LTb;r@cxB4sl)v z#`*B~|A74$Uxa<;c3@(<7AD9JC)Jys;n=y);;`LIlDTQ(8X3F@nV7gUJ9c8u{7$^~ 
zT`$2aUvmL&z3FcJ?Jqas#=oz|n)@HYj$K0`*fL{N;+M8fw8Ha~qsuYjiX{th+zChE z@n@fc(@r}abA66$~{*h!hlIVcWK`^wUI(nBBP9^nYcqg%d-=jfTUPPhBk#dDIGAyI- zJqos5u3?oIJu1=*uuIC2;!ml%p`o#~umyQweEMq>W(LP_(&LW98{Y6DeDE{h#(W>x zsYO`Q>h8I1XbkW9=5O)&e|>iFAlT548rENV6IPsa2HNKKg?)=MF5<)!8!Y9?03!+I zp^T4Y=8oZg71 zNhF4cjU_s33B5QY8J^ZCqXR6>6Q-l?gG*t6at^sfhjwp!j7FZjTVpMt50@t9fZr2D2T{*GmjKM^gC@qD1i zNYWAtZDJPV3GR=IXF_*sZOCtkH4-L3@07v$gZEAMV?9ThK^R5TNJ*Iuur~B@@yVm1Be)6Z_ zQIYkVh{a9(?$7W0%zLnM`HHY}U#R2|@Of-O#{cHhn_`&*MTrJGao|N#FmC$i?IkJ0` zHHd~T#dn7P5%(sg5(GFEpW)YSHg&e*bD#frJmpC*!_eTcg9WkJnOD(XRsa0PJF#?5 z4_^25GcYhRg65_c7>89i{q;sHIm1cbiJxtalH^vVV2F11-}NB2t-cFY_kLz_ z8trXW+_Qcs-ud-kW2%-1n#|A@O>Uxkx8;j2z5vg=;2D_O+8-B4=hI+8C^c8|`kxeQxiWU2zdSf7a?17%Zga?Z=|EJ+AvXm*_e+GtKLNfsXjDS`y3V#Yy~{SI4! zPyFX4c=0RV6PDVAfGQ%?>T#ER<+5uqzpoR|J?mHukBkKgSQ{P1#>=nA(lZ^i&+84a zB)X`1afmXE^fWX~7NQwJs^mOZf}VtC<` z$@=ep5L<7(Cse_$h3(<1*tlyP@A}$ruw!gIM043c?_G&Lo#3?7kHv>S_V37@T~YNK zIG>8zCurpuYls_bsj(n!ZNpZ_X(B88IHZ0D*qK?OS{Zii&d4lF_5#1{!F%Js=Y47k zV?~t|4a1x^h2{-%l~DGL*~_bL?_Y1PRPprFj>gdVSg0Q0bIdn7 z$-eNEqtUU9(|U=?*)V9PY?F1kG9-+I|kRRLz83jDF?k;9ODm+OyP}R_%ZI-v^7*Dm{I#wO?ipe(TXp9 z|MOV2Vp$LlS;e~&slSNc7E|P@!ol-7w2cFdJe;)Xop%>ENHhaxhbxVaM&6gQRBwdk z1EWVfA)_=$#64ob(1qBU49bW#xohcY6uF8yFVrAr%5wYlca*rLGZ*Wkq|t$&o-c-J zWE_!zVPeL1hDhVs2tN6__n?300DgJ-wV0pz((*05pKoFfhj6Ts4L8Xn@YWKH#(cmQJpnh`pcQ%I>FI$)+=z~vz%(>n{3CN793 zTZu8CMZn`zLnGLH{c4PF+m5Wc>I}~*v^Te4{gwf|jKxu4`-s!zKI zeCccd4`-fyGA6febXXm+`apmzF`p&V+LgxxN%$ELb1Xp@wVjVRB|t9E671mjvq6t~ zw`2Xi%h0D0vsamAd3jQ#8>x%HYiwn6k^a;;-HuX#y8V8P?HItKla4}jPiI*DlQWTR3?{*Hu!Ox*R<$=Peb$}ULL)H-L=$8bq+!n( z**4*M88U~xht_Vwwwvz6v@@zbJ`WoacX#6U2e#mCU-~)L_78>NSuR2dq0PSXz!yII z5nS->r(t}{CdcG9dVT`*n2Ef=xhxECCB~AgqMnuFu(H-ef~Mhney*dzEX51Prow{?C#H@kCgQgu z4QwpBGc!4b&bC&3_XnTEh0lIHuD<9cdx-5XQ0m1xK6i4kGtllH{OX&Tl?{mSDuf> z^SUtL_w8wFM(%S(u3nAdEjzKmL765eHGMU$ocFxsOhFk$8eDlTGk#f`lk{f^&U|Ad zeDO6;e9*{;@W;07z~)=-!1T`kP``YNztp3%fc8p3P0t{sp^@$H+>5A`J|A;PT*dO|T-~ zH;sG!(a6u97@c4=FS#_u^M&tN;DO3r6!HjU8*{b(&Tz(JCbLn7aF#7Z9<@{#te$XR zIC4dhjI$PrA!q^=JQ+s!$H=+t=k`qElfUN9?ZS_K_65A)IsY0Y`@Bjel%Mct`=HVs zcm3bjZp1|&{|?^$yeH!E#~p~ViD_rJAuK)^ePAsfaT0#cfh*B>;Br*jTB6T9&x_5o zBp^&`3ph&>(qNjq5u(I};c23WGT1<5@T$Y&1Ec7__aO{CwAR@RQ^CvP+uqySj-9S= zA97OMm#g-k6_4;|6q9?nGUA#19k0gg-QUcP?IF7p>`f2<lzrir{OUb27gqi@EGUvm+rckPU;a$w1AN=!nq zf_*$}P_wrrje(-GHc%x^zz76p&r?#yX} z-}}jD@s`(o5a0UcA2BD362~)ym+URn&97f^3vRh@9o}%k<8bn!E5oY&@#$$)n%#Yd zhq3AUTT#8|!64mx9K%;T+9RWQrq8V`J_wEC#p}%2?1azR;LoBP%|WfqeK$EYf_9ws009Q9QL9K89jPre<` zc*)ZoT-g!UrW$!ZtzNPQ?0!fNFxsG22kHccG3L&d_61VfM_SNsIo83GM<)NCn5bBg zwg{Dx_)x?-^K2?uDvYjYP)eCZlH3Aaii;EeS0uU^U-V3E?1;;ee)mKp^%>om{s-;K zBZ>i82s|;=4L0!M@F&j`qkKZ_xc~b_^a;0(>bA*RED4&GSk#{^TIaSAOv~+_`mE@T%~( z<^J%LWt)LobdG+z3k>;7^4%Z zNIL#gCAvOjf(bXLgAG-dg7HhF0Vt@3q{0jvS_pmMl!nYy!hT27LqmAOdtZY?4m%KU zc=N}wrGF5;v>2HQ8FqVcm;dyJJ8;FV_v4uk&OF00&WZ&+n4X%!guA`3pp`pFGyL#+ zj5;Y??P^C`UmsfL_Mxe#6U}XHVZn#rNg_-*@5}e)EgZg|a)$17|4^v2GBq$9%F+3( z%FHpehcCYI9X=n&wSM)3>+#*+UxUBg^-$<$FL_lMs$?|jx4U~k?Ti!g=`X$;^A^pC zlDrb@x9AKMCUvHQU@4PSNo`xCY0|W5oCNB`vS7m8TrD^7nv}L_6fBD8W*KLTa{4cV zEnu1dP)rqILqxn@9L%!~FnYZaAgxrAT~X+N>tJocP=>912^g959gmo3ABEBQP;E{@ zoM>e8W&%^&cjBC9Js!Vz%>Md|--o~6dS`$#Rcpf1HcL3xGu7DSB);^A>+s7fSL11? 
z9)PBJ$~W@9K-n+a(NHD9~K`>_3B zIcOE~N_@?i%rkf!My955t;69z`^ybsV}VhNu=LFzQ_6MdS0;Ib)9>E+fj7PcZ+P3w z-J@Nb21jCsr9UrIi#Fs+rnyZTl>Ow)@B&|u-t!j>8%*@HNG!(2b2F}?0~Gj8yN2U$ z2xH7!ZzefwVlrEneCgpTz7H&=t*9upRnVnwv4k*3nmGmdu)A3BKkE?qV$$c>bb2XG z1RkVZT_;S>zg56H4M*=CmCgv=%R+TJyoC zski}`CFNSRVexgv!6?5IUHH$JmYA5rhHb;R=FW$4*-dxgw)I;=DeMjtncNZs`r-8L z_j!f`?pY@txB?&f=o@hI*{3)yF&OBQg~>G&F3LGqUFzDGB$s?-ZXz0T&_GKLOJ*Ri z@Xs}WWCVqi1g?iEg@I4wNE0m%am3pky=M(kK6u$8_bIL^ZcsM5p4oY^1ie>#$o#Ki zbg0`FUX`TLgqG->677amnNB6h(w?2s5R41D!+__J;k~P_HZl>I?2?bY0grp)NqE;K z|B36J1dSLtuEYsfd;WCS_8IR=ui+23-;c}P-#*=4IQo#~IC|A`9Jq8o7WTBGyQ34G z9Zg|`ywtCdUpcnghH>?SG~pO~%dSzZ-#UOh9^8nV?t2(_KC&4*Yqb!23~N#i;sR{y zO-7Jvz}gVC&e|-U3yIL{5V=yk(sKiMh6P4qNu*d=pm|P!)U=JYGxR~2$ zY{V%S5L2ckcG*jk5JP>eg~!xVrmTO<+r?Y(MiTLP25F^bx(7p;nDI$ttBq0VhnDOj zE$O|+Zz45kr_n$CC`fseh^==dmSB@o!=SboBCW}YO-d?W7mPC}h5Qn^<4-#lzxmU* z@U1WX2%q`vx3PX`IP5gnLjpuc zVwNJqocNqrkSIPQPHHl3$ws-6%~sHX5!oE(18E5_%vkAzZm7r*VLc=`oT!RJ5o9en=>zr>D- ziBKoJC5wZRGX!zhInyynevp*jHZqEh?yt6flQf0(l1?}_^y&x2FY#+3V=O|Zbe(4; z|2!YU_iaA?h(qzlx4Z;Tc*rd_M4_=k9VS?dBAvwVn)&n{ZlI5Oec7}a9-9Rz7 zL^XW@SiJ=g)3rWtrO|>3eqa6^O)Y8kVt#$?Cu%VJA^y9xZde9?W^ElcEe`cJaI&Xdl zKJclx;^h}V3*Y+I&+ya#{yo+W42A{G9pIeloNBvRQlTdN-b~CTSXCK*abT&Q3Jr7! z_SBLuJflq%{I-nV(DV@I?30hdD=&U7p75m8P;p}42b8?U=hv+=p9TqNIj7ryj*>k# zBTF6%W^zFfOj!1Zv-VTTTb6Rr>9&QsICQLfp-{{JUTUOu$VARZYd#r1pbF-Xubz48melK=sxrZF?I6Z@{1 zkN5t+x8QZJeIb7Pn=9~(pIwH(-*T5@z*x%J|5Yj^HEH3zq&!Cn0bzHW9vicYsf36K zj69?rOpsJx>R``vpL`Zx_=59s+^I(dx1-lOQ{yx4nN@Ho+>+0kd=aFGEsbGhPQ!ae zX-Eo_-dmzs#x|PPl5}3iJ5i);(Y-llOR3fF?6}q;3_=vZ{+kBc!XJe-r47J_O6p=2 z5sfm-MR$w})e>~k&iZm*aWUZZe<;}jWiuqzC4PTxROvhm%Tkt!L`0^ge6*uCxU_?* zI@mBd1v@r{Idi-4;@3S77rp$Mxc#QPap`aWhAS?=4);IsF#2n;Zn!6~U&X`{B^sb9 z_a;bJev1Px$pK59mYELa@w`#$%XN4r zk9UYRMn(Y{&^9??=k730UfSnsUTZL=-*P?<(=Wjl6quTWK)WC3veed6pHrQ~x}1a654uPku10%E0;QX)Ul#)5Vy=a<}s8 zpYj5o#2-qf%JWi4O9ZZ|pH$`N2+o^VWN84)fZ120M%&OU=YNCsX>ZN2b9S6KQ# zW%Nc8-INF?kzEj{F|%_XhEmUfe3oA18I{$DOyw70``>0p#$10#gW-DcAi%X!yiwF-u-pmn7g>+8_9Tn$rP@dJ^J+;GNk3aE%~FqqU!gwS!z289dq>tZC~bsb!-y{Z4%~L89xNZzzby#s}xOSeYG-qgu18 zr~li7JCRn~r|HN(O zhP;+gCo%fQR9j>Iw_n6z{EDZLk}ycmh(7O7yhv1hY%62>P(aEDQgV)Z=Ap1q)XF5a z3wiwX?6S?m6M*|KQr;+rCx5n#e`LVSG-hCb=mpODC{BhnAcoR#!8tR&gouY?K~6Q= zb4xebsQx6aP)&IvX$EJ8{f}Bb0KDk65iT#k*B$)7^~SLDMuAt8BxgT6NqZd(lXD@` zF-bd9`kv1v?^Kp2xaXPJR0ZPs8Z&;cCH4_pmaJg7Xk?(H(U>$vU}VJMXW_%!+VdtZ zAgH%3iXgg+NV{|EI!l!TW^d~OB^`zkCgSNT8bg* zVlCd8;Cf;h{>?(XPh3(@dv|DZ#8`%+1v7G%v_MiKO4Sm6&&ciQ9}w53=1RIDwTItI zH>ht}1FusFde)z(8pU)hEPSDW6DY6ITAAF1$q&>CJkL9g1f8ajCHqBxj^`@)tYHbt z49_J+ly;`m{eB}T%9mI%*RavTxyWRd&gL5}&*0g7r~ms$iRz-Z>Ci5+I{a>mOszE? 
zU!(6!j31?KEYuWvFf5~wd}UZ#Qw1*GRgDFt2ofFv&d#I78DSZkm7I-m@G5&y8)WWi zI0W!celh4%V&KphWsGX?6zH1pSuwg8gq#GO;t-9FMq}U5rPu&p7qtRwuGruBJ?ic& z0!aTzZyRrs9-^Tf8djFCP5t7AXd~1gWn+W=nngT}!k9doqpTK-mUdCSiF6KcdB8M7 ztK9m!^kafF>}wWADiSb$Yb5zdR%TiC8(m-+Uqja0sB@L1=QX2HkwLvwO%|w^+EP#q zDYRcCU0CsWs` z>m|iPjhTWL<&eMr+Z|4Cs6Wtv$V3#<@CQ^{ABD@>BQbDyS2ABp^63!nvN7eW= zBcbv}N4Y}(JP6!3p>6jQ3FQE-Hx&(S_&$i%EjgCEY5r3^|NDG_Sb zvnU!FpA!jfG#mhm3}Y3HuuLGNw#a=;{hF0Hi3!1QOkY10G#%wk_ zjE_ck+pN6N;B(ncts|e2lpHrux#^bu{48K%!^!JXL-8P0E(4LP!C>51KRX*4sR_E+ zbYB^Se@vQ}XM92B)xjgpVEmnKJGv;@xw?V`0k285C};$I9Uv2%y(A#a zEBQiPfHm-Zg$z!A78fkCB+1jT?4B@Y_C7ZfEQRmX4?0P%6xrwaK_e+Xo9#AhbNAqy zu8Xj1s0F1|eHX-C%WYCj;7+VcIbha|p>uh`0=Aa%Cx?~eO?D?;3yr2MycnL2_hwFD zqBRNvG{WMVC`Dq>g>`sS!45WTn0X4)IhX4!n7HxxxN3z;PLu|kM_KTfFKKj(J)GM}DmSWGt{GC-Dwo?efG)d} z=;=L=wf{FQg$zfQCt<8f6vjJC0>Wm=y(k)+Re+x0S(H9;8Ix@|t9N&BeQa*5V00vg zZA6$xuR01_mMUV5s;tDK6r<;gGO@}1CJRac-cP82`?&okP6fW3NS6}6PY{jZ3`_pZ z7NL^Tc%mq2;&9%zKtP%j^cOo@Fgj<&YIJW)Y9_RwbB;~v+{RdrnOqZ3ygp15 zh9Od?kl&YPJ~JPyX%3u_!5U8EO}n5p1~EU6LCj7xFxrq~NgnQ7NyHBmivmp46k+Bt z0d3qU%|y*Gn_>2UeUx3ub&iITbl0!5@kJvSBO8^GC!jPMz@9M{1d+rQz8R$CxQ5iq z9!kcj_8!2B=OwD@_+VLRZR(8#?o1i0HR@z&L!Kas?x;VsbSN`@0sHg)Y4UMh?f{R# z0hgS?h`_G0*C(xaEWs<97-`1_I-l{m%ov_P34RofUV4_{)+UW1MLJ24uq4tFuTc>0 ze_OgY@~D*CqFu|gonLl8^RZ`%4v10Ac3oEEUtxLNEZamT*-RAhWa#66Yp}wIAId=_ z4pZ2aBGDiXw~=-IXdub%CPpKPU-y~p2!;mS2VPg0(h+$16*H|Z4IWD^Bg~$(h$fW( zKVortZrN)>U-9_`O#%qxL%|Zw7@E$zqsjlqu{+4JcW||6jQ+z~iCw~(Ra#L6v1Yr5 z#=;VN+CuU@KLm;mT}AN+Y#ao>V}(r+i?=gtR53G=-%^ zAVOnZ){q*%`94E zZ-i1K!*q|DNZg}DC((v!p*zF8*d>4O3Voj!?!%8@aUB9i5`vZo85tLr=82(55-J}i z6Gb{BgU*d-0G%*-Begu6iYy;t6J3TW#z5oW1WAhbU^NzuX<|~Ku?h~)C}}M{i1L}; z%~l003V|u431#FTFiFc3e>P@n+?V>pDtcIk0U1zOhW02py{^BrF{++ee4U+LSj>8q z-J?{JLu@IACWn2#f)M7j7# zrLG$hqmh;n3}|AYzW+WLmL_3JBg9EUfupn9 zF5u6CNx_)0NX7=ZcXtAyXe&iKE9amYb`L2V9gSRv^-NCf);v%!3Lk|EG3qr95z?6P zT(#n`lJEJge#0*&l+CfFMyX%rA`Tv@Adj}sSP`&PW%)+Fi1X}-+lkby*c&zgTX zgCQ1vUGSww^D`9&(t{MGc${!?ZZ+PcveAGF)1_Gq**UM#L}zi5tNZYGn#>I#U?bt7 zJx+Xn`ACxjv6>Z)qfr4ri5~9;)--~R4G%qG%fTqAn+{!gpa`FK3$YZ8kz9HuC<+&r z=vvBL^p{k5zeuaY)rIN7v(pwICi&NSVpjIpjNh&3nIEps2$#1SlyDXs!a0Nnd zR<>oAYWZ`qFXc9A=FC&#m)6XBTnT1b%afs~Eec6_4T*qsy+A_BkT+EB+8RP~$vL3m zlfYjZe+DA4`UJNrjx+Z~-dhx)iwF%p-?T&zR~c5zr8D$f9$YyqL#mEST0w}WRj9sa zCJYh3r~F~|krJLYtgeVyR!ugRNFtdO1Ix3bDk&p@JsO(3J9siDBF8C~294fpO~;4M zcF;_ut>r4u98d?~}o*=!WB&BbFK zlzm0bsSJ8`Q(XC^ctj*E38XL~#Na_OR#Q|IGFQQTlIU5G&x(9OqWc=$yed5XXo_bk z36SnY`5Q9Zitic5Fe(T{^CGuq5R1UuFaT04(y${lO){oSq;1kJ8ABG?{VYP2oYuGc ztpnrtJPCHs#CvljybL8Zd_QQ-rF>Z$nF7|Ve{YZ`QP#*VXy8r_f8Zchgp^haH&F#E zqHA)eg6IS#xJ-eQG-H?3t&43j1%{F`(Lpkk6sPvdPSb^%pGZ@d3DW=4dDx3{OMEYE zi@@liE}Cz+w@Yi#jMn&I;<8(k6>n6#W;svXSjx-2S-6d%9&kn8#_AhWP3erjrmLP`Lu%TOx;(57R@>ryc3XB=n3J}9RTw7Yfzk9kd4q)lcoCTyMirnW0e5C{f+geal og1M^^{6^zD7H0zsO858w0Z0QWJJQ=U-2eap07*qoM6N<$g6+QOoB#j- literal 0 HcmV?d00001 diff --git a/gpt_buddy_bot/command_definitions.py b/gpt_buddy_bot/command_definitions.py index 0c2b238..c6b8b37 100644 --- a/gpt_buddy_bot/command_definitions.py +++ b/gpt_buddy_bot/command_definitions.py @@ -31,7 +31,8 @@ def run_on_ui(args): app_path.as_posix(), "--", GeneralConstants.PARSED_ARGS_FILE.as_posix(), - ] + ], + cwd=app_path.parent.as_posix(), ) except (KeyboardInterrupt, EOFError): print("Exiting.") diff --git a/pyproject.toml b/pyproject.toml index 8f0c101..f8be4f2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -25,6 +25,7 @@ numpy = "^1.26.1" openai = "^0.28.1" pandas = "^2.1.2" + pillow = "^10.1.0" pydantic = "^2.4.2" streamlit = "^1.28.0" tiktoken = "^0.5.1" From e3d8e26974ea1a35d654db349b494b7a8d0559e0 
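The cwd=app_path.parent addition above makes the Streamlit process run from the app's own directory, so that files the app references via relative paths (such as the avatar images added by this patch) are found no matter where the user invokes the command. A minimal sketch of the pattern, with a hypothetical script path:

import subprocess
from pathlib import Path

app_path = Path("gpt_buddy_bot/app/app.py")  # hypothetical path, for illustration only
# Launch streamlit from the app's own directory so that relative paths like
# "data/assistant_avatar.png" resolve against the app dir, not the caller's CWD.
subprocess.run(["streamlit", "run", app_path.as_posix()], cwd=app_path.parent.as_posix())

An alternative is to resolve such paths against the package location itself, which a later patch in this series (069) in fact does via GeneralConstants.APP_DIR.
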
From e3d8e26974ea1a35d654db349b494b7a8d0559e0 Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros <paulo.medeiros@smhi.se>
Date: Sun, 5 Nov 2023 23:21:05 +0100
Subject: [PATCH 059/109] Add openai key input to UI

---
 gpt_buddy_bot/__init__.py               |  3 +-
 gpt_buddy_bot/app/app.py                | 14 ++------
 gpt_buddy_bot/app/app_page_templates.py |  8 ++---
 gpt_buddy_bot/app/multipage.py          | 48 +++++++++++++++++++++----
 gpt_buddy_bot/chat.py                   | 25 ++++++++++---
 gpt_buddy_bot/chat_configs.py           | 10 +++---
 6 files changed, 75 insertions(+), 33 deletions(-)

diff --git a/gpt_buddy_bot/__init__.py b/gpt_buddy_bot/__init__.py
index b6a3ae8..02bf216 100644
--- a/gpt_buddy_bot/__init__.py
+++ b/gpt_buddy_bot/__init__.py
@@ -22,7 +22,8 @@ class GeneralConstants:
     PACKAGE_TMPDIR.mkdir(parents=True, exist_ok=True)
     PACKAGE_CACHE_DIRECTORY.mkdir(parents=True, exist_ok=True)
+    OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")

 # Initialize the OpenAI API client
-openai.api_key = os.environ["OPENAI_API_KEY"]
+openai.api_key = GeneralConstants.OPENAI_API_KEY
diff --git a/gpt_buddy_bot/app/app.py b/gpt_buddy_bot/app/app.py
index 46afcb3..f611459 100644
--- a/gpt_buddy_bot/app/app.py
+++ b/gpt_buddy_bot/app/app.py
@@ -1,6 +1,4 @@
 """Entrypoint for the package's UI."""
-import streamlit as st
-from app_page_templates import ChatBotPage
 from multipage import MultipageChatbotApp

 from gpt_buddy_bot import GeneralConstants
@@ -8,17 +6,9 @@ def run_app():
     """Create and run an instance of the package's app."""
-    app = MultipageChatbotApp(
+    MultipageChatbotApp(
         page_title=GeneralConstants.APP_NAME, page_icon=":speech_balloon:"
-    )
-    with st.sidebar:
-        tab1, tab2 = st.tabs(["Chats", "Settings"])
-        sidebar_tabs = {"chats": tab1, "settings": tab2}
-        with tab1:
-            # Create a new chat upon init or button press
-            if st.button(label=":heavy_plus_sign: New Chat") or not app.pages:
-                app.add_page()
-    app.render(sidebar_tabs=sidebar_tabs)
+    ).render()


 if __name__ == "__main__":
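The switch from os.environ[...] to os.environ.get(...) in __init__.py above makes a missing key non-fatal at import time, so the key can instead be supplied through the UI text input added further down in this patch. A small standalone illustration of the difference (not the package's code):

import os

# os.environ["OPENAI_API_KEY"] raises KeyError when the variable is unset;
# os.environ.get("OPENAI_API_KEY") returns None instead, deferring the
# failure to the first actual API call (or to a UI prompt for the key).
api_key = os.environ.get("OPENAI_API_KEY")
if api_key is None:
    print("No API key in the environment; ask the user for one in the UI.")
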
diff --git a/gpt_buddy_bot/app/app_page_templates.py b/gpt_buddy_bot/app/app_page_templates.py
index 00a07fd..a9d22be 100644
--- a/gpt_buddy_bot/app/app_page_templates.py
+++ b/gpt_buddy_bot/app/app_page_templates.py
@@ -124,17 +124,13 @@ def render(self):
         if self.chat_history:
             self.render_chat_history()
         else:
-            initial_bot_greetings = (
-                f"Hi! I'm {self.chat_obj.assistant_name}. How can I help you today?"
-            )
-            with st.chat_message("assistant", avatar=self.avatars["assistant"]):
-                st.markdown(initial_bot_greetings)
+            with st.chat_message("assistant", avatar=self.avatars["assistant"]):
+                st.markdown(self.chat_obj.initial_greeting)

             self.chat_history.append(
                 {
                     "role": "assistant",
                     "name": self.chat_obj.assistant_name,
-                    "content": initial_bot_greetings,
+                    "content": self.chat_obj.initial_greeting,
                 }
             )
diff --git a/gpt_buddy_bot/app/multipage.py b/gpt_buddy_bot/app/multipage.py
index 3dc7c37..d8717c2 100644
--- a/gpt_buddy_bot/app/multipage.py
+++ b/gpt_buddy_bot/app/multipage.py
@@ -2,9 +2,11 @@
 import contextlib
 from abc import ABC, abstractmethod

+import openai
 import streamlit as st
 from app_page_templates import AppPage, ChatBotPage

+from gpt_buddy_bot import GeneralConstants
 from gpt_buddy_bot.chat import Chat
 from gpt_buddy_bot.chat_configs import ChatOptions

@@ -73,12 +75,32 @@ class MultipageChatbotApp(AbstractMultipageApp):
+    def init_openai_client(self):
+        # Initialize the OpenAI API client
+        placeholder = (
+            "OPENAI_API_KEY detected"
+            if GeneralConstants.OPENAI_API_KEY
+            else "You need this to use the chat"
+        )
+        openai_api_key = st.text_input(
+            label="OpenAI API Key (required)",
+            placeholder=placeholder,
+            key="openai_api_key",
+            type="password",
+            help="[OpenAI API authentication key](https://openai.com/pricing)",
+        )
+        openai.api_key = (
+            openai_api_key if openai_api_key else GeneralConstants.OPENAI_API_KEY
+        )
+        if not openai.api_key:
+            st.write(":red[You need to provide a key to use the chat]")
+
     def add_page(self, selected: bool = True):
         return super().add_page(page=ChatBotPage(), selected=selected)

-    def handle_ui_page_selection(self, sidebar_tabs: dict):
+    def handle_ui_page_selection(self):
         """Control page selection in the UI sidebar."""
-        with sidebar_tabs["chats"]:
+        with self.sidebar_tabs["chats"]:
             for page in self.pages.values():
                 col1, col2 = st.columns([0.8, 0.2])
                 with col1:
@@ -100,7 +122,7 @@ def handle_ui_page_selection(self, sidebar_tabs: dict):
                         help="Delete this chat.",
                     )

-        with sidebar_tabs["settings"]:
+        with self.sidebar_tabs["settings"]:
             caption = f"\u2699\uFE0F Settings for Chat #{self.selected_page.page_number}"
             if self.selected_page.title != self.selected_page._page_title:
                 caption += f": {self.selected_page.title}"
@@ -117,6 +139,7 @@ def handle_ui_page_selection(self, sidebar_tabs: dict):
             for field_name, field in model_fiedls.items():
                 title = field_name.replace("_", " ").title()
                 choices = ChatOptions.get_allowed_values(field=field_name)
+                description = ChatOptions.get_description(field=field_name)
                 field_type = ChatOptions.get_type(field=field_name)

                 element_key = f"{field_name}-pg-{self.selected_page.page_id}-ui-element"
@@ -129,11 +152,11 @@ def handle_ui_page_selection(self, sidebar_tabs: dict):
                         else choices.index(last_field_value)
                     )
                     new_field_value = st.selectbox(
-                        title, choices, key=element_key, index=index
+                        title, choices, key=element_key, index=index, help=description
                     )
                 elif field_type == str:
                     new_field_value = st.text_input(
-                        title, value=last_field_value, key=element_key
+                        title, value=last_field_value, key=element_key, help=description
                     )
                 elif field_type in [int, float]:
                     step = 1 if field_type == int else 0.01
@@ -156,13 +179,14 @@ def handle_ui_page_selection(self, sidebar_tabs: dict):
                         max_value=bounds[1],
                         step=step,
                         key=element_key,
+                        help=description,
                     )
                 elif field_type in (list, tuple):
                     new_field_value = st.text_area(
                         title,
                         value="\n".join(last_field_value),
                         key=element_key,
-                        help="Directives that the AI should follow.",
+                        help=description,
                     )
                     new_field_value = 
tuple(new_field_value.split("\n")) else: @@ -175,3 +199,15 @@ def handle_ui_page_selection(self, sidebar_tabs: dict): new_chat_configs = current_chat_configs.model_dump() new_chat_configs.update(updates_to_chat_configs) self.selected_page.chat_obj = Chat.from_dict(new_chat_configs) + + def render(self, **kwargs): + with st.sidebar: + self.init_openai_client() + # Create a sidebar with tabs for chats and settings + tab1, tab2 = st.tabs(["Chats", "Settings"]) + self.sidebar_tabs = {"chats": tab1, "settings": tab2} + with tab1: + # Create a new chat upon init or button press + if st.button(label=":heavy_plus_sign: New Chat") or not self.pages: + self.add_page() + return super().render(**kwargs) diff --git a/gpt_buddy_bot/chat.py b/gpt_buddy_bot/chat.py index b926d54..57799a6 100644 --- a/gpt_buddy_bot/chat.py +++ b/gpt_buddy_bot/chat.py @@ -83,6 +83,10 @@ def from_cli_args(cls, cli_args): } return cls.from_dict(chat_opts) + @property + def initial_greeting(self): + return f"Hello! I'm {self.assistant_name}. How can I assist you today?" + def respond_user_prompt(self, prompt: str): yield from self._respond_prompt(prompt=prompt, role="user") @@ -90,6 +94,15 @@ def respond_system_prompt(self, prompt: str): yield from self._respond_prompt(prompt=prompt, role="system") def yield_response_from_msg(self, prompt_as_msg: dict): + """Yield response from a prompt.""" + try: + yield from self._yield_response_from_msg(prompt_as_msg=prompt_as_msg) + except openai.error.AuthenticationError: + yield "Sorry, I'm having trouble authenticating with OpenAI. " + yield "Please check the validity of your API key and try again." + + def _yield_response_from_msg(self, prompt_as_msg: dict): + """Yield response from a prompt. Assumes that OpenAI authentication works.""" role = prompt_as_msg["role"] prompt = prompt_as_msg["content"] @@ -111,7 +124,9 @@ def yield_response_from_msg(self, prompt_as_msg: dict): # Make API request and yield response chunks full_reply_content = "" - for chunk in _make_api_call(conversation=contextualised_prompt, model=self.model): + for chunk in _make_api_chat_completion_call( + conversation=contextualised_prompt, model=self.model + ): full_reply_content += chunk yield chunk @@ -131,12 +146,14 @@ def yield_response_from_msg(self, prompt_as_msg: dict): ) def start(self): + """Start the chat.""" + print(f"{self.assistant_name}> {self.initial_greeting}\n") try: while True: - question = input(f"{self.username}: ").strip() + question = input(f"{self.username}> ").strip() if not question: continue - print(f"{self.assistant_name}: ", end="", flush=True) + print(f"{self.assistant_name}> ", end="", flush=True) for chunk in self.respond_user_prompt(prompt=question): print(chunk, end="", flush=True) print() @@ -155,7 +172,7 @@ def _respond_prompt(self, prompt: str, role: str): yield from self.yield_response_from_msg(prompt_as_msg) -def _make_api_call(conversation: list, model: str): +def _make_api_chat_completion_call(conversation: list, model: str): success = False while not success: try: diff --git a/gpt_buddy_bot/chat_configs.py b/gpt_buddy_bot/chat_configs.py index 61e8e08..ff9aa00 100644 --- a/gpt_buddy_bot/chat_configs.py +++ b/gpt_buddy_bot/chat_configs.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 """Registration and validation of options.""" import argparse +import os import types import typing from functools import reduce @@ -8,7 +9,8 @@ from pathlib import Path from typing import Literal, Optional, get_args, get_origin -from pydantic import BaseModel, Field +import openai +from pydantic import 
BaseModel, Field, SecretStr

 from gpt_buddy_bot import GeneralConstants

@@ -126,7 +128,7 @@ class ChatOptions(OpenAiApiCallOptions):
         default="text-embedding-ada-002",
         description="OpenAI API model to use for embedding",
     )
-    context_file_path: Path = Field(
+    context_file_path: Optional[Path] = Field(
         default=None,
         description="Path to the file to read/write the chat context from/to.",
     )
@@ -138,10 +140,10 @@ class ChatOptions(OpenAiApiCallOptions):
         ),
         description="Initial instructions for the AI",
     )
-    token_usage_db_path: Path = Field(
+    token_usage_db_path: Optional[Path] = Field(
         default=GeneralConstants.TOKEN_USAGE_DATABASE,
         description="Path to the token usage database",
     )
-    report_accounting_when_done: bool = Field(
+    report_accounting_when_done: Optional[bool] = Field(
         default=False, description="Report estimated costs when done with the chat."
     )

From 0f47a2c09cdf3493e7d908f48a585209a351ea94 Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros <paulo.medeiros@smhi.se>
Date: Sun, 5 Nov 2023 23:57:56 +0100
Subject: [PATCH 060/109] Pass all api-related chat_configs to api call

---
 gpt_buddy_bot/chat.py         | 16 ++++++++++------
 gpt_buddy_bot/chat_configs.py |  3 +++
 2 files changed, 13 insertions(+), 6 deletions(-)

diff --git a/gpt_buddy_bot/chat.py b/gpt_buddy_bot/chat.py
index 57799a6..433d1e5 100644
--- a/gpt_buddy_bot/chat.py
+++ b/gpt_buddy_bot/chat.py
@@ -5,7 +5,7 @@
 import openai

 from . import GeneralConstants
-from .chat_configs import ChatOptions
+from .chat_configs import ChatOptions, OpenAiApiCallOptions
 from .chat_context import BaseChatContext, EmbeddingBasedChatContext
 from .tokens import TokenUsageDatabase, get_n_tokens

@@ -125,7 +125,7 @@ def _yield_response_from_msg(self, prompt_as_msg: dict):
         # Make API request and yield response chunks
         full_reply_content = ""
         for chunk in _make_api_chat_completion_call(
-            conversation=contextualised_prompt, model=self.model
+            conversation=contextualised_prompt, chat_obj=self
         ):
             full_reply_content += chunk
             yield chunk
@@ -172,16 +172,20 @@ def _respond_prompt(self, prompt: str, role: str):
     yield from self.yield_response_from_msg(prompt_as_msg)


-def _make_api_chat_completion_call(conversation: list, model: str):
+def _make_api_chat_completion_call(conversation: list, chat_obj: Chat):
     success = False
+
+    api_call_args = {}
+    for field in OpenAiApiCallOptions.model_fields:
+        if getattr(chat_obj, field) is not None:
+            api_call_args[field] = getattr(chat_obj, field)
+
     while not success:
         try:
             for line in openai.ChatCompletion.create(
-                model=model,
                 messages=conversation,
-                request_timeout=10,
                 stream=True,
-                temperature=0.8,
+                **api_call_args,
             ):
                 reply_chunk = getattr(line.choices[0].delta, "content", "")
                 yield reply_chunk
diff --git a/gpt_buddy_bot/chat_configs.py b/gpt_buddy_bot/chat_configs.py
index ff9aa00..933a18d 100644
--- a/gpt_buddy_bot/chat_configs.py
+++ b/gpt_buddy_bot/chat_configs.py
@@ -111,6 +111,9 @@ class OpenAiApiCallOptions(BaseConfigModel):
     top_p: Optional[float] = Field(
         default=None, ge=0.0, le=1.0, description=f"See <{_openai_url}-top_p>"
     )
+    request_timeout: Optional[float] = Field(
+        default=10.0, gt=0.0, description="Timeout for API requests in seconds"
+    )


 class ChatOptions(OpenAiApiCallOptions):
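The refactored _make_api_chat_completion_call above no longer hardcodes model, request_timeout and temperature; it forwards every non-None field of OpenAiApiCallOptions as a keyword argument. A reduced sketch of that pattern with a stand-in pydantic model (hypothetical names, not the package's own classes):

from typing import Optional

from pydantic import BaseModel


class ApiCallOptions(BaseModel):  # stand-in for OpenAiApiCallOptions
    model: str = "gpt-3.5-turbo"
    temperature: Optional[float] = None
    request_timeout: Optional[float] = 10.0


opts = ApiCallOptions()
# Forward only the explicitly-set fields as API kwargs, as the patch does.
api_call_args = {
    field: getattr(opts, field)
    for field in ApiCallOptions.model_fields
    if getattr(opts, field) is not None
}
print(api_call_args)  # {'model': 'gpt-3.5-turbo', 'request_timeout': 10.0}

This keeps the API call in sync with whatever subset of options the user actually set, instead of a hardcoded parameter list.
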
From 028924a7b6709d0d3a8c556d1acd459384feb165 Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros <paulo.medeiros@smhi.se>
Date: Mon, 6 Nov 2023 09:52:11 +0100
Subject: [PATCH 061/109] Add max n_attempts for connection to API

---
 gpt_buddy_bot/app/app_page_templates.py | 15 ++++++-----
 gpt_buddy_bot/chat.py                   | 30 +++++++++++++++++------
 gpt_buddy_bot/chat_configs.py           |  9 +++++---
 3 files changed, 38 insertions(+), 16 deletions(-)

diff --git a/gpt_buddy_bot/app/app_page_templates.py b/gpt_buddy_bot/app/app_page_templates.py
index a9d22be..27158ed 100644
--- a/gpt_buddy_bot/app/app_page_templates.py
+++ b/gpt_buddy_bot/app/app_page_templates.py
@@ -9,7 +9,7 @@
 from PIL import Image

 from gpt_buddy_bot import GeneralConstants
-from gpt_buddy_bot.chat import Chat
+from gpt_buddy_bot.chat import CannotConnectToApiError, Chat
 from gpt_buddy_bot.chat_configs import ChatOptions

 _ASSISTANT_AVATAR_FILE_PATH = Path("data/assistant_avatar.png")
@@ -142,7 +142,6 @@ def render(self):
             # Display user message in chat message container
             with st.chat_message("user", avatar=self.avatars["user"]):
                 st.markdown(prompt)
-
             self.chat_history.append(
                 {"role": "user", "name": self.chat_obj.username, "content": prompt}
             )
@@ -152,10 +151,14 @@ def render(self):
                 with st.empty():
                     st.markdown("▌")
                     full_response = ""
-                    for chunk in self.chat_obj.respond_user_prompt(prompt):
-                        full_response += chunk
-                        st.markdown(full_response + "▌")
-                    st.markdown(full_response)
+                    try:
+                        for chunk in self.chat_obj.respond_user_prompt(prompt):
+                            full_response += chunk
+                            st.markdown(full_response + "▌")
+                    except CannotConnectToApiError:
+                        full_response = self.chat_obj._auth_error_msg
+                    finally:
+                        st.markdown(full_response)

             self.chat_history.append(
                 {
diff --git a/gpt_buddy_bot/chat.py b/gpt_buddy_bot/chat.py
index 433d1e5..4d8abce 100644
--- a/gpt_buddy_bot/chat.py
+++ b/gpt_buddy_bot/chat.py
@@ -10,6 +10,10 @@
 from .tokens import TokenUsageDatabase, get_n_tokens


+class CannotConnectToApiError(Exception):
+    """Error raised when the package cannot connect to the OpenAI API."""
+
+
 class Chat:
     def __init__(self, configs: ChatOptions):
         self.id = uuid.uuid4()
@@ -97,9 +101,8 @@ def yield_response_from_msg(self, prompt_as_msg: dict):
         """Yield response from a prompt."""
         try:
             yield from self._yield_response_from_msg(prompt_as_msg=prompt_as_msg)
-        except openai.error.AuthenticationError:
-            yield "Sorry, I'm having trouble authenticating with OpenAI. "
-            yield "Please check the validity of your API key and try again."
+        except openai.error.AuthenticationError as error:
+            raise CannotConnectToApiError(self._auth_error_msg) from error

     def _yield_response_from_msg(self, prompt_as_msg: dict):
         """Yield response from a prompt. Assumes that OpenAI authentication works."""
@@ -171,6 +174,13 @@ def _respond_prompt(self, prompt: str, role: str):
         prompt_as_msg = {"role": role, "name": role2name[role], "content": prompt}
         yield from self.yield_response_from_msg(prompt_as_msg)

+    @property
+    def _auth_error_msg(self):
+        return (
+            "Sorry, I'm having trouble authenticating with OpenAI. "
+            + "Please check the validity of your API key and try again."
+        )
+

 def _make_api_chat_completion_call(conversation: list, chat_obj: Chat):
     success = False
@@ -180,12 +190,13 @@ def _make_api_chat_completion_call(conversation: list, chat_obj: Chat):
         if getattr(chat_obj, field) is not None:
             api_call_args[field] = getattr(chat_obj, field)

+    n_attempts = 0
+    max_n_att = chat_obj.api_connection_max_n_attempts
     while not success:
+        n_attempts += 1
         try:
             for line in openai.ChatCompletion.create(
-                messages=conversation,
-                stream=True,
-                **api_call_args,
+                messages=conversation, stream=True, **api_call_args
             ):
                 reply_chunk = getattr(line.choices[0].delta, "content", "")
                 yield reply_chunk
         except (
             openai.error.ServiceUnavailableError,
             openai.error.Timeout,
         ) as error:
-            print(f"\n > {error}. Retrying...")
+            if n_attempts < max_n_att:
+                print(
+                    f"\n > {error}. Making new attempt ({n_attempts+1}/{max_n_att})..."
+                )
+            else:
+                raise CannotConnectToApiError(chat_obj._auth_error_msg) from error
         else:
             success = True
diff --git a/gpt_buddy_bot/chat_configs.py b/gpt_buddy_bot/chat_configs.py
index 933a18d..2c6d4d3 100644
--- a/gpt_buddy_bot/chat_configs.py
+++ b/gpt_buddy_bot/chat_configs.py
@@ -1,7 +1,6 @@
 #!/usr/bin/env python3
 """Registration and validation of options."""
 import argparse
-import os
 import types
 import typing
 from functools import reduce
@@ -9,8 +8,7 @@
 from pathlib import Path
 from typing import Literal, Optional, get_args, get_origin

-import openai
-from pydantic import BaseModel, Field, SecretStr
+from pydantic import BaseModel, Field

 from gpt_buddy_bot import GeneralConstants

@@ -147,6 +145,11 @@ class ChatOptions(OpenAiApiCallOptions):
         default=GeneralConstants.TOKEN_USAGE_DATABASE,
         description="Path to the token usage database",
     )
+    api_connection_max_n_attempts: int = Field(
+        default=5,
+        gt=0,
+        description="Maximum number of attempts to connect to the OpenAI API",
+    )
     report_accounting_when_done: Optional[bool] = Field(
         default=False, description="Report estimated costs when done with the chat."
     )
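The loop above retries failed API calls up to api_connection_max_n_attempts times before giving up with CannotConnectToApiError. Its control flow, reduced to a skeleton (illustrative only; call_api stands in for the OpenAI request, and the built-in exceptions stand in for the openai.error ones):

def call_with_retries(call_api, max_n_attempts: int = 5):
    # Sketch of the retry pattern used above, under simplified assumptions.
    n_attempts = 0
    while True:
        n_attempts += 1
        try:
            return call_api()
        except TimeoutError as error:  # stands in for openai.error.Timeout & friends
            if n_attempts >= max_n_attempts:
                raise ConnectionError("Could not reach the API") from error
            print(f"{error}. Making new attempt ({n_attempts + 1}/{max_n_attempts})...")
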
)

From b3ef6a506894570bb8ddb8ef0c18b07768aa0ed0 Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros <paulo.medeiros@smhi.se>
Date: Mon, 6 Nov 2023 12:35:45 +0100
Subject: [PATCH 062/109] Add unit test for Chat

---
 gpt_buddy_bot/app/multipage.py |  2 +-
 gpt_buddy_bot/chat.py          |  4 +-
 pyproject.toml                 | 20 +++++++++-
 tests/conftest.py              | 68 ++++++++++++++++++++++++++++++++++
 tests/unit/test_chat.py        |  6 +++
 5 files changed, 95 insertions(+), 5 deletions(-)
 create mode 100644 tests/conftest.py
 create mode 100644 tests/unit/test_chat.py

diff --git a/gpt_buddy_bot/app/multipage.py b/gpt_buddy_bot/app/multipage.py
index d8717c2..2550ecf 100644
--- a/gpt_buddy_bot/app/multipage.py
+++ b/gpt_buddy_bot/app/multipage.py
@@ -87,7 +87,7 @@ def init_openai_client(self):
             placeholder=placeholder,
             key="openai_api_key",
             type="password",
-            help="[OpenAI API authentication key](https://openai.com/pricing)",
+            help="[OpenAI API auth key](https://platform.openai.com/account/api-keys)",
         )
         openai.api_key = (
             openai_api_key if openai_api_key else GeneralConstants.OPENAI_API_KEY
diff --git a/gpt_buddy_bot/chat.py b/gpt_buddy_bot/chat.py
index 4d8abce..f51b389 100644
--- a/gpt_buddy_bot/chat.py
+++ b/gpt_buddy_bot/chat.py
@@ -195,10 +195,10 @@ def _make_api_chat_completion_call(conversation: list, chat_obj: Chat):
     while not success:
         n_attempts += 1
         try:
-            for line in openai.ChatCompletion.create(
+            for completion_chunk in openai.ChatCompletion.create(
                 messages=conversation, stream=True, **api_call_args
             ):
-                reply_chunk = getattr(line.choices[0].delta, "content", "")
+                reply_chunk = getattr(completion_chunk.choices[0].delta, "content", "")
                 yield reply_chunk
         except (
diff --git a/pyproject.toml b/pyproject.toml
index f8be4f2..cd0516e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -31,14 +31,21 @@
     tiktoken = "^0.5.1"

 [tool.poetry.group.dev.dependencies]
+    ipython = "^8.16.1"
+
+[tool.poetry.group.linting.dependencies]
     black = "^23.10.1"
     flakeheaven = "^3.3.0"
-    ipython = "^8.16.1"
     isort = "^5.12.0"
     pydoclint = "^0.3.8"
-    pytest = "^7.4.3"
     ruff = "^0.1.3"

+[tool.poetry.group.test.dependencies]
+    pytest = "^7.4.3"
+    pytest-cov = "^4.1.0"
+    pytest-mock = "^3.12.0"
+    python-lorem = "^1.3.0.post1"
+
 ##################
 # Linter configs #
 ##################
@@ -49,3 +56,12 @@
 [tool.isort]
     line_length = 90
     profile = "black"
+
+    ##################
+    # pytest configs #
+    ##################
+
+[tool.pytest.ini_options]
+    addopts = "-v --failed-first --cov-report=term-missing --cov-report=term:skip-covered --cov-report=xml:.coverage.xml --cov=./"
+    log_cli_level = "INFO"
+    testpaths = ["tests/smoke", "tests/unit"]
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000..7dcad89
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,68 @@
+import os
+
+import lorem
+import openai
+import pytest
+
+from gpt_buddy_bot.chat import Chat
+from gpt_buddy_bot.chat_configs import ChatOptions
+
+
+@pytest.fixture(scope="session", autouse=True)
+def set_env():
+    # Make sure we don't consume our tokens in tests
+    os.environ["OPENAI_API_KEY"] = "INVALID_API_KEY"
+    openai.api_key = os.environ["OPENAI_API_KEY"]
+
+
+@pytest.fixture(autouse=True)
+def openai_api_request_mockers(mocker):
+    """Mockers for OpenAI API requests. We don't want to consume our tokens in tests."""
+
+    def _mock_openai_ChatCompletion_create(*args, **kwargs):
+        """Mock `openai.ChatCompletion.create`. Yield from lorem ipsum instead."""
+        completion_chunk = type("CompletionChunk", (), {})
+        completion_chunk_choice = type("CompletionChunkChoice", (), {})
+        completion_chunk_choice_delta = type("CompletionChunkChoiceDelta", (), {})
+        for word in lorem.get_paragraph().split():
+            completion_chunk_choice_delta.content = word + " "
+            completion_chunk_choice.delta = completion_chunk_choice_delta
+            completion_chunk.choices = [completion_chunk_choice]
+            yield completion_chunk
+
+    def _mock_openai_Embedding_create(*args, **kwargs):
+        """Mock `openai.Embedding.create`. Yield from lorem ipsum instead."""
+        embedding_request = {
+            "data": [{"embedding": [0.0] * 512}],
+            "usage": {"prompt_tokens": 0, "total_tokens": 0},
+        }
+        return embedding_request
+
+    mocker.patch("openai.ChatCompletion.create", new=_mock_openai_ChatCompletion_create)
+    mocker.patch("openai.Embedding.create", new=_mock_openai_Embedding_create)
+
+
+@pytest.fixture(autouse=True)
+def input_builtin_mocker(mocker, user_input):
+    """Mock the `input` builtin. Raise `KeyboardInterrupt` after the first call."""
+
+    def _mock_input(*args, **kwargs):
+        try:
+            _mock_input.execution_counter += 1
+        except AttributeError:
+            _mock_input.execution_counter = 0
+        if _mock_input.execution_counter > 0:
+            raise KeyboardInterrupt
+        return user_input
+
+    mocker.patch("builtins.input", new=lambda _: _mock_input(user_input=user_input))
+
+
+@pytest.fixture
+def default_chat_configs():
+    return ChatOptions()
+
+
+@pytest.fixture()
+def default_chat(default_chat_configs):
+    return Chat(configs=default_chat_configs)
diff --git a/tests/unit/test_chat.py b/tests/unit/test_chat.py
new file mode 100644
index 0000000..80c7a35
--- /dev/null
+++ b/tests/unit/test_chat.py
@@ -0,0 +1,6 @@
+import pytest
+
+
+@pytest.mark.parametrize("user_input", ("Hi!", ""), ids=("regular-input", "empty-input"))
+def test_terminal_chat(default_chat):
+    default_chat.start()
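The _mock_openai_ChatCompletion_create fixture above fabricates objects that mimic the streamed completion chunks, so that getattr(chunk.choices[0].delta, "content", "") in chat.py keeps working without real API calls. A condensed sketch of how such a fake stream can be built and consumed (illustrative only, using types.SimpleNamespace instead of the dynamically created classes):

from types import SimpleNamespace


def fake_completion_stream(text: str):
    # Yield objects shaped like openai's streamed chat-completion chunks.
    for word in text.split():
        delta = SimpleNamespace(content=word + " ")
        yield SimpleNamespace(choices=[SimpleNamespace(delta=delta)])


reply = "".join(
    getattr(chunk.choices[0].delta, "content", "")
    for chunk in fake_completion_stream("lorem ipsum dolor")
)
print(reply)  # "lorem ipsum dolor "
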
From cb1cbc94bc009fba637294bc6c8894b825a9fe20 Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros <paulo.medeiros@smhi.se>
Date: Mon, 6 Nov 2023 15:16:21 +0100
Subject: [PATCH 063/109] Add some more tests

---
 gpt_buddy_bot/command_definitions.py |  4 ++--
 pyproject.toml                       |  1 +
 tests/conftest.py                    | 31 ++++++++++++++++++-----
 tests/smoke/test_commands.py         | 26 +++++++++++++++++++
 tests/unit/test_chat.py              | 34 +++++++++++++++++++++++++-
 5 files changed, 87 insertions(+), 9 deletions(-)
 create mode 100644 tests/smoke/test_commands.py

diff --git a/gpt_buddy_bot/command_definitions.py b/gpt_buddy_bot/command_definitions.py
index c6b8b37..cec6f5d 100644
--- a/gpt_buddy_bot/command_definitions.py
+++ b/gpt_buddy_bot/command_definitions.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 import pickle
-from subprocess import run
+import subprocess

 from . 
import GeneralConstants from .chat import Chat @@ -24,7 +24,7 @@ def run_on_ui(args): app_path = GeneralConstants.PACKAGE_DIRECTORY / "app" / "app.py" try: - run( + subprocess.run( [ "streamlit", "run", diff --git a/pyproject.toml b/pyproject.toml index cd0516e..2531aae 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,6 +49,7 @@ ################## # Linter configs # ################## + pytest-order = "^1.1.0" [tool.black] line-length = 90 diff --git a/tests/conftest.py b/tests/conftest.py index 7dcad89..74aec0b 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -8,6 +8,18 @@ from gpt_buddy_bot.chat_configs import ChatOptions +# Register markers +def pytest_configure(config): + config.addinivalue_line( + "markers", + "no_chat_completion_create_mocking: mark test to not mock openai.ChatCompletion.create", + ) + config.addinivalue_line( + "markers", + "no_embedding_create_mocking: mark test to not mock openai.Embedding.create", + ) + + @pytest.fixture(scope="session", autouse=True) def set_env(): # Make sure we don't consume our tokens in tests @@ -16,7 +28,7 @@ def set_env(): @pytest.fixture(autouse=True) -def openai_api_request_mockers(mocker): +def openai_api_request_mockers(request, mocker): """Mockers for OpenAI API requests. We don't want to consume our tokens in tests.""" def _mock_openai_ChatCompletion_create(*args, **kwargs): @@ -38,11 +50,15 @@ def _mock_openai_Embedding_create(*args, **kwargs): } return embedding_request - mocker.patch("openai.ChatCompletion.create", new=_mock_openai_ChatCompletion_create) - mocker.patch("openai.Embedding.create", new=_mock_openai_Embedding_create) + if "no_chat_completion_create_mocking" not in request.keywords: + mocker.patch( + "openai.ChatCompletion.create", new=_mock_openai_ChatCompletion_create + ) + if "no_embedding_create_mocking" not in request.keywords: + mocker.patch("openai.Embedding.create", new=_mock_openai_Embedding_create) -@pytest.fixture(autouse=True) +@pytest.fixture() def input_builtin_mocker(mocker, user_input): """Mock the `input` builtin. 
Raise `KeyboardInterrupt` after the first call.""" @@ -59,8 +75,11 @@ def _mock_input(*args, **kwargs): @pytest.fixture -def default_chat_configs(): - return ChatOptions() +def default_chat_configs(tmp_path): + return ChatOptions( + token_usage_db_path=tmp_path / "token_usage.db", # Don't use the regular db file + context_file_path=tmp_path / "context.json", # Don't use our context files + ) @pytest.fixture() diff --git a/tests/smoke/test_commands.py b/tests/smoke/test_commands.py new file mode 100644 index 0000000..b3d17fa --- /dev/null +++ b/tests/smoke/test_commands.py @@ -0,0 +1,26 @@ +import pytest + +from gpt_buddy_bot.__main__ import main +from gpt_buddy_bot.argparse_wrapper import get_parsed_args + + +@pytest.mark.parametrize("user_input", ("Hi!", ""), ids=("regular-input", "empty-input")) +def test_terminal_command(input_builtin_mocker): + args = ["terminal"] + main(args) + + +def test_accounting_command(): + args = ["accounting"] + main(args) + + +def test_default_command(mocker): + def _mock_subprocess_run(*args, **kwargs): + raise KeyboardInterrupt("Mocked KeyboardInterrupt") + + args = get_parsed_args(argv=[]) + assert args.command == "ui" + + mocker.patch("subprocess.run", new=_mock_subprocess_run) + main(argv=[]) diff --git a/tests/unit/test_chat.py b/tests/unit/test_chat.py index 80c7a35..dbf1251 100644 --- a/tests/unit/test_chat.py +++ b/tests/unit/test_chat.py @@ -1,6 +1,38 @@ +import openai import pytest +from gpt_buddy_bot.chat import CannotConnectToApiError + + +@pytest.mark.order(1) +@pytest.mark.no_chat_completion_create_mocking +@pytest.mark.parametrize("user_input", ("regular-input",)) +def test_testbed_doesnt_actually_connect_to_openai(default_chat, input_builtin_mocker): + with pytest.raises(CannotConnectToApiError, match=default_chat._auth_error_msg): + try: + default_chat.start() + except CannotConnectToApiError: + raise + else: + pytest.exit("Refuse to continue: Testbed is trying to connect to OpenAI API!") + @pytest.mark.parametrize("user_input", ("Hi!", ""), ids=("regular-input", "empty-input")) -def test_terminal_chat(default_chat): +def test_terminal_chat(default_chat, input_builtin_mocker): default_chat.start() + default_chat.__del__() # Just to trigger testing the custom del method + + +def test_chat_configs(default_chat, default_chat_configs): + assert default_chat.configs == default_chat_configs + + +@pytest.mark.no_chat_completion_create_mocking +@pytest.mark.parametrize("user_input", ("regular-input",)) +def test_request_timeout_retry(mocker, default_chat, input_builtin_mocker): + def _mock_openai_ChatCompletion_create(*args, **kwargs): + raise openai.error.Timeout("Mocked timeout error") + + mocker.patch("openai.ChatCompletion.create", new=_mock_openai_ChatCompletion_create) + with pytest.raises(CannotConnectToApiError, match=default_chat._auth_error_msg): + default_chat.start() From 58a78234e349878e234ba6501b8f3e7d7f865b8e Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Mon, 6 Nov 2023 15:19:54 +0100 Subject: [PATCH 064/109] rm unused code in __getitem__ --- gpt_buddy_bot/chat_configs.py | 25 +++---------------------- 1 file changed, 3 insertions(+), 22 deletions(-) diff --git a/gpt_buddy_bot/chat_configs.py b/gpt_buddy_bot/chat_configs.py index 2c6d4d3..6bbd83d 100644 --- a/gpt_buddy_bot/chat_configs.py +++ b/gpt_buddy_bot/chat_configs.py @@ -3,7 +3,6 @@ import argparse import types import typing -from functools import reduce from getpass import getuser from pathlib import Path from typing import Literal, Optional, get_args, get_origin @@ 
-59,29 +58,11 @@ def _get_field_param(cls, field: str, param: str):
         return getattr(cls.model_fields[field], param, None)

     def __getitem__(self, item):
-        """Get items from container.
-
-        The behaviour is similar to a `dict`, except for the fact that
-        `self["A.B.C.D. ..."]` will behave like `self["A"]["B"]["C"]["D"][...]`.
-
-        Args:
-            item (str): Item to be retrieved. Use dot-separated keys to retrieve a nested
-                item in one go.
-
-        Raises:
-            KeyError: If the item is not found.
-
-        Returns:
-            Any: Value of the item.
-        """
+        """Make it possible to retrieve values as in a dict."""
         try:
-            # Try regular getitem first in case "A.B. ... C" is actually a single key
             return getattr(self, item)
-        except AttributeError:
-            try:
-                return reduce(getattr, item.split("."), self)
-            except AttributeError as error:
-                raise KeyError(item) from error
+        except AttributeError as error:
+            raise KeyError(item) from error

From 164f56e9487b2bc2522f7cf5d4b740a68dc35ea1 Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros <paulo.medeiros@smhi.se>
Date: Mon, 6 Nov 2023 16:11:13 +0100
Subject: [PATCH 065/109] rm some more unused code

---
 gpt_buddy_bot/__main__.py | 4 ----
 gpt_buddy_bot/tokens.py   | 7 -------
 2 files changed, 11 deletions(-)

diff --git a/gpt_buddy_bot/__main__.py b/gpt_buddy_bot/__main__.py
index b90a2ef..9f012e3 100644
--- a/gpt_buddy_bot/__main__.py
+++ b/gpt_buddy_bot/__main__.py
@@ -7,7 +7,3 @@ def main(argv=None):
     """Program's main routine."""
     args = get_parsed_args(argv=argv)
     args.run_command(args=args)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/gpt_buddy_bot/tokens.py b/gpt_buddy_bot/tokens.py
index 9875d1a..4b0cf3c 100644
--- a/gpt_buddy_bot/tokens.py
+++ b/gpt_buddy_bot/tokens.py
@@ -117,13 +117,6 @@ def retrieve_sums_by_model(self):
         return sums_by_model

-    def retrieve_sums(self):
-        sums = defaultdict(int)
-        for sums_by_model in self.retrieve_sums_by_model().values():
-            for k, v in sums_by_model.items():
-                sums[k] += v
-        return sums
-
     def get_usage_balance_dataframe(self):
         sums_by_model = self.retrieve_sums_by_model()
         df_rows = []

From 66f3a215d6925a456c35891a947f3fda52a80c16 Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros <paulo.medeiros@smhi.se>
Date: Mon, 6 Nov 2023 16:12:43 +0100
Subject: [PATCH 066/109] Minor fix to embedding array type

---
 gpt_buddy_bot/chat_context.py | 2 +-
 tests/conftest.py             | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/gpt_buddy_bot/chat_context.py b/gpt_buddy_bot/chat_context.py
index f187598..d4855ac 100644
--- a/gpt_buddy_bot/chat_context.py
+++ b/gpt_buddy_bot/chat_context.py
@@ -87,7 +87,7 @@ def request_embedding_from_openai(text: str, model: str):
 def _store_message_embedding_data(
-    obj, embedding_model: str, embedding: np.ndarray, file_path: Path
+    obj, embedding_model: str, embedding: list[float], file_path: Path
 ):
     """Store message and embeddings to file."""
     # Adapted from 

[...]

Date: Mon, 6 Nov 2023 16:13:15 +0100
Subject: [PATCH 067/109] Add test for text context handler usage

---
 tests/conftest.py       |  8 +++++---
 tests/unit/test_chat.py | 12 +++++++++++-
 2 files changed, 16 insertions(+), 4 deletions(-)

diff --git a/tests/conftest.py b/tests/conftest.py
index 440674d..fe6e194 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -13,7 +13,7 @@ def pytest_configure(config):
     config.addinivalue_line(
         "markers",
-        "no_chat_completion_create_mocking: mark test to not mock openai.ChatCompletion.create",
+        "no_chat_completion_create_mocking: do not mock openai.ChatCompletion.create",
     )
     config.addinivalue_line(
         "markers",
@@ -61,14 +61,15 @@ def 
_mock_openai_Embedding_create(*args, **kwargs): @pytest.fixture() def input_builtin_mocker(mocker, user_input): - """Mock the `input` builtin. Raise `KeyboardInterrupt` after the first call.""" + """Mock the `input` builtin. Raise `KeyboardInterrupt` after the second call.""" + # We allow two calls in order to allow for the chat context handler to kick in def _mock_input(*args, **kwargs): try: _mock_input.execution_counter += 1 except AttributeError: _mock_input.execution_counter = 0 - if _mock_input.execution_counter > 0: + if _mock_input.execution_counter > 1: raise KeyboardInterrupt return user_input @@ -80,6 +81,7 @@ def default_chat_configs(tmp_path): return ChatOptions( token_usage_db_path=tmp_path / "token_usage.db", # Don't use the regular db file context_file_path=tmp_path / "context.json", # Don't use our context files + report_accounting_when_done=True, # Just to activate testing of this feature ) diff --git a/tests/unit/test_chat.py b/tests/unit/test_chat.py index dbf1251..8a579f2 100644 --- a/tests/unit/test_chat.py +++ b/tests/unit/test_chat.py @@ -1,7 +1,8 @@ import openai import pytest -from gpt_buddy_bot.chat import CannotConnectToApiError +from gpt_buddy_bot.chat import CannotConnectToApiError, Chat +from gpt_buddy_bot.chat_configs import ChatOptions @pytest.mark.order(1) @@ -36,3 +37,12 @@ def _mock_openai_ChatCompletion_create(*args, **kwargs): mocker.patch("openai.ChatCompletion.create", new=_mock_openai_ChatCompletion_create) with pytest.raises(CannotConnectToApiError, match=default_chat._auth_error_msg): default_chat.start() + + +@pytest.mark.parametrize("context_model", ChatOptions.get_allowed_values("context_model")) +@pytest.mark.parametrize("user_input", ("regular-input",)) +def test_chat_context_handlers(default_chat_configs, input_builtin_mocker, context_model): + chat_configs_dict = default_chat_configs.model_dump() + chat_configs_dict.update({"context_model": context_model}) + chat = Chat.from_dict(chat_configs_dict) + chat.start() From cc18bef2361713d1d4fef5319b6383eee80de36a Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Mon, 6 Nov 2023 19:03:42 +0100 Subject: [PATCH 068/109] `--report-` flag now only when running on terminal --- gpt_buddy_bot/argparse_wrapper.py | 5 +++++ gpt_buddy_bot/chat.py | 2 -- gpt_buddy_bot/chat_configs.py | 3 --- gpt_buddy_bot/command_definitions.py | 5 ++++- gpt_buddy_bot/tokens.py | 1 - tests/conftest.py | 1 - tests/smoke/test_commands.py | 2 +- tests/unit/test_chat.py | 1 + 8 files changed, 11 insertions(+), 9 deletions(-) diff --git a/gpt_buddy_bot/argparse_wrapper.py b/gpt_buddy_bot/argparse_wrapper.py index 10f6fe3..d9ae923 100644 --- a/gpt_buddy_bot/argparse_wrapper.py +++ b/gpt_buddy_bot/argparse_wrapper.py @@ -85,6 +85,11 @@ def get_parsed_args(argv=None, default_command="ui"): parents=[chat_options_parser], help="Run the chat on the terminal.", ) + parser_terminal.add_argument( + "--report-accounting-when-done", + action="store_true", + help="Report estimated costs when done with the chat.", + ) parser_terminal.set_defaults(run_command=run_on_terminal) parser_accounting = subparsers.add_parser( diff --git a/gpt_buddy_bot/chat.py b/gpt_buddy_bot/chat.py index f51b389..bfabf53 100644 --- a/gpt_buddy_bot/chat.py +++ b/gpt_buddy_bot/chat.py @@ -71,8 +71,6 @@ def __del__(self): n_input_tokens=self.token_usage[model]["input"], n_output_tokens=self.token_usage[model]["output"], ) - if self.report_accounting_when_done: - self.report_token_usage() @classmethod def from_dict(cls, configs: dict): diff --git 
---
 gpt_buddy_bot/argparse_wrapper.py    | 5 +++++
 gpt_buddy_bot/chat.py                | 2 --
 gpt_buddy_bot/chat_configs.py        | 3 ---
 gpt_buddy_bot/command_definitions.py | 5 ++++-
 gpt_buddy_bot/tokens.py              | 1 -
 tests/conftest.py                    | 1 -
 tests/smoke/test_commands.py         | 2 +-
 tests/unit/test_chat.py              | 1 +
 8 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/gpt_buddy_bot/argparse_wrapper.py b/gpt_buddy_bot/argparse_wrapper.py
index 10f6fe3..d9ae923 100644
--- a/gpt_buddy_bot/argparse_wrapper.py
+++ b/gpt_buddy_bot/argparse_wrapper.py
@@ -85,6 +85,11 @@ def get_parsed_args(argv=None, default_command="ui"):
         parents=[chat_options_parser],
         help="Run the chat on the terminal.",
     )
+    parser_terminal.add_argument(
+        "--report-accounting-when-done",
+        action="store_true",
+        help="Report estimated costs when done with the chat.",
+    )
     parser_terminal.set_defaults(run_command=run_on_terminal)
 
     parser_accounting = subparsers.add_parser(
diff --git a/gpt_buddy_bot/chat.py b/gpt_buddy_bot/chat.py
index f51b389..bfabf53 100644
--- a/gpt_buddy_bot/chat.py
+++ b/gpt_buddy_bot/chat.py
@@ -71,8 +71,6 @@ def __del__(self):
             n_input_tokens=self.token_usage[model]["input"],
             n_output_tokens=self.token_usage[model]["output"],
         )
-        if self.report_accounting_when_done:
-            self.report_token_usage()
 
     @classmethod
     def from_dict(cls, configs: dict):
diff --git a/gpt_buddy_bot/chat_configs.py b/gpt_buddy_bot/chat_configs.py
index 6bbd83d..92f2d68 100644
--- a/gpt_buddy_bot/chat_configs.py
+++ b/gpt_buddy_bot/chat_configs.py
@@ -131,6 +131,3 @@ class ChatOptions(OpenAiApiCallOptions):
         gt=0,
         description="Maximum number of attempts to connect to the OpenAI API",
     )
-    report_accounting_when_done: Optional[bool] = Field(
-        default=False, description="Report estimated costs when done with the chat."
-    )
diff --git a/gpt_buddy_bot/command_definitions.py b/gpt_buddy_bot/command_definitions.py
index cec6f5d..070608a 100644
--- a/gpt_buddy_bot/command_definitions.py
+++ b/gpt_buddy_bot/command_definitions.py
@@ -14,7 +14,10 @@ def accounting(args):
 
 def run_on_terminal(args):
     """Run the chat on the terminal."""
-    Chat.from_cli_args(cli_args=args).start()
+    chat = Chat.from_cli_args(cli_args=args)
+    chat.start()
+    if args.report_accounting_when_done:
+        chat.report_token_usage(current_chat=True)
 
 
 def run_on_ui(args):
diff --git a/gpt_buddy_bot/tokens.py b/gpt_buddy_bot/tokens.py
index 4b0cf3c..de59f77 100644
--- a/gpt_buddy_bot/tokens.py
+++ b/gpt_buddy_bot/tokens.py
@@ -1,6 +1,5 @@
 import datetime
 import sqlite3
-from collections import defaultdict
 from pathlib import Path
 
 import pandas as pd
diff --git a/tests/conftest.py b/tests/conftest.py
index fe6e194..1f31ed4 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -81,7 +81,6 @@
 def default_chat_configs(tmp_path):
     return ChatOptions(
         token_usage_db_path=tmp_path / "token_usage.db",  # Don't use the regular db file
         context_file_path=tmp_path / "context.json",  # Don't use our context files
-        report_accounting_when_done=True,  # Just to activate testing of this feature
     )
diff --git a/tests/smoke/test_commands.py b/tests/smoke/test_commands.py
index b3d17fa..a882c25 100644
--- a/tests/smoke/test_commands.py
+++ b/tests/smoke/test_commands.py
@@ -6,7 +6,7 @@
 @pytest.mark.parametrize("user_input", ("Hi!", ""), ids=("regular-input", "empty-input"))
 def test_terminal_command(input_builtin_mocker):
-    args = ["terminal"]
+    args = ["terminal", "--report-accounting-when-done"]
     main(args)
diff --git a/tests/unit/test_chat.py b/tests/unit/test_chat.py
index 8a579f2..4c48277 100644
--- a/tests/unit/test_chat.py
+++ b/tests/unit/test_chat.py
@@ -44,5 +44,6 @@ def _mock_openai_ChatCompletion_create(*args, **kwargs):
 def test_chat_context_handlers(default_chat_configs, input_builtin_mocker, context_model):
     chat_configs_dict = default_chat_configs.model_dump()
     chat_configs_dict.update({"context_model": context_model})
+
     chat = Chat.from_dict(chat_configs_dict)
     chat.start()

From fea8f79d7f7172b13622f5de2171707b5484c9e2 Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros
Date: Mon, 6 Nov 2023 19:52:06 +0100
Subject: [PATCH 069/109] Add rudimentary test for app

Streamlit is notoriously hard to py-test at the moment.
See, e.g.,
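
The workaround here is a smoke test that drives the app's entry point
with the Streamlit runtime mocked away: `st.session_state` behaves like
a mapping, so a plain dict can stand in for it, and `pickle.load` is
patched so the app gets default options instead of reading the file the
`ui` command normally writes. This is the same code as in the new test
below, spelled out with its assumptions:

    from gpt_buddy_bot.app import app
    from gpt_buddy_bot.chat_configs import ChatOptions

    def test_app(mocker):  # `mocker` is provided by the pytest-mock plugin
        # No Streamlit server: replace its session state with a plain dict
        mocker.patch("streamlit.session_state", {})
        # Skip reading pickled options from disk; use defaults instead
        mocker.patch("pickle.load", return_value=ChatOptions())
        app.run_app()  # smoke test: passes as long as no exception escapes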
---
 gpt_buddy_bot/__init__.py               | 2 ++
 gpt_buddy_bot/app/app.py                | 3 +--
 gpt_buddy_bot/app/app_page_templates.py | 6 +++---
 gpt_buddy_bot/app/multipage.py          | 2 +-
 gpt_buddy_bot/command_definitions.py    | 5 ++---
 tests/smoke/test_app.py                 | 8 ++++++++
 6 files changed, 17 insertions(+), 9 deletions(-)
 create mode 100644 tests/smoke/test_app.py

diff --git a/gpt_buddy_bot/__init__.py b/gpt_buddy_bot/__init__.py
index 02bf216..bd1eda8 100644
--- a/gpt_buddy_bot/__init__.py
+++ b/gpt_buddy_bot/__init__.py
@@ -13,6 +13,8 @@ class GeneralConstants:
     VERSION = version(__name__)
     APP_NAME = PACKAGE_NAME.title().replace("Gpt", "GPT").replace("_", " ")
     PACKAGE_DIRECTORY = Path(__file__).parent
+    APP_DIR = PACKAGE_DIRECTORY / "app"
+    APP_PATH = APP_DIR / "app.py"
     RUN_ID = uuid.uuid4().hex
     PACKAGE_CACHE_DIRECTORY = Path.home() / ".cache" / PACKAGE_NAME
     _PACKAGE_TMPDIR = tempfile.TemporaryDirectory()
diff --git a/gpt_buddy_bot/app/app.py b/gpt_buddy_bot/app/app.py
index f611459..7d3bb67 100644
--- a/gpt_buddy_bot/app/app.py
+++ b/gpt_buddy_bot/app/app.py
@@ -1,7 +1,6 @@
 """Entrypoint for the package's UI."""
-from multipage import MultipageChatbotApp
-
 from gpt_buddy_bot import GeneralConstants
+from gpt_buddy_bot.app.multipage import MultipageChatbotApp
 
 
 def run_app():
diff --git a/gpt_buddy_bot/app/app_page_templates.py b/gpt_buddy_bot/app/app_page_templates.py
index 27158ed..e33f351 100644
--- a/gpt_buddy_bot/app/app_page_templates.py
+++ b/gpt_buddy_bot/app/app_page_templates.py
@@ -3,7 +3,6 @@
 import sys
 import uuid
 from abc import ABC, abstractmethod
-from pathlib import Path
 
 import streamlit as st
 from PIL import Image
@@ -12,8 +11,9 @@
 from gpt_buddy_bot.chat import CannotConnectToApiError, Chat
 from gpt_buddy_bot.chat_configs import ChatOptions
 
-_ASSISTANT_AVATAR_FILE_PATH = Path("data/assistant_avatar.png")
-_USER_AVATAR_FILE_PATH = Path("data/user_avatar.png")
+_AVATAR_FILES_DIR = GeneralConstants.APP_DIR / "data"
+_ASSISTANT_AVATAR_FILE_PATH = _AVATAR_FILES_DIR / "assistant_avatar.png"
+_USER_AVATAR_FILE_PATH = _AVATAR_FILES_DIR / "user_avatar.png"
 _ASSISTANT_AVATAR_IMAGE = Image.open(_ASSISTANT_AVATAR_FILE_PATH)
 _USER_AVATAR_IMAGE = Image.open(_USER_AVATAR_FILE_PATH)
diff --git a/gpt_buddy_bot/app/multipage.py b/gpt_buddy_bot/app/multipage.py
index 2550ecf..1bbaeb2 100644
--- a/gpt_buddy_bot/app/multipage.py
+++ b/gpt_buddy_bot/app/multipage.py
@@ -4,9 +4,9 @@
 
 import openai
 import streamlit as st
-from app_page_templates import AppPage, ChatBotPage
 
 from gpt_buddy_bot import GeneralConstants
+from gpt_buddy_bot.app.app_page_templates import AppPage, ChatBotPage
 from gpt_buddy_bot.chat import Chat
 from gpt_buddy_bot.chat_configs import ChatOptions
diff --git a/gpt_buddy_bot/command_definitions.py b/gpt_buddy_bot/command_definitions.py
index 070608a..b8d1ee5 100644
--- a/gpt_buddy_bot/command_definitions.py
+++ b/gpt_buddy_bot/command_definitions.py
@@ -25,17 +25,16 @@ def run_on_ui(args):
     with open(GeneralConstants.PARSED_ARGS_FILE, "wb") as chat_options_file:
         pickle.dump(ChatOptions.from_cli_args(args), chat_options_file)
-    app_path = GeneralConstants.PACKAGE_DIRECTORY / "app" / "app.py"
     try:
         subprocess.run(
             [
                 "streamlit",
                 "run",
-                app_path.as_posix(),
+                GeneralConstants.APP_PATH.as_posix(),
                 "--",
                 GeneralConstants.PARSED_ARGS_FILE.as_posix(),
             ],
-            cwd=app_path.parent.as_posix(),
+            cwd=GeneralConstants.APP_DIR.as_posix(),
         )
     except (KeyboardInterrupt, EOFError):
         print("Exiting.")
diff --git a/tests/smoke/test_app.py b/tests/smoke/test_app.py
new file mode 100644
index 0000000..2225492
--- /dev/null
+++ b/tests/smoke/test_app.py
@@ -0,0 +1,8 @@
+from gpt_buddy_bot.app import app
+from gpt_buddy_bot.chat_configs import ChatOptions
+
+
+def test_app(mocker):
+    mocker.patch("streamlit.session_state", {})
+    mocker.patch("pickle.load", return_value=ChatOptions())
+    app.run_app()

From 0ddb13f8cc55cee0c6e744c62299a2d4e50eece4 Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros
Date: Mon, 6 Nov 2023 20:15:37 +0100
Subject: [PATCH 070/109] Change user avatar image

---
 gpt_buddy_bot/app/data/user_avatar.png | Bin 45312 -> 45121 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)

diff --git a/gpt_buddy_bot/app/data/user_avatar.png b/gpt_buddy_bot/app/data/user_avatar.png
index 6f6270dbe8a64cffd9a23c3d849b1e5ca4ec1e39..7edab9b00688ba079b410fb01136830238872bb9 100644
GIT binary patch
[base85-encoded PNG data omitted: literal 45121 (new blob) and literal 45312 (old blob)]
z@4M$6<@i+cuSfwVh*!rjv+`%w^9?5-4@&gAYol~`-~x5O>F?8(`jKS0Ik(XS;k<99)0v-KvOj?R)pZPNgW;H{Gw$v3R}4WSpyf%=J2 zl;--r#RTIF24rk-nCn#Oh$Mj?e^i}rFUaynP3ru zC>b$$WK4ElM_VTDC&~gLr#K25!mRxn&l?IT8R@&Cf$&zI^0ElNRN2);T}#@*pOmqi zEzU%v)n%bnw3YY7?=VW$q=a~jv|E^o?+Z(m)1j3_m|6~+9DCfM`2KJH62AAXU>?48 z%|<-!jKkv7+<6l_2ZJ;h1BUV2ULq0gIVrz}wkhV9$)UCxj5PI(Pqo1*)xip_l+@MV z8%3kNYOH4NTIsug@UY@Pi?C&M6w^Ks=dgLRBlxa?39Q+;Juo)+!@G9pw727+gH{BX znF2+8Q-&T$sy+;29!h&8sG$}piquU8sU>(4VgpdQIKtG8`s4q}!unkz4Pi_AQRGC% zeol2C3+QmPKpQclxHD-26dG@e16_Wff&xTv-+zY@k3Rm;P*TogZ!X~DE&n?nUXPKn z$q0?yfJ_Yzhjf7Eu8t7)S2$_bW7gEq-Ms2UR|)1##wanJAe`X1VxI-hr2ghCrFMsO z;sa2W?9y_+`D_=?ei#KAe5l_U_dfjZ141n=%~*5)I&2;u4Jife$(eK)9C6^jSh{q9 z!}3!?AC{3u+=j8Oy!0)joi1{sRd6$c`Q1`A3euQ)x{nCUoP@6MQ%;Oy6j|c?s5GXv9Bzc91%^nPQ&g3KxUbPbY zE?tC&H*N_yeTD-^n|5I1juGs)xEJG74!%szgaK*}$^X6!IM6DYkKs{e&7uckl7-JHZ=? z`K?-RS+h2-`VWi2xL}&jx6VgyHsIapDU1??;3PcN< z)S;!p;f`_Vj5Z`VbB(U&9jiI?cE*_};0M3@Gg}mL?hSt1mB*hDrH@bW@+**Nqbi9~ z{)3#pHyB-#)VQxCNx?fh)7a2OU|dpk05G500MAUd2z82*xg=!hyFgvFq>-bwdphJC z^4a@}#1qa4t&NU_g&#lJd%wM|126qo-H^OKQZ@dhKm!@|zP-eMPw{bg4d)8Ni)i zabIl;HC?>N16-lEa4r0Xc~qTX_Mx-A8d3fGSl*Td=qwHu#lyD<|IT#$Y&yK z(v4yZs=U!F3+Uv9$%(PE5}8>=80bk7e=jyf#k@J~A!Wgb(qnKSkLH#St53`X z?c=3?m%Hx!zquOseD^YZ>T_=ok~FU+XsHhk^LZ2Gav71(w!LI{o$KUl$&5IN=-cPf zqhCpS#cdBzUm0k|*Hcb9iq?gyoF`&h`G*=RbOFG@hQf-xtNayMW;tyFA} z{|Qjv%^`rxl3#c_#|J<3Rs8yvdvVs02ja!&o{X{4NsK?d0UfIjK+~L_2v4#qhVOqE zxtqj(9p?CC4gY<`&A9rJ4S4dCPsh2>KO43FA*$_C&grnlHYknL$a|EoNxDQyDK;NN znT_!s{UPw?Cr-12e;aoU;+Ff@2JIQvhaxUm?{#fH<&>lRwsuLOm4%??7z}G}D`^s( zDiA?HR+P3H5;LX`@F($!w(|2~5F70SMBe-2ak}68(=Tj}o~XEd?AqHOz}A6r^tCi$ z((ku3IE*njU|y;t)FuDlJuDH}PplzlBdl|9s@Hw>l}oS2*{2*CHrN^;8o~BIU5$=| z_e0j!jIs4wG5*N9(D&Z1PJI6lH{pKAHi%KLru01t zG`~sjmK`vDF|=dyW(Qqvyz5SE8Jh@6b-o>8t47Bwk3aK79Cp}2$cKlR3ILCpNeLB( z3B&;)llsK`B(m0WVA2%ZWeC1dm4&#JfzI^ao17;~JuRvsYb?rMq_P%#UzSqL&771V z_$2*SQW-3veXfh8UC1)_v{>>vOQ9Mf5F2{{8NE>w@VCMDNt`r()u9`i-gF}!IrI+rdG?@^@VqsQrca~RTOz8|fvO=xwD z?5+Jrw)f*rpZ;%r;mt2Z-@G0SIruVg{mro+i#xrotrb1(J^14dcjIH<{xw>tNK;cq z$?m!~H)7PiJ(v;FJp>}Uar7niMmKDYr8P{fhyK@F)(DqS5sZ)q@bvSa5cAJ%PBl4M ztCSwaK6ExNRZVz*LS9oB7=kqt2vR{tNg#|VCGqMqRH@h7)qt2}7O+n?T~HvfOcb5~ zfPnkFLfG1WRT1Wv&au#ekGA7Tuv<>cWJO~KEw5&xpE4NrOMS-8qEde{X6Ry5e3 zuD=uKpK@4u?>#P~8@FTn;IU8@HY9n8TTdr&JvugNcqruMs}1;7HV3TO2luSshCatI zSKad)r4 zLeC@^5m<)oFOsM!yHlliOZ^}tpNSdh8`2uJEKLMnY!6ByFjn0o1dQ1I7i*#D9PET? 
ztuSc;7YfufVu&R2QC>0cLKcK~uI`gcMHey|!=y1NSk4^q%1E7!PvD%hPsJxc@@0o) zCc|$YH~N9N{@#ah@5XJ|Z((29s&U3i_Tf!iF=y5Ourrg&DP$5<`lm1qgDfi$=1Iq| z!tbxTIrP())mXD-C*JU1Kf!)I-PmXG94E1xFg!XIwuRj0`qAO;;A^CeIwo4)+JZxU zm0smBN>BQqx3v*Q=dm1KyD2~iziC=adli3LeJ^%6DA-Fo;Ca|G=!C{oPCXX;9k>iL zLxY7nmAv*frTQ|1Bf(vMDowHwfhPeU`Fr!vbyIxzDDgJZhBWy2QXt zlCJ4h5yoiT#Kd7-U*JS3YN?8P8$fusAlj9zk9j6LXUdEd+4z zWxs<~;*`^l!=;yBi_S{ib=jAM-sN6@bNyYotxLH zkzLx&gJnQL()~m2PZ&Lodkp;ez|h3t0EV}04cohCj(Pl2#Xmbp)dEOeOiSB6KK~`p z3~ornwj8@q4JBVmGjoBW91}?iUMb}`=JXRkX1o)kiCKyUiJ$T)@l6n-SrO96Gg*to z30-ZJwO2ookPCQVC;D&zAkSFq#R8&i z=u{1!dBHh>(X$+)_WVw9e{}=%igQl#4w_J4$7kP-Zrc%z(tIkdZuv({xyHPsg+c72 zub*hDRlN3`)57l5zqtH+}5m!34TKt><>(8NJEp%F9yNBV*B`pX;oVsIqlr38$f})u~7|* zjOr`(1t4xR4S&;3Wg>>aU?|Bz`LzUQD|M)UtV4ajeLU=td2{fxb56nG zOXp$27yX~#OZ&#?#l&SdZKC(1>4$XO#$IKiU1H)Hd_ zkeh!sv^dGXWNsgh+J6a7+;2JBoEjSF@5e$1d78VrP@9?*qdhW7-1ROeFNY{r8njfh zqNuGjGM5OX?x17zLC55N$(%28+TPrXo9|nP8}EHMXvv&>=iZ?2aIfc{cXn716M{U3 zJS5hNhBk}>lt75lZqoX^?Wb}^q$6zt5HHUnT3}y17`j-wKBnzvCEHSUudMcBR4q0L zoR=c`#7fEA2JeNk6gQK67`5a}!ybCivzqIRXXIZG21tzqT1k?{sH!_>d=$@l?s@q7 zSAOUW|M95kY=ke@-S;rAzh^B@TeTE}Za{oe!SH<#p>yGa&>l`)5_~DF(O53=V&(7* zze%i9^Og3F@J&8C5sKw!3>Q7#bPUnrF!sEo4#MUuZwV5<$4$7Wo^S-tJ>f`9%}j^; zcxG+#`8ZDXjZIEt-2FARv|x!}#DEzXHTmM?a+9>Z1=W_O5F7CJ3foe=`!^ILmlxf| zxC5QbaMf4i9U8;ngB#G))Iv;X@srDM44H}?^7oz+pFsDlXPtu+Pd@>*T{|ceOL&_m zd_)?Yp~w_vhG#!GpdBCSAZcz%OE9yOb`Q03xH_-p{`&V7;v=h=od+-X8|oIfGcQCf zXBqUu!JT245uQb>(;H!&9zKp4-GkKEJ)v z-iFc9Y25zj)qze&9Df)(I$JU0V3Oa)#j=#2p2V_~kHEmg>oL84r+1iR*zfvg*xTe- zdD>#i7e@t!+THIU%@}KX0b%C!^0pf5|JJ53V_x z%#t*AiFp*fV|O1FrV``2WMhcULQEb;p9CQ9t(6Lh(z8XCCh#CVXJb@vjmHT=L9*5i ze~YwgUnTmhXsSGgZ?0<$vY(U5ZJAL?)fsLmNrP~HP1I{h`r*lA$yZ7>Ujk=1W_sa8 zPs5KKqtDQqL(dp~fUdgZeq6m~15RDFICx-up`!kK?nj5i>c%9j9qopSsCb)NTVQcr=@nc&^1_IBe-ANwXg`q8gp zmxn#>jCcLp%kZXmy$H3DabkqnV(x?XGie5j4u2aa=ERO&Ve{N-*pV)G8m1ZF`{Q-+ zC5w9fDkBe*yxY^!>L>@PPCn@v_q<`k<~0Q-Qb0)S6sDoI33zC?a(N|Jx-RzLP+$#Y zfP?SFwR<@5NQSPGyuw=|(IK$FssWEo8X8INy(FCtm6M|>#Irj5#IBwq(NhGJW~Je@ zUjr225fYmz|2$|(TAXK568MU;$DMK{&U*am_}yQw4wVAJ`b~fqbGX>u<0BvXDz0=)?l%+g^t<++2k?xizY*W~ z>icojV~;`Zm>?gYh%jbq2CcoFIQ02X!g^;n>~h!D+;4@N367bXdONWE2`6CD(FX-v zBd7FU-}h=yw*hZ3kk8*p8y|ejC-Lzw|I|SV$AGS%e&gT|eBv8;%wvzjai<*}R^M~s zCvG< zeDT+R3*mjA*Wwx7d%64uX=j{vJkEdWlQ7jk;GjSShG{LXXQP-LNr7{~C$x=?&@3Pa zrnI&bWy==-eHOD_8yT&fsaZ}>ECb4t+(8XeVjpMZIX8u$-ASOgw?RWXG=#X7A;wTyC2?wZr2|Vm;AFUe~^bU0lP%U zncpkbxQVfC{dQb)?d|x<4}Xu#uU;JjNj+gZ8Y_+C!f*SpTpD+P6V2R!$-&w+`k{T| z+4R6LhMgd2s`{NjX3*WW0cTBjLAa1Fu zLop46G>L9cDH0!3jkSPHAgt^`t=rKMnJ|3Wb(|QTC9jDy)NmCH$r-RQaZ4Z6DRu!x zFDD`UQXRsSFJqI2;QR7y5SXu+U6REN!Hk@6n~@0+{qfUyU2{vw z_h=3bk<*e$i+OI4Mx5sO)-V2q@4CNzdb@D+F^A%?Ll4CM2QI-HdlAkS8epCpEscCxXnr89~FOm;=qJ5JJTt~LPVPa$&FlgUVcXL56XIKiY?KQKimCMmne1w$hWq>HjF%}- z$M<1+cnl|;eq?Yre&fG?8@4%#3}^$^{MH|?#hK2~JAB1_jQPEeS{$R?etTFz-Q3q3 zB%&p?Ue5Yvb_uTf;Srqiyr}$^Lzcdykg29(q}bE^nEnnMTt(tPp%m@Fpx6B zte{xw9B7L>BiJjcpv2e;k~BI5#Iqy^5{$8h$t2qrhKUs!-cVARpOk)`=*mO)!|Psm z5#IChF9)e!BUsVy80Chwo1LL`E#7+JV=y!}hN_chewF+7Tkpk!6Ap)+PIUocm2J#Y z0&#+5PPEVL9KaDL9DqwN{|3JErJvw4pZg9TaTCBV0BM6Nk!>w4@CKRVsRnMs&w*%y zznLNx+C{|21cux_{A9Z5yfg8Zx4#7YA9WyV1A~YG6fW}+>1d?=4EqTCR;!_Y{MMUp z59?SxU7DSd{J_>-_}tI_N=1p%xQ#p7tz5bgZ+-V0Lhbo#K7$AyYeWf)6d+1t5$!uI zrP-ZW9Qt~f;BUVZkuYrg&G8*sG4%4Z+5KZZu9Lh{wl`yN8Ivkh|&cg!0+%JHhcpmhhCJY(GS4 z>f7s$zX{3`T-M!*C!BpUUiPwQ;K$a2q*xlko(s6y%9?mF2>}p?XK;4#O~~rNEkE$KK7~z z!*TGD8M+>#FjJRpy24&cB@IxP_}T;0=%^A$Lp&-h{&}V{=!-)_4JCJ9NGyy@$=i68 zJ0KE*m{guY1}2#u;KHJbNIXLs`O#z}24^E!6g54+HZzSD#~2^}@LTb;r@cxB4sl)v z#`*B~|A74$Uxa<;c3@(<7AD9JC)Jys;n=y);;`LIlDTQ(8X3F@nV7gUJ9c8u{7$^~ 
zT`$2aUvmL&z3FcJ?Jqas#=oz|n)@HYj$K0`*fL{N;+M8fw8Ha~qsuYjiX{th+zChE z@n@fc(@r}abA66$~{*h!hlIVcWK`^wUI(nBBP9^nYcqg%d-=jfTUPPhBk#dDIGAyI- zJqos5u3?oIJu1=*uuIC2;!ml%p`o#~umyQweEMq>W(LP_(&LW98{Y6DeDE{h#(W>x zsYO`Q>h8I1XbkW9=5O)&e|>iFAlT548rENV6IPsa2HNKKg?)=MF5<)!8!Y9?03!+I zp^T4Y=8oZg71 zNhF4cjU_s33B5QY8J^ZCqXR6>6Q-l?gG*t6at^sfhjwp!j7FZjTVpMt50@t9fZr2D2T{*GmjKM^gC@qD1i zNYWAtZDJPV3GR=IXF_*sZOCtkH4-L3@07v$gZEAMV?9ThK^R5TNJ*Iuur~B@@yVm1Be)6Z_ zQIYkVh{a9(?$7W0%zLnM`HHY}U#R2|@Of-O#{cHhn_`&*MTrJGao|N#FmC$i?IkJ0` zHHd~T#dn7P5%(sg5(GFEpW)YSHg&e*bD#frJmpC*!_eTcg9WkJnOD(XRsa0PJF#?5 z4_^25GcYhRg65_c7>89i{q;sHIm1cbiJxtalH^vVV2F11-}NB2t-cFY_kLz_ z8trXW+_Qcs-ud-kW2%-1n#|A@O>Uxkx8;j2z5vg=;2D_O+8-B4=hI+8C^c8|`kxeQxiWU2zdSf7a?17%Zga?Z=|EJ+AvXm*_e+GtKLNfsXjDS`y3V#Yy~{SI4! zPyFX4c=0RV6PDVAfGQ%?>T#ER<+5uqzpoR|J?mHukBkKgSQ{P1#>=nA(lZ^i&+84a zB)X`1afmXE^fWX~7NQwJs^mOZf}VtC<` z$@=ep5L<7(Cse_$h3(<1*tlyP@A}$ruw!gIM043c?_G&Lo#3?7kHv>S_V37@T~YNK zIG>8zCurpuYls_bsj(n!ZNpZ_X(B88IHZ0D*qK?OS{Zii&d4lF_5#1{!F%Js=Y47k zV?~t|4a1x^h2{-%l~DGL*~_bL?_Y1PRPprFj>gdVSg0Q0bIdn7 z$-eNEqtUU9(|U=?*)V9PY?F1kG9-+I|kRRLz83jDF?k;9ODm+OyP}R_%ZI-v^7*Dm{I#wO?ipe(TXp9 z|MOV2Vp$LlS;e~&slSNc7E|P@!ol-7w2cFdJe;)Xop%>ENHhaxhbxVaM&6gQRBwdk z1EWVfA)_=$#64ob(1qBU49bW#xohcY6uF8yFVrAr%5wYlca*rLGZ*Wkq|t$&o-c-J zWE_!zVPeL1hDhVs2tN6__n?300DgJ-wV0pz((*05pKoFfhj6Ts4L8Xn@YWKH#(cmQJpnh`pcQ%I>FI$)+=z~vz%(>n{3CN793 zTZu8CMZn`zLnGLH{c4PF+m5Wc>I}~*v^Te4{gwf|jKxu4`-s!zKI zeCccd4`-fyGA6febXXm+`apmzF`p&V+LgxxN%$ELb1Xp@wVjVRB|t9E671mjvq6t~ zw`2Xi%h0D0vsamAd3jQ#8>x%HYiwn6k^a;;-HuX#y8V8P?HItKla4}jPiI*DlQWTR3?{*Hu!Ox*R<$=Peb$}ULL)H-L=$8bq+!n( z**4*M88U~xht_Vwwwvz6v@@zbJ`WoacX#6U2e#mCU-~)L_78>NSuR2dq0PSXz!yII z5nS->r(t}{CdcG9dVT`*n2Ef=xhxECCB~AgqMnuFu(H-ef~Mhney*dzEX51Prow{?C#H@kCgQgu z4QwpBGc!4b&bC&3_XnTEh0lIHuD<9cdx-5XQ0m1xK6i4kGtllH{OX&Tl?{mSDuf> z^SUtL_w8wFM(%S(u3nAdEjzKmL765eHGMU$ocFxsOhFk$8eDlTGk#f`lk{f^&U|Ad zeDO6;e9*{;@W;07z~)=-!1T`kP``YNztp3%fc8p3P0t{sp^@$H+>5A`J|A;PT*dO|T-~ zH;sG!(a6u97@c4=FS#_u^M&tN;DO3r6!HjU8*{b(&Tz(JCbLn7aF#7Z9<@{#te$XR zIC4dhjI$PrA!q^=JQ+s!$H=+t=k`qElfUN9?ZS_K_65A)IsY0Y`@Bjel%Mct`=HVs zcm3bjZp1|&{|?^$yeH!E#~p~ViD_rJAuK)^ePAsfaT0#cfh*B>;Br*jTB6T9&x_5o zBp^&`3ph&>(qNjq5u(I};c23WGT1<5@T$Y&1Ec7__aO{CwAR@RQ^CvP+uqySj-9S= zA97OMm#g-k6_4;|6q9?nGUA#19k0gg-QUcP?IF7p>`f2<lzrir{OUb27gqi@EGUvm+rckPU;a$w1AN=!nq zf_*$}P_wrrje(-GHc%x^zz76p&r?#yX} z-}}jD@s`(o5a0UcA2BD362~)ym+URn&97f^3vRh@9o}%k<8bn!E5oY&@#$$)n%#Yd zhq3AUTT#8|!64mx9K%;T+9RWQrq8V`J_wEC#p}%2?1azR;LoBP%|WfqeK$EYf_9ws009Q9QL9K89jPre<` zc*)ZoT-g!UrW$!ZtzNPQ?0!fNFxsG22kHccG3L&d_61VfM_SNsIo83GM<)NCn5bBg zwg{Dx_)x?-^K2?uDvYjYP)eCZlH3Aaii;EeS0uU^U-V3E?1;;ee)mKp^%>om{s-;K zBZ>i82s|;=4L0!M@F&j`qkKZ_xc~b_^a;0(>bA*RED4&GSk#{^TIaSAOv~+_`mE@T%~( z<^J%LWt)LobdG+z3k>;7^4%Z zNIL#gCAvOjf(bXLgAG-dg7HhF0Vt@3q{0jvS_pmMl!nYy!hT27LqmAOdtZY?4m%KU zc=N}wrGF5;v>2HQ8FqVcm;dyJJ8;FV_v4uk&OF00&WZ&+n4X%!guA`3pp`pFGyL#+ zj5;Y??P^C`UmsfL_Mxe#6U}XHVZn#rNg_-*@5}e)EgZg|a)$17|4^v2GBq$9%F+3( z%FHpehcCYI9X=n&wSM)3>+#*+UxUBg^-$<$FL_lMs$?|jx4U~k?Ti!g=`X$;^A^pC zlDrb@x9AKMCUvHQU@4PSNo`xCY0|W5oCNB`vS7m8TrD^7nv}L_6fBD8W*KLTa{4cV zEnu1dP)rqILqxn@9L%!~FnYZaAgxrAT~X+N>tJocP=>912^g959gmo3ABEBQP;E{@ zoM>e8W&%^&cjBC9Js!Vz%>Md|--o~6dS`$#Rcpf1HcL3xGu7DSB);^A>+s7fSL11? 
z9)PBJ$~W@9K-n+a(NHD9~K`>_3B zIcOE~N_@?i%rkf!My955t;69z`^ybsV}VhNu=LFzQ_6MdS0;Ib)9>E+fj7PcZ+P3w z-J@Nb21jCsr9UrIi#Fs+rnyZTl>Ow)@B&|u-t!j>8%*@HNG!(2b2F}?0~Gj8yN2U$ z2xH7!ZzefwVlrEneCgpTz7H&=t*9upRnVnwv4k*3nmGmdu)A3BKkE?qV$$c>bb2XG z1RkVZT_;S>zg56H4M*=CmCgv=%R+TJyoC zski}`CFNSRVexgv!6?5IUHH$JmYA5rhHb;R=FW$4*-dxgw)I;=DeMjtncNZs`r-8L z_j!f`?pY@txB?&f=o@hI*{3)yF&OBQg~>G&F3LGqUFzDGB$s?-ZXz0T&_GKLOJ*Ri z@Xs}WWCVqi1g?iEg@I4wNE0m%am3pky=M(kK6u$8_bIL^ZcsM5p4oY^1ie>#$o#Ki zbg0`FUX`TLgqG->677amnNB6h(w?2s5R41D!+__J;k~P_HZl>I?2?bY0grp)NqE;K z|B36J1dSLtuEYsfd;WCS_8IR=ui+23-;c}P-#*=4IQo#~IC|A`9Jq8o7WTBGyQ34G z9Zg|`ywtCdUpcnghH>?SG~pO~%dSzZ-#UOh9^8nV?t2(_KC&4*Yqb!23~N#i;sR{y zO-7Jvz}gVC&e|-U3yIL{5V=yk(sKiMh6P4qNu*d=pm|P!)U=JYGxR~2$ zY{V%S5L2ckcG*jk5JP>eg~!xVrmTO<+r?Y(MiTLP25F^bx(7p;nDI$ttBq0VhnDOj zE$O|+Zz45kr_n$CC`fseh^==dmSB@o!=SboBCW}YO-d?W7mPC}h5Qn^<4-#lzxmU* z@U1WX2%q`vx3PX`IP5gnLjpuc zVwNJqocNqrkSIPQPHHl3$ws-6%~sHX5!oE(18E5_%vkAzZm7r*VLc=`oT!RJ5o9en=>zr>D- ziBKoJC5wZRGX!zhInyynevp*jHZqEh?yt6flQf0(l1?}_^y&x2FY#+3V=O|Zbe(4; z|2!YU_iaA?h(qzlx4Z;Tc*rd_M4_=k9VS?dBAvwVn)&n{ZlI5Oec7}a9-9Rz7 zL^XW@SiJ=g)3rWtrO|>3eqa6^O)Y8kVt#$?Cu%VJA^y9xZde9?W^ElcEe`cJaI&Xdl zKJclx;^h}V3*Y+I&+ya#{yo+W42A{G9pIeloNBvRQlTdN-b~CTSXCK*abT&Q3Jr7! z_SBLuJflq%{I-nV(DV@I?30hdD=&U7p75m8P;p}42b8?U=hv+=p9TqNIj7ryj*>k# zBTF6%W^zFfOj!1Zv-VTTTb6Rr>9&QsICQLfp-{{JUTUOu$VARZYd#r1pbF-Xubz48melK=sxrZF?I6Z@{1 zkN5t+x8QZJeIb7Pn=9~(pIwH(-*T5@z*x%J|5Yj^HEH3zq&!Cn0bzHW9vicYsf36K zj69?rOpsJx>R``vpL`Zx_=59s+^I(dx1-lOQ{yx4nN@Ho+>+0kd=aFGEsbGhPQ!ae zX-Eo_-dmzs#x|PPl5}3iJ5i);(Y-llOR3fF?6}q;3_=vZ{+kBc!XJe-r47J_O6p=2 z5sfm-MR$w})e>~k&iZm*aWUZZe<;}jWiuqzC4PTxROvhm%Tkt!L`0^ge6*uCxU_?* zI@mBd1v@r{Idi-4;@3S77rp$Mxc#QPap`aWhAS?=4);IsF#2n;Zn!6~U&X`{B^sb9 z_a;bJev1Px$pK59mYELa@w`#$%XN4r zk9UYRMn(Y{&^9??=k730UfSnsUTZL=-*P?<(=Wjl6quTWK)WC3veed6pHrQ~x}1a654uPku10%E0;QX)Ul#)5Vy=a<}s8 zpYj5o#2-qf%JWi4O9ZZ|pH$`N2+o^VWN84)fZ120M%&OU=YNCsX>ZN2b9S6KQ# zW%Nc8-INF?kzEj{F|%_XhEmUfe3oA18I{$DOyw70``>0p#$10#gW-DcAi%X!yiwF-u-pmn7g>+8_9Tn$rP@dJ^J+;GNk3aE%~FqqU!gwS!z289dq>tZC~bsb!-y{Z4%~L89xNZzzby#s}xOSeYG-qgu18 zr~li7JCRn~r|HN(O zhP;+gCo%fQR9j>Iw_n6z{EDZLk}ycmh(7O7yhv1hY%62>P(aEDQgV)Z=Ap1q)XF5a z3wiwX?6S?m6M*|KQr;+rCx5n#e`LVSG-hCb=mpODC{BhnAcoR#!8tR&gouY?K~6Q= zb4xebsQx6aP)&IvX$EJ8{f}Bb0KDk65iT#k*B$)7^~SLDMuAt8BxgT6NqZd(lXD@` zF-bd9`kv1v?^Kp2xaXPJR0ZPs8Z&;cCH4_pmaJg7Xk?(H(U>$vU}VJMXW_%!+VdtZ zAgH%3iXgg+NV{|EI!l!TW^d~OB^`zkCgSNT8bg* zVlCd8;Cf;h{>?(XPh3(@dv|DZ#8`%+1v7G%v_MiKO4Sm6&&ciQ9}w53=1RIDwTItI zH>ht}1FusFde)z(8pU)hEPSDW6DY6ITAAF1$q&>CJkL9g1f8ajCHqBxj^`@)tYHbt z49_J+ly;`m{eB}T%9mI%*RavTxyWRd&gL5}&*0g7r~ms$iRz-Z>Ci5+I{a>mOszE? 
zU!(6!j31?KEYuWvFf5~wd}UZ#Qw1*GRgDFt2ofFv&d#I78DSZkm7I-m@G5&y8)WWi zI0W!celh4%V&KphWsGX?6zH1pSuwg8gq#GO;t-9FMq}U5rPu&p7qtRwuGruBJ?ic& z0!aTzZyRrs9-^Tf8djFCP5t7AXd~1gWn+W=nngT}!k9doqpTK-mUdCSiF6KcdB8M7 ztK9m!^kafF>}wWADiSb$Yb5zdR%TiC8(m-+Uqja0sB@L1=QX2HkwLvwO%|w^+EP#q zDYRcCU0CsWs` z>m|iPjhTWL<&eMr+Z|4Cs6Wtv$V3#<@CQ^{ABD@>BQbDyS2ABp^63!nvN7eW= zBcbv}N4Y}(JP6!3p>6jQ3FQE-Hx&(S_&$i%EjgCEY5r3^|NDG_Sb zvnU!FpA!jfG#mhm3}Y3HuuLGNw#a=;{hF0Hi3!1QOkY10G#%wk_ zjE_ck+pN6N;B(ncts|e2lpHrux#^bu{48K%!^!JXL-8P0E(4LP!C>51KRX*4sR_E+ zbYB^Se@vQ}XM92B)xjgpVEmnKJGv;@xw?V`0k285C};$I9Uv2%y(A#a zEBQiPfHm-Zg$z!A78fkCB+1jT?4B@Y_C7ZfEQRmX4?0P%6xrwaK_e+Xo9#AhbNAqy zu8Xj1s0F1|eHX-C%WYCj;7+VcIbha|p>uh`0=Aa%Cx?~eO?D?;3yr2MycnL2_hwFD zqBRNvG{WMVC`Dq>g>`sS!45WTn0X4)IhX4!n7HxxxN3z;PLu|kM_KTfFKKj(J)GM}DmSWGt{GC-Dwo?efG)d} z=;=L=wf{FQg$zfQCt<8f6vjJC0>Wm=y(k)+Re+x0S(H9;8Ix@|t9N&BeQa*5V00vg zZA6$xuR01_mMUV5s;tDK6r<;gGO@}1CJRac-cP82`?&okP6fW3NS6}6PY{jZ3`_pZ z7NL^Tc%mq2;&9%zKtP%j^cOo@Fgj<&YIJW)Y9_RwbB;~v+{RdrnOqZ3ygp15 zh9Od?kl&YPJ~JPyX%3u_!5U8EO}n5p1~EU6LCj7xFxrq~NgnQ7NyHBmivmp46k+Bt z0d3qU%|y*Gn_>2UeUx3ub&iITbl0!5@kJvSBO8^GC!jPMz@9M{1d+rQz8R$CxQ5iq z9!kcj_8!2B=OwD@_+VLRZR(8#?o1i0HR@z&L!Kas?x;VsbSN`@0sHg)Y4UMh?f{R# z0hgS?h`_G0*C(xaEWs<97-`1_I-l{m%ov_P34RofUV4_{)+UW1MLJ24uq4tFuTc>0 ze_OgY@~D*CqFu|gonLl8^RZ`%4v10Ac3oEEUtxLNEZamT*-RAhWa#66Yp}wIAId=_ z4pZ2aBGDiXw~=-IXdub%CPpKPU-y~p2!;mS2VPg0(h+$16*H|Z4IWD^Bg~$(h$fW( zKVortZrN)>U-9_`O#%qxL%|Zw7@E$zqsjlqu{+4JcW||6jQ+z~iCw~(Ra#L6v1Yr5 z#=;VN+CuU@KLm;mT}AN+Y#ao>V}(r+i?=gtR53G=-%^ zAVOnZ){q*%`94E zZ-i1K!*q|DNZg}DC((v!p*zF8*d>4O3Voj!?!%8@aUB9i5`vZo85tLr=82(55-J}i z6Gb{BgU*d-0G%*-Begu6iYy;t6J3TW#z5oW1WAhbU^NzuX<|~Ku?h~)C}}M{i1L}; z%~l003V|u431#FTFiFc3e>P@n+?V>pDtcIk0U1zOhW02py{^BrF{++ee4U+LSj>8q z-J?{JLu@IACWn2#f)M7j7# zrLG$hqmh;n3}|AYzW+WLmL_3JBg9EUfupn9 zF5u6CNx_)0NX7=ZcXtAyXe&iKE9amYb`L2V9gSRv^-NCf);v%!3Lk|EG3qr95z?6P zT(#n`lJEJge#0*&l+CfFMyX%rA`Tv@Adj}sSP`&PW%)+Fi1X}-+lkby*c&zgTX zgCQ1vUGSww^D`9&(t{MGc${!?ZZ+PcveAGF)1_Gq**UM#L}zi5tNZYGn#>I#U?bt7 zJx+Xn`ACxjv6>Z)qfr4ri5~9;)--~R4G%qG%fTqAn+{!gpa`FK3$YZ8kz9HuC<+&r z=vvBL^p{k5zeuaY)rIN7v(pwICi&NSVpjIpjNh&3nIEps2$#1SlyDXs!a0Nnd zR<>oAYWZ`qFXc9A=FC&#m)6XBTnT1b%afs~Eec6_4T*qsy+A_BkT+EB+8RP~$vL3m zlfYjZe+DA4`UJNrjx+Z~-dhx)iwF%p-?T&zR~c5zr8D$f9$YyqL#mEST0w}WRj9sa zCJYh3r~F~|krJLYtgeVyR!ugRNFtdO1Ix3bDk&p@JsO(3J9siDBF8C~294fpO~;4M zcF;_ut>r4u98d?~}o*=!WB&BbFK zlzm0bsSJ8`Q(XC^ctj*E38XL~#Na_OR#Q|IGFQQTlIU5G&x(9OqWc=$yed5XXo_bk z36SnY`5Q9Zitic5Fe(T{^CGuq5R1UuFaT04(y${lO){oSq;1kJ8ABG?{VYP2oYuGc ztpnrtJPCHs#CvljybL8Zd_QQ-rF>Z$nF7|Ve{YZ`QP#*VXy8r_f8Zchgp^haH&F#E zqHA)eg6IS#xJ-eQG-H?3t&43j1%{F`(Lpkk6sPvdPSb^%pGZ@d3DW=4dDx3{OMEYE zi@@liE}Cz+w@Yi#jMn&I;<8(k6>n6#W;svXSjx-2S-6d%9&kn8#_AhWP3erjrmLP`Lu%TOx;(57R@>ryc3XB=n3J}9RTw7Yfzk9kd4q)lcoCTyMirnW0e5C{f+geal og1M^^{6^zD7H0zsO858w0Z0QWJJQ=U-2eap07*qoM6N<$g6+QOoB#j- From 24bed71f3cebfd9cd1fc8464bdfe8fbae8d3823c Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Mon, 6 Nov 2023 20:21:17 +0100 Subject: [PATCH 071/109] Add test github workflow --- .github/workflows/tests.yaml | 72 ++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 .github/workflows/tests.yaml diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml new file mode 100644 index 0000000..f59c8e4 --- /dev/null +++ b/.github/workflows/tests.yaml @@ -0,0 +1,72 @@ +#.github/workflows/tests.yaml +name: Unit Tests + +on: + pull_request: + branches: + - main + - develop + push: + branches: + - '**' # Every branch + +jobs: + tests: + if: github.repository_owner == 'paulovcmedeiros' + strategy: + 
+      fail-fast: true
+      matrix:
+        os: [ "ubuntu-latest" ]
+        env: [ "pytest" ]
+        python-version: [ "3.9" ]
+
+    name: "${{ matrix.os }}, python=${{ matrix.python-version }}"
+    runs-on: ${{ matrix.os }}
+
+    container:
+      image: python:${{ matrix.python-version }}-bullseye
+      env:
+        COVERAGE_FILE: ".coverage.${{ matrix.env }}.${{ matrix.python-version }}"
+
+    steps:
+      #----------------------------------------------
+      #       check-out repo
+      #----------------------------------------------
+      - name: Check out repository
+        uses: actions/checkout@v3
+
+      #----------------------------------------------
+      #  --- configure poetry & install project ----
+      #----------------------------------------------
+      - name: Install Poetry
+        uses: snok/install-poetry@v1
+        with:
+          virtualenvs-create: true
+          virtualenvs-in-project: true
+
+      - name: Load cached venv (if cache exists)
+        id: cached-poetry-dependencies
+        uses: actions/cache@v3
+        with:
+          path: .venv
+          key: ${{ github.job }}-venv-${{ runner.os }}-${{ matrix.python-version }}-${{ hashFiles('**/pyproject.toml', '**/poetry.toml') }}
+
+      - name: Install dependencies (if venv cache is not found)
+        if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true'
+        run: poetry install --no-interaction --no-root --only main,test
+
+      - name: Install the project itself
+        run: poetry install --no-interaction --only-root
+
+      #----------------------------------------------
+      #    run test suite and report coverage
+      #----------------------------------------------
+      - name: Run tests
+        run: |
+          poetry run pytest
+
+      - name: Upload test coverage report to Codecov
+        uses: codecov/codecov-action@v3
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+          files: ./.coverage.xml

From 6b869563874cf5f4510af2a78ae8e2fba5bfa981 Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros
Date: Mon, 6 Nov 2023 22:40:03 +0100
Subject: [PATCH 072/109] Keep chat context files in cache dir

---
 gpt_buddy_bot/__init__.py     | 20 +++++++++++++++-----
 gpt_buddy_bot/chat.py         |  3 ++-
 gpt_buddy_bot/chat_context.py |  4 +++-
 3 files changed, 20 insertions(+), 7 deletions(-)

diff --git a/gpt_buddy_bot/__init__.py b/gpt_buddy_bot/__init__.py
index bd1eda8..ff4e59c 100644
--- a/gpt_buddy_bot/__init__.py
+++ b/gpt_buddy_bot/__init__.py
@@ -9,22 +9,32 @@ class GeneralConstants:
+    # Main package info
+    RUN_ID = uuid.uuid4().hex
     PACKAGE_NAME = __name__
     VERSION = version(__name__)
-    APP_NAME = PACKAGE_NAME.title().replace("Gpt", "GPT").replace("_", " ")
+
+    # Main package directories
     PACKAGE_DIRECTORY = Path(__file__).parent
-    APP_DIR = PACKAGE_DIRECTORY / "app"
-    APP_PATH = APP_DIR / "app.py"
-    RUN_ID = uuid.uuid4().hex
     PACKAGE_CACHE_DIRECTORY = Path.home() / ".cache" / PACKAGE_NAME
     _PACKAGE_TMPDIR = tempfile.TemporaryDirectory()
     PACKAGE_TMPDIR = Path(_PACKAGE_TMPDIR.name)
+    PACKAGE_CHAT_CONTEXTS_DIRECTORY = PACKAGE_CACHE_DIRECTORY / "chat_contexts"
+
+    # Constants related to the app
+    APP_NAME = PACKAGE_NAME.title().replace("Gpt", "GPT").replace("_", " ")
+    APP_DIR = PACKAGE_DIRECTORY / "app"
+    APP_PATH = APP_DIR / "app.py"
     PARSED_ARGS_FILE = PACKAGE_TMPDIR / f"parsed_args_{RUN_ID}.pkl"
+
+    # Constants related to using the OpenAI API
+    OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
     TOKEN_USAGE_DATABASE = PACKAGE_CACHE_DIRECTORY / "token_usage.db"
 
+    # Initialise the package's directories
     PACKAGE_TMPDIR.mkdir(parents=True, exist_ok=True)
     PACKAGE_CACHE_DIRECTORY.mkdir(parents=True, exist_ok=True)
-    OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
+    PACKAGE_CHAT_CONTEXTS_DIRECTORY.mkdir(parents=True, exist_ok=True)
 
 
 # Initialize the OpenAI
API client diff --git a/gpt_buddy_bot/chat.py b/gpt_buddy_bot/chat.py index bfabf53..90db7d4 100644 --- a/gpt_buddy_bot/chat.py +++ b/gpt_buddy_bot/chat.py @@ -27,7 +27,8 @@ def __init__(self, configs: ChatOptions): if self.context_file_path is None: self.context_file_path = ( - GeneralConstants.PACKAGE_TMPDIR / f"embeddings_for_chat_{self.id}.csv" + GeneralConstants.PACKAGE_CHAT_CONTEXTS_DIRECTORY + / f"embeddings_for_chat_{self.id}.csv" ) if self.context_model is None: diff --git a/gpt_buddy_bot/chat_context.py b/gpt_buddy_bot/chat_context.py index d4855ac..deadf61 100644 --- a/gpt_buddy_bot/chat_context.py +++ b/gpt_buddy_bot/chat_context.py @@ -51,6 +51,7 @@ def add_to_history(self, text: str): _store_message_embedding_data( obj=text, embedding_model=self.embedding_model, + chat_model=self.parent_chat.model, embedding=embedding_request["embedding"], file_path=self.context_file_path, ) @@ -87,7 +88,7 @@ def request_embedding_from_openai(text: str, model: str): def _store_message_embedding_data( - obj, embedding_model: str, embedding: list[float], file_path: Path + obj, embedding_model: str, chat_model: str, embedding: list[float], file_path: Path ): """Store message and embeddings to file.""" # Adapted from Date: Tue, 7 Nov 2023 15:33:16 +0100 Subject: [PATCH 073/109] Assert that tests use tmp cache dir --- gpt_buddy_bot/__init__.py | 4 ++-- gpt_buddy_bot/chat.py | 3 +-- tests/conftest.py | 25 ++++++++++++++++++++++--- tests/smoke/test_app.py | 5 ++--- tests/smoke/test_commands.py | 5 +++-- tests/unit/test_chat.py | 18 +++++++++++++++++- 6 files changed, 47 insertions(+), 13 deletions(-) diff --git a/gpt_buddy_bot/__init__.py b/gpt_buddy_bot/__init__.py index ff4e59c..c7191f7 100644 --- a/gpt_buddy_bot/__init__.py +++ b/gpt_buddy_bot/__init__.py @@ -19,7 +19,7 @@ class GeneralConstants: PACKAGE_CACHE_DIRECTORY = Path.home() / ".cache" / PACKAGE_NAME _PACKAGE_TMPDIR = tempfile.TemporaryDirectory() PACKAGE_TMPDIR = Path(_PACKAGE_TMPDIR.name) - PACKAGE_CHAT_CONTEXTS_DIRECTORY = PACKAGE_CACHE_DIRECTORY / "chat_contexts" + CHAT_CACHE_DIR = PACKAGE_CACHE_DIRECTORY / "chats" # Constants related to the app APP_NAME = PACKAGE_NAME.title().replace("Gpt", "GPT").replace("_", " ") @@ -34,7 +34,7 @@ class GeneralConstants: # Initialise the package's directories PACKAGE_TMPDIR.mkdir(parents=True, exist_ok=True) PACKAGE_CACHE_DIRECTORY.mkdir(parents=True, exist_ok=True) - PACKAGE_CHAT_CONTEXTS_DIRECTORY.mkdir(parents=True, exist_ok=True) + CHAT_CACHE_DIR.mkdir(parents=True, exist_ok=True) # Initialize the OpenAI API client diff --git a/gpt_buddy_bot/chat.py b/gpt_buddy_bot/chat.py index 90db7d4..a401ecb 100644 --- a/gpt_buddy_bot/chat.py +++ b/gpt_buddy_bot/chat.py @@ -27,8 +27,7 @@ def __init__(self, configs: ChatOptions): if self.context_file_path is None: self.context_file_path = ( - GeneralConstants.PACKAGE_CHAT_CONTEXTS_DIRECTORY - / f"embeddings_for_chat_{self.id}.csv" + GeneralConstants.CHAT_CACHE_DIR / f"chat_{self.id}" / f"embeddings.csv" ) if self.context_model is None: diff --git a/tests/conftest.py b/tests/conftest.py index 1f31ed4..849c0b3 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -5,11 +5,12 @@ import openai import pytest +import gpt_buddy_bot from gpt_buddy_bot.chat import Chat from gpt_buddy_bot.chat_configs import ChatOptions -# Register markers +# Register markers and constants def pytest_configure(config): config.addinivalue_line( "markers", @@ -20,14 +21,23 @@ def pytest_configure(config): "no_embedding_create_mocking: mark test to not mock openai.Embedding.create", 
) + pytest.ORIGINAL_PACKAGE_CACHE_DIRECTORY = ( + gpt_buddy_bot.GeneralConstants.PACKAGE_CACHE_DIRECTORY + ) + -@pytest.fixture(scope="session", autouse=True) +@pytest.fixture(autouse=True) def set_env(): # Make sure we don't consume our tokens in tests os.environ["OPENAI_API_KEY"] = "INVALID_API_KEY" openai.api_key = os.environ["OPENAI_API_KEY"] +@pytest.fixture(autouse=True) +def mocked_general_constants(tmp_path): + gpt_buddy_bot.GeneralConstants.PACKAGE_CACHE_DIRECTORY = tmp_path / "cache" + + @pytest.fixture(autouse=True) def openai_api_request_mockers(request, mocker): """Mockers for OpenAI API requests. We don't want to consume our tokens in tests.""" @@ -76,7 +86,7 @@ def _mock_input(*args, **kwargs): mocker.patch("builtins.input", new=lambda _: _mock_input(user_input=user_input)) -@pytest.fixture +@pytest.fixture() def default_chat_configs(tmp_path): return ChatOptions( token_usage_db_path=tmp_path / "token_usage.db", # Don't use the regular db file @@ -84,6 +94,15 @@ def default_chat_configs(tmp_path): ) +@pytest.fixture() +def cli_args_overrides(default_chat_configs): + args = [] + for field, value in default_chat_configs.model_dump().items(): + if value is not None: + args = [*args, *[f"--{field.replace('_', '-')}", str(value)]] + return args + + @pytest.fixture() def default_chat(default_chat_configs): return Chat(configs=default_chat_configs) diff --git a/tests/smoke/test_app.py b/tests/smoke/test_app.py index 2225492..9ba9bb5 100644 --- a/tests/smoke/test_app.py +++ b/tests/smoke/test_app.py @@ -1,8 +1,7 @@ from gpt_buddy_bot.app import app -from gpt_buddy_bot.chat_configs import ChatOptions -def test_app(mocker): +def test_app(mocker, default_chat_configs): mocker.patch("streamlit.session_state", {}) - mocker.patch("pickle.load", return_value=ChatOptions()) + mocker.patch("pickle.load", return_value=default_chat_configs) app.run_app() diff --git a/tests/smoke/test_commands.py b/tests/smoke/test_commands.py index a882c25..0fd913c 100644 --- a/tests/smoke/test_commands.py +++ b/tests/smoke/test_commands.py @@ -5,8 +5,9 @@ @pytest.mark.parametrize("user_input", ("Hi!", ""), ids=("regular-input", "empty-input")) -def test_terminal_command(input_builtin_mocker): - args = ["terminal", "--report-accounting-when-done"] +def test_terminal_command(input_builtin_mocker, cli_args_overrides): + args = [*["terminal", "--report-accounting-when-done"], *cli_args_overrides] + args = list(dict.fromkeys(args)) main(args) diff --git a/tests/unit/test_chat.py b/tests/unit/test_chat.py index 4c48277..1e5b11e 100644 --- a/tests/unit/test_chat.py +++ b/tests/unit/test_chat.py @@ -1,6 +1,7 @@ import openai import pytest +from gpt_buddy_bot import GeneralConstants from gpt_buddy_bot.chat import CannotConnectToApiError, Chat from gpt_buddy_bot.chat_configs import ChatOptions @@ -18,6 +19,21 @@ def test_testbed_doesnt_actually_connect_to_openai(default_chat, input_builtin_m pytest.exit("Refuse to continue: Testbed is trying to connect to OpenAI API!") +@pytest.mark.order(2) +def test_we_are_using_tmp_cachedir(): + try: + assert ( + GeneralConstants.PACKAGE_CACHE_DIRECTORY + != pytest.ORIGINAL_PACKAGE_CACHE_DIRECTORY + ) + + except AssertionError: + pytest.exit( + "Refuse to continue: Tests attempted to use the package's real cache dir " + + f"({GeneralConstants.PACKAGE_CACHE_DIRECTORY})!" 
+ ) + + @pytest.mark.parametrize("user_input", ("Hi!", ""), ids=("regular-input", "empty-input")) def test_terminal_chat(default_chat, input_builtin_mocker): default_chat.start() @@ -25,7 +41,7 @@ def test_terminal_chat(default_chat, input_builtin_mocker): def test_chat_configs(default_chat, default_chat_configs): - assert default_chat.configs == default_chat_configs + assert default_chat._passed_configs == default_chat_configs @pytest.mark.no_chat_completion_create_mocking From 99e4f8dd60f7360519fbb29897d849c84b1cc536 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Tue, 7 Nov 2023 15:34:12 +0100 Subject: [PATCH 074/109] Changes to embeddings/msgs storage --- gpt_buddy_bot/app/app_page_templates.py | 5 +- gpt_buddy_bot/chat.py | 15 +++--- gpt_buddy_bot/chat_context.py | 67 +++++++++++++++---------- gpt_buddy_bot/tokens.py | 5 +- 4 files changed, 53 insertions(+), 39 deletions(-) diff --git a/gpt_buddy_bot/app/app_page_templates.py b/gpt_buddy_bot/app/app_page_templates.py index e33f351..82ba553 100644 --- a/gpt_buddy_bot/app/app_page_templates.py +++ b/gpt_buddy_bot/app/app_page_templates.py @@ -172,8 +172,9 @@ def render(self): if "page_title" not in self.state and len(self.chat_history) > 3: with st.spinner("Working out conversation topic..."): prompt = "Summarize the following msg exchange in max 4 words:\n" - prompt += "\n\x1f".join( - message["content"] for message in self.chat_history + prompt += "\n".join( + f"{message['role'].strip()}: {message['content'].strip()}" + for message in self.chat_history ) self.title = "".join(self.chat_obj.respond_system_prompt(prompt)) self.sidebar_title = self.title diff --git a/gpt_buddy_bot/chat.py b/gpt_buddy_bot/chat.py index a401ecb..29e1e3f 100644 --- a/gpt_buddy_bot/chat.py +++ b/gpt_buddy_bot/chat.py @@ -98,17 +98,14 @@ def respond_system_prompt(self, prompt: str): def yield_response_from_msg(self, prompt_as_msg: dict): """Yield response from a prompt.""" try: - yield from self._yield_response_from_msg(prompt_as_msg=prompt_as_msg) + yield from self._yield_response_from_msg(prompt_msg=prompt_as_msg) except openai.error.AuthenticationError as error: raise CannotConnectToApiError(self._auth_error_msg) from error - def _yield_response_from_msg(self, prompt_as_msg: dict): + def _yield_response_from_msg(self, prompt_msg: dict): """Yield response from a prompt. Assumes that OpenAI authentication works.""" - role = prompt_as_msg["role"] - prompt = prompt_as_msg["content"] - # Get appropriate context for prompt from the context handler - prompt_context_request = self.context_handler.get_context(text=prompt) + prompt_context_request = self.context_handler.get_context(msg=prompt_msg) context = prompt_context_request["context_messages"] # Update token_usage with tokens used in context handler for prompt @@ -116,7 +113,7 @@ def _yield_response_from_msg(self, prompt_as_msg: dict): prompt_context_request["tokens_usage"].values() ) - contextualised_prompt = [self.base_directive, *context, prompt_as_msg] + contextualised_prompt = [self.base_directive, *context, prompt_msg] # Update token_usage with tokens used in chat input self.token_usage[self.model]["input"] += sum( get_n_tokens(string=msg["content"], model=self.model) @@ -138,7 +135,7 @@ def _yield_response_from_msg(self, prompt_as_msg: dict): # Put current chat exchange in context handler's history history_entry_registration_tokens_usage = self.context_handler.add_to_history( - text=f"{role}: {prompt}. 
{self.assistant_name}: {full_reply_content}" + msg_list=[prompt_msg, {"role": "assistant", "content": full_reply_content}] ) # Update token_usage with tokens used in context handler for reply @@ -169,7 +166,7 @@ def _respond_prompt(self, prompt: str, role: str): prompt = prompt.strip() role = role.lower().strip() role2name = {"user": self.username, "system": self.system_name} - prompt_as_msg = {"role": role, "name": role2name[role], "content": prompt} + prompt_as_msg = {"role": role, "content": prompt} yield from self.yield_response_from_msg(prompt_as_msg) @property diff --git a/gpt_buddy_bot/chat_context.py b/gpt_buddy_bot/chat_context.py index deadf61..cb9dc40 100644 --- a/gpt_buddy_bot/chat_context.py +++ b/gpt_buddy_bot/chat_context.py @@ -1,5 +1,6 @@ import ast import csv +import itertools import json import time from collections import deque @@ -21,15 +22,15 @@ def __init__(self, parent_chat: "Chat"): self.history = deque(maxlen=50) self._tokens_usage = {"input": 0, "output": 0} - def add_to_history(self, text: str): - self.history.append(text) + def add_to_history(self, msg_list: list[dict]): + self.history += msg_list return self._tokens_usage - def get_context(self, text: str): - context_msg = _compose_context_msg( + def get_context(self, msg: dict): + context_msgs = _gather_context_msgs( history=self.history, system_name=self.parent_chat.system_name ) - return {"context_messages": [context_msg], "tokens_usage": self._tokens_usage} + return {"context_messages": context_msgs, "tokens_usage": self._tokens_usage} class EmbeddingBasedChatContext(BaseChatContext): @@ -46,10 +47,10 @@ def embedding_model(self): def context_file_path(self): return self.parent_chat.context_file_path - def add_to_history(self, text: str): - embedding_request = self.calculate_embedding(text=text) - _store_message_embedding_data( - obj=text, + def add_to_history(self, msg_list: list[dict]): + embedding_request = self._calculate_embedding_for_msgs(msg_list=msg_list) + _store_message_exchance_and_corresponding_embedding( + msg_list=msg_list, embedding_model=self.embedding_model, chat_model=self.parent_chat.model, embedding=embedding_request["embedding"], @@ -57,8 +58,8 @@ def add_to_history(self, text: str): ) return embedding_request["tokens_usage"] - def get_context(self, text: str): - embedding_request = self.calculate_embedding(text=text) + def get_context(self, msg: dict): + embedding_request = self._calculate_embedding_for_text(text=msg["content"]) context_messages = _find_context( embedding=embedding_request["embedding"], file_path=self.context_file_path, @@ -70,12 +71,18 @@ def get_context(self, text: str): "tokens_usage": embedding_request["tokens_usage"], } - def calculate_embedding(self, text: str): + def _calculate_embedding_for_msgs(self, msg_list: list[dict]): + text = "\n".join( + [f"{msg['role'].strip()}: {msg['content'].strip()}" for msg in msg_list] + ) + return self._calculate_embedding_for_text(text=text) + + def _calculate_embedding_for_text(self, text: str): return request_embedding_from_openai(text=text, model=self.embedding_model) def request_embedding_from_openai(text: str, model: str): - text.lower().replace("\n", " ") + text = text.lower().strip() embedding_request = openai.Embedding.create(input=[text], model=model) embedding = embedding_request["data"][0]["embedding"] @@ -87,25 +94,28 @@ def request_embedding_from_openai(text: str, model: str): return {"embedding": embedding, "tokens_usage": tokens_usage} -def _store_message_embedding_data( - obj, embedding_model: str, chat_model: 
str, embedding: list[float], file_path: Path +def _store_message_exchance_and_corresponding_embedding( + msg_list: list[dict], + embedding_model: str, + chat_model: str, + embedding: list[float], + file_path: Path, ): """Store message and embeddings to file.""" # Adapted from # See also . - embedding_file_entry_data = { "timestamp": int(time.time()), "embedding_model": f"{embedding_model}", "chat_model": f"{chat_model}", - "message": json.dumps(obj), + "message_exchange": json.dumps(msg_list), "embedding": json.dumps(embedding), } init_file = not file_path.exists() or file_path.stat().st_size == 0 write_mode = "w" if init_file else "a" - + file_path.parent.mkdir(parents=True, exist_ok=True) with open(file_path, write_mode, newline="") as file: writer = csv.DictWriter(file, fieldnames=embedding_file_entry_data.keys()) if init_file: @@ -113,11 +123,10 @@ def _store_message_embedding_data( writer.writerow(embedding_file_entry_data) -def _compose_context_msg(history: list[str], system_name: str): - context_msg_content = "You know that the following was said:\n\n" - context_msg_content += "\x1f\n".join(rf"{message}" for message in history) + "\n\n" - context_msg_content += "Answer the last message." - return {"role": "system", "name": system_name, "content": context_msg_content} +def _gather_context_msgs(history: list[dict], system_name: str): + sys_directives = "Considering the previous messages, answer the next message." + sys_msg = {"role": "system", "name": system_name, "content": sys_directives} + return [*history, sys_msg] def _find_context( @@ -133,7 +142,7 @@ def _find_context( return [] df = df.loc[df["embedding_model"] == parent_chat.context_model] - df["embedding"] = df.embedding.apply(ast.literal_eval).apply(np.array) + df["embedding"] = df["embedding"].apply(ast.literal_eval).apply(np.array) df["similarity"] = df["embedding"].apply(lambda x: cosine_similarity(x, embedding)) @@ -146,6 +155,12 @@ def _find_context( .sort_values("timestamp") ) df_context = pd.concat([df_similar_chats, df_last_n_chats]) - selected = df_context["message"].apply(ast.literal_eval).drop_duplicates().tolist() + selected_history = ( + df_context["message_exchange"].apply(ast.literal_eval).drop_duplicates() + ).tolist() - return [_compose_context_msg(history=selected, system_name=parent_chat.system_name)] + selected_history = list(itertools.chain.from_iterable(selected_history)) + + return _gather_context_msgs( + history=selected_history, system_name=parent_chat.system_name + ) diff --git a/gpt_buddy_bot/tokens.py b/gpt_buddy_bot/tokens.py index de59f77..d656f84 100644 --- a/gpt_buddy_bot/tokens.py +++ b/gpt_buddy_bot/tokens.py @@ -146,8 +146,9 @@ def get_usage_balance_dataframe(self): } df_rows.append(df_row) - df = _group_columns_by_prefix(pd.DataFrame(df_rows)) - df = _add_totals_row(df) + df = pd.DataFrame(df_rows) + if not df.empty: + df = _add_totals_row(_group_columns_by_prefix(df)) return df From d235870a4dbf239775cf693d747d5880760a273f Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Tue, 7 Nov 2023 18:22:20 +0100 Subject: [PATCH 075/109] Fix crash when deleting last page in app --- gpt_buddy_bot/app/multipage.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/gpt_buddy_bot/app/multipage.py b/gpt_buddy_bot/app/multipage.py index 1bbaeb2..ee3c2be 100644 --- a/gpt_buddy_bot/app/multipage.py +++ b/gpt_buddy_bot/app/multipage.py @@ -1,5 +1,6 @@ "Code for the creation streamlit apps with dynamically created pages." 
import contextlib +import json from abc import ABC, abstractmethod import openai @@ -50,7 +51,10 @@ def add_page(self, page: AppPage, selected: bool = True): def remove_page(self, page: AppPage): """Remove a page from the app.""" del self.pages[page.page_id] - self.register_selected_page(next(iter(self.pages.values()))) + try: + self.register_selected_page(next(iter(self.pages.values()))) + except StopIteration: + self.add_page() def register_selected_page(self, page: AppPage): """Register a page as selected.""" From c3bcef038a197d875d8dc532f874ee7f3e722b9d Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Tue, 7 Nov 2023 18:24:15 +0100 Subject: [PATCH 076/109] App able to retrieve past chat contexts --- gpt_buddy_bot/app/app_page_templates.py | 17 +++++++++++ gpt_buddy_bot/app/multipage.py | 40 ++++++++++++++++++++++--- 2 files changed, 53 insertions(+), 4 deletions(-) diff --git a/gpt_buddy_bot/app/app_page_templates.py b/gpt_buddy_bot/app/app_page_templates.py index 82ba553..50022c1 100644 --- a/gpt_buddy_bot/app/app_page_templates.py +++ b/gpt_buddy_bot/app/app_page_templates.py @@ -1,4 +1,5 @@ """Utilities for creating pages in a streamlit app.""" +import json import pickle import sys import uuid @@ -85,6 +86,12 @@ def chat_configs(self) -> ChatOptions: self.state["chat_configs"] = pickle.load(chat_configs_file) return self.state["chat_configs"] + @chat_configs.setter + def chat_configs(self, value: ChatOptions): + self.state["chat_configs"] = ChatOptions.model_validate(value) + if "chat_obj" in self.state: + del self.state["chat_obj"] + @property def chat_obj(self) -> Chat: """Return the chat object responsible for the queries in this page.""" @@ -178,3 +185,13 @@ def render(self): ) self.title = "".join(self.chat_obj.respond_system_prompt(prompt)) self.sidebar_title = self.title + + metadata = { + "page_title": self.title, + "sidebar_title": self.sidebar_title, + } + metadata_file = ( + self.chat_obj.context_file_path.parent / "metadata.json" + ) + with open(metadata_file, "w") as metadata_f: + json.dump(metadata, metadata_f, indent=2) diff --git a/gpt_buddy_bot/app/multipage.py b/gpt_buddy_bot/app/multipage.py index ee3c2be..1e3f39a 100644 --- a/gpt_buddy_bot/app/multipage.py +++ b/gpt_buddy_bot/app/multipage.py @@ -99,8 +99,10 @@ def init_openai_client(self): if not openai.api_key: st.write(":red[You need to provide a key to use the chat]") - def add_page(self, selected: bool = True): - return super().add_page(page=ChatBotPage(), selected=selected) + def add_page(self, page: ChatBotPage = None, selected: bool = True, **kwargs): + if page is None: + page = ChatBotPage(**kwargs) + return super().add_page(page=page, selected=selected) def handle_ui_page_selection(self): """Control page selection in the UI sidebar.""" @@ -204,6 +206,14 @@ def handle_ui_page_selection(self): new_chat_configs.update(updates_to_chat_configs) self.selected_page.chat_obj = Chat.from_dict(new_chat_configs) + def get_saved_chat_context_fpaths(self): + """Get the filepaths of saved chat contexts, sorted by last modified.""" + return sorted( + GeneralConstants.CHAT_CACHE_DIR.glob("chat_*/embeddings.csv"), + key=lambda fpath: fpath.stat().st_mtime, + reverse=True, + ) + def render(self, **kwargs): with st.sidebar: self.init_openai_client() @@ -211,7 +221,29 @@ def render(self, **kwargs): tab1, tab2 = st.tabs(["Chats", "Settings"]) self.sidebar_tabs = {"chats": tab1, "settings": tab2} with tab1: - # Create a new chat upon init or button press - if st.button(label=":heavy_plus_sign: New Chat") or not 
self.pages: + # Add button to create a new chat + new_chat_button = st.button(label=":heavy_plus_sign: New Chat") + + # Reopen chats from cache (if any) + if not st.session_state.get("saved_chats_reloaded", False): + for fpath in self.get_saved_chat_context_fpaths(): + metadata_file = fpath.parent / "metadata.json" + with open(metadata_file, "r") as metadata_file: + metadata = json.load(metadata_file) + + new_page = ChatBotPage( + page_title=metadata["page_title"], + sidebar_title=metadata["sidebar_title"], + ) + new_page.chat_configs = new_page.chat_configs.model_copy( + update={"context_file_path": fpath} + ) + + self.add_page(page=new_page) + st.session_state["saved_chats_reloaded"] = True + + # Create a new chat upon request or if there is none yet + if new_chat_button or not self.pages: self.add_page() + return super().render(**kwargs) From 56e8deb4046e5be337a3723878e839c85c327ee5 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Tue, 7 Nov 2023 21:44:17 +0100 Subject: [PATCH 077/109] Save/load chat configs. Add private mode. --- gpt_buddy_bot/app/app_page_templates.py | 19 +++---- gpt_buddy_bot/app/multipage.py | 27 ++++----- gpt_buddy_bot/chat.py | 76 +++++++++++++++++++++++-- gpt_buddy_bot/chat_configs.py | 9 ++- tests/conftest.py | 2 +- 5 files changed, 98 insertions(+), 35 deletions(-) diff --git a/gpt_buddy_bot/app/app_page_templates.py b/gpt_buddy_bot/app/app_page_templates.py index 50022c1..6fd637e 100644 --- a/gpt_buddy_bot/app/app_page_templates.py +++ b/gpt_buddy_bot/app/app_page_templates.py @@ -65,8 +65,14 @@ def render(self): class ChatBotPage(AppPage): - def __init__(self, sidebar_title: str = "", page_title: str = ""): + def __init__( + self, chat_obj: Chat = None, sidebar_title: str = "", page_title: str = "" + ): super().__init__(sidebar_title=sidebar_title, page_title=page_title) + + if chat_obj: + self.chat_obj = chat_obj + chat_title = f"### Chat #{self.page_number}" self._page_title = ( page_title @@ -186,12 +192,5 @@ def render(self): self.title = "".join(self.chat_obj.respond_system_prompt(prompt)) self.sidebar_title = self.title - metadata = { - "page_title": self.title, - "sidebar_title": self.sidebar_title, - } - metadata_file = ( - self.chat_obj.context_file_path.parent / "metadata.json" - ) - with open(metadata_file, "w") as metadata_f: - json.dump(metadata, metadata_f, indent=2) + self.chat_obj.metadata["page_title"] = self.title + self.chat_obj.metadata["sidebar_title"] = self.sidebar_title diff --git a/gpt_buddy_bot/app/multipage.py b/gpt_buddy_bot/app/multipage.py index 1e3f39a..ccd8faf 100644 --- a/gpt_buddy_bot/app/multipage.py +++ b/gpt_buddy_bot/app/multipage.py @@ -1,6 +1,5 @@ "Code for the creation streamlit apps with dynamically created pages." 
import contextlib -import json from abc import ABC, abstractmethod import openai @@ -50,11 +49,13 @@ def add_page(self, page: AppPage, selected: bool = True): def remove_page(self, page: AppPage): """Remove a page from the app.""" - del self.pages[page.page_id] try: self.register_selected_page(next(iter(self.pages.values()))) except StopIteration: self.add_page() + self.pages[page.page_id].chat_obj.private_mode = True + self.pages[page.page_id].chat_obj.clear_cache() + del self.pages[page.page_id] def register_selected_page(self, page: AppPage): """Register a page as selected.""" @@ -206,10 +207,10 @@ def handle_ui_page_selection(self): new_chat_configs.update(updates_to_chat_configs) self.selected_page.chat_obj = Chat.from_dict(new_chat_configs) - def get_saved_chat_context_fpaths(self): + def get_saved_chat_cache_dir_paths(self): """Get the filepaths of saved chat contexts, sorted by last modified.""" return sorted( - GeneralConstants.CHAT_CACHE_DIR.glob("chat_*/embeddings.csv"), + GeneralConstants.CHAT_CACHE_DIR.glob("chat_*/"), key=lambda fpath: fpath.stat().st_mtime, reverse=True, ) @@ -226,21 +227,15 @@ def render(self, **kwargs): # Reopen chats from cache (if any) if not st.session_state.get("saved_chats_reloaded", False): - for fpath in self.get_saved_chat_context_fpaths(): - metadata_file = fpath.parent / "metadata.json" - with open(metadata_file, "r") as metadata_file: - metadata = json.load(metadata_file) - + st.session_state["saved_chats_reloaded"] = True + for cache_dir_path in self.get_saved_chat_cache_dir_paths(): + chat = Chat.from_cache(cache_dir=cache_dir_path) new_page = ChatBotPage( - page_title=metadata["page_title"], - sidebar_title=metadata["sidebar_title"], + chat_obj=chat, + page_title=chat.metadata.get("page_title", "Recovered Chat"), + sidebar_title=chat.metadata.get("sidebar_title"), ) - new_page.chat_configs = new_page.chat_configs.model_copy( - update={"context_file_path": fpath} - ) - self.add_page(page=new_page) - st.session_state["saved_chats_reloaded"] = True # Create a new chat upon request or if there is none yet if new_chat_button or not self.pages: diff --git a/gpt_buddy_bot/chat.py b/gpt_buddy_bot/chat.py index 29e1e3f..874d708 100644 --- a/gpt_buddy_bot/chat.py +++ b/gpt_buddy_bot/chat.py @@ -1,6 +1,10 @@ #!/usr/bin/env python3 +import json +import shutil import uuid from collections import defaultdict +from filecmp import clear_cache +from pathlib import Path import openai @@ -15,21 +19,21 @@ class CannotConnectToApiError(Exception): class Chat: - def __init__(self, configs: ChatOptions): + def __init__(self, configs: ChatOptions = None): self.id = uuid.uuid4() + if configs is None: + configs = ChatOptions() + self._passed_configs = configs for field in self._passed_configs.model_fields: setattr(self, field, self._passed_configs[field]) + self.cache_dir.mkdir(parents=True, exist_ok=True) + self.token_usage = defaultdict(lambda: {"input": 0, "output": 0}) self.token_usage_db = TokenUsageDatabase(fpath=self.token_usage_db_path) - if self.context_file_path is None: - self.context_file_path = ( - GeneralConstants.CHAT_CACHE_DIR / f"chat_{self.id}" / f"embeddings.csv" - ) - if self.context_model is None: self.context_handler = BaseChatContext(parent_chat=self) elif self.context_model == "text-embedding-ada-002": @@ -37,6 +41,47 @@ def __init__(self, configs: ChatOptions): else: raise NotImplementedError(f"Unknown context model: {self.context_model}") + @property + def cache_dir(self): + return self._cache_dir + + @cache_dir.setter + def cache_dir(self, 
value): + if value is None: + value = GeneralConstants.CHAT_CACHE_DIR / f"chat_{self.id}" + self._cache_dir = Path(value) + + def clear_cache(self): + """Remove the cache directory.""" + shutil.rmtree(self.cache_dir, ignore_errors=True) + + @property + def configs_file(self): + """File to store the chat's configs.""" + return self.cache_dir / "configs.json" + + @property + def context_file_path(self): + return self.cache_dir / f"embeddings.csv" + + @property + def metadata_file(self): + """File to store the chat metadata.""" + return self.cache_dir / "metadata.json" + + @property + def metadata(self): + """Keep metadata associated with the chat.""" + try: + _ = self._metadata + except AttributeError: + try: + with open(self.metadata_file, "r") as f: + self._metadata = json.load(f) + except FileNotFoundError: + self._metadata = {} + return self._metadata + @property def configs(self): """Return the chat's configs after initialisation.""" @@ -72,6 +117,16 @@ def __del__(self): n_output_tokens=self.token_usage[model]["output"], ) + if self.private_mode: + self.clear_cache() + else: + # Store configs + with open(self.configs_file, "w") as configs_f: + configs_f.write(self.configs.model_dump_json(indent=2)) + # Store metadata + with open(self.metadata_file, "w") as metadata_f: + json.dump(self.metadata, metadata_f, indent=2) + @classmethod def from_dict(cls, configs: dict): return cls(configs=ChatOptions.model_validate(configs)) @@ -85,6 +140,15 @@ def from_cli_args(cls, cli_args): } return cls.from_dict(chat_opts) + @classmethod + def from_cache(cls, cache_dir: Path): + """Return a chat object from a cached chat.""" + try: + with open(cache_dir / "configs.json", "r") as configs_f: + return cls.from_dict(json.load(configs_f)) + except FileNotFoundError: + return cls() + @property def initial_greeting(self): return f"Hello! I'm {self.assistant_name}. How can I assist you today?" diff --git a/gpt_buddy_bot/chat_configs.py b/gpt_buddy_bot/chat_configs.py index 92f2d68..5abcd5e 100644 --- a/gpt_buddy_bot/chat_configs.py +++ b/gpt_buddy_bot/chat_configs.py @@ -110,9 +110,9 @@ class ChatOptions(OpenAiApiCallOptions): default="text-embedding-ada-002", description="OpenAI API model to use for embedding", ) - context_file_path: Optional[Path] = Field( + cache_dir: Optional[Path] = Field( default=None, - description="Path to the file to read/write the chat context from/to.", + description="Directory where to store/save info about the chat.", ) ai_instructions: tuple[str, ...] = Field( default=( @@ -131,3 +131,8 @@ class ChatOptions(OpenAiApiCallOptions): gt=0, description="Maximum number of attempts to connect to the OpenAI API", ) + private_mode: Optional[bool] = Field( + default=None, + description="Toggle private mode. 
If set to `True`, the chat will not " + + "be logged and the chat history will not be saved.", + ) diff --git a/tests/conftest.py b/tests/conftest.py index 849c0b3..8fffae3 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -90,7 +90,7 @@ def _mock_input(*args, **kwargs): def default_chat_configs(tmp_path): return ChatOptions( token_usage_db_path=tmp_path / "token_usage.db", # Don't use the regular db file - context_file_path=tmp_path / "context.json", # Don't use our context files + cache_dir=tmp_path, # Don't use our cache files ) From d11776b4d0a2ebc4b0c25f99cdb7c2adf54595ca Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Wed, 8 Nov 2023 01:10:23 +0100 Subject: [PATCH 078/109] Reload history on UI upon chat recovery from cache --- gpt_buddy_bot/app/.streamlit/config.toml | 3 ++ gpt_buddy_bot/app/app_page_templates.py | 55 +++++++++++++----------- gpt_buddy_bot/app/multipage.py | 16 ++++--- gpt_buddy_bot/chat.py | 51 ++++++++++++---------- gpt_buddy_bot/chat_context.py | 38 +++++++++++----- 5 files changed, 101 insertions(+), 62 deletions(-) diff --git a/gpt_buddy_bot/app/.streamlit/config.toml b/gpt_buddy_bot/app/.streamlit/config.toml index 94668a3..266f834 100644 --- a/gpt_buddy_bot/app/.streamlit/config.toml +++ b/gpt_buddy_bot/app/.streamlit/config.toml @@ -9,6 +9,9 @@ [server] runOnSave = true +[client] + showErrorDetails = true + [theme] base = "dark" # Colors diff --git a/gpt_buddy_bot/app/app_page_templates.py b/gpt_buddy_bot/app/app_page_templates.py index 6fd637e..9ca71e0 100644 --- a/gpt_buddy_bot/app/app_page_templates.py +++ b/gpt_buddy_bot/app/app_page_templates.py @@ -1,5 +1,4 @@ """Utilities for creating pages in a streamlit app.""" -import json import pickle import sys import uuid @@ -19,6 +18,10 @@ _USER_AVATAR_IMAGE = Image.open(_USER_AVATAR_FILE_PATH) +# Sentinel object for when a chat is recovered from cache +_RecoveredChat = object() + + class AppPage(ABC): """Abstract base class for pages in a streamlit app.""" @@ -26,10 +29,22 @@ def __init__(self, sidebar_title: str = "", page_title: str = ""): self.page_id = str(uuid.uuid4()) self.page_number = st.session_state.get("n_created_pages", 0) + 1 - self._sidebar_title = ( - sidebar_title if sidebar_title else f"Page {self.page_number}" + chat_number_for_title = f"### Chat #{self.page_number}" + if page_title is _RecoveredChat: + self._fallback_page_title = f"{chat_number_for_title.strip('#')} (Recovered)" + page_title = None + else: + self._fallback_page_title = ( + f"{GeneralConstants.APP_NAME} :speech_balloon:\n{chat_number_for_title}" + ) + if page_title: + self.title = page_title + + self._fallback_sidebar_title = ( + page_title if page_title else self._fallback_page_title ) - self._page_title = page_title if page_title else self.sidebar_title + if sidebar_title: + self.sidebar_title = sidebar_title @property def state(self): @@ -41,23 +56,22 @@ def state(self): @property def sidebar_title(self): """Get the title of the page in the sidebar.""" - return self.state.get("sidebar_title", self._sidebar_title) + return self.state.get("sidebar_title", self._fallback_sidebar_title) @sidebar_title.setter - def sidebar_title(self, value): + def sidebar_title(self, value: str): """Set the sidebar title for the page.""" self.state["sidebar_title"] = value @property def title(self): """Get the title of the page.""" - return self.state.get("page_title", self._page_title) + return self.state.get("page_title", self._fallback_page_title) @title.setter - def title(self, value): + def title(self, value: str): """Set 
the title of the page.""" self.state["page_title"] = value - st.title(value) @abstractmethod def render(self): @@ -73,14 +87,6 @@ def __init__( if chat_obj: self.chat_obj = chat_obj - chat_title = f"### Chat #{self.page_number}" - self._page_title = ( - page_title - if page_title - else f"{GeneralConstants.APP_NAME} :speech_balloon:\n{chat_title}" - ) - self._sidebar_title = sidebar_title if sidebar_title else chat_title - self.avatars = {"assistant": _ASSISTANT_AVATAR_IMAGE, "user": _USER_AVATAR_IMAGE} @property @@ -118,10 +124,12 @@ def chat_history(self) -> list[dict[str, str]]: return self.state["messages"] def render_chat_history(self): - """Render the chat history of the page.""" + """Render the chat history of the page. Do not include system messages.""" for message in self.chat_history: role = message["role"] - with st.chat_message(role, avatar=self.avatars[role]): + if role == "system": + continue + with st.chat_message(role, avatar=self.avatars.get(role)): st.markdown(message["content"]) def render(self): @@ -184,13 +192,12 @@ def render(self): # Reset title according to conversation initial contents if "page_title" not in self.state and len(self.chat_history) > 3: with st.spinner("Working out conversation topic..."): - prompt = "Summarize the following msg exchange in max 4 words:\n" - prompt += "\n".join( - f"{message['role'].strip()}: {message['content'].strip()}" - for message in self.chat_history + prompt = "Summarize the messages in max 4 words.\n" + self.title = "".join( + self.chat_obj.respond_system_prompt(prompt, add_to_history=False) ) - self.title = "".join(self.chat_obj.respond_system_prompt(prompt)) self.sidebar_title = self.title + st.title(self.title) self.chat_obj.metadata["page_title"] = self.title self.chat_obj.metadata["sidebar_title"] = self.sidebar_title diff --git a/gpt_buddy_bot/app/multipage.py b/gpt_buddy_bot/app/multipage.py index ccd8faf..7adf589 100644 --- a/gpt_buddy_bot/app/multipage.py +++ b/gpt_buddy_bot/app/multipage.py @@ -6,7 +6,7 @@ import streamlit as st from gpt_buddy_bot import GeneralConstants -from gpt_buddy_bot.app.app_page_templates import AppPage, ChatBotPage +from gpt_buddy_bot.app.app_page_templates import AppPage, ChatBotPage, _RecoveredChat from gpt_buddy_bot.chat import Chat from gpt_buddy_bot.chat_configs import ChatOptions @@ -34,7 +34,7 @@ def n_created_pages(self, value): st.session_state["n_created_pages"] = value @property - def pages(self) -> AppPage: + def pages(self) -> dict[AppPage]: """Return the pages of the app.""" if "available_pages" not in st.session_state: st.session_state["available_pages"] = {} @@ -131,7 +131,7 @@ def handle_ui_page_selection(self): with self.sidebar_tabs["settings"]: caption = f"\u2699\uFE0F Settings for Chat #{self.selected_page.page_number}" - if self.selected_page.title != self.selected_page._page_title: + if self.selected_page.title != self.selected_page._fallback_page_title: caption += f": {self.selected_page.title}" st.caption(caption) current_chat_configs = self.selected_page.chat_obj.configs @@ -210,7 +210,11 @@ def handle_ui_page_selection(self): def get_saved_chat_cache_dir_paths(self): """Get the filepaths of saved chat contexts, sorted by last modified.""" return sorted( - GeneralConstants.CHAT_CACHE_DIR.glob("chat_*/"), + ( + directory + for directory in GeneralConstants.CHAT_CACHE_DIR.glob("chat_*/") + if next(directory.iterdir(), False) + ), key=lambda fpath: fpath.stat().st_mtime, reverse=True, ) @@ -232,10 +236,12 @@ def render(self, **kwargs): chat = 
Chat.from_cache(cache_dir=cache_dir_path) new_page = ChatBotPage( chat_obj=chat, - page_title=chat.metadata.get("page_title", "Recovered Chat"), + page_title=chat.metadata.get("page_title", _RecoveredChat), sidebar_title=chat.metadata.get("sidebar_title"), ) + new_page.state["messages"] = chat.load_history() self.add_page(page=new_page) + self.register_selected_page(next(iter(self.pages.values()))) # Create a new chat upon request or if there is none yet if new_chat_button or not self.pages: diff --git a/gpt_buddy_bot/chat.py b/gpt_buddy_bot/chat.py index 874d708..76070da 100644 --- a/gpt_buddy_bot/chat.py +++ b/gpt_buddy_bot/chat.py @@ -78,7 +78,7 @@ def metadata(self): try: with open(self.metadata_file, "r") as f: self._metadata = json.load(f) - except FileNotFoundError: + except (FileNotFoundError, json.decoder.JSONDecodeError): self._metadata = {} return self._metadata @@ -117,7 +117,7 @@ def __del__(self): n_output_tokens=self.token_usage[model]["output"], ) - if self.private_mode: + if self.private_mode or not next(self.cache_dir.iterdir(), False): self.clear_cache() else: # Store configs @@ -145,28 +145,32 @@ def from_cache(cls, cache_dir: Path): """Return a chat object from a cached chat.""" try: with open(cache_dir / "configs.json", "r") as configs_f: - return cls.from_dict(json.load(configs_f)) + new = cls.from_dict(json.load(configs_f)) except FileNotFoundError: - return cls() + new = cls() + return new + + def load_history(self): + return self.context_handler.load_history() @property def initial_greeting(self): return f"Hello! I'm {self.assistant_name}. How can I assist you today?" - def respond_user_prompt(self, prompt: str): - yield from self._respond_prompt(prompt=prompt, role="user") + def respond_user_prompt(self, prompt: str, **kwargs): + yield from self._respond_prompt(prompt=prompt, role="user", **kwargs) - def respond_system_prompt(self, prompt: str): - yield from self._respond_prompt(prompt=prompt, role="system") + def respond_system_prompt(self, prompt: str, **kwargs): + yield from self._respond_prompt(prompt=prompt, role="system", **kwargs) - def yield_response_from_msg(self, prompt_as_msg: dict): + def yield_response_from_msg(self, prompt_as_msg: dict, **kwargs): """Yield response from a prompt.""" try: - yield from self._yield_response_from_msg(prompt_msg=prompt_as_msg) + yield from self._yield_response_from_msg(prompt_msg=prompt_as_msg, **kwargs) except openai.error.AuthenticationError as error: raise CannotConnectToApiError(self._auth_error_msg) from error - def _yield_response_from_msg(self, prompt_msg: dict): + def _yield_response_from_msg(self, prompt_msg: dict, add_to_history: bool = True): """Yield response from a prompt. 
Assumes that OpenAI authentication works.""" # Get appropriate context for prompt from the context handler prompt_context_request = self.context_handler.get_context(msg=prompt_msg) @@ -197,15 +201,19 @@ def _yield_response_from_msg(self, prompt_msg: dict): string=full_reply_content, model=self.model ) - # Put current chat exchange in context handler's history - history_entry_registration_tokens_usage = self.context_handler.add_to_history( - msg_list=[prompt_msg, {"role": "assistant", "content": full_reply_content}] - ) + if add_to_history: + # Put current chat exchange in context handler's history + history_entry_reg_tokens_usage = self.context_handler.add_to_history( + msg_list=[ + prompt_msg, + {"role": "assistant", "content": full_reply_content}, + ] + ) - # Update token_usage with tokens used in context handler for reply - self.token_usage[self.context_model]["output"] += sum( - history_entry_registration_tokens_usage.values() - ) + # Update token_usage with tokens used in context handler for reply + self.token_usage[self.context_model]["output"] += sum( + history_entry_reg_tokens_usage.values() + ) def start(self): """Start the chat.""" @@ -226,12 +234,11 @@ def start(self): def report_token_usage(self, current_chat: bool = True): self.token_usage_db.print_usage_costs(self.token_usage, current_chat=current_chat) - def _respond_prompt(self, prompt: str, role: str): + def _respond_prompt(self, prompt: str, role: str, **kwargs): prompt = prompt.strip() role = role.lower().strip() - role2name = {"user": self.username, "system": self.system_name} prompt_as_msg = {"role": role, "content": prompt} - yield from self.yield_response_from_msg(prompt_as_msg) + yield from self.yield_response_from_msg(prompt_as_msg, **kwargs) @property def _auth_error_msg(self): diff --git a/gpt_buddy_bot/chat_context.py b/gpt_buddy_bot/chat_context.py index cb9dc40..11c3a63 100644 --- a/gpt_buddy_bot/chat_context.py +++ b/gpt_buddy_bot/chat_context.py @@ -26,8 +26,12 @@ def add_to_history(self, msg_list: list[dict]): self.history += msg_list return self._tokens_usage + def load_history(self): + """Load the chat history.""" + return self.history + def get_context(self, msg: dict): - context_msgs = _gather_context_msgs( + context_msgs = _make_list_of_context_msgs( history=self.history, system_name=self.parent_chat.system_name ) return {"context_messages": context_msgs, "tokens_usage": self._tokens_usage} @@ -49,7 +53,7 @@ def context_file_path(self): def add_to_history(self, msg_list: list[dict]): embedding_request = self._calculate_embedding_for_msgs(msg_list=msg_list) - _store_message_exchance_and_corresponding_embedding( + _store_message_exchange_and_corresponding_embedding( msg_list=msg_list, embedding_model=self.embedding_model, chat_model=self.parent_chat.model, @@ -58,6 +62,16 @@ def add_to_history(self, msg_list: list[dict]): ) return embedding_request["tokens_usage"] + def load_history(self): + """Load the chat history from file.""" + try: + df = pd.read_csv(self.context_file_path) + except FileNotFoundError: + return [] + selected_history = (df["message_exchange"].apply(ast.literal_eval)).tolist() + selected_history = list(itertools.chain.from_iterable(selected_history)) + return selected_history + def get_context(self, msg: dict): embedding_request = self._calculate_embedding_for_text(text=msg["content"]) context_messages = _find_context( @@ -94,7 +108,7 @@ def request_embedding_from_openai(text: str, model: str): return {"embedding": embedding, "tokens_usage": tokens_usage} -def 
_store_message_exchance_and_corresponding_embedding( +def _store_message_exchange_and_corresponding_embedding( msg_list: list[dict], embedding_model: str, chat_model: str, @@ -123,8 +137,8 @@ def _store_message_exchance_and_corresponding_embedding( writer.writerow(embedding_file_entry_data) -def _gather_context_msgs(history: list[dict], system_name: str): - sys_directives = "Considering the previous messages, answer the next message." +def _make_list_of_context_msgs(history: list[dict], system_name: str): + sys_directives = "Considering the previous messages, answer the next message:" sys_msg = {"role": "system", "name": system_name, "content": sys_directives} return [*history, sys_msg] @@ -133,8 +147,8 @@ def _find_context( file_path: Path, embedding: list[float], parent_chat: "Chat", - n_related_entries: int = 4, - n_directly_preceeding_exchanges: int = 2, + n_related_msg_exchanges: int = 3, + n_tailing_history_exchanges: int = 2, ): try: df = pd.read_csv(file_path) @@ -146,14 +160,16 @@ def _find_context( df["similarity"] = df["embedding"].apply(lambda x: cosine_similarity(x, embedding)) - # Get the last n messages added to the history - df_last_n_chats = df.tail(n_directly_preceeding_exchanges) + # Get the last messages added to the history + df_last_n_chats = df.tail(n_tailing_history_exchanges) + # Get the most similar messages df_similar_chats = ( df.sort_values("similarity", ascending=False) - .head(n_related_entries) + .head(n_related_msg_exchanges) .sort_values("timestamp") ) + df_context = pd.concat([df_similar_chats, df_last_n_chats]) selected_history = ( df_context["message_exchange"].apply(ast.literal_eval).drop_duplicates() @@ -161,6 +177,6 @@ def _find_context( selected_history = list(itertools.chain.from_iterable(selected_history)) - return _gather_context_msgs( + return _make_list_of_context_msgs( history=selected_history, system_name=parent_chat.system_name ) From 1101ba67e11b5b118b5cfb342568645c2d333648 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Wed, 8 Nov 2023 12:37:34 +0100 Subject: [PATCH 079/109] Minor fixes --- gpt_buddy_bot/app/app_page_templates.py | 10 +++------- gpt_buddy_bot/app/multipage.py | 5 +++-- gpt_buddy_bot/chat.py | 4 ++-- 3 files changed, 8 insertions(+), 11 deletions(-) diff --git a/gpt_buddy_bot/app/app_page_templates.py b/gpt_buddy_bot/app/app_page_templates.py index 9ca71e0..b41c2a7 100644 --- a/gpt_buddy_bot/app/app_page_templates.py +++ b/gpt_buddy_bot/app/app_page_templates.py @@ -29,20 +29,16 @@ def __init__(self, sidebar_title: str = "", page_title: str = ""): self.page_id = str(uuid.uuid4()) self.page_number = st.session_state.get("n_created_pages", 0) + 1 - chat_number_for_title = f"### Chat #{self.page_number}" + chat_number_for_title = f"Chat #{self.page_number}" if page_title is _RecoveredChat: self._fallback_page_title = f"{chat_number_for_title.strip('#')} (Recovered)" page_title = None else: - self._fallback_page_title = ( - f"{GeneralConstants.APP_NAME} :speech_balloon:\n{chat_number_for_title}" - ) + self._fallback_page_title = chat_number_for_title if page_title: self.title = page_title - self._fallback_sidebar_title = ( - page_title if page_title else self._fallback_page_title - ) + self._fallback_sidebar_title = page_title if page_title else chat_number_for_title if sidebar_title: self.sidebar_title = sidebar_title diff --git a/gpt_buddy_bot/app/multipage.py b/gpt_buddy_bot/app/multipage.py index 7adf589..2990073 100644 --- a/gpt_buddy_bot/app/multipage.py +++ b/gpt_buddy_bot/app/multipage.py @@ -221,9 +221,10 @@ 
def get_saved_chat_cache_dir_paths(self): def render(self, **kwargs): with st.sidebar: + st.title(GeneralConstants.APP_NAME) self.init_openai_client() # Create a sidebar with tabs for chats and settings - tab1, tab2 = st.tabs(["Chats", "Settings"]) + tab1, tab2 = st.tabs(["Chats", "Settings for Current Chat"]) self.sidebar_tabs = {"chats": tab1, "settings": tab2} with tab1: # Add button to create a new chat @@ -241,7 +242,7 @@ def render(self, **kwargs): ) new_page.state["messages"] = chat.load_history() self.add_page(page=new_page) - self.register_selected_page(next(iter(self.pages.values()))) + self.register_selected_page(next(iter(self.pages.values()), None)) # Create a new chat upon request or if there is none yet if new_chat_button or not self.pages: diff --git a/gpt_buddy_bot/chat.py b/gpt_buddy_bot/chat.py index 76070da..9475ced 100644 --- a/gpt_buddy_bot/chat.py +++ b/gpt_buddy_bot/chat.py @@ -3,7 +3,6 @@ import shutil import uuid from collections import defaultdict -from filecmp import clear_cache from pathlib import Path import openai @@ -124,8 +123,9 @@ def __del__(self): with open(self.configs_file, "w") as configs_f: configs_f.write(self.configs.model_dump_json(indent=2)) # Store metadata + metadata = self.metadata # Trigger loading metadata if not yet done with open(self.metadata_file, "w") as metadata_f: - json.dump(self.metadata, metadata_f, indent=2) + json.dump(metadata, metadata_f, indent=2) @classmethod def from_dict(cls, configs: dict): From ede0d706f844fc5c4d62aa497dfdf28d87ecf06c Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Wed, 8 Nov 2023 14:08:50 +0100 Subject: [PATCH 080/109] Use light theme by default --- gpt_buddy_bot/app/.streamlit/config.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gpt_buddy_bot/app/.streamlit/config.toml b/gpt_buddy_bot/app/.streamlit/config.toml index 266f834..612800d 100644 --- a/gpt_buddy_bot/app/.streamlit/config.toml +++ b/gpt_buddy_bot/app/.streamlit/config.toml @@ -13,6 +13,6 @@ showErrorDetails = true [theme] - base = "dark" + base = "light" # Colors primaryColor = "#2BB5E8" From 5c525eee72ef50bce0e0d091021fc7d3ecbb9997 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Wed, 8 Nov 2023 14:09:32 +0100 Subject: [PATCH 081/109] Update function to estimate n tokens --- gpt_buddy_bot/chat.py | 12 ++++++------ gpt_buddy_bot/tokens.py | 32 +++++++++++++++++++++++++++----- 2 files changed, 33 insertions(+), 11 deletions(-) diff --git a/gpt_buddy_bot/chat.py b/gpt_buddy_bot/chat.py index 9475ced..4b4a5a8 100644 --- a/gpt_buddy_bot/chat.py +++ b/gpt_buddy_bot/chat.py @@ -10,7 +10,7 @@ from . 
import GeneralConstants
 from .chat_configs import ChatOptions, OpenAiApiCallOptions
 from .chat_context import BaseChatContext, EmbeddingBasedChatContext
-from .tokens import TokenUsageDatabase, get_n_tokens
+from .tokens import TokenUsageDatabase, get_n_tokens_from_msgs
@@ -183,9 +183,8 @@ def _yield_response_from_msg(self, prompt_msg: dict, add_to_history: bool = True
         contextualised_prompt = [self.base_directive, *context, prompt_msg]

         # Update token_usage with tokens used in chat input
-        self.token_usage[self.model]["input"] += sum(
-            get_n_tokens(string=msg["content"], model=self.model)
-            for msg in contextualised_prompt
+        self.token_usage[self.model]["input"] += get_n_tokens_from_msgs(
+            messages=contextualised_prompt, model=self.model
         )

         # Make API request and yield response chunks
@@ -197,8 +196,9 @@ def _yield_response_from_msg(self, prompt_msg: dict, add_to_history: bool = True
         yield chunk

         # Update token_usage with tokens used in chat output
-        self.token_usage[self.model]["output"] += get_n_tokens(
-            string=full_reply_content, model=self.model
+        reply_as_msg = {"role": "assistant", "content": full_reply_content}
+        self.token_usage[self.model]["output"] += get_n_tokens_from_msgs(
+            messages=[reply_as_msg], model=self.model
         )

         if add_to_history:
diff --git a/gpt_buddy_bot/tokens.py b/gpt_buddy_bot/tokens.py
index d656f84..d208983 100644
--- a/gpt_buddy_bot/tokens.py
+++ b/gpt_buddy_bot/tokens.py
@@ -189,11 +189,33 @@ def print_usage_costs(self, token_usage: dict, current_chat: bool = True):
             continue
         _print_df(df=df, header=header)

-
-def get_n_tokens(string: str, model: str) -> int:
-    """Returns the number of tokens in a text string."""
-    encoding = tiktoken.encoding_for_model(model)
-    return len(encoding.encode(string))
+    print()
+    print("Note: These are only estimates. Actual costs may vary.")
+    link = "https://platform.openai.com/account/usage"
+    print(f"Please visit <{link}> to follow your actual usage and costs.")
+
+
+def get_n_tokens_from_msgs(messages: list[dict], model: str):
+    """Returns the number of tokens used by a list of messages."""
+    # Adapted from OpenAI's cookbook example on counting tokens with tiktoken
+    try:
+        encoding = tiktoken.encoding_for_model(model)
+    except KeyError:
+        encoding = tiktoken.get_encoding("cl100k_base")
+
+    # OpenAI's original function was implemented for gpt-3.5-turbo-0613, but we'll use
+    # it for all models for now. We are only interested in estimates, after all.
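+    # Count the tokens in every field of each message (role, content and, when
+    # present, name), then add the fixed per-message and reply-priming overheads below.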
+    num_tokens = 0
+    for message in messages:
+        # every message follows <im_start>{role/name}\n{content}<im_end>\n
+        num_tokens += 4
+        for key, value in message.items():
+            num_tokens += len(encoding.encode(value))
+            if key == "name":  # if there's a name, the role is omitted
+                num_tokens += -1  # role is always required and always 1 token
+    num_tokens += 2  # every reply is primed with <im_start>assistant
+    return num_tokens


 def _group_columns_by_prefix(df):

From 1846bfe9c4b263d2660cf31f2fb568e3b35e9092 Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros
Date: Wed, 8 Nov 2023 14:10:06 +0100
Subject: [PATCH 082/109] Update list of supported openai models

---
 gpt_buddy_bot/chat_configs.py | 16 ++++++++++++----
 gpt_buddy_bot/tokens.py | 9 +++++++--
 2 files changed, 19 insertions(+), 6 deletions(-)

diff --git a/gpt_buddy_bot/chat_configs.py b/gpt_buddy_bot/chat_configs.py
index 5abcd5e..acd5b40 100644
--- a/gpt_buddy_bot/chat_configs.py
+++ b/gpt_buddy_bot/chat_configs.py
@@ -67,10 +67,18 @@ def __getitem__(self, item):
 class OpenAiApiCallOptions(BaseConfigModel):
     _openai_url = "https://platform.openai.com/docs/api-reference/chat/create#chat-create"
-
-    model: Literal["gpt-3.5-turbo", "gpt-4"] = Field(
-        default="gpt-3.5-turbo",
-        description=f"OpenAI LLM model to use. See {_openai_url}-model",
+    _models_url = "https://platform.openai.com/docs/models"
+
+    model: Literal[
+        "gpt-3.5-turbo-1106",
+        "gpt-3.5-turbo-16k",  # Will point to gpt-3.5-turbo-1106 starting Dec 11, 2023
+        "gpt-3.5-turbo",  # Will point to gpt-3.5-turbo-1106 starting Dec 11, 2023
+        "gpt-4-1106-preview",
+        "gpt-4",
+        "gpt-4-32k",
+    ] = Field(
+        default="gpt-3.5-turbo-1106",
+        description=f"OpenAI LLM model to use. See {_openai_url}-model and {_models_url}",
     )
     max_tokens: Optional[int] = Field(
         default=None, gt=0, description=f"See <{_openai_url}-max_tokens>"
diff --git a/gpt_buddy_bot/tokens.py b/gpt_buddy_bot/tokens.py
index d208983..8949500 100644
--- a/gpt_buddy_bot/tokens.py
+++ b/gpt_buddy_bot/tokens.py
@@ -5,9 +5,14 @@
 import pandas as pd
 import tiktoken

-PRICE_PER_THOUSAND_TOKENS = {
+# See the OpenAI pricing page for the latest prices.
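+# Prices are given in USD per 1000 tokens.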
+PRICE_PER_K_TOKENS = { "gpt-3.5-turbo": {"input": 0.0015, "output": 0.002}, + "gpt-3.5-turbo-16k": {"input": 0.001, "output": 0.002}, + "gpt-3.5-turbo-1106": {"input": 0.001, "output": 0.002}, + "gpt-4-1106-preview": {"input": 0.03, "output": 0.06}, "gpt-4": {"input": 0.03, "output": 0.06}, + "gpt-4-32k": {"input": 0.06, "output": 0.12}, "text-embedding-ada-002": {"input": 0.0001, "output": 0.0}, None: {"input": 0.0, "output": 0.0}, } @@ -17,7 +22,7 @@ class TokenUsageDatabase: def __init__(self, fpath: Path): self.fpath = fpath self.token_price = {} - for model, price_per_k_tokens in PRICE_PER_THOUSAND_TOKENS.items(): + for model, price_per_k_tokens in PRICE_PER_K_TOKENS.items(): self.token_price[model] = { k: v / 1000.0 for k, v in price_per_k_tokens.items() } From c86d484ca60b8138c780e04091484301c5e25878 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Wed, 8 Nov 2023 14:10:52 +0100 Subject: [PATCH 083/109] Include all supported models in tests --- tests/conftest.py | 14 +++++++++++++- tests/unit/test_chat.py | 13 +------------ 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 8fffae3..e350e1a 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -86,9 +86,21 @@ def _mock_input(*args, **kwargs): mocker.patch("builtins.input", new=lambda _: _mock_input(user_input=user_input)) +@pytest.fixture(params=ChatOptions.get_allowed_values("model")) +def llm_model(request): + return request.param + + +@pytest.fixture(params=ChatOptions.get_allowed_values("context_model")) +def context_model(request): + return request.param + + @pytest.fixture() -def default_chat_configs(tmp_path): +def default_chat_configs(llm_model, context_model, tmp_path): return ChatOptions( + model=llm_model, + context_model=context_model, token_usage_db_path=tmp_path / "token_usage.db", # Don't use the regular db file cache_dir=tmp_path, # Don't use our cache files ) diff --git a/tests/unit/test_chat.py b/tests/unit/test_chat.py index 1e5b11e..9bbb2da 100644 --- a/tests/unit/test_chat.py +++ b/tests/unit/test_chat.py @@ -2,8 +2,7 @@ import pytest from gpt_buddy_bot import GeneralConstants -from gpt_buddy_bot.chat import CannotConnectToApiError, Chat -from gpt_buddy_bot.chat_configs import ChatOptions +from gpt_buddy_bot.chat import CannotConnectToApiError @pytest.mark.order(1) @@ -53,13 +52,3 @@ def _mock_openai_ChatCompletion_create(*args, **kwargs): mocker.patch("openai.ChatCompletion.create", new=_mock_openai_ChatCompletion_create) with pytest.raises(CannotConnectToApiError, match=default_chat._auth_error_msg): default_chat.start() - - -@pytest.mark.parametrize("context_model", ChatOptions.get_allowed_values("context_model")) -@pytest.mark.parametrize("user_input", ("regular-input",)) -def test_chat_context_handlers(default_chat_configs, input_builtin_mocker, context_model): - chat_configs_dict = default_chat_configs.model_dump() - chat_configs_dict.update({"context_model": context_model}) - - chat = Chat.from_dict(chat_configs_dict) - chat.start() From 240c10541d990d7e3a3ae68939fdbe3b4d030dbb Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Wed, 8 Nov 2023 14:39:15 +0100 Subject: [PATCH 084/109] Do not lower text when calc embeddings Probably better in order to recognise, e.g., acronyms. 
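A quick way to see the effect this change targets is to compare the embedding
of a case-sensitive term with that of its lowercased form. A minimal sketch,
assuming a valid OPENAI_API_KEY in the environment and reusing this package's
own `request_embedding_from_openai` helper together with `cosine_similarity`
from `openai.embeddings_utils` (both already used in the code above):

    from openai.embeddings_utils import cosine_similarity

    from gpt_buddy_bot.chat_context import request_embedding_from_openai

    # "WHO" (the acronym) and "who" (the pronoun) mean different things;
    # lowercasing the text before embedding would collapse them into one vector.
    acronym = request_embedding_from_openai(text="WHO", model="text-embedding-ada-002")
    pronoun = request_embedding_from_openai(text="who", model="text-embedding-ada-002")
    print(cosine_similarity(acronym["embedding"], pronoun["embedding"]))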
--- gpt_buddy_bot/chat_context.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gpt_buddy_bot/chat_context.py b/gpt_buddy_bot/chat_context.py index 11c3a63..fd96ef3 100644 --- a/gpt_buddy_bot/chat_context.py +++ b/gpt_buddy_bot/chat_context.py @@ -96,7 +96,7 @@ def _calculate_embedding_for_text(self, text: str): def request_embedding_from_openai(text: str, model: str): - text = text.lower().strip() + text = text.strip() embedding_request = openai.Embedding.create(input=[text], model=model) embedding = embedding_request["data"][0]["embedding"] From 6f33fe39d4a7f10eaec60fd502085ed6dd3b12d0 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Wed, 8 Nov 2023 16:45:13 +0100 Subject: [PATCH 085/109] Add `retry_api_call` decorator --- gpt_buddy_bot/app/app_page_templates.py | 3 +- gpt_buddy_bot/chat.py | 38 ++++++---------------- gpt_buddy_bot/chat_context.py | 1 + gpt_buddy_bot/general_utils.py | 43 +++++++++++++++++++++++++ 4 files changed, 56 insertions(+), 29 deletions(-) create mode 100644 gpt_buddy_bot/general_utils.py diff --git a/gpt_buddy_bot/app/app_page_templates.py b/gpt_buddy_bot/app/app_page_templates.py index b41c2a7..00abdd7 100644 --- a/gpt_buddy_bot/app/app_page_templates.py +++ b/gpt_buddy_bot/app/app_page_templates.py @@ -8,8 +8,9 @@ from PIL import Image from gpt_buddy_bot import GeneralConstants -from gpt_buddy_bot.chat import CannotConnectToApiError, Chat +from gpt_buddy_bot.chat import Chat from gpt_buddy_bot.chat_configs import ChatOptions +from gpt_buddy_bot.general_utils import CannotConnectToApiError _AVATAR_FILES_DIR = GeneralConstants.APP_DIR / "data" _ASSISTANT_AVATAR_FILE_PATH = _AVATAR_FILES_DIR / "assistant_avatar.png" diff --git a/gpt_buddy_bot/chat.py b/gpt_buddy_bot/chat.py index 4b4a5a8..8a7c570 100644 --- a/gpt_buddy_bot/chat.py +++ b/gpt_buddy_bot/chat.py @@ -10,13 +10,10 @@ from . import GeneralConstants from .chat_configs import ChatOptions, OpenAiApiCallOptions from .chat_context import BaseChatContext, EmbeddingBasedChatContext +from .general_utils import retry_api_call from .tokens import TokenUsageDatabase, get_n_tokens_from_msgs -class CannotConnectToApiError(Exception): - """Error raised when the package cannot connect to the OpenAI API.""" - - class Chat: def __init__(self, configs: ChatOptions = None): self.id = uuid.uuid4() @@ -249,32 +246,17 @@ def _auth_error_msg(self): def _make_api_chat_completion_call(conversation: list, chat_obj: Chat): - success = False - api_call_args = {} for field in OpenAiApiCallOptions.model_fields: if getattr(chat_obj, field) is not None: api_call_args[field] = getattr(chat_obj, field) - n_attempts = 0 - max_n_att = chat_obj.api_connection_max_n_attempts - while not success: - n_attempts += 1 - try: - for completion_chunk in openai.ChatCompletion.create( - messages=conversation, stream=True, **api_call_args - ): - reply_chunk = getattr(completion_chunk.choices[0].delta, "content", "") - yield reply_chunk - except ( - openai.error.ServiceUnavailableError, - openai.error.Timeout, - ) as error: - if n_attempts < max_n_att: - print( - f"\n > {error}. Making new attempt ({n_attempts+1}/{max_n_att})..." 
-                )
-            else:
-                raise CannotConnectToApiError(chat_obj._auth_error_msg) from error
-        else:
-            success = True
+    @retry_api_call(auth_error_msg=chat_obj._auth_error_msg)
+    def stream_reply(conversation, **api_call_args):
+        for completion_chunk in openai.ChatCompletion.create(
+            messages=conversation, stream=True, **api_call_args
+        ):
+            reply_chunk = getattr(completion_chunk.choices[0].delta, "content", "")
+            yield reply_chunk
+
+    yield from stream_reply(conversation, **api_call_args)
diff --git a/gpt_buddy_bot/chat_context.py b/gpt_buddy_bot/chat_context.py
index fd96ef3..caa351b 100644
--- a/gpt_buddy_bot/chat_context.py
+++ b/gpt_buddy_bot/chat_context.py
@@ -95,6 +95,7 @@ def _calculate_embedding_for_text(self, text: str):
         return request_embedding_from_openai(text=text, model=self.embedding_model)


+@retry_api_call()
 def request_embedding_from_openai(text: str, model: str):
     text = text.strip()
     embedding_request = openai.Embedding.create(input=[text], model=model)
diff --git a/gpt_buddy_bot/general_utils.py b/gpt_buddy_bot/general_utils.py
new file mode 100644
index 0000000..4737d81
--- /dev/null
+++ b/gpt_buddy_bot/general_utils.py
@@ -0,0 +1,43 @@
+from functools import wraps
+
+import openai
+
+
+class CannotConnectToApiError(Exception):
+    """Error raised when the package cannot connect to the OpenAI API."""
+
+
+def retry_api_call(max_n_attempts=5, auth_error_msg="Problems connecting to OpenAI API."):
+    """Retry connecting to the API up to a maximum number of times."""
+
+    def retry_api_call_decorator(function):
+        """Wrap `function`, retrying the API call if the connection fails."""
+
+        @wraps(function)
+        def wrapper(*args, **kwargs):
+            n_attempts = 0
+            success = False
+            while not success:
+                n_attempts += 1
+                try:
+                    function_rtn = function(*args, **kwargs)
+                except (
+                    openai.error.ServiceUnavailableError,
+                    openai.error.Timeout,
+                    openai.error.APIError,
+                ) as error:
+                    if n_attempts < max_n_attempts:
+                        print(
+                            f"\n > {error}. "
+                            + f"Making new attempt ({n_attempts+1}/{max_n_attempts})..."
+                        )
+                    else:
+                        raise CannotConnectToApiError(auth_error_msg) from error
+                else:
+                    success = True
+
+            return function_rtn
+
+        return wrapper
+
+    return retry_api_call_decorator

From 6245e90a44855457854261ec3d1e0719bc4acd32 Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros
Date: Wed, 8 Nov 2023 22:35:37 +0100
Subject: [PATCH 086/109] Create `retry_api_call` decorator

---
 gpt_buddy_bot/chat.py | 2 +-
 gpt_buddy_bot/chat_context.py | 36 ++++++++++++++++------
 gpt_buddy_bot/general_utils.py | 51 ++++++++++++++++++++++------------
 tests/unit/test_chat.py | 5 ++--
 4 files changed, 66 insertions(+), 28 deletions(-)

diff --git a/gpt_buddy_bot/chat.py b/gpt_buddy_bot/chat.py
index 8a7c570..e6fc437 100644
--- a/gpt_buddy_bot/chat.py
+++ b/gpt_buddy_bot/chat.py
@@ -10,7 +10,7 @@
 from .
import GeneralConstants from .chat_configs import ChatOptions, OpenAiApiCallOptions from .chat_context import BaseChatContext, EmbeddingBasedChatContext -from .general_utils import retry_api_call +from .general_utils import CannotConnectToApiError, retry_api_call from .tokens import TokenUsageDatabase, get_n_tokens_from_msgs diff --git a/gpt_buddy_bot/chat_context.py b/gpt_buddy_bot/chat_context.py index caa351b..073b34e 100644 --- a/gpt_buddy_bot/chat_context.py +++ b/gpt_buddy_bot/chat_context.py @@ -3,6 +3,7 @@ import itertools import json import time +from abc import ABC, abstractmethod from collections import deque from pathlib import Path from typing import TYPE_CHECKING @@ -12,19 +13,38 @@ import pandas as pd from openai.embeddings_utils import cosine_similarity +from .general_utils import retry_api_call + if TYPE_CHECKING: from .chat import Chat -class BaseChatContext: +class ChatContext(ABC): def __init__(self, parent_chat: "Chat"): self.parent_chat = parent_chat + + @abstractmethod + def add_to_history(self, msg_list: list[dict]): + """Add message exchange to history.""" + + @abstractmethod + def load_history(self): + """Load the chat history.""" + + @abstractmethod + def get_context(self, msg: dict): + """Return context messages.""" + + +class BaseChatContext(ChatContext): + def __init__(self, parent_chat: "Chat"): + super().__init__(parent_chat=parent_chat) self.history = deque(maxlen=50) - self._tokens_usage = {"input": 0, "output": 0} + self._placeholder_tokens_usage = {"input": 0, "output": 0} def add_to_history(self, msg_list: list[dict]): self.history += msg_list - return self._tokens_usage + return self._placeholder_tokens_usage def load_history(self): """Load the chat history.""" @@ -34,15 +54,15 @@ def get_context(self, msg: dict): context_msgs = _make_list_of_context_msgs( history=self.history, system_name=self.parent_chat.system_name ) - return {"context_messages": context_msgs, "tokens_usage": self._tokens_usage} + return { + "context_messages": context_msgs, + "tokens_usage": self._placeholder_tokens_usage, + } -class EmbeddingBasedChatContext(BaseChatContext): +class EmbeddingBasedChatContext(ChatContext): """Chat context.""" - def __init__(self, parent_chat: "Chat"): - self.parent_chat = parent_chat - @property def embedding_model(self): return self.parent_chat.context_model diff --git a/gpt_buddy_bot/general_utils.py b/gpt_buddy_bot/general_utils.py index 4737d81..83b27e7 100644 --- a/gpt_buddy_bot/general_utils.py +++ b/gpt_buddy_bot/general_utils.py @@ -1,4 +1,7 @@ +import inspect +import time from functools import wraps +from re import I import openai @@ -10,34 +13,48 @@ class CannotConnectToApiError(Exception): def retry_api_call(max_n_attempts=5, auth_error_msg="Problems connecting to OpenAI API."): """Retry connecting to the API up to a maximum number of times.""" + handled_exceptions = ( + openai.error.ServiceUnavailableError, + openai.error.Timeout, + openai.error.APIError, + ) + + def on_error(error, n_attempts): + if n_attempts < max_n_attempts: + print( + f"\n > {error}. " + + f"Making new attempt ({n_attempts+1}/{max_n_attempts})..." 
+            )
+            time.sleep(1)
+        else:
+            raise CannotConnectToApiError(auth_error_msg) from error
+
     def retry_api_call_decorator(function):
         """Wrap `function`, retrying the API call if the connection fails."""

         @wraps(function)
-        def wrapper(*args, **kwargs):
+        def wrapper_f(*args, **kwargs):
+            n_attempts = 0
+            while True:
+                n_attempts += 1
+                try:
+                    return function(*args, **kwargs)
+                except handled_exceptions as error:
+                    on_error(error=error, n_attempts=n_attempts)
+
+        @wraps(function)
+        def wrapper_generator_f(*args, **kwargs):
             n_attempts = 0
             success = False
             while not success:
                 n_attempts += 1
                 try:
-                    function_rtn = function(*args, **kwargs)
-                except (
-                    openai.error.ServiceUnavailableError,
-                    openai.error.Timeout,
-                    openai.error.APIError,
-                ) as error:
-                    if n_attempts < max_n_attempts:
-                        print(
-                            f"\n > {error}. "
-                            + f"Making new attempt ({n_attempts+1}/{max_n_attempts})..."
-                        )
-                    else:
-                        raise CannotConnectToApiError(auth_error_msg) from error
+                    yield from function(*args, **kwargs)
+                except handled_exceptions as error:
+                    on_error(error=error, n_attempts=n_attempts)
                 else:
                     success = True

-            return function_rtn
-
-        return wrapper
+        return wrapper_generator_f if inspect.isgeneratorfunction(function) else wrapper_f

     return retry_api_call_decorator
diff --git a/tests/unit/test_chat.py b/tests/unit/test_chat.py
index 9bbb2da..a07c691 100644
--- a/tests/unit/test_chat.py
+++ b/tests/unit/test_chat.py
@@ -2,7 +2,7 @@
 import pytest

 from gpt_buddy_bot import GeneralConstants
-from gpt_buddy_bot.chat import CannotConnectToApiError
+from gpt_buddy_bot.general_utils import CannotConnectToApiError


 @pytest.mark.order(1)
@@ -47,8 +47,9 @@ def test_chat_configs(default_chat, default_chat_configs):
 @pytest.mark.parametrize("user_input", ("regular-input",))
 def test_request_timeout_retry(mocker, default_chat, input_builtin_mocker):
     def _mock_openai_ChatCompletion_create(*args, **kwargs):
-        raise openai.error.Timeout("Mocked timeout error")
+        raise openai.error.Timeout("Mocked timeout error was not caught!")

     mocker.patch("openai.ChatCompletion.create", new=_mock_openai_ChatCompletion_create)
+    mocker.patch("time.sleep")  # Don't waste time sleeping in tests
     with pytest.raises(CannotConnectToApiError, match=default_chat._auth_error_msg):
         default_chat.start()

From 89ad44516c92fba41da1125b92d9cf0aae8efc64 Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros
Date: Wed, 8 Nov 2023 22:48:25 +0100
Subject: [PATCH 087/109] A little animation in initial greeting

---
 gpt_buddy_bot/app/app_page_templates.py | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/gpt_buddy_bot/app/app_page_templates.py b/gpt_buddy_bot/app/app_page_templates.py
index 00abdd7..496407f 100644
--- a/gpt_buddy_bot/app/app_page_templates.py
+++ b/gpt_buddy_bot/app/app_page_templates.py
@@ -1,6 +1,7 @@
 """Utilities for creating pages in a streamlit app."""
 import pickle
 import sys
+import time
 import uuid
 from abc import ABC, abstractmethod

@@ -143,7 +144,15 @@ def render(self):
             self.render_chat_history()
         else:
             with st.chat_message("assistant", avatar=self.avatars["assistant"]):
-                st.markdown(self.chat_obj.initial_greeting)
+                with st.empty():
+                    st.markdown("▌")
+                    greeting = ""
+                    for word in self.chat_obj.initial_greeting.split():
+                        greeting += f"{word} "
+                        st.markdown(greeting + "▌")
+                        time.sleep(0.1)
+                    st.markdown(greeting)
+
             self.chat_history.append(
                 {
                     "role": "assistant",

From 8a0c95f0f8e8e5d99789280578119471e86cb1a2 Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros
Date: Wed, 8 Nov 2023 23:43:22 +0100
Subject: [PATCH 088/109] Fix: return to
1st page upon page removal --- gpt_buddy_bot/app/multipage.py | 6 +++--- gpt_buddy_bot/chat.py | 2 +- gpt_buddy_bot/general_utils.py | 2 -- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/gpt_buddy_bot/app/multipage.py b/gpt_buddy_bot/app/multipage.py index 2990073..ad95779 100644 --- a/gpt_buddy_bot/app/multipage.py +++ b/gpt_buddy_bot/app/multipage.py @@ -49,13 +49,13 @@ def add_page(self, page: AppPage, selected: bool = True): def remove_page(self, page: AppPage): """Remove a page from the app.""" + self.pages[page.page_id].chat_obj.private_mode = True + self.pages[page.page_id].chat_obj.clear_cache() + del self.pages[page.page_id] try: self.register_selected_page(next(iter(self.pages.values()))) except StopIteration: self.add_page() - self.pages[page.page_id].chat_obj.private_mode = True - self.pages[page.page_id].chat_obj.clear_cache() - del self.pages[page.page_id] def register_selected_page(self, page: AppPage): """Register a page as selected.""" diff --git a/gpt_buddy_bot/chat.py b/gpt_buddy_bot/chat.py index e6fc437..530bab9 100644 --- a/gpt_buddy_bot/chat.py +++ b/gpt_buddy_bot/chat.py @@ -58,7 +58,7 @@ def configs_file(self): @property def context_file_path(self): - return self.cache_dir / f"embeddings.csv" + return self.cache_dir / "embeddings.csv" @property def metadata_file(self): diff --git a/gpt_buddy_bot/general_utils.py b/gpt_buddy_bot/general_utils.py index 83b27e7..855ab0b 100644 --- a/gpt_buddy_bot/general_utils.py +++ b/gpt_buddy_bot/general_utils.py @@ -1,7 +1,6 @@ import inspect import time from functools import wraps -from re import I import openai @@ -12,7 +11,6 @@ class CannotConnectToApiError(Exception): def retry_api_call(max_n_attempts=5, auth_error_msg="Problems connecting to OpenAI API."): """Retry connecting to the API up to a maximum number of times.""" - handled_exceptions = ( openai.error.ServiceUnavailableError, openai.error.Timeout, From 558a451614822644585751e25f8257bd8464eb80 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Thu, 9 Nov 2023 13:01:39 +0100 Subject: [PATCH 089/109] Use an sqlite db to store embeddings/history --- gpt_buddy_bot/app/app_page_templates.py | 2 +- gpt_buddy_bot/chat.py | 12 +- gpt_buddy_bot/chat_context.py | 164 +++++++++--------------- gpt_buddy_bot/embeddings_database.py | 108 ++++++++++++++++ tests/unit/test_chat.py | 12 +- 5 files changed, 183 insertions(+), 115 deletions(-) create mode 100644 gpt_buddy_bot/embeddings_database.py diff --git a/gpt_buddy_bot/app/app_page_templates.py b/gpt_buddy_bot/app/app_page_templates.py index 496407f..7a799e0 100644 --- a/gpt_buddy_bot/app/app_page_templates.py +++ b/gpt_buddy_bot/app/app_page_templates.py @@ -183,7 +183,7 @@ def render(self): full_response += chunk st.markdown(full_response + "▌") except CannotConnectToApiError: - full_response = self.chat_obj._auth_error_msg + full_response = self.chat_obj._api_connection_error_msg finally: st.markdown(full_response) diff --git a/gpt_buddy_bot/chat.py b/gpt_buddy_bot/chat.py index 530bab9..a01e640 100644 --- a/gpt_buddy_bot/chat.py +++ b/gpt_buddy_bot/chat.py @@ -58,7 +58,7 @@ def configs_file(self): @property def context_file_path(self): - return self.cache_dir / "embeddings.csv" + return self.cache_dir / "embeddings.db" @property def metadata_file(self): @@ -165,7 +165,7 @@ def yield_response_from_msg(self, prompt_as_msg: dict, **kwargs): try: yield from self._yield_response_from_msg(prompt_msg=prompt_as_msg, **kwargs) except openai.error.AuthenticationError as error: - raise 
CannotConnectToApiError(self._auth_error_msg) from error
+            raise CannotConnectToApiError(self._api_connection_error_msg) from error

     def _yield_response_from_msg(self, prompt_msg: dict, add_to_history: bool = True):
         """Yield response from a prompt. Assumes that OpenAI authentication works."""
         # Get appropriate context for prompt from the context handler
         prompt_context_request = self.context_handler.get_context(msg=prompt_msg)
@@ -238,10 +238,12 @@ def _respond_prompt(self, prompt: str, role: str, **kwargs):
         yield from self.yield_response_from_msg(prompt_as_msg, **kwargs)

     @property
-    def _auth_error_msg(self):
+    def _api_connection_error_msg(self):
         return (
-            "Sorry, I'm having trouble authenticating with OpenAI. "
+            "Sorry, I'm having trouble communicating with OpenAI. "
             + "Please check the validity of your API key and try again. "
+            + "If the problem persists, please also take a look at the "
+            + "OpenAI status page: https://status.openai.com."
         )
diff --git a/gpt_buddy_bot/chat_context.py b/gpt_buddy_bot/chat_context.py
index 073b34e..782beca 100644
--- a/gpt_buddy_bot/chat_context.py
+++ b/gpt_buddy_bot/chat_context.py
@@ -1,11 +1,6 @@
 import ast
-import csv
 import itertools
-import json
-import time
 from abc import ABC, abstractmethod
-from collections import deque
-from pathlib import Path
 from typing import TYPE_CHECKING

 import numpy as np
@@ -8,7 +8,7 @@
 import pandas as pd
 from openai.embeddings_utils import cosine_similarity

+from .embeddings_database import EmbeddingsDatabase
 from .general_utils import retry_api_call

 if TYPE_CHECKING:
     from .chat import Chat


 class ChatContext(ABC):
     def __init__(self, parent_chat: "Chat"):
         self.parent_chat = parent_chat
+        self.database = EmbeddingsDatabase(
+            db_path=self.context_file_path, embedding_model=self.embedding_model
+        )
+
+    @property
+    def embedding_model(self):
+        return self.parent_chat.context_model
+
+    @property
+    def context_file_path(self):
+        return self.parent_chat.context_file_path

-    @abstractmethod
     def add_to_history(self, msg_list: list[dict]):
         """Add message exchange to history."""
+        embedding_request = self.request_embedding(msg_list=msg_list)
+        self.database.insert_message_exchange(
+            chat_model=self.parent_chat.model,
+            message_exchange=msg_list,
+            embedding=embedding_request["embedding"],
+        )
+        return embedding_request["tokens_usage"]

-    @abstractmethod
-    def load_history(self):
+    def load_history(self) -> list[dict]:
         """Load the chat history."""
+        df = self.database.get_messages_dataframe()
+        msg_exchanges = df["message_exchange"].apply(ast.literal_eval).tolist()
+        return list(itertools.chain.from_iterable(msg_exchanges))
+
+    @abstractmethod
+    def request_embedding(self, msg_list: list[dict]):
+        """Request embedding from OpenAI API."""

     @abstractmethod
     def get_context(self, msg: dict):
         """Return context messages."""


 class BaseChatContext(ChatContext):
-    def __init__(self, parent_chat: "Chat"):
-        super().__init__(parent_chat=parent_chat)
-        self.history = deque(maxlen=50)
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
         self._placeholder_tokens_usage = {"input": 0, "output": 0}

-    def add_to_history(self, msg_list: list[dict]):
-        self.history += msg_list
-        return
self._placeholder_tokens_usage - - def load_history(self): - """Load the chat history.""" - return self.history + # Implement abstract methods + def request_embedding(self, msg_list: list[dict]): + """Return a placeholder embedding request.""" + return {"embedding": None, "tokens_usage": self._placeholder_tokens_usage} def get_context(self, msg: dict): context_msgs = _make_list_of_context_msgs( - history=self.history, system_name=self.parent_chat.system_name + history=self.load_history(), system_name=self.parent_chat.system_name ) return { "context_messages": context_msgs, @@ -63,57 +78,31 @@ def get_context(self, msg: dict): class EmbeddingBasedChatContext(ChatContext): """Chat context.""" - @property - def embedding_model(self): - return self.parent_chat.context_model - - @property - def context_file_path(self): - return self.parent_chat.context_file_path + def _request_embedding_for_text(self, text: str): + return request_embedding_from_openai(text=text, model=self.embedding_model) - def add_to_history(self, msg_list: list[dict]): - embedding_request = self._calculate_embedding_for_msgs(msg_list=msg_list) - _store_message_exchange_and_corresponding_embedding( - msg_list=msg_list, - embedding_model=self.embedding_model, - chat_model=self.parent_chat.model, - embedding=embedding_request["embedding"], - file_path=self.context_file_path, + # Implement abstract methods + def request_embedding(self, msg_list: list[dict]): + """Request embedding from OpenAI API.""" + text = "\n".join( + [f"{msg['role'].strip()}: {msg['content'].strip()}" for msg in msg_list] ) - return embedding_request["tokens_usage"] - - def load_history(self): - """Load the chat history from file.""" - try: - df = pd.read_csv(self.context_file_path) - except FileNotFoundError: - return [] - selected_history = (df["message_exchange"].apply(ast.literal_eval)).tolist() - selected_history = list(itertools.chain.from_iterable(selected_history)) - return selected_history + return self._request_embedding_for_text(text=text) def get_context(self, msg: dict): - embedding_request = self._calculate_embedding_for_text(text=msg["content"]) - context_messages = _find_context( + embedding_request = self._request_embedding_for_text(text=msg["content"]) + selected_history = _select_relevant_history( + history_df=self.database.get_messages_dataframe(), embedding=embedding_request["embedding"], - file_path=self.context_file_path, - parent_chat=self.parent_chat, ) - + context_messages = _make_list_of_context_msgs( + history=selected_history, system_name=self.parent_chat.system_name + ) return { "context_messages": context_messages, "tokens_usage": embedding_request["tokens_usage"], } - def _calculate_embedding_for_msgs(self, msg_list: list[dict]): - text = "\n".join( - [f"{msg['role'].strip()}: {msg['content'].strip()}" for msg in msg_list] - ) - return self._calculate_embedding_for_text(text=text) - - def _calculate_embedding_for_text(self, text: str): - return request_embedding_from_openai(text=text, model=self.embedding_model) - @retry_api_call() def request_embedding_from_openai(text: str, model: str): @@ -129,64 +118,31 @@ def request_embedding_from_openai(text: str, model: str): return {"embedding": embedding, "tokens_usage": tokens_usage} -def _store_message_exchange_and_corresponding_embedding( - msg_list: list[dict], - embedding_model: str, - chat_model: str, - embedding: list[float], - file_path: Path, -): - """Store message and embeddings to file.""" - # Adapted from - # See also . 
- embedding_file_entry_data = { - "timestamp": int(time.time()), - "embedding_model": f"{embedding_model}", - "chat_model": f"{chat_model}", - "message_exchange": json.dumps(msg_list), - "embedding": json.dumps(embedding), - } - - init_file = not file_path.exists() or file_path.stat().st_size == 0 - write_mode = "w" if init_file else "a" - file_path.parent.mkdir(parents=True, exist_ok=True) - with open(file_path, write_mode, newline="") as file: - writer = csv.DictWriter(file, fieldnames=embedding_file_entry_data.keys()) - if init_file: - writer.writeheader() - writer.writerow(embedding_file_entry_data) - - def _make_list_of_context_msgs(history: list[dict], system_name: str): sys_directives = "Considering the previous messages, answer the next message:" sys_msg = {"role": "system", "name": system_name, "content": sys_directives} return [*history, sys_msg] -def _find_context( - file_path: Path, +def _select_relevant_history( + history_df: pd.DataFrame, embedding: list[float], - parent_chat: "Chat", n_related_msg_exchanges: int = 3, n_tailing_history_exchanges: int = 2, ): - try: - df = pd.read_csv(file_path) - except FileNotFoundError: - return [] - - df = df.loc[df["embedding_model"] == parent_chat.context_model] - df["embedding"] = df["embedding"].apply(ast.literal_eval).apply(np.array) - - df["similarity"] = df["embedding"].apply(lambda x: cosine_similarity(x, embedding)) + history_df["embedding"] = ( + history_df["embedding"].apply(ast.literal_eval).apply(np.array) + ) + history_df["similarity"] = history_df["embedding"].apply( + lambda x: cosine_similarity(x, embedding) + ) # Get the last messages added to the history - df_last_n_chats = df.tail(n_tailing_history_exchanges) + df_last_n_chats = history_df.tail(n_tailing_history_exchanges) # Get the most similar messages df_similar_chats = ( - df.sort_values("similarity", ascending=False) + history_df.sort_values("similarity", ascending=False) .head(n_related_msg_exchanges) .sort_values("timestamp") ) @@ -198,6 +154,4 @@ def _find_context( selected_history = list(itertools.chain.from_iterable(selected_history)) - return _make_list_of_context_msgs( - history=selected_history, system_name=parent_chat.system_name - ) + return selected_history diff --git a/gpt_buddy_bot/embeddings_database.py b/gpt_buddy_bot/embeddings_database.py new file mode 100644 index 0000000..fd10f3f --- /dev/null +++ b/gpt_buddy_bot/embeddings_database.py @@ -0,0 +1,108 @@ +import datetime +import json +import sqlite3 +from pathlib import Path + +import pandas as pd + + +class EmbeddingsDatabase: + def __init__(self, db_path: Path, embedding_model: str): + self.embedding_model = embedding_model + self.db_path = db_path + self.create() + if self.get_embedding_model() is None: + self._init_embedding_model_table() + + def create(self): + conn = sqlite3.connect(self.db_path) + + # SQL to create 'embedding_model' table with 'embedding_model' as primary key + create_embedding_model_table = """ + CREATE TABLE IF NOT EXISTS embedding_model ( + created_timestamp INTEGER NOT NULL, + embedding_model TEXT, + PRIMARY KEY (embedding_model) + ) + """ + + # SQL to create 'messages' table + create_messages_table = """ + CREATE TABLE IF NOT EXISTS messages ( + timestamp INTEGER NOT NULL, + chat_model TEXT NOT NULL, + message_exchange TEXT NOT NULL, + embedding TEXT + ) + """ + + with conn: + # Create tables + conn.execute(create_embedding_model_table) + conn.execute(create_messages_table) + + # Triggers to prevent modification after insertion + conn.execute( + """ + CREATE TRIGGER IF 
NOT EXISTS prevent_embedding_model_modification
+                BEFORE UPDATE ON embedding_model
+                BEGIN
+                    SELECT RAISE(FAIL, 'modification not allowed');
+                END;
+                """
+            )
+
+            conn.execute(
+                """
+                CREATE TRIGGER IF NOT EXISTS prevent_messages_modification
+                BEFORE UPDATE ON messages
+                BEGIN
+                    SELECT RAISE(FAIL, 'modification not allowed');
+                END;
+                """
+            )
+
+        # Close the connection to the database
+        conn.close()
+
+    def get_embedding_model(self):
+        conn = sqlite3.connect(self.db_path)
+        query = "SELECT embedding_model FROM embedding_model;"
+        # Execute the query and fetch the result
+        embedding_model = None
+        with conn:
+            cur = conn.cursor()
+            cur.execute(query)
+            result = cur.fetchone()
+            embedding_model = result[0] if result else None
+
+        conn.close()
+
+        return embedding_model
+
+    def _init_embedding_model_table(self):
+        conn = sqlite3.connect(self.db_path)
+        create_time = int(datetime.datetime.utcnow().timestamp())
+        sql = "INSERT INTO embedding_model "
+        sql += "(created_timestamp, embedding_model) VALUES (?, ?);"
+        with conn:
+            conn.execute(sql, (create_time, self.embedding_model))
+        conn.close()
+
+    def insert_message_exchange(self, chat_model, message_exchange, embedding):
+        timestamp = int(datetime.datetime.utcnow().timestamp())
+        message_exchange = json.dumps(message_exchange)
+        embedding = json.dumps(embedding)
+        conn = sqlite3.connect(self.db_path)
+        sql = "INSERT INTO messages "
+        sql += "(timestamp, chat_model, message_exchange, embedding) VALUES (?, ?, ?, ?);"
+        with conn:
+            conn.execute(sql, (timestamp, chat_model, message_exchange, embedding))
+        conn.close()
+
+    def get_messages_dataframe(self):
+        conn = sqlite3.connect(self.db_path)
+        query = "SELECT * FROM messages;"
+        df = pd.read_sql_query(query, conn)
+        conn.close()
+        return df
diff --git a/tests/unit/test_chat.py b/tests/unit/test_chat.py
index a07c691..0da587a 100644
--- a/tests/unit/test_chat.py
+++ b/tests/unit/test_chat.py
@@ -6,10 +6,12 @@

 @pytest.mark.order(1)
-@pytest.mark.no_chat_completion_create_mocking
+@pytest.mark.no_chat_completion_create_mocking()
 @pytest.mark.parametrize("user_input", ("regular-input",))
 def test_testbed_doesnt_actually_connect_to_openai(default_chat, input_builtin_mocker):
-    with pytest.raises(CannotConnectToApiError, match=default_chat._auth_error_msg):
+    with pytest.raises(
+        CannotConnectToApiError, match=default_chat._api_connection_error_msg
+    ):
         try:
             default_chat.start()
         except CannotConnectToApiError:
@@ -43,7 +45,7 @@ def test_chat_configs(default_chat, default_chat_configs):
     assert default_chat._passed_configs == default_chat_configs


-@pytest.mark.no_chat_completion_create_mocking
+@pytest.mark.no_chat_completion_create_mocking()
 @pytest.mark.parametrize("user_input", ("regular-input",))
 def test_request_timeout_retry(mocker, default_chat, input_builtin_mocker):
     def _mock_openai_ChatCompletion_create(*args, **kwargs):
@@ -51,5 +53,7 @@ def _mock_openai_ChatCompletion_create(*args, **kwargs):
     mocker.patch("openai.ChatCompletion.create", new=_mock_openai_ChatCompletion_create)
     mocker.patch("time.sleep")  # Don't waste time sleeping in tests
-    with pytest.raises(CannotConnectToApiError, match=default_chat._auth_error_msg):
+    with pytest.raises(
+        CannotConnectToApiError, match=default_chat._api_connection_error_msg
+    ):
         default_chat.start()

From 86cf15465f460f10f9d4dfd419c9a1b5e0f458ee Mon Sep 17 00:00:00 2001
From: Paulo V C Medeiros
Date: Thu, 9 Nov 2023 13:33:26 +0100
Subject: [PATCH 090/109] context_model: None --> "full-history"

---
 gpt_buddy_bot/chat.py | 6 +++---
gpt_buddy_bot/chat_configs.py | 2 +- gpt_buddy_bot/chat_context.py | 14 ++++++-------- gpt_buddy_bot/embeddings_database.py | 12 ++++++++++-- gpt_buddy_bot/tokens.py | 2 +- 5 files changed, 21 insertions(+), 15 deletions(-) diff --git a/gpt_buddy_bot/chat.py b/gpt_buddy_bot/chat.py index a01e640..fc7a6ea 100644 --- a/gpt_buddy_bot/chat.py +++ b/gpt_buddy_bot/chat.py @@ -9,7 +9,7 @@ from . import GeneralConstants from .chat_configs import ChatOptions, OpenAiApiCallOptions -from .chat_context import BaseChatContext, EmbeddingBasedChatContext +from .chat_context import EmbeddingBasedChatContext, FullHistoryChatContext from .general_utils import CannotConnectToApiError, retry_api_call from .tokens import TokenUsageDatabase, get_n_tokens_from_msgs @@ -30,8 +30,8 @@ def __init__(self, configs: ChatOptions = None): self.token_usage = defaultdict(lambda: {"input": 0, "output": 0}) self.token_usage_db = TokenUsageDatabase(fpath=self.token_usage_db_path) - if self.context_model is None: - self.context_handler = BaseChatContext(parent_chat=self) + if self.context_model == "full-history": + self.context_handler = FullHistoryChatContext(parent_chat=self) elif self.context_model == "text-embedding-ada-002": self.context_handler = EmbeddingBasedChatContext(parent_chat=self) else: diff --git a/gpt_buddy_bot/chat_configs.py b/gpt_buddy_bot/chat_configs.py index acd5b40..f98eb90 100644 --- a/gpt_buddy_bot/chat_configs.py +++ b/gpt_buddy_bot/chat_configs.py @@ -114,7 +114,7 @@ class ChatOptions(OpenAiApiCallOptions): default=f"{GeneralConstants.PACKAGE_NAME}_system", description="Name of the chat's system", ) - context_model: Literal["text-embedding-ada-002", None] = Field( + context_model: Literal["text-embedding-ada-002", "full-history"] = Field( default="text-embedding-ada-002", description="OpenAI API model to use for embedding", ) diff --git a/gpt_buddy_bot/chat_context.py b/gpt_buddy_bot/chat_context.py index 782beca..9cb18c4 100644 --- a/gpt_buddy_bot/chat_context.py +++ b/gpt_buddy_bot/chat_context.py @@ -55,7 +55,7 @@ def get_context(self, msg: dict): """Return context messages.""" -class BaseChatContext(ChatContext): +class FullHistoryChatContext(ChatContext): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._placeholder_tokens_usage = {"input": 0, "output": 0} @@ -127,8 +127,8 @@ def _make_list_of_context_msgs(history: list[dict], system_name: str): def _select_relevant_history( history_df: pd.DataFrame, embedding: list[float], - n_related_msg_exchanges: int = 3, - n_tailing_history_exchanges: int = 2, + max_n_prompt_reply_pairs: int = 5, + max_n_tailing_prompt_reply_pairs: int = 2, ): history_df["embedding"] = ( history_df["embedding"].apply(ast.literal_eval).apply(np.array) @@ -138,12 +138,12 @@ def _select_relevant_history( ) # Get the last messages added to the history - df_last_n_chats = history_df.tail(n_tailing_history_exchanges) + df_last_n_chats = history_df.tail(max_n_tailing_prompt_reply_pairs) # Get the most similar messages df_similar_chats = ( history_df.sort_values("similarity", ascending=False) - .head(n_related_msg_exchanges) + .head(max_n_prompt_reply_pairs) .sort_values("timestamp") ) @@ -152,6 +152,4 @@ def _select_relevant_history( df_context["message_exchange"].apply(ast.literal_eval).drop_duplicates() ).tolist() - selected_history = list(itertools.chain.from_iterable(selected_history)) - - return selected_history + return list(itertools.chain.from_iterable(selected_history)) diff --git a/gpt_buddy_bot/embeddings_database.py 
b/gpt_buddy_bot/embeddings_database.py index fd10f3f..7bd031c 100644 --- a/gpt_buddy_bot/embeddings_database.py +++ b/gpt_buddy_bot/embeddings_database.py @@ -11,8 +11,16 @@ def __init__(self, db_path: Path, embedding_model: str): self.embedding_model = embedding_model self.db_path = db_path self.create() - if self.get_embedding_model() is None: + + stored_embedding_model = self.get_embedding_model() + if stored_embedding_model is None: self._init_embedding_model_table() + elif stored_embedding_model != self.embedding_model: + raise ValueError( + "Database already contains a different embedding model: " + f"{self.get_embedding_model()}.\n" + "Cannot continue." + ) def create(self): conn = sqlite3.connect(self.db_path) @@ -21,7 +29,7 @@ def create(self): create_embedding_model_table = """ CREATE TABLE IF NOT EXISTS embedding_model ( created_timestamp INTEGER NOT NULL, - embedding_model TEXT, + embedding_model TEXT NOT NULL, PRIMARY KEY (embedding_model) ) """ diff --git a/gpt_buddy_bot/tokens.py b/gpt_buddy_bot/tokens.py index 8949500..bb029bc 100644 --- a/gpt_buddy_bot/tokens.py +++ b/gpt_buddy_bot/tokens.py @@ -14,7 +14,7 @@ "gpt-4": {"input": 0.03, "output": 0.06}, "gpt-4-32k": {"input": 0.06, "output": 0.12}, "text-embedding-ada-002": {"input": 0.0001, "output": 0.0}, - None: {"input": 0.0, "output": 0.0}, + "full-history": {"input": 0.0, "output": 0.0}, } From 0c54339d5c628f85dae35bd373a0a67380c09c09 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Thu, 9 Nov 2023 13:50:58 +0100 Subject: [PATCH 091/109] Some refactoring --- gpt_buddy_bot/app/app_page_templates.py | 2 +- gpt_buddy_bot/chat.py | 40 +++---------------- gpt_buddy_bot/chat_context.py | 2 +- .../{general_utils.py => openai_utils.py} | 27 +++++++++++++ tests/unit/test_chat.py | 2 +- 5 files changed, 36 insertions(+), 37 deletions(-) rename gpt_buddy_bot/{general_utils.py => openai_utils.py} (62%) diff --git a/gpt_buddy_bot/app/app_page_templates.py b/gpt_buddy_bot/app/app_page_templates.py index 7a799e0..2fbbbd8 100644 --- a/gpt_buddy_bot/app/app_page_templates.py +++ b/gpt_buddy_bot/app/app_page_templates.py @@ -11,7 +11,7 @@ from gpt_buddy_bot import GeneralConstants from gpt_buddy_bot.chat import Chat from gpt_buddy_bot.chat_configs import ChatOptions -from gpt_buddy_bot.general_utils import CannotConnectToApiError +from gpt_buddy_bot.openai_utils import CannotConnectToApiError _AVATAR_FILES_DIR = GeneralConstants.APP_DIR / "data" _ASSISTANT_AVATAR_FILE_PATH = _AVATAR_FILES_DIR / "assistant_avatar.png" diff --git a/gpt_buddy_bot/chat.py b/gpt_buddy_bot/chat.py index fc7a6ea..6aefa4d 100644 --- a/gpt_buddy_bot/chat.py +++ b/gpt_buddy_bot/chat.py @@ -5,12 +5,10 @@ from collections import defaultdict from pathlib import Path -import openai - from . 
import GeneralConstants -from .chat_configs import ChatOptions, OpenAiApiCallOptions +from .chat_configs import ChatOptions from .chat_context import EmbeddingBasedChatContext, FullHistoryChatContext -from .general_utils import CannotConnectToApiError, retry_api_call +from .openai_utils import make_api_chat_completion_call from .tokens import TokenUsageDatabase, get_n_tokens_from_msgs @@ -160,15 +158,8 @@ def respond_user_prompt(self, prompt: str, **kwargs): def respond_system_prompt(self, prompt: str, **kwargs): yield from self._respond_prompt(prompt=prompt, role="system", **kwargs) - def yield_response_from_msg(self, prompt_as_msg: dict, **kwargs): - """Yield response from a prompt.""" - try: - yield from self._yield_response_from_msg(prompt_msg=prompt_as_msg, **kwargs) - except openai.error.AuthenticationError as error: - raise CannotConnectToApiError(self._api_connection_error_msg) from error - - def _yield_response_from_msg(self, prompt_msg: dict, add_to_history: bool = True): - """Yield response from a prompt. Assumes that OpenAI authentication works.""" + def yield_response_from_msg(self, prompt_msg: dict, add_to_history: bool = True): + """Yield response from a prompt message.""" # Get appropriate context for prompt from the context handler prompt_context_request = self.context_handler.get_context(msg=prompt_msg) context = prompt_context_request["context_messages"] @@ -186,7 +177,7 @@ def _yield_response_from_msg(self, prompt_msg: dict, add_to_history: bool = True # Make API request and yield response chunks full_reply_content = "" - for chunk in _make_api_chat_completion_call( + for chunk in make_api_chat_completion_call( conversation=contextualised_prompt, chat_obj=self ): full_reply_content += chunk @@ -232,9 +223,7 @@ def report_token_usage(self, current_chat: bool = True): self.token_usage_db.print_usage_costs(self.token_usage, current_chat=current_chat) def _respond_prompt(self, prompt: str, role: str, **kwargs): - prompt = prompt.strip() - role = role.lower().strip() - prompt_as_msg = {"role": role, "content": prompt} + prompt_as_msg = {"role": role.lower().strip(), "content": prompt.strip()} yield from self.yield_response_from_msg(prompt_as_msg, **kwargs) @property @@ -245,20 +234,3 @@ def _api_connection_error_msg(self): + "If the problem persists, please also take a look at the " + "OpenAI status page: https://status.openai.com." 
) - - -def _make_api_chat_completion_call(conversation: list, chat_obj: Chat): - api_call_args = {} - for field in OpenAiApiCallOptions.model_fields: - if getattr(chat_obj, field) is not None: - api_call_args[field] = getattr(chat_obj, field) - - @retry_api_call(auth_error_msg=chat_obj._api_connection_error_msg) - def stream_reply(conversation, **api_call_args): - for completion_chunk in openai.ChatCompletion.create( - messages=conversation, stream=True, **api_call_args - ): - reply_chunk = getattr(completion_chunk.choices[0].delta, "content", "") - yield reply_chunk - - yield from stream_reply(conversation, **api_call_args) diff --git a/gpt_buddy_bot/chat_context.py b/gpt_buddy_bot/chat_context.py index 9cb18c4..432945e 100644 --- a/gpt_buddy_bot/chat_context.py +++ b/gpt_buddy_bot/chat_context.py @@ -9,7 +9,7 @@ from openai.embeddings_utils import cosine_similarity from .embeddings_database import EmbeddingsDatabase -from .general_utils import retry_api_call +from .openai_utils import retry_api_call if TYPE_CHECKING: from .chat import Chat diff --git a/gpt_buddy_bot/general_utils.py b/gpt_buddy_bot/openai_utils.py similarity index 62% rename from gpt_buddy_bot/general_utils.py rename to gpt_buddy_bot/openai_utils.py index 855ab0b..2284ffb 100644 --- a/gpt_buddy_bot/general_utils.py +++ b/gpt_buddy_bot/openai_utils.py @@ -1,9 +1,15 @@ import inspect import time from functools import wraps +from typing import TYPE_CHECKING import openai +from .chat_configs import OpenAiApiCallOptions + +if TYPE_CHECKING: + from .chat import Chat + class CannotConnectToApiError(Exception): """Error raised when the package cannot connect to the OpenAI API.""" @@ -39,6 +45,8 @@ def wrapper_f(*args, **kwargs): return function(*args, **kwargs) except handled_exceptions as error: on_error(error=error, n_attempts=n_attempts) + except openai.error.AuthenticationError as error: + raise CannotConnectToApiError(auth_error_msg) from error @wraps(function) def wrapper_generator_f(*args, **kwargs): @@ -50,9 +58,28 @@ def wrapper_generator_f(*args, **kwargs): yield from function(*args, **kwargs) except handled_exceptions as error: on_error(error=error, n_attempts=n_attempts) + except openai.error.AuthenticationError as error: + raise CannotConnectToApiError(auth_error_msg) from error else: success = True return wrapper_generator_f if inspect.isgeneratorfunction(function) else wrapper_f return retry_api_call_decorator + + +def make_api_chat_completion_call(conversation: list, chat_obj: "Chat"): + api_call_args = {} + for field in OpenAiApiCallOptions.model_fields: + if getattr(chat_obj, field) is not None: + api_call_args[field] = getattr(chat_obj, field) + + @retry_api_call(auth_error_msg=chat_obj._api_connection_error_msg) + def stream_reply(conversation, **api_call_args): + for completion_chunk in openai.ChatCompletion.create( + messages=conversation, stream=True, **api_call_args + ): + reply_chunk = getattr(completion_chunk.choices[0].delta, "content", "") + yield reply_chunk + + yield from stream_reply(conversation, **api_call_args) diff --git a/tests/unit/test_chat.py b/tests/unit/test_chat.py index 0da587a..03ce316 100644 --- a/tests/unit/test_chat.py +++ b/tests/unit/test_chat.py @@ -2,7 +2,7 @@ import pytest from gpt_buddy_bot import GeneralConstants -from gpt_buddy_bot.general_utils import CannotConnectToApiError +from gpt_buddy_bot.openai_utils import CannotConnectToApiError @pytest.mark.order(1) From e0f2d2800e3381cabc7b7bad1fd4094bd4b06894 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Thu, 9 Nov 
2023 18:55:11 +0100 Subject: [PATCH 092/109] Attempt to make streamlit widgets retain values --- gpt_buddy_bot/app/app_page_templates.py | 23 +++++--- gpt_buddy_bot/app/multipage.py | 77 +++++++++++++++++++------ 2 files changed, 73 insertions(+), 27 deletions(-) diff --git a/gpt_buddy_bot/app/app_page_templates.py b/gpt_buddy_bot/app/app_page_templates.py index 2fbbbd8..39a9a2a 100644 --- a/gpt_buddy_bot/app/app_page_templates.py +++ b/gpt_buddy_bot/app/app_page_templates.py @@ -110,9 +110,9 @@ def chat_obj(self) -> Chat: return self.state["chat_obj"] @chat_obj.setter - def chat_obj(self, value: Chat): - self.state["chat_obj"] = value - self.state["chat_configs"] = value.configs + def chat_obj(self, new_chat_obj: Chat): + self.state["chat_obj"] = new_chat_obj + self.state["chat_configs"] = new_chat_obj.configs @property def chat_history(self) -> list[dict[str, str]]: @@ -165,7 +165,10 @@ def render(self): placeholder = ( f"Send a message to {self.chat_obj.assistant_name} ({self.chat_obj.model})" ) - if prompt := st.chat_input(placeholder=placeholder): + if prompt := st.chat_input( + placeholder=placeholder, + on_submit=lambda: self.state.update({"chat_started": True}), + ): # Display user message in chat message container with st.chat_message("user", avatar=self.avatars["user"]): st.markdown(prompt) @@ -199,11 +202,13 @@ def render(self): if "page_title" not in self.state and len(self.chat_history) > 3: with st.spinner("Working out conversation topic..."): prompt = "Summarize the messages in max 4 words.\n" - self.title = "".join( + title = "".join( self.chat_obj.respond_system_prompt(prompt, add_to_history=False) ) - self.sidebar_title = self.title - st.title(self.title) + self.chat_obj.metadata["page_title"] = title + self.chat_obj.metadata["sidebar_title"] = title + self.chat_obj.save_cache() - self.chat_obj.metadata["page_title"] = self.title - self.chat_obj.metadata["sidebar_title"] = self.sidebar_title + self.title = title + self.sidebar_title = title + st.title(title) diff --git a/gpt_buddy_bot/app/multipage.py b/gpt_buddy_bot/app/multipage.py index ad95779..3672a33 100644 --- a/gpt_buddy_bot/app/multipage.py +++ b/gpt_buddy_bot/app/multipage.py @@ -138,32 +138,64 @@ def handle_ui_page_selection(self): updates_to_chat_configs = {} # Present the user with the model and instructions fields first - field_names = ["model", "ai_instructions"] + field_names = ["model", "ai_instructions", "context_model"] field_names += [field_name for field_name in ChatOptions.model_fields] field_names = list(dict.fromkeys(field_names)) - model_fiedls = {k: ChatOptions.model_fields[k] for k in field_names} + model_fields = {k: ChatOptions.model_fields[k] for k in field_names} - for field_name, field in model_fiedls.items(): + # Keep track of selected values so that selectbox doesn't reset + if "widget_previous_value" not in self.selected_page.state: + self.selected_page.state["widget_previous_value"] = {} + + def save_widget_previous_values(element_key): + self.selected_page.state["widget_previous_value"][ + element_key + ] = st.session_state.get(element_key) + + for field_name, field in model_fields.items(): title = field_name.replace("_", " ").title() choices = ChatOptions.get_allowed_values(field=field_name) description = ChatOptions.get_description(field=field_name) field_type = ChatOptions.get_type(field=field_name) + # Check if the field is frozen and disable corresponding UI element if so + chat_started = self.selected_page.state.get("chat_started", False) + extra_info = field.json_schema_extra + 
if extra_info is None: + extra_info = {} + disable_ui_element = extra_info.get("frozen", False) and ( + chat_started + or any( + msg["role"] == "user" for msg in self.selected_page.chat_history + ) + ) + + # Keep track of selected values so that selectbox doesn't reset + current_config_value = getattr(current_chat_configs, field_name) element_key = f"{field_name}-pg-{self.selected_page.page_id}-ui-element" - last_field_value = getattr(current_chat_configs, field_name) + widget_previous_value = self.selected_page.state[ + "widget_previous_value" + ].get(element_key, current_config_value) if choices: - index = ( - 0 - if st.session_state.get("last_rendered_page") - == self.selected_page.page_id - else choices.index(last_field_value) - ) new_field_value = st.selectbox( - title, choices, key=element_key, index=index, help=description + title, + key=element_key, + options=choices, + index=choices.index(widget_previous_value), + help=description, + disabled=disable_ui_element, + on_change=save_widget_previous_values, + args=[element_key], ) elif field_type == str: new_field_value = st.text_input( - title, value=last_field_value, key=element_key, help=description + title, + key=element_key, + value=widget_previous_value, + help=description, + disabled=disable_ui_element, + on_change=save_widget_previous_values, + args=[element_key], ) elif field_type in [int, float]: step = 1 if field_type == int else 0.01 @@ -180,32 +212,41 @@ def handle_ui_page_selection(self): new_field_value = st.number_input( title, - value=last_field_value, + key=element_key, + value=widget_previous_value, placeholder="OpenAI Default", min_value=bounds[0], max_value=bounds[1], step=step, - key=element_key, help=description, + disabled=disable_ui_element, + on_change=save_widget_previous_values, + args=[element_key], ) elif field_type in (list, tuple): new_field_value = st.text_area( title, - value="\n".join(last_field_value), + value="\n".join(widget_previous_value), key=element_key, help=description, + disabled=disable_ui_element, + on_change=save_widget_previous_values, + args=[element_key], ) - new_field_value = tuple(new_field_value.split("\n")) else: continue - if new_field_value != last_field_value: + if new_field_value != current_config_value: + if field_type in (list, tuple): + new_field_value = tuple(new_field_value.split("\n")) updates_to_chat_configs[field_name] = new_field_value if updates_to_chat_configs: new_chat_configs = current_chat_configs.model_dump() new_chat_configs.update(updates_to_chat_configs) - self.selected_page.chat_obj = Chat.from_dict(new_chat_configs) + new_chat = Chat.from_dict(new_chat_configs) + self.selected_page.chat_obj = new_chat + new_chat.save_cache() def get_saved_chat_cache_dir_paths(self): """Get the filepaths of saved chat contexts, sorted by last modified.""" From d0c36cf671ce5cd6b0738758493183ae9b823461 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Thu, 9 Nov 2023 21:29:09 +0100 Subject: [PATCH 093/109] Do not mix embedding methods --- gpt_buddy_bot/app/app_page_templates.py | 14 ++++----- gpt_buddy_bot/app/multipage.py | 4 ++- gpt_buddy_bot/chat.py | 22 ++++++++----- gpt_buddy_bot/chat_configs.py | 6 +++- gpt_buddy_bot/embeddings_database.py | 41 +++++++++++++------------ 5 files changed, 50 insertions(+), 37 deletions(-) diff --git a/gpt_buddy_bot/app/app_page_templates.py b/gpt_buddy_bot/app/app_page_templates.py index 39a9a2a..3ebfc9c 100644 --- a/gpt_buddy_bot/app/app_page_templates.py +++ b/gpt_buddy_bot/app/app_page_templates.py @@ -153,13 +153,13 @@ def 
render(self): time.sleep(0.1) st.markdown(greeting) - self.chat_history.append( - { - "role": "assistant", - "name": self.chat_obj.assistant_name, - "content": self.chat_obj.initial_greeting, - } - ) + self.chat_history.append( + { + "role": "assistant", + "name": self.chat_obj.assistant_name, + "content": self.chat_obj.initial_greeting, + } + ) # Accept user input placeholder = ( diff --git a/gpt_buddy_bot/app/multipage.py b/gpt_buddy_bot/app/multipage.py index 3672a33..268a585 100644 --- a/gpt_buddy_bot/app/multipage.py +++ b/gpt_buddy_bot/app/multipage.py @@ -226,7 +226,9 @@ def save_widget_previous_values(element_key): elif field_type in (list, tuple): new_field_value = st.text_area( title, - value="\n".join(widget_previous_value), + value=widget_previous_value + if isinstance(widget_previous_value, str) + else "\n".join(widget_previous_value), key=element_key, help=description, disabled=disable_ui_element, diff --git a/gpt_buddy_bot/chat.py b/gpt_buddy_bot/chat.py index 6aefa4d..88600c3 100644 --- a/gpt_buddy_bot/chat.py +++ b/gpt_buddy_bot/chat.py @@ -45,6 +45,15 @@ def cache_dir(self, value): value = GeneralConstants.CHAT_CACHE_DIR / f"chat_{self.id}" self._cache_dir = Path(value) + def save_cache(self): + """Store the chat's configs and metadata to the cache directory.""" + with open(self.configs_file, "w") as configs_f: + configs_f.write(self.configs.model_dump_json(indent=2)) + + metadata = self.metadata # Trigger loading metadata if not yet done + with open(self.metadata_file, "w") as metadata_f: + json.dump(metadata, metadata_f, indent=2) + def clear_cache(self): """Remove the cache directory.""" shutil.rmtree(self.cache_dir, ignore_errors=True) @@ -111,16 +120,13 @@ def __del__(self): n_output_tokens=self.token_usage[model]["output"], ) - if self.private_mode or not next(self.cache_dir.iterdir(), False): + cache_empty = self.cache_dir.exists() and not next( + self.cache_dir.iterdir(), False + ) + if self.private_mode or cache_empty: self.clear_cache() else: - # Store configs - with open(self.configs_file, "w") as configs_f: - configs_f.write(self.configs.model_dump_json(indent=2)) - # Store metadata - metadata = self.metadata # Trigger loading metadata if not yet done - with open(self.metadata_file, "w") as metadata_f: - json.dump(metadata, metadata_f, indent=2) + self.save_cache() @classmethod def from_dict(cls, configs: dict): diff --git a/gpt_buddy_bot/chat_configs.py b/gpt_buddy_bot/chat_configs.py index f98eb90..75d84b2 100644 --- a/gpt_buddy_bot/chat_configs.py +++ b/gpt_buddy_bot/chat_configs.py @@ -116,7 +116,11 @@ class ChatOptions(OpenAiApiCallOptions): ) context_model: Literal["text-embedding-ada-002", "full-history"] = Field( default="text-embedding-ada-002", - description="OpenAI API model to use for embedding", + description=( + "Model to use for chat context (~memory). " + + "Once picked, it cannot be changed." 
+ ), + json_schema_extra={"frozen": True}, ) cache_dir: Optional[Path] = Field( default=None, diff --git a/gpt_buddy_bot/embeddings_database.py b/gpt_buddy_bot/embeddings_database.py index 7bd031c..c75df49 100644 --- a/gpt_buddy_bot/embeddings_database.py +++ b/gpt_buddy_bot/embeddings_database.py @@ -8,21 +8,12 @@ class EmbeddingsDatabase: def __init__(self, db_path: Path, embedding_model: str): - self.embedding_model = embedding_model self.db_path = db_path + self.embedding_model = embedding_model self.create() - stored_embedding_model = self.get_embedding_model() - if stored_embedding_model is None: - self._init_embedding_model_table() - elif stored_embedding_model != self.embedding_model: - raise ValueError( - "Database already contains a different embedding model: " - f"{self.get_embedding_model()}.\n" - "Cannot continue." - ) - def create(self): + self.db_path.parent.mkdir(parents=True, exist_ok=True) conn = sqlite3.connect(self.db_path) # SQL to create 'embedding_model' table with 'embedding_model' as primary key @@ -88,16 +79,17 @@ def get_embedding_model(self): return embedding_model - def _init_embedding_model_table(self): - conn = sqlite3.connect(self.db_path) - create_time = int(datetime.datetime.utcnow().timestamp()) - sql = "INSERT INTO embedding_model " - sql += "(created_timestamp, embedding_model) VALUES (?, ?);" - with conn: - conn.execute(sql, (create_time, self.embedding_model)) - conn.close() - def insert_message_exchange(self, chat_model, message_exchange, embedding): + stored_embedding_model = self.get_embedding_model() + if stored_embedding_model is None: + self._init_database() + elif stored_embedding_model != self.embedding_model: + raise ValueError( + "Database already contains a different embedding model: " + f"{self.get_embedding_model()}.\n" + "Cannot continue." + ) + timestamp = int(datetime.datetime.utcnow().timestamp()) message_exchange = json.dumps(message_exchange) embedding = json.dumps(embedding) @@ -114,3 +106,12 @@ def get_messages_dataframe(self): df = pd.read_sql_query(query, conn) conn.close() return df + + def _init_database(self): + conn = sqlite3.connect(self.db_path) + create_time = int(datetime.datetime.utcnow().timestamp()) + sql = "INSERT INTO embedding_model " + sql += "(created_timestamp, embedding_model) VALUES (?, ?);" + with conn: + conn.execute(sql, (create_time, self.embedding_model)) + conn.close() From 4f3c0e4e799164262b703660474747abec118158 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Thu, 9 Nov 2023 22:15:27 +0100 Subject: [PATCH 094/109] Remove initial chat animation The previous page wouldn't be cleared and the contents would get mixed for a split-second. 
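For reference, the animation being removed progressively redrew a single st.empty() placeholder, one word at a time. A minimal sketch of that pattern (the greeting text here is illustrative only):

    import time

    import streamlit as st

    with st.empty():  # placeholder slot; each call below replaces its content
        greeting = ""
        for word in "Hi! How can I help you today?".split():
            greeting += f"{word} "
            st.markdown(greeting + "▌")  # redraw with a fake typing cursor
            time.sleep(0.1)
        st.markdown(greeting)  # final redraw, without the cursor

Because the animation spans many redraws, switching pages before it finished left the previous page's contents briefly visible. Rendering the greeting with a single st.markdown call avoids that.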
--- gpt_buddy_bot/app/app_page_templates.py | 10 +--------- gpt_buddy_bot/app/multipage.py | 11 +++++++---- 2 files changed, 8 insertions(+), 13 deletions(-) diff --git a/gpt_buddy_bot/app/app_page_templates.py b/gpt_buddy_bot/app/app_page_templates.py index 3ebfc9c..39f43d3 100644 --- a/gpt_buddy_bot/app/app_page_templates.py +++ b/gpt_buddy_bot/app/app_page_templates.py @@ -144,15 +144,7 @@ def render(self): self.render_chat_history() else: with st.chat_message("assistant", avatar=self.avatars["assistant"]): - with st.empty(): - st.markdown("▌") - greeting = "" - for word in self.chat_obj.initial_greeting.split(): - greeting += f"{word} " - st.markdown(greeting + "▌") - time.sleep(0.1) - st.markdown(greeting) - + st.markdown(self.chat_obj.initial_greeting) self.chat_history.append( { "role": "assistant", diff --git a/gpt_buddy_bot/app/multipage.py b/gpt_buddy_bot/app/multipage.py index 268a585..39ba84f 100644 --- a/gpt_buddy_bot/app/multipage.py +++ b/gpt_buddy_bot/app/multipage.py @@ -224,11 +224,14 @@ def save_widget_previous_values(element_key): args=[element_key], ) elif field_type in (list, tuple): + prev_value = ( + widget_previous_value + if isinstance(widget_previous_value, str) + else "\n".join(widget_previous_value) + ) new_field_value = st.text_area( title, - value=widget_previous_value - if isinstance(widget_previous_value, str) - else "\n".join(widget_previous_value), + value=prev_value.strip(), key=element_key, help=description, disabled=disable_ui_element, @@ -240,7 +243,7 @@ def save_widget_previous_values(element_key): if new_field_value != current_config_value: if field_type in (list, tuple): - new_field_value = tuple(new_field_value.split("\n")) + new_field_value = tuple(new_field_value.strip().split("\n")) updates_to_chat_configs[field_name] = new_field_value if updates_to_chat_configs: From d0b47269e561f032aec3a243a77557cb099f92e4 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Thu, 9 Nov 2023 22:23:10 +0100 Subject: [PATCH 095/109] rm gpt-4-32k model. Not accessible. --- gpt_buddy_bot/chat_configs.py | 1 - gpt_buddy_bot/tokens.py | 1 - 2 files changed, 2 deletions(-) diff --git a/gpt_buddy_bot/chat_configs.py b/gpt_buddy_bot/chat_configs.py index 75d84b2..8125381 100644 --- a/gpt_buddy_bot/chat_configs.py +++ b/gpt_buddy_bot/chat_configs.py @@ -75,7 +75,6 @@ class OpenAiApiCallOptions(BaseConfigModel): "gpt-3.5-turbo", # Will point to gpt-3.5-turbo-1106 starting Dec 11, 2023 "gpt-4-1106-preview", "gpt-4", - "gpt-4-32k", ] = Field( default="gpt-3.5-turbo-1106", description=f"OpenAI LLM model to use. 
See {_openai_url}-model and {_models_url}", diff --git a/gpt_buddy_bot/tokens.py b/gpt_buddy_bot/tokens.py index bb029bc..7645fb9 100644 --- a/gpt_buddy_bot/tokens.py +++ b/gpt_buddy_bot/tokens.py @@ -12,7 +12,6 @@ "gpt-3.5-turbo-1106": {"input": 0.001, "output": 0.002}, "gpt-4-1106-preview": {"input": 0.03, "output": 0.06}, "gpt-4": {"input": 0.03, "output": 0.06}, - "gpt-4-32k": {"input": 0.06, "output": 0.12}, "text-embedding-ada-002": {"input": 0.0001, "output": 0.0}, "full-history": {"input": 0.0, "output": 0.0}, } From 5bda1055e74f1e6ec30cef933787736a0f56bfe5 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Thu, 9 Nov 2023 22:33:10 +0100 Subject: [PATCH 096/109] Handle validation error when reloading chats --- gpt_buddy_bot/app/multipage.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/gpt_buddy_bot/app/multipage.py b/gpt_buddy_bot/app/multipage.py index 39ba84f..c9e294a 100644 --- a/gpt_buddy_bot/app/multipage.py +++ b/gpt_buddy_bot/app/multipage.py @@ -4,6 +4,7 @@ import openai import streamlit as st +from pydantic import ValidationError from gpt_buddy_bot import GeneralConstants from gpt_buddy_bot.app.app_page_templates import AppPage, ChatBotPage, _RecoveredChat @@ -280,7 +281,16 @@ def render(self, **kwargs): if not st.session_state.get("saved_chats_reloaded", False): st.session_state["saved_chats_reloaded"] = True for cache_dir_path in self.get_saved_chat_cache_dir_paths(): - chat = Chat.from_cache(cache_dir=cache_dir_path) + try: + chat = Chat.from_cache(cache_dir=cache_dir_path) + except ValidationError: + st.warning( + f"Failed to load cached chat {cache_dir_path}: " + + "Non-supported configs.", + icon="⚠️", + ) + continue + new_page = ChatBotPage( chat_obj=chat, page_title=chat.metadata.get("page_title", _RecoveredChat), From 602151307e1c12765899412518329e5684c17f8d Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Thu, 9 Nov 2023 23:53:06 +0100 Subject: [PATCH 097/109] Some css styling for selection buttons --- gpt_buddy_bot/app/app_page_templates.py | 1 - gpt_buddy_bot/app/multipage.py | 27 ++++++++++++++++++++++++- 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/gpt_buddy_bot/app/app_page_templates.py b/gpt_buddy_bot/app/app_page_templates.py index 39f43d3..a8a07fd 100644 --- a/gpt_buddy_bot/app/app_page_templates.py +++ b/gpt_buddy_bot/app/app_page_templates.py @@ -1,7 +1,6 @@ """Utilities for creating pages in a streamlit app.""" import pickle import sys -import time import uuid from abc import ABC, abstractmethod diff --git a/gpt_buddy_bot/app/multipage.py b/gpt_buddy_bot/app/multipage.py index c9e294a..7962b32 100644 --- a/gpt_buddy_bot/app/multipage.py +++ b/gpt_buddy_bot/app/multipage.py @@ -108,9 +108,33 @@ def add_page(self, page: ChatBotPage = None, selected: bool = True, **kwargs): def handle_ui_page_selection(self): """Control page selection in the UI sidebar.""" + + st.markdown( + """ + + """, + unsafe_allow_html=True, + ) + with self.sidebar_tabs["chats"]: for page in self.pages.values(): - col1, col2 = st.columns([0.8, 0.2]) + col1, col2 = st.columns([0.9, 0.1]) with col1: st.button( label=page.sidebar_title, @@ -118,6 +142,7 @@ def handle_ui_page_selection(self): on_click=self.register_selected_page, kwargs={"page": page}, use_container_width=True, + disabled=page.page_id == self.selected_page.page_id, ) with col2: st.button( From e20858c6b32ad544c4ffaa2615da1259e2361d0f Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Fri, 10 Nov 2023 00:57:21 +0100 Subject: [PATCH 098/109] Some linting 
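Besides adding docstrings, this collapses the nested context managers in ChatBotPage's render method (below) into a single with statement, the pattern flagged by, e.g., Ruff's SIM117 rule. Schematically, assuming any avatar value accepted by st.chat_message:

    import streamlit as st

    avatar = None  # None makes Streamlit fall back to the default avatar

    # Before: one context manager nested inside the other
    with st.chat_message("assistant", avatar=avatar):
        with st.empty():
            st.markdown("▌")

    # After: equivalent behavior, one less nesting level
    with st.chat_message("assistant", avatar=avatar), st.empty():
        st.markdown("▌")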
--- gpt_buddy_bot/__init__.py | 3 ++ gpt_buddy_bot/app/__init__.py | 1 + gpt_buddy_bot/app/app_page_templates.py | 69 +++++++++++++++++++------ gpt_buddy_bot/app/multipage.py | 30 +++++++++-- gpt_buddy_bot/argparse_wrapper.py | 8 +-- gpt_buddy_bot/chat.py | 59 ++++++++++++++++++++- gpt_buddy_bot/chat_context.py | 6 +-- gpt_buddy_bot/command_definitions.py | 3 +- gpt_buddy_bot/embeddings_database.py | 4 +- gpt_buddy_bot/openai_utils.py | 2 +- gpt_buddy_bot/tokens.py | 43 +++++++-------- pyproject.toml | 54 +++++++++++++++++++ tests/conftest.py | 18 +++---- tests/smoke/test_commands.py | 7 +-- tests/unit/test_chat.py | 25 +++++---- 15 files changed, 254 insertions(+), 78 deletions(-) diff --git a/gpt_buddy_bot/__init__.py b/gpt_buddy_bot/__init__.py index c7191f7..142cb2c 100644 --- a/gpt_buddy_bot/__init__.py +++ b/gpt_buddy_bot/__init__.py @@ -1,4 +1,5 @@ #!/usr/bin/env python3 +"""Unofficial OpenAI API UI and CLI tool.""" import os import tempfile import uuid @@ -9,6 +10,8 @@ class GeneralConstants: + """General constants for the package.""" + # Main package info RUN_ID = uuid.uuid4().hex PACKAGE_NAME = __name__ diff --git a/gpt_buddy_bot/app/__init__.py b/gpt_buddy_bot/app/__init__.py index e69de29..5246618 100644 --- a/gpt_buddy_bot/app/__init__.py +++ b/gpt_buddy_bot/app/__init__.py @@ -0,0 +1 @@ +"""UI for the package.""" diff --git a/gpt_buddy_bot/app/app_page_templates.py b/gpt_buddy_bot/app/app_page_templates.py index a8a07fd..8d4cb3d 100644 --- a/gpt_buddy_bot/app/app_page_templates.py +++ b/gpt_buddy_bot/app/app_page_templates.py @@ -24,18 +24,34 @@ class AppPage(ABC): - """Abstract base class for pages in a streamlit app.""" + """Abstract base class for a page within a streamlit application. + + Attributes: + page_id (str): Unique identifier for the page. + page_number (int): The current number of created pages. + _fallback_sidebar_title (str): Default sidebar title used if none is provided. + _fallback_page_title (str): Default page title used if none is provided. + + """ def __init__(self, sidebar_title: str = "", page_title: str = ""): + """Initializes a new instance of the AppPage class. + + Args: + sidebar_title (str, optional): The title to be displayed in the sidebar. + Defaults to an empty string. + page_title (str, optional): The title to be displayed on the page. + Defaults to an empty string. + """ self.page_id = str(uuid.uuid4()) self.page_number = st.session_state.get("n_created_pages", 0) + 1 chat_number_for_title = f"Chat #{self.page_number}" if page_title is _RecoveredChat: - self._fallback_page_title = f"{chat_number_for_title.strip('#')} (Recovered)" + self.fallback_page_title = f"{chat_number_for_title.strip('#')} (Recovered)" page_title = None else: - self._fallback_page_title = chat_number_for_title + self.fallback_page_title = chat_number_for_title if page_title: self.title = page_title @@ -63,7 +79,7 @@ def sidebar_title(self, value: str): @property def title(self): """Get the title of the page.""" - return self.state.get("page_title", self._fallback_page_title) + return self.state.get("page_title", self.fallback_page_title) @title.setter def title(self, value: str): @@ -76,9 +92,29 @@ def render(self): class ChatBotPage(AppPage): + """Represents a chatbot page within a streamlit application, inheriting from AppPage. + + Args: + AppPage ([type]): [description]
+ + """ + def __init__( self, chat_obj: Chat = None, sidebar_title: str = "", page_title: str = "" ): + """Initialize new instance of the ChatBotPage class with an optional Chat object. + + Args: + chat_obj (Chat, optional): The chat object. Defaults to None. + sidebar_title (str, optional): The sidebar title for the chatbot page. + Defaults to an empty string. + page_title (str, optional): The title for the chatbot page. + Defaults to an empty string. + """ super().__init__(sidebar_title=sidebar_title, page_title=page_title) if chat_obj: @@ -168,18 +204,19 @@ def render(self): ) # Display (stream) assistant response in chat message container - with st.chat_message("assistant", avatar=self.avatars["assistant"]): - with st.empty(): - st.markdown("▌") - full_response = "" - try: - for chunk in self.chat_obj.respond_user_prompt(prompt): - full_response += chunk - st.markdown(full_response + "▌") - except CannotConnectToApiError: - full_response = self.chat_obj._api_connection_error_msg - finally: - st.markdown(full_response) + with st.chat_message( + "assistant", avatar=self.avatars["assistant"] + ), st.empty(): + st.markdown("▌") + full_response = "" + try: + for chunk in self.chat_obj.respond_user_prompt(prompt): + full_response += chunk + st.markdown(full_response + "▌") + except CannotConnectToApiError: + full_response = self.chat_obj.api_connection_error_msg + finally: + st.markdown(full_response) self.chat_history.append( { diff --git a/gpt_buddy_bot/app/multipage.py b/gpt_buddy_bot/app/multipage.py index 7962b32..e6c15cf 100644 --- a/gpt_buddy_bot/app/multipage.py +++ b/gpt_buddy_bot/app/multipage.py @@ -1,4 +1,4 @@ -"Code for the creation streamlit apps with dynamically created pages." +"""Code for the creation streamlit apps with dynamically created pages.""" import contextlib from abc import ABC, abstractmethod @@ -81,7 +81,14 @@ def render(self, **kwargs): class MultipageChatbotApp(AbstractMultipageApp): + """A Streamlit multipage app specifically for chatbot interactions. + + Inherits from AbstractMultipageApp and adds chatbot-specific functionalities. + + """ + def init_openai_client(self): + """Initializes the OpenAI client with the API key provided in the Streamlit UI.""" # Initialize the OpenAI API client placeholher = ( "OPENAI_API_KEY detected" @@ -102,13 +109,25 @@ def init_openai_client(self): st.write(":red[You need to provide a key to use the chat]") def add_page(self, page: ChatBotPage = None, selected: bool = True, **kwargs): + """Adds a new ChatBotPage to the app. + + If no page is specified, a new instance of ChatBotPage is created and added. + + Args: + page: The ChatBotPage to be added. If None, a new page is created. + selected: Whether the added page should be selected immediately. + **kwargs: Additional keyword arguments for ChatBotPage creation. + + Returns: + The result of the superclass's add_page method. 
+ + """ if page is None: page = ChatBotPage(**kwargs) return super().add_page(page=page, selected=selected) def handle_ui_page_selection(self): - """Control page selection in the UI sidebar.""" - + """Control page selection and removal in the UI sidebar.""" st.markdown( """ - """, - unsafe_allow_html=True, + def get_widget_previous_value(self, widget_key, default=None): + """Get the previous value of a widget, if any.""" + if "widget_previous_value" not in self.selected_page.state: + self.selected_page.state["widget_previous_value"] = {} + return self.selected_page.state["widget_previous_value"].get(widget_key, default) + + def save_widget_previous_values(self, element_key): + """Save a widget's 'previous value`, to be read by `get_widget_previous_value`.""" + if "widget_previous_value" not in self.selected_page.state: + self.selected_page.state["widget_previous_value"] = {} + self.selected_page.state["widget_previous_value"][ + element_key + ] = st.session_state.get(element_key) + + def get_saved_chat_cache_dir_paths(self): + """Get the filepaths of saved chat contexts, sorted by last modified.""" + return sorted( + ( + directory + for directory in GeneralConstants.CHAT_CACHE_DIR.glob("chat_*/") + if next(directory.iterdir(), False) + ), + key=lambda fpath: fpath.stat().st_mtime, + reverse=True, ) - with self.sidebar_tabs["chats"]: - for page in self.pages.values(): - col1, col2 = st.columns([0.9, 0.1]) - with col1: - st.button( - label=page.sidebar_title, - key=f"select_{page.page_id}", - on_click=self.register_selected_page, - kwargs={"page": page}, - use_container_width=True, - disabled=page.page_id == self.selected_page.page_id, - ) - with col2: - st.button( - ":wastebasket:", - key=f"delete_{page.page_id}", - type="primary", - use_container_width=True, - on_click=self.remove_page, - kwargs={"page": page}, - help="Delete this chat.", - ) + def handle_ui_page_selection(self): + """Control page selection and removal in the UI sidebar.""" + _set_button_style() + self._build_sidebar_tabs() with self.sidebar_tabs["settings"]: caption = f"\u2699\uFE0F Settings for Chat #{self.selected_page.page_number}" @@ -180,7 +163,6 @@ def handle_ui_page_selection(self): caption += f": {self.selected_page.title}" st.caption(caption) current_chat_configs = self.selected_page.chat_obj.configs - updates_to_chat_configs = {} # Present the user with the model and instructions fields first field_names = ["model", "ai_instructions", "context_model"] @@ -188,108 +170,9 @@ def handle_ui_page_selection(self): field_names = list(dict.fromkeys(field_names)) model_fields = {k: ChatOptions.model_fields[k] for k in field_names} - # Keep track of selected values so that selectbox doesn't reset - if "widget_previous_value" not in self.selected_page.state: - self.selected_page.state["widget_previous_value"] = {} - - def save_widget_previous_values(element_key): - self.selected_page.state["widget_previous_value"][ - element_key - ] = st.session_state.get(element_key) - - for field_name, field in model_fields.items(): - title = field_name.replace("_", " ").title() - choices = ChatOptions.get_allowed_values(field=field_name) - description = ChatOptions.get_description(field=field_name) - field_type = ChatOptions.get_type(field=field_name) - - # Check if the field is frozen and disable corresponding UI element if so - chat_started = self.selected_page.state.get("chat_started", False) - extra_info = field.json_schema_extra - if extra_info is None: - extra_info = {} - disable_ui_element = extra_info.get("frozen", False) and ( 
- chat_started - or any( - msg["role"] == "user" for msg in self.selected_page.chat_history - ) - ) - - # Keep track of selected values so that selectbox doesn't reset - current_config_value = getattr(current_chat_configs, field_name) - element_key = f"{field_name}-pg-{self.selected_page.page_id}-ui-element" - widget_previous_value = self.selected_page.state[ - "widget_previous_value" - ].get(element_key, current_config_value) - if choices: - new_field_value = st.selectbox( - title, - key=element_key, - options=choices, - index=choices.index(widget_previous_value), - help=description, - disabled=disable_ui_element, - on_change=save_widget_previous_values, - args=[element_key], - ) - elif field_type == str: - new_field_value = st.text_input( - title, - key=element_key, - value=widget_previous_value, - help=description, - disabled=disable_ui_element, - on_change=save_widget_previous_values, - args=[element_key], - ) - elif field_type in [int, float]: - step = 1 if field_type == int else 0.01 - bounds = [None, None] - for item in field.metadata: - with contextlib.suppress(AttributeError): - bounds[0] = item.gt + step - with contextlib.suppress(AttributeError): - bounds[0] = item.ge - with contextlib.suppress(AttributeError): - bounds[1] = item.lt - step - with contextlib.suppress(AttributeError): - bounds[1] = item.le - - new_field_value = st.number_input( - title, - key=element_key, - value=widget_previous_value, - placeholder="OpenAI Default", - min_value=bounds[0], - max_value=bounds[1], - step=step, - help=description, - disabled=disable_ui_element, - on_change=save_widget_previous_values, - args=[element_key], - ) - elif field_type in (list, tuple): - prev_value = ( - widget_previous_value - if isinstance(widget_previous_value, str) - else "\n".join(widget_previous_value) - ) - new_field_value = st.text_area( - title, - value=prev_value.strip(), - key=element_key, - help=description, - disabled=disable_ui_element, - on_change=save_widget_previous_values, - args=[element_key], - ) - else: - continue - - if new_field_value != current_config_value: - if field_type in (list, tuple): - new_field_value = tuple(new_field_value.strip().split("\n")) - updates_to_chat_configs[field_name] = new_field_value + updates_to_chat_configs = self._handle_chat_configs_value_selection( + current_chat_configs, model_fields + ) if updates_to_chat_configs: new_chat_configs = current_chat_configs.model_dump() @@ -298,18 +181,6 @@ def save_widget_previous_values(element_key): self.selected_page.chat_obj = new_chat new_chat.save_cache() - def get_saved_chat_cache_dir_paths(self): - """Get the filepaths of saved chat contexts, sorted by last modified.""" - return sorted( - ( - directory - for directory in GeneralConstants.CHAT_CACHE_DIR.glob("chat_*/") - if next(directory.iterdir(), False) - ), - key=lambda fpath: fpath.stat().st_mtime, - reverse=True, - ) - def render(self, **kwargs): """Renders the multipage chatbot app in the UI according to the selected page.""" with st.sidebar: @@ -350,3 +221,149 @@ def render(self, **kwargs): self.add_page() return super().render(**kwargs) + + def _build_sidebar_tabs(self): + with self.sidebar_tabs["chats"]: + for page in self.pages.values(): + col1, col2 = st.columns([0.9, 0.1]) + with col1: + st.button( + label=page.sidebar_title, + key=f"select_{page.page_id}", + on_click=self.register_selected_page, + kwargs={"page": page}, + use_container_width=True, + disabled=page.page_id == self.selected_page.page_id, + ) + with col2: + st.button( + ":wastebasket:", + 
key=f"delete_{page.page_id}", + type="primary", + use_container_width=True, + on_click=self.remove_page, + kwargs={"page": page}, + help="Delete this chat.", + ) + + def _handle_chat_configs_value_selection(self, current_chat_configs, model_fields): + updates_to_chat_configs = {} + for field_name, field in model_fields.items(): + title = field_name.replace("_", " ").title() + choices = ChatOptions.get_allowed_values(field=field_name) + description = ChatOptions.get_description(field=field_name) + field_type = ChatOptions.get_type(field=field_name) + + # Check if the field is frozen and disable corresponding UI element if so + chat_started = self.selected_page.state.get("chat_started", False) + extra_info = field.json_schema_extra + if extra_info is None: + extra_info = {} + disable_ui_element = extra_info.get("frozen", False) and ( + chat_started + or any(msg["role"] == "user" for msg in self.selected_page.chat_history) + ) + + # Keep track of selected values so that selectbox doesn't reset + current_config_value = getattr(current_chat_configs, field_name) + element_key = f"{field_name}-pg-{self.selected_page.page_id}-ui-element" + widget_previous_value = self.get_widget_previous_value( + element_key, default=current_config_value + ) + if choices: + new_field_value = st.selectbox( + title, + key=element_key, + options=choices, + index=choices.index(widget_previous_value), + help=description, + disabled=disable_ui_element, + on_change=self.save_widget_previous_values, + args=[element_key], + ) + elif field_type == str: + new_field_value = st.text_input( + title, + key=element_key, + value=widget_previous_value, + help=description, + disabled=disable_ui_element, + on_change=self.save_widget_previous_values, + args=[element_key], + ) + elif field_type in [int, float]: + step = 1 if field_type == int else 0.01 + bounds = [None, None] + for item in field.metadata: + with contextlib.suppress(AttributeError): + bounds[0] = item.gt + step + with contextlib.suppress(AttributeError): + bounds[0] = item.ge + with contextlib.suppress(AttributeError): + bounds[1] = item.lt - step + with contextlib.suppress(AttributeError): + bounds[1] = item.le + + new_field_value = st.number_input( + title, + key=element_key, + value=widget_previous_value, + placeholder="OpenAI Default", + min_value=bounds[0], + max_value=bounds[1], + step=step, + help=description, + disabled=disable_ui_element, + on_change=self.save_widget_previous_values, + args=[element_key], + ) + elif field_type in (list, tuple): + prev_value = ( + widget_previous_value + if isinstance(widget_previous_value, str) + else "\n".join(widget_previous_value) + ) + new_field_value = st.text_area( + title, + value=prev_value.strip(), + key=element_key, + help=description, + disabled=disable_ui_element, + on_change=self.save_widget_previous_values, + args=[element_key], + ) + else: + continue + + if new_field_value != current_config_value: + if field_type in (list, tuple): + new_field_value = tuple(new_field_value.strip().split("\n")) + updates_to_chat_configs[field_name] = new_field_value + + return updates_to_chat_configs + + +def _set_button_style(): + """CSS styling for the buttons in the app.""" + st.markdown( + """ + + """, + unsafe_allow_html=True, + ) diff --git a/gpt_buddy_bot/openai_utils.py b/gpt_buddy_bot/openai_utils.py index 9a05e90..34aee2b 100644 --- a/gpt_buddy_bot/openai_utils.py +++ b/gpt_buddy_bot/openai_utils.py @@ -28,10 +28,7 @@ def retry_api_call(max_n_attempts=5, auth_error_msg="Problems connecting to Open def on_error(error, 
n_attempts): if n_attempts < max_n_attempts: logger.warning( - " > {}. Making new attempt ({}/{})...", - error, - n_attempts + 1, - max_n_attempts, + "{}. Making new attempt ({}/{})...", error, n_attempts + 1, max_n_attempts ) time.sleep(1) else: From 02d2c75d2299266c8cc8b6c567c4d8120d6ccd00 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Fri, 10 Nov 2023 11:27:03 +0100 Subject: [PATCH 105/109] Lint using pydoclint --- .flakeheaven.toml | 24 ++++++++++++++++++++++ gpt_buddy_bot/app/app_page_templates.py | 27 +++++-------------------- gpt_buddy_bot/embeddings_database.py | 3 +++ pyproject.toml | 3 +++ 4 files changed, 35 insertions(+), 22 deletions(-) create mode 100644 .flakeheaven.toml diff --git a/.flakeheaven.toml b/.flakeheaven.toml new file mode 100644 index 0000000..47c8298 --- /dev/null +++ b/.flakeheaven.toml @@ -0,0 +1,24 @@ +[tool.flakeheaven] + exclude = [".*/", "tmp/", "*/tmp/", "*.ipynb"] + format = "colored" + # Show line of source code in output, with syntax highlighting + show_source = true + style = "google" + +# list of plugins and rules for them +[tool.flakeheaven.plugins] + # Deactivate all rules for all plugins by default + "*" = ["-*"] + # Activate only those plugins not covered by ruff + pydoclint = [ + "+*", + "-DOC105", + "-DOC106", + "-DOC107", + "-DOC109", + "-DOC110", + "-DOC203", + "-DOC301", + "-DOC403", + "-DOC404", + ] diff --git a/gpt_buddy_bot/app/app_page_templates.py b/gpt_buddy_bot/app/app_page_templates.py index 29a1952..5abb440 100644 --- a/gpt_buddy_bot/app/app_page_templates.py +++ b/gpt_buddy_bot/app/app_page_templates.py @@ -23,15 +23,7 @@ class AppPage(ABC): - """Abstract base class for a page within a streamlit application. - - Attributes: - page_id (str): Unique identifier for the page. - page_number (int): The current number of created pages. - _fallback_sidebar_title (str): Default sidebar title used if none is provided. - _fallback_page_title (str): Default page title used if none is provided. - - """ + """Abstract base class for a page within a streamlit application.""" def __init__(self, sidebar_title: str = "", page_title: str = ""): """Initializes a new instance of the AppPage class. @@ -91,16 +83,7 @@ def render(self): class ChatBotPage(AppPage): - """Represents a chatbot page within a streamlit application, inheriting from AppPage. - - Args: - AppPage ([type]): [description] - - Attributes: - chat_obj (Chat): Chat object that manages the chat interactions. - avatars (dict): Dictionary holding avatar images for participants. - - """ + """Implement a chatbot page in a streamlit application, inheriting from AppPage.""" def __init__( self, chat_obj: Chat = None, sidebar_title: str = "", page_title: str = "" @@ -108,10 +91,10 @@ def __init__( """Initialize new instance of the ChatBotPage class with an optional Chat object. Args: - chat_obj (Chat, optional): The chat object. Defaults to None. - sidebar_title (str, optional): The sidebar title for the chatbot page. + chat_obj (Chat): The chat object. Defaults to None. + sidebar_title (str): The sidebar title for the chatbot page. Defaults to an empty string. - page_title (str, optional): The title for the chatbot page. + page_title (str): The title for the chatbot page. Defaults to an empty string. 
""" super().__init__(sidebar_title=sidebar_title, page_title=page_title) diff --git a/gpt_buddy_bot/embeddings_database.py b/gpt_buddy_bot/embeddings_database.py index 55daad7..45f3ec3 100644 --- a/gpt_buddy_bot/embeddings_database.py +++ b/gpt_buddy_bot/embeddings_database.py @@ -101,6 +101,9 @@ def insert_message_exchange(self, chat_model, message_exchange, embedding): chat_model (str): The chat model. message_exchange: The message exchange. embedding: The embedding associated with the message exchange. + + Raises: + ValueError: If the database already contains a different embedding model. """ stored_embedding_model = self.get_embedding_model() if stored_embedding_model is None: diff --git a/pyproject.toml b/pyproject.toml index 3993d1c..e9e2b55 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -55,6 +55,9 @@ [tool.black] line-length = 90 +[tool.flakeheaven] + base = ".flakeheaven.toml" + [tool.isort] line_length = 90 profile = "black" From f268b562f6a51af5d6755807eafd6e5f9b705dae Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Fri, 10 Nov 2023 12:28:46 +0100 Subject: [PATCH 106/109] Add poe tasks for linting and test checks --- pyproject.toml | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index e9e2b55..3bf9ebb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -124,3 +124,35 @@ addopts = "-v --failed-first --cov-report=term-missing --cov-report=term:skip-covered --cov-report=xml:.coverage.xml --cov=./" log_cli_level = "INFO" testpaths = ["tests/smoke", "tests/unit"] + + #################################### + # Leave configs for `poe` separate # + #################################### + +[tool.poe] + poetry_command = "devtools" + +[tool.poe.tasks] + _black = "black ." + _isort = "isort ." + _ruff = "ruff check ." + # Test-related tasks + pytest = "pytest" + # Tasks to be run as pre-push checks + pre-push-checks = ["lint", "pytest"] + +[tool.poe.tasks._flake8] + cmd = "flakeheaven lint ." 
+ env = {FLAKEHEAVEN_CACHE_TIMEOUT = "0"} + +[tool.poe.tasks.lint] + args = [{name = "fix", type = "boolean", default = false}] + control = {expr = "fix"} + +[[tool.poe.tasks.lint.switch]] + case = "True" + sequence = ["_isort", "_black", "_ruff --fix", "_flake8"] + +[[tool.poe.tasks.lint.switch]] + case = "False" + sequence = ["_isort --check-only", "_black --check --diff", "_ruff", "_flake8"] From 645eb6a03826bc1b7ef325f2aa0bd687eb7003cd Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Fri, 10 Nov 2023 12:30:18 +0100 Subject: [PATCH 107/109] Add linting workflow --- .github/workflows/linting.yaml | 68 ++++++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 .github/workflows/linting.yaml diff --git a/.github/workflows/linting.yaml b/.github/workflows/linting.yaml new file mode 100644 index 0000000..aefdd41 --- /dev/null +++ b/.github/workflows/linting.yaml @@ -0,0 +1,68 @@ +#.github/workflows/linting.yaml +name: Linting Checks + +on: + pull_request: + branches: + - main + - develop + paths: + - '**.py' + - '.github/workflows/linting.yaml' + push: + branches: + - '**' # Every branch + paths: + - '**.py' + - '.github/workflows/linting.yaml' + +jobs: + linting: + if: github.repository_owner == 'paulovcmedeiros' + name: Run Linters + runs-on: ubuntu-latest + steps: + #---------------------------------------------- + # check-out repo and set-up python + #---------------------------------------------- + - name: Check out repository + uses: actions/checkout@v3 + - name: Set up python + id: setup-python + uses: actions/setup-python@v4 + with: + python-version: '3.9' + + #---------------------------------------------- + # --- configure poetry & install project ---- + #---------------------------------------------- + - name: Install Poetry + uses: snok/install-poetry@v1 + with: + virtualenvs-create: true + virtualenvs-in-project: true + + - name: Install poethepoet + run: poetry self add 'poethepoet[poetry_plugin]' + + - name: Load cached venv (if cache exists) + id: cached-poetry-dependencies + uses: actions/cache@v3 + with: + path: .venv + key: ${{ github.job }}-venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/pyproject.toml', '**/poetry.toml') }} + + - name: Install dependencies (if venv cache is not found) + if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true' + run: poetry install --no-interaction --no-root --only main,linting + + - name: Install the project itself + run: poetry install --no-interaction --only-root + + #---------------------------------------------- + # Run the linting checks + #---------------------------------------------- + - name: Run linters + run: | + poetry devtools lint + From a7947c7ca9a64b74e472115dd5d872f6012ed7c2 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Fri, 10 Nov 2023 14:56:34 +0100 Subject: [PATCH 108/109] Change package name to pyRobBot. Add README. 
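The new executable name comes from the updated console-script entry in pyproject.toml (shown in the diff below):

    [tool.poetry.scripts]
    rob = "pyrobbot.__main__:main"

i.e., the CLI is now invoked as `rob` instead of `gbb`, and the importable package becomes `pyrobbot`.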
--- README.md | 76 ++++++++++++++++++ pyproject.toml | 6 +- {gpt_buddy_bot => pyrobbot}/__init__.py | 2 +- {gpt_buddy_bot => pyrobbot}/__main__.py | 0 .../app/.streamlit/config.toml | 0 {gpt_buddy_bot => pyrobbot}/app/__init__.py | 0 {gpt_buddy_bot => pyrobbot}/app/app.py | 4 +- .../app/app_page_templates.py | 8 +- .../app/data/assistant_avatar.png | Bin .../app/data/user_avatar.png | Bin {gpt_buddy_bot => pyrobbot}/app/multipage.py | 8 +- .../argparse_wrapper.py | 0 {gpt_buddy_bot => pyrobbot}/chat.py | 0 {gpt_buddy_bot => pyrobbot}/chat_configs.py | 6 +- {gpt_buddy_bot => pyrobbot}/chat_context.py | 0 .../command_definitions.py | 0 .../embeddings_database.py | 0 {gpt_buddy_bot => pyrobbot}/openai_utils.py | 0 {gpt_buddy_bot => pyrobbot}/tokens.py | 0 tests/conftest.py | 10 +-- tests/smoke/test_app.py | 4 +- tests/smoke/test_commands.py | 4 +- tests/unit/test_chat.py | 4 +- 23 files changed, 103 insertions(+), 29 deletions(-) rename {gpt_buddy_bot => pyrobbot}/__init__.py (94%) rename {gpt_buddy_bot => pyrobbot}/__main__.py (100%) rename {gpt_buddy_bot => pyrobbot}/app/.streamlit/config.toml (100%) rename {gpt_buddy_bot => pyrobbot}/app/__init__.py (100%) rename {gpt_buddy_bot => pyrobbot}/app/app.py (72%) rename {gpt_buddy_bot => pyrobbot}/app/app_page_templates.py (97%) rename {gpt_buddy_bot => pyrobbot}/app/data/assistant_avatar.png (100%) rename {gpt_buddy_bot => pyrobbot}/app/data/user_avatar.png (100%) rename {gpt_buddy_bot => pyrobbot}/app/multipage.py (98%) rename {gpt_buddy_bot => pyrobbot}/argparse_wrapper.py (100%) rename {gpt_buddy_bot => pyrobbot}/chat.py (100%) rename {gpt_buddy_bot => pyrobbot}/chat_configs.py (97%) rename {gpt_buddy_bot => pyrobbot}/chat_context.py (100%) rename {gpt_buddy_bot => pyrobbot}/command_definitions.py (100%) rename {gpt_buddy_bot => pyrobbot}/embeddings_database.py (100%) rename {gpt_buddy_bot => pyrobbot}/openai_utils.py (100%) rename {gpt_buddy_bot => pyrobbot}/tokens.py (100%) diff --git a/README.md b/README.md index e69de29..0bcc75a 100644 --- a/README.md +++ b/README.md @@ -0,0 +1,76 @@ +[![GitHub](https://img.shields.io/badge/github-%23121011.svg?style=for-the-badge&logo=github&logoColor=white)](https://github.com/paulovcmedeiros/pyRobBot) +[![Github Pages](https://img.shields.io/badge/github%20pages-121013?style=for-the-badge&logo=github&logoColor=white)](https://paulovcmedeiros.github.io/pyRobBot-docs/) + + +[![Contributors Welcome](https://img.shields.io/badge/Contributors-welcome-.svg)](https://github.com/paulovcmedeiros/pyRobBot/pulls) +[![Linting](https://github.com/paulovcmedeiros/pyRobBot/actions/workflows/linting.yaml/badge.svg)](https://github.com/paulovcmedeiros/pyRobBot/actions/workflows/linting.yaml) +[![Tests](https://github.com/paulovcmedeiros/pyRobBot/actions/workflows/tests.yaml/badge.svg)](https://github.com/paulovcmedeiros/pyRobBot/actions/workflows/tests.yaml) +[![codecov](https://codecov.io/gh/paulovcmedeiros/pyRobBot/graph/badge.svg?token=XI8G1WH9O6)](https://codecov.io/gh/paulovcmedeiros/pyRobBot) + +# pyRobBot + +A simple chatbot that uses the OpenAI API to get responses from [GPT LLMs](https://platform.openai.com/docs/models). Written in Python with a Web UI made with [Streamlit](https://streamlit.io). Can also be used directly from the terminal. + +See also the [online documentation](https://paulovcmedeiros.github.io/pyRobBot-docs).
+ +## Features +- [x] Web UI + - Add/remove conversations dynamically +- [x] Fully configurable + - Support for multiple GPT LLMs + - Control over the parameters passed to the OpenAI API, with (hopefully) sensible defaults + - Ability to modify the chat parameters in the same conversation + - Each conversation has its own parameters +- [x] Autosave and retrieve chat history +- [x] Chat context handling using [embeddings](https://platform.openai.com/docs/guides/embeddings) +- [x] Keep track of estimated token usage and associated API call costs +- [x] Terminal UI + + +## System Requirements +- Python >= 3.9 +- A valid [OpenAI API key](https://platform.openai.com/account/api-keys) + - Set in the Web UI or through the environment variable `OPENAI_API_KEY` + +## Installation +### Using pip +```shell +pip install pyrobbot +``` + +### From source +```shell +pip install git+https://github.com/paulovcmedeiros/pyRobBot.git +``` + +## Basic Usage +Upon successful installation, you should be able to run +```shell +rob [opts] SUBCOMMAND [subcommand_opts] +``` +where `[opts]` and `[subcommand_opts]` denote optional command line arguments +that apply, respectively, to `rob` in general and to `SUBCOMMAND` +specifically. + +**Please run `rob -h` for information** about the supported subcommands +and general `rob` options. For info about specific subcommands and the +options that apply to them only, **please run `rob SUBCOMMAND -h`** (note +that the `-h` goes after the subcommand in this case). + +### Using the Web UI +```shell +rob +``` + +### Running on the Terminal +```shell +rob . +``` +## Disclaimers +This project's main purpose is to serve as a learning exercise for me (the author) and to serve as a tool for experimenting with the OpenAI API and GPT LLMs. It does not aim to be the best or most robust OpenAI-powered chatbot out there. + +Having said this, this project *does* aim to have a friendly user interface and to be easy to use and configure. So, please feel free to open an issue or submit a pull request if you find a bug or have a suggestion. + +Last but not least: this project is **not** affiliated with OpenAI in any way. + + diff --git a/pyproject.toml b/pyproject.toml index 3bf9ebb..b6fbe46 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,8 +1,8 @@ [tool.poetry] authors = ["Paulo V C Medeiros "] - description = "A simple UI & terminal ChatGPT chatbot that uses OpenAI API." + description = "A simple UI & terminal chatbot that uses the OpenAI API."
license = "MIT" - name = "gpt-buddy-bot" + name = "pyrobbot" readme = "README.md" version = "0.1.0" @@ -11,7 +11,7 @@ requires = ["poetry-core"] [tool.poetry.scripts] - gbb = "gpt_buddy_bot.__main__:main" + rob = "pyrobbot.__main__:main" [tool.poetry.dependencies] # Python version diff --git a/gpt_buddy_bot/__init__.py b/pyrobbot/__init__.py similarity index 94% rename from gpt_buddy_bot/__init__.py rename to pyrobbot/__init__.py index 142cb2c..acf2c68 100644 --- a/gpt_buddy_bot/__init__.py +++ b/pyrobbot/__init__.py @@ -25,7 +25,7 @@ class GeneralConstants: CHAT_CACHE_DIR = PACKAGE_CACHE_DIRECTORY / "chats" # Constants related to the app - APP_NAME = PACKAGE_NAME.title().replace("Gpt", "GPT").replace("_", " ") + APP_NAME = "pyRobBot" APP_DIR = PACKAGE_DIRECTORY / "app" APP_PATH = APP_DIR / "app.py" PARSED_ARGS_FILE = PACKAGE_TMPDIR / f"parsed_args_{RUN_ID}.pkl" diff --git a/gpt_buddy_bot/__main__.py b/pyrobbot/__main__.py similarity index 100% rename from gpt_buddy_bot/__main__.py rename to pyrobbot/__main__.py diff --git a/gpt_buddy_bot/app/.streamlit/config.toml b/pyrobbot/app/.streamlit/config.toml similarity index 100% rename from gpt_buddy_bot/app/.streamlit/config.toml rename to pyrobbot/app/.streamlit/config.toml diff --git a/gpt_buddy_bot/app/__init__.py b/pyrobbot/app/__init__.py similarity index 100% rename from gpt_buddy_bot/app/__init__.py rename to pyrobbot/app/__init__.py diff --git a/gpt_buddy_bot/app/app.py b/pyrobbot/app/app.py similarity index 72% rename from gpt_buddy_bot/app/app.py rename to pyrobbot/app/app.py index 7d3bb67..20a9161 100644 --- a/gpt_buddy_bot/app/app.py +++ b/pyrobbot/app/app.py @@ -1,6 +1,6 @@ """Entrypoint for the package's UI.""" -from gpt_buddy_bot import GeneralConstants -from gpt_buddy_bot.app.multipage import MultipageChatbotApp +from pyrobbot import GeneralConstants +from pyrobbot.app.multipage import MultipageChatbotApp def run_app(): diff --git a/gpt_buddy_bot/app/app_page_templates.py b/pyrobbot/app/app_page_templates.py similarity index 97% rename from gpt_buddy_bot/app/app_page_templates.py rename to pyrobbot/app/app_page_templates.py index 5abb440..86095ac 100644 --- a/gpt_buddy_bot/app/app_page_templates.py +++ b/pyrobbot/app/app_page_templates.py @@ -6,10 +6,10 @@ import streamlit as st from PIL import Image -from gpt_buddy_bot import GeneralConstants -from gpt_buddy_bot.chat import Chat -from gpt_buddy_bot.chat_configs import ChatOptions -from gpt_buddy_bot.openai_utils import CannotConnectToApiError +from pyrobbot import GeneralConstants +from pyrobbot.chat import Chat +from pyrobbot.chat_configs import ChatOptions +from pyrobbot.openai_utils import CannotConnectToApiError _AVATAR_FILES_DIR = GeneralConstants.APP_DIR / "data" _ASSISTANT_AVATAR_FILE_PATH = _AVATAR_FILES_DIR / "assistant_avatar.png" diff --git a/gpt_buddy_bot/app/data/assistant_avatar.png b/pyrobbot/app/data/assistant_avatar.png similarity index 100% rename from gpt_buddy_bot/app/data/assistant_avatar.png rename to pyrobbot/app/data/assistant_avatar.png diff --git a/gpt_buddy_bot/app/data/user_avatar.png b/pyrobbot/app/data/user_avatar.png similarity index 100% rename from gpt_buddy_bot/app/data/user_avatar.png rename to pyrobbot/app/data/user_avatar.png diff --git a/gpt_buddy_bot/app/multipage.py b/pyrobbot/app/multipage.py similarity index 98% rename from gpt_buddy_bot/app/multipage.py rename to pyrobbot/app/multipage.py index 5bce548..fd0ed02 100644 --- a/gpt_buddy_bot/app/multipage.py +++ b/pyrobbot/app/multipage.py @@ -6,10 +6,10 @@ import streamlit as st from 
pydantic import ValidationError -from gpt_buddy_bot import GeneralConstants -from gpt_buddy_bot.app.app_page_templates import AppPage, ChatBotPage, _RecoveredChat -from gpt_buddy_bot.chat import Chat -from gpt_buddy_bot.chat_configs import ChatOptions +from pyrobbot import GeneralConstants +from pyrobbot.app.app_page_templates import AppPage, ChatBotPage, _RecoveredChat +from pyrobbot.chat import Chat +from pyrobbot.chat_configs import ChatOptions class AbstractMultipageApp(ABC): diff --git a/gpt_buddy_bot/argparse_wrapper.py b/pyrobbot/argparse_wrapper.py similarity index 100% rename from gpt_buddy_bot/argparse_wrapper.py rename to pyrobbot/argparse_wrapper.py diff --git a/gpt_buddy_bot/chat.py b/pyrobbot/chat.py similarity index 100% rename from gpt_buddy_bot/chat.py rename to pyrobbot/chat.py diff --git a/gpt_buddy_bot/chat_configs.py b/pyrobbot/chat_configs.py similarity index 97% rename from gpt_buddy_bot/chat_configs.py rename to pyrobbot/chat_configs.py index 4e81bd3..42d3607 100644 --- a/gpt_buddy_bot/chat_configs.py +++ b/pyrobbot/chat_configs.py @@ -10,7 +10,7 @@ from pydantic import BaseModel, Field -from gpt_buddy_bot import GeneralConstants +from pyrobbot import GeneralConstants class BaseConfigModel(BaseModel): @@ -124,9 +124,7 @@ class ChatOptions(OpenAiApiCallOptions): """Model for the chat's configuration options.""" username: str = Field(default=getuser(), description="Name of the chat's user") - assistant_name: str = Field( - default=GeneralConstants.APP_NAME, description="Name of the chat's assistant" - ) + assistant_name: str = Field(default="Rob", description="Name of the chat's assistant") system_name: str = Field( default=f"{GeneralConstants.PACKAGE_NAME}_system", description="Name of the chat's system", diff --git a/gpt_buddy_bot/chat_context.py b/pyrobbot/chat_context.py similarity index 100% rename from gpt_buddy_bot/chat_context.py rename to pyrobbot/chat_context.py diff --git a/gpt_buddy_bot/command_definitions.py b/pyrobbot/command_definitions.py similarity index 100% rename from gpt_buddy_bot/command_definitions.py rename to pyrobbot/command_definitions.py diff --git a/gpt_buddy_bot/embeddings_database.py b/pyrobbot/embeddings_database.py similarity index 100% rename from gpt_buddy_bot/embeddings_database.py rename to pyrobbot/embeddings_database.py diff --git a/gpt_buddy_bot/openai_utils.py b/pyrobbot/openai_utils.py similarity index 100% rename from gpt_buddy_bot/openai_utils.py rename to pyrobbot/openai_utils.py diff --git a/gpt_buddy_bot/tokens.py b/pyrobbot/tokens.py similarity index 100% rename from gpt_buddy_bot/tokens.py rename to pyrobbot/tokens.py diff --git a/tests/conftest.py b/tests/conftest.py index 8bad2b4..cbb018a 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -5,9 +5,9 @@ import openai import pytest -import gpt_buddy_bot -from gpt_buddy_bot.chat import Chat -from gpt_buddy_bot.chat_configs import ChatOptions +import pyrobbot +from pyrobbot.chat import Chat +from pyrobbot.chat_configs import ChatOptions # Register markers and constants @@ -22,7 +22,7 @@ def pytest_configure(config): ) pytest.ORIGINAL_PACKAGE_CACHE_DIRECTORY = ( - gpt_buddy_bot.GeneralConstants.PACKAGE_CACHE_DIRECTORY + pyrobbot.GeneralConstants.PACKAGE_CACHE_DIRECTORY ) @@ -35,7 +35,7 @@ def _set_env(): @pytest.fixture(autouse=True) def _mocked_general_constants(tmp_path): - gpt_buddy_bot.GeneralConstants.PACKAGE_CACHE_DIRECTORY = tmp_path / "cache" + pyrobbot.GeneralConstants.PACKAGE_CACHE_DIRECTORY = tmp_path / "cache" @pytest.fixture(autouse=True) diff --git 
a/tests/smoke/test_app.py b/tests/smoke/test_app.py index b4ec30d..046c221 100644 --- a/tests/smoke/test_app.py +++ b/tests/smoke/test_app.py @@ -1,10 +1,10 @@ -from gpt_buddy_bot.app import app +from pyrobbot.app import app def test_app(mocker, default_chat_configs): mocker.patch("streamlit.session_state", {}) mocker.patch( - "gpt_buddy_bot.chat_configs.ChatOptions.from_file", + "pyrobbot.chat_configs.ChatOptions.from_file", return_value=default_chat_configs, ) app.run_app() diff --git a/tests/smoke/test_commands.py b/tests/smoke/test_commands.py index 2b394f1..9cecf9b 100644 --- a/tests/smoke/test_commands.py +++ b/tests/smoke/test_commands.py @@ -1,7 +1,7 @@ import pytest -from gpt_buddy_bot.__main__ import main -from gpt_buddy_bot.argparse_wrapper import get_parsed_args +from pyrobbot.__main__ import main +from pyrobbot.argparse_wrapper import get_parsed_args @pytest.mark.usefixtures("_input_builtin_mocker") diff --git a/tests/unit/test_chat.py b/tests/unit/test_chat.py index 7dff75c..ff72d49 100644 --- a/tests/unit/test_chat.py +++ b/tests/unit/test_chat.py @@ -1,8 +1,8 @@ import openai import pytest -from gpt_buddy_bot import GeneralConstants -from gpt_buddy_bot.openai_utils import CannotConnectToApiError +from pyrobbot import GeneralConstants +from pyrobbot.openai_utils import CannotConnectToApiError @pytest.mark.order(1) From 6659c42ef713000db957b4d74f1a8529628710c2 Mon Sep 17 00:00:00 2001 From: Paulo V C Medeiros Date: Fri, 10 Nov 2023 14:57:28 +0100 Subject: [PATCH 109/109] Remove link to github pages: we don't have one yet --- README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/README.md b/README.md index 0bcc75a..dabe323 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,4 @@ [![GitHub](https://img.shields.io/badge/github-%23121011.svg?style=for-the-badge&logo=github&logoColor=white)](https://github.com/paulovcmedeiros/pyRobBot) -[![Github Pages](https://img.shields.io/badge/github%20pages-121013?style=for-the-badge&logo=github&logoColor=white)](https://paulovcmedeiros.github.io/pyRobBot-docs/) [![Contributors Welcome](https://img.shields.io/badge/Contributors-welcome-.svg)](https://github.com/paulovcmedeiros/pyRobBot/pulls)