diff --git a/README.md b/README.md index d0b396ee..85233f3f 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,10 @@ We support everything from **multi-modality image understanding**, **code interp Featuring code execution and environment manipulation by [E2B](https://e2b.dev) -We are migrating towards using [QDRANT](https://qdrant.tech/) as our vector database backing, we are moving away from pinecone. +We are migrating towards using [QDRANT](https://qdrant.tech/) as our vector database backing, we are moving away from pinecone. Qdrant is an excellent vector database choice, and in fact the best one that we've tested and used so far. +

+ +

# Overview of Capabilities ![Overview of Features](https://i.imgur.com/BZdORTL.png) @@ -122,8 +125,9 @@ These commands are grouped, so each group has a prefix, but you can easily tab c - `/index discord_backup` - Use the last 3000 messages of every channel on your Discord server as an index. Needs both an admin and an index role. - `/index chat user_index: search_index:` - Chat with your documents that you've indexed previously! -### DALL-E2 Commands -- `/dalle draw ` - Have DALL-E generate images based on a prompt. +### DALL-E Commands +- `/dalle draw ` - Have DALL-E 3 generate images based on a prompt. +- `/dalle draw_old ` - Have DALL-E 2 generate images based on a prompt. - `/dalle optimize ` - Optimize a given prompt text for DALL-E image generation. ### System and Settings @@ -132,8 +136,8 @@ These commands are grouped, so each group has a prefix, but you can easily tab c - `/system usage` - Estimate current usage details (based on Davinci). - `/system settings low_usage_mode True/False` - Turn low usage mode on and off. If on, it will use the curie-001 model, and if off, it will use the Davinci-003 model. - `/system delete-conversation-threads` - Delete all threads related to this bot across all servers. -- `/system local-size` - Get the size of the local dalleimages folder. -- `/system clear-local` - Clear all the local dalleimages. +- `/system local-size` - Get the size of the local `/dalleimages` folder. +- `/system clear-local` - Delete all the contents of `/dalleimages`. 
# Step-by-Step Guides for GPTDiscord [**GPTDiscord Guides**](https://github.com/Kav-K/GPTDiscord/tree/main/detailed_guides) diff --git a/cogs/code_interpreter_service_cog.py b/cogs/code_interpreter_service_cog.py index 1a09607a..998609d4 100644 --- a/cogs/code_interpreter_service_cog.py +++ b/cogs/code_interpreter_service_cog.py @@ -285,11 +285,16 @@ async def on_message(self, message): ) await message.reply( embed=response_embed, - view=CodeInterpreterDownloadArtifactsView( - message, self, self.sessions[message.channel.id], artifact_names - ) - if artifacts_available - else None, + view=( + CodeInterpreterDownloadArtifactsView( + message, + self, + self.sessions[message.channel.id], + artifact_names, + ) + if artifacts_available + else None + ), ) safe_remove_list(self.thread_awaiting_responses, message.channel.id) diff --git a/cogs/moderations_service_cog.py b/cogs/moderations_service_cog.py index f1bd1dc0..b82f7d90 100644 --- a/cogs/moderations_service_cog.py +++ b/cogs/moderations_service_cog.py @@ -209,9 +209,11 @@ async def start_moderations_service(self, guild_id, alert_channel_id=None): self.set_guild_moderated(guild_id) moderations_channel = await self.check_and_launch_moderations( guild_id, - Moderation.moderation_alerts_channel - if not alert_channel_id - else alert_channel_id, + ( + Moderation.moderation_alerts_channel + if not alert_channel_id + else alert_channel_id + ), ) self.set_moderated_alert_channel(guild_id, moderations_channel.id) @@ -291,9 +293,11 @@ async def config_command( ephemeral=True, embed=await self.build_moderation_settings_embed( config_type, - self.get_or_set_warn_set(ctx.guild_id) - if config_type == "warn" - else self.get_or_set_delete_set(ctx.guild_id), + ( + self.get_or_set_warn_set(ctx.guild_id) + if config_type == "warn" + else self.get_or_set_delete_set(ctx.guild_id) + ), ), ) return @@ -319,16 +323,20 @@ async def config_command( new_delete_set = ThresholdSet( hate if hate else delete_set["hate"], - hate_threatening - if 
hate_threatening - else delete_set["hate/threatening"], + ( + hate_threatening + if hate_threatening + else delete_set["hate/threatening"] + ), self_harm if self_harm else delete_set["self-harm"], sexual if sexual else delete_set["sexual"], sexual_minors if sexual_minors else delete_set["sexual/minors"], violence if violence else delete_set["violence"], - violence_graphic - if violence_graphic - else delete_set["violence/graphic"], + ( + violence_graphic + if violence_graphic + else delete_set["violence/graphic"] + ), ) self.set_delete_set(ctx.guild_id, new_delete_set) await self.restart_moderations_service(ctx) diff --git a/cogs/search_service_cog.py b/cogs/search_service_cog.py index 05c13d53..2bcee6bf 100644 --- a/cogs/search_service_cog.py +++ b/cogs/search_service_cog.py @@ -276,9 +276,9 @@ async def paginate_embed( for count, chunk in enumerate(response_text, start=1): if not first: page = discord.Embed( - title="Search Results" - if not original_link - else "Follow-up results", + title=( + "Search Results" if not original_link else "Follow-up results" + ), description=chunk, url=original_link, ) diff --git a/cogs/text_service_cog.py b/cogs/text_service_cog.py index 56b12d7d..c02b343d 100644 --- a/cogs/text_service_cog.py +++ b/cogs/text_service_cog.py @@ -198,9 +198,9 @@ def __init__( assert self.CONVERSATION_DRAWING_ABILITY_EXTRACTION_SNIPPET is not None except Exception: - self.CONVERSATION_STARTER_TEXT = ( - self.CONVERSATION_STARTER_TEXT_MINIMAL - ) = self.CONVERSATION_STARTER_TEXT_VISION = ( + self.CONVERSATION_STARTER_TEXT = self.CONVERSATION_STARTER_TEXT_MINIMAL = ( + self.CONVERSATION_STARTER_TEXT_VISION + ) = ( "You are an artificial intelligence that is able to do anything, and answer any question," "I want you to be my personal assistant and help me with some tasks. 
" "and I want you to make well-informed decisions using the data that you have been trained on, " @@ -239,9 +239,9 @@ async def on_member_join(self, member): welcome_message_response = await self.model.send_request( query, tokens=self.usage_service.count_tokens(query), - is_chatgpt_request=True - if "turbo" in str(self.model.model) - else False, + is_chatgpt_request=( + True if "turbo" in str(self.model.model) else False + ), ) welcome_message = str(welcome_message_response["choices"][0]["text"]) except Exception: @@ -1404,10 +1404,16 @@ async def converse_command( await TextService.encapsulated_send( self, target.id, - opener - if target.id not in self.conversation_threads or self.pinecone_service - else "".join( - [item.text for item in self.conversation_threads[target.id].history] + ( + opener + if target.id not in self.conversation_threads + or self.pinecone_service + else "".join( + [ + item.text + for item in self.conversation_threads[target.id].history + ] + ) ), target_message, overrides=overrides, @@ -1597,9 +1603,11 @@ async def callback(self, interaction: discord.Interaction): try: id = await self.converser_cog.sharegpt_service.format_and_share( self.converser_cog.full_conversation_history[self.conversation_id], - self.converser_cog.bot.user.default_avatar.url - if not self.converser_cog.bot.user.avatar - else self.converser_cog.bot.user.avatar.url, + ( + self.converser_cog.bot.user.default_avatar.url + if not self.converser_cog.bot.user.avatar + else self.converser_cog.bot.user.avatar.url + ), ) url = f"https://shareg.pt/{id}" await interaction.response.send_message( diff --git a/cogs/translation_service_cog.py b/cogs/translation_service_cog.py index f063875a..46313146 100644 --- a/cogs/translation_service_cog.py +++ b/cogs/translation_service_cog.py @@ -28,9 +28,9 @@ def build_translation_embed( ) embed.set_footer( text=f"Requested by {requestor.name}#{requestor.discriminator}", - icon_url=requestor.avatar.url - if requestor.avatar - else 
requestor.default_avatar.url, + icon_url=( + requestor.avatar.url if requestor.avatar else requestor.default_avatar.url + ), ) return embed diff --git a/detailed_guides/AI-SEARCH.md b/detailed_guides/AI-SEARCH.md index 936edd0b..44ed3ef6 100644 --- a/detailed_guides/AI-SEARCH.md +++ b/detailed_guides/AI-SEARCH.md @@ -5,28 +5,29 @@ GOOGLE_SEARCH_API_KEY="...." GOOGLE_SEARCH_ENGINE_ID="...." ``` -You first need to create a programmable search engine and get the search engine ID: https://developers.google.com/custom-search/docs/tutorial/creatingcse +You first need to create a programmable search engine and get the search engine ID [here](https://developers.google.com/custom-search/docs/tutorial/creatingcse). -Then you can get the API key, click the "Get a key" button on this page: https://developers.google.com/custom-search/v1/introduction +Then you can get the API key, click the "Get a key" button [on this page](https://developers.google.com/custom-search/v1/introduction). You can limit the max price that is charged for a single search request by setting `MAX_SEARCH_PRICE` in your `.env` file. +Step by Step Guide: +--- -1\. Go to the [Programmable Search Engine docs](https://developers.google.com/custom-search/docs/tutorial/creatingcse) to get a Search engine ID ----------------------------------------------------------------------- +1\. Go to the [Programmable Search Engine docs](https://developers.google.com/custom-search/docs/tutorial/creatingcse) to get a Search engine ID. +--- -2\. Click on "Control Panel" under "Defining a Programmable Engine in Control Panel" ----------------------------------------- +a. Click on "Control Panel" under "Defining a Programmable Engine in Control Panel" -Click to sign in(make a Google acct if you do not have one): +b. Click to sign in (make a Google account if you do not have one): ![image](https://user-images.githubusercontent.com/23362597/233266042-98098ed5-72b2-41b3-9495-1a9f4d7e1101.png) -3\. 
Register yourself a new account/Login to the Control Panel +2\. Register yourself a new account/Login to the Control Panel ----------------------------------- After logging in, you will be redirected to the Control Panel to create a new search engine: @@ -34,7 +35,7 @@ After logging in, you will be redirected to the Control Panel to create a new se ![image](https://user-images.githubusercontent.com/23362597/233266323-53232468-2590-4820-b55f-08c78529d752.png) -4\. Create a new search engine +3\. Create a new search engine ------------------------------ Fill in a name, select to "Search the entire web" and hit "Create": @@ -42,13 +43,13 @@ Fill in a name, select to "Search the entire web" and hit "Create": ![image](https://user-images.githubusercontent.com/23362597/233266738-b70f004d-4324-482e-a945-9b0193b60158.png) -5\. Copy your Search engine ID to your .env file +4\. Copy your Search engine ID to your .env file -------------------------- ![image](https://user-images.githubusercontent.com/23362597/233267123-ea25a3bb-6cdb-4d46-a893-846ea4933632.png) -6\. Go to [custom-search docs](https://developers.google.com/custom-search/v1/introduction) to get a Google search API key +5\. Go to [custom-search docs](https://developers.google.com/custom-search/v1/introduction) to get a Google search API key ------------------------------------------------- Click "Get a Key": @@ -56,23 +57,48 @@ Click "Get a Key": ![image](https://user-images.githubusercontent.com/23362597/233267659-f82621f4-1f0b-46bf-8994-be443dd79932.png) -7\. Name your project and agree to the Terms of Service +6\. Name your project and agree to the Terms of Service ------------------------------------ ![image](https://user-images.githubusercontent.com/23362597/233267793-ca3c273d-ebc6-44a5-a49d-0d4c3223c992.png) -8\. Copy your Google search API key to your .env file +7\. 
Copy your Google search API key to your .env file ------------------------------------ ![image](https://user-images.githubusercontent.com/23362597/233268067-5a6cfaf1-bec0-48b3-8add-70b218fb4264.png) -9\. Enable Cloud Vision API for image recognition +8\. Enable Cloud Vision API for image recognition +------------------------------------ +a. Navigate to the Google Cloud API console [here](https://console.cloud.google.com/apis/api/vision.googleapis.com/). + +b. Click on 'Create Project': + +![image](https://github.com/Raecaug/GPTDiscord/assets/23362597/f128cc80-2a53-4578-9f4e-99791d7f8ffe) + + +c. Give it a name and create it: + +![image](https://github.com/Raecaug/GPTDiscord/assets/23362597/35050805-f2ad-4489-8d8c-dacbd961c0b1) + + +d. Now, navigate to the API Library: + +![image](https://github.com/Raecaug/GPTDiscord/assets/23362597/0eff23c1-09c1-4e65-a08a-a56af5515727) + + +e. Search for the 'Cloud Vision API' and enable it: + +![image](https://github.com/Raecaug/GPTDiscord/assets/23362597/ef225dbe-4385-4263-b0aa-7100bbfed0e8) + + +9\. Enable Custom Search API for Google search +------------------------------------ -https://console.cloud.google.com/apis/api/vision.googleapis.com/ +You can follow [this link](https://console.cloud.google.com/apis/api/customsearch.googleapis.com/) to quickly jump to the page to enable the custom search API. You may need to first select the project you created before: +![image](https://github.com/Raecaug/GPTDiscord/assets/23362597/875039dc-88b8-47a2-ad6e-4d39be26781d) -10\. 
Enable Custom Search API for Google search +![image](https://github.com/Raecaug/GPTDiscord/assets/23362597/3a0f4200-7319-4a2f-8fb1-9d85bffb387a) -https://console.cloud.google.com/apis/api/customsearch.googleapis.com/ \ No newline at end of file diff --git a/detailed_guides/CODE-INTERPRETER.md b/detailed_guides/CODE-INTERPRETER.md index d1db7b87..046507cc 100644 --- a/detailed_guides/CODE-INTERPRETER.md +++ b/detailed_guides/CODE-INTERPRETER.md @@ -1,17 +1,28 @@ # Code Interpreter -This bot supports a full fledged version of Code Interpreter, where code in various languages can be executed directly in discord. You can even install python and system packages. Python is the preferred language for code interpreter, although it will still work relatively nicely with other popular languages. +This bot supports a full fledged version of Code Interpreter, where code in various languages can be executed directly in Discord. You can even install Python and system packages. Python is the preferred language for code interpreter, although it will still work relatively nicely with other popular languages. -To get started with code interpreter, you need an E2B API key. You can find an E2B API key at https://e2b.dev/docs/getting-started/api-key +To get started with code interpreter, you need an E2B API key. You can find an E2B API key & more info on FoundryLabs [here](https://e2b.dev/docs/getting-started/api-key). ```env E2B_API_KEY="...." ``` -Like above, add the E2B API key to your `.env` file. E2B is a cloud-based isolated execution environment, so that you can run code safely and have a containerized environment to install packages and etc in. +Like above, add the E2B API key to your `.env` file. E2B is a cloud-based isolated execution environment, so that you can run code safely and have a containerized environment to install packages and execute code, etc in. Afterwards, to use code interpreter, simply use `/code chat`. 
-When you begin a code interpreter instance, a new isolated environment to run your code is automatically created. Inside your chat, you can ask GPT to install python or system packages into this environment, and ask GPT to run any sort of python (and other language) code within it as well. Unlike ChatGPT's code interpreter / advanced data analysis, this also has access to the internet so you can work with code that uses the network as well. +When you begin a code interpreter instance, a new isolated environment to run your code is automatically created. Inside your chat, you can ask GPT to install Python or system packages into this environment, and ask GPT to run any sort of python (and other language) code within it as well. Unlike ChatGPT's code interpreter / advanced data analysis, this also has access to the internet so you can work with code that uses the network as well. -When ChatGPT executes code, sometimes it will create files (especially if you ask it to). If files are created, they can be downloaded from the "Download Artifacts" button that will pop up after the code is executed. -Sometimes, ChatGPT will create files that are not in the upstream `artifacts` folder. To remedy this, simply ask it to ensure all files it makes are within `artifacts`. \ No newline at end of file +As per the E2B documentation, the free tier provides the following resources to a sandboxed code instance: + +> - 2 vCPU +> - 512 MB RAM +> - 1 GB free disk storage +> - 24 hours max sandbox session length + +Given that it is possible to maintain and use a thread over multiple days with GPTDiscord, exceeding the instance limits of your E2B API key will cause the thread to break and not reply. In this case, you will need to end & restart the thread to continue conversing. + + +When ChatGPT executes code, sometimes it will create files (especially if you ask it to within your instructions) in a folder called `artifacts`. 
If files are created, they can be downloaded from the "Download Artifacts" button that will pop up after the code is executed. + +Sometimes, ChatGPT will create files that are not placed in the upstream `artifacts` folder. To remedy this, simply ask it to ensure all files it makes are within `artifacts`. diff --git a/detailed_guides/INSTALLATION.md b/detailed_guides/INSTALLATION.md index b5acd1be..33ba5b40 100644 --- a/detailed_guides/INSTALLATION.md +++ b/detailed_guides/INSTALLATION.md @@ -111,18 +111,27 @@ curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py python3.9 get-pip.py ``` +#### Create a Python Virtual Environment +```shell +pip install virtualenv +python3.9 -m venv venv +source venv/bin/activate +``` + + #### Install project dependencies ``` python3.9 -m pip install --ignore-installed PyYAML python3.9 -m pip install torch==1.13.1+cpu torchvision==0.14.1+cpu -f https://download.pytorch.org/whl/torch_stable.html python3.9 -m pip install urllib3==1.26.7 +python3.9 -m pip install git+https://github.com/openai/whisper.git python3.9 -m pip install -r requirements.txt python3.9 -m pip install . ``` #### Copy the sample.env file into a regular .env file. `DEBUG_GUILD` and the ID for `ALLOWED_GUILDS` can be found by right-clicking your server and choosing "Copy ID". Similarly, `DEBUG_CHANNEL` can be found by right-clicking your debug channel. ```shell -cp sample.env .env ``` #### The command below is used to edit the .env file and to put in your API keys. You can right click within the editor after running this command to paste. When you are done editing, press CTRL + X, and then type Y, to save. diff --git a/detailed_guides/INTERNET-CONNECTED-CHAT.md b/detailed_guides/INTERNET-CONNECTED-CHAT.md index 97fe5401..e0d82a0e 100644 --- a/detailed_guides/INTERNET-CONNECTED-CHAT.md +++ b/detailed_guides/INTERNET-CONNECTED-CHAT.md @@ -30,23 +30,21 @@ Follow the on-screen instructions to register a new account. 
![image](https://user-images.githubusercontent.com/23362597/232921997-1c6ed4dc-7aea-459b-8d76-95fdf86ee108.png) -4\. Access the Wolfram Developer Portal +4\. Access the [Wolfram Developer Portal](https://developer.wolframalpha.com/access) ------------------------------ -After successful registration, you will be redirected to the Developer Portal: +After successful registration, you will be redirected to the Developer Portal, or follow the link above: -![image](https://user-images.githubusercontent.com/23362597/232922409-92af9237-1230-43dc-836a-7425f31b4f56.png) +![image](https://github.com/Raecaug/GPTDiscord/assets/23362597/a32542c8-7060-4411-a92e-e3eb5ae06433) 5\. Click on "Get an AppID" -------------------------- -![image](https://user-images.githubusercontent.com/23362597/232922639-919f6d5a-da05-4f2c-af4b-5c517a926e78.png) - -6\. Fill in a name and description: +6\. Fill in a name and description and select "Full Results API" from the dropdown: ------------------------------------------------- -![image](https://user-images.githubusercontent.com/23362597/232922798-25b21534-4452-4851-ae72-940b9f9d1fae.png) +![image](https://github.com/Raecaug/GPTDiscord/assets/23362597/173de276-74d9-472c-a513-2420a14e34a7) 8\. Copy the newly generated AppID to your ".env" file diff --git a/detailed_guides/PERMANENT-MEMORY.md b/detailed_guides/PERMANENT-MEMORY.md index f7786d6b..6bb3af99 100644 --- a/detailed_guides/PERMANENT-MEMORY.md +++ b/detailed_guides/PERMANENT-MEMORY.md @@ -1,5 +1,7 @@ -# Permanent Memory and Conversations -Permanent memory has now been implemented into the bot, using the OpenAI Ada embeddings endpoint, and Pinecone. +# Permanent Memory and Conversations +We are migrating towards using [QDRANT](https://qdrant.tech/) as our vector database backing, we are moving away from pinecone. Qdrant is an excellent vector database choice, and in fact the best one that we've tested and used so far. 
+ +Permanent memory has now been implemented into the bot, using the OpenAI Ada embeddings endpoint, and Pinecone. Pinecone is a vector database. The OpenAI Ada embeddings endpoint turns pieces of text into embeddings. The way that this feature works is by embedding the user prompts and the GPT responses, storing them in a pinecone index, and then retrieving the most relevant bits of conversation whenever a new user prompt is given in a conversation. @@ -22,4 +24,4 @@ To manually create an index instead of the bot automatically doing it, go to the Then, name the index `conversation-embeddings`, set the dimensions to `1536`, and set the metric to `DotProduct`: -
\ No newline at end of file +
diff --git a/gpt3discord.py b/gpt3discord.py index 41570ddb..af81e187 100644 --- a/gpt3discord.py +++ b/gpt3discord.py @@ -34,7 +34,7 @@ from models.openai_model import Model -__version__ = "12.3.7" +__version__ = "12.3.8" PID_FILE = Path("bot.pid") diff --git a/models/autocomplete_model.py b/models/autocomplete_model.py index 5a70c937..0964acdd 100644 --- a/models/autocomplete_model.py +++ b/models/autocomplete_model.py @@ -96,6 +96,7 @@ async def get_function_calling_models(ctx: discord.AutocompleteContext): "gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-0613", + "gpt-4-turbo-preview", ] async def get_models( diff --git a/models/index_model.py b/models/index_model.py index c2828eb0..4372fc03 100644 --- a/models/index_model.py +++ b/models/index_model.py @@ -1688,10 +1688,12 @@ async def interaction_check(self, interaction: discord.Interaction) -> bool: self.user_id, indexes, self.name, - False - if not self.deep_select.values - or self.deep_select.values[0] == "no" - else True, + ( + False + if not self.deep_select.values + or self.deep_select.values[0] == "no" + else True + ), ) except ValueError as e: await interaction.followup.send( diff --git a/models/openai_model.py b/models/openai_model.py index 0ce6f340..85b9cee3 100644 --- a/models/openai_model.py +++ b/models/openai_model.py @@ -72,6 +72,7 @@ class Models: GPT4_32_DEV = "gpt-4-32k-0613" GPT_4_TURBO = "gpt-4-1106-preview" GPT_4_TURBO_VISION = "gpt-4-vision-preview" + GPT_4_TURBO_CATCHALL = "gpt-4-turbo-preview" # Model collections TEXT_MODELS = [ @@ -87,6 +88,7 @@ class Models: GPT4_32_DEV, GPT_4_TURBO, GPT_4_TURBO_VISION, + GPT_4_TURBO_CATCHALL, ] CHATGPT_MODELS = [ TURBO, @@ -96,6 +98,7 @@ class Models: GPT4_32, GPT_4_TURBO_VISION, GPT_4_TURBO, + GPT_4_TURBO_CATCHALL, ] GPT4_MODELS = [ GPT4, @@ -104,6 +107,7 @@ class Models: GPT4_32_DEV, GPT_4_TURBO_VISION, GPT_4_TURBO, + GPT_4_TURBO_CATCHALL, ] EDIT_MODELS = [EDIT] @@ -124,6 +128,7 @@ class Models: GPT4_32_DEV: 32768, GPT_4_TURBO_VISION: 128000, 
GPT_4_TURBO: 128000, + GPT_4_TURBO_CATCHALL: 128000, } @staticmethod @@ -867,9 +872,9 @@ async def send_language_detect_request( self, text, pretext, - ) -> ( - Tuple[dict, bool] - ): # The response, and a boolean indicating whether or not the context limit was reached. + ) -> Tuple[ + dict, bool + ]: # The response, and a boolean indicating whether or not the context limit was reached. # Validate that all the parameters are in a good state before we send the request prompt = f"{pretext}{text}\nOutput:" @@ -935,9 +940,9 @@ async def send_chatgpt_chat_request( custom_api_key=None, system_prompt_override=None, respond_json=None, - ) -> ( - Tuple[dict, bool] - ): # The response, and a boolean indicating whether or not the context limit was reached. + ) -> Tuple[ + dict, bool + ]: # The response, and a boolean indicating whether or not the context limit was reached. # Validate that all the parameters are in a good state before we send the request model_selection = self.model if not model else model print("The model selection is " + model_selection) @@ -1002,9 +1007,9 @@ async def send_chatgpt_chat_request( messages.append( { "role": role, - "name": username_clean - if role == "user" - else bot_name_clean, + "name": ( + username_clean if role == "user" else bot_name_clean + ), "content": text, } ) @@ -1014,9 +1019,9 @@ async def send_chatgpt_chat_request( messages.append( { "role": role, - "name": username_clean - if role == "user" - else bot_name_clean, + "name": ( + username_clean if role == "user" else bot_name_clean + ), "content": [ {"type": "text", "text": text}, ], @@ -1032,9 +1037,9 @@ async def send_chatgpt_chat_request( messages.append( { "role": role, - "name": username_clean - if role == "user" - else bot_name_clean, + "name": ( + username_clean if role == "user" else bot_name_clean + ), "content": [ {"type": "text", "text": text}, ], @@ -1054,17 +1059,21 @@ async def send_chatgpt_chat_request( "stop": "" if stop is None else stop, "temperature": self.temp 
if temp_override is None else temp_override, "top_p": self.top_p if top_p_override is None else top_p_override, - "presence_penalty": self.presence_penalty - if presence_penalty_override is None - else presence_penalty_override, - "frequency_penalty": self.frequency_penalty - if frequency_penalty_override is None - else frequency_penalty_override, + "presence_penalty": ( + self.presence_penalty + if presence_penalty_override is None + else presence_penalty_override + ), + "frequency_penalty": ( + self.frequency_penalty + if frequency_penalty_override is None + else frequency_penalty_override + ), } if "-preview" in model_selection: - payload[ - "max_tokens" - ] = 4096 # TODO Not sure if this needs to be subtracted from a token count.. + payload["max_tokens"] = ( + 4096 # TODO Not sure if this needs to be subtracted from a token count.. + ) if respond_json: # payload["response_format"] = { "type": "json_object" } # TODO The above needs to be fixed, doesn't work for some reason? @@ -1127,12 +1136,16 @@ async def send_transcription_request( data.add_field( "file", file.read() if isinstance(file, discord.Attachment) else file.fp.read(), - filename="audio." + file.filename.split(".")[-1] - if isinstance(file, discord.Attachment) - else "audio.mp4", - content_type=file.content_type - if isinstance(file, discord.Attachment) - else "video/mp4", + filename=( + "audio." 
+ file.filename.split(".")[-1] + if isinstance(file, discord.Attachment) + else "audio.mp4" + ), + content_type=( + file.content_type + if isinstance(file, discord.Attachment) + else "video/mp4" + ), ) if temperature_override: @@ -1208,22 +1221,28 @@ async def send_request( "model": self.model if model is None else model, "prompt": prompt, "stop": "" if stop is None else stop, - "temperature": self.temp - if temp_override is None - else temp_override, + "temperature": ( + self.temp if temp_override is None else temp_override + ), "top_p": self.top_p if top_p_override is None else top_p_override, - "max_tokens": self.max_tokens - tokens - if max_tokens_override is None - else max_tokens_override, - "presence_penalty": self.presence_penalty - if presence_penalty_override is None - else presence_penalty_override, - "frequency_penalty": self.frequency_penalty - if frequency_penalty_override is None - else frequency_penalty_override, - "best_of": self.best_of - if not best_of_override - else best_of_override, + "max_tokens": ( + self.max_tokens - tokens + if max_tokens_override is None + else max_tokens_override + ), + "presence_penalty": ( + self.presence_penalty + if presence_penalty_override is None + else presence_penalty_override + ), + "frequency_penalty": ( + self.frequency_penalty + if frequency_penalty_override is None + else frequency_penalty_override + ), + "best_of": ( + self.best_of if not best_of_override else best_of_override + ), } headers = { "Authorization": f"Bearer {self.openai_key if not custom_api_key else custom_api_key}" @@ -1256,21 +1275,25 @@ async def send_request( "model": model_selection, "messages": messages, "stop": "" if stop is None else stop, - "temperature": self.temp - if temp_override is None - else temp_override, + "temperature": ( + self.temp if temp_override is None else temp_override + ), "top_p": self.top_p if top_p_override is None else top_p_override, - "presence_penalty": self.presence_penalty - if presence_penalty_override 
is None - else presence_penalty_override, - "frequency_penalty": self.frequency_penalty - if frequency_penalty_override is None - else frequency_penalty_override, + "presence_penalty": ( + self.presence_penalty + if presence_penalty_override is None + else presence_penalty_override + ), + "frequency_penalty": ( + self.frequency_penalty + if frequency_penalty_override is None + else frequency_penalty_override + ), } if "preview" in model_selection: - payload[ - "max_tokens" - ] = 4096 # Temporary workaround while 4-turbo and vision are in preview. + payload["max_tokens"] = ( + 4096 # Temporary workaround while 4-turbo and vision are in preview. + ) headers = { "Authorization": f"Bearer {self.openai_key if not custom_api_key else custom_api_key}" diff --git a/models/search_model.py b/models/search_model.py index 109950b4..d66048e3 100644 --- a/models/search_model.py +++ b/models/search_model.py @@ -396,9 +396,11 @@ async def search( if not redo: self.add_search_index( index, - ctx.user.id - if isinstance(ctx, discord.ApplicationContext) - else ctx.author.id, + ( + ctx.user.id + if isinstance(ctx, discord.ApplicationContext) + else ctx.author.id + ), query, ) diff --git a/sample.env b/sample.env index 69959ef0..9b3b1150 100644 --- a/sample.env +++ b/sample.env @@ -95,3 +95,6 @@ PRE_MODERATE = "False" ## Force only english to be spoken in the server FORCE_ENGLISH = "False" + +## Launch a HTTP endpoint at :8181/ that will return a json response of the bot's status and uptime(good for cloud app containers) +HEALTH_SERVICE_ENABLED="False" diff --git a/services/image_service.py b/services/image_service.py index ef8dd1e3..4ba7286a 100644 --- a/services/image_service.py +++ b/services/image_service.py @@ -86,11 +86,15 @@ async def encapsulated_send( # Start building an embed to send to the user with the results of the image generation embed = discord.Embed( - title="Image Generation Results" - if not vary - else "Image Generation Results (Varying)" - if not 
draw_from_optimizer - else "Image Generation Results (Drawing from Optimizer)", + title=( + "Image Generation Results" + if not vary + else ( + "Image Generation Results (Varying)" + if not draw_from_optimizer + else "Image Generation Results (Drawing from Optimizer)" + ) + ), description=f"{prompt}", color=0xC730C7, ) diff --git a/services/text_service.py b/services/text_service.py index 3192bd2b..ff1c2319 100644 --- a/services/text_service.py +++ b/services/text_service.py @@ -236,9 +236,9 @@ async def encapsulated_send( if redo_request: _prompt_with_history = _prompt_with_history[:-2] - converser_cog.conversation_threads[ - ctx.channel.id - ].history = _prompt_with_history + converser_cog.conversation_threads[ctx.channel.id].history = ( + _prompt_with_history + ) # Ensure that the last prompt in this list is the prompt we just sent (new_prompt_item) if _prompt_with_history[-1].text != new_prompt_item.text: @@ -1077,13 +1077,11 @@ async def process_conversation_edit(converser_cog, after, original_message): if after.channel.id in converser_cog.conversation_threads: # Remove the last two elements from the history array and add the new : prompt - converser_cog.conversation_threads[ - after.channel.id - ].history = converser_cog.conversation_threads[ - after.channel.id - ].history[ - :-2 - ] + converser_cog.conversation_threads[after.channel.id].history = ( + converser_cog.conversation_threads[after.channel.id].history[ + :-2 + ] + ) pinecone_dont_reinsert = None if not converser_cog.pinecone_service: