diff --git a/ollama-pdf-chat/InstantChat.py b/ollama-pdf-chat/InstantChat.py
new file mode 100644
index 000000000..b105fd710
--- /dev/null
+++ b/ollama-pdf-chat/InstantChat.py
@@ -0,0 +1,129 @@
+import os
+from typing import List
+from langchain_community.document_loaders import PyPDFLoader
+
+from langchain_community.embeddings import OllamaEmbeddings
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain_community.vectorstores.chroma import Chroma
+from langchain.chains import (
+    ConversationalRetrievalChain,
+)
+from langchain_community.chat_models import ChatOllama
+
+from langchain.docstore.document import Document
+from langchain.memory import ChatMessageHistory, ConversationBufferMemory
+
+
+import chainlit as cl
+from dotenv import load_dotenv
+
+load_dotenv(dotenv_path=".env", verbose=True)
+
+llm_model = os.getenv("LLM_MODEL", "gemma")
+print(f"LLM_MODEL value: {llm_model}")
+text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
+
+
+@cl.on_chat_start
+async def on_chat_start():
+    files = None
+
+    # Wait for the user to upload a file
+    while files is None:
+        files = await cl.AskFileMessage(
+            content="Please upload a text file or PDF to begin!",
+            accept=["text/plain", "application/pdf"],
+            max_size_mb=20,
+            timeout=180,
+        ).send()
+
+    file = files[0]
+
+    msg = cl.Message(content=f"Processing `{file.name}`...", disable_feedback=True)
+    await msg.send()
+
+    if file.type == "text/plain":
+        with open(file.path, "r", encoding="utf-8") as f:
+            text = f.read()
+
+        # Split the text into chunks
+        texts = text_splitter.split_text(text)
+
+        # Create metadata for each chunk
+        metadatas = [{"source": f"{i}-pl"} for i in range(len(texts))]
+
+        # Create a Chroma vector store
+        embeddings = OllamaEmbeddings(temperature=0.3, top_k=20, show_progress=True, model=llm_model)
+        docsearch = await cl.make_async(Chroma.from_texts)(
+            texts, embeddings, metadatas=metadatas
+        )
+    elif file.type == "application/pdf":
+        # Load the PDF file and split it into chunks
+        loader = PyPDFLoader(file.path)
+        embeddings = OllamaEmbeddings(temperature=0.3, top_k=20, show_progress=True, model=llm_model)
+        texts = text_splitter.split_documents(loader.load())
+
+        # Separate the page content and metadata for the vector store
+        textCollection = []
+        metadatas = []
+        for text in texts:
+            textCollection.append(text.page_content)
+            metadatas.append(text.metadata)
+        docsearch = await cl.make_async(Chroma.from_texts)(
+            textCollection, embeddings, metadatas=metadatas
+        )
+
+    message_history = ChatMessageHistory()
+
+    memory = ConversationBufferMemory(
+        memory_key="chat_history",
+        output_key="answer",
+        chat_memory=message_history,
+        return_messages=True,
+    )
+
+    # Create a chain that uses the Chroma vector store
+    chain = ConversationalRetrievalChain.from_llm(
+        ChatOllama(model=llm_model, temperature=0.2, streaming=True),
+        chain_type="stuff",
+        retriever=docsearch.as_retriever(),
+        memory=memory,
+        return_source_documents=True,
+    )
+
+    # Let the user know that the system is ready
+    msg.content = f"Processing `{file.name}` done. You can now ask questions! We are using the {llm_model} model."
+    await msg.update()
+
+    cl.user_session.set("chain", chain)
+
+
+@cl.on_message
+async def main(message: cl.Message):
+    chain = cl.user_session.get("chain")  # type: ConversationalRetrievalChain
+    cb = cl.AsyncLangchainCallbackHandler()
+
+    res = await chain.acall(message.content, callbacks=[cb])
+    answer = res["answer"]
+    source_documents = res["source_documents"]  # type: List[Document]
+
+    text_elements = []  # type: List[cl.Text]
+
+    if source_documents:
+        for source_idx, source_doc in enumerate(source_documents):
+            source_name = f"source_{source_idx}"
+            # Create the text element referenced in the message
+            text_elements.append(
+                cl.Text(content=source_doc.page_content, name=source_name)
+            )
+        source_names = [text_el.name for text_el in text_elements]
+
+        if source_names:
+            answer += f"\nSources: {', '.join(source_names)}"
+        else:
+            answer += "\nNo sources found"
+
+    await cl.Message(content=answer, elements=text_elements).send()
diff --git a/ollama-pdf-chat/LICENSE b/ollama-pdf-chat/LICENSE
new file mode 100644
index 000000000..a9783450f
--- /dev/null
+++ b/ollama-pdf-chat/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2024 Krishnatejaswi S
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/ollama-pdf-chat/README.md b/ollama-pdf-chat/README.md
new file mode 100644
index 000000000..0622b668c
--- /dev/null
+++ b/ollama-pdf-chat/README.md
@@ -0,0 +1,127 @@
+# 🚀 Ollama-PDF-Chat
+
+[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) ![GitHub repo size](https://img.shields.io/github/repo-size/KTS-o7/RVChat) ![GitHub language count](https://img.shields.io/github/languages/count/KTS-o7/RVChat) ![GitHub top language](https://img.shields.io/github/languages/top/KTS-o7/RVChat)
+
+> Ollama PDF Chat is a web application built using the Chainlit library, LangChain, and Ollama. This README is a guide to setting up and using the application.
+
+## 📚 Table of Contents
+
+- [Installation](#💻-installation)
+- [Usage](#🎯-usage)
+- [License](./LICENSE)
+
+## 💻 Installation
+
+- Prerequisites:
+
+  - [Chainlit](https://docs.chainlit.io/)
+  - [LangChain](https://www.langchain.com/)
+  - [Ollama](https://ollama.com/)
+
+First, install Ollama on your system by following the instructions on the [Ollama website](https://ollama.com/).
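+
+On Linux, Ollama can usually be installed with the official install script (check the website for the current command):
+
+```bash
+  curl -fsSL https://ollama.com/install.sh | sh
+```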
+
+Then start the Ollama server with the following command:
+
+```bash
+  sudo systemctl start ollama
+```
+
+To check whether the server is running, use:
+
+```bash
+  sudo systemctl status ollama
+```
+
+Once the server is up and running, pull the model you plan to use (for example `gemma`, `llama2`, or `phi`):
+
+```bash
+  ollama pull gemma
+```
+
+Next, clone the repository
+
+```bash
+  git clone https://github.com/KTS-o7/cookbook.git
+```
+
+and change into the project directory
+
+```bash
+  cd cookbook/ollama-pdf-chat
+```
+
+Create a virtual environment for the application
+
+```bash
+  python3 -m venv ./env
+  source ./env/bin/activate
+```
+
+and then install the required dependencies
+
+```bash
+  pip install -r requirements.txt
+```
+
+Set the environment variables for the application in a `.env` file in the root directory of the application.
+An example environment file is provided [here](./exampleEnv).
+
+```bash
+  touch .env
+  echo "LLM_MODEL=gemma" >> .env
+  echo "ANONYMIZED_TELEMETRY=False" >> .env
+```
+
+## 🎯 Usage
+
+> Keep the required PDFs in a folder called `files` in the root directory of the application.
+> Then run the ingestor to index them:
+
+```bash
+  python ingestor.py
+```
+
+Once ingestion is done, you can start the application by running the following command:
+
+```bash
+  chainlit run multiChat.py
+```
+
+> OR
+
+If you want to chat with a single uploaded PDF or text file instead, run:
+
+```bash
+  chainlit run InstantChat.py
+```
+
+> The theme can be customized by changing the `config.toml` file inside the `.chainlit` directory.
+> An example config is provided in [example_config.toml](./example_config.toml).
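+
+For example, the light-theme primary colors can be overridden in the `[UI.theme.light.primary]` section (the values below are the ones shipped in the example config):
+
+```toml
+[UI.theme.light.primary]
+main = "#3f51b5"  # Indigo blue
+dark = "#303f9f"  # Dark indigo blue
+light = "#c5cae9" # Light indigo blue
+```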
+
+## 📄 License
+
+Ollama-PDF-Chat is distributed under the MIT License. The terms of the license are as follows:
+
+```markdown
+MIT License
+
+Copyright (c) 2024 Krishnatejaswi S
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+```
diff --git a/ollama-pdf-chat/chainlit.md b/ollama-pdf-chat/chainlit.md
new file mode 100644
index 000000000..dae797d93
--- /dev/null
+++ b/ollama-pdf-chat/chainlit.md
@@ -0,0 +1,29 @@
+# How to use Ollama-PDF Chat
+
+1. Upload a plain text file or a PDF file.
+   > We do not support other file types.
+2. Wait until the file is processed. This usually takes anywhere from 1-5 minutes, depending on the file size and the processing power of your computer.
+3. Once the file is processed, you will be able to see the chatbot in action.
+4. Ask the chatbot questions and it will answer them to the best of its ability.
+5. You can also ask the chatbot to summarize the document for you.
+6. You can also ask it to list or summarize the important points in the document.
+7. The chatbot also cites the sources of the information it provides.
+
+> This is still under development, so images and extremely large files are not yet supported. We are working on adding support soon.
+
+> Read the [documentation](./README.md) for more information.
+
+## Welcome to Chainlit! 🚀🤖
+
+Hi there, Developer! 👋 We're excited to have you on board. Chainlit is a powerful tool designed to help you prototype, debug and share applications built on top of LLMs.
+
+## Useful Links 🔗
+
+- **Documentation:** Get started with our comprehensive [Chainlit Documentation](https://docs.chainlit.io) 📚
+- **Discord Community:** Join our friendly [Chainlit Discord](https://discord.gg/k73SQ3FyUh) to ask questions, share your projects, and connect with other developers! 💬
+
+We can't wait to see what you create with Chainlit! Happy coding! 💻😊
+
+## Welcome screen
+
+To modify the welcome screen, edit the `chainlit.md` file at the root of your project. If you do not want a welcome screen, just leave this file empty.
diff --git a/ollama-pdf-chat/exampleEnv b/ollama-pdf-chat/exampleEnv
new file mode 100644
index 000000000..0c0dc0647
--- /dev/null
+++ b/ollama-pdf-chat/exampleEnv
@@ -0,0 +1,9 @@
+# Environment variables can be defined here.
+# The application expects a file named .env with the same contents.
+# Currently a single variable selects the LLM model used by the application.
+# Only one model should be active at a time, so comment out all but one.
+
+
+# LLM_MODEL = llama2
+# LLM_MODEL = phi
+LLM_MODEL = gemma
diff --git a/ollama-pdf-chat/example_config.toml b/ollama-pdf-chat/example_config.toml
new file mode 100644
index 000000000..a4423497f
--- /dev/null
+++ b/ollama-pdf-chat/example_config.toml
@@ -0,0 +1,99 @@
+[project]
+# Whether to enable telemetry (default: true). No personal data is collected.
+enable_telemetry = false
+
+
+# List of environment variables to be provided by each user to use the app.
+user_env = []
+
+# Duration (in seconds) during which the session is saved when the connection is lost
+session_timeout = 3600
+
+# Enable third parties caching (e.g LangChain cache)
+cache = true
+
+# Authorized origins
+allow_origins = ["*"]
+
+# Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317)
+# follow_symlink = false
+
+[features]
+# Show the prompt playground
+prompt_playground = true
+
+# Process and display HTML in messages. This can be a security risk (see https://stackoverflow.com/questions/19603097/why-is-it-dangerous-to-render-user-generated-html-or-javascript)
+unsafe_allow_html = false
+
+# Process and display mathematical expressions. This can clash with "$" characters in messages.
+latex = false
+
+# Authorize users to upload files with messages
+multi_modal = true
+
+# Allows user to use speech to text
+[features.speech_to_text]
+    enabled = false
+    # See all languages here https://github.com/JamesBrill/react-speech-recognition/blob/HEAD/docs/API.md#language-string
+    # language = "en-US"
+
+[UI]
+# Name of the app and chatbot.
+name = "Ollama-PDF-Chat"
+
+# Show the readme while the thread is empty.
+show_readme_as_default = false
+
+# Description of the app and chatbot. This is used for HTML tags.
+description = "This is a Ollama based chatbot built to chat with PDFs and Textfiles in offline" + +# Large size content are by default collapsed for a cleaner ui +default_collapse_content = true + +# The default value for the expand messages settings. +default_expand_messages = false + +# Hide the chain of thought details from the user in the UI. +hide_cot = false + +# Link to your github repo. This will add a github button in the UI's header. +github = "https://github.com/KTS-o7/RVChat" + +# Specify a CSS file that can be used to customize the user interface. +# The CSS file can be served from the public directory or via an external link. +# custom_css = "/public/test.css" + +# Specify a Javascript file that can be used to customize the user interface. +# The Javascript file can be served from the public directory. +# custom_js = "/public/test.js" + +# Specify a custom font url. +# custom_font = "https://fonts.googleapis.com/css2?family=Inter:wght@400;500;700&display=swap" + +# Override default MUI light theme. (Check theme.ts) +[UI.theme] + #font_family = "Inter, sans-serif" +[UI.theme.light] + #background = "#FAFAFA" + #paper = "#FFFFFF" + + [UI.theme.light.primary] + main = "#3f51b5" # Indigo blue + dark = "#303f9f" # Dark indigo blue + light = "#c5cae9" # Light indigo blue + +# Override default MUI dark theme. (Check theme.ts) +[UI.theme.dark] + #background = "#FAFAFA" + #paper = "#FFFFFF" + + [UI.theme.dark.primary] + main = "#9fa8da" # Lighter indigo blue + dark = "#7986cb" # Lighter dark indigo blue + light = "#e8eaf6" # Lighter light indigo blue + + + + +[meta] +generated_by = "1.0.301" diff --git a/ollama-pdf-chat/ingestor.py b/ollama-pdf-chat/ingestor.py new file mode 100644 index 000000000..4415bdc49 --- /dev/null +++ b/ollama-pdf-chat/ingestor.py @@ -0,0 +1,38 @@ +import os +import warnings +from tqdm import tqdm + +from langchain_community.vectorstores.chroma import Chroma +from langchain_community.document_loaders import (PyPDFLoader,CSVLoader, UnstructuredMarkdownLoader,UnstructuredPowerPointLoader,DirectoryLoader) +from langchain.text_splitter import RecursiveCharacterTextSplitter +from langchain_community.embeddings import OllamaEmbeddings + +warnings.simplefilter(action='ignore') +llm_model = "llama2" +def create_vector_database(): + + pdfDirecLoader = DirectoryLoader("./files/", glob="*.pdf",loader_cls=PyPDFLoader) + loadedDocuments = pdfDirecLoader.load() + print(len(loadedDocuments)) + #print(loadedDocuments) + + + textSplitter = RecursiveCharacterTextSplitter(chunk_size=1000,chunk_overlap=50) + chunkedDocuments = textSplitter.split_documents(loadedDocuments) + # print(len(chunkedDocuments)) + # print(type(chunkedDocuments)) + # print(chunkedDocuments[0]) + content = [] + metadatas = [] + for doc in chunkedDocuments: + content.append(doc.page_content) + metadatas.append(doc.metadata) + # print(content[0],metadatas[0],content[1],metadatas[1]) + + ollama_embeddings = OllamaEmbeddings(model=llm_model,show_progress=True) + + vectorDB = Chroma.from_texts(texts=content,embedding=ollama_embeddings,metadatas=metadatas,persist_directory="./data") + vectorDB.persist() + +if __name__ == "__main__": + create_vector_database() diff --git a/ollama-pdf-chat/multiChat.py b/ollama-pdf-chat/multiChat.py new file mode 100644 index 000000000..cad19b41b --- /dev/null +++ b/ollama-pdf-chat/multiChat.py @@ -0,0 +1,88 @@ +import os +import time +import chainlit as cl +import warnings +from langchain_community.vectorstores.chroma import Chroma +from langchain_community.document_loaders 
diff --git a/ollama-pdf-chat/multiChat.py b/ollama-pdf-chat/multiChat.py
new file mode 100644
index 000000000..cad19b41b
--- /dev/null
+++ b/ollama-pdf-chat/multiChat.py
@@ -0,0 +1,88 @@
+import os
+import warnings
+
+import chainlit as cl
+from langchain_community.vectorstores.chroma import Chroma
+from langchain_community.document_loaders import PyPDFLoader, DirectoryLoader
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain_community.embeddings import OllamaEmbeddings
+from langchain.chains import ConversationalRetrievalChain
+from langchain_community.chat_models.ollama import ChatOllama
+from langchain.memory import ChatMessageHistory, ConversationBufferMemory
+
+llmmodel = os.getenv("LLM_MODEL", "llama2")
+print(f"LLM_MODEL value: {llmmodel}")
+
+text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=50)
+
+
+@cl.on_chat_start
+async def on_chat_start():
+    warnings.simplefilter(action='ignore')
+    ollama_embeddings = OllamaEmbeddings(model=llmmodel, show_progress=True)
+
+    msg = cl.Message(content="Processing started ...")
+    await msg.send()
+
+    if not os.path.exists("./files"):
+        os.makedirs("./files")
+    if not os.path.exists("./data"):
+        # No persisted store yet: build it from the PDFs in ./files
+        os.makedirs("./data")
+        pdfDirecLoader = DirectoryLoader("./files/", glob="*.pdf", loader_cls=PyPDFLoader)
+        loadedDocuments = pdfDirecLoader.load()
+        chunkedDocuments = text_splitter.split_documents(loadedDocuments)
+        content = []
+        metadatas = []
+        for doc in chunkedDocuments:
+            content.append(doc.page_content)
+            metadatas.append(doc.metadata)
+        vectorDB = Chroma.from_texts(texts=content, embedding=ollama_embeddings, metadatas=metadatas, persist_directory="./data")
+        vectorDB.persist()
+
+    # Load the persisted vector store
+    vectorDB = Chroma(persist_directory="./data", embedding_function=ollama_embeddings)
+
+    messageHistory = ChatMessageHistory()
+
+    memory = ConversationBufferMemory(memory_key="chat_history", output_key="answer",
+                                      chat_memory=messageHistory, return_messages=True)
+
+    chain = ConversationalRetrievalChain.from_llm(
+        ChatOllama(model=llmmodel, temperature=0.2, streaming=True),
+        chain_type="stuff",
+        retriever=vectorDB.as_retriever(search_type="mmr"),
+        memory=memory,
+        return_source_documents=True,
+    )
+
+    msg.content = "Processing complete..."
+ await msg.update() + + cl.user_session.set("chain",chain) + +@cl.on_message +async def main(message: cl.Message): + chain = cl.user_session.get("chain") # type: ConversationalRetrievalChain + cb = cl.AsyncLangchainCallbackHandler() + + res = await chain.acall(message.content, callbacks=[cb]) + answer = res["answer"] + source_documents = res["source_documents"] + + text_elements = [] + + if source_documents: + for source_idx, source_doc in enumerate(source_documents): + source_name = f"source_{source_idx}" + # Create the text element referenced in the message + text_elements.append( + cl.Text(content=source_doc.page_content, name=source_name) + ) + source_names = [text_el.name for text_el in text_elements] + + if source_names: + answer += f"\nSources: {', '.join(source_names)}" + else: + answer += "\nNo sources found" + + await cl.Message(content=answer, elements=text_elements).send() + + + diff --git a/ollama-pdf-chat/requirements.txt b/ollama-pdf-chat/requirements.txt new file mode 100644 index 000000000..1ece08d94 --- /dev/null +++ b/ollama-pdf-chat/requirements.txt @@ -0,0 +1,150 @@ +aiofiles==23.2.1 +aiohttp==3.9.3 +aiosignal==1.3.1 +annotated-types==0.6.0 +anyio==3.7.1 +asgiref==3.7.2 +asttokens==2.4.1 +asyncer==0.0.2 +attrs==23.2.0 +backoff==2.2.1 +bcrypt==4.1.2 +bidict==0.23.1 +build==1.0.3 +cachetools==5.3.3 +certifi==2024.2.2 +chainlit==1.0.301 +charset-normalizer==3.3.2 +chroma-hnswlib==0.7.3 +chromadb==0.4.23 +click==8.1.7 +coloredlogs==15.0.1 +comm==0.2.1 +dataclasses-json==0.5.14 +debugpy==1.8.1 +decorator==5.1.1 +Deprecated==1.2.14 +distro==1.9.0 +executing==2.0.1 +fastapi==0.108.0 +fastapi-socketio==0.0.10 +filelock==3.13.1 +filetype==1.2.0 +flatbuffers==23.5.26 +frozenlist==1.4.1 +fsspec==2024.2.0 +google-auth==2.28.1 +googleapis-common-protos==1.62.0 +greenlet==3.0.3 +grpcio==1.62.0 +h11==0.14.0 +httpcore==1.0.4 +httptools==0.6.1 +httpx==0.25.2 +huggingface-hub==0.20.3 +humanfriendly==10.0 +idna==3.6 +importlib-metadata==6.11.0 +importlib_resources==6.1.2 +ipykernel==6.29.3 +ipython==8.22.2 +jedi==0.19.1 +jsonpatch==1.33 +jsonpointer==2.4 +jupyter_client==8.6.0 +jupyter_core==5.7.1 +kubernetes==29.0.0 +langchain==0.1.9 +langchain-community==0.0.24 +langchain-core==0.1.27 +langsmith==0.1.9 +Lazify==0.4.0 +literalai==0.0.204 +marshmallow==3.21.0 +matplotlib-inline==0.1.6 +mmh3==4.1.0 +monotonic==1.6 +mpmath==1.3.0 +multidict==6.0.5 +mypy-extensions==1.0.0 +nest-asyncio==1.6.0 +numpy==1.26.4 +oauthlib==3.2.2 +ollama==0.1.6 +onnxruntime==1.17.1 +openai==1.12.0 +opentelemetry-api==1.23.0 +opentelemetry-exporter-otlp==1.23.0 +opentelemetry-exporter-otlp-proto-common==1.23.0 +opentelemetry-exporter-otlp-proto-grpc==1.23.0 +opentelemetry-exporter-otlp-proto-http==1.23.0 +opentelemetry-instrumentation==0.44b0 +opentelemetry-instrumentation-asgi==0.44b0 +opentelemetry-instrumentation-fastapi==0.44b0 +opentelemetry-proto==1.23.0 +opentelemetry-sdk==1.23.0 +opentelemetry-semantic-conventions==0.44b0 +opentelemetry-util-http==0.44b0 +orjson==3.9.15 +overrides==7.7.0 +packaging==23.2 +parso==0.8.3 +pexpect==4.9.0 +platformdirs==4.2.0 +posthog==3.4.2 +prompt-toolkit==3.0.43 +protobuf==4.25.3 +psutil==5.9.8 +ptyprocess==0.7.0 +pulsar-client==3.4.0 +pure-eval==0.2.2 +pyasn1==0.5.1 +pyasn1-modules==0.3.0 +pydantic==2.6.2 +pydantic_core==2.16.3 +Pygments==2.17.2 +PyJWT==2.8.0 +pypdf==4.0.2 +PyPDF2==3.0.1 +PyPika==0.48.9 +pyproject_hooks==1.0.0 +python-dateutil==2.8.2 +python-dotenv==1.0.1 +python-engineio==4.9.0 +python-graphql-client==0.4.3 +python-multipart==0.0.6 
+python-socketio==5.11.1 +PyYAML==6.0.1 +pyzmq==25.1.2 +requests==2.31.0 +requests-oauthlib==1.3.1 +rsa==4.9 +simple-websocket==1.0.0 +six==1.16.0 +sniffio==1.3.1 +SQLAlchemy==2.0.27 +stack-data==0.6.3 +starlette==0.32.0.post1 +sympy==1.12 +syncer==2.0.3 +tenacity==8.2.3 +tokenizers==0.15.2 +tomli==2.0.1 +tornado==6.4 +tqdm==4.66.2 +traitlets==5.14.1 +typer==0.9.0 +typing-inspect==0.9.0 +typing_extensions==4.10.0 +uptrace==1.22.0 +urllib3==2.2.1 +uvicorn==0.25.0 +uvloop==0.19.0 +watchfiles==0.20.0 +wcwidth==0.2.13 +websocket-client==1.7.0 +websockets==12.0 +wrapt==1.16.0 +wsproto==1.2.0 +yarl==1.9.4 +zipp==3.17.0