Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

chore: update pre-commit hook versions and replace darglint with pydoclint #9

Merged
merged 2 commits on Oct 23, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 8 additions & 7 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
@@ -1,12 +1,12 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
rev: v4.5.0
hooks:
- id: check-yaml
- id: end-of-file-fixer
- id: trailing-whitespace
- repo: https://github.com/psf/black
rev: 23.9.0
rev: 23.10.0
hooks:
- id: black
- repo: https://github.com/kynan/nbstripout
Expand All @@ -18,14 +18,15 @@ repos:
hooks:
- id: interrogate
args: [-c, pyproject.toml]
- repo: https://github.com/terrencepreilly/darglint
rev: v1.8.1
- repo: https://github.com/jsh9/pydoclint
rev: 0.3.8
hooks:
- id: darglint
args: [-v 2] # this config makes the error messages a bit less cryptic.
- id: pydoclint
args:
- "--config=pyproject.toml"
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.0.287
rev: v0.1.1
hooks:
- id: ruff
args: [--fix, --exit-non-zero-on-fix]
Expand Down
24 changes: 11 additions & 13 deletions llamabot/bot/chatbot.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,17 @@ class ChatBot:
Automatic chat memory management happens.

h/t Andrew Giessel/GPT4 for the idea.

:param system_prompt: The system prompt to use.
:param temperature: The model temperature to use.
See https://platform.openai.com/docs/api-reference/completions/create#completions/create-temperature
for more information.
:param model_name: The name of the OpenAI model to use.
:param logging: Whether to log the chat history.
:param streaming: (LangChain config) Whether to stream the output to stdout.
:param verbose: (LangChain config) Whether to print debug messages.
:param response_budget: (LangChain config) The maximum number of tokens
to use for the response.
"""

def __init__(
Expand All @@ -34,19 +45,6 @@ def __init__(
verbose=True,
response_budget=2_000,
):
"""Initialize the ChatBot.

:param system_prompt: The system prompt to use.
:param temperature: The model temperature to use.
See https://platform.openai.com/docs/api-reference/completions/create#completions/create-temperature
for more information.
:param model_name: The name of the OpenAI model to use.
:param logging: Whether to log the chat history.
:param streaming: (LangChain config) Whether to stream the output to stdout.
:param verbose: (LangChain config) Whether to print debug messages.
:param response_budget: (LangChain config) The maximum number of tokens
to use for the response.
"""
self.model = create_model(
model_name=model_name,
temperature=temperature,
Expand Down
56 changes: 27 additions & 29 deletions llamabot/bot/querybot.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,33 @@


class QueryBot:
"""QueryBot is a bot that lets us use GPT4 to query documents."""
"""QueryBot is a bot that lets us use GPT4 to query documents.

Pass in either the doc_path or saved_index_path to initialize the QueryBot.

NOTE: QueryBot is not designed to have memory!

The default text splitter is the TokenTextSplitter from LangChain.
The default index that we use is the GPTVectorStoreIndex from LlamaIndex.
We also default to using GPT4 with temperature 0.0.

:param system_message: The system message to send to the chatbot.
:param model_name: The name of the OpenAI model to use.
:param temperature: The model temperature to use.
See https://platform.openai.com/docs/api-reference/completions/create#completions/create-temperature
for more information.
:param doc_paths: A path to a document,
or a list of paths to multiple documents,
to use for the chatbot.
:param saved_index_path: The path to the saved index to use for the chatbot.
:param response_tokens: The number of tokens to use for responses.
:param history_tokens: The number of tokens to use for history.
:param chunk_sizes: The chunk sizes to use for the LlamaIndex TokenTextSplitter.
Defaults to [2000], but can be a list of integers.
:param streaming: Whether to stream the chatbot or not.
:param verbose: (LangChain config) Whether to print debug messages.
:param use_cache: Whether to use the cache or not.
"""

def __init__(
self,
Expand All @@ -71,34 +97,6 @@ def __init__(
verbose: bool = True,
use_cache: bool = True,
):
"""Initialize QueryBot.

Pass in either the doc_path or saved_index_path to initialize the QueryBot.

NOTE: QueryBot is not designed to have memory!

The default text splitter is the TokenTextSplitter from LangChain.
The default index that we use is the GPTVectorStoreIndex from LlamaIndex.
We also default to using GPT4 with temperature 0.0.

:param system_message: The system message to send to the chatbot.
:param model_name: The name of the OpenAI model to use.
:param temperature: The model temperature to use.
See https://platform.openai.com/docs/api-reference/completions/create#completions/create-temperature
for more information.
:param doc_paths: A path to a document,
or a list of paths to multiple documents,
to use for the chatbot.
:param saved_index_path: The path to the saved index to use for the chatbot.
:param response_tokens: The number of tokens to use for responses.
:param history_tokens: The number of tokens to use for history.
:param chunk_sizes: The chunk sizes to use for the LlamaIndex TokenTextSplitter.
Defaults to [2000], but can be a list of integers.
:param streaming: Whether to stream the chatbot or not.
:param verbose: (LangChain config) Whether to print debug messages.
:param use_cache: Whether to use the cache or not.
"""

chat = create_model(
model_name=model_name,
temperature=temperature,
Expand Down
18 changes: 8 additions & 10 deletions llamabot/bot/simplebot.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,14 @@ class SimpleBot:
and sends back a single response.

This bot does not retain chat history.

:param system_prompt: The system prompt to use.
:param temperature: The model temperature to use.
See https://platform.openai.com/docs/api-reference/completions/create#completions/create-temperature
for more information.
:param model_name: The name of the OpenAI model to use.
:param streaming: (LangChain config) Whether to stream the output to stdout.
:param verbose: (LangChain config) Whether to print debug messages.
"""

def __init__(
Expand All @@ -28,16 +36,6 @@ def __init__(
streaming=True,
verbose=True,
):
"""Initialize the SimpleBot.

:param system_prompt: The system prompt to use.
:param temperature: The model temperature to use.
See https://platform.openai.com/docs/api-reference/completions/create#completions/create-temperature
for more information.
:param model_name: The name of the OpenAI model to use.
:param streaming: (LangChain config) Whether to stream the output to stdout.
:param verbose: (LangChain config) Whether to print debug messages.
"""
self.system_prompt = system_prompt
self.model = create_model(
model_name=model_name,
Expand Down
1 change: 0 additions & 1 deletion llamabot/recorder.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,6 @@ class PromptRecorder:
"""Prompt recorder to support recording of prompts and responses."""

def __init__(self):
"""Initialize prompt recorder."""
self.prompts_and_responses = []

def __enter__(self):
Expand Down
8 changes: 3 additions & 5 deletions llamabot/zotero/completer.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,13 +3,11 @@


class PaperTitleCompleter(Completer):
"""Completer class for paper titles."""
"""Completer class for paper titles.

def __init__(self, paper_titles):
"""Initialize the completer.
:param paper_titles: A list of paper titles to choose from."""

:param paper_titles: A list of paper titles to choose from.
"""
def __init__(self, paper_titles):
self.paper_titles = paper_titles

def get_completions(self, document, complete_event) -> list:
Expand Down