From 7e6361508aef00441d72c1aef08d29f92d409361 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Shinichi=20Takayanagi?=
Date: Mon, 15 May 2023 19:59:42 +0900
Subject: [PATCH] Update documentation (...and fix minor bugs) (#15)

---
 README.md              | 28 +++++++++++++++-------------
 oregpt/command.py      |  5 +++++
 oregpt/main.py         |  8 ++++++--
 pyproject.toml         |  2 ++
 tests/conftest.py      |  1 +
 tests/test_chat_bot.py | 32 ++++----------------------------
 tests/test_command.py  | 14 ++++++++++++--
 7 files changed, 45 insertions(+), 45 deletions(-)

diff --git a/README.md b/README.md
index 5add616..089c62c 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,7 @@ A tiny GPT CLI tool.
 You can chat with the GPT model developped by OpenAI and save the conversation as json.
 
-![oregpt](https://user-images.githubusercontent.com/24406372/236609166-0f2385b1-fd9e-4810-b80d-c19c44d13411.gif)
+
 
 ## Installation
 ### Get your own OpenAI API Key
@@ -33,6 +33,20 @@ Once you have installed oregpt, you can run it by typing:
 ```
 $ oregpt
 ```
 
+## Supported commands in chat
+Commands such as saving and loading conversations are available:
+
+| Command | Description |
+| ---- | ---- |
+| `/exit` | Exit this chat tool |
+| `/quit` | Exit this chat tool |
+| `/q` | Exit this chat tool |
+| `/clear` | Clear all chat history |
+| `/history` | Show chat history in json format |
+| `/save` | Save chat history in json format |
+| `/load` | Load chat history from a json file |
+| `/help` | Show all commands you can use in this chat tool |
+
 ## Configuration
 You can specify the place of conversation `log`, [style (color etc)](https://python-prompt-toolkit.readthedocs.io/en/master/pages/advanced_topics/styling.html)
@@ -57,15 +71,3 @@ character:
   name: System
   style: "#cc0000"
 ```
-
-## Supported commands on chat
-| Command | Description |
-| ---- | ---- |
-| `/exit` | Exit from this chat tool |
-| `/quit | Exit from this chat tool |
-| `/q | Exit from this chat tool |
-| `/clear | Clear chat history all |
-| `/history | Show chat history in json format |
-| `/save | Save chat hisotry in json format |
-| `/load | Load chat hisotry from a json file |
-| `/help | Show all commands which you can use in this chat tool |
diff --git a/oregpt/command.py b/oregpt/command.py
index ff569aa..40d4d1e 100644
--- a/oregpt/command.py
+++ b/oregpt/command.py
@@ -26,6 +26,11 @@ def build(self, message: str) -> Optional["Command"]:
             else None
         )
 
+    def looks_like_command(self, message: str) -> bool:
+        if len(message) == 0:
+            return False
+        return "/" == message[0]
+
 
 def register(cls: Type["Command"]) -> Type["Command"]:
     for representation in cls.representations:
diff --git a/oregpt/main.py b/oregpt/main.py
index 497f553..3b7c6ef 100644
--- a/oregpt/main.py
+++ b/oregpt/main.py
@@ -37,7 +37,7 @@ def initialize_open_ai_key(config: dict[str, Any]) -> None:
 def main() -> int:
     config = load_config()
     initialize_open_ai_key(config["openai"])
-    std_in_out = StdInOut(config["character"], lambda: "To exit, type q, quit, exit, or Ctrl + C")
+    std_in_out = StdInOut(config["character"], lambda: "To exit, type /q, /quit, /exit, or Ctrl + C")
     bot = ChatBot(config["openai"]["model"], std_in_out)
     command_builder = CommandBuilder(config, bot)
 
@@ -47,7 +47,11 @@ def main() -> int:
             if command := command_builder.build(message):
                 command.execute()
             else:
-                bot.respond(message)
+                if command_builder.looks_like_command(message):
+                    std_in_out.print_system("Invalid command. Valid commands are as follows:")
+                    command_builder.build("/help").execute()  # type: ignore
+                else:
+                    bot.respond(message)
     except KeyboardInterrupt:
         return 0
     except Exception as e:
diff --git a/pyproject.toml b/pyproject.toml
index 25c5556..dc9720e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -90,6 +90,8 @@ testpaths = [
 console_output_style = "progress"
 log_cli = true
+
+# === Mypy ===
 [tool.mypy]
 strict = true
 scripts_are_modules = true
diff --git a/tests/conftest.py b/tests/conftest.py
index 72d4376..4903261 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -8,6 +8,7 @@
 
 
 def pytest_configure():
+    # https://stackoverflow.com/questions/44441929/how-to-share-global-variables-between-tests
     pytest.DUMMY_CONTENT = "Yep"
diff --git a/tests/test_chat_bot.py b/tests/test_chat_bot.py
index 0b81818..a35d2f3 100644
--- a/tests/test_chat_bot.py
+++ b/tests/test_chat_bot.py
@@ -1,32 +1,8 @@
-import contextlib
 import json
 
 import pytest
-from openai import ChatCompletion
 
 from oregpt.chat_bot import ChatBot
-from oregpt.stdinout import StdInOut
-
-DUMMY_CONTENT = "Yep"
-
-
-@pytest.fixture(scope="function")
-def patched_bot(monkeypatch, helpers):
-    def _create(*args, **kwargs):
-        return [{"choices": [{"delta": {"content": DUMMY_CONTENT}}]}]
-
-    # Set monkey patch to avoid this error: https://github.com/prompt-toolkit/python-prompt-toolkit/issues/406
-    def _print(*args, **kwargs):
-        pass
-
-    @contextlib.contextmanager
-    def _print_as_contextmanager(*args, **kwargs):
-        yield
-
-    monkeypatch.setattr(ChatCompletion, "create", _create)
-    monkeypatch.setattr(StdInOut, "_print", _print)
-    monkeypatch.setattr(StdInOut, "print_assistant_thinking", _print_as_contextmanager)
-    return helpers.make_chat_bot("Yes")
 
 
 @pytest.fixture
@@ -43,10 +19,10 @@ def test_initialized_property(helpers):
 def test_respond_and_log(patched_bot):
     what_user_said = "Hello, world"
     assert patched_bot.log == ChatBot.SYSTEM_ROLE
-    assert DUMMY_CONTENT == patched_bot.respond(what_user_said)
+    assert pytest.DUMMY_CONTENT == patched_bot.respond(what_user_said)
     assert patched_bot.log == ChatBot.SYSTEM_ROLE + [
         {"role": "user", "content": what_user_said},
-        {"role": "assistant", "content": DUMMY_CONTENT},
+        {"role": "assistant", "content": pytest.DUMMY_CONTENT},
     ]
@@ -59,7 +35,7 @@ def test_save(tmp_file, patched_bot):
         assert patched_bot.log == json.load(file)
     assert patched_bot.log == ChatBot.SYSTEM_ROLE + [
         {"role": "user", "content": what_user_said},
-        {"role": "assistant", "content": DUMMY_CONTENT},
+        {"role": "assistant", "content": pytest.DUMMY_CONTENT},
     ]
@@ -78,7 +54,7 @@ def test_clear(patched_bot):
     patched_bot.respond(what_user_said)
     assert patched_bot.log == ChatBot.SYSTEM_ROLE + [
         {"role": "user", "content": what_user_said},
-        {"role": "assistant", "content": DUMMY_CONTENT},
+        {"role": "assistant", "content": pytest.DUMMY_CONTENT},
     ]
     patched_bot.clear()
     assert patched_bot._log == ChatBot.SYSTEM_ROLE
diff --git a/tests/test_command.py b/tests/test_command.py
index 81d833a..09ad7f4 100644
--- a/tests/test_command.py
+++ b/tests/test_command.py
@@ -12,13 +12,23 @@
 )
 
 
-def test_command_builder(helpers):
+def test_command_builder_build(helpers):
     command_builder = CommandBuilder({}, helpers.make_chat_bot("Yahoo"))
     for command_type in [ExitCommand, ClearCommand, HistoryCommand, SaveCommand, LoadCommand, HelpCommand]:
         for representation in command_type.representations:
             assert isinstance(command_builder.build(f"/{representation}"), command_type)
 
 
+def test_command_builder_looks_like_command(helpers):
+    command_builder = CommandBuilder({}, helpers.make_chat_bot("Yahoo"))
+    assert command_builder.looks_like_command("/hoge hoge")
+    assert command_builder.looks_like_command("/hoge")
+    assert command_builder.looks_like_command("/")
+    assert not command_builder.looks_like_command("hoge hoge")
+    assert not command_builder.looks_like_command("hoge")
+    assert not command_builder.looks_like_command("")
+
+
 def test_exit_command(patched_bot):
     command = ExitCommand({}, patched_bot, [])
     with pytest.raises(SystemExit):
@@ -29,4 +39,4 @@ def test_clear_command(patched_bot):
    cl = ClearCommand({}, patched_bot, [])
    patched_bot.respond("Hi, bot-san")
    cl.execute()
-    patched_bot.log == ChatBot.SYSTEM_ROLE
+    assert patched_bot.log == ChatBot.SYSTEM_ROLE
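
The gist of the `main.py` change: input that starts with `/` but does not build into a known command is now reported as invalid and answered with the `/help` output, instead of being sent to the model. Below is a minimal, runnable sketch of that routing, not part of the patch itself. The `looks_like_command` body is copied from the diff above; `route` and the demo callbacks are hypothetical stand-ins for `CommandBuilder.build`, `StdInOut.print_system`, and `ChatBot.respond`, kept generic so the sketch runs without the oregpt package or an OpenAI key.

```python
from typing import Callable, Optional


def looks_like_command(message: str) -> bool:
    # Same check as the new CommandBuilder.looks_like_command in this patch:
    # any non-empty input starting with "/" is treated as a command attempt.
    if len(message) == 0:
        return False
    return "/" == message[0]


def route(
    message: str,
    build: Callable[[str], Optional[Callable[[], None]]],
    print_system: Callable[[str], None],
    respond: Callable[[str], None],
) -> None:
    """Hypothetical stand-in for the else-branch added to main(); not the real CLI loop."""
    if command := build(message):
        command()  # a recognized command such as /help or /save
    elif looks_like_command(message):
        # Unknown "/..." input: warn and show the help text instead of calling the model.
        print_system("Invalid command. Valid commands are as follows:")
        help_command = build("/help")
        if help_command is not None:
            help_command()
    else:
        respond(message)  # plain chat text still goes to the GPT model


if __name__ == "__main__":
    # Tiny demo with fake callbacks; only "/help" is recognized here.
    known = {"/help": lambda: print("<help text>")}
    route("/hoge", known.get, print, lambda m: print(f"bot: {m}"))
    route("Hello!", known.get, print, lambda m: print(f"bot: {m}"))
```

The demo mirrors the new `test_command_builder_looks_like_command` cases: `/hoge`, `/hoge hoge`, and `/` count as command attempts, while `hoge` and the empty string fall through to the bot.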