Skip to content

Commit

Permalink
Merge pull request #409 from LlmKira/main
Browse files Browse the repository at this point in the history
Build Docker
  • Loading branch information
sudoskys authored Apr 29, 2024
2 parents 5654454 + 05c5633 commit b8fe5ca
Show file tree
Hide file tree
Showing 29 changed files with 551 additions and 646 deletions.
3 changes: 2 additions & 1 deletion .env.exp
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
# NOTE:If you want to share your bot to **everyone**
# GLOBAL_OAI_KEY=sk-xxx
# GLOBAL_OAI_MODEL=gpt-3.5-turbo-0613
GLOBAL_OAI_MODEL=gpt-3.5-turbo
GLOBAL_OAI_TOOL_MODEL=gpt-3.5-turbo
# GLOBAL_OAI_ENDPOINT=https://api.openai.com/v1/

AMQP_DSN=amqp://admin:8a8a8a@localhost:5672/
Expand Down
8 changes: 6 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -78,10 +78,10 @@ or [one-api](https://github.com/songquanpeng/one-api) independently.

### 🍔 Login Modes

- `Login via url`: Use `/login token$https://provider.com` to Login. The program posts the token to the interface to
- `Login via url`: Use `/login <token>$<provider url, e.g. https://provider.com/login>` to log in. The program posts the token to the provider's interface to
retrieve configuration
information, [how to develop this](https://github.com/LlmKira/Openaibot/blob/81eddbff0f136697d5ad6e13ee1a7477b26624ed/app/components/credential.py#L20).
- `Login`: Use `/login https://api.com/v1$key$model` to login
- `Login`: Use `/login https://<api endpoint>/v1$<api key>$<the model>$<tool model such as gpt-3.5-turbo>` to login

### 🧀 Plugin Can Do More

Expand All @@ -97,6 +97,7 @@ or [one-api](https://github.com/songquanpeng/one-api) independently.
| Discord ||| |
| Kook ||| Does not support `triggering by reply` |
| Slack ||| Does not support `triggering by reply` |
| Line || | |
| QQ || | |
| Wechat || | |
| Twitter || | |
Expand Down Expand Up @@ -145,6 +146,9 @@ npm install pm2 -g
pm2 start pm2.json
```

> **Be sure to change the database's default password, or close any publicly exposed ports, to prevent the database from being
scanned and attacked.**

### 🥣 Docker

Build Hub: [sudoskys/llmbot](https://hub.docker.com/repository/docker/sudoskys/llmbot/general)
Expand Down
9 changes: 9 additions & 0 deletions app/components/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
from typing import Optional

from app.components.credential import Credential
from app.components.user_manager import USER_MANAGER


async def read_user_credential(user_id: str) -> Optional[Credential]:
    """Look up the stored user record and return its credential, if any."""
    stored_user = await USER_MANAGER.read(user_id=user_id)
    return stored_user.credential
32 changes: 3 additions & 29 deletions app/components/credential.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
import os
from urllib.parse import urlparse

import requests
from dotenv import load_dotenv
Expand All @@ -15,6 +14,7 @@ class Credential(BaseModel):
api_key: str
api_endpoint: str
api_model: str
api_tool_model: str = "gpt-3.5-turbo"

@classmethod
def from_provider(cls, token, provider_url):
Expand All @@ -36,37 +36,10 @@ def from_provider(cls, token, provider_url):
api_key=user_data["api_key"],
api_endpoint=user_data["api_endpoint"],
api_model=user_data["api_model"],
api_tool_model=user_data.get("api_tool_model", "gpt-3.5-turbo"),
)


def split_setting_string(input_string):
    """Split a ``$``-delimited login string into its components.

    Two accepted layouts:
      * ``<endpoint url>$<key>$<model>[...]`` — first segment is a URL and at
        least three segments exist; the first three are returned.
      * ``<token>$<provider url>`` — exactly two segments where only the
        second is a URL; both are returned.

    Returns ``None`` for non-string input or any other layout.
    """
    if not isinstance(input_string, str):
        return None

    def _looks_like_url(candidate):
        # A segment counts as a URL only when both scheme and host are present.
        try:
            parsed = urlparse(candidate)
        except ValueError:
            return False
        return bool(parsed.scheme) and bool(parsed.netloc)

    parts = input_string.split("$")

    # Endpoint-first layout: keep only the first three segments.
    if _looks_like_url(parts[0]) and len(parts) >= 3:
        return parts[:3]

    # Token-first layout: a plain token followed by a provider URL.
    if (
        len(parts) == 2
        and not _looks_like_url(parts[0])
        and _looks_like_url(parts[1])
    ):
        return parts

    return None


load_dotenv()

if os.getenv("GLOBAL_OAI_KEY") and os.getenv("GLOBAL_OAI_ENDPOINT"):
Expand All @@ -75,6 +48,7 @@ def is_valid_url(url):
api_key=os.getenv("GLOBAL_OAI_KEY"),
api_endpoint=os.getenv("GLOBAL_OAI_ENDPOINT"),
api_model=os.getenv("GLOBAL_OAI_MODEL", "gpt-3.5-turbo"),
api_tool_model=os.getenv("GLOBAL_OAI_TOOL_MODEL", "gpt-3.5-turbo"),
)
else:
global_credential = None
3 changes: 2 additions & 1 deletion app/middleware/llm_task.py
Original file line number Diff line number Diff line change
Expand Up @@ -102,6 +102,7 @@ def pair_check(_messages):
new_list.append(_messages[i])
new_list.append(_messages[-1])
if isinstance(_messages[-1], AssistantMessage) and _messages[-1].tool_calls:
logger.warning("llm_task:the last AssistantMessage not paired, be careful")
new_list.extend(mock_tool_message(_messages[-1], "[On Queue]"))
return new_list

Expand Down Expand Up @@ -150,7 +151,7 @@ async def build_history_messages(self):
message_run = []
if isinstance(system_prompt, str):
message_run.append(SystemMessage(content=system_prompt))
history = await self.message_history.read(lines=10)
history = await self.message_history.read(lines=8)
logger.trace(f"History message {history}")
for de_active_message in history:
try:
Expand Down
85 changes: 67 additions & 18 deletions app/receiver/function.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,8 +14,12 @@
from aio_pika.abc import AbstractIncomingMessage
from loguru import logger

from app.components import read_user_credential
from app.components.credential import global_credential
from llmkira.kv_manager.env import EnvManager
from llmkira.kv_manager.tool_call import GLOBAL_TOOLCALL_CACHE_HANDLER
from llmkira.logic import LLMLogic
from llmkira.memory import global_message_runtime
from llmkira.openai.cell import ToolCall
from llmkira.sdk.tools.register import ToolRegister
from llmkira.task import Task, TaskHeader
Expand Down Expand Up @@ -235,20 +239,7 @@ async def run_pending_task(task: TaskHeader, pending_task: ToolCall):
return logger.info(
f"[Snapshot Auth] \n--auth-require {pending_task.name} require."
)

# Resign Chain
# 时序实现,防止过度注册
if len(task.task_sign.tool_calls_pending) == 1:
if not has_been_called_recently(userid=task.receiver.uid, n_seconds=5):
logger.debug(
"ToolCall run out, resign a new request to request stop sign."
)
await create_child_snapshot(
task=task,
memory_able=True,
channel=task.receiver.platform,
)
# 运行函数, 传递模型的信息,以及上一条的结果的openai raw信息
# Run Function
run_result = await _tool_obj.load(
task=task,
receiver=task.receiver,
Expand All @@ -257,11 +248,72 @@ async def run_pending_task(task: TaskHeader, pending_task: ToolCall):
pending_task=pending_task,
refer_llm_result=task.task_sign.llm_response,
)
run_status = True
# 更新任务状态
if run_result.get("exception"):
run_status = False
await task.task_sign.complete_task(
tool_calls=pending_task, success_or_not=True, run_result=run_result
)
return run_result
# Resign Chain
# 时序实现,防止过度注册
if len(task.task_sign.tool_calls_pending) == 0:
if not has_been_called_recently(userid=task.receiver.uid, n_seconds=3):
credentials = await read_user_credential(user_id=task.receiver.uid)
if global_credential:
credentials = global_credential
logic = LLMLogic(
api_key=credentials.api_key,
api_endpoint=credentials.api_endpoint,
api_model=credentials.api_tool_model,
)
history = await global_message_runtime.update_session(
session_id=task.receiver.uid,
).read(lines=3)
logger.debug(f"Read History:{history}")
continue_ = await logic.llm_continue(
context=f"History:{history},ToolCallResult:{run_status}",
condition="If there is still any action that needs to be performed",
default=False,
)
if continue_.boolean:
logger.debug(
"ToolCall run out, resign a new request to request stop sign."
)
await create_child_snapshot(
task=task,
memory_able=True,
channel=task.receiver.platform,
)
# 运行函数, 传递模型的信息,以及上一条的结果的openai raw信息
await Task.create_and_send(
queue_name=task.receiver.platform,
task=TaskHeader(
sender=task.sender,
receiver=task.receiver,
task_sign=task.task_sign.notify(
plugin_name=__receiver__,
response_snapshot=True,
memory_able=False,
),
message=[
EventMessage(
user_id=task.receiver.user_id,
chat_id=task.receiver.chat_id,
text=continue_.comment_to_user,
)
],
),
)
else:
if continue_.comment_to_user:
await reply_user(
platform=task.receiver.platform,
receiver=task.receiver,
task=task,
text=continue_.comment_to_user,
)
return run_status

async def process_function_call(self, message: AbstractIncomingMessage):
"""
Expand Down Expand Up @@ -307,9 +359,6 @@ async def run_task(self, task, pending_task):
try:
await self.run_pending_task(task=task, pending_task=pending_task)
except Exception as e:
await task.task_sign.complete_task(
tool_calls=pending_task, success_or_not=False, run_result=str(e)
)
logger.error(f"Function Call Error {e}")
raise e
finally:
Expand Down
9 changes: 2 additions & 7 deletions app/receiver/receiver_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,8 @@
from loguru import logger
from telebot import formatting

from app.components.credential import Credential, global_credential
from app.components.user_manager import USER_MANAGER
from app.components import read_user_credential
from app.components.credential import global_credential
from app.middleware.llm_task import OpenaiMiddleware
from llmkira.kv_manager.env import EnvManager
from llmkira.openai import OpenaiError
Expand Down Expand Up @@ -48,11 +48,6 @@ async def get(self, user_id):
user_locks = UserLocks()


async def read_user_credential(user_id: str) -> Optional[Credential]:
user = await USER_MANAGER.read(user_id=user_id)
return user.credential


async def generate_authorization(
secrets: Dict, tool_invocation: ToolCall
) -> Tuple[dict, list, bool]:
Expand Down
46 changes: 32 additions & 14 deletions app/sender/discord/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@
# @Software: PyCharm
import base64
import binascii
import json
import random
from typing import List

Expand All @@ -30,7 +29,15 @@
__sender__ = "discord_hikari"
__default_disable_tool_action__ = False

from ..util_func import auth_reloader, is_command, is_empty_command, uid_make, login
from ..util_func import (
auth_reloader,
is_command,
is_empty_command,
uid_make,
save_credential,
dict2markdown,
learn_instruction,
)
from llmkira.openapi.trigger import get_trigger_loop
from ...components.credential import Credential, ProviderError

Expand Down Expand Up @@ -238,7 +245,7 @@ async def listen_login_url_command(
credential = Credential.from_provider(
token=token, provider_url=provider_url
)
await login(
await save_credential(
uid=uid_make(__sender__, ctx.user.id),
credential=credential,
)
Expand All @@ -256,6 +263,18 @@ async def listen_login_url_command(
ephemeral=True,
)

@client.include
@crescent.command(
    dm_enabled=True,
    name="learn",
    description="Set instruction text",
)
async def listen_learn_command(ctx: crescent.Context, instruction: str):
    # Store the user's custom instruction text under their platform-scoped uid.
    # NOTE(review): per the help text elsewhere in this commit, "/learn reset"
    # presumably clears the instruction — confirm in learn_instruction.
    reply = await learn_instruction(
        uid=uid_make(__sender__, ctx.user.id), instruction=instruction
    )
    # Reply privately (ephemeral) so the instruction text is not shown publicly.
    return await ctx.respond(content=convert(reply), ephemeral=True)

@client.include
@crescent.command(
dm_enabled=True,
Expand All @@ -264,17 +283,19 @@ async def listen_login_url_command(
)
async def listen_endpoint_command(
ctx: crescent.Context,
openai_endpoint: str,
openai_key: str,
openai_model: str,
api_endpoint: str,
api_key: str,
api_model: str,
api_tool_model: str = "gpt-3.5-turbo",
):
try:
credential = Credential(
api_endpoint=openai_endpoint,
api_key=openai_key,
api_model=openai_model,
api_endpoint=api_endpoint,
api_key=api_key,
api_model=api_model,
api_tool_model=api_tool_model,
)
await login(
await save_credential(
uid=uid_make(__sender__, ctx.user.id),
credential=credential,
)
Expand Down Expand Up @@ -383,10 +404,7 @@ async def listen_env_command(ctx: crescent.Context, env_string: str):
"**🧊 Env parse failed...O_o**\n", separator="\n"
)
else:
text = formatting.format_text(
f"**🧊 Updated**\n" f"```json\n{json.dumps(env_map, indent=2)}```",
separator="\n",
)
text = convert(dict2markdown(env_map))
await ctx.respond(
ephemeral=True,
content=text,
Expand Down
1 change: 1 addition & 0 deletions app/sender/discord/event.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ def help_message():
`/login` - login
`/login_via_url` - login via url
`/env` - set environment variable, split by ; , use `/env ENV=NONE` to disable a env.
`/learn` - set your system prompt, reset by `/learn reset`
**Please confirm that that bot instance is secure, some plugins may be dangerous on unsafe instance.**
""".format(prefix=BotSetting.prefix)
Loading

0 comments on commit b8fe5ca

Please sign in to comment.