From f19853c759ab6f49b7f488db9235686b50a60ef9 Mon Sep 17 00:00:00 2001
From: yym68686
Date: Sat, 25 Nov 2023 19:23:22 +0800
Subject: [PATCH] 1. update README Zeabur banner 2. fixed bug: This model's
 maximum context length 3. Fix the logic error of replying to messages in
 Telegram

---
 README.md                  |  3 ++-
 bot.py                     |  2 +-
 chatgpt2api/chatgpt2api.py | 12 ++++++++++--
 requirements.txt           |  2 +-
 4 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/README.md b/README.md
index b58b0512..082d80bc 100644
--- a/README.md
+++ b/README.md
@@ -45,7 +45,8 @@ Join the [Telegram Group](https://t.me/+_01cz9tAkUc1YzZl) chat to share your use
 
 One-click deployment:
 
-[![Deploy on Zeabur](https://zeabur.com/button.svg)](https://zeabur.com/templates/R5JY5O?referralCode=yym68686)
+[![Deployed on Zeabur](https://zeabur.com/deployed-on-zeabur-dark.svg)](https://zeabur.com?referralCode=yym68686&utm_source=yym68686&utm_campaign=oss)
+
 
 If you need follow-up function updates, the following deployment method is recommended:
 
diff --git a/bot.py b/bot.py
index 45c4d5fa..db019c41 100644
--- a/bot.py
+++ b/bot.py
@@ -31,7 +31,7 @@
 translator_prompt = "You are a translation engine, you can only translate text and cannot interpret it, and do not explain. Translate the text to {}, please do not explain any sentences, just translate or leave them as they are. this is the content you need to translate: "
 @decorators.Authorization
 async def command_bot(update, context, language=None, prompt=translator_prompt, title="", robot=None, has_command=True):
-    if update.message.reply_to_message is None:
+    if update.message.reply_to_message is None or update.message.reply_to_message.text:
         if has_command == False or len(context.args) > 0:
             message = update.message.text if config.NICK is None else update.message.text[botNicKLength:].strip() if update.message.text[:botNicKLength].lower() == botNick else None
             if has_command:
diff --git a/chatgpt2api/chatgpt2api.py b/chatgpt2api/chatgpt2api.py
index 72ae202b..ed3eef20 100644
--- a/chatgpt2api/chatgpt2api.py
+++ b/chatgpt2api/chatgpt2api.py
@@ -296,6 +296,10 @@ def ask_stream(
         )
         headers = {"Authorization": f"Bearer {kwargs.get('api_key', self.api_key)}"}
 
+        if self.engine == "gpt-4-1106-preview":
+            model_max_tokens = kwargs.get("max_tokens", self.max_tokens)
+        else:
+            model_max_tokens = min(self.get_max_tokens(convo_id=convo_id) - 500, kwargs.get("max_tokens", self.max_tokens))
         json_post = {
             "model": os.environ.get("MODEL_NAME") or model or self.engine,
             "messages": self.conversation[convo_id] if pass_history else [{"role": "system","content": self.system_prompt},{"role": role, "content": prompt}],
@@ -313,7 +317,7 @@
             ),
             "n": kwargs.get("n", self.reply_count),
             "user": role,
-            "max_tokens": kwargs.get("max_tokens", self.max_tokens),
+            "max_tokens": model_max_tokens,
             # "max_tokens": min(
             #     self.get_max_tokens(convo_id=convo_id),
             #     kwargs.get("max_tokens", self.max_tokens),
@@ -402,6 +406,10 @@ async def ask_stream_async(
             self.reset(convo_id=convo_id, system_prompt=self.system_prompt)
         self.add_to_conversation(prompt, "user", convo_id=convo_id)
         self.__truncate_conversation(convo_id=convo_id)
+        if self.engine == "gpt-4-1106-preview":
+            model_max_tokens = kwargs.get("max_tokens", self.max_tokens)
+        else:
+            model_max_tokens = min(self.get_max_tokens(convo_id=convo_id) - 500, kwargs.get("max_tokens", self.max_tokens))
         # Get response
         async with self.aclient.stream(
             "post",
@@ -424,7 +432,7 @@
                 ),
                 "n": kwargs.get("n", self.reply_count),
                 "user": role,
-                "max_tokens": kwargs.get("max_tokens", self.max_tokens),
+                "max_tokens": model_max_tokens,
                 # "max_tokens": min(
                 #     self.get_max_tokens(convo_id=convo_id),
                 #     kwargs.get("max_tokens", self.max_tokens),
diff --git a/requirements.txt b/requirements.txt
index e8aa809e..97977346 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,7 @@
 --index-url https://pypi.python.org/simple/
 tiktoken
 requests
-python-telegram-bot[webhook,rate-limiter]==20.6
+python-telegram-bot[webhooks,rate-limiter]==20.6
 # langchain
 chromadb