
Merge pull request #569 from err09r/gpt-4o
Add gpt-4o model
n3d1117 authored May 14, 2024
2 parents 9a1b5cb + e1f5988 commit 2040038
Showing 2 changed files with 9 additions and 5 deletions.
bot/openai_helper.py (12 changes: 8 additions & 4 deletions)
@@ -28,8 +28,8 @@
 GPT_4_32K_MODELS = ("gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613")
 GPT_4_VISION_MODELS = ("gpt-4-vision-preview",)
 GPT_4_128K_MODELS = ("gpt-4-1106-preview","gpt-4-0125-preview","gpt-4-turbo-preview", "gpt-4-turbo", "gpt-4-turbo-2024-04-09")
-GPT_ALL_MODELS = GPT_3_MODELS + GPT_3_16K_MODELS + GPT_4_MODELS + GPT_4_32K_MODELS + GPT_4_VISION_MODELS + GPT_4_128K_MODELS
-
+GPT_4O_MODELS = ("gpt-4o",)
+GPT_ALL_MODELS = GPT_3_MODELS + GPT_3_16K_MODELS + GPT_4_MODELS + GPT_4_32K_MODELS + GPT_4_VISION_MODELS + GPT_4_128K_MODELS + GPT_4O_MODELS
 
 def default_max_tokens(model: str) -> int:
     """
@@ -42,7 +42,7 @@ def default_max_tokens(model: str) -> int:
         return base
     elif model in GPT_4_MODELS:
         return base * 2
-    elif model in GPT_3_16K_MODELS:
+    elif model in GPT_3_16K_MODELS:
         if model == "gpt-3.5-turbo-1106":
             return 4096
         return base * 4
@@ -52,6 +52,8 @@ def default_max_tokens(model: str) -> int:
         return 4096
     elif model in GPT_4_128K_MODELS:
         return 4096
+    elif model in GPT_4O_MODELS:
+        return 4096
 
 
 def are_functions_available(model: str) -> bool:
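
A quick usage sketch of the default_max_tokens change: gpt-4o gets the same 4096-token completion budget as the 128k-context GPT-4 models. The import path is an assumption based on the file path shown in this diff.

# Minimal sketch; assumes the module is importable as bot.openai_helper.
from bot.openai_helper import default_max_tokens

assert default_max_tokens("gpt-4o") == 4096  # resolved by the new GPT_4O_MODELS branch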
@@ -634,6 +636,8 @@ def __max_model_tokens(self):
             return base * 31
         if self.config['model'] in GPT_4_128K_MODELS:
             return base * 31
+        if self.config['model'] in GPT_4O_MODELS:
+            return base * 31
         raise NotImplementedError(
             f"Max tokens for model {self.config['model']} is not implemented yet."
         )
@@ -654,7 +658,7 @@ def __count_tokens(self, messages) -> int:
         if model in GPT_3_MODELS + GPT_3_16K_MODELS:
             tokens_per_message = 4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
             tokens_per_name = -1  # if there's a name, the role is omitted
-        elif model in GPT_4_MODELS + GPT_4_32K_MODELS + GPT_4_VISION_MODELS + GPT_4_128K_MODELS:
+        elif model in GPT_4_MODELS + GPT_4_32K_MODELS + GPT_4_VISION_MODELS + GPT_4_128K_MODELS + GPT_4O_MODELS:
             tokens_per_message = 3
             tokens_per_name = 1
         else:
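
The constants in the last hunk (tokens_per_message = 3, tokens_per_name = 1) follow OpenAI's token-counting recipe for chat models: each message costs a fixed overhead on top of its encoded content, and the assistant reply is primed with a few extra tokens. A self-contained sketch of that recipe, using a hypothetical count_chat_tokens helper rather than the bot's actual __count_tokens method:

# Standalone sketch of the counting recipe; assumes tiktoken >= 0.7.0,
# which knows how to map "gpt-4o" to an encoding.
import tiktoken

def count_chat_tokens(messages, model="gpt-4o"):
    encoding = tiktoken.encoding_for_model(model)
    tokens_per_message = 3  # fixed overhead per message (GPT-4-family models)
    tokens_per_name = 1     # extra token when a message carries a "name" field
    num_tokens = 0
    for message in messages:
        num_tokens += tokens_per_message
        for key, value in message.items():
            num_tokens += len(encoding.encode(value))
            if key == "name":
                num_tokens += tokens_per_name
    num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
    return num_tokens

print(count_chat_tokens([{"role": "user", "content": "Hello, gpt-4o!"}]))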
requirements.txt (2 changes: 1 addition & 1 deletion)
@@ -1,6 +1,6 @@
 python-dotenv~=1.0.0
 pydub~=0.25.1
-tiktoken==0.5.1
+tiktoken==0.7.0
 openai==1.3.3
 python-telegram-bot==20.3
 requests~=2.31.0
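
The tiktoken bump is what makes gpt-4o token counting possible at all: gpt-4o uses the new o200k_base encoding, which tiktoken only ships from version 0.7.0, so encoding_for_model("gpt-4o") fails on the previous 0.5.1 pin. A quick check under the new pin:

# Requires tiktoken==0.7.0 as pinned above; on 0.5.1 this lookup raises KeyError.
import tiktoken

print(tiktoken.encoding_for_model("gpt-4o").name)  # o200k_base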
