Skip to content

Commit

Permalink
Merge pull request #36 from AgentOps-AI/35-make-print-optional-in-update_token_costs
Browse files Browse the repository at this point in the history
  • Loading branch information
areibman authored Mar 27, 2024
2 parents 33d0e0e + e3a7eba commit ee120d3
Show file tree
Hide file tree
Showing 3 changed files with 6 additions and 10 deletions.
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ tokencost = ["model_prices.json"]

[project]
name = "tokencost"
version = "0.1.6"
version = "0.1.7"
authors = [
{ name = "Trisha Pan", email = "[email protected]" },
{ name = "Alex Reibman", email = "[email protected]" },
Expand Down
1 change: 0 additions & 1 deletion tokencost/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,6 @@ async def update_token_costs():
global TOKEN_COSTS
try:
TOKEN_COSTS = await fetch_costs()
print("TOKEN_COSTS updated successfully.")
except Exception as e:
logging.error(f"Failed to update TOKEN_COSTS: {e}")

Expand Down
13 changes: 5 additions & 8 deletions tokencost/costs.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
from typing import Union, List, Dict
from .constants import TOKEN_COSTS
from decimal import Decimal
import logging


# TODO: Add Claude support
Expand Down Expand Up @@ -39,7 +40,7 @@ def count_message_tokens(messages: List[Dict[str, str]], model: str) -> int:
try:
encoding = tiktoken.encoding_for_model(model)
except KeyError:
print("Warning: model not found. Using cl100k_base encoding.")
logging.warning("Model not found. Using cl100k_base encoding.")
encoding = tiktoken.get_encoding("cl100k_base")
if model in {
"gpt-3.5-turbo-0613",
Expand All @@ -56,14 +57,10 @@ def count_message_tokens(messages: List[Dict[str, str]], model: str) -> int:
tokens_per_message = 4
tokens_per_name = -1 # if there's a name, the role is omitted
elif "gpt-3.5-turbo" in model:
print(
"Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613."
)
logging.warning("gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.")
return count_message_tokens(messages, model="gpt-3.5-turbo-0613")
elif "gpt-4" in model:
print(
"Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613."
)
logging.warning("gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
return count_message_tokens(messages, model="gpt-4-0613")
else:
raise KeyError(
Expand Down Expand Up @@ -96,7 +93,7 @@ def count_string_tokens(prompt: str, model: str) -> int:
try:
encoding = tiktoken.encoding_for_model(model)
except KeyError:
print("Warning: model not found. Using cl100k_base encoding.")
logging.warning("Model not found. Using cl100k_base encoding.")
encoding = tiktoken.get_encoding("cl100k_base")

return len(encoding.encode(prompt))
Expand Down

0 comments on commit ee120d3

Please sign in to comment.