Skip to content

Commit

Permalink
Merge pull request geekan#1552 from didiforgithub/main
Browse files Browse the repository at this point in the history
Fix JSON Load Error & Fix Claude Token Calculation Error.
  • Loading branch information
geekan authored Oct 30, 2024
2 parents 9db0874 + 6b8b9c8 commit fd7feb5
Show file tree
Hide file tree
Showing 3 changed files with 20 additions and 2 deletions.
7 changes: 6 additions & 1 deletion examples/aflow/optimize.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,12 @@ def parse_args():
parser.add_argument("--max_rounds", type=int, default=20, help="Max iteration rounds")
parser.add_argument("--check_convergence", type=bool, default=True, help="Whether to enable early stop")
parser.add_argument("--validation_rounds", type=int, default=5, help="Validation rounds")
parser.add_argument("--if_first_optimize", type=bool, default=True, help="Whether it's the first optimization")
parser.add_argument(
"--if_first_optimize",
type=lambda x: x.lower() == "true",
default=True,
help="Whether to download dataset for the first time",
)
return parser.parse_args()


Expand Down
6 changes: 5 additions & 1 deletion metagpt/ext/aflow/scripts/optimizer_utils/data_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,11 @@ def __init__(self, root_path: str):
def load_results(self, path: str) -> list:
    """Load the list of round results from ``<path>/results.json``.

    Args:
        path: Directory expected to contain a ``results.json`` file.

    Returns:
        The parsed JSON list, or an empty list when the file is missing
        or holds invalid JSON — callers never need to handle those errors.
    """
    result_path = os.path.join(path, "results.json")
    # Guard clause: no file yet means no results yet.
    if not os.path.exists(result_path):
        return []
    # Explicit utf-8: results may contain non-ASCII model output.
    with open(result_path, "r", encoding="utf-8") as json_file:
        try:
            return json.load(json_file)
        except json.JSONDecodeError:
            # Corrupted or partially-written file -> treat as "no results".
            return []

def get_top_rounds(self, sample: int, path=None, mode="Graph"):
Expand Down
9 changes: 9 additions & 0 deletions metagpt/utils/token_counter.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
ref4: https://github.com/hwchase17/langchain/blob/master/langchain/chat_models/openai.py
ref5: https://ai.google.dev/models/gemini
"""
import anthropic
import tiktoken
from openai.types import CompletionUsage
from openai.types.chat import ChatCompletionChunk
Expand Down Expand Up @@ -377,6 +378,10 @@

def count_input_tokens(messages, model="gpt-3.5-turbo-0125"):
"""Return the number of tokens used by a list of messages."""
if "claude" in model:
vo = anthropic.Client()
num_tokens = vo.count_tokens(str(messages))
return num_tokens
try:
encoding = tiktoken.encoding_for_model(model)
except KeyError:
Expand Down Expand Up @@ -463,6 +468,10 @@ def count_output_tokens(string: str, model: str) -> int:
Returns:
int: The number of tokens in the text string.
"""
if "claude" in model:
vo = anthropic.Client()
num_tokens = vo.count_tokens(string)
return num_tokens
try:
encoding = tiktoken.encoding_for_model(model)
except KeyError:
Expand Down

0 comments on commit fd7feb5

Please sign in to comment.