I got this error when trying to use GPT-4. gpt-3.5-turbo works fine; the problem only appears with GPT-4:
```
Traceback (most recent call last):
  File "D:\VSC_PROJECTS\ChatDev_Human2.venv\lib\site-packages\tenacity\__init__.py", line 382, in __call__
    result = fn(*args, **kwargs)
  File "D:\VSC_PROJECTS\ChatDev_Human2.venv\ChatDev\camel\utils.py", line 145, in wrapper
    return func(self, *args, **kwargs)
  File "D:\VSC_PROJECTS\ChatDev_Human2.venv\ChatDev\camel\agents\chat_agent.py", line 191, in step
    response = self.model_backend.run(messages=openai_messages)
  File "D:\VSC_PROJECTS\ChatDev_Human2.venv\ChatDev\camel\model_backend.py", line 69, in run
    response = openai.ChatCompletion.create(*args, **kwargs,
  File "D:\VSC_PROJECTS\ChatDev_Human2.venv\lib\site-packages\openai\api_resources\chat_completion.py", line 25, in create
    return super().create(*args, **kwargs)
  File "D:\VSC_PROJECTS\ChatDev_Human2.venv\lib\site-packages\openai\api_resources\abstract\engine_api_resource.py", line 153, in create
    response, _, api_key = requestor.request(
  File "D:\VSC_PROJECTS\ChatDev_Human2.venv\lib\site-packages\openai\api_requestor.py", line 298, in request
    resp, got_stream = self._interpret_response(result, stream)
  File "D:\VSC_PROJECTS\ChatDev_Human2.venv\lib\site-packages\openai\api_requestor.py", line 700, in _interpret_response
    self._interpret_response_line(
  File "D:\VSC_PROJECTS\ChatDev_Human2.venv\lib\site-packages\openai\api_requestor.py", line 763, in _interpret_response_line
    raise self.handle_error_response(
openai.error.RateLimitError: Rate limit reached for 10KTPM-200RPM in organization org-BeYMpkHWmgmdJenGBYmjb6Dq on tokens per min. Limit: 10000 / min. Please try again in 6ms. Contact us through our help center at help.openai.com if you continue to have issues.

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "D:\VSC_PROJECTS\ChatDev_Human2.venv\ChatDev\run.py", line 111, in <module>
    chat_chain.execute_chain()
  File "D:\VSC_PROJECTS\ChatDev_Human2.venv\ChatDev\chatdev\chat_chain.py", line 163, in execute_chain
    self.execute_step(phase_item)
  File "D:\VSC_PROJECTS\ChatDev_Human2.venv\ChatDev\chatdev\chat_chain.py", line 133, in execute_step
    self.chat_env = self.phases[phase].execute(self.chat_env,
  File "D:\VSC_PROJECTS\ChatDev_Human2.venv\ChatDev\chatdev\phase.py", line 294, in execute
    self.chatting(chat_env=chat_env,
  File "D:\VSC_PROJECTS\ChatDev_Human2.venv\ChatDev\chatdev\utils.py", line 77, in wrapper
    return func(*args, **kwargs)
  File "D:\VSC_PROJECTS\ChatDev_Human2.venv\ChatDev\chatdev\phase.py", line 131, in chatting
    assistant_response, user_response = role_play_session.step(input_user_msg, chat_turn_limit == 1)
  File "D:\VSC_PROJECTS\ChatDev_Human2.venv\ChatDev\camel\agents\role_playing.py", line 261, in step
    user_response = self.user_agent.step(assistant_msg_rst)
  File "D:\VSC_PROJECTS\ChatDev_Human2.venv\lib\site-packages\tenacity\__init__.py", line 289, in wrapped_f
    return self(f, *args, **kw)
  File "D:\VSC_PROJECTS\ChatDev_Human2.venv\lib\site-packages\tenacity\__init__.py", line 379, in __call__
    do = self.iter(retry_state=retry_state)
  File "D:\VSC_PROJECTS\ChatDev_Human2.venv\lib\site-packages\tenacity\__init__.py", line 326, in iter
    raise retry_exc from fut.exception()
tenacity.RetryError: RetryError[<Future at 0x23a9cd131f0 state=finished raised RateLimitError>]
```
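For anyone hitting the same thing: the underlying error is OpenAI's tokens-per-minute cap for the organization (the traceback shows a 10,000 TPM / 200 RPM GPT-4 limit), and once the tenacity retries around the call are exhausted it surfaces as a `RetryError`. A minimal sketch of a workaround, assuming the legacy `openai<1.0` SDK shown in the traceback, is to wrap the completion call with exponential backoff so repeated `RateLimitError`s wait progressively longer instead of failing the run. The function name, wait/stop values, and the model/messages below are illustrative, not ChatDev's own code:

```python
# Sketch only: retry OpenAI chat completions with exponential backoff on rate limits.
# Assumes the legacy openai<1.0 SDK (openai.ChatCompletion / openai.error.RateLimitError).
import openai
from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_exponential

@retry(
    retry=retry_if_exception_type(openai.error.RateLimitError),
    wait=wait_exponential(multiplier=1, min=2, max=60),  # back off from ~2s up to 60s
    stop=stop_after_attempt(6),                          # give up after 6 attempts
)
def chat_completion_with_backoff(**kwargs):
    # Forward all arguments unchanged to the underlying API call.
    return openai.ChatCompletion.create(**kwargs)

response = chat_completion_with_backoff(
    model="gpt-4",
    messages=[{"role": "user", "content": "Hello"}],
)
```

Alternatively, keeping each request smaller (shorter prompts, fewer tokens per completion) or asking OpenAI for a higher TPM limit on the organization avoids tripping the cap in the first place.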