Skip to content

Commit

Permalink
🐛 Bug: 1. Fix the bug of log errors being repeatedly displayed.
Browse files Browse the repository at this point in the history
2. Fix the bug when the model list is empty.

💻 Code: Remove the redundant redirection code from the GPT model interface
  • Loading branch information
yym68686 committed Sep 6, 2024
1 parent 60e7a94 commit 8eca72e
Show file tree
Hide file tree
Showing 3 changed files with 27 additions and 56 deletions.
6 changes: 2 additions & 4 deletions main.py
Original file line number Diff line number Diff line change
Expand Up @@ -220,8 +220,6 @@ async def process_request(request: Union[RequestModel, ImageGenerationRequest],

return response
except (Exception, HTTPException, asyncio.CancelledError, httpx.ReadError) as e:
logger.error(f"Error with provider {provider['provider']}: {str(e)}")

# 更新失败计数
async with app.middleware_stack.app.lock:
app.middleware_stack.app.channel_failure_counts[provider['provider']] += 1
Expand Down Expand Up @@ -340,9 +338,9 @@ async def request_model(self, request: Union[RequestModel, ImageGenerationReques
if provider['provider'] == provider_name:
new_matching_providers.append(provider)
matching_providers = new_matching_providers
# import json
# print("matching_providers", json.dumps(matching_providers, indent=4, ensure_ascii=False, default=circular_list_encoder))

# import json
# print("matching_providers", json.dumps(matching_providers, indent=4, ensure_ascii=False, default=circular_list_encoder))
use_round_robin = True
auto_retry = True
if safe_get(config, 'api_keys', api_index, "preferences", "USE_ROUND_ROBIN") == False:
Expand Down
52 changes: 12 additions & 40 deletions response.py
Original file line number Diff line number Diff line change
Expand Up @@ -140,48 +140,20 @@ async def fetch_vertex_claude_response_stream(client, url, headers, payload, mod
yield "data: [DONE]\n\r\n"

async def fetch_gpt_response_stream(client, url, headers, payload, max_redirects=5):
    """Stream an OpenAI-style chat completion response as SSE lines.

    POSTs *payload* to *url* and yields each meaningful server-sent-event
    line, stripped and re-terminated with "\n\r\n" for downstream relaying.
    If the upstream responds with an error status, a single error message
    (as produced by check_response) is yielded and the stream ends.

    Args:
        client: an httpx.AsyncClient (or compatible) used for the request.
        url: upstream chat-completions endpoint.
        headers: HTTP headers (authorization etc.) for the request.
        payload: JSON-serializable request body.
        max_redirects: unused; retained only for backward compatibility with
            callers from when this function still followed HTML redirects.

    Yields:
        str lines of the SSE stream, or a dict error message on failure.
    """
    async with client.stream('POST', url, headers=headers, json=payload) as response:
        # Surface upstream HTTP errors as a single yielded message and stop.
        error_message = await check_response(response, "fetch_gpt_response_stream")
        if error_message:
            yield error_message
            return

        # Network chunks may split SSE lines arbitrarily, so accumulate text
        # and only emit once a full "\n"-terminated line is buffered.
        buffer = ""
        async for chunk in response.aiter_text():
            buffer += chunk
            while "\n" in buffer:
                line, buffer = buffer.split("\n", 1)
                # logger.info("line: %s", repr(line))
                # Drop empty lines, bare "data:" keep-alives, and ": ..." SSE comments.
                if line and line != "data: " and line != "data:" and not line.startswith(": "):
                    yield line.strip() + "\n\r\n"

async def fetch_claude_response_stream(client, url, headers, payload, model):
timestamp = datetime.timestamp(datetime.now())
Expand Down
25 changes: 13 additions & 12 deletions utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,18 +29,19 @@ def update_config(config_data):
for index, api_key in enumerate(config_data['api_keys']):
weights_dict = {}
models = []
for model in api_key.get('model'):
if isinstance(model, dict):
key, value = list(model.items())[0]
provider_name = key.split("/")[0]
if "/" in key:
weights_dict.update({provider_name: int(value)})
models.append(key)
if isinstance(model, str):
models.append(model)
config_data['api_keys'][index]['weights'] = weights_dict
config_data['api_keys'][index]['model'] = models
api_keys_db[index]['model'] = models
if api_key.get('model'):
for model in api_key.get('model'):
if isinstance(model, dict):
key, value = list(model.items())[0]
provider_name = key.split("/")[0]
if "/" in key:
weights_dict.update({provider_name: int(value)})
models.append(key)
if isinstance(model, str):
models.append(model)
config_data['api_keys'][index]['weights'] = weights_dict
config_data['api_keys'][index]['model'] = models
api_keys_db[index]['model'] = models

api_list = [item["api"] for item in api_keys_db]
# logger.info(json.dumps(config_data, indent=4, ensure_ascii=False, default=circular_list_encoder))
Expand Down

0 comments on commit 8eca72e

Please sign in to comment.