
Commit c6af226

Merge branch 'main' into issue-209
2 parents: 6cafe76 + 8da7955

19 files changed: +233 / -70 lines

api/openapi.json

Lines changed: 2 additions & 1 deletion
@@ -1607,7 +1607,8 @@
         "vllm",
         "ollama",
         "lm_studio",
-        "llamacpp"
+        "llamacpp",
+        "openai"
       ],
       "title": "ProviderType",
       "description": "Represents the different types of providers we support."

poetry.lock

Lines changed: 5 additions & 5 deletions
Some generated files are not rendered by default.

pyproject.toml

Lines changed: 2 additions & 2 deletions
@@ -12,7 +12,7 @@ PyYAML = "==6.0.2"
 fastapi = "==0.115.8"
 uvicorn = "==0.34.0"
 structlog = "==25.1.0"
-litellm = "==1.60.2"
+litellm = "==1.60.4"
 llama_cpp_python = "==0.3.5"
 cryptography = "==44.0.0"
 sqlalchemy = "==2.0.37"
@@ -41,7 +41,7 @@ ruff = "==0.9.4"
 bandit = "==1.8.2"
 build = "==1.2.2.post1"
 wheel = "==0.45.1"
-litellm = "==1.60.2"
+litellm = "==1.60.4"
 pytest-asyncio = "==0.25.3"
 llama_cpp_python = "==0.3.5"
 scikit-learn = "==1.6.1"

src/codegate/api/v1.py

Lines changed: 3 additions & 2 deletions
@@ -8,6 +8,7 @@
 from fastapi.routing import APIRoute
 from pydantic import BaseModel, ValidationError
 
+import codegate.muxing.models as mux_models
 from codegate import __version__
 from codegate.api import v1_models, v1_processing
 from codegate.db.connection import AlreadyExistsError, DbReader
@@ -477,7 +478,7 @@ async def delete_workspace_custom_instructions(workspace_name: str):
 )
 async def get_workspace_muxes(
     workspace_name: str,
-) -> List[v1_models.MuxRule]:
+) -> List[mux_models.MuxRule]:
     """Get the mux rules of a workspace.
 
     The list is ordered in order of priority. That is, the first rule in the list
@@ -501,7 +502,7 @@ async def get_workspace_muxes(
 )
 async def set_workspace_muxes(
     workspace_name: str,
-    request: List[v1_models.MuxRule],
+    request: List[mux_models.MuxRule],
 ):
     """Set the mux rules of a workspace."""
     try:

src/codegate/api/v1_models.py

Lines changed: 0 additions & 23 deletions
@@ -267,26 +267,3 @@ class ModelByProvider(pydantic.BaseModel):
 
     def __str__(self):
         return f"{self.provider_name} / {self.name}"
-
-
-class MuxMatcherType(str, Enum):
-    """
-    Represents the different types of matchers we support.
-    """
-
-    # Always match this prompt
-    catch_all = "catch_all"
-
-
-class MuxRule(pydantic.BaseModel):
-    """
-    Represents a mux rule for a provider.
-    """
-
-    provider_id: str
-    model: str
-    # The type of matcher to use
-    matcher_type: MuxMatcherType
-    # The actual matcher to use. Note that
-    # this depends on the matcher type.
-    matcher: Optional[str] = None

src/codegate/config.py

Lines changed: 1 addition & 0 deletions
@@ -17,6 +17,7 @@
 # Default provider URLs
 DEFAULT_PROVIDER_URLS = {
     "openai": "https://api.openai.com/v1",
+    "openrouter": "https://openrouter.ai/api/v1",
     "anthropic": "https://api.anthropic.com/v1",
     "vllm": "http://localhost:8000", # Base URL without /v1 path
     "ollama": "http://localhost:11434", # Default Ollama server URL

src/codegate/db/models.py

Lines changed: 1 addition & 0 deletions
@@ -128,6 +128,7 @@ class ProviderType(str, Enum):
     ollama = "ollama"
     lm_studio = "lm_studio"
     llamacpp = "llamacpp"
+    openrouter = "openai"
 
 
 class GetPromptWithOutputsRow(BaseModel):
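
One subtlety worth noting: since ProviderType is a str Enum and (per the api/openapi.json hunk above) the schema gains an "openai" value, giving openrouter the value "openai" makes it an alias of an existing member rather than a new one. A minimal sketch, assuming the full enum also defines openai = "openai":

    from enum import Enum

    # Minimal sketch, not the full codegate enum: in a Python Enum, a member
    # whose value duplicates an earlier member's value becomes an alias of
    # that earlier member rather than a distinct entry.
    class ProviderType(str, Enum):
        openai = "openai"
        openrouter = "openai"  # alias of ProviderType.openai

    assert ProviderType.openrouter is ProviderType.openai
    assert ProviderType("openai") is ProviderType.openai
    print(list(ProviderType))  # aliases are not listed: [<ProviderType.openai: 'openai'>]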

src/codegate/muxing/adapter.py

Lines changed: 2 additions & 0 deletions
@@ -106,6 +106,8 @@ def __init__(self):
             db_models.ProviderType.anthropic: self._format_antropic,
             # Our Lllamacpp provider emits OpenAI chunks
             db_models.ProviderType.llamacpp: self._format_openai,
+            # OpenRouter is a dialect of OpenAI
+            db_models.ProviderType.openrouter: self._format_openai,
         }
 
     def _format_ollama(self, chunk: str) -> str:

src/codegate/muxing/models.py

Lines changed: 27 additions & 0 deletions
@@ -0,0 +1,27 @@
+from enum import Enum
+from typing import Optional
+
+import pydantic
+
+
+class MuxMatcherType(str, Enum):
+    """
+    Represents the different types of matchers we support.
+    """
+
+    # Always match this prompt
+    catch_all = "catch_all"
+
+
+class MuxRule(pydantic.BaseModel):
+    """
+    Represents a mux rule for a provider.
+    """
+
+    provider_id: str
+    model: str
+    # The type of matcher to use
+    matcher_type: MuxMatcherType
+    # The actual matcher to use. Note that
+    # this depends on the matcher type.
+    matcher: Optional[str] = None
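
The relocated models can be exercised standalone. A minimal usage sketch; the field values are placeholders, and model_dump(mode="json") assumes Pydantic v2:

    from codegate.muxing.models import MuxMatcherType, MuxRule

    # Placeholder values, for illustration only.
    rule = MuxRule(
        provider_id="prov-123",
        model="some-model",
        matcher_type=MuxMatcherType.catch_all,
    )
    # catch_all needs no matcher payload, so `matcher` stays None.
    print(rule.model_dump(mode="json"))
    # {'provider_id': 'prov-123', 'model': 'some-model',
    #  'matcher_type': 'catch_all', 'matcher': None}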

src/codegate/muxing/router.py

Lines changed: 5 additions & 1 deletion
@@ -3,6 +3,7 @@
 import structlog
 from fastapi import APIRouter, HTTPException, Request
 
+from codegate.clients.detector import DetectClient
 from codegate.muxing import rulematcher
 from codegate.muxing.adapter import BodyAdapter, ResponseAdapter
 from codegate.providers.registry import ProviderRegistry
@@ -38,6 +39,7 @@ def _ensure_path_starts_with_slash(self, path: str) -> str:
     def _setup_routes(self):
 
         @self.router.post(f"/{self.route_name}/{{rest_of_path:path}}")
+        @DetectClient()
         async def route_to_dest_provider(
             request: Request,
             rest_of_path: str = "",
@@ -73,7 +75,9 @@ async def route_to_dest_provider(
             api_key = model_route.auth_material.auth_blob
 
             # Send the request to the destination provider. It will run the pipeline
-            response = await provider.process_request(new_data, api_key, rest_of_path)
+            response = await provider.process_request(
+                new_data, api_key, rest_of_path, request.state.detected_client
+            )
             # Format the response to the client always using the OpenAI format
             return self._response_adapter.format_response_to_client(
                 response, model_route.endpoint.provider_type
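
These hunks imply a contract without showing DetectClient's implementation: the decorator must set request.state.detected_client before the handler body runs, because process_request now receives that value. A hypothetical sketch of a decorator honoring that contract (the real logic lives in codegate.clients.detector and may differ):

    import functools

    from fastapi import Request

    def detect_client_sketch(handler):
        """Hypothetical stand-in for DetectClient()."""
        @functools.wraps(handler)
        async def wrapper(request: Request, *args, **kwargs):
            # Assumption: detection keys off request metadata such as the
            # User-Agent header; codegate's actual heuristics may differ.
            request.state.detected_client = request.headers.get("user-agent", "unknown")
            return await handler(request, *args, **kwargs)
        return wrapper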
